text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Author: Braden Czapla (2019)
# Last modified: 2019-04-30
# Original data: Tsuda et al. 2018, https://doi.org/10.1364/OE.26.006899
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.special import wofz
import matplotlib.pyplot as plt
###############################################################################
# Determine wavelengths to sample
def w(w_max, w_min, step):
    """Build an array of wavelengths covering [w_min, w_max] on a regular grid.

    Grid points are integer multiples of ``step``; w_min and w_max themselves
    are prepended/appended whenever the nearest grid point is not already
    within ``step/5`` of them.

    Returns the sample array and its length.
    """
    # First grid point strictly above w_min (next multiple of step).
    first_point = (np.floor_divide(w_min, step) + 1) * step
    # Number of whole steps that fit in the requested span.
    n_steps = np.floor_divide(w_max - w_min, step)
    last_point = first_point + n_steps * step
    # NOTE(review): last_point can exceed w_max by up to one step when w_min
    # sits exactly on the grid — confirm acceptable for the intended ranges.
    samples = np.linspace(first_point, last_point, int(n_steps) + 1)
    # Make sure both requested endpoints are present in the returned array.
    if not np.isclose(samples[0], w_min, atol=step / 5.):
        samples = np.concatenate((np.array([w_min]), samples))
    if not np.isclose(samples[-1], w_max, atol=step / 5.):
        samples = np.concatenate((samples, np.array([w_max])))
    return samples, len(samples)
# Compute dielectric function using Brendel-Bormann (aka Gaussian or Gaussian-convoluted Drude–Lorentz) model.
# Units of w and ResFreq must match and must be directly proportional to angular frequency. All other parameters are unitless.
def Gaussian(w, ResFreq, Strength, Damping_L, Damping_G, EpsInf):  # Brendel-Bormann model
    """
    Compute the complex dielectric function with the Brendel-Bormann
    (Gaussian-convoluted Drude-Lorentz) oscillator model.

    Model Source: https://doi.org/10.1063/1.350737; https://doi.org/10.1364/AO.37.005271

    Units of ``w`` and ``ResFreq`` must match and must be directly
    proportional to angular frequency. All other parameters are unitless.

    :param w:         frequencies at which to evaluate the model (1-D array)
    :param ResFreq:   oscillator resonance frequencies (1-D array)
    :param Strength:  oscillator strengths, one per resonance
    :param Damping_L: Lorentzian damping per resonance (normalized by ResFreq)
    :param Damping_G: Gaussian damping per resonance (normalized by ResFreq)
    :param EpsInf:    high-frequency permittivity offset (scalar)
    :return: complex permittivity at each frequency in ``w``
    """
    # FIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `complex` yields the same complex128 dtype.
    Permittivity = EpsInf*np.ones(len(w), dtype=complex)
    for ii in range(len(ResFreq)):
        w_bar = w/ResFreq[ii]
        # Complex argument combining the Lorentzian broadening with the
        # reduced frequency (eq. from the model sources above).
        a = w_bar/np.sqrt(2.)*( np.sqrt( np.sqrt( 1. + (Damping_L[ii]/w_bar)**2 ) + 1. ) + 1j*np.sqrt( np.sqrt( 1. + (Damping_L[ii]/w_bar)**2 ) - 1. ) )
        coeff = 1j*np.sqrt(np.pi/2.)*Strength[ii]/(2.*a*Damping_G[ii])
        # Two Faddeeva-function (wofz) terms realize the Gaussian convolution.
        Permittivity += coeff*wofz( (a-1.)/(np.sqrt(2.)*Damping_G[ii]) )
        Permittivity += coeff*wofz( (a+1.)/(np.sqrt(2.)*Damping_G[ii]) )
    return Permittivity
# Save w, n, k to YML file
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    """Write tabulated wavelength/n/k data in refractiveindex.info YML layout.

    :param w_um:       wavelengths in micrometers (1-D array)
    :param RefInd:     complex refractive index; real part -> n, imag part -> k
    :param filename:   output path for the YML file
    :param references: text appended after the 'REFERENCES:' key
    :param comments:   text appended after the 'COMMENTS:' key
    """
    yml_header = '\n'.join([
        '# this file is part of refractiveindex.info database',
        '# refractiveindex.info database is in the public domain',
        '# copyright and related rights waived via CC0 1.0',
        '',
        'REFERENCES:' + references,
        'COMMENTS:' + comments,
        'DATA:',
        ' - type: tabulated nk',
        ' data: |',
    ])
    table = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    # newline='\n ' indents every data row so it nests under the 'data: |' key.
    np.savetxt(filename, table, fmt='%4.2f %#.4g %#.3e', delimiter=' ',
               header=yml_header, comments='', newline='\n ')
    return
###############################################################################
## Wavelengths to sample ##
# Sampling range corresponds to 4000 cm^-1 down to 550 cm^-1, in micrometers.
w_um_max = 10000./550. # [um]
w_um_min = 10000./4000. # [um]
step_um = 0.01 # [um]
w_um, N_freq = w(w_um_max, w_um_min, step_um)
#w_um = np.linspace(10000./4000., 10000./550., 10000)
# Convert wavelength [um] to wavenumber [cm^-1] for the oscillator model.
w_invcm = 10000./w_um
## ##
## Model Parameters ##
# See Table 2
# Resonance frequencies of the 23 oscillators.
ResFreq = np.array([752.35, 807.94, 825.16, 843.27, 916.18, 967.56, 991.66, 1067.00, 1149.39, 1190.59, 1240.53, 1269.65, 1366.54, 1388.26, 1434.65, 1450.70, 1482.33, 1730.54, 2840.70, 2928.24, 2951.16, 2997.47, 3440.08]) # [cm^-1]
Strength = np.array([3.14E-03, 7.52E-04, 7.92E-05, 3.11E-03, 2.14E-03, 3.57E-03, 1.96E-03, 1.07E-03, 3.00E-02, 9.82E-03, 4.80E-03, 6.43E-03, 2.14E-03, 6.03E-04, 8.89E-04, 4.87E-03, 1.51E-03, 1.23E-02, 4.70E-05, 1.23E-03, 3.60E-04, 8.83E-04, 3.95E-05])
# Widths are normalized by ResFreq to be unitless, as Gaussian() expects.
# 2*sqrt(2*ln 2) is the Gaussian FWHM-to-standard-deviation ratio — the
# table widths are presumably FWHM values in cm^-1; confirm against Table 2.
Damping_G = np.array([5.67, 0.87, 5.04, 0.41, 26.67, 35.26, 18.17, 21.11, 0.30, 19.38, 25.11, 0.52, 1.35, 17.00, 12.66, 4.69, 19.33, 16.23, 17.94, 4.06, 18.31, 24.82, 20.73])/ResFreq/(2.*np.sqrt(2.*np.log(2.)))
Damping_L = np.array([12.26, 16.78, 0.09, 24.88, 23.95, 0.02, 0.01, 0.02, 32.76, 13.60, 0.11, 25.53, 65.85, 0.13, 0.02, 27.42, 3.54, 7.31, 0.25, 66.46, 0.39, 25.65, 25.81])/ResFreq
# High-frequency permittivity offset.
EpsInf = 2.162
## ##
## Generate and Save Data ##
eps = Gaussian(w_invcm, ResFreq, Strength, Damping_L, Damping_G, EpsInf)
# Complex refractive index n + ik from the complex permittivity.
RefInd = np.sqrt(eps)
references = ' "S. Tsuda, S. Yamaguchi, Y. Kanamori, and H. Yugami. Spectral and angular shaping of infrared radiation in a polymer resonator with molecular vibrational modes, <a href=\"https://doi.org/10.1364/OE.26.006899\"><i>Opt. Express</i> <b>26</b>, 6899-6915 (2018)</a>"'
comments = ' "MicroChem PMMA resist with a molecular weight of 950,000; Baked at 100°C for 10 min on a hot plate; Brendel-Bormann model parameters provided in Table 2 of manuscript."'
SaveYML(w_um, RefInd, 'Tsuda-PMMA (Brendel-Bormann Model).yml', references, comments)
## ##
## Plotting ##
# Reproduce Fig. 3a/3b of the source paper as a visual sanity check.
plt.figure('Figure 3a - Real(ϵ)')
plt.plot(w_invcm, np.real(eps), label='PMMA')
plt.legend(loc=1)
plt.xlim(500,4000)
plt.ylim(1,3)
plt.xlabel('Wavenumber (cm$^{-1}$)')
plt.ylabel('Real(ϵ)')
plt.figure('Figure 3b - Imag(ϵ)')
plt.plot(w_invcm, np.imag(eps), label='PMMA')
plt.legend(loc=1)
plt.xlim(500,4000)
plt.ylim(0,2)
plt.xlabel('Wavenumber (cm$^{-1}$)')
plt.ylabel('Imag(ϵ)')
## ##
|
polyanskiy/refractiveindex.info-scripts
|
scripts/Tsuda 2018 - PMMA (BB model).py
|
Python
|
gpl-3.0
| 4,973
|
[
"Gaussian"
] |
c7f078c1cff38cf03abcadac5e9be2735c2d3a2aa6639d5b81bc51a9038acc5e
|
# MARTINIZE
# A simple, versatile tool for coarse-graining molecular systems
# Copyright (C) 2017 Tsjerk A. Wassenaar and contributors
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import absolute_import
import sys
import os
import logging
import inspect
import simopt
from simopt import MULTI, MA
from . import core
from .converters import atom, atoms, Link
from .ForceFields.forcefield import FORCE_FIELD_COLLECTION, JSONForceField
# Option list
# Full option list for the martinize command line.
# FIX: the original registered "-v"/"verbose" twice — once as a str
# ("Verbosity level") and once as a bool flag. update_options() uses
# options["verbose"] as a boolean, so only the bool flag is kept.
OPTIONS = simopt.Options([
    # level opt          attribute         type  num default         flags description
    """
    Input/output related options
    """,
    (0, "-f",        "input",          str,   1, None,           0,     "Input GRO or PDB file"),
    (0, "-o",        "outtop",         str,   1, "martini.top",  0,     "Output topology (TOP)"),
    (0, "-x",        "outstruc",       str,   1, None,           0,     "Output coarse grained structure (PDB)"),
    (0, "-n",        "index",          str,   1, None,           0,     "Output index file with CG (and multiscale) beads."),
    (1, "-nmap",     "mapping",        str,   1, None,           0,     "Output index file containing per bead mapping."),
    (0, "-v",        "verbose",        bool,  0, False,          0,     "Verbose. Be loud and noisy."),
    (1, "-ss",       "secstruc",       str,   1, None,           0,     "Secondary structure (File or string)"),
    (1, "-ssc",      "sscutoff",       float, 1, 0.5,            0,     "Cutoff fraction for ss in case of ambiguity (default: 0.5)."),
    (0, "-dssp",     "dsspexe",        str,   1, None,           0,     "DSSP executable for determining structure"),
    # ("-pymol", "pymolexe", str, 1, None, "PyMOL executable for determining structure"),
    (0, "-collagen", "collagen",       bool,  0, False,          0,     "Use collagen parameters"),
    (1, "-his",      "sethischarge",   bool,  0, False,          0,     "Interactively set the charge of each His-residue."),
    (0, "-nt",       "neutraltermini", bool,  0, False,          0,     "Set neutral termini (charged is default)"),
    (1, "-cb",       "chargedbreaks",  bool,  0, False,          0,     "Set charges at chain breaks (neutral is default)"),
    (0, "-cys",      "cystines",       str,   1, None,           MULTI, "Disulphide bond (+)"),
    (1, "-link",     "links",          Link,  1, None,           MULTI, "Link (+)"),
    (1, "-merge",    "merges",         str,   1, None,           MULTI, "Merge chains: e.g. -merge A,B,C (+)"),
    (0, "-name",     "name",           str,   1, None,           0,     "Moleculetype name"),
    (1, "-p",        "posres",         str,   1, 'None',         0,     "Output position restraints (None/All/Backbone) (default: None)"),
    (1, "-pf",       "posrefc",        float, 1, 1000,           0,     "Position restraints force constant (default: 1000 kJ/mol/nm^2)"),
    (1, "-ed",       "extdih",         bool,  0, False,          0,     "Use dihedrals for extended regions rather than elastic bonds)"),
    (1, "-sep",      "separate",       bool,  0, False,          0,     "Write separate topologies for identical chains."),
    (0, "-ff",       "forcefield",     str,   1, 'martini22',    0,     "Which forcefield to use"),
    # Elastic network parameters: Fij = Fc exp( -a (rij - lo)**p )
    (1, "-elastic",  "elastic",        bool,  0, False,          0,     "Write elastic bonds"),
    (1, "-ef",       "elastic_fc",     float, 1, 500,            0,     "Elastic bond force constant Fc"),
    (1, "-el",       "ellowerbound",   float, 1, 0,              0,     "Elastic bond lower cutoff: F = Fc if rij < lo"),
    (1, "-eu",       "elupperbound",   float, 1, 0.90,           0,     "Elastic bond upper cutoff: F = 0 if rij > up"),
    (1, "-ea",       "eldecay",        float, 1, 0,              0,     "Elastic bond decay factor a"),
    (1, "-ep",       "elpower",        float, 1, 1,              0,     "Elastic bond decay power p"),
    (1, "-em",       "elminforce",     float, 1, 0,              0,     "Remove elastic bonds with force constant lower than this"),
    (1, "-eb",       "elbeads",        str,   1, 'BB',           0,     "Comma separated list of bead names for elastic bonds"),
    # ("-hetatm", "hetatm", bool, 0, False, "Include HETATM records from PDB file (Use with care!)"),
    (1, "-multi",    "multi",          str,   1, None,           MULTI, "Chain to be set up for multiscaling (+)"),
])
class MartinizeException(Exception):
    """Error raised for martinize-specific failures (e.g. unloadable forcefield).

    FIX: derives from Exception rather than BaseException — BaseException is
    reserved for exit-like signals (KeyboardInterrupt, SystemExit) and would
    escape generic ``except Exception`` handlers. All visible catch sites name
    MartinizeException explicitly, so this change is backward compatible.
    """
    pass
def update_options(options):
    """
    Post-process parsed command-line options in place.

    Resolves the force field, normalises list-valued options, translates
    cystine specifications into SG-SG links, and configures logging.
    Raises MartinizeException if the force field cannot be loaded.
    Returns the same (mutated) options mapping.
    """
    options["Version"] = ""
    # Resolve the force field: first try the built-in collection, then
    # fall back to interpreting the value as a path to a JSON definition.
    if options['forcefield'].lower() in FORCE_FIELD_COLLECTION:
        options['ForceField'] = FORCE_FIELD_COLLECTION[options['forcefield'].lower()]()
    elif os.path.isfile(options['forcefield']):
        options['ForceField'] = JSONForceField(options['forcefield'])
    else:
        message = "Forcefield '{}' can not be loaded.".format(options['forcefield'])
        logging.error(message)
        raise MartinizeException(message)
    # Process the raw options from the command line
    # Boolean options are set to more intuitive variables
    options['RetainHETATM'] = False  # options['-hetatm']
    options['MixedChains'] = False  # options['-mixed']
    options['elbeads'] = options['elbeads'].split(',')
    options['posres'] = [i.lower() for i in options['posres'].split(",")]
    if "backbone" in options['posres']:
        # NOTE(review): entries were lowercased above but 'BB' is appended in
        # upper case — confirm downstream comparison is case-sensitive on 'BB'.
        options['posres'].append("BB")
    if "none" in options['posres']:
        options['posres'] = []
    if options['ForceField'].ElasticNetwork:
        # Some forcefields, like elnedyn, always use an elatic network.
        # This is set in the forcefield file, with the parameter ElasticNetwork.
        options['elastic'] = True
    # Merges, links and cystines
    # "all" short-circuits to a single ["all"] entry; otherwise each -merge
    # value is split into a list of chain identifiers.
    options['merges'] = "all" in options['merges'] and ["all"] or [i.split(",") for i in options['merges']]
    # Cystines
    # This should be done for all special bonds listed in the _special_ dictionary
    CystineCheckBonds = False  # By default, do not detect cystine bridges
    CystineMaxDist2 = (10*0.22)**2  # Maximum distance (A) for detection of SS bonds
    # Each -cys value is either "auto", a numeric cutoff (nm, converted to a
    # squared distance in A), or an explicit pair of cysteine specifications.
    for i in options['cystines']:
        if i.lower() == "auto":
            CystineCheckBonds = True
        elif i.replace(".", "").isdigit():
            CystineCheckBonds = True
            CystineMaxDist2 = (10*float(i))**2
        else:
            # This item should be a pair of cysteines
            cysA, cysB = [atom(j) for j in i.split(",")]
            # Internally we handle the residue number shifted by ord(' ')<<20.
            # We have to add this to the cys-residue numbers given here as well.
            constant = 32 << 20
            options.links.append(Link(a=("SG", "CYS", cysA[2]+constant, cysA[3]),
                                      b=("SG", "CYS", cysB[2]+constant, cysB[3]),
                                      length=-1, fc=-1))
    # Now we have done everything to it, we can add Link/cystine related stuff to options
    # 'multi' is not stored anywhere else, so that we also add
    options['CystineCheckBonds'] = CystineCheckBonds
    options['CystineMaxDist2'] = CystineMaxDist2
    ## LOGGING ##
    # Set the log level and communicate which options are set and what is happening
    # If 'Verbose' is set, change the logger level
    logLevel = options["verbose"] and logging.DEBUG or logging.INFO
    logging.basicConfig(format='%(levelname)-7s %(message)s', level=logLevel)
    #logging.info('MARTINIZE, script version %s'%__version__)
    logging.info('If you use this script please cite:')
    logging.info('de Jong et al., J. Chem. Theory Comput., 2013, DOI:10.1021/ct300646g')
    logging.info("Chain termini will%s be charged"%(options['neutraltermini'] and " not" or ""))
    # NOTE(review): "brakes" below is a typo for "breaks" in a user-facing
    # log message; left untouched here since this edit changes comments only.
    logging.info("Residues at chain brakes will%s be charged"%((not options['chargedbreaks']) and " not" or ""))
    # 'ForceField' is always set by the resolution block above, so the else
    # branch appears unreachable from this function alone — confirm whether
    # callers can pass a pre-populated options mapping that skips it.
    if 'ForceField' in options:
        logging.info("The %s forcefield will be used."%(options['ForceField'].name))
    else:
        logging.error("Forcefield '%s' has not been implemented."%(options['forcefield']))
        sys.exit()
    if options['extdih']:
        logging.info('Dihedrals will be used for extended regions. (Elastic bonds may be more stable)')
    else:
        logging.info('Local elastic bonds will be used for extended regions.')
    if options['posres']:
        logging.info("Position restraints will be generated.")
        logging.warning("Position restraints are only enabled if -DPOSRES is set in the MDP file")
    if options['MixedChains']:
        logging.warning("So far no parameters for mixed chains are available. This might crash the program!")
    if options['RetainHETATM']:
        logging.warning("I don't know how to handle HETATMs. This will probably crash the program.")
    return options
def main(argv):
    """
    Command-line entry point: parse arguments, post-process them, and run
    the core martinize workflow.

    :param argv: full argument vector (argv[0] is the program name).
    :return: process exit status — 0 success or help shown, 1 usage error,
             2 core processing error, 3 missing mandatory option, 4 OS error,
             5 force-field loading error.
    """
    ## TEMPORARY ---
    # Exception to be defined in martinize
    ## <---
    ## OPTIONS
    # Parse options
    try:
        options = OPTIONS.parse(argv[1:])
        options["Arguments"] = argv[1:]
        update_options(options)
    except simopt.SimoptHelp:
        print(OPTIONS.help(argv[1:]))
        return 0
    except simopt.MissingMandatoryError as e:
        print(e)
        return 3
    except simopt.Usage as e:
        print(e)
        return 1
    except MartinizeException as e:
        # Raised by update_options when the force field cannot be loaded.
        print(e)
        return 5
    ## WORK
    try:
        # NOTE(review): `system` is assigned but unused here — presumably kept
        # for the unfinished OUTPUT stage sketched below; confirm before removal.
        system = core.main(options)
    except MartinizeException as e:
        print(e)
        return 2
    except OSError:
        return 4
    ## OUTPUT
    # Build atom list
    # Build topology
    # Build index
    return 0
def cli():
    """Console-script entry point: run main() on the process arguments and
    exit with its status code."""
    # sys.exit(code) simply raises SystemExit(code); raise it directly.
    raise SystemExit(main(sys.argv))
|
Tsjerk/Martinize
|
martinize/cli.py
|
Python
|
gpl-2.0
| 10,450
|
[
"PyMOL"
] |
3161e494d10fd12e99c3070e15e0399871069235254dd931187002173c17d58c
|
import argparse
from coalib.misc import Constants
from coalib.collecting.Collectors import get_all_bears_names
class CustomFormatter(argparse.RawDescriptionHelpFormatter):
    """
    Help formatter that keeps metavars in the usage line but drops them from
    the detailed arguments section, so options render as "-f, --files".
    """

    def _format_action_invocation(self, action):
        """Return the invocation text for one action in the arguments list."""
        if action.option_strings:
            # Optional argument: show only the joined flag spellings.
            return ', '.join(action.option_strings)
        # Positional argument: no flags, so show the bare metavar.
        metavar, = self._metavar_formatter(action, action.dest)(1)
        return metavar
def default_arg_parser(formatter_class=None):
    """
    This function creates an ArgParser to parse command line arguments.

    :param formatter_class: Formatting the arg_parser output into a specific
                            form. For example: In the manpage format.
    :return: the fully configured ``argparse.ArgumentParser`` for coala.
    """
    formatter_class = (CustomFormatter if formatter_class is None
                       else formatter_class)
    # Long description shown by `coala --help`; rendered verbatim thanks to
    # the RawDescriptionHelpFormatter base of CustomFormatter.
    description = """
coala provides a common command-line interface for linting and fixing all your
code, regardless of the programming languages you use.
To find out what kind of analysis coala offers for the languages you use, visit
http://coala.io/languages, or run::
    $ coala --show-bears --filter-by-language C Python
To perform code analysis, simply specify the analysis routines (bears) and the
files you want it to run on, for example:
    spaceBear::
        $ coala --bears SpaceConsistencyBear --files **.py
coala can also automatically fix your code:
    spacePatchBear::
        $ coala --bears SpaceConsistencyBear --files **.py --apply-patches
To run coala without user interaction, run the `coala --non-interactive`,
`coala --json` and `coala --format` commands.
"""
    arg_parser = argparse.ArgumentParser(
        formatter_class=formatter_class,
        prog='coala',
        description=description,
        # Use our own help so that we can put it in the group we want
        add_help=False)
    # Positional arguments: section names to execute exclusively.
    arg_parser.add_argument('TARGETS',
                            nargs='*',
                            help='sections to be executed exclusively')
    # --- Info group: help and version ---
    info_group = arg_parser.add_argument_group('Info')
    info_group.add_argument('-h',
                            '--help',
                            action='help',
                            help='show this help message and exit')
    info_group.add_argument('-v',
                            '--version',
                            action='version',
                            version=Constants.VERSION)
    # --- Mode group: how coala interacts and reports ---
    mode_group = arg_parser.add_argument_group('Mode')
    mode_group.add_argument(
        '-C', '--non-interactive', const=True, action='store_const',
        help='run coala in non interactive mode')
    mode_group.add_argument(
        '--ci', action='store_const', dest='non_interactive', const=True,
        help='continuous integration run, alias for `--non-interactive`')
    mode_group.add_argument(
        '--json', const=True, action='store_const',
        help='mode in which coala will display output as json')
    mode_group.add_argument(
        '--format', const=True, nargs='?', metavar='STR',
        help='output results with a custom format string, e.g. '
             '"Message: {message}"; possible placeholders: '
             'id, origin, file, line, end_line, column, end_column, '
             'severity, severity_str, message, message_base, '
             'message_arguments, affected_code, source_lines')
    # --- Configuration group: config file discovery and caching ---
    config_group = arg_parser.add_argument_group('Configuration')
    config_group.add_argument(
        '-c', '--config', nargs=1, metavar='FILE',
        help='configuration file to be used, defaults to {}'.format(
            Constants.default_coafile))
    config_group.add_argument(
        '-F', '--find-config', action='store_const', const=True,
        help='find {} in ancestors of the working directory'.format(
            Constants.default_coafile))
    config_group.add_argument(
        '-I', '--no-config', const=True, action='store_const',
        help='run without using any config file')
    config_group.add_argument(
        '-s', '--save', nargs='?', const=True, metavar='FILE',
        help='save used arguments to a config file to a {}, the given path, '
             'or at the value of -c'.format(Constants.default_coafile))
    config_group.add_argument(
        '--disable-caching', const=True, action='store_const',
        help='run on all files even if unchanged')
    config_group.add_argument(
        '--flush-cache', const=True, action='store_const',
        help='rebuild the file cache')
    config_group.add_argument(
        '--no-autoapply-warn', const=True, action='store_const',
        help='turn off warning about patches not being auto applicable')
    # --- Inputs group: what to analyse and with which bears ---
    inputs_group = arg_parser.add_argument_group('Inputs')
    # The `.completer` attribute is consumed by argcomplete (see below) to
    # offer bear-name tab completion in the shell.
    inputs_group.add_argument(
        '-b', '--bears', nargs='+', metavar='NAME',
        help='names of bears to use').completer = (
            lambda *args, **kwargs: get_all_bears_names())  # pragma: no cover
    inputs_group.add_argument(
        '-f', '--files', nargs='+', metavar='FILE',
        help='files that should be checked')
    inputs_group.add_argument(
        '-i', '--ignore', nargs='+', metavar='FILE',
        help='files that should be ignored')
    inputs_group.add_argument(
        '--limit-files', nargs='+', metavar='FILE',
        help="filter the `--files` argument's matches further")
    inputs_group.add_argument(
        '-d', '--bear-dirs', nargs='+', metavar='DIR',
        help='additional directories which may contain bears')
    # --- Outputs group: verbosity, filtering and result destinations ---
    outputs_group = arg_parser.add_argument_group('Outputs')
    outputs_group.add_argument(
        '-V', '--verbose', action='store_const',
        dest='log_level', const='DEBUG',
        help='alias for `-L DEBUG`')
    outputs_group.add_argument(
        '-L', '--log-level', nargs=1,
        choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'], metavar='ENUM',
        help='set log output level to DEBUG/INFO/WARNING/ERROR, '
             'defaults to INFO')
    outputs_group.add_argument(
        '-m', '--min-severity', nargs=1,
        choices=('INFO', 'NORMAL', 'MAJOR'), metavar='ENUM',
        help='set minimal result severity to INFO/NORMAL/MAJOR')
    outputs_group.add_argument(
        '-N', '--no-color', const=True, action='store_const',
        help='display output without coloring (excluding logs)')
    outputs_group.add_argument(
        '-B', '--show-bears', const=True, action='store_const',
        help='list all bears')
    outputs_group.add_argument(
        '-l', '--filter-by-language', nargs='+', metavar='LANG',
        help='filters `--show-bears` by the given languages')
    outputs_group.add_argument(
        '-p', '--show-capabilities', nargs='+', metavar='LANG',
        help='show what coala can fix and detect for the given languages')
    outputs_group.add_argument(
        '-D', '--show-description', const=True, action='store_const',
        help='show bear descriptions for `--show-bears`')
    outputs_group.add_argument(
        '--show-details', const=True, action='store_const',
        help='show bear details for `--show-bears`')
    outputs_group.add_argument(
        '--log-json', const=True, action='store_const',
        help='output logs as json along with results'
             ' (must be called with --json)')
    outputs_group.add_argument(
        '-o', '--output', nargs=1, metavar='FILE',
        help='write results to the given file (must be called with --json)')
    outputs_group.add_argument(
        '-r', '--relpath', nargs='?', const=True,
        help='return relative paths for files (must be called with --json)')
    # --- Miscellaneous group: everything else ---
    misc_group = arg_parser.add_argument_group('Miscellaneous')
    misc_group.add_argument(
        '-S', '--settings', nargs='+', metavar='SETTING',
        help='arbitrary settings in the form of section.key=value')
    misc_group.add_argument(
        '-a', '--apply-patches', action='store_const',
        dest='default_actions', const='*: ApplyPatchAction',
        help='apply all patches automatically if possible')
    misc_group.add_argument(
        '-j', '--jobs', type=int,
        help='number of jobs to use in parallel')
    misc_group.add_argument(
        '-n', '--no-orig', const=True, action='store_const',
        help="don't create .orig backup files before patching")
    misc_group.add_argument(
        '--debug', const=True, action='store_const',
        help='run coala in debug mode, starting ipdb, '
             'which must be separately installed, '
             'on unexpected internal exceptions '
             '(implies --verbose)')
    try:
        # Auto completion should be optional, because of somewhat complicated
        # setup.
        import argcomplete
        argcomplete.autocomplete(arg_parser)
    except ImportError:  # pragma: no cover
        pass
    return arg_parser
|
arjunsinghy96/coala
|
coalib/parsing/DefaultArgParser.py
|
Python
|
agpl-3.0
| 9,113
|
[
"VisIt"
] |
3fb9e8a79c72d80367ca8023a281e06da6d29eab338bdb9b43f07fcf4307aa3a
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_alertemailconfig
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of AlertEmailConfig Avi RESTful Object
description:
- This module is used to configure AlertEmailConfig object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cc_emails:
description:
- Alerts are copied to the comma separated list of email recipients.
description:
description:
- User defined description for the object.
name:
description:
- A user-friendly name of the email notification service.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
to_emails:
description:
- Alerts are sent to the comma separated list of email recipients.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create AlertEmailConfig object
avi_alertemailconfig:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_alertemailconfig
"""
RETURN = '''
obj:
description: AlertEmailConfig (api/alertemailconfig) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
    """
    Ansible module entry point: create, update, or delete an
    AlertEmailConfig object on an Avi controller via the Avi REST API.

    Builds the module argument spec (module-specific options plus the
    common Avi connection options), fails early when the Avi SDK is
    missing, and otherwise delegates all API work to avi_ansible_api().
    """
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        cc_emails=dict(type='str',),
        description=dict(type='str',),
        name=dict(type='str', required=True),
        tenant_ref=dict(type='str',),
        to_emails=dict(type='str', required=True),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Shared Avi connection options (controller, username, password, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # No sensitive fields for this object type; `set()` replaces the
    # non-idiomatic `set([])` of the original.
    return avi_ansible_api(module, 'alertemailconfig',
                           set())


if __name__ == '__main__':
    main()
|
kvar/ansible
|
lib/ansible/modules/network/avi/avi_alertemailconfig.py
|
Python
|
gpl-3.0
| 3,896
|
[
"VisIt"
] |
ce713bf1a97466b7a55ad2c1f0213fb3ebc53949f93d314a57044274d35c3ecb
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
import os
from PyQt4 import QtCore, QtGui
from openlp.core.lib import MediaManagerItem, Registry, ItemCapabilities, ServiceItemContext, Settings, UiStrings, \
build_icon, check_item_selected, create_thumb, translate, validate_thumb
from openlp.core.lib.ui import critical_error_message_box, create_horizontal_adjusting_combo_box
from openlp.core.utils import get_locale_key
from openlp.plugins.presentations.lib import MessageListener
# Module-level logger for this media item.
log = logging.getLogger(__name__)

# Icon used for list entries whose backing file is missing on disk.
ERROR_IMAGE = QtGui.QImage(':/general/general_delete.png')
class PresentationMediaItem(MediaManagerItem):
"""
This is the Presentation media manager item for Presentation Items. It can present files using Openoffice and
Powerpoint
"""
log.info('Presentations Media Item loaded')
    def __init__(self, parent, plugin, icon, controllers):
        """
        Constructor. Setup defaults

        :param parent: Parent object passed through to MediaManagerItem.
        :param plugin: The presentations plugin instance.
        :param icon: Accepted but not used in this constructor — presumably
            consumed by callers or the base class; confirm before removing.
        :param controllers: Mapping of controller name -> presentation
            controller (OpenOffice, PowerPoint, ...).
        """
        # NOTE(review): controllers/icon_path/Automatic are assigned before the
        # base constructor runs — presumably MediaManagerItem.__init__ calls
        # back into overridden methods (e.g. add_end_header_bar) that need
        # them; keep this ordering.
        self.controllers = controllers
        self.icon_path = 'presentations/presentation'
        self.Automatic = ''
        super(PresentationMediaItem, self).__init__(parent, plugin)
        self.message_listener = MessageListener(self)
        self.has_search = True
        self.single_service_item = False
        # Re-populate the controller combo box / file masks when other parts
        # of the application signal a change.
        Registry().register_function('mediaitem_presentation_rebuild', self.populate_display_types)
        Registry().register_function('mediaitem_suffixes', self.build_file_mask_string)
        # Allow DnD from the desktop
        self.list_view.activateDnD()
def retranslateUi(self):
"""
The name of the plugin media displayed in UI
"""
self.on_new_prompt = translate('PresentationPlugin.MediaItem', 'Select Presentation(s)')
self.Automatic = translate('PresentationPlugin.MediaItem', 'Automatic')
self.display_type_label.setText(translate('PresentationPlugin.MediaItem', 'Present using:'))
def build_file_mask_string(self):
"""
Build the list of file extensions to be used in the Open file dialog.
"""
file_type_string = ''
for controller in self.controllers:
if self.controllers[controller].enabled():
file_types = self.controllers[controller].supports + self.controllers[controller].also_supports
for file_type in file_types:
if file_type not in file_type_string:
file_type_string += '*.%s ' % file_type
self.service_manager.supported_suffixes(file_type)
self.on_new_file_masks = translate('PresentationPlugin.MediaItem', 'Presentations (%s)') % file_type_string
def required_icons(self):
"""
Set which icons the media manager tab should show.
"""
MediaManagerItem.required_icons(self)
self.has_file_icon = True
self.has_new_icon = False
self.has_edit_icon = False
    def add_end_header_bar(self):
        """
        Display custom media manager items for presentations: a small form
        with a "Present using:" label and a combo box for choosing which
        presentation application to use.
        """
        # Container widget must exist before the layout that parents to it.
        self.presentation_widget = QtGui.QWidget(self)
        self.presentation_widget.setObjectName('presentation_widget')
        self.display_layout = QtGui.QFormLayout(self.presentation_widget)
        self.display_layout.setMargin(self.display_layout.spacing())
        self.display_layout.setObjectName('display_layout')
        # Label text itself is set later in retranslateUi().
        self.display_type_label = QtGui.QLabel(self.presentation_widget)
        self.display_type_label.setObjectName('display_type_label')
        self.display_type_combo_box = create_horizontal_adjusting_combo_box(self.presentation_widget,
                                                                            'display_type_combo_box')
        self.display_type_label.setBuddy(self.display_type_combo_box)
        self.display_layout.addRow(self.display_type_label, self.display_type_combo_box)
        # Add the Presentation widget to the page layout.
        self.page_layout.addWidget(self.presentation_widget)
def initialise(self):
"""
Populate the media manager tab
"""
self.list_view.setIconSize(QtCore.QSize(88, 50))
files = Settings().value(self.settings_section + '/presentations files')
self.load_list(files, initial_load=True)
self.populate_display_types()
def populate_display_types(self):
"""
Load the combobox with the enabled presentation controllers, allowing user to select a specific app if settings
allow.
"""
self.display_type_combo_box.clear()
for item in self.controllers:
# load the drop down selection
if self.controllers[item].enabled():
self.display_type_combo_box.addItem(item)
if self.display_type_combo_box.count() > 1:
self.display_type_combo_box.insertItem(0, self.Automatic)
self.display_type_combo_box.setCurrentIndex(0)
if Settings().value(self.settings_section + '/override app') == QtCore.Qt.Checked:
self.presentation_widget.show()
else:
self.presentation_widget.hide()
def load_list(self, files, target_group=None, initial_load=False):
    """
    Add presentations into the media manager. This is called both on initial load of the plugin to populate with
    existing files, and when the user adds new files via the media manager.

    :param files: list of presentation file paths to add
    :param target_group: unused here; kept for interface compatibility with MediaManagerItem
    :param initial_load: True when populating from saved settings; suppresses the progress bar,
        error dialogs and on-demand thumbnail generation
    """
    current_list = self.get_file_list()
    titles = [os.path.split(file)[1] for file in current_list]
    self.application.set_busy_cursor()
    if not initial_load:
        self.main_window.display_progress_bar(len(files))
    # Sort the presentations by its filename considering language specific characters.
    files.sort(key=lambda filename: get_locale_key(os.path.split(str(filename))[1]))
    for file in files:
        if not initial_load:
            self.main_window.increment_progress_bar()
        # Skip files that are already in the list.
        if current_list.count(file) > 0:
            continue
        filename = os.path.split(str(file))[1]
        if not os.path.exists(file):
            # Missing file: show it with an error icon so the user can clean it up.
            item_name = QtGui.QListWidgetItem(filename)
            item_name.setIcon(build_icon(ERROR_IMAGE))
            item_name.setData(QtCore.Qt.UserRole, file)
            item_name.setToolTip(file)
            self.list_view.addItem(item_name)
        else:
            if titles.count(filename) > 0:
                # Duplicate base name: warn only on interactive adds.
                if not initial_load:
                    critical_error_message_box(translate('PresentationPlugin.MediaItem', 'File Exists'),
                                               translate('PresentationPlugin.MediaItem',
                                                         'A presentation with that filename already exists.')
                                               )
                continue
            controller_name = self.findControllerByType(filename)
            if controller_name:
                controller = self.controllers[controller_name]
                doc = controller.add_document(str(file))
                thumb = os.path.join(doc.get_thumbnail_folder(), 'icon.png')
                preview = doc.get_thumbnail_path(1, True)
                if not preview and not initial_load:
                    # Load the presentation once so a thumbnail gets generated.
                    doc.load_presentation()
                    preview = doc.get_thumbnail_path(1, True)
                doc.close_presentation()
                if not (preview and os.path.exists(preview)):
                    # No preview available: fall back to the generic icon.
                    icon = build_icon(':/general/general_delete.png')
                else:
                    if validate_thumb(preview, thumb):
                        icon = build_icon(thumb)
                    else:
                        icon = create_thumb(preview, thumb)
            else:
                if initial_load:
                    icon = build_icon(':/general/general_delete.png')
                else:
                    critical_error_message_box(UiStrings().UnsupportedFile,
                                               translate('PresentationPlugin.MediaItem', 'This type of presentation is not supported.'))
                    continue
            item_name = QtGui.QListWidgetItem(filename)
            item_name.setData(QtCore.Qt.UserRole, file)
            item_name.setIcon(icon)
            item_name.setToolTip(file)
            self.list_view.addItem(item_name)
    if not initial_load:
        self.main_window.finished_progress_bar()
    self.application.set_normal_cursor()
def on_delete_click(self):
    """
    Remove the selected presentation items from the list.

    Every enabled controller is told that the presentation has been
    deleted so cached documents/thumbnails can be cleaned up, then the
    rows are removed from the list view and the stored file list in the
    settings is updated.
    """
    if check_item_selected(self.list_view, UiStrings().SelectDelete):
        items = self.list_view.selectedIndexes()
        row_list = [item.row() for item in items]
        # Delete from the bottom up so earlier removals do not shift the
        # row numbers of the remaining items.
        row_list.sort(reverse=True)
        self.application.set_busy_cursor()
        self.main_window.display_progress_bar(len(row_list))
        for item in items:
            filepath = str(item.data(QtCore.Qt.UserRole))
            for cidx in self.controllers:
                doc = self.controllers[cidx].add_document(filepath)
                doc.presentation_deleted()
                doc.close_presentation()
            self.main_window.increment_progress_bar()
        self.main_window.finished_progress_bar()
        # Bug fix: the original called set_busy_cursor() again here, which
        # left the application stuck with a busy cursor after the delete.
        self.application.set_normal_cursor()
        for row in row_list:
            self.list_view.takeItem(row)
        Settings().setValue(self.settings_section + '/presentations files', self.get_file_list())
def generate_slide_data(self, service_item, item=None, xml_version=False,
                        remote=False, context=ServiceItemContext.Service):
    """
    Load the relevant information for displaying the presentation in the slidecontroller. In the case of
    powerpoints, an image for each slide.

    :param service_item: the ServiceItem being populated
    :param item: optional explicit list item; otherwise the current selection is used
    :param xml_version: unused here; part of the generic MediaManagerItem interface
    :param remote: True when triggered remotely; suppresses error dialogs
    :param context: the ServiceItemContext the data is generated for
    :return: True on success, False if the service item cannot be built
    """
    if item:
        items = [item]
    else:
        items = self.list_view.selectedItems()
    # Only a single presentation can become one service item.
    if len(items) > 1:
        return False
    service_item.processor = self.display_type_combo_box.currentText()
    service_item.add_capability(ItemCapabilities.ProvidesOwnDisplay)
    if not self.display_type_combo_box.currentText():
        return False
    for bitem in items:
        filename = bitem.data(QtCore.Qt.UserRole)
        (path, name) = os.path.split(filename)
        service_item.title = name
        if os.path.exists(filename):
            if service_item.processor == self.Automatic:
                # Resolve "Automatic" to a concrete controller for this file type.
                service_item.processor = self.findControllerByType(filename)
                if not service_item.processor:
                    return False
            controller = self.controllers[service_item.processor]
            doc = controller.add_document(filename)
            # Generate thumbnails on demand if they do not exist yet.
            if doc.get_thumbnail_path(1, True) is None:
                doc.load_presentation()
            i = 1
            img = doc.get_thumbnail_path(i, True)
            if img:
                # Add one frame per available slide thumbnail.
                while img:
                    service_item.add_from_command(path, name, img)
                    i += 1
                    img = doc.get_thumbnail_path(i, True)
                doc.close_presentation()
                return True
            else:
                # File is no longer present
                if not remote:
                    critical_error_message_box(translate('PresentationPlugin.MediaItem', 'Missing Presentation'),
                                               translate('PresentationPlugin.MediaItem',
                                                         'The presentation %s is incomplete, please reload.') % filename)
                return False
        else:
            # File is no longer present
            if not remote:
                critical_error_message_box(translate('PresentationPlugin.MediaItem', 'Missing Presentation'),
                                           translate('PresentationPlugin.MediaItem', 'The presentation %s no longer exists.') % filename)
            return False
def findControllerByType(self, filename):
    """
    Determine the default application controller to use for the selected file type. This is used if "Automatic" is
    set as the preferred controller. Find the first (alphabetic) enabled controller which "supports" the extension.
    If none found, then look for a controller which "also supports" it instead.
    """
    file_type = os.path.splitext(filename)[1][1:]
    if not file_type:
        return None
    # First pass: controllers that natively support the extension.
    # Second pass: controllers that merely "also support" it.
    for attribute in ('supports', 'also_supports'):
        for name in self.controllers:
            candidate = self.controllers[name]
            if candidate.enabled() and file_type in getattr(candidate, attribute):
                return name
    return None
def search(self, string, show_error):
    """
    Return ``[file_path, file_name]`` pairs for every stored presentation
    whose file name contains *string* (case-insensitive).
    """
    files = Settings().value(self.settings_section + '/presentations files')
    needle = string.lower()
    results = []
    for path in files:
        name = os.path.split(str(path))[1]
        if needle in name.lower():
            results.append([path, name])
    return results
|
marmyshev/bug_1117098
|
openlp/plugins/presentations/lib/mediaitem.py
|
Python
|
gpl-2.0
| 15,484
|
[
"Brian"
] |
18fb475319efd494cc4c66b0db7f825f03947408c90fa3f82584aad2614835d1
|
#@PydevCodeAnalysisIgnore
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
# Official download location of the binary (non-pickled) CIFAR-10 archive.
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
    """Attach histogram and sparsity summaries to the activation tensor *x*.

    Args:
      x: Tensor

    Returns:
      nothing
    """
    # Strip any 'tower_N/' prefix used for multi-GPU training so the summary
    # names line up across towers on TensorBoard.
    name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(name + '/activations', x)
    sparsity = tf.nn.zero_fraction(x)
    tf.summary.scalar(name + '/sparsity', sparsity)
def _variable_on_cpu(name, shape, initializer):
    """Create (or reuse) a variable pinned to CPU memory.

    Args:
      name: name of the variable
      shape: list of ints
      initializer: initializer for Variable

    Returns:
      Variable Tensor
    """
    # Use half precision when requested via the --use_fp16 flag.
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Create a truncated-normal-initialized variable with optional L2 decay.

    The variable is placed on the CPU via _variable_on_cpu. When ``wd`` is
    not None, the penalty ``wd * l2_loss(var)`` is registered in the
    'losses' collection so it is summed into the total loss by loss().

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    initializer = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
    var = _variable_on_cpu(name, shape, initializer)
    if wd is not None:
        penalty = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', penalty)
    return var
def distorted_inputs():
    """Construct distorted input for CIFAR training using the Reader ops.

    Delegates the actual augmentation to cifar10_input.distorted_inputs().

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.

    Raises:
      ValueError: If no data_dir
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                    batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        # Cast to fp16 when half-precision training is requested.
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
def inputs(eval_data):
    """Construct input for CIFAR evaluation using the Reader ops.

    Unlike distorted_inputs(), no training-time augmentation is applied.

    Args:
      eval_data: bool, indicating if one should use the train or eval data set.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.

    Raises:
      ValueError: If no data_dir
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.inputs(eval_data=eval_data,
                                          data_dir=data_dir,
                                          batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        # Cast to fp16 when half-precision evaluation is requested.
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
def inference(images):
    """Build the CIFAR-10 model.

    Architecture: conv1 -> pool1 -> norm1 -> conv2 -> norm2 -> pool2 ->
    local3 (FC 384) -> local4 (FC 192) -> linear softmax layer.

    Args:
      images: Images returned from distorted_inputs() or inputs().

    Returns:
      Logits.
    """
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU training runs.
    # If we only ran this model on a single GPU, we could simplify this function
    # by replacing all instances of tf.get_variable() with tf.Variable().
    #
    # conv1
    with tf.variable_scope('conv1') as scope:
        # 5x5 kernels over 3 input channels, 64 output channels.
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 3, 64],
                                             stddev=5e-2,
                                             wd=0.0)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv1)
    # pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    # norm1: local response normalization across adjacent channels.
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')
    # conv2
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 64, 64],
                                             stddev=5e-2,
                                             wd=0.0)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv2)
    # norm2 (note: normalization precedes pooling here, unlike layer 1).
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    # local3: first fully-connected layer. Only L2 decay (wd=0.004) is applied
    # to the fully-connected weights, not to the conv kernels above.
    with tf.variable_scope('local3') as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        _activation_summary(local3)
    # local4: second fully-connected layer.
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        _activation_summary(local4)
    # linear layer(WX + b),
    # We don't apply softmax here because
    # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
    # and performs the softmax internally for efficiency.
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                              stddev=1/192.0, wd=0.0)
        biases = _variable_on_cpu('biases', [NUM_CLASSES],
                                  tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        _activation_summary(softmax_linear)
    return softmax_linear
def loss(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".

    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]

    Returns:
      Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss) registered by _variable_with_weight_decay().
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().

    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
def train(total_loss, global_step):
    """Train CIFAR-10 model.

    Create an optimizer and apply to all trainable variables. Add moving
    average for all trainable variables.

    Args:
      total_loss: Total loss from loss().
      global_step: Integer Variable counting the number of training steps
        processed.

    Returns:
      train_op: op for training.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    # Compute gradients only after the loss averages have been updated.
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)
    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # The returned no-op forces both the gradient update and the shadow
    # variable update to run on every training step.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
    return train_op
def maybe_download_and_extract():
    """Download and extract the CIFAR-10 tarball if it is not already present.

    Fetches DATA_URL into FLAGS.data_dir with a stdout progress report and
    extracts it once into the 'cifar-10-batches-bin' directory.
    """
    dest_dir = FLAGS.data_dir
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    archive_name = DATA_URL.split('/')[-1]
    archive_path = os.path.join(dest_dir, archive_name)
    if not os.path.exists(archive_path):
        def _report(count, block_size, total_size):
            done = float(count * block_size) / float(total_size) * 100.0
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (archive_name, done))
            sys.stdout.flush()
        archive_path, _ = urllib.request.urlretrieve(DATA_URL, archive_path, _report)
        print()
        statinfo = os.stat(archive_path)
        print('Successfully downloaded', archive_name, statinfo.st_size, 'bytes.')
    extracted_dir = os.path.join(dest_dir, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir):
        tarfile.open(archive_path, 'r:gz').extractall(dest_dir)
|
rossumai/keras-multi-gpu
|
keras_tf_multigpu/examples/avolkov1/cifar/tf_examples/cifar10.py
|
Python
|
mit
| 14,682
|
[
"Gaussian"
] |
ab5bdb835f102a99fac32288b8768cec538714729daff799e49a0a0b1ffc373a
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkPNMReader(SimpleVTKClassModuleBase):
    """Auto-generated DeVIDE module wrapping VTK's vtkPNMReader.

    The wrapped reader is registered with no input ports and a single
    output port named 'vtkPNM'; everything else is delegated to
    SimpleVTKClassModuleBase.
    """
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkPNMReader(), 'Reading vtkPNM.',
            (), ('vtkPNM',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkPNMReader.py
|
Python
|
bsd-3-clause
| 464
|
[
"VTK"
] |
da190df26bf07a8814557c72f612483da2e694467456e329e7048cc1c879b04b
|
import numpy
import scipy # use numpy if scipy unavailable
import scipy.linalg # use numpy if scipy unavailable
## Copyright (c) 2004-2007, Andrew D. Straw. All rights reserved.
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following
## disclaimer in the documentation and/or other materials provided
## with the distribution.
## * Neither the name of the Andrew D. Straw nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def ransac(data, model, n, k, t, d, debug=False, return_all=False):
    """fit model parameters to data using the RANSAC algorithm

    This implementation written from pseudocode found at
    http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182

    Given:
        data - a set of observed data points (rows of a 2-D array)
        model - a model that can be fitted to data points; must provide
            fit(data) -> params and get_error(data, params) -> per-row error
        n - the minimum number of data values required to fit the model
        k - the maximum number of iterations allowed in the algorithm
        t - a threshold value for determining when a data point fits a model
        d - the number of close data values required to assert that a model
            fits well to data
        debug - print per-iteration diagnostics
        return_all - also return {'inliers': indices of the best consensus set}
    Return:
        bestfit - model parameters which best fit the data
    Raises:
        ValueError - if no candidate model ever reaches d inliers
    """
    iterations = 0
    bestfit = None
    besterr = numpy.inf
    best_inlier_idxs = None
    while iterations < k:
        maybe_idxs, test_idxs = random_partition(n, data.shape[0])
        maybeinliers = data[maybe_idxs, :]
        test_points = data[test_idxs]
        maybemodel = model.fit(maybeinliers)
        test_err = model.get_error(test_points, maybemodel)
        # select indices of rows with accepted points
        also_idxs = test_idxs[test_err < t]
        alsoinliers = data[also_idxs, :]
        if debug:
            # Bug fix: these were Python-2-only print statements, which are a
            # SyntaxError on Python 3. Single-argument print() calls are valid
            # on both Python 2 and Python 3 without a __future__ import.
            print('test_err.min() %s' % test_err.min())
            print('test_err.max() %s' % test_err.max())
            print('numpy.mean(test_err) %s' % numpy.mean(test_err))
            print('iteration %d:len(alsoinliers) = %d' % (
                iterations, len(alsoinliers)))
        if len(alsoinliers) > d:
            # Promising consensus set: refit on all inliers and keep the
            # candidate with the lowest mean error seen so far.
            betterdata = numpy.concatenate((maybeinliers, alsoinliers))
            bettermodel = model.fit(betterdata)
            better_errs = model.get_error(betterdata, bettermodel)
            thiserr = numpy.mean(better_errs)
            if thiserr < besterr:
                bestfit = bettermodel
                besterr = thiserr
                best_inlier_idxs = numpy.concatenate((maybe_idxs, also_idxs))
        iterations += 1
    if bestfit is None:
        raise ValueError("did not meet fit acceptance criteria")
    if return_all:
        return bestfit, {'inliers': best_inlier_idxs}
    else:
        return bestfit


def random_partition(n, n_data):
    """return n random rows of data (and also the other len(data)-n rows)"""
    all_idxs = numpy.arange(n_data)
    numpy.random.shuffle(all_idxs)
    return all_idxs[:n], all_idxs[n:]
class LinearLeastSquaresModel:
    """linear system solved using linear least squares

    This class serves as an example that fulfills the model interface
    needed by the ransac() function.

    :param input_columns: column indices of the independent variables
    :param output_columns: column indices of the dependent variables
    :param debug: kept for interface compatibility; unused here
    """
    def __init__(self, input_columns, output_columns, debug=False):
        self.input_columns = input_columns
        self.output_columns = output_columns
        self.debug = debug

    def fit(self, data):
        """Return the least-squares solution x of A x = B, where A/B are the
        input/output columns of *data*."""
        A = numpy.vstack([data[:, i] for i in self.input_columns]).T
        B = numpy.vstack([data[:, i] for i in self.output_columns]).T
        # rcond=None selects the machine-precision-based cutoff and avoids
        # the FutureWarning raised when rcond is left unspecified.
        x, resids, rank, s = numpy.linalg.lstsq(A, B, rcond=None)
        return x

    def get_error(self, data, model):
        """Return the summed squared residual of each row under *model*."""
        A = numpy.vstack([data[:, i] for i in self.input_columns]).T
        B = numpy.vstack([data[:, i] for i in self.output_columns]).T
        # Bug fix: scipy.dot was a deprecated alias of numpy.dot and has been
        # removed from modern SciPy releases; use numpy.dot directly.
        B_fit = numpy.dot(A, model)
        err_per_point = numpy.sum((B - B_fit) ** 2, axis=1)  # sum squared error per row
        return err_per_point
def test():
    """Demonstrate ransac() on a noisy 1-D linear system with outliers and
    plot the RANSAC fit against an exact and a plain least-squares fit."""
    # generate perfect input data
    n_samples = 500
    n_inputs = 1
    n_outputs = 1
    A_exact = 20*numpy.random.random((n_samples, n_inputs))
    perfect_fit = 60*numpy.random.normal(size=(n_inputs, n_outputs))  # the model
    B_exact = scipy.dot(A_exact, perfect_fit)
    assert B_exact.shape == (n_samples, n_outputs)
    # add a little gaussian noise (linear least squares alone should handle this well)
    A_noisy = A_exact + numpy.random.normal(size=A_exact.shape)
    B_noisy = B_exact + numpy.random.normal(size=B_exact.shape)
    if 1:
        # add some outliers
        n_outliers = 100
        all_idxs = numpy.arange(A_noisy.shape[0])
        numpy.random.shuffle(all_idxs)
        outlier_idxs = all_idxs[:n_outliers]
        non_outlier_idxs = all_idxs[n_outliers:]
        A_noisy[outlier_idxs] = 20*numpy.random.random((n_outliers, n_inputs))
        B_noisy[outlier_idxs] = 50*numpy.random.normal(size=(n_outliers, n_outputs))
    # setup model
    all_data = numpy.hstack((A_noisy, B_noisy))
    input_columns = range(n_inputs)  # the first columns of the array
    output_columns = [n_inputs+i for i in range(n_outputs)]  # the last columns of the array
    debug = True
    model = LinearLeastSquaresModel(input_columns, output_columns, debug=debug)
    # Baseline: plain least squares over all data (outliers included).
    linear_fit, resids, rank, s = numpy.linalg.lstsq(all_data[:, input_columns], all_data[:, output_columns])
    # run RANSAC algorithm
    ransac_fit, ransac_data = ransac(all_data, model,
                                     5, 5000, 7e4, 50,  # misc. parameters
                                     debug=debug, return_all=True)
    if 1:
        import pylab
        sort_idxs = numpy.argsort(A_exact[:, 0])
        A_col0_sorted = A_exact[sort_idxs]  # maintain as rank-2 array
        if 1:
            pylab.plot(A_noisy[:, 0], B_noisy[:, 0], 'k.', label='data')
            pylab.plot(A_noisy[ransac_data['inliers'], 0], B_noisy[ransac_data['inliers'], 0], 'bx', label='RANSAC data')
        else:
            pylab.plot(A_noisy[non_outlier_idxs, 0], B_noisy[non_outlier_idxs, 0], 'k.', label='noisy data')
            pylab.plot(A_noisy[outlier_idxs, 0], B_noisy[outlier_idxs, 0], 'r.', label='outlier data')
        pylab.plot(A_col0_sorted[:, 0],
                   numpy.dot(A_col0_sorted, ransac_fit)[:, 0],
                   label='RANSAC fit')
        pylab.plot(A_col0_sorted[:, 0],
                   numpy.dot(A_col0_sorted, perfect_fit)[:, 0],
                   label='exact system')
        pylab.plot(A_col0_sorted[:, 0],
                   numpy.dot(A_col0_sorted, linear_fit)[:, 0],
                   label='linear fit')
        pylab.legend()
        pylab.show()
|
DLlearn/PCV
|
pcv_book/ransac.py
|
Python
|
bsd-2-clause
| 8,704
|
[
"Gaussian"
] |
730996c37437ce9e29d235c3b412354f9af9f68529ca3f00c832590296f96b1e
|
"""
The B{0install list-feeds} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
from zeroinstall.cmd import UsageError
from zeroinstall.injector import model
syntax = "URI"
def add_options(parser):
    """Add command-specific options to *parser* (list-feeds takes none)."""
    pass
def handle(config, options, args):
    """Print the URI of each user-registered (extra) feed of an interface.

    :param config: the zeroinstall configuration (provides the interface cache)
    :param options: parsed command-line options (unused here)
    :param args: positional arguments; exactly one interface URI is required
    :raises UsageError: if the number of arguments is not exactly one
    """
    if len(args) != 1: raise UsageError()
    uri = model.canonical_iface_uri(args[0])
    iface = config.iface_cache.get_interface(uri)
    if iface.extra_feeds:
        for f in iface.extra_feeds:
            print(f.uri)
    else:
        print(_("(no feeds)"))
|
timdiels/0install
|
zeroinstall/cmd/list_feeds.py
|
Python
|
lgpl-2.1
| 620
|
[
"VisIt"
] |
d463ab1b3ff3f1fa208a3d76619c343ddf4d1041a4392c21e09fb14dd5e1d329
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Yambo(AutotoolsPackage):
    """Yambo is a FORTRAN/C code for Many-Body calculations in solid
    state and molecular physics.

    Yambo relies on the Kohn-Sham wavefunctions generated by two DFT
    public codes: abinit, and PWscf. The code was originally developed
    in the Condensed Matter Theoretical Group of the Physics Department
    at the University of Rome "Tor Vergata" by Andrea Marini. Previous
    to its release under the GPL license, yambo was known as SELF.
    """
    homepage = "http://www.yambo-code.org/index.php"
    url = "https://github.com/yambo-code/yambo/archive/4.1.3.tar.gz"
    version('4.2.1', '99027014192c0f0f4b5d9b48414ad85d')
    version('4.2.0', '0cbb4d7c9790596d163ebe872d95bd30')
    variant('dp', default=False, description='Enable double precision')
    variant(
        'profile',
        values=('time', 'memory'),
        default='',
        description='Activate profiling of specific sections',
        multi=True
    )
    variant(
        'io',
        values=('iotk', 'etsf-io'),
        default='',
        description='Activate support for different io formats (requires network access)',  # noqa
        multi=True
    )
    # MPI + OpenMP parallelism
    variant('mpi', default=True, description='Enable MPI support')
    variant('openmp', default=False, description='Enable OpenMP support')
    depends_on('blas')
    depends_on('lapack')
    # MPI dependencies are forced, until we have proper forwarding of variants
    #
    # Note that yambo is used as an application, and not linked as a library,
    # thus there will be no case where another package pulls-in e.g. netcdf+mpi
    # and wants to depend on yambo~mpi.
    depends_on('mpi', when='+mpi')
    depends_on('netcdf+mpi', when='+mpi')
    depends_on('hdf5+mpi', when='+mpi')
    depends_on('fftw+mpi', when='+mpi')
    depends_on('scalapack', when='+mpi')
    depends_on('netcdf~mpi', when='~mpi')
    depends_on('hdf5~mpi', when='~mpi')
    depends_on('fftw~mpi', when='~mpi')
    depends_on('hdf5+fortran')
    depends_on('netcdf')
    depends_on('netcdf-fortran')
    depends_on('libxc@2.0.3:')
    build_targets = ['all']
    # The build system is not parallel-make safe.
    parallel = False
    # The configure in the package has the string 'cat config/report'
    # hard-coded, which causes a failure at configure time due to the
    # current working directory in Spack. Fix this by using the absolute
    # path to the file.
    @run_before('configure')
    def filter_configure(self):
        """Rewrite the relative 'config/report' path in configure to an
        absolute path so configure works from Spack's build directory."""
        report_abspath = join_path(self.build_directory, 'config', 'report')
        filter_file('config/report', report_abspath, 'configure')
    # The three enable_or_disable_* hooks below map the multi-valued
    # 'profile' variant and the 'openmp' variant onto yambo's
    # non-standard configure flag names.
    def enable_or_disable_time(self, activated):
        return '--enable-time-profile' if activated else '--disable-time-profile'  # noqa: E501
    def enable_or_disable_memory(self, activated):
        return '--enable-memory-profile' if activated else '--disable-memory-profile'  # noqa: E501
    def enable_or_disable_openmp(self, activated):
        return '--enable-open-mp' if activated else '--disable-open-mp'
    def configure_args(self):
        """Assemble the configure arguments from the spec's variants and
        dependency prefixes/libraries."""
        args = [
            # As of version 4.2.1 there are hard-coded paths that make
            # the build process fail if the target prefix is not the
            # configure directory
            '--prefix={0}'.format(self.stage.source_path),
            '--disable-keep-objects',
            '--with-editor=none'
        ]
        spec = self.spec
        # Double precision
        args.extend(self.enable_or_disable('dp'))
        # Application profiling
        args.extend(self.enable_or_disable('profile'))
        # MPI + threading
        args.extend(self.enable_or_disable('mpi'))
        args.extend(self.enable_or_disable('openmp'))
        # LAPACK
        if '+mpi' in spec:
            args.append('--with-scalapack-libs={0}'.format(
                spec['scalapack'].libs +
                spec['lapack'].libs +
                spec['blas'].libs
            ))
        args.extend([
            '--with-blas-libs={0}'.format(spec['blas'].libs),
            '--with-lapack-libs={0}'.format(spec['lapack'].libs)
        ])
        # Netcdf
        args.extend([
            '--enable-netcdf-hdf5',
            '--enable-hdf5-compression',
            '--with-hdf5-libs={0}'.format(spec['hdf5'].libs),
            '--with-netcdf-path={0}'.format(spec['netcdf'].prefix),
            '--with-netcdff-path={0}'.format(spec['netcdf-fortran'].prefix)
        ])
        args.extend(self.enable_or_disable('io'))
        # Other dependencies
        args.append('--with-fft-path={0}'.format(spec['fftw'].prefix))
        args.append('--with-libxc-path={0}'.format(spec['libxc'].prefix))
        return args
    def install(self, spec, prefix):
        """Copy the build products into the install prefix by hand."""
        # As of version 4.2.1 an 'install' target is advertized,
        # but not present
        install_tree('bin', prefix.bin)
        install_tree('lib', prefix.lib)
        install_tree('include', prefix.include)
        install_tree('driver', prefix.driver)
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/yambo/package.py
|
Python
|
lgpl-2.1
| 6,257
|
[
"ABINIT",
"NetCDF",
"Yambo"
] |
1a9982aec69efb64e9baadbec49fd9e435fa2283a251d9eaa2540b973c3e1c4d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''plotting/analysis routines on output of example_parallel_network.py
Copyright (C) 2018 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
from __future__ import division
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.collections import PolyCollection
import os
import numpy as np
import scipy.signal as ss
import h5py
from copy import copy
from LFPy import NetworkCell
from mpi4py import MPI
import neuron
# set up MPI environment
COMM = MPI.COMM_WORLD  # global communicator
SIZE = COMM.Get_size()  # number of MPI processes
RANK = COMM.Get_rank()  # rank of this process (0 acts as root below)
# set default plotting parameters (font sizes and zero axis margins used by
# all figures produced in this module)
fontsize = 14
titlesize = 16
legendsize = 12
plt.rcParams.update({
    'axes.xmargin': 0.0,
    'axes.ymargin': 0.0,
    'axes.labelsize': fontsize,
    'axes.titlesize': titlesize,
    'figure.titlesize': fontsize,
    'font.size': fontsize,
    'legend.fontsize': legendsize,
})
def decimate(x, q=10, n=4, k=0.8, filterfun=ss.cheby1):
    """
    scipy.signal.decimate like downsampling using filtfilt instead of lfilter,
    and filter coeffs from butterworth or chebyshev type 1.

    Parameters
    ----------
    x : numpy.ndarray
        Array to be downsampled along last axis.
    q : int
        Downsampling factor.
    n : int
        Filter order. If None, order 1 is used.
    k : float
        Aliasing filter critical frequency Wn will be set as Wn=k/q.
    filterfun : function
        `scipy.signal.filter_design.cheby1` or
        `scipy.signal.filter_design.butter` function

    Returns
    -------
    numpy.ndarray
        Array of downsampled signal.

    Raises
    ------
    TypeError
        If `q` is not an integer.
    Exception
        If `filterfun` is neither `scipy.signal.butter` nor
        `scipy.signal.cheby1`.
    """
    if not isinstance(q, int):
        raise TypeError("q must be an integer")
    if n is None:
        n = 1
    # design the anti-aliasing filter (cheby1 uses 0.05 dB passband ripple)
    if filterfun == ss.butter:
        b, a = filterfun(n, k / q)
    elif filterfun == ss.cheby1:
        b, a = filterfun(n, 0.05, k / q)
    else:
        raise Exception('only ss.butter or ss.cheby1 supported')
    try:
        y = ss.filtfilt(b, a, x)
    except ValueError:
        # Multidim array can only be processed at once for scipy >= 0.9.0;
        # fall back to filtering row by row. The previous bare
        # `except BaseException` also swallowed KeyboardInterrupt/SystemExit.
        y = []
        for data in x:
            y.append(ss.filtfilt(b, a, data))
        y = np.array(y)
    # pick every q-th sample along the last axis; `...` handles both 1-D and
    # n-D input (the old y[:, ::q] sliced axis 1, not the last axis, for
    # ndim > 2, contradicting the documented contract)
    return y[..., ::q]
def draw_lineplot(
        ax, data, dt=0.1,
        T=(0, 200),
        scaling_factor=1.,
        vlimround=None,
        label='local',
        scalebar=True,
        scalebarpos=0,
        scalebarbasis='log2',
        unit='mV',
        ylabels=True,
        color='r',
        ztransform=True,
        filter=False,
        filterargs=dict(N=2, Wn=0.02, btype='lowpass')):
    '''Draw a stacked multichannel line plot on ``ax``, one channel per
    integer vertical offset, with an optional amplitude scale bar.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
    data : numpy.ndarray
        signals of shape (n_channels, n_timesteps)
    dt : float
        sampling interval of data (time axis is labeled in ms)
    T : tuple of float
        (start, stop) interval shown on the time axis
    scaling_factor : float
        divisor applied to the auto-derived amplitude normalization
    vlimround : float or None
        fixed amplitude normalization; derived from the data if None
    label : str
        legend label attached to the first channel's line only
    scalebar : bool
        draw an amplitude scale bar at the right edge
    scalebarpos : float
        vertical offset of the scale bar
    scalebarbasis : str
        'log2' or 'log10' annotation style for the scale bar
    unit : str
        measurement unit printed next to the scale bar
    ylabels : bool
        label y-ticks with channel numbers
    color : matplotlib color
    ztransform : bool
        subtract the temporal mean of each channel before plotting
    filter : bool
        apply a zero-phase filter defined by ``filterargs`` (scipy butter)
    filterargs : dict
        keyword arguments for ``scipy.signal.butter``

    Returns
    -------
    float
        the amplitude normalization (vlimround) that was used
    '''
    tvec = np.arange(data.shape[1]) * dt
    try:
        tinds = (tvec >= T[0]) & (tvec <= T[1])
    except TypeError:
        print(data.shape, T)
        raise Exception
    # apply temporal filter
    if filter:
        b, a = ss.butter(**filterargs)
        data = ss.filtfilt(b, a, data, axis=-1)
    # subtract mean in each channel
    if ztransform:
        dataT = data.T - data.mean(axis=1)
        data = dataT.T
    # channel i is drawn around vertical offset -i
    zvec = -np.arange(data.shape[0])
    vlim = abs(data[:, tinds]).max()
    if vlimround is None:
        # normalize amplitudes to the nearest power of two of the peak value
        vlimround = 2.**np.round(np.log2(vlim)) / scaling_factor
    else:
        pass
    yticklabels = []
    yticks = []
    for i, z in enumerate(zvec):
        if i == 0:
            # only the first line carries the legend label
            ax.plot(tvec[tinds], data[i][tinds] / vlimround + z, lw=1,
                    rasterized=False, label=label, clip_on=False,
                    color=color)
        else:
            ax.plot(tvec[tinds], data[i][tinds] / vlimround + z, lw=1,
                    rasterized=False, clip_on=False,
                    color=color)
        yticklabels.append('ch. %i' % (i + 1))
        yticks.append(z)
    if scalebar:
        if scalebarbasis == 'log2':
            # vertical bar spanning one channel offset, annotated as 2**m unit
            ax.plot([tvec[tinds][-1], tvec[tinds][-1]],
                    [-1 - scalebarpos, -2 - scalebarpos], lw=2,
                    color=color, clip_on=False)
            ax.text(tvec[tinds][-1] + np.diff(T) * 0.03, -1.5 - scalebarpos,
                    '$2^{' + '{}'.format(int(np.log2(vlimround))) +
                    '}$ ' + '{0}'.format(unit),
                    color=color, rotation='vertical',
                    va='center')
        elif scalebarbasis == 'log10':
            # recompute scale bar size to show it on scientific format
            vlimround10 = 10**np.round(np.log10(vlimround))
            if vlimround10 >= 1:
                vlimround10 = int(np.round(vlimround10))
            rescale = vlimround10 / vlimround
            ax.plot([tvec[tinds][-1], tvec[tinds][-1]],
                    np.array([0.5, -0.5]) * rescale - 1.5 - scalebarpos,
                    lw=2, color=color, clip_on=False)
            ax.text(tvec[tinds][-1] + np.diff(T) * 0.03, -1.5 - scalebarpos,
                    '{0} '.format(vlimround10) + '{0}'.format(unit),
                    color=color, rotation='vertical',
                    va='center')
    ax.axis(ax.axis('tight'))
    ax.yaxis.set_ticks(yticks)
    if ylabels:
        ax.yaxis.set_ticklabels(yticklabels)
        ax.set_ylabel('channel', labelpad=0.1)
    else:
        ax.yaxis.set_ticklabels([])
    remove_axis_junk(ax, lines=['right', 'top'])
    ax.set_xlabel(r'time (ms)', labelpad=0.1)
    return vlimround
def remove_axis_junk(ax, lines=('right', 'top')):
    """Hide the axis spines named in ``lines`` and keep ticks only on the
    bottom and left axes.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Axes object modified in place.
    lines : sequence of str
        Spine locations ('left', 'right', 'top', 'bottom') to hide.
        Default is a tuple rather than a list to avoid the shared
        mutable-default-argument pitfall; only membership is tested, so
        callers passing lists are unaffected.
    """
    for loc, spine in ax.spines.items():
        if loc in lines:
            spine.set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
def plot_connectivity(ax, PSET, cmap=plt.get_cmap('inferno'),
                      data='connprob', cbarlabel=r'$C_{YX}$'):
    '''make an imshow-style plot of the intranetwork connectivity

    Parameters
    ----------
    ax : matplotlib.axes.Axes
    PSET : parameter set
        must provide ``connParams[data]`` (square array over populations)
        and ``populationParameters['m_type']`` used for tick labels
    cmap : matplotlib colormap
    data : str
        key into ``PSET.connParams`` selecting the quantity to show
    cbarlabel : str
        label put on the attached colorbar
    '''
    # mask zero entries so absent connections render as semi-transparent black
    masked_array = np.ma.array(
        PSET.connParams[data], mask=np.array(
            PSET.connParams[data]) == 0.)
    cmap = copy(cmap)
    cmap.set_bad('k', 0.5)
    # interpolation='nearest')
    im = ax.pcolormesh(masked_array, cmap=cmap, vmin=0, )
    ax.axis(ax.axis('tight'))
    ax.invert_yaxis()
    ax.xaxis.set_ticks_position('top')
    # ticks centered on each population cell
    ax.set_xticks(np.arange(PSET.populationParameters.size) + 0.5)
    ax.set_yticks(np.arange(PSET.populationParameters.size) + 0.5)
    ax.set_xticklabels(PSET.populationParameters['m_type'], rotation=270)
    ax.set_yticklabels(PSET.populationParameters['m_type'], )
    ax.xaxis.set_label_position('top')
    ax.set_xlabel(r'$Y$', labelpad=0)
    ax.set_ylabel(r'$X$', labelpad=0, rotation=0)
    # attach a slim colorbar axes immediately to the right of the image
    rect = np.array(ax.get_position().bounds)
    rect[0] += rect[2] + 0.0025
    rect[2] = 0.005
    fig = plt.gcf()
    cax = fig.add_axes(rect)
    cbar = plt.colorbar(im, cax=cax)
    cbar.set_label(cbarlabel, labelpad=0)
def plot_quantity_yXL(fig, left, bottom, top, PSET, quantity,
                      y=['p23', 'b23', 'nb23', 'p4', 'ss4(L23)', 'ss4(L4)',
                         'b4', 'nb4', 'p5(L23)', 'p5(L56)', 'b5', 'nb5',
                         'p6(L4)', 'p6(L56)', 'b6', 'nb6'],
                      label=r'$\mathcal{L}_{yXL}$',
                      layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
                      cmap=plt.get_cmap('inferno')):
    '''make a bunch of image plots, each showing the spatial normalized
    connectivity of synapses

    Parameters
    ----------
    fig : matplotlib.figure.Figure
    left, bottom, top : float
        GridSpec bounds for the panel grid
    PSET : parameter set (passed along by the caller)
    quantity : mapping
        per cell type in ``y``, a 2D array imaged with pcolormesh; rows
        presumably correspond to ``layers`` — TODO confirm against caller
    y : list of str
        cell-type names; one panel is drawn for each
    label : str
        colorbar label
    layers : list of str
        y-tick labels of the leftmost panels
    cmap : matplotlib colormap
    '''
    ncols = 3  # int(np.floor(np.sqrt(len(y))))
    nrows = int(len(y) // ncols)
    if len(y) % ncols > 0:
        nrows += 1
    # assess vlims shared by all panels
    vmin = 0
    vmax = 0
    for yi in y:
        if quantity[yi].max() > vmax:
            vmax = quantity[yi].max()
    gs = GridSpec(nrows, ncols, left=left, bottom=bottom, top=top)
    for i, yi in enumerate(y):
        ax = fig.add_subplot(gs[i // ncols, i % ncols])
        # mask zero entries so they show as background
        masked_array = np.ma.array(quantity[yi], mask=quantity[yi] == 0)
        im = ax.pcolormesh(masked_array,
                           vmin=vmin, vmax=vmax,
                           cmap=cmap,
                           )
        ax.invert_yaxis()
        ax.axis(ax.axis('tight'))
        ax.xaxis.set_ticks_position('top')
        ax.set_xticks(np.arange(len(y)) + 0.5)
        ax.set_yticks(np.arange(len(layers)) + 0.5)
        # layer labels only in the leftmost column
        if i % ncols == 0:
            ax.set_yticklabels(layers, )
            ax.set_ylabel('$L$', labelpad=0.)
        else:
            ax.set_yticklabels([])
        # presynaptic population labels only on the top row
        if i < ncols:
            ax.set_xlabel(r'$X$', labelpad=-1, fontsize=8)
            ax.set_xticklabels(y, rotation=90)
        else:
            ax.set_xticklabels([])
        ax.xaxis.set_label_position('top')
        ax.text(0.5, -0.13, r'$y=$' + yi,
                horizontalalignment='center',
                verticalalignment='center',
                #
                transform=ax.transAxes, fontsize=5.5)
        # single colorbar next to the upper-right panel
        if (i // ncols == 0) and (i % ncols) == ncols - 1:
            rect = np.array(ax.get_position().bounds)
            rect[0] += rect[2] + 0.01
            rect[1] = bottom
            rect[2] = 0.01
            rect[3] = top - bottom
            cax = fig.add_axes(rect)
            cbar = plt.colorbar(im, cax=cax)
            cbar.set_label(label, labelpad=0)
def plot_m_types(ax, PSET, colors, section=[
        'dend', 'apic'], spacing=300, linewidths=0.05):
    '''draw comparison plot of each individual morphology

    Parameters
    ----------
    ax : matplotlib.axes.Axes
    PSET : parameter set
        must provide CWD, CELLPATH, populationParameters, cellParameters
        and layer_data
    colors : sequence
        one matplotlib color per population
    section : list of str
        section names whose summed membrane area is accumulated
    spacing : float
        horizontal offset between neighboring morphologies
    linewidths : float
        outline width of morphology polygons

    Returns
    -------
    n_segs : list of int
        number of segments per morphology
    areas : list of float
        summed area of the requested sections per morphology
    '''
    CWD = PSET.CWD
    CELLPATH = PSET.CELLPATH
    n_segs = []
    areas = []
    for i, data in enumerate(PSET.populationParameters):
        NRN = data["me_type"]
        # cell templates are instantiated from their own directory
        os.chdir(os.path.join(CWD, CELLPATH, NRN))
        cell = NetworkCell(**PSET.cellParameters[NRN])
        cell.set_pos(x=i * spacing, y=0, z=data['pop_args']['loc'])
        cell.set_rotation(x=np.pi / 2)
        n_segs += [cell.totnsegs]
        areas += [cell.area[cell.get_idx(section)].sum()]
        # one polygon per compartment in the (x, z) projection
        zips = []
        for x, z in cell.get_idx_polygons(projection=('x', 'z')):
            zips.append(list(zip(x, z)))
        polycol = PolyCollection(zips,
                                 edgecolors=colors[i],
                                 linewidths=linewidths,
                                 facecolors=colors[i],
                                 label=NRN,
                                 )
        ax.add_collection(polycol)
        os.chdir(CWD)
    axis = ax.axis(ax.axis('tight'))
    # draw lines showing the layer boundaries
    ax.hlines(np.r_[0., -PSET.layer_data['thickness'].cumsum()]
              [:4], axis[0], axis[1] - 300, 'k', lw=0.5)
    ax.hlines(np.r_[0., -PSET.layer_data['thickness'].cumsum()]
              [4:], axis[0], axis[1], 'k', lw=0.5)
    # annotate hlines with values
    for z in np.r_[0., -PSET.layer_data['thickness'].cumsum()]:
        ax.text(
            axis[0],
            z,
            r'$z={}$'.format(
                int(z)) +
            r'$\mu$m',
            ha='right',
            va='center')
    ax.set_yticks(PSET.layer_data['center'])
    ax.set_yticklabels(PSET.layer_data['layer'])
    ax.set_xticks(np.arange(PSET.populationParameters.size) * spacing)
    ax.set_xticklabels(
        PSET.populationParameters['m_type'],
        rotation='vertical')
    ax.axis(ax.axis('equal'))
    ax.set_title('m-types')
    # clear all NEURON sections instantiated by the cells created above
    neuron.h("forall delete_section()")
    return n_segs, areas
if __name__ == '__main__':
    # get simulation parameters
    from example_parallel_network_parameters import PSET
    ##########################################################################
    # Plot simulated output
    ##########################################################################
    # output directory is created by RANK 0 only; other ranks wait at the
    # barrier until it exists
    if not os.path.isdir(PSET.OUTPUTPATH):
        if RANK == 0:
            os.mkdir(PSET.OUTPUTPATH)
        COMM.Barrier()
    ############################################
    # displayed time interval: skip the startup transient
    T = (PSET.TRANSIENT, PSET.tstop)
    # one fixed color per population, reused by all figures below
    colors = [
        plt.get_cmap(
            'Set1',
            PSET.populationParameters.size)(i) for i in range(
            PSET.populationParameters.size)]
    ############################################
    # plot m-types in network
    fig, ax = plt.subplots(1, 1, figsize=(PSET.populationParameters.size, 10))
    plot_m_types(ax, PSET, colors, spacing=300.)
    fig.savefig(os.path.join(PSET.OUTPUTPATH,
                             'example_parallel_network_m_types.pdf'),
                bbox_inches='tight')
    plt.close(fig)
    # plot connection probabilities between pre and postsynaptic populations
    fig, ax = plt.subplots(1, 1)
    fig.subplots_adjust(top=0.85)
    plot_connectivity(ax, PSET)
    fig.savefig(os.path.join(PSET.OUTPUTPATH,
                             'example_parallel_network_connectivity.pdf'),
                bbox_inches='tight')
    plt.close(fig)
    # plot layer specificity of connections between pre and postsynaptic cell
    # types
    fig = plt.figure()
    fig.suptitle('layer specificity of connections')
    plot_quantity_yXL(fig=fig, left=0.1, bottom=0.05, top=0.8, PSET=PSET,
                      quantity=PSET.L_YXL_m_types,
                      y=PSET.populationParameters['m_type'],
                      layers=PSET.layer_data['layer'],
                      label=r'$\mathcal{L}_{YXL}$')
    fig.savefig(os.path.join(PSET.OUTPUTPATH,
                             'example_parallel_network_L_YXL.pdf'),
                bbox_inches='tight')
    plt.close(fig)
    # plot summed LFP and contributions of leak and capacitive currents
    if RANK == 0 and PSET.COMPUTE_LFP:
        f = h5py.File(
            os.path.join(
                PSET.OUTPUTPATH,
                'example_parallel_network_output.h5'),
            'r')
        # NOTE(review): unlike the variance plots below, no 'k' entry is
        # prepended to the color list here; zip() truncates to the shortest
        # input — confirm the lists are meant to line up element by element.
        for data, title, suffix, color in zip(
            [  # f['SUMMED_OUTPUT'].value['imem'],
                # f['SUMMED_OUTPUT'].value['ipas'],
                # f['SUMMED_OUTPUT'].value['icap'],
                # f['SUMMED_OUTPUT'].value['isyn_e'],
                # f['SUMMED_OUTPUT'].value['isyn_i'],
                # f['SUMMED_OUTPUT'].value['isyn_e']
                # + f['SUMMED_OUTPUT'].value['isyn_i'],
                # f['SUMMED_OUTPUT'].value['imem']
                # - f['SUMMED_OUTPUT'].value['ipas']
                # - f['SUMMED_OUTPUT'].value['icap']
                # - f['SUMMED_OUTPUT'].value['isyn_e']
                # - f['SUMMED_OUTPUT'].value['isyn_i'],
            ] + [f['SUMMED_OUTPUT'].value[name]
                 for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'extracellular potentials, summed',
                # 'extracellular potential, leak currents',
                # 'extracellular potential, capacitive currents',
                # 'extracellular potential, exc. synaptic currents',
                # 'extracellular potential, inh. synaptic currents',
                # 'extracellular potential, exc. + inh. synaptic currents',
                # 'extracellular potential, residual',
            ] + [name for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'LFP',
                # 'i_pas', 'i_cap', 'i_syn_e', 'i_syn_i', 'i_syn_ei', 'i_gX'
            ] + [name for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'k',
                # 'r', 'b', 'c', 'm', 'g', 'y'
            ] + [colors[i]
                 for i in range(PSET.populationParameters.size)]):
            # left panel: raw (decimated) signals; right: low-pass filtered
            fig = plt.figure(figsize=(20, 10))
            ax = fig.add_subplot(121)
            ax.set_title(title)
            vlimround = draw_lineplot(ax=ax,
                                      data=decimate(data, q=PSET.decimate_q),
                                      dt=PSET.dt * PSET.decimate_q,
                                      T=T, color=color)
            ax = fig.add_subplot(122)
            ax.set_title(title + r' (LP filtered, $f_\mathrm{crit}=100$ Hz)')
            vlimround = draw_lineplot(ax=ax,
                                      data=decimate(data, q=PSET.decimate_q),
                                      dt=PSET.dt * PSET.decimate_q,
                                      T=T, color=color,
                                      ztransform=True, filter=True,
                                      filterargs=PSET.filterargs)
            # save figure output
            fig.savefig(
                os.path.join(PSET.OUTPUTPATH,
                             'example_parallel_network_summed_{}.pdf'.format(
                                 suffix)),
                bbox_inches='tight')
            plt.close(fig)
        f.close()
    # signal variance per channel, raw (top) and low-pass filtered (bottom)
    if RANK == 0 and PSET.COMPUTE_LFP:
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(211)
        ax.set_title('extracellular signal variance')
        y = PSET.electrodeParams['z']
        yticklabels = ['ch. {}'.format(x + 1) for x in range(y.size)]
        # index of the first sample after the startup transient
        tind = int(PSET.TRANSIENT / PSET.dt)
        f = h5py.File(
            os.path.join(
                PSET.OUTPUTPATH,
                'example_parallel_network_output.h5'),
            'r')
        for data, label, color in zip([  # f['SUMMED_OUTPUT'].value['imem'],
            # f['SUMMED_OUTPUT'].value['ipas'],
            # f['SUMMED_OUTPUT'].value['icap'],
            # f['SUMMED_OUTPUT'].value['isyn_e'],
            # f['SUMMED_OUTPUT'].value['isyn_i'],
            # f['SUMMED_OUTPUT'].value['isyn_e']
            # + f['SUMMED_OUTPUT'].value['isyn_i'],
            # f['SUMMED_OUTPUT'].value['imem']
            # - f['SUMMED_OUTPUT'].value['ipas']
            # - f['SUMMED_OUTPUT'].value['icap']
            # - f['SUMMED_OUTPUT'].value['isyn_e']
            # - f['SUMMED_OUTPUT'].value['isyn_i']
        ] + [f['SUMMED_OUTPUT'].value[name]
             for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'sum',
                # r'$i_\mathrm{pas}$', r'$i_\mathrm{cap}$',
                # r'$i_\mathrm{syn, E}$', r'$i_\mathrm{syn, I}$',
                # r'$i_\mathrm{syn, E}+i_\mathrm{syn, I}$', 'residual'
        ] + [name for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'k',
                # 'r', 'b', 'c', 'm', 'g', 'y'
        ] + ['k'] + [colors[i]
                     for i in range(PSET.populationParameters.size)]):
            ax.semilogx(data[:, tind:].var(axis=1), y,
                        lw=2, label=label, color=color)
        f.close()
        ax.set_yticks(y)
        ax.set_yticklabels(yticklabels)
        ax.axis(ax.axis('tight'))
        ax.legend(loc='best')
        ax.set_xlabel(r'variance (mV$^2$)')
        ax = fig.add_subplot(212)
        ax.set_title(
            r'LP filtered signals ($f_\mathrm{crit}=100$ Hz, '
            + '4th order Butterworth, filtfilt)')
        b, a = ss.butter(**PSET.filterargs)
        f = h5py.File(
            os.path.join(
                PSET.OUTPUTPATH,
                'example_parallel_network_output.h5'),
            'r')
        for data, label, color in zip([  # f['SUMMED_OUTPUT'].value['imem'],
            # f['SUMMED_OUTPUT'].value['ipas'],
            # f['SUMMED_OUTPUT'].value['icap'],
            # f['SUMMED_OUTPUT'].value['isyn_e'],
            # f['SUMMED_OUTPUT'].value['isyn_i'],
            # f['SUMMED_OUTPUT'].value['isyn_e']
            # + f['SUMMED_OUTPUT'].value['isyn_i'],
            # f['SUMMED_OUTPUT'].value['imem']
            # - f['SUMMED_OUTPUT'].value['ipas']
            # - f['SUMMED_OUTPUT'].value['icap']
            # - f['SUMMED_OUTPUT'].value['isyn_e']
            # - f['SUMMED_OUTPUT'].value['isyn_i']
        ] + [f['SUMMED_OUTPUT'].value[name]
             for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'sum',
                # r'$i_\mathrm{pas}$', r'$i_\mathrm{cap}$', r'$i_\mathrm{syn, E}$',
                # r'$i_\mathrm{syn, I}$', r'$i_\mathrm{syn, E}+i_\mathrm{syn, I}$',
                # 'residual'
        ] + [name for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'k',
                # 'r', 'b', 'c', 'm', 'g', 'y'
        ] + ['k'] + [colors[i]
                     for i in range(PSET.populationParameters.size)]):
            ax.semilogx(ss.filtfilt(b, a, data, axis=-1)
                        [:, tind:].var(axis=1), y, lw=2,
                        label=label, color=color)
        f.close()
        ax.set_yticks(y)
        ax.set_yticklabels(yticklabels)
        ax.axis(ax.axis('tight'))
        ax.set_xlabel(r'variance (mV$^2$)')
        fig.savefig(os.path.join(PSET.OUTPUTPATH,
                                 'example_parallel_network_variance.pdf'),
                    bbox_inches='tight')
        plt.close(fig)
    # spike raster plot of all spiking activity from file
    if RANK == 0:
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        f = h5py.File(
            os.path.join(
                PSET.OUTPUTPATH,
                'example_parallel_network_output.h5'),
            'r')
        for i, name in enumerate(PSET.populationParameters['me_type']):
            x = []
            y = []
            # horizontal line marking the lowest gid of each population
            ax.hlines(
                f['SPIKES'][name]['gids'].value.min(),
                T[0],
                T[1],
                'k',
                lw=0.25)
            for gid, spt in zip(f['SPIKES'][name]['gids'],
                                f['SPIKES'][name]['times']):
                if len(spt) > 0:
                    y += [gid] * spt.size
                    x += list(spt)
            ax.plot(x, y, '|',
                    color=colors[i], markersize=2, lw=2,
                    clip_on=True, label=name)
        f.close()
        ax.axis(ax.axis('tight'))
        ax.set_xlim(PSET.TRANSIENT, PSET.tstop)
        ax.set_ylim(ax.axis()[2] - 0.5, ax.axis()[3] + 0.5)
        ax.invert_yaxis()
        ax.legend(loc='best')
        ax.set_xlabel('time (ms)')
        ax.set_ylabel('gid')
        ax.set_title('spike raster')
        # save figure output
        fig.savefig(os.path.join(PSET.OUTPUTPATH,
                                 'example_parallel_network_raster.pdf'),
                    bbox_inches='tight')
        plt.close(fig)
    # spike count rate histograms of all spiking activity from file
    if RANK == 0:
        fig, axes = plt.subplots(PSET.populationParameters.size, 1,
                                 figsize=(10, 10), sharex=True)
        fig.subplots_adjust(left=0.2)
        f = h5py.File(
            os.path.join(
                PSET.OUTPUTPATH,
                'example_parallel_network_output.h5'),
            'r')
        dt = 10.  # bin size for histograms
        bins = np.arange(T[0], T[1] + dt, dt)
        axes[0].set_title(
            r'population spike time histogram, ($\Delta t={}$ ms)'.format(dt))
        for i, name in enumerate(PSET.populationParameters['me_type']):
            ax = axes[i]
            # pool all spike times of the population into one flat array
            data = np.hstack(f['SPIKES'][name]['times'].value.flat)
            # , histtype='step', color=colors[i])
            ax.hist(data, bins=bins, color=colors[i])
            ax.axis(ax.axis('tight'))
            ax.set_xlim(PSET.TRANSIENT, PSET.tstop)
            ax.set_ylabel(name, rotation='horizontal', labelpad=50)
            if ax != axes[-1]:
                ax.set_xticklabels([])
            else:
                ax.set_xlabel('time (ms)')
        f.close()
        # save figure output
        fig.savefig(os.path.join(
            PSET.OUTPUTPATH,
            'example_parallel_network_spike_time_histogram.pdf'),
            bbox_inches='tight')
        plt.close(fig)
    # spike count histogram across populations from file
    if RANK == 0:
        n = PSET.populationParameters['me_type'].size
        ncols = int(np.floor(np.sqrt(n)))
        nrows = int(np.ceil(float(n) / ncols))
        gs = GridSpec(nrows, ncols)
        fig = plt.figure(figsize=(10, 10))
        fig.subplots_adjust(hspace=0.4)
        fig.suptitle(
            'per-cell spike count hist., T={} s'.format(
                (PSET.tstop - PSET.TRANSIENT) / 1000.))
        bins = np.arange(42) * (PSET.tstop - PSET.TRANSIENT) / \
            1000.  # make count bins conform to bin size of 1 Hz.
        f = h5py.File(
            os.path.join(
                PSET.OUTPUTPATH,
                'example_parallel_network_output.h5'),
            'r')
        for i, name in enumerate(PSET.populationParameters['me_type']):
            ax = fig.add_subplot(gs[i // ncols, i % ncols])
            # per-cell spike counts, only counting spikes after the transient
            x = []
            for spt in f['SPIKES'][name]['times']:
                if spt.size == 0:
                    x += [0]
                else:
                    if np.any(spt >= PSET.TRANSIENT):
                        x += [spt[spt >= PSET.TRANSIENT].size]
                    else:
                        x += [0]
            ax.hist(
                x,
                bins=bins,
                color=colors[i],
                clip_on=True,
                label=name)  # histtype='step',
            ax.axis(ax.axis('tight'))
            ax.set_title(name)
            if i >= (n - ncols):
                ax.set_xlabel('spike count')
            else:
                ax.set_xticklabels([])
            if i % ncols == 0:
                ax.set_ylabel('observations')
        f.close()
        # save figure output
        fig.savefig(os.path.join(
            PSET.OUTPUTPATH,
            'example_parallel_network_spike_count_hist.pdf'),
            bbox_inches='tight')
        plt.close(fig)
|
LFPy/LFPy
|
examples/bioRxiv281717/example_parallel_network_plotting.py
|
Python
|
gpl-3.0
| 25,689
|
[
"NEURON"
] |
519f92cd7f84d93882bfa11c7c711892cacbfb7a001c5126d374b1764ab72dbb
|
# $Id$
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
class ExcludedVolume(object):
  """A pharmacophore excluded-volume specification.

  Attributes
  ----------
  featInfo : sequence of ([indices], minDist, maxDist) 3-sequences
      stored as a shallow copy of the argument
  index : int
      identifying index (default -1)
  exclusionDist : float
      exclusion distance (default 3.0)
  pos : None
      position placeholder; set externally after construction
  """

  def __init__(self, featInfo, index=-1, exclusionDist=3.0):
    """
    featInfo should be a sequence of ([indices], min, max) tuples

    Raises ValueError if featInfo is not a non-empty sequence of
    3-sequences.
    """
    self.index = index
    try:
      len(featInfo)
    except (AttributeError, TypeError):
      # len() raises TypeError for objects without __len__, so TypeError
      # must be caught here as well (the original caught only
      # AttributeError, which never fires for len()).
      raise ValueError('featInfo argument must be a sequence of sequences')
    if not len(featInfo):
      raise ValueError('featInfo argument must be non-empty')
    try:
      # structural check: first element must unpack into three values
      a, b, c = featInfo[0]
    except (TypeError, ValueError):
      # the original code's `except Type:` was a NameError at runtime;
      # both unpack failures mean the element is not a 3-sequence.
      raise ValueError('featInfo elements must be 3-sequences')
    self.featInfo = featInfo[:]
    self.exclusionDist = exclusionDist
    self.pos = None
|
soerendip42/rdkit
|
rdkit/Chem/Pharm3D/ExcludedVolume.py
|
Python
|
bsd-3-clause
| 1,002
|
[
"RDKit"
] |
3d73a9ab558ffc364502087010b68a16fa804e38b6ef7cf04faae5fe87ae52c0
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import warnings
import qcelemental as qcel
def B787(cgeom,
         rgeom,
         cuniq,
         runiq,
         do_plot=False,
         verbose=1,
         atoms_map=False,
         run_resorting=False,
         mols_align=False,
         run_to_completion=False,
         uno_cutoff=1.e-3,
         run_mirror=False):
    """Deprecated shim around :func:`qcelemental.molutil.B787`.

    Finds the best Kabsch alignment of geometry `cgeom` onto `rgeom`
    while sampling atom mappings restricted by `runiq` and `cuniq`.

    Parameters
    ----------
    cgeom, rgeom : ndarray of float
        (nat, 3) concern/changeable and reference/target geometries,
        assumed [a0] for RMSD purposes; must agree in nat and atom content.
    cuniq, runiq : ndarray of str
        (nat,) labels marking which rows (atoms) may be shuffled without
        changing the molecule; ``sorted(cuniq) == sorted(runiq)``.
        May be None when `atoms_map=True`.
    do_plot : bool, optional
        Pop up a mpl plot showing before, after, and ref geometries.
    verbose : int, optional
        Quantity of printing; 0 silences.
    atoms_map : bool, optional
        Whether atoms already correspond one-to-one, skipping resorting.
    run_resorting : bool, optional
        Force the resorting machinery even when `atoms_map=True`.
    mols_align : bool or float, optional
        Expect final RMSD = 0 (True) or truncate at the given convcrit.
    run_to_completion : bool, optional
        Run reorderings to completion even when unnecessary.
    uno_cutoff : float, optional
        TODO
    run_mirror : bool, optional
        Also try the mirror image of `cgeom` when the system is
        nonsuperimposable upon reflection.

    Returns
    -------
    float, tuple
        RMSD [A] and an AlignmentMill namedtuple
        (shift, rotation, atommap, mirror) describing the transformation.
    """
    warnings.warn(
        "Using `qcdb.align.B787` instead of `qcelemental.molutil.B787` is deprecated, and in 1.5 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    # forward everything to the maintained implementation
    options = dict(do_plot=do_plot,
                   verbose=verbose,
                   atoms_map=atoms_map,
                   run_resorting=run_resorting,
                   mols_align=mols_align,
                   run_to_completion=run_to_completion,
                   uno_cutoff=uno_cutoff,
                   run_mirror=run_mirror)
    return qcel.molutil.B787(cgeom, rgeom, cuniq, runiq, **options)
def compute_scramble(nat, do_resort=True, do_shift=True, do_rotate=True, deflection=1.0, do_mirror=False):
    """Deprecated shim around :func:`qcelemental.molutil.compute_scramble`.

    Generates a random or directed translation, rotation, and atom
    shuffling for `nat` atoms.

    Parameters
    ----------
    nat : int
        Number of atoms for which to prepare an atom mapping.
    do_resort : bool or array-like, optional
        Shuffle atoms randomly (True), keep order (False), or apply the
        given (nat,) index order.
    do_shift : bool or array-like, optional
        Random shift on [-3, 3) per dimension (True), none (False), or the
        given (3,) vector.
    do_rotate : bool or array-like, optional
        Random 3D rotation per Arvo's algorithm (True), none (False), or
        the given (3, 3) matrix.
    deflection : float, optional
        Randomness of the rotation: 0.0 no change, 1.0 fully random.
    do_mirror : bool, optional
        Whether to set the mirror-reflection instruction.

    Returns
    -------
    tuple
        AlignmentMill namedtuple (shift, rotation, atommap, mirror).
    """
    warnings.warn(
        "Using `qcdb.align.compute_scramble` instead of `qcelemental.molutil.compute_scramble` is deprecated, and in 1.5 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    # forward everything to the maintained implementation
    return qcel.molutil.compute_scramble(
        nat,
        do_resort=do_resort,
        do_shift=do_shift,
        do_rotate=do_rotate,
        deflection=deflection,
        do_mirror=do_mirror)
|
lothian/psi4
|
psi4/driver/qcdb/align.py
|
Python
|
lgpl-3.0
| 6,553
|
[
"Psi4"
] |
0adceeb44195e828c1855bd1670b757959f5aaee7a64195bc6d358822576b38d
|
import collections
from importlib import import_module
from enhatts import FieldPreparationErrors, DeleteField
from tinkerpy import anonymous_class
from inspect import isclass
# (field_class, attributes) pair describing how a field is built:
# ``field_class`` is a class or the name of one to resolve lazily,
# ``attributes`` a mapping of extra class attributes to mix in.
FieldDefinition = collections.namedtuple('FieldDefinition',
    'field_class attributes')
def _mapping_repr(header, mapping,
getter=lambda mapping, name: mapping[name]):
fields_repr = '{' + ', '.join(
'{}: {}'.format(name, getter(mapping, name))
for name in mapping
) + '}'
return '{}{}'.format(header, fields_repr)
class Fields(collections.abc.Mapping):
    """Read-only mapping from field name to field class for a given owner
    class, merging definitions inherited through the owner's bases.

    The base class was changed from ``collections.Mapping`` to
    ``collections.abc.Mapping``: the former is an alias removed in
    Python 3.10, the latter is available since Python 3.3 and identical
    before the removal.
    """

    def __init__(self, cls):
        self._own_field_definitions = dict()
        self._own_field_order = list()
        self._own_field_names = set()
        self._field_class_mapping = dict()  # cache of resolved classes
        self._cls = cls
        self._before = dict()  # insert-before ordering constraints

    def _clone(self, cls):
        """Return a copy of this registry bound to *cls*.

        NOTE(review): ``self._before`` is not copied here, so ordering
        constraints do not survive cloning — confirm this is intended.
        """
        other = Fields(cls)
        other._own_field_definitions = dict(self._own_field_definitions)
        other._own_field_order = list(self._own_field_order)
        other._own_field_names = set(self._own_field_names)
        other._field_class_mapping = dict(self._field_class_mapping)
        return other

    def _register(self, name, field_class, before, attributes):
        """Record a field definition and invalidate cached order/length."""
        self._own_field_definitions[name] = FieldDefinition(field_class,
            attributes)
        def del_attr(attr_name):
            try:
                delattr(self, attr_name)
            except AttributeError:
                pass
        if before is not None:
            if before not in self._get_field_names():
                raise KeyError(
                    'The field "{}", which the field "{}" should be insert before, does not exist.'.format(
                        name, before))
            self._before[before] = name
        if name not in self._get_field_names():
            self._own_field_order.insert(0, name)
            self._own_field_names.add(name)
        # drop lazily computed caches; they are rebuilt on next access
        del_attr('_field_names')
        del_attr('_length')
        del_attr('_field_order')

    def _base_fields_iterator(self):
        """Yield the ``FIELDS`` registries of the owner's direct bases."""
        for base_cls in self._cls.__bases__:
            try:
                base_fields = base_cls.FIELDS
            except AttributeError:
                pass
            else:
                yield base_fields

    def _get_field_definition(self, name):
        """Look up a definition locally, then in base registries."""
        try:
            return self._own_field_definitions[name]
        except KeyError:
            for base_fields in self._base_fields_iterator():
                try:
                    return base_fields._get_field_definition(name)
                except KeyError:
                    pass
            raise KeyError(name)

    def _get_lazy_field_class(self, field_class_name):
        """Resolve a class name against the modules of the owner's MRO."""
        def get_field_class(cls):
            module = import_module(cls.__module__)
            return getattr(module, field_class_name)
        for base in self._cls.__mro__:
            try:
                return get_field_class(base)
            except AttributeError:
                pass

    def _get_field_class(self, field_definition):
        """Materialize the field class from a definition, resolving lazy
        names and mixing in extra attributes when requested."""
        field_class, attributes = field_definition
        if isinstance(field_class, str):
            field_class_name = field_class
            field_class = self._get_lazy_field_class(field_class_name)
            if field_class is None or not isclass(field_class):
                raise LookupError(
                    'Could not find a field class named "{}" on the modules of the classes in the method resolution order.'.format(
                        field_class_name))
        if len(attributes) > 0:
            module_name = field_class.__module__
            field_class = anonymous_class(field_class, **attributes)
            field_class.__module__ = module_name
        return field_class

    def __getitem__(self, name):
        # resolved classes are cached per name
        try:
            return self._field_class_mapping[name]
        except KeyError:
            field_definition = self._get_field_definition(name)
            field_class = self._get_field_class(field_definition)
            self._field_class_mapping[name] = field_class
            return field_class

    def __contains__(self, name):
        return name in self._get_field_names()

    def __iter__(self):
        return iter(self._get_field_order())

    def __len__(self):
        try:
            return self._length
        except AttributeError:
            self._length = len(self._get_field_names())
            return self._length

    def _update_fields(self):
        """Recompute the merged field order/name caches, honoring
        insert-before constraints via a depth-first visit."""
        field_order = list()
        field_names = set()
        def visit(name):
            field_before = self._before.get(name, None)
            if field_before is not None:
                visit(field_before)
            if name not in field_names:
                field_names.add(name)
                field_order.append(name)
        for base_fields in self._base_fields_iterator():
            for name in base_fields._get_field_order():
                visit(name)
        for name in self._own_field_order:
            visit(name)
        self._field_order = field_order
        self._field_names = field_names

    def _get_field_order(self):
        try:
            return self._field_order
        except AttributeError:
            self._update_fields()
            return self._field_order

    def _get_field_names(self):
        try:
            return self._field_names
        except AttributeError:
            self._update_fields()
            return self._field_names

    def __repr__(self):
        return _mapping_repr('FIELDS on {}: '.format(repr(self._cls)), self)
# The ABCs moved to collections.abc in Python 3.3 and the old aliases were
# removed in 3.10; fall back for old interpreters.
try:
    from collections.abc import Mapping as _Mapping
except ImportError:  # pragma: no cover - legacy Python fallback
    from collections import Mapping as _Mapping


class FieldValuesProxy(_Mapping):
    """Read-only mapping view over an :class:`InstanceFields` that records
    pending changes and deletions without applying them.

    Lookups consult the recorded changes first and fall back to the
    underlying instance fields; names marked deleted raise ``KeyError``.
    """

    __slots__ = {'_instance_fields', '_changed_fields', '_deleted_fields',
        '_v_mutable', '_changed_field_names', '_deleted_field_names'}

    def __init__(self, instance_fields):
        self._instance_fields = instance_fields
        self._changed_fields = dict()    # name -> prepared pending value
        self._deleted_fields = set()     # names pending deletion
        self._changed_field_names = []   # change log, insertion order
        self._deleted_field_names = []   # deletion log, insertion order

    def __contains__(self, name):
        # BUGFIX: previously ``return self._instance_fields[name]`` -- that
        # raised KeyError for unknown names instead of returning False, and
        # returned a non-boolean object for known ones.
        return name in self._instance_fields

    def __getitem__(self, name):
        if name in self._deleted_fields:
            raise KeyError('The field "{}" has been deleted.'.format(name))
        try:
            return self._changed_fields[name]
        except KeyError:
            return self._instance_fields[name]

    def __iter__(self):
        return iter(self._instance_fields)

    def __len__(self):
        return len(self._instance_fields)

    def _set(self, name, value):
        """Record *value* as the pending value of *name* (undoes a pending
        deletion of the same name)."""
        try:
            self._deleted_fields.remove(name)
        except KeyError:
            pass
        else:
            self._deleted_field_names.remove(name)
        if name not in self._changed_fields:
            self._changed_field_names.append(name)
        self._changed_fields[name] = value

    def _delete(self, name):
        """Mark *name* as pending deletion.

        Raises ``KeyError`` if the name is unknown or already deleted; a
        pending change for the name is discarded.
        """
        self[name]  # raise KeyError for unknown / already-deleted names
        try:
            del self._changed_fields[name]
        except KeyError:
            pass
        else:
            self._changed_field_names.remove(name)
        if name not in self._deleted_fields:
            self._deleted_fields.add(name)
            self._deleted_field_names.append(name)

    @property
    def _mutable(self):
        """A writable proxy sharing this proxy's change/deletion records."""
        try:
            return self._v_mutable
        except AttributeError:
            mutable = MutableFieldValuesProxy(self._instance_fields,
                self._changed_fields, self._deleted_fields,
                self._changed_field_names, self._deleted_field_names)
            self._v_mutable = mutable
            return mutable

    @property
    def changed(self):
        """Yield the changed field names in the order they were changed."""
        for name in self._changed_field_names:
            yield name

    @property
    def deleted(self):
        """Yield the deleted field names in the order they were deleted."""
        for name in self._deleted_field_names:
            yield name
class MutableFieldValuesProxy(FieldValuesProxy):
    """Writable companion of :class:`FieldValuesProxy`.

    All record containers are shared with the originating proxy, so
    mutations through this object are visible on the read-only proxy.
    """

    def __init__(self, instance_fields, changed_fields, deleted_fields,
            changed_field_names, deleted_field_names):
        # Deliberately does not call the base __init__: the containers are
        # the very objects of the read-only proxy, not fresh copies.
        self._instance_fields = instance_fields
        self._changed_fields = changed_fields
        self._deleted_fields = deleted_fields
        self._changed_field_names = changed_field_names
        self._deleted_field_names = deleted_field_names

    # Expose the protected record operations as mapping mutators.
    __setitem__ = FieldValuesProxy._set
    __delitem__ = FieldValuesProxy._delete

    @property
    def _mutable(self):
        # Already mutable -- no wrapper needed.
        return self
# The ABCs moved to collections.abc in Python 3.3 and the old aliases were
# removed in 3.10; fall back for old interpreters.
try:
    from collections.abc import MutableMapping as _MutableMapping
except ImportError:  # pragma: no cover - legacy Python fallback
    from collections import MutableMapping as _MutableMapping


class InstanceFields(_MutableMapping):
    """Mutable mapping of field names to field *instances* for one object.

    Field instances are created lazily from the field classes in *fields*
    and cached per name. All writes and deletes are funnelled through
    :meth:`_set_multiple`, which runs the optional preparation and
    modification hooks defined on the owning object.
    """

    __slots__ = {'_fields', '_obj', '_field_instances'}

    def __init__(self, fields, obj):
        self._fields = fields            # mapping: name -> field class
        self._obj = obj                  # the object owning the fields
        self._field_instances = dict()   # cache: name -> field instance

    def __contains__(self, name):
        # BUGFIX: previously ``return self._fields[name]`` -- that raised
        # KeyError for unknown names instead of returning False, and
        # returned a non-boolean object for known ones.
        return name in self._fields

    def _get_field_instance(self, name):
        """Return the cached field instance for *name*, creating it lazily."""
        try:
            field_instance = self._field_instances[name]
        except KeyError:
            field_class = self._fields[name]
            field_instance = field_class(self._obj, name)
            self._field_instances[name] = field_instance
        return field_instance

    def __getitem__(self, name):
        return self._get_field_instance(name)

    def __setitem__(self, name, value):
        try:
            self._set_multiple({name: value})
        except FieldPreparationErrors as e:
            # Single-field assignment: surface just this field's error.
            raise e[name]

    def __delitem__(self, name):
        self._set_multiple({name: DeleteField})

    def __iter__(self):
        return iter(self._fields)

    def __len__(self):
        return len(self._fields)

    def __repr__(self):
        return _mapping_repr('FIELDS on {}: '.format(repr(self._obj)), self,
            lambda mapping, name: repr(mapping[name]))

    def _before_prepare(self, field_values):
        """Invoke the optional ``FIELDS_before_prepare`` hook on the object."""
        try:
            before_prepare = self._obj.FIELDS_before_prepare
        except AttributeError:
            pass
        else:
            before_prepare(field_values)

    def _before_modifications(self, field_values_proxy):
        """Invoke the optional ``FIELDS_before_modifications`` hook with a
        mutable view of the pending modifications."""
        try:
            before_modifications = self._obj.FIELDS_before_modifications
        except AttributeError:
            pass
        else:
            before_modifications(field_values_proxy._mutable)

    def _after_modifications(self, field_values_proxy):
        """Invoke the optional ``FIELDS_after_modifications`` hook with a
        read-only view of the applied modifications."""
        try:
            after_modifications = self._obj.FIELDS_after_modifications
        except AttributeError:
            pass
        else:
            after_modifications(field_values_proxy)

    def _set_multiple(self, field_values):
        """Set and/or delete several fields as one operation.

        *field_values* maps field names to new values, or to ``DeleteField``
        to request deletion. All values are prepared first; if any
        preparation fails, :class:`FieldPreparationErrors` is raised and no
        field is modified.
        """
        self._before_prepare(field_values)
        field_values_proxy = FieldValuesProxy(self)
        exceptions = {}
        # Iterate in field order so later preparations can read the pending
        # values of earlier fields through the proxy.
        for name in self:
            try:
                value = field_values[name]
            except KeyError:
                pass
            else:
                if value is DeleteField:
                    field_values_proxy._delete(name)
                else:
                    field_instance = self._get_field_instance(name)
                    try:
                        prepared_value = field_instance.prepare(value,
                            field_values_proxy)
                    except Exception as e:
                        exceptions[name] = e
                    else:
                        field_values_proxy._set(name, prepared_value)
        if len(exceptions) > 0:
            raise FieldPreparationErrors(exceptions)
        self._before_modifications(field_values_proxy)
        # Apply the recorded changes, then the recorded deletions.
        for name in field_values_proxy.changed:
            field_instance = self._get_field_instance(name)
            field_instance.set(field_values_proxy[name])
        for name in field_values_proxy.deleted:
            field_instance = self._get_field_instance(name)
            field_instance.delete()
        self._after_modifications(field_values_proxy)
# Drop the module reference so it is not exposed as a public attribute of
# this module.
del collections
|
IvIePhisto/EnhAtts
|
enhatts/_fields.py
|
Python
|
mit
| 11,686
|
[
"VisIt"
] |
722cd7d863b9660a837285632df1171b5b4fdfe8a04e498ebf470f8a69a8130f
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************************
**espressopp.interaction.Harmonic**
*********************************************
.. math::
U = K (d - r_0)^2
.. function:: espressopp.interaction.Harmonic(K, r0, cutoff, shift)
:param K: (default: 1.0)
:param r0: (default: 0.0)
:param cutoff: (default: infinity)
:param shift: (default: 0.0)
:type K: real
:type r0: real
:type cutoff:
:type shift: real
.. function:: espressopp.interaction.FixedPairListHarmonic(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListHarmonic.getFixedPairList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedPairListHarmonic.setFixedPairList(fixedpairlist)
:param fixedpairlist:
:type fixedpairlist:
.. function:: espressopp.interaction.FixedPairListHarmonic.setPotential(potential)
:param potential:
:type potential:
.. function:: espressopp.interaction.FixedPairListTypesHarmonic(system, vl)
:param system:
:param vl:
:type system:
:type vl:
.. function:: espressopp.interaction.FixedPairListTypesHarmonic.getFixedPairList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedPairListTypesHarmonic.setFixedPairList(fixedpairlist)
:param fixedpairlist:
:type fixedpairlist:
.. function:: espressopp.interaction.FixedPairListTypesHarmonic.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListTypesHarmonic.getPotential(type1,type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_Harmonic, interaction_FixedPairListHarmonic, \
interaction_FixedPairListTypesHarmonic
class HarmonicLocal(PotentialLocal, interaction_Harmonic):
    """Worker-side wrapper of the C++ Harmonic potential U = K*(d - r0)**2."""

    def __init__(self, K=1.0, r0=0.0,
            cutoff=infinity, shift=0.0):
        """Initialize the local Harmonic object."""
        # Construct the C++ object only on ranks belonging to the active
        # PMI CPU group (or everywhere when PMI is not active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # shift == "auto": let the C++ side derive the shift from the
            # cutoff instead of passing an explicit value.
            if shift == "auto":
                cxxinit(self, interaction_Harmonic, K, r0, cutoff)
            else:
                cxxinit(self, interaction_Harmonic, K, r0, cutoff, shift)
class FixedPairListHarmonicLocal(InteractionLocal, interaction_FixedPairListHarmonic):
    """Worker-side Harmonic interaction evaluated over a fixed pair list."""

    def __init__(self, system, vl, potential):
        # Construct only on ranks in the active PMI CPU group.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListHarmonic, system, vl, potential)

    def setPotential(self, potential):
        """Set the potential applied to every pair in the list."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)

    def setFixedPairList(self, fixedpairlist):
        """Replace the fixed pair list of this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedPairList(self, fixedpairlist)

    def getFixedPairList(self):
        """Return the fixed pair list (a Python list of lists)."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getFixedPairList(self)
class FixedPairListTypesHarmonicLocal(InteractionLocal, interaction_FixedPairListTypesHarmonic):
    """Worker-side Harmonic interaction over a fixed pair list, with the
    potential selected per pair of particle types."""

    def __init__(self, system, vl):
        # Construct only on ranks in the active PMI CPU group.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListTypesHarmonic, system, vl)

    def setPotential(self, type1, type2, potential):
        """Set the potential used for pairs of (type1, type2)."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the potential registered for (type1, type2)."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self, type1, type2)

    def setFixedPairList(self, fixedpairlist):
        """Replace the fixed pair list of this interaction."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedPairList(self, fixedpairlist)

    def getFixedPairList(self):
        """Return the fixed pair list (a Python list of lists)."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getFixedPairList(self)
if pmi.isController:
    # Controller-side proxy classes: PMI forwards the listed properties and
    # calls to the *Local classes running on the workers.
    class Harmonic(Potential):
        'The Harmonic potential.'
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.HarmonicLocal',
            pmiproperty = ['K', 'r0']
        )

    class FixedPairListHarmonic(Interaction):
        __metaclass__ = pmi.Proxy
        # NOTE(review): 'getPotential' is listed here but
        # FixedPairListHarmonicLocal defines no getPotential method --
        # confirm whether PMI resolves it elsewhere or the entry is stale.
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListHarmonicLocal',
            pmicall = ['setPotential','getPotential','setFixedPairList','getFixedPairList']
        )

    class FixedPairListTypesHarmonic(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListTypesHarmonicLocal',
            pmicall = ['setPotential','getPotential','setFixedPairList','getFixedPairList']
        )
|
junghans/espressopp
|
src/interaction/Harmonic.py
|
Python
|
gpl-3.0
| 6,574
|
[
"ESPResSo"
] |
f2163565888e96b8fa7a3a63a6806c44f8d940124b8c3cef222b355a95b5a036
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import numpy as np
from ..sile import add_sile, sile_fh_open, sile_raise_write, SileError
from .sile import SileSiesta
from sisl._internal import set_module
from sisl import Geometry, Atom, AtomGhost, AtomUnknown, Atoms, SuperCell
from sisl.unit.siesta import unit_convert
__all__ = ['xvSileSiesta']
Bohr2Ang = unit_convert('Bohr', 'Ang')
@set_module("sisl.io.siesta")
class xvSileSiesta(SileSiesta):
    """ Geometry file

    Reader/writer for the SIESTA ``XV`` file, which stores the unit cell,
    atomic coordinates and velocities in Bohr-based units.
    """

    @sile_fh_open()
    def write_geometry(self, geometry, fmt='.9f', velocity=None):
        """ Writes the geometry to the contained file

        Parameters
        ----------
        geometry : Geometry
           geometry to write in the XV file
        fmt : str, optional
           the precision used for writing the XV file
        velocity : numpy.ndarray, optional
           velocities to write in the XV file (will be zero if not specified).
           Units input must be in Ang/fs.
        """
        # Check that we can write to the file
        sile_raise_write(self)

        if velocity is None:
            velocity = np.zeros([geometry.na, 3], np.float32)

        if geometry.xyz.shape != velocity.shape:
            # BUGFIX: the adjacent string literals previously concatenated
            # without a space ("...the inputvelocity...").
            raise SileError(f'{self}.write_geometry requires the input '
                            'velocity to have equal length to the input geometry.')

        # Write unit-cell (converted to Bohr). tmp[3:] stays zero so each
        # cell line carries three trailing zeros, as the XV format expects.
        tmp = np.zeros(6, np.float64)

        # Create format string for the cell-parameters
        fmt_str = (' ' + ('{:' + fmt + '} ') * 3) * 2 + '\n'
        for i in range(3):
            tmp[0:3] = geometry.cell[i, :] / Bohr2Ang
            self._write(fmt_str.format(*tmp))
        self._write(f'{geometry.na:12d}\n')

        # Atom lines: species-index  atomic-number  x y z  vx vy vz
        fmt_str = '{:3d}{:6d} '
        fmt_str += ('{:' + fmt + '} ') * 3 + ' '
        fmt_str += ('{:' + fmt + '} ') * 3 + '\n'

        for ia, a, ips in geometry.iter_species():
            tmp[0:3] = geometry.xyz[ia, :] / Bohr2Ang
            tmp[3:] = velocity[ia, :] / Bohr2Ang
            if isinstance(a, AtomGhost):
                # Ghost atoms are flagged by a negative atomic number.
                self._write(fmt_str.format(ips + 1, -a.Z, *tmp))
            else:
                self._write(fmt_str.format(ips + 1, a.Z, *tmp))

    @sile_fh_open()
    def read_supercell(self):
        """ Returns `SuperCell` object from the XV file """
        cell = np.empty([3, 3], np.float64)
        for i in range(3):
            # Only the first 3 columns are lattice vectors; the trailing
            # three zeros of each cell line are ignored.
            cell[i, :] = list(map(float, self.readline().split()[:3]))
        cell *= Bohr2Ang
        return SuperCell(cell)

    @sile_fh_open()
    def read_geometry(self, velocity=False, species_Z=False):
        """ Returns a `Geometry` object from the XV file

        Parameters
        ----------
        species_Z : bool, optional
           if ``True`` the atomic numbers are the species indices (useful when
           reading the ChemicalSpeciesLabel block simultaneously).
        velocity : bool, optional
           also return the velocities in the file

        Returns
        -------
        Geometry
        velocity : only if `velocity` is true.
        """
        sc = self.read_supercell()

        # Read number of atoms
        na = int(self.readline())
        xyz = np.empty([na, 3], np.float64)
        vel = np.empty([na, 3], np.float64)
        atms = [None] * na
        sp = np.empty([na], np.int32)
        for ia in range(na):
            line = list(map(float, self.readline().split()[:8]))
            sp[ia] = int(line[0])
            if species_Z:
                atms[ia] = Atom(sp[ia])
            else:
                atms[ia] = Atom(int(line[1]))
            xyz[ia, :] = line[2:5]
            vel[ia, :] = line[5:8]

        xyz *= Bohr2Ang
        vel *= Bohr2Ang

        # Ensure correct sorting: species are 1-based in the file.
        max_s = sp.max()
        sp -= 1
        # Placeholder atoms for species indices never used in the file.
        atms2 = Atoms(AtomUnknown(1000), na=na)
        for i in range(max_s):
            idx = (sp[:] == i).nonzero()[0]
            if len(idx) == 0:
                # Always ensure we have "something" for the unoccupied places
                atms2[idx] = AtomUnknown(1000 + i)
            else:
                atms2[idx] = atms[idx[0]]

        geom = Geometry(xyz, atms2.reduce(), sc=sc)
        if velocity:
            return geom, vel
        return geom

    @sile_fh_open()
    def read_velocity(self):
        """ Returns an array with the velocities from the XV file

        Returns
        -------
        velocity : velocities in Ang/fs for every atom in the file
        """
        # Skip the cell block, then the atom count.
        self.read_supercell()
        na = int(self.readline())
        vel = np.empty([na, 3], np.float64)
        for ia in range(na):
            line = list(map(float, self.readline().split()[:8]))
            vel[ia, :] = line[5:8]
        vel *= Bohr2Ang
        return vel

    # The generic data of an XV file are the velocities.
    read_data = read_velocity

    def ArgumentParser(self, p=None, *args, **kwargs):
        """ Returns the arguments that is available for this Sile """
        newkw = Geometry._ArgumentParser_args_single()
        newkw.update(kwargs)
        return self.read_geometry().ArgumentParser(p, *args, **newkw)
add_sile('XV', xvSileSiesta, gzip=True)
|
zerothi/sisl
|
sisl/io/siesta/xv.py
|
Python
|
mpl-2.0
| 5,323
|
[
"SIESTA"
] |
3d032f3913b3e9661219d9d85159bf96d3707e2b4f474ae5b91c9904e7da662b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Wrapper for netCDF readers."""
import os.path
import warnings
import numpy as np
from collections import OrderedDict
from monty.dev import requires
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.string import marquee
from pymatgen.core.units import ArrayWithUnit
from pymatgen.core.xcfunc import XcFunc
from pymatgen.core.structure import Structure
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"as_ncreader",
"as_etsfreader",
"NetcdfReader",
"ETSF_Reader",
"NO_DEFAULT",
"structure_from_ncdata",
]
try:
import netCDF4
except ImportError as exc:
netCDF4 = None
warnings.warn("""\
`import netCDF4` failed with the following error:
%s
Please install netcdf4 with `conda install netcdf4`
If the conda version does not work, uninstall it with `conda uninstall hdf4 hdf5 netcdf4`
and use `pip install netcdf4`""" % str(exc))
def _asreader(file, cls):
    """Coerce *file* into a *cls* reader.

    Returns ``(reader, closeit)`` where ``closeit`` is True when a new
    reader was constructed here (and therefore must be closed by the
    caller).
    """
    if isinstance(file, cls):
        return file, False
    return cls(file), True
def as_ncreader(file):
    """
    Convert file into a NetcdfReader instance.
    Returns reader, closeit where closeit is set to True
    if we have to close the file before leaving the procedure.
    """
    return _asreader(file, NetcdfReader)


def as_etsfreader(file):
    """Return an ETSF_Reader. Accepts filename or ETSF_Reader.

    Returns ``(reader, closeit)`` just like :func:`as_ncreader`.
    """
    return _asreader(file, ETSF_Reader)
class NetcdfReaderError(Exception):
    """Base error class for NetcdfReader"""


class NO_DEFAULT:
    """Sentinel class: signals that read_value/read_dimvalue should raise
    an Error instead of returning a fallback value."""
class NetcdfReader:
    """
    Wraps and extends netCDF4.Dataset. Read only mode. Supports with statements.

    Additional documentation available at:
        http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
    """
    # Exception type raised by all read errors of this class.
    Error = NetcdfReaderError

    @requires(netCDF4 is not None, "netCDF4 must be installed to use this class")
    def __init__(self, path):
        """Open the Netcdf file specified by path (read mode)."""
        self.path = os.path.abspath(path)
        try:
            self.rootgrp = netCDF4.Dataset(self.path, mode="r")
        except Exception as exc:
            raise self.Error("In file %s: %s" % (self.path, str(exc)))

        # walk_tree yields one batch of sub-groups per group, so this counts
        # batches (it is 1 for a file without sub-groups).
        self.ngroups = len(list(self.walk_tree()))

        # Always return non-masked numpy arrays.
        # Slicing a ncvar returns a MaskedArrray and this is really annoying
        # because it can lead to unexpected behaviour in e.g. calls to np.matmul!
        # See also https://github.com/Unidata/netcdf4-python/issues/785
        self.rootgrp.set_auto_mask(False)

    def __enter__(self):
        """Activated when used in the with statement."""
        return self

    def __exit__(self, type, value, traceback):
        """Activated at the end of the with statement. It automatically closes the file."""
        self.rootgrp.close()

    def close(self):
        """Close the file."""
        try:
            self.rootgrp.close()
        except Exception as exc:
            # Best-effort close: log instead of raising on shutdown paths.
            logger.warning("Exception %s while trying to close %s" % (exc, self.path))

    def walk_tree(self, top=None):
        """
        Navigate all the groups in the file starting from top.
        If top is None, the root group is used.

        Note: yields the ``.values()`` view of each group's sub-groups
        (one batch per group), not individual group objects.
        """
        if top is None:
            top = self.rootgrp

        values = top.groups.values()
        yield values
        for value in top.groups.values():
            for children in self.walk_tree(value):
                yield children

    def print_tree(self):
        """Print all the groups in the file."""
        for children in self.walk_tree():
            for child in children:
                print(child)

    def read_dimvalue(self, dimname, path="/", default=NO_DEFAULT):
        """
        Returns the value of a dimension.

        Args:
            dimname: Name of the variable
            path: path to the group.
            default: return `default` if `dimname` is not present and
                `default` is not `NO_DEFAULT` else raise self.Error.
        """
        try:
            dim = self._read_dimensions(dimname, path=path)[0]
            return len(dim)
        except self.Error:
            if default is NO_DEFAULT:
                raise
            return default

    def read_varnames(self, path="/"):
        """List of variable names stored in the group specified by path."""
        if path == "/":
            return self.rootgrp.variables.keys()
        else:
            # NOTE(review): relies on a `path2group` attribute that is not
            # set anywhere in this class -- confirm non-root paths work.
            group = self.path2group[path]
            return group.variables.keys()

    def read_value(self, varname, path="/", cmode=None, default=NO_DEFAULT):
        """
        Returns the values of variable with name varname in the group specified by path.

        Args:
            varname: Name of the variable
            path: path to the group.
            cmode: if cmode=="c", a complex ndarrays is constructed and returned
                (netcdf does not provide native support from complex datatype).
            default: returns default if varname is not present.
                self.Error is raised if default is default is NO_DEFAULT

        Returns:
            numpy array if varname represents an array, scalar otherwise.
        """
        try:
            var = self.read_variable(varname, path=path)
        except self.Error:
            if default is NO_DEFAULT:
                raise
            return default

        if cmode is None:
            # scalar or array
            # getValue is not portable!
            try:
                return var.getValue()[0] if not var.shape else var[:]
            except IndexError:
                return var.getValue() if not var.shape else var[:]
        else:
            # Complex mode: the last axis must hold (real, imag) pairs.
            assert var.shape[-1] == 2
            if cmode == "c":
                return var[..., 0] + 1j * var[..., 1]
            else:
                raise ValueError("Wrong value for cmode %s" % cmode)

    def read_variable(self, varname, path="/"):
        """Returns the variable with name varname in the group specified by path."""
        return self._read_variables(varname, path=path)[0]

    def _read_dimensions(self, *dimnames, **kwargs):
        # Internal helper: fetch dimension objects, wrapping KeyError in
        # self.Error with file context.
        path = kwargs.get("path", "/")
        try:
            if path == "/":
                return [self.rootgrp.dimensions[dname] for dname in dimnames]
            else:
                group = self.path2group[path]
                return [group.dimensions[dname] for dname in dimnames]
        except KeyError:
            raise self.Error("In file %s:\nError while reading dimensions: `%s` with kwargs: `%s`" %
                             (self.path, dimnames, kwargs))

    def _read_variables(self, *varnames, **kwargs):
        # Internal helper: fetch variable objects, wrapping KeyError in
        # self.Error with file context.
        path = kwargs.get("path", "/")
        try:
            if path == "/":
                return [self.rootgrp.variables[vname] for vname in varnames]
            else:
                group = self.path2group[path]
                return [group.variables[vname] for vname in varnames]
        except KeyError:
            raise self.Error("In file %s:\nError while reading variables: `%s` with kwargs `%s`." %
                             (self.path, varnames, kwargs))

    def read_keys(self, keys, dict_cls=AttrDict, path="/"):
        """
        Read a list of variables/dimensions from file. If a key is not present the corresponding
        entry in the output dictionary is set to None.
        """
        od = dict_cls()
        for k in keys:
            try:
                # Try to read a variable.
                od[k] = self.read_value(k, path=path)
            except self.Error:
                try:
                    # Try to read a dimension.
                    od[k] = self.read_dimvalue(k, path=path)
                except self.Error:
                    od[k] = None
        return od
class ETSF_Reader(NetcdfReader):
    """
    This object reads data from a file written according to the ETSF-IO specifications.

    We assume that the netcdf file contains at least the crystallographic section.
    """

    @lazy_property
    def chemical_symbols(self):
        """Chemical symbols char [number of atom species][symbol length]."""
        charr = self.read_value("chemical_symbols")
        symbols = []
        for v in charr:
            # Each row is an array of single-byte characters; join and strip.
            s = "".join(c.decode("utf-8") for c in v)
            symbols.append(s.strip())
        return symbols

    def typeidx_from_symbol(self, symbol):
        """Returns the type index from the chemical symbol. Note python convention."""
        return self.chemical_symbols.index(symbol)

    def read_structure(self, cls=Structure):
        """Returns the crystalline structure."""
        if self.ngroups != 1:
            raise NotImplementedError("In file %s: ngroups != 1" % self.path)

        return structure_from_ncdata(self, cls=cls)

    def read_abinit_xcfunc(self):
        """
        Read ixc from an Abinit file. Return :class:`XcFunc` object.
        """
        ixc = int(self.read_value("ixc"))
        return XcFunc.from_abinit_ixc(ixc)

    def read_abinit_hdr(self):
        """
        Read the variables associated to the Abinit header.

        Return :class:`AbinitHeader`
        """
        d = {}
        for hvar in _HDR_VARIABLES.values():
            # Prefer the ETSF name when one is defined for this variable.
            ncname = hvar.etsf_name if hvar.etsf_name is not None else hvar.name
            if ncname in self.rootgrp.variables:
                d[hvar.name] = self.read_value(ncname)
            elif ncname in self.rootgrp.dimensions:
                d[hvar.name] = self.read_dimvalue(ncname)
            else:
                raise ValueError("Cannot find `%s` in `%s`" % (ncname, self.path))

            # Convert scalars to (well) scalars.
            if hasattr(d[hvar.name], "shape") and not d[hvar.name].shape:
                d[hvar.name] = np.asarray(d[hvar.name]).item()

            if hvar.name in ("title", "md5_pseudos", "codvsn"):
                # Convert array of numpy bytes to list of strings
                if hvar.name == "codvsn":
                    d[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in d[hvar.name])
                else:
                    d[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip()
                                    for astr in d[hvar.name]]

        return AbinitHeader(d)
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):
    """
    Reads and returns a pymatgen structure from a NetCDF file
    containing crystallographic data in the ETSF-IO format.

    Args:
        ncdata: filename or NetcdfReader instance.
        site_properties: Dictionary with site properties.
        cls: The Structure class to instantiate.
    """
    ncdata, closeit = as_ncreader(ncdata)

    # TODO check whether atomic units are used
    lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang")

    red_coords = ncdata.read_value("reduced_atom_positions")
    natom = len(red_coords)

    znucl_type = ncdata.read_value("atomic_numbers")

    # type_atom[0:natom] --> index Between 1 and number of atom species
    type_atom = ncdata.read_value("atom_species")

    # Fortran to C index and float --> int conversion.
    species = natom * [None]
    for atom in range(natom):
        type_idx = type_atom[atom] - 1
        species[atom] = int(znucl_type[type_idx])

    d = {}
    if site_properties is not None:
        for prop in site_properties:
            # BUGFIX: was ``d[property] = ...`` which keyed every entry on
            # the ``property`` builtin, so all requested site properties
            # collapsed onto one bogus key and were never passed by name.
            d[prop] = ncdata.read_value(prop)

    structure = cls(lattice, species, red_coords, site_properties=d)

    # Quick and dirty hack.
    # I need an abipy structure since I need to_abivars and other methods.
    try:
        from abipy.core.structure import Structure as AbipyStructure
        structure.__class__ = AbipyStructure
    except ImportError:
        pass

    if closeit:
        ncdata.close()

    return structure
class _H:
    """Lightweight record describing one Abinit header variable."""
    __slots__ = ["name", "doc", "etsf_name"]

    def __init__(self, name, doc, etsf_name=None):
        # etsf_name is the alternative name used in ETSF-IO files, if any.
        self.name = name
        self.doc = doc
        self.etsf_name = etsf_name
# Registry of the variables found in the Abinit header. Each entry records
# the Abinit name, a short description and (when different) the name used in
# ETSF-IO files. Converted to an OrderedDict keyed by name below.
_HDR_VARIABLES = (
    # Scalars
    _H("bantot", "total number of bands (sum of nband on all kpts and spins)"),
    _H("date", "starting date"),
    _H("headform", "format of the header"),
    _H("intxc", "input variable"),
    _H("ixc", "input variable"),
    _H("mband", "maxval(hdr%nband)", etsf_name="max_number_of_states"),
    _H("natom", "input variable", etsf_name="number_of_atoms"),
    _H("nkpt", "input variable", etsf_name="number_of_kpoints"),
    _H("npsp", "input variable"),
    _H("nspden", "input variable", etsf_name="number_of_components"),
    _H("nspinor", "input variable", etsf_name="number_of_spinor_components"),
    _H("nsppol", "input variable", etsf_name="number_of_spins"),
    _H("nsym", "input variable", etsf_name="number_of_symmetry_operations"),
    _H("ntypat", "input variable", etsf_name="number_of_atom_species"),
    _H("occopt", "input variable"),
    _H("pertcase", "the index of the perturbation, 0 if GS calculation"),
    _H("usepaw", "input variable (0=norm-conserving psps, 1=paw)"),
    _H("usewvl", "input variable (0=plane-waves, 1=wavelets)"),
    _H("kptopt", "input variable (defines symmetries used for k-point sampling)"),
    _H("pawcpxocc", "input variable"),
    _H("nshiftk_orig", "original number of shifts given in input (changed in inkpts, the actual value is nshiftk)"),
    _H("nshiftk", "number of shifts after inkpts."),
    _H("icoulomb", "input variable."),
    _H("ecut", "input variable", etsf_name="kinetic_energy_cutoff"),
    _H("ecutdg", "input variable (ecut for NC psps, pawecutdg for paw)"),
    _H("ecutsm", "input variable"),
    _H("ecut_eff", "ecut*dilatmx**2 (dilatmx is an input variable)"),
    _H("etot", "EVOLVING variable"),
    _H("fermie", "EVOLVING variable", etsf_name="fermi_energy"),
    _H("residm", "EVOLVING variable"),
    _H("stmbias", "input variable"),
    _H("tphysel", "input variable"),
    _H("tsmear", "input variable"),
    _H("nelect", "number of electrons (computed from pseudos and charge)"),
    _H("charge", "input variable"),
    # Arrays
    _H("qptn", "qptn(3) the wavevector, in case of a perturbation"),
    # _H("rprimd", "rprimd(3,3) EVOLVING variables", etsf_name="primitive_vectors"),
    # _H(ngfft, "ngfft(3) input variable", number_of_grid_points_vector1"
    # _H("nwvlarr", "nwvlarr(2) the number of wavelets for each resolution.", etsf_name="number_of_wavelets"),
    _H("kptrlatt_orig", "kptrlatt_orig(3,3) Original kptrlatt"),
    _H("kptrlatt", "kptrlatt(3,3) kptrlatt after inkpts."),
    _H("istwfk", "input variable istwfk(nkpt)"),
    _H("lmn_size", "lmn_size(npsp) from psps"),
    _H("nband", "input variable nband(nkpt*nsppol)", etsf_name="number_of_states"),
    _H("npwarr", "npwarr(nkpt) array holding npw for each k point", etsf_name="number_of_coefficients"),
    _H("pspcod", "pscod(npsp) from psps"),
    _H("pspdat", "psdat(npsp) from psps"),
    _H("pspso", "pspso(npsp) from psps"),
    _H("pspxc", "pspxc(npsp) from psps"),
    _H("so_psp", "input variable so_psp(npsp)"),
    _H("symafm", "input variable symafm(nsym)"),
    # _H(symrel="input variable symrel(3,3,nsym)", etsf_name="reduced_symmetry_matrices"),
    _H("typat", "input variable typat(natom)", etsf_name="atom_species"),
    _H("kptns", "input variable kptns(nkpt, 3)", etsf_name="reduced_coordinates_of_kpoints"),
    _H("occ", "EVOLVING variable occ(mband, nkpt, nsppol)", etsf_name="occupations"),
    _H("tnons", "input variable tnons(nsym, 3)", etsf_name="reduced_symmetry_translations"),
    _H("wtk", "weight of kpoints wtk(nkpt)", etsf_name="kpoint_weights"),
    _H("shiftk_orig", "original shifts given in input (changed in inkpts)."),
    _H("shiftk", "shiftk(3,nshiftk), shiftks after inkpts"),
    _H("amu", "amu(ntypat) ! EVOLVING variable"),
    # _H("xred", "EVOLVING variable xred(3,natom)", etsf_name="reduced_atom_positions"),
    _H("zionpsp", "zionpsp(npsp) from psps"),
    _H("znuclpsp", "znuclpsp(npsp) from psps. Note the difference between (znucl|znucltypat) and znuclpsp"),
    _H("znucltypat", "znucltypat(ntypat) from alchemy", etsf_name="atomic_numbers"),
    _H("codvsn", "version of the code"),
    _H("title", "title(npsp) from psps"),
    _H("md5_pseudos", "md5pseudos(npsp), md5 checksums associated to pseudos (read from file)"),
    # _H(type(pawrhoij_type), allocatable :: pawrhoij(:) ! EVOLVING variable, only for paw
)
_HDR_VARIABLES = OrderedDict([(h.name, h) for h in _HDR_VARIABLES])
class AbinitHeader(AttrDict):
    """Stores the values reported in the Abinit header."""
    # def __init__(self, *args, **kwargs):
    #    super().__init__(*args, **kwargs)
    #    for k, v in self.items():
    #        v.__doc__ = _HDR_VARIABLES[k].doc

    def __str__(self):
        return self.to_string()

    def to_string(self, verbose=0, title=None, **kwargs):
        """
        String representation. kwargs are passed to `pprint.pformat`.

        Args:
            verbose: Verbosity level
            title: Title string.
        """
        from pprint import pformat
        text = pformat(self, **kwargs)
        if title is None:
            return text
        return "\n".join([marquee(title, mark="="), text])
|
gVallverdu/pymatgen
|
pymatgen/io/abinit/netcdf.py
|
Python
|
mit
| 17,629
|
[
"ABINIT",
"NetCDF",
"pymatgen"
] |
f00d40900c97b58fef33af2f7ccedc9711468e4bd3e26538a38b03c82f7e3bac
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-add-shifter
# Author : Federico Stagni
########################################################################
""" Adds or modify a shifter, in the operations section of the CS
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC import exit as DIRACExit, gLogger
if __name__ == "__main__":
    # Usage message: first line of the module docstring plus argument help.
    Script.setUsageMessage( '\n'.join( [__doc__.split( '\n' )[1],
        'Usage:',
        ' %s [option|cfgfile] ... ShifterRole UserName DIRACGroup ...' % Script.scriptName,
        'Arguments:',
        ' ShifterRole: Name of the shifter role, e.g. DataManager',
        ' UserName: A user name, as registered in Registry section',
        ' DIRACGroup: DIRAC Group, e.g. diracAdmin (the user has to have this role)'] ) )
    Script.parseCommandLine( ignoreErrors = True )
    args = Script.getPositionalArgs( )

    csAPI = CSAPI( )

    # All three positional arguments are mandatory.
    if len( args ) < 3:
        Script.showHelp( )
        DIRACExit( -1 )

    shifterRole = args[0]
    userName = args[1]
    diracGroup = args[2]

    # NOTE(review): addShifter writes to the CS configuration; confirm it
    # commits internally or whether a csAPI.commitChanges() call is needed
    # for the change to persist.
    res = csAPI.addShifter( {shifterRole: {'User': userName, 'Group': diracGroup}} )
    if not res['OK']:
        gLogger.error( "Could not add shifter", ": " + res['Message'] )
        DIRACExit( 1 )
    gLogger.notice( "Added shifter %s as user %s with group %s" % (shifterRole, userName, diracGroup) )
|
coberger/DIRAC
|
ConfigurationSystem/scripts/dirac-admin-add-shifter.py
|
Python
|
gpl-3.0
| 1,627
|
[
"DIRAC"
] |
f5469967ad81ff48137bce753f2e85e434b0db3899c5f7d983143569e605ffde
|
# -*- coding: utf-8 -*-
__author__ = 'akiokio'
from django.core.management.base import NoArgsCommand
from salesReport.models import item as product, brands
from salesReport.pymagento import Magento
from salesReport.views import getBrand, getVMD30ForDatabaseItem
import datetime
def RepresentsInt(s):
    """Return True if *s* can be converted to an int, False otherwise.

    Used to filter Magento SKUs down to purely numeric ones before the
    database lookup.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        # ValueError: non-numeric string (e.g. "abc", "3.5").
        # TypeError: None or another non-convertible object coming back
        # from the Magento API; previously this escaped and crashed.
        return False
class Command(NoArgsCommand):
    # Management command: sync product brands from Magento into the local
    # database, then recompute the 30-day VMD (average daily sales) for
    # every product.  Python 2 code (print statements).
    help = "Describe the Command Here"
    def handle_noargs(self, **options):
        print 'Inicio'
        salesReport = Magento()
        salesReport.connect()
        # Walk every Magento product with a numeric SKU and attach its
        # brand to the matching local product record.
        for item in salesReport.getProductArray():
            if RepresentsInt(item['sku']):
                try:
                    database_item = product.objects.get(sku=item['sku'])
                except:
                    # NOTE(review): bare except + break aborts the whole
                    # sync on the first SKU missing locally — presumably
                    # `continue` (skip this item) was intended; confirm.
                    break
                if not 'marca' in item:
                    item['marca'] = getBrand(item)
                # Brand names are truncated to 100 chars to match the
                # model field; create the brand on first sight.
                try:
                    marca = brands.objects.get(name=item['marca'][:100])
                except Exception as e:
                    print e
                    marca = brands.objects.create(name=item['marca'][:100], meta_dias_estoque=1)
                database_item.brand = marca
                database_item.save()
        # 30-day window; the -3h shift presumably converts local server
        # time to the store timezone — TODO confirm.
        dateInit = datetime.datetime.today().replace(hour=0, minute=0, second=0) - datetime.timedelta(hours=3)
        dateEnd = datetime.datetime.today().replace(hour=23, minute=59, second=59) - datetime.timedelta(days=30) - datetime.timedelta(hours=3)
        for item in product.objects.all():
            item.vmd = getVMD30ForDatabaseItem(item, dateEnd, dateInit)
            item.save()
|
akiokio/centralfitestoque
|
src/salesReport/management/commands/update_product_brand.py
|
Python
|
bsd-2-clause
| 1,654
|
[
"VMD"
] |
c73bb0ff6c08e5bb57e8be05800a81b47ddaf63299241e3fb5edd4dcfd95a169
|
"""
Data-driven tests for reads
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import os
import ga4gh.server.backend as backend
import ga4gh.server.datamodel as datamodel
import ga4gh.server.datamodel.datasets as datasets
import ga4gh.server.datamodel.reads as reads
import ga4gh.server.datamodel.references as references
import ga4gh.server.datarepo as datarepo
import tests.datadriven as datadriven
import tests.paths as paths
import ga4gh.common.utils as utils
import ga4gh.schemas.protocol as protocol
import pysam
def testReads():
    """Generate one data-driven ReadGroupSetTest per BAM test file."""
    bamDir = os.path.join(paths.testDataDir, "datasets/dataset1/reads")
    # The data-driven framework discovers every *.bam under bamDir and
    # builds a ReadGroupSetTest for it; yield them one at a time.
    for generatedTest in datadriven.makeTests(bamDir, ReadGroupSetTest, '*.bam'):
        yield generatedTest
class ReadGroupSetInfo(object):
    """
    Snapshot of aggregate alignment statistics for a read group set.

    Captures the mapped/unmapped read counts that pysam reports for the
    underlying SAM/BAM file, so tests can compare them against the
    server's output.
    """
    def __init__(self, samFile):
        # pysam exposes the index statistics as plain attributes.
        self.numAlignedReads, self.numUnalignedReads = (
            samFile.mapped, samFile.unmapped)
class ReadGroupInfo(object):
    """
    Container class for information about a read group.

    Reads the BAM file directly with pysam and records, for one read
    group: its mapped reads bucketed by reference name, the @PG program
    records, and the @RG header fields (sample, description, etc.) so
    tests can compare them against the server datamodel.
    """
    def __init__(self, gaReadGroupSet, samFile, readGroupName):
        self.gaReadGroup = reads.AbstractReadGroup(
            gaReadGroupSet, readGroupName)
        self.id = self.gaReadGroup.getId()
        self.samFile = samFile
        # reference name -> list of pysam reads belonging to this group.
        self.mappedReads = collections.defaultdict(list)
        for read in self.samFile:
            tags = dict(read.tags)
            # Only keep reads tagged with this read group's RG.
            if 'RG' not in tags or tags['RG'] != readGroupName:
                continue
            if read.reference_id != -1:
                # mapped read
                referenceName = self.samFile.getrname(read.reference_id)
                self.mappedReads[referenceName].append(read)
        # Per-group counts are not tracked; -1 matches what the server
        # reports for individual read groups (see testReadGroupStats).
        self.numAlignedReads = -1
        self.numUnalignedReads = -1
        self.programs = []
        if 'PG' in self.samFile.header:
            self.programs = self.samFile.header['PG']
        # @RG header fields default to None when absent.
        self.sampleName = None
        self.description = None
        self.predictedInsertSize = None
        self.instrumentModel = None
        self.sequencingCenter = None
        self.experimentDescription = None
        self.library = None
        self.platformUnit = None
        self.runTime = None
        if 'RG' in self.samFile.header:
            # Select the single @RG record matching this group's ID.
            readGroupHeader = [
                rgHeader for rgHeader in self.samFile.header['RG']
                if rgHeader['ID'] == readGroupName][0]
            self.sampleName = readGroupHeader.get('SM', None)
            self.description = readGroupHeader.get('DS', None)
            if 'PI' in readGroupHeader:
                self.predictedInsertSize = int(readGroupHeader['PI'])
            self.instrumentModel = readGroupHeader.get('PL', None)
            self.sequencingCenter = readGroupHeader.get('CN', None)
            # DS is reused for both description and experimentDescription.
            self.experimentDescription = readGroupHeader.get('DS', None)
            self.library = readGroupHeader.get('LB', None)
            self.platformUnit = readGroupHeader.get('PU', None)
            self.runTime = readGroupHeader.get('DT', None)
class ReadGroupSetTest(datadriven.DataDrivenTest):
    """
    Data driven test for read group sets.

    Each instance is built from one BAM file: the expected values are
    read directly with pysam (ReadGroupSetInfo / ReadGroupInfo) and then
    compared against what the server datamodel reports.
    """
    def __init__(self, localId, dataPath):
        self._backend = backend.Backend(datarepo.AbstractDataRepository())
        self._referenceSet = None
        self._dataset = datasets.Dataset("ds")
        self._readGroupInfos = {}
        self._readGroupSetInfo = None
        self._samFile = pysam.AlignmentFile(dataPath)
        # References must be registered before the base class builds the
        # data model object; alignment info needs self._gaObject and so
        # runs after.
        self._readReferences()
        super(ReadGroupSetTest, self).__init__(localId, dataPath)
        self._readAlignmentInfo()

    def _readReferences(self):
        # Read the reference information from the samfile
        referenceSetName = None
        for referenceInfo in self._samFile.header['SQ']:
            if 'AS' not in referenceInfo:
                infoDict = reads.parseMalformedBamHeader(referenceInfo)
            else:
                # Bug fix: infoDict was previously left unbound (first
                # iteration) or stale (later iterations) whenever the
                # @SQ entry already contained an 'AS' key.
                infoDict = referenceInfo
            # If there's still no reference set name in there we use
            # a default name.
            name = infoDict.get("AS", "Default")
            if referenceSetName is None:
                referenceSetName = name
                self._addReferenceSet(referenceSetName)
            else:
                # All @SQ entries must agree on the reference set name.
                self.assertEqual(referenceSetName, name)
            self._addReference(infoDict['SN'])

    def _addReferenceSet(self, referenceSetName):
        """Create the reference set and register it with the backend."""
        self._referenceSet = references.AbstractReferenceSet(referenceSetName)
        self._backend.getDataRepository().addReferenceSet(self._referenceSet)

    def _addReference(self, referenceName):
        """Add one named reference to the current reference set."""
        reference = references.AbstractReference(
            self._referenceSet, referenceName)
        self._referenceSet.addReference(reference)

    def _readAlignmentInfo(self):
        """Collect expected per-read-group data directly from the BAM."""
        self._readGroupSetInfo = ReadGroupSetInfo(self._samFile)
        if 'RG' in self._samFile.header:
            readGroupHeaders = self._samFile.header['RG']
            readGroupNames = [
                readGroupHeader['ID'] for readGroupHeader
                in readGroupHeaders]
        else:
            # BAMs without @RG records get a single synthetic group.
            readGroupNames = ['default']
        for readGroupName in readGroupNames:
            readGroupInfo = ReadGroupInfo(
                self._gaObject, self._samFile, readGroupName)
            self._readGroupInfos[readGroupName] = readGroupInfo

    def getDataModelInstance(self, localId, dataPath):
        """Build the server-side read group set under test."""
        readGroupSet = reads.HtslibReadGroupSet(self._dataset, localId)
        readGroupSet.populateFromFile(dataPath)
        return readGroupSet

    def getProtocolClass(self):
        return protocol.ReadGroupSet

    def testSampleNameEtc(self):
        # test that sampleId and other misc fields are set correctly
        readGroupSet = self._gaObject
        for readGroup in readGroupSet.getReadGroups():
            readGroupInfo = self._readGroupInfos[readGroup.getLocalId()]
            gaReadGroup = readGroup.toProtocolElement()
            self.assertEqual(
                readGroupInfo.sampleName,
                gaReadGroup.sample_name)
            self.assertEqual(
                readGroupInfo.predictedInsertSize,
                gaReadGroup.predicted_insert_size)
            self.assertEqual(
                readGroupInfo.description,
                gaReadGroup.description)

    def testExperiments(self):
        # test that the experiment field is set correctly
        readGroupSet = self._gaObject
        for readGroup in readGroupSet.getReadGroups():
            readGroupInfo = self._readGroupInfos[readGroup.getLocalId()]
            gaReadGroup = readGroup.toProtocolElement()
            self.assertIn(
                "experiment",
                datamodel.CompoundId.deobfuscate(gaReadGroup.experiment.id))
            self.assertEqual(
                readGroupInfo.instrumentModel,
                gaReadGroup.experiment.instrument_model)
            self.assertEqual(
                readGroupInfo.sequencingCenter,
                gaReadGroup.experiment.sequencing_center)
            self.assertEqual(
                readGroupInfo.experimentDescription,
                gaReadGroup.experiment.description)
            self.assertEqual(
                readGroupInfo.library,
                gaReadGroup.experiment.library)
            self.assertEqual(
                readGroupInfo.platformUnit,
                gaReadGroup.experiment.platform_unit)
            self.assertEqual(
                readGroupInfo.runTime,
                gaReadGroup.experiment.run_time)

    def testPrograms(self):
        # test that program info is set correctly
        readGroupSet = self._gaObject
        for readGroup in readGroupSet.getReadGroups():
            readGroupInfo = self._readGroupInfos[readGroup.getLocalId()]
            gaPrograms = readGroup.getPrograms()
            htslibPrograms = readGroupInfo.programs
            for gaProgram, htslibProgram in utils.zipLists(
                    gaPrograms, htslibPrograms):
                self.assertEqual(
                    gaProgram.id, htslibProgram.get('ID'))
                self.assertEqual(
                    gaProgram.command_line, htslibProgram.get('CL', None))
                self.assertEqual(
                    gaProgram.name, htslibProgram.get('PN', None))
                self.assertEqual(
                    gaProgram.prev_program_id, htslibProgram.get('PP', None))
                self.assertEqual(
                    gaProgram.version, htslibProgram.get('VN', None))

    def testReadGroupStats(self):
        # test that the stats attrs are populated correctly
        readGroupSet = self._gaObject
        gaReadGroupSet = readGroupSet.toProtocolElement()
        readGroupSetInfo = self._readGroupSetInfo
        self.assertEqual(
            readGroupSet.getNumAlignedReads(),
            readGroupSetInfo.numAlignedReads)
        self.assertEqual(
            readGroupSet.getNumUnalignedReads(),
            readGroupSetInfo.numUnalignedReads)
        self.assertEqual(
            gaReadGroupSet.stats.aligned_read_count,
            readGroupSetInfo.numAlignedReads)
        self.assertEqual(
            gaReadGroupSet.stats.unaligned_read_count,
            readGroupSetInfo.numUnalignedReads)
        # Per-group stats are not tracked by the server and must be -1.
        for readGroup in readGroupSet.getReadGroups():
            gaReadGroup = readGroup.toProtocolElement()
            self.assertEqual(
                readGroup.getNumAlignedReads(), -1)
            self.assertEqual(
                readGroup.getNumUnalignedReads(), -1)
            self.assertEqual(
                gaReadGroup.stats.aligned_read_count, -1)
            self.assertEqual(
                gaReadGroup.stats.unaligned_read_count, -1)

    def testValidateObjects(self):
        # test that validation works on read groups and reads
        readGroupSet = self._gaObject
        for readGroup in readGroupSet.getReadGroups():
            self.assertIsInstance(
                readGroup.toProtocolElement(), protocol.ReadGroup)
            for reference in self._referenceSet.getReferences():
                for gaAlignment in readGroup.getReadAlignments(reference):
                    self.assertIsInstance(
                        gaAlignment, protocol.ReadAlignment)

    def testGetReadAlignmentsRefId(self):
        # test that searching with a reference id succeeds
        readGroupSet = self._gaObject
        for readGroup in readGroupSet.getReadGroups():
            readGroupInfo = self._readGroupInfos[readGroup.getLocalId()]
            for name, alignments in readGroupInfo.mappedReads.items():
                reference = self._referenceSet.getReferenceByName(name)
                self.assertAlignmentListsEqual(
                    list(readGroup.getReadAlignments(reference)), alignments,
                    readGroupInfo)

    def testGetReadAlignmentsStartEnd(self):
        # test that searching with start and end coords succeeds
        readGroupSet = self._gaObject
        for readGroup in readGroupSet.getReadGroups():
            readGroupInfo = self._readGroupInfos[readGroup.getLocalId()]
            for name, alignments in readGroupInfo.mappedReads.items():
                bigNumThatPysamWontChokeOn = 2**30
                reference = self._referenceSet.getReferenceByName(name)
                gaAlignments = list(readGroup.getReadAlignments(
                    reference, 0, bigNumThatPysamWontChokeOn))
                self.assertAlignmentListsEqual(
                    gaAlignments, alignments, readGroupInfo)

    def testGetReadAlignmentSearchRanges(self):
        # test that various range searches work
        readGroupSet = self._gaObject
        for readGroup in readGroupSet.getReadGroups():
            readGroupInfo = self._readGroupInfos[readGroup.getLocalId()]
            for name in readGroupInfo.mappedReads.keys():
                reference = self._referenceSet.getReferenceByName(name)
                alignments = list(readGroup.getReadAlignments(reference))
                length = len(alignments)
                if length < 2:
                    continue
                positions = [
                    read.alignment.position.position for read in alignments
                    if read.alignment is not None]
                # Skip groups with duplicate start positions; the range
                # assertions below assume distinct starts.
                if length != len(set(positions)):
                    continue
                begin = positions[0]
                end = positions[-1]
                # Half-open interval semantics: [begin, end+1) includes
                # all reads, [begin, end) drops the last, [begin, begin)
                # is empty.
                self.assertGetReadAlignmentsRangeResult(
                    readGroup, reference, begin, end + 1, length)
                self.assertGetReadAlignmentsRangeResult(
                    readGroup, reference, begin, end, length - 1)
                self.assertGetReadAlignmentsRangeResult(
                    readGroup, reference, begin, begin, 0)

    def assertGetReadAlignmentsRangeResult(
            self, readGroup, reference, start, end, result):
        """Assert that the range [start, end) returns *result* reads."""
        alignments = list(readGroup.getReadAlignments(reference, start, end))
        self.assertEqual(len(alignments), result)

    def assertAlignmentListsEqual(
            self, gaAlignments, pysamAlignments, readGroupInfo):
        """Pairwise-compare server alignments against pysam alignments."""
        for gaAlignment, pysamAlignment in utils.zipLists(
                gaAlignments, pysamAlignments):
            self.assertAlignmentsEqual(
                gaAlignment, pysamAlignment, readGroupInfo)

    def getDictFromMessageMap(self, messageMap):
        """Flatten a protobuf message map into a plain dict of lists."""
        return dict([
            (k, [protocol.getValueFromValue(x) for x in v.values])
            for (k, v) in messageMap._values.items()])

    def assertAlignmentsEqual(self, gaAlignment, pysamAlignment,
                              readGroupInfo):
        """Exhaustively compare one GA4GH alignment against pysam's."""
        if pysamAlignment.query_qualities is None:
            self.assertEqual(gaAlignment.aligned_quality, [])
        else:
            self.assertEqual(
                gaAlignment.aligned_quality,
                list(pysamAlignment.query_qualities))
        self.assertEqual(
            gaAlignment.aligned_sequence,
            pysamAlignment.query_sequence)
        if reads.SamFlags.isFlagSet(
                pysamAlignment.flag, reads.SamFlags.READ_UNMAPPED):
            # Unmapped reads carry an empty (default) alignment message.
            self.assertEqual(0, gaAlignment.alignment.ByteSize())
        else:
            self.assertEqual(
                gaAlignment.alignment.mapping_quality,
                pysamAlignment.mapping_quality)
            self.assertEqual(
                gaAlignment.alignment.position.reference_name,
                readGroupInfo.samFile.getrname(pysamAlignment.reference_id))
            self.assertEqual(
                gaAlignment.alignment.position.position,
                pysamAlignment.reference_start)
            # TODO test reverseStrand on position and on
            # nextMatePosition once it has been implemented.
            self.assertCigarEqual(
                gaAlignment.alignment.cigar,
                pysamAlignment.cigar)
        self.assertFlag(
            gaAlignment.duplicate_fragment,
            pysamAlignment, reads.SamFlags.DUPLICATE_READ)
        self.assertFlag(
            gaAlignment.failed_vendor_quality_checks,
            pysamAlignment, reads.SamFlags.FAILED_QUALITY_CHECK)
        self.assertEqual(
            gaAlignment.fragment_length,
            pysamAlignment.template_length)
        self.assertEqual(
            gaAlignment.fragment_name,
            pysamAlignment.query_name)
        compoundId = datamodel.ReadAlignmentCompoundId(
            self._gaObject.getCompoundId(),
            pysamAlignment.query_name)
        self.assertEqual(gaAlignment.id, str(compoundId))
        ret = protocol.ReadAlignment()
        for key, value in pysamAlignment.tags:
            protocol.setAttribute(ret.attributes.attr[key].values, value)
        self.assertEqual(
            protocol.toJson(gaAlignment.attributes),
            protocol.toJson(ret.attributes))
        if reads.SamFlags.isFlagSet(
                pysamAlignment.flag, reads.SamFlags.MATE_UNMAPPED):
            self.assertEqual(0, gaAlignment.next_mate_position.ByteSize())
        else:
            self.assertEqual(
                gaAlignment.next_mate_position.position,
                pysamAlignment.next_reference_start)
            if pysamAlignment.next_reference_id != -1:
                self.assertEqual(
                    gaAlignment.next_mate_position.reference_name,
                    readGroupInfo.samFile.getrname(
                        pysamAlignment.next_reference_id))
            else:
                self.assertEqual(
                    gaAlignment.next_mate_position.reference_name, "")
        if gaAlignment.number_reads == 1:
            self.assertFlag(
                False, pysamAlignment, reads.SamFlags.READ_PAIRED)
        elif gaAlignment.number_reads == 2:
            self.assertFlag(
                True, pysamAlignment, reads.SamFlags.READ_PAIRED)
        else:
            # we shouldn't be setting numberReads to anything else
            self.assertTrue(False)
        # Bug fix: the original used "is -1", an identity comparison
        # that only works because CPython caches small ints (and raises
        # SyntaxWarning on Python >= 3.8); value equality is intended.
        if gaAlignment.read_number == -1:
            self.assertFlag(
                False, pysamAlignment, reads.SamFlags.FIRST_IN_PAIR)
            self.assertFlag(
                False, pysamAlignment, reads.SamFlags.SECOND_IN_PAIR)
        elif gaAlignment.read_number == 0:
            self.assertFlag(
                True, pysamAlignment, reads.SamFlags.FIRST_IN_PAIR)
            self.assertFlag(
                False, pysamAlignment, reads.SamFlags.SECOND_IN_PAIR)
        elif gaAlignment.read_number == 1:
            self.assertFlag(
                False, pysamAlignment, reads.SamFlags.FIRST_IN_PAIR)
            self.assertFlag(
                True, pysamAlignment, reads.SamFlags.SECOND_IN_PAIR)
        elif gaAlignment.read_number == 2:
            self.assertFlag(
                True, pysamAlignment, reads.SamFlags.FIRST_IN_PAIR)
            self.assertFlag(
                True, pysamAlignment, reads.SamFlags.SECOND_IN_PAIR)
        else:
            # we shouldn't be setting readNumber to anything else
            self.assertTrue(False)
        self.assertFlag(
            not gaAlignment.improper_placement,
            pysamAlignment, reads.SamFlags.READ_PROPER_PAIR)
        self.assertEqual(
            gaAlignment.read_group_id,
            readGroupInfo.id)
        self.assertFlag(
            gaAlignment.secondary_alignment,
            pysamAlignment, reads.SamFlags.SECONDARY_ALIGNMENT)
        self.assertFlag(
            gaAlignment.supplementary_alignment,
            pysamAlignment, reads.SamFlags.SUPPLEMENTARY_ALIGNMENT)

    def assertFlag(self, gaAlignmentAttr, pysamAlignment, mask):
        """Assert a GA boolean attribute mirrors a SAM flag bit."""
        flagSet = reads.SamFlags.isFlagSet(pysamAlignment.flag, mask)
        self.assertEqual(gaAlignmentAttr, flagSet)

    def assertCigarEqual(self, gaCigar, pysamCigar):
        """Assert a GA cigar equals pysam's (operation, length) pairs."""
        self.assertEqual(len(gaCigar), len(pysamCigar))
        for i, gaCigarUnit in enumerate(gaCigar):
            operation, length = pysamCigar[i]
            gaCigarUnitOperation = reads.SamCigar.ga2int(
                gaCigarUnit.operation)
            self.assertEqual(
                gaCigarUnitOperation, operation)
            self.assertEqual(
                gaCigarUnit.operation_length, length)
|
saupchurch/server
|
tests/datadriven/test_reads.py
|
Python
|
apache-2.0
| 19,167
|
[
"pysam"
] |
6882017c827d773720dd58d603764fdb11c711b6f58cb4d3d84cad68aa11c62c
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAims(RPackage):
    """This package contains the AIMS implementation. It contains
    necessary functions to assign the five intrinsic molecular
    subtypes (Luminal A, Luminal B, Her2-enriched, Basal-like,
    Normal-like). Assignments could be done on individual samples
    as well as on dataset of gene expression data."""
    # Bioconductor package page and its git mirror.
    homepage = "http://bioconductor.org/packages/AIMS/"
    url = "https://git.bioconductor.org/packages/AIMS"
    # Pinned to a specific commit so the build is reproducible.
    version('1.8.0', git='https://git.bioconductor.org/packages/AIMS', commit='86b866c20e191047492c51b43e3f73082c3f8357')
    # AIMS 1.8.0 targets the Bioconductor release paired with R 3.4.x.
    depends_on('r@3.4.0:3.4.9', when='@1.8.0')
    depends_on('r-e1071', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-aims/package.py
|
Python
|
lgpl-2.1
| 1,979
|
[
"Bioconductor"
] |
2b562400b9f318a6c23aabd5f7922d1fa9083f86a23695ee93b83dd368f9aa01
|
from time import time
import numpy as np
from petsc4py import PETSc
from src import stokes_flow as sf
from src.objComposite import *
# from src.stokes_flow import obj_dic
# from src.ref_solution import *
# from src.geo import *
# Public API of this module: helpers that write VTK output and print
# velocity-error diagnostics for each problem type.
__all__ = ['save_singleEcoli_vtk', 'save_singleEcoli_U_vtk', 'save_singleEcoli_U_4part_vtk',
           'save_grid_sphere_vtk',
           'save_singleRod_vtk', ]
def save_singleEcoli_vtk(problem: sf.StokesFlowProblem, createHandle=createEcoliComp_tunnel):
    """Write VTK output for a force-free single-ecoli problem and print
    the velocity error of each component (sphere, helices, optional
    Tgeo) against a refined check geometry.

    Returns False if disabled via the PETSc option
    ``save_singleEcoli_vtk``; otherwise True.
    """
    # force free
    OptDB = PETSc.Options()
    if not OptDB.getBool('save_singleEcoli_vtk', True):
        return False
    t0 = time()
    problem_kwargs = problem.get_kwargs()
    fileHandle = problem_kwargs['fileHandle']
    # with_T_geo = len(problem.get_all_obj_list()) == 4
    with_T_geo = problem_kwargs['with_T_geo']
    ref_U = problem.get_obj_list()[0].get_ref_U()
    # problem.vtk_obj(fileHandle)
    problem.vtk_self(fileHandle)
    # bgeo = geo()
    # bnodesHeadle = problem_kwargs['bnodesHeadle']
    # matname = problem_kwargs['matname']
    # bgeo.mat_nodes(filename=matname, mat_handle=bnodesHeadle)
    # belemsHeadle = problem_kwargs['belemsHeadle']
    # bgeo.mat_elmes(filename=matname, mat_handle=belemsHeadle, elemtype='tetra')
    # problem.vtk_tetra(fileHandle + '_Velocity', bgeo)
    # create check obj
    # The check geometry deliberately uses a different resolution
    # (nth, ds) so the error is not evaluated on the solve nodes.
    check_kwargs = problem_kwargs.copy()
    check_kwargs['nth'] = problem_kwargs['nth'] - 2 if problem_kwargs['nth'] >= 10 else \
        problem_kwargs['nth'] + 1
    check_kwargs['ds'] = problem_kwargs['ds'] * 1.2
    check_kwargs['hfct'] = 1
    check_kwargs['Tfct'] = 1
    ecoli_comp_check = createHandle(**check_kwargs)
    ecoli_comp_check.set_ref_U(ref_U)
    ecoli_comp_check.set_problem(problem)
    # ecoli_comp_check.set_name('%s_check' % fileHandle)
    # # dbg
    # for obj in ecoli_comp_check.get_obj_list():
    #     filename = fileHandle + '_check_' + str(obj)
    #     obj.get_u_geo().save_nodes(filename + '_U')
    #     obj.get_f_geo().save_nodes(filename + '_f')
    # vtk_check yields one error tuple per object, in object order:
    # sphere, helix0, helix1, then (optionally) Tgeo.
    velocity_err_list = problem.vtk_check(fileHandle, ecoli_comp_check)
    PETSc.Sys.Print('velocity error of sphere (total, x, y, z): ', next(velocity_err_list))
    PETSc.Sys.Print('velocity error of helix0 (total, x, y, z): ', next(velocity_err_list))
    PETSc.Sys.Print('velocity error of helix1 (total, x, y, z): ', next(velocity_err_list))
    if with_T_geo:
        PETSc.Sys.Print('velocity error of Tgeo (total, x, y, z): ', next(velocity_err_list))
    t1 = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
    return True
# given velocity case
def save_singleEcoli_U_vtk(problem: sf.StokesFlowProblem, createHandle=createEcoliComp_tunnel,
                           part='full', prefix=''):
    """Write VTK output for a given-velocity single-ecoli problem and
    print velocity errors for the requested part.

    ``part`` selects which check is run: 'head', 'tail', or 'full'
    (both).  ``prefix`` is appended to the output file handle.  Returns
    False if disabled via the PETSc option ``save_singleEcoli_vtk``;
    otherwise True.

    The nested closures below read check_kwargs / rel_Us / rel_Uh /
    ecoli_U / center / fileHandle, which are bound later in this
    function body, before do_save_part()() is invoked.
    """
    def save_head():
        # Head check: a refined sphere moving with rel_Us + ecoli_U.
        vsobj = createHandle(**check_kwargs)[0]
        vsobj.set_rigid_velocity(rel_Us + ecoli_U, center=center)
        velocity_err_sphere = next(problem.vtk_check(fileHandle, vsobj))
        PETSc.Sys.Print('velocity error of sphere (total, x, y, z): ', velocity_err_sphere)
    def save_tail():
        # tail_obj_list = createHandle(**check_kwargs)[1]
        # for tail_obj in tail_obj_list:
        #     tail_obj.set_rigid_velocity(rel_Uh + ecoli_U, center=center)
        # velocity_err_list = problem.vtk_check(fileHandle, tail_obj_list)
        # PETSc.Sys.Print('velocity error of helix0 (total, x, y, z): ', next(velocity_err_list))
        # PETSc.Sys.Print('velocity error of helix1 (total, x, y, z): ', next(velocity_err_list))
        # if with_T_geo:
        #     PETSc.Sys.Print('velocity error of Tgeo (total, x, y, z): ', next(velocity_err_list))
        # Tail check: combine all tail objects, subsample at most 3000
        # random nodes to keep the check affordable, then compare.
        tail_obj_list = createHandle(**check_kwargs)[1]
        tail_obj_all = sf.StokesFlowObj()
        tail_obj_all.combine(tail_obj_list, set_re_u=True, set_force=True)
        tail_obj_all.set_name('tail')
        tidx = np.arange(tail_obj_all.get_n_u_node())
        np.random.shuffle(tidx)
        tidx = tidx[:np.min((tidx.size, 3000))]
        tail_obj_all.get_u_geo().set_nodes(tail_obj_all.get_u_nodes()[tidx].copy(), deltalength=0)
        tail_obj_all.get_f_geo().set_nodes(tail_obj_all.get_f_nodes()[tidx].copy(), deltalength=0)
        tail_obj_all.set_rigid_velocity(rel_Uh + ecoli_U, center=center)
        velocity_err_list = problem.vtk_check(fileHandle, tail_obj_all)
        PETSc.Sys.Print('    velocity error of tails (total, x, y, z): ', next(velocity_err_list))
    def save_full():
        save_head()
        save_tail()
    def do_save_part():
        # Dispatch table keyed by the `part` argument.
        return {'head': save_head,
                'tail': save_tail,
                'full': save_full}[part]
    OptDB = PETSc.Options()
    if not OptDB.getBool('save_singleEcoli_vtk', True):
        return False
    t0 = time()
    problem_kwargs = problem.get_kwargs()
    fileHandle = problem_kwargs['fileHandle'] + prefix
    ecoli_U = problem_kwargs['ecoli_U']
    rel_Us = problem_kwargs['rel_Us']
    rel_Uh = problem_kwargs['rel_Uh']
    center = problem_kwargs['center']
    with_T_geo = problem_kwargs['with_T_geo'] if 'with_T_geo' in problem_kwargs.keys() else 0
    # problem.vtk_obj(fileHandle)
    problem.vtk_self(fileHandle)
    # bgeo = geo()
    # bnodesHeadle = problem_kwargs['bnodesHeadle']
    # matname = problem_kwargs['matname']
    # bgeo.mat_nodes(filename=matname, mat_handle=bnodesHeadle)
    # belemsHeadle = problem_kwargs['belemsHeadle']
    # bgeo.mat_elmes(filename=matname, mat_handle=belemsHeadle, elemtype='tetra')
    # problem.vtk_tetra(fileHandle + '_Velocity', bgeo)
    # create check obj
    # Check geometry: double the resolution, disable the e* factors.
    check_kwargs = problem_kwargs.copy()
    # check_kwargs['nth'] = problem_kwargs['nth'] - 2 if problem_kwargs['nth'] >= 6 else problem_kwargs['nth'] + 1
    # check_kwargs['ds'] = problem_kwargs['ds'] * 1.2
    check_kwargs['nth'] = problem_kwargs['nth'] * 2
    check_kwargs['ds'] = problem_kwargs['ds'] * 2
    check_kwargs['hfct'] = 1
    check_kwargs['Tfct'] = 1
    check_kwargs['eh'] = 0
    check_kwargs['es'] = 0
    check_kwargs['eT'] = 0
    do_save_part()()
    t1 = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
    return True
def save_singleEcoli_U_4part_vtk(problem: sf.StokesFlowProblem, U_list,
                                 createHandle=createEcoliComp_tunnel):
    """Write VTK output for a given-velocity ecoli treated as four
    independent parts, each moving with its own rigid velocity.

    ``U_list`` supplies one U=[ux, uy, uz, wx, wy, wz] per part, in the
    order produced by ``createHandle``.  Returns False if disabled via
    the PETSc option ``save_singleEcoli_vtk``; otherwise True.
    """
    # given velocity case,
    # consider the ecoli constituted by four separate part: head, helix0, helix1, and Tgeo.
    # each part have its own velocity U=[ux, uy, uz, wx, wy ,wz]
    OptDB = PETSc.Options()
    if not OptDB.getBool('save_singleEcoli_vtk', True):
        return False
    t0 = time()
    problem_kwargs = problem.get_kwargs()
    fileHandle = problem_kwargs['fileHandle']
    center = problem_kwargs['center']
    # with_T_geo = len(problem.get_all_obj_list()) == 4
    with_T_geo = problem_kwargs['with_T_geo'] if 'with_T_geo' in problem_kwargs.keys() else 0
    # problem.vtk_obj(fileHandle)
    problem.vtk_self(fileHandle)
    # bgeo = geo()
    # bnodesHeadle = problem_kwargs['bnodesHeadle']
    # matname = problem_kwargs['matname']
    # bgeo.mat_nodes(filename=matname, mat_handle=bnodesHeadle)
    # belemsHeadle = problem_kwargs['belemsHeadle']
    # bgeo.mat_elmes(filename=matname, mat_handle=belemsHeadle, elemtype='tetra')
    # problem.vtk_tetra(fileHandle + '_Velocity', bgeo)
    # create check obj
    # Check geometry at a perturbed resolution so errors are not
    # evaluated on the solve nodes.
    check_kwargs = problem_kwargs.copy()
    check_kwargs['nth'] = problem_kwargs['nth'] - 2 if problem_kwargs['nth'] >= 6 else \
        problem_kwargs['nth'] + 1
    check_kwargs['ds'] = problem_kwargs['ds'] * 1.2
    check_kwargs['hfct'] = 1
    check_kwargs['Tfct'] = 1
    obj_list = createHandle(**check_kwargs)
    # Pair each (flattened) check object with its prescribed velocity.
    for obj, t_U in zip(sf.tube_flatten(obj_list), U_list):
        obj.set_rigid_velocity(t_U, center=center)
    # Per-part errors, yielded in object order.
    velocity_err_list = problem.vtk_check(fileHandle, obj_list)
    PETSc.Sys.Print('velocity error of sphere (total, x, y, z): ', next(velocity_err_list))
    PETSc.Sys.Print('velocity error of helix0 (total, x, y, z): ', next(velocity_err_list))
    PETSc.Sys.Print('velocity error of helix1 (total, x, y, z): ', next(velocity_err_list))
    if with_T_geo:
        PETSc.Sys.Print('velocity error of Tgeo (total, x, y, z): ', next(velocity_err_list))
    # Also report the error of the combined (whole-ecoli) object.
    cbd_obj = sf.StokesFlowObj()
    cbd_obj.combine(obj_list)
    velocity_err = problem.vtk_check(fileHandle, cbd_obj)
    PETSc.Sys.Print('velocity error of ecoli (total, x, y, z): ', next(velocity_err))
    t1 = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
    return True
def save_grid_sphere_vtk(problem: sf.StokesFlowProblem, createHandle=create_sphere):
    """Write VTK output for a sphere problem and print its velocity
    error against a coarser check geometry.

    NOTE(review): unlike the sibling save_* helpers this returns the
    (partially consumed) velocity-error generator rather than True —
    callers may rely on that; confirm before changing.
    """
    OptDB = PETSc.Options()
    if not OptDB.getBool('save_grid_sphere_vtk', True):
        return False
    t0 = time()
    problem_kwargs = problem.get_kwargs()
    fileHandle = problem_kwargs['fileHandle']
    # problem.vtk_obj(fileHandle)
    # problem.vtk_velocity('%s_Velocity' % fileHandle)
    problem.vtk_self(fileHandle)
    # Check geometry at a slightly different node spacing.
    check_kwargs = problem_kwargs.copy()
    check_kwargs['ds'] = problem_kwargs['ds'] * 1.2
    obj_sphere_check = sf.obj_dic[problem_kwargs['matrix_method']]()
    obj_sphere_check.combine(createHandle(**check_kwargs))
    obj_sphere_check.set_name('fullPro')
    velocity_err = problem.vtk_check(fileHandle, obj_sphere_check)
    PETSc.Sys.Print('velocity error (total, x, y, z): ', next(velocity_err))
    t1 = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
    return velocity_err
def save_singleRod_vtk(problem: sf.StokesFlowProblem, ref_U=None, createHandle=create_rod):
    """Write VTK output for a single-rod problem and print the rod's
    velocity error against a check geometry at a different resolution.

    ``ref_U`` defaults to the rod composite's own reference velocity.
    Returns False if disabled via the PETSc option ``save_singleRod_vtk``;
    otherwise True.
    """
    OptDB = PETSc.Options()
    if not OptDB.getBool('save_singleRod_vtk', True):
        return False
    t0 = time()
    problem_kwargs = problem.get_kwargs()
    fileHandle = problem_kwargs['fileHandle']
    rod_comp = problem.get_obj_list()[0]
    ref_U = rod_comp.get_ref_U() if ref_U is None else ref_U
    # create check obj
    # Pick a rod discretization different from the solve one (13 unless
    # the problem already uses ~13, then 17).
    check_kwargs = problem_kwargs.copy()
    check_kwargs['ntRod'] = 13 if np.abs(problem_kwargs['ntRod'] - 13) > 1 else 17
    rod_comp_check = createHandle(**check_kwargs)[0]
    rod_comp_check.set_ref_U(ref_U)
    problem.vtk_obj(fileHandle)
    velocity_err_rod = problem.vtk_check(fileHandle, rod_comp_check)
    # Consistency fix: vtk_check yields per-object error tuples (every
    # sibling save_* helper consumes it with next()); previously the
    # generator object itself was printed instead of the numbers.
    PETSc.Sys.Print('velocity error of rod (total, x, y, z): ', next(velocity_err_rod))
    t1 = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
    return True
|
pcmagic/stokes_flow
|
src/myvtk.py
|
Python
|
mit
| 10,423
|
[
"VTK"
] |
ec3874c74464c31e150dfd251488694427e4c3f954d906b24c04d927adfcc954
|
########################################################################
# $HeadURL $
# File: AdlerTestCase.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2011/02/11 09:08:19
########################################################################
""" :mod: AdlerTestCase
=======================
.. module: AdlerTestCase
:synopsis: test case for DIRAC.Core.Utilities.Adler module
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
test case for DIRAC.Core.Utilities.Adler module
"""
__RCSID__ = "$Id $"
##
# @file AdlerTestCase.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2011/02/11 09:08:37
# @brief Definition of AdlerTestCase class.
## imports
import os
import unittest
import string
import tempfile
from zlib import adler32
## from DIRAC
from DIRAC.Core.Utilities import Adler
########################################################################
class AdlerTestCase(unittest.TestCase):
  """
  .. class:: AdlerTestCase
  test case for DIRAC.Core.Utilities.Adler module

  Python 2 code (old-style except clauses, string.letters); expected
  adler32 checksums are computed locally with zlib for comparison.
  """
  def setUp( self ):
    # Reference checksums as lowercase hex strings without the '0x'.
    self.emptyAdler = hex(adler32( "" ))[2:]
    self.lettersAdler = hex(adler32( string.letters ))[2:]
  def testStringAdler( self ):
    """ stringAdler tests """
    # no arguments supplied - TypeError
    try:
      Adler.stringAdler()
    except Exception, error:
      self.assertEqual( isinstance(error, TypeError), True )
    # wrong argument type
    self.assertEqual( Adler.stringAdler([]), False )
    # empty string
    self.assertEqual( int(Adler.stringAdler("")), int(self.emptyAdler) )
    # all letters
    self.assertEqual( Adler.stringAdler(string.letters), self.lettersAdler )
  def testConversion( self ):
    """ intAdlerToHex and hexAdlerToInt tests """
    # no arguments
    try:
      Adler.intAdlerToHex()
    except Exception, error:
      self.assertEqual( isinstance(error, TypeError), True )
    # wrong type of arg (should it really print out to stdout)
    self.assertEqual( Adler.intAdlerToHex("a"), False )
    # normal operation: the two conversions must be inverses.
    self.assertEqual( int(Adler.intAdlerToHex(1)),
                      Adler.hexAdlerToInt( Adler.intAdlerToHex(1) ) )
    self.assertEqual( Adler.hexAdlerToInt( "0x01" ),
                      int( Adler.intAdlerToHex( Adler.hexAdlerToInt( "0x01" ) ) ) )
  def testFileAdler( self ):
    """ fileAdler tests """
    # no args
    try:
      Adler.fileAdler()
    except Exception, error:
      self.assertEqual( isinstance(error,TypeError ), True )
    # read-protected file
    # NOTE(review): assumes the test does not run as root and that
    # /root/.login exists but is unreadable — environment dependent.
    self.assertEqual( Adler.fileAdler( "/root/.login" ), False )
    # inexisting file
    self.assertEqual( Adler.fileAdler( "Stone/Dead/Norwegian/Blue/Parrot/In/Camelot" ), False )
    # normal operation: empty temp file, then the same file with data.
    fd, path = tempfile.mkstemp("_adler32", "norewgian_blue")
    self.assertEqual( int(Adler.fileAdler( path )), int(self.emptyAdler) )
    os.write( fd, string.letters )
    self.assertEqual( Adler.fileAdler( path ), self.lettersAdler )
  def testCompareAdler( self ):
    """ compareAdler tests """
    # same adlers
    self.assertEqual( Adler.compareAdler( Adler.stringAdler(""), Adler.stringAdler("") ), True )
    # diff adlers
    self.assertEqual( Adler.compareAdler( Adler.stringAdler(""), Adler.stringAdler( string.letters ) ), False )
## test suite execution
if __name__ == "__main__":
    # Build the suite for this test case and run it with a verbose text runner.
    suite = unittest.TestLoader().loadTestsFromTestCase(AdlerTestCase)
    unittest.TextTestRunner(verbosity=3).run(suite)
|
Sbalbp/DIRAC
|
Core/Utilities/test/AdlerTestCase.py
|
Python
|
gpl-3.0
| 3,483
|
[
"DIRAC"
] |
5c60a5536ad27e2465d8373935e719304e721bbc401da11b23dcafe42c3866f3
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.2
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible metadata: maturity and support level of this module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vrfcontext
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of VrfContext Avi RESTful Object
description:
- This module is used to configure VrfContext object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
bgp_profile:
description:
- Bgp local and peer info.
cloud_ref:
description:
- It is a reference to an object of type cloud.
debugvrfcontext:
description:
- Configure debug flags for vrf.
- Field introduced in 17.1.1.
description:
description:
- User defined description for the object.
gateway_mon:
description:
- Configure ping based heartbeat check for gateway in service engines of vrf.
internal_gateway_monitor:
description:
- Configure ping based heartbeat check for all default gateways in service engines of vrf.
- Field introduced in 17.1.1.
name:
description:
- Name of the object.
required: true
static_routes:
description:
- List of staticroute.
system_default:
description:
- Boolean flag to set system_default.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VrfContext object
avi_vrfcontext:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vrfcontext
"""
RETURN = '''
obj:
description: VrfContext (api/vrfcontext) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.network.avi.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    # The Avi SDK helpers are optional at import time; main() checks HAS_AVI
    # and reports a friendly failure instead of crashing here.
    HAS_AVI = False
def main():
    """Entry point: declare the VrfContext argument spec and drive the Avi API."""
    specs = dict(
        state=dict(default='present', choices=['absent', 'present']),
        avi_api_update_method=dict(default='put', choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        bgp_profile=dict(type='dict'),
        cloud_ref=dict(type='str'),
        debugvrfcontext=dict(type='dict'),
        description=dict(type='str'),
        gateway_mon=dict(type='list'),
        internal_gateway_monitor=dict(type='dict'),
        name=dict(type='str', required=True),
        static_routes=dict(type='list'),
        system_default=dict(type='bool'),
        tenant_ref=dict(type='str'),
        url=dict(type='str'),
        uuid=dict(type='str'),
    )
    # Fold in the options every Avi module shares (controller, credentials, ...).
    specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate create/update/delete of the 'vrfcontext' object to the shared API driver.
    return avi_ansible_api(module, 'vrfcontext',
                           set([]))
# Script entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
le9i0nx/ansible
|
lib/ansible/modules/network/avi/avi_vrfcontext.py
|
Python
|
gpl-3.0
| 5,176
|
[
"VisIt"
] |
2e0e1a959fb2fb7c85680dab191eac5e41a4b916ed27bd3f4e449ab36e5ee937
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014-2015 Brett Cannon <brett@python.org>
# Copyright (c) 2015 Simu Toni <simutoni@gmail.com>
# Copyright (c) 2015 Pavel Roskin <proski@gnu.org>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Viorel Stirbu <viorels@gmail.com>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Roy Williams <roy.williams.iii@gmail.com>
# Copyright (c) 2016 Roy Williams <rwilliams@lyft.com>
# Copyright (c) 2016 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Erik <erik.eriksson@yahoo.com>
# Copyright (c) 2017 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2017 Daniel Miller <millerdev@gmail.com>
# Copyright (c) 2017 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 ahirnish <ahirnish@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2018 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@upcloud.com>
# Copyright (c) 2018 gaurikholkar <f2013002@goa.bits-pilani.ac.in>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Check Python 2 code for Python 2/3 source-compatible issues."""
from __future__ import absolute_import, print_function
from collections import namedtuple
import re
import sys
import tokenize
from typing import FrozenSet
import astroid
from astroid import bases
from pylint import checkers, interfaces
from pylint.checkers.utils import node_ignores_exception, find_try_except_wrapper_node
from pylint.interfaces import INFERENCE_FAILURE, INFERENCE
from pylint.utils import WarningScope
from pylint.checkers import utils
_ZERO = re.compile("^0+$")
def _is_old_octal(literal):
if _ZERO.match(literal):
return False
if re.match(r"0\d+", literal):
try:
int(literal, 8)
except ValueError:
return False
return True
return None
def _inferred_value_is_dict(value):
    """Return True when *value* was inferred to be a dict or dict-derived instance."""
    if isinstance(value, astroid.Dict):
        return True
    if not isinstance(value, astroid.Instance):
        return False
    return "dict" in value.basenames
def _is_builtin(node):
return getattr(node, "name", None) in ("__builtin__", "builtins")
# Builtins that consume any iterable, so handing them a Py3 iterator
# (map/zip/filter/dict views) is safe and needs no warning.
_ACCEPTS_ITERATOR = {
    "iter",
    "list",
    "tuple",
    "sorted",
    "set",
    "sum",
    "any",
    "all",
    "enumerate",
    "dict",
    "filter",
    "reversed",
    "max",
    "min",
    "frozenset",
    "OrderedDict",
}
# Attribute (method) names that accept an iterator argument,
# e.g. str.join and itertools.chain.from_iterable.
ATTRIBUTES_ACCEPTS_ITERATOR = {"join", "from_iterable"}
# Fully-qualified builtin methods whose argument may be an iterator.
_BUILTIN_METHOD_ACCEPTS_ITERATOR = {
    "builtins.list.extend",
    "builtins.dict.update",
    "builtins.set.update",
}
# The dict methods whose return type became a view/iterator in Python 3.
DICT_METHODS = {"items", "keys", "values"}
def _in_iterating_context(node):
    """Check if the node is being used as an iterator.
    Definition is taken from lib2to3.fixer_util.in_special_context().
    """
    parent = node.parent
    # Since a call can't be the loop variant we only need to know if the node's
    # parent is a 'for' loop to know it's being used as the iterator for the
    # loop.
    if isinstance(parent, astroid.For):
        return True
    # Need to make sure the use of the node is in the iterator part of the
    # comprehension.
    if isinstance(parent, astroid.Comprehension):
        if parent.iter == node:
            return True
    # Various built-ins can take in an iterable or list and lead to the same
    # value.
    elif isinstance(parent, astroid.Call):
        if isinstance(parent.func, astroid.Name):
            # Only the genuine builtin counts, not a shadowing local name.
            parent_scope = parent.func.lookup(parent.func.name)[0]
            if _is_builtin(parent_scope) and parent.func.name in _ACCEPTS_ITERATOR:
                return True
        elif isinstance(parent.func, astroid.Attribute):
            if parent.func.attrname in ATTRIBUTES_ACCEPTS_ITERATOR:
                return True
            inferred = utils.safe_infer(parent.func)
            if inferred:
                if inferred.qname() in _BUILTIN_METHOD_ACCEPTS_ITERATOR:
                    return True
                # Any function from the itertools module accepts iterators.
                root = inferred.root()
                if root and root.name == "itertools":
                    return True
    # If the call is in an unpacking, there's no need to warn,
    # since it can be considered iterating.
    elif isinstance(parent, astroid.Assign) and isinstance(
        parent.targets[0], (astroid.List, astroid.Tuple)
    ):
        if len(parent.targets[0].elts) > 1:
            return True
    # If the call is in a containment check, we consider that to
    # be an iterating context
    elif (
        isinstance(parent, astroid.Compare)
        and len(parent.ops) == 1
        and parent.ops[0][0] == "in"
    ):
        return True
    # Also if it's an `yield from`, that's fair
    elif isinstance(parent, astroid.YieldFrom):
        return True
    # Star-unpacking (f(*x), [*x]) iterates its operand.
    if isinstance(parent, astroid.Starred):
        return True
    return False
def _is_conditional_import(node):
    """Return True when the import statement sits directly under a conditional
    construct (try/except or if), i.e. it is likely deliberate compat code."""
    conditional_parents = (
        astroid.TryExcept,
        astroid.ExceptHandler,
        astroid.If,
        astroid.IfExp,
    )
    return isinstance(node.parent, conditional_parents)
Branch = namedtuple("Branch", ["node", "is_py2_only"])
class Python3Checker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
enabled = False
name = "python3"
msgs = {
# Errors for what will syntactically break in Python 3, warnings for
# everything else.
"E1601": (
"print statement used",
"print-statement",
"Used when a print statement is used "
"(`print` is a function in Python 3)",
),
"E1602": (
"Parameter unpacking specified",
"parameter-unpacking",
"Used when parameter unpacking is specified for a function"
"(Python 3 doesn't allow it)",
),
"E1603": (
"Implicit unpacking of exceptions is not supported in Python 3",
"unpacking-in-except",
"Python3 will not allow implicit unpacking of "
"exceptions in except clauses. "
"See http://www.python.org/dev/peps/pep-3110/",
{"old_names": [("W0712", "unpacking-in-except")]},
),
"E1604": (
"Use raise ErrorClass(args) instead of raise ErrorClass, args.",
"old-raise-syntax",
"Used when the alternate raise syntax "
"'raise foo, bar' is used "
"instead of 'raise foo(bar)'.",
{"old_names": [("W0121", "old-raise-syntax")]},
),
"E1605": (
"Use of the `` operator",
"backtick",
'Used when the deprecated "``" (backtick) operator is used '
"instead of the str() function.",
{"scope": WarningScope.NODE, "old_names": [("W0333", "backtick")]},
),
"E1609": (
"Import * only allowed at module level",
"import-star-module-level",
"Used when the import star syntax is used somewhere "
"else than the module level.",
{"maxversion": (3, 0)},
),
"W1601": (
"apply built-in referenced",
"apply-builtin",
"Used when the apply built-in function is referenced "
"(missing from Python 3)",
),
"W1602": (
"basestring built-in referenced",
"basestring-builtin",
"Used when the basestring built-in function is referenced "
"(missing from Python 3)",
),
"W1603": (
"buffer built-in referenced",
"buffer-builtin",
"Used when the buffer built-in function is referenced "
"(missing from Python 3)",
),
"W1604": (
"cmp built-in referenced",
"cmp-builtin",
"Used when the cmp built-in function is referenced "
"(missing from Python 3)",
),
"W1605": (
"coerce built-in referenced",
"coerce-builtin",
"Used when the coerce built-in function is referenced "
"(missing from Python 3)",
),
"W1606": (
"execfile built-in referenced",
"execfile-builtin",
"Used when the execfile built-in function is referenced "
"(missing from Python 3)",
),
"W1607": (
"file built-in referenced",
"file-builtin",
"Used when the file built-in function is referenced "
"(missing from Python 3)",
),
"W1608": (
"long built-in referenced",
"long-builtin",
"Used when the long built-in function is referenced "
"(missing from Python 3)",
),
"W1609": (
"raw_input built-in referenced",
"raw_input-builtin",
"Used when the raw_input built-in function is referenced "
"(missing from Python 3)",
),
"W1610": (
"reduce built-in referenced",
"reduce-builtin",
"Used when the reduce built-in function is referenced "
"(missing from Python 3)",
),
"W1611": (
"StandardError built-in referenced",
"standarderror-builtin",
"Used when the StandardError built-in function is referenced "
"(missing from Python 3)",
),
"W1612": (
"unicode built-in referenced",
"unicode-builtin",
"Used when the unicode built-in function is referenced "
"(missing from Python 3)",
),
"W1613": (
"xrange built-in referenced",
"xrange-builtin",
"Used when the xrange built-in function is referenced "
"(missing from Python 3)",
),
"W1614": (
"__coerce__ method defined",
"coerce-method",
"Used when a __coerce__ method is defined "
"(method is not used by Python 3)",
),
"W1615": (
"__delslice__ method defined",
"delslice-method",
"Used when a __delslice__ method is defined "
"(method is not used by Python 3)",
),
"W1616": (
"__getslice__ method defined",
"getslice-method",
"Used when a __getslice__ method is defined "
"(method is not used by Python 3)",
),
"W1617": (
"__setslice__ method defined",
"setslice-method",
"Used when a __setslice__ method is defined "
"(method is not used by Python 3)",
),
"W1618": (
"import missing `from __future__ import absolute_import`",
"no-absolute-import",
"Used when an import is not accompanied by "
"``from __future__ import absolute_import`` "
"(default behaviour in Python 3)",
),
"W1619": (
"division w/o __future__ statement",
"old-division",
"Used for non-floor division w/o a float literal or "
"``from __future__ import division`` "
"(Python 3 returns a float for int division unconditionally)",
),
"W1620": (
"Calling a dict.iter*() method",
"dict-iter-method",
"Used for calls to dict.iterkeys(), itervalues() or iteritems() "
"(Python 3 lacks these methods)",
),
"W1621": (
"Calling a dict.view*() method",
"dict-view-method",
"Used for calls to dict.viewkeys(), viewvalues() or viewitems() "
"(Python 3 lacks these methods)",
),
"W1622": (
"Called a next() method on an object",
"next-method-called",
"Used when an object's next() method is called "
"(Python 3 uses the next() built-in function)",
),
"W1623": (
"Assigning to a class's __metaclass__ attribute",
"metaclass-assignment",
"Used when a metaclass is specified by assigning to __metaclass__ "
"(Python 3 specifies the metaclass as a class statement argument)",
),
"W1624": (
"Indexing exceptions will not work on Python 3",
"indexing-exception",
"Indexing exceptions will not work on Python 3. Use "
"`exception.args[index]` instead.",
{"old_names": [("W0713", "indexing-exception")]},
),
"W1625": (
"Raising a string exception",
"raising-string",
"Used when a string exception is raised. This will not "
"work on Python 3.",
{"old_names": [("W0701", "raising-string")]},
),
"W1626": (
"reload built-in referenced",
"reload-builtin",
"Used when the reload built-in function is referenced "
"(missing from Python 3). You can use instead imp.reload "
"or importlib.reload.",
),
"W1627": (
"__oct__ method defined",
"oct-method",
"Used when an __oct__ method is defined "
"(method is not used by Python 3)",
),
"W1628": (
"__hex__ method defined",
"hex-method",
"Used when a __hex__ method is defined (method is not used by Python 3)",
),
"W1629": (
"__nonzero__ method defined",
"nonzero-method",
"Used when a __nonzero__ method is defined "
"(method is not used by Python 3)",
),
"W1630": (
"__cmp__ method defined",
"cmp-method",
"Used when a __cmp__ method is defined (method is not used by Python 3)",
),
# 'W1631': replaced by W1636
"W1632": (
"input built-in referenced",
"input-builtin",
"Used when the input built-in is referenced "
"(backwards-incompatible semantics in Python 3)",
),
"W1633": (
"round built-in referenced",
"round-builtin",
"Used when the round built-in is referenced "
"(backwards-incompatible semantics in Python 3)",
),
"W1634": (
"intern built-in referenced",
"intern-builtin",
"Used when the intern built-in is referenced "
"(Moved to sys.intern in Python 3)",
),
"W1635": (
"unichr built-in referenced",
"unichr-builtin",
"Used when the unichr built-in is referenced (Use chr in Python 3)",
),
"W1636": (
"map built-in referenced when not iterating",
"map-builtin-not-iterating",
"Used when the map built-in is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
{"old_names": [("W1631", "implicit-map-evaluation")]},
),
"W1637": (
"zip built-in referenced when not iterating",
"zip-builtin-not-iterating",
"Used when the zip built-in is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1638": (
"range built-in referenced when not iterating",
"range-builtin-not-iterating",
"Used when the range built-in is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1639": (
"filter built-in referenced when not iterating",
"filter-builtin-not-iterating",
"Used when the filter built-in is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1640": (
"Using the cmp argument for list.sort / sorted",
"using-cmp-argument",
"Using the cmp argument for list.sort or the sorted "
"builtin should be avoided, since it was removed in "
"Python 3. Using either `key` or `functools.cmp_to_key` "
"should be preferred.",
),
"W1641": (
"Implementing __eq__ without also implementing __hash__",
"eq-without-hash",
"Used when a class implements __eq__ but not __hash__. In Python 2, objects "
"get object.__hash__ as the default implementation, in Python 3 objects get "
"None as their default __hash__ implementation if they also implement __eq__.",
),
"W1642": (
"__div__ method defined",
"div-method",
"Used when a __div__ method is defined. Using `__truediv__` and setting"
"__div__ = __truediv__ should be preferred."
"(method is not used by Python 3)",
),
"W1643": (
"__idiv__ method defined",
"idiv-method",
"Used when an __idiv__ method is defined. Using `__itruediv__` and setting"
"__idiv__ = __itruediv__ should be preferred."
"(method is not used by Python 3)",
),
"W1644": (
"__rdiv__ method defined",
"rdiv-method",
"Used when a __rdiv__ method is defined. Using `__rtruediv__` and setting"
"__rdiv__ = __rtruediv__ should be preferred."
"(method is not used by Python 3)",
),
"W1645": (
"Exception.message removed in Python 3",
"exception-message-attribute",
"Used when the message attribute is accessed on an Exception. Use "
"str(exception) instead.",
),
"W1646": (
"non-text encoding used in str.decode",
"invalid-str-codec",
"Used when using str.encode or str.decode with a non-text encoding. Use "
"codecs module to handle arbitrary codecs.",
),
"W1647": (
"sys.maxint removed in Python 3",
"sys-max-int",
"Used when accessing sys.maxint. Use sys.maxsize instead.",
),
"W1648": (
"Module moved in Python 3",
"bad-python3-import",
"Used when importing a module that no longer exists in Python 3.",
),
"W1649": (
"Accessing a deprecated function on the string module",
"deprecated-string-function",
"Used when accessing a string function that has been deprecated in Python 3.",
),
"W1650": (
"Using str.translate with deprecated deletechars parameters",
"deprecated-str-translate-call",
"Used when using the deprecated deletechars parameters from str.translate. Use "
"re.sub to remove the desired characters ",
),
"W1651": (
"Accessing a deprecated function on the itertools module",
"deprecated-itertools-function",
"Used when accessing a function on itertools that has been removed in Python 3.",
),
"W1652": (
"Accessing a deprecated fields on the types module",
"deprecated-types-field",
"Used when accessing a field on types that has been removed in Python 3.",
),
"W1653": (
"next method defined",
"next-method-defined",
"Used when a next method is defined that would be an iterator in Python 2 but "
"is treated as a normal function in Python 3.",
),
"W1654": (
"dict.items referenced when not iterating",
"dict-items-not-iterating",
"Used when dict.items is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1655": (
"dict.keys referenced when not iterating",
"dict-keys-not-iterating",
"Used when dict.keys is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1656": (
"dict.values referenced when not iterating",
"dict-values-not-iterating",
"Used when dict.values is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1657": (
"Accessing a removed attribute on the operator module",
"deprecated-operator-function",
"Used when accessing a field on operator module that has been "
"removed in Python 3.",
),
"W1658": (
"Accessing a removed attribute on the urllib module",
"deprecated-urllib-function",
"Used when accessing a field on urllib module that has been "
"removed or moved in Python 3.",
),
"W1659": (
"Accessing a removed xreadlines attribute",
"xreadlines-attribute",
"Used when accessing the xreadlines() function on a file stream, "
"removed in Python 3.",
),
"W1660": (
"Accessing a removed attribute on the sys module",
"deprecated-sys-function",
"Used when accessing a field on sys module that has been "
"removed in Python 3.",
),
"W1661": (
"Using an exception object that was bound by an except handler",
"exception-escape",
"Emitted when using an exception, that was bound in an except "
"handler, outside of the except handler. On Python 3 these "
"exceptions will be deleted once they get out "
"of the except handler.",
),
"W1662": (
"Using a variable that was bound inside a comprehension",
"comprehension-escape",
"Emitted when using a variable, that was bound in a comprehension "
"handler, outside of the comprehension itself. On Python 3 these "
"variables will be deleted outside of the "
"comprehension.",
),
}
_bad_builtins = frozenset(
[
"apply",
"basestring",
"buffer",
"cmp",
"coerce",
"execfile",
"file",
"input", # Not missing, but incompatible semantics
"intern",
"long",
"raw_input",
"reduce",
"round", # Not missing, but incompatible semantics
"StandardError",
"unichr",
"unicode",
"xrange",
"reload",
]
)
_unused_magic_methods = frozenset(
[
"__coerce__",
"__delslice__",
"__getslice__",
"__setslice__",
"__oct__",
"__hex__",
"__nonzero__",
"__cmp__",
"__div__",
"__idiv__",
"__rdiv__",
]
)
_invalid_encodings = frozenset(
[
"base64_codec",
"base64",
"base_64",
"bz2_codec",
"bz2",
"hex_codec",
"hex",
"quopri_codec",
"quopri",
"quotedprintable",
"quoted_printable",
"uu_codec",
"uu",
"zlib_codec",
"zlib",
"zip",
"rot13",
"rot_13",
]
)
_bad_python3_module_map = {
"sys-max-int": {"sys": frozenset(["maxint"])},
"deprecated-itertools-function": {
"itertools": frozenset(
["izip", "ifilter", "imap", "izip_longest", "ifilterfalse"]
)
},
"deprecated-types-field": {
"types": frozenset(
[
"EllipsisType",
"XRangeType",
"ComplexType",
"StringType",
"TypeType",
"LongType",
"UnicodeType",
"ClassType",
"BufferType",
"StringTypes",
"NotImplementedType",
"NoneType",
"InstanceType",
"FloatType",
"SliceType",
"UnboundMethodType",
"ObjectType",
"IntType",
"TupleType",
"ListType",
"DictType",
"FileType",
"DictionaryType",
"BooleanType",
"DictProxyType",
]
)
},
"bad-python3-import": frozenset(
[
"anydbm",
"BaseHTTPServer",
"__builtin__",
"CGIHTTPServer",
"ConfigParser",
"copy_reg",
"cPickle",
"cStringIO",
"Cookie",
"cookielib",
"dbhash",
"dumbdbm",
"dumbdb",
"Dialog",
"DocXMLRPCServer",
"FileDialog",
"FixTk",
"gdbm",
"htmlentitydefs",
"HTMLParser",
"httplib",
"markupbase",
"Queue",
"repr",
"robotparser",
"ScrolledText",
"SimpleDialog",
"SimpleHTTPServer",
"SimpleXMLRPCServer",
"StringIO",
"dummy_thread",
"SocketServer",
"test.test_support",
"Tkinter",
"Tix",
"Tkconstants",
"tkColorChooser",
"tkCommonDialog",
"Tkdnd",
"tkFileDialog",
"tkFont",
"tkMessageBox",
"tkSimpleDialog",
"UserList",
"UserString",
"whichdb",
"_winreg",
"xmlrpclib",
"audiodev",
"Bastion",
"bsddb185",
"bsddb3",
"Canvas",
"cfmfile",
"cl",
"commands",
"compiler",
"dircache",
"dl",
"exception",
"fpformat",
"htmllib",
"ihooks",
"imageop",
"imputil",
"linuxaudiodev",
"md5",
"mhlib",
"mimetools",
"MimeWriter",
"mimify",
"multifile",
"mutex",
"new",
"popen2",
"posixfile",
"pure",
"rexec",
"rfc822",
"sets",
"sha",
"sgmllib",
"sre",
"stringold",
"sunaudio",
"sv",
"test.testall",
"thread",
"timing",
"toaiff",
"user",
"urllib2",
"urlparse",
]
),
"deprecated-string-function": {
"string": frozenset(
[
"maketrans",
"atof",
"atoi",
"atol",
"capitalize",
"expandtabs",
"find",
"rfind",
"index",
"rindex",
"count",
"lower",
"letters",
"split",
"rsplit",
"splitfields",
"join",
"joinfields",
"lstrip",
"rstrip",
"strip",
"swapcase",
"translate",
"upper",
"ljust",
"rjust",
"center",
"zfill",
"replace",
"lowercase",
"letters",
"uppercase",
"atol_error",
"atof_error",
"atoi_error",
"index_error",
]
)
},
"deprecated-operator-function": {"operator": frozenset({"div"})},
"deprecated-urllib-function": {
"urllib": frozenset(
{
"addbase",
"addclosehook",
"addinfo",
"addinfourl",
"always_safe",
"basejoin",
"ftpcache",
"ftperrors",
"ftpwrapper",
"getproxies",
"getproxies_environment",
"getproxies_macosx_sysconf",
"main",
"noheaders",
"pathname2url",
"proxy_bypass",
"proxy_bypass_environment",
"proxy_bypass_macosx_sysconf",
"quote",
"quote_plus",
"reporthook",
"splitattr",
"splithost",
"splitnport",
"splitpasswd",
"splitport",
"splitquery",
"splittag",
"splittype",
"splituser",
"splitvalue",
"unquote",
"unquote_plus",
"unwrap",
"url2pathname",
"urlcleanup",
"urlencode",
"urlopen",
"urlretrieve",
}
)
},
"deprecated-sys-function": {"sys": frozenset({"exc_clear"})},
}
if (3, 4) <= sys.version_info < (3, 4, 4):
# Python 3.4.0 -> 3.4.3 has a bug which breaks `repr_tree()`:
# https://bugs.python.org/issue23572
_python_2_tests = frozenset() # type: FrozenSet[str]
else:
_python_2_tests = frozenset(
[
astroid.extract_node(x).repr_tree()
for x in [
"sys.version_info[0] == 2",
"sys.version_info[0] < 3",
"sys.version_info == (2, 7)",
"sys.version_info <= (2, 7)",
"sys.version_info < (3, 0)",
]
]
)
    def __init__(self, *args, **kwargs):
        # Per-module __future__ state; reset by visit_module for each module.
        self._future_division = False
        self._future_absolute_import = False
        # Modules already reported by _warn_if_deprecated (warn once each).
        self._modules_warned_about = set()
        # Stack of Branch entries for the if/ifexp nodes currently visited.
        self._branch_stack = []
        super(Python3Checker, self).__init__(*args, **kwargs)
    # pylint: disable=keyword-arg-before-vararg, arguments-differ
    def add_message(self, msg_id, always_warn=False, *args, **kwargs):
        """Emit *msg_id* unless we are inside a Python-2-only branch.

        ``always_warn=True`` bypasses that suppression (used by visit_print,
        where the construct is broken regardless of the branch).
        """
        if always_warn or not (
            self._branch_stack and self._branch_stack[-1].is_py2_only
        ):
            super(Python3Checker, self).add_message(msg_id, *args, **kwargs)
def _is_py2_test(self, node):
if isinstance(node.test, astroid.Attribute) and isinstance(
node.test.expr, astroid.Name
):
if node.test.expr.name == "six" and node.test.attrname == "PY2":
return True
elif (
isinstance(node.test, astroid.Compare)
and node.test.repr_tree() in self._python_2_tests
):
return True
return False
    def visit_if(self, node):
        # Record whether this branch is Python-2-only so add_message can
        # suppress Py3 warnings while inside it.
        self._branch_stack.append(Branch(node, self._is_py2_test(node)))
    def leave_if(self, node):
        # Pop the entry pushed by visit_if; the stack must stay balanced.
        assert self._branch_stack.pop().node == node
    def visit_ifexp(self, node):
        # Conditional expressions get the same Py2-only tracking as if blocks.
        self._branch_stack.append(Branch(node, self._is_py2_test(node)))
    def leave_ifexp(self, node):
        # Pop the entry pushed by visit_ifexp; the stack must stay balanced.
        assert self._branch_stack.pop().node == node
    def visit_module(self, node):  # pylint: disable=unused-argument
        """Clear checker state after previous module."""
        # __future__ imports are per-module; forget the previous module's.
        self._future_division = False
        self._future_absolute_import = False
    def visit_functiondef(self, node):
        """Flag Py2-only magic methods and iterator-protocol ``next`` methods."""
        if node.is_method():
            if node.name in self._unused_magic_methods:
                # Message symbol is the dunder name without underscores plus
                # "-method", e.g. "coerce-method" for __coerce__.
                method_name = node.name
                if node.name.startswith("__"):
                    method_name = node.name[2:-2]
                self.add_message(method_name + "-method", node=node)
            elif node.name == "next":
                # If there is a method named `next` declared, if it is invokable
                # with zero arguments then it implements the Iterator protocol.
                # This means if the method is an instance method or a
                # classmethod 1 argument should cause a failure, if it is a
                # staticmethod 0 arguments should cause a failure.
                failing_arg_count = 1
                if utils.decorated_with(node, [bases.BUILTINS + ".staticmethod"]):
                    failing_arg_count = 0
                if len(node.args.args) == failing_arg_count:
                    self.add_message("next-method-defined", node=node)
@utils.check_messages("parameter-unpacking")
def visit_arguments(self, node):
for arg in node.args:
if isinstance(arg, astroid.Tuple):
self.add_message("parameter-unpacking", node=arg)
    @utils.check_messages("comprehension-escape")
    def visit_listcomp(self, node):
        """Warn when a list-comprehension loop variable is read after the
        comprehension; those variables no longer leak in Python 3."""
        # Loop variables bound by the comprehension's generators.
        names = {
            generator.target.name
            for generator in node.generators
            if isinstance(generator.target, astroid.AssignName)
        }
        scope = node.parent.scope()
        scope_names = scope.nodes_of_class(astroid.Name, skip_klass=astroid.FunctionDef)
        # If the same name is reassigned in the scope after the comprehension,
        # later reads refer to that assignment, not to the leaked variable.
        has_redefined_assign_name = any(
            assign_name
            for assign_name in scope.nodes_of_class(
                astroid.AssignName, skip_klass=astroid.FunctionDef
            )
            if assign_name.name in names and assign_name.lineno > node.lineno
        )
        if has_redefined_assign_name:
            return
        emitted_for_names = set()
        scope_names = list(scope_names)
        for scope_name in scope_names:
            # Report only reads of a comprehension variable that come after
            # the comprehension, outside it, and at most once per name.
            if (
                scope_name.name not in names
                or scope_name.lineno <= node.lineno
                or scope_name.name in emitted_for_names
                or scope_name.scope() == node
            ):
                continue
            emitted_for_names.add(scope_name.name)
            self.add_message("comprehension-escape", node=scope_name)
    def visit_name(self, node):
        """Detect when a "bad" built-in is referenced."""
        found_node, _ = node.lookup(node.name)
        # Only warn for the genuine builtin, not a local redefinition.
        if not _is_builtin(found_node):
            return
        if node.name not in self._bad_builtins:
            return
        # References wrapped in a try/except handler are treated as deliberate
        # compatibility code and are not reported.
        if node_ignores_exception(node) or isinstance(
            find_try_except_wrapper_node(node), astroid.ExceptHandler
        ):
            return
        # Message symbol is e.g. "xrange-builtin" for xrange.
        message = node.name.lower() + "-builtin"
        self.add_message(message, node=node)
    @utils.check_messages("print-statement")
    def visit_print(self, node):
        """Py2 print statement: reported even inside Python-2-only branches."""
        # always_warn bypasses the Py2-only-branch suppression in add_message.
        self.add_message("print-statement", node=node, always_warn=True)
    def _warn_if_deprecated(self, node, module, attributes, report_on_modules=True):
        """Emit the deprecation message matching *module* / *attributes*.

        *attributes* may be None (plain ``import module``).  Each module is
        reported at most once when *report_on_modules* is true.
        """
        for message, module_map in self._bad_python3_module_map.items():
            if module in module_map and module not in self._modules_warned_about:
                # A bare frozenset entry means the module itself was removed
                # or moved; a dict maps module -> removed attribute names.
                if isinstance(module_map, frozenset):
                    if report_on_modules:
                        self._modules_warned_about.add(module)
                        self.add_message(message, node=node)
                elif attributes and module_map[module].intersection(attributes):
                    self.add_message(message, node=node)
    def visit_importfrom(self, node):
        """Track ``__future__`` imports; flag deprecated and star imports."""
        if node.modname == "__future__":
            for name, _ in node.names:
                if name == "division":
                    self._future_division = True
                elif name == "absolute_import":
                    self._future_absolute_import = True
        else:
            if not self._future_absolute_import:
                if self.linter.is_message_enabled("no-absolute-import"):
                    self.add_message("no-absolute-import", node=node)
                    # Only warn once per module.
                    self._future_absolute_import = True
            # Relative (node.level) and conditional imports are assumed to be
            # deliberate compatibility code and are not checked.
            if not _is_conditional_import(node) and not node.level:
                self._warn_if_deprecated(node, node.modname, {x[0] for x in node.names})
        if node.names[0][0] == "*":
            if self.linter.is_message_enabled("import-star-module-level"):
                if not isinstance(node.scope(), astroid.Module):
                    self.add_message("import-star-module-level", node=node)
    def visit_import(self, node):
        """Flag imports lacking ``absolute_import`` and deprecated modules."""
        if not self._future_absolute_import:
            if self.linter.is_message_enabled("no-absolute-import"):
                self.add_message("no-absolute-import", node=node)
                # Only warn once per module.
                self._future_absolute_import = True
        # Conditional imports are assumed to be deliberate compat code.
        if not _is_conditional_import(node):
            for name, _ in node.names:
                self._warn_if_deprecated(node, name, None)
@utils.check_messages("metaclass-assignment")
def visit_classdef(self, node):
if "__metaclass__" in node.locals:
self.add_message("metaclass-assignment", node=node)
locals_and_methods = set(node.locals).union(x.name for x in node.mymethods())
if "__eq__" in locals_and_methods and "__hash__" not in locals_and_methods:
self.add_message("eq-without-hash", node=node)
@utils.check_messages("old-division")
def visit_binop(self, node):
    """Flag ``/`` that may mean integer division without ``__future__.division``."""
    if self._future_division or node.op != "/":
        return
    # A literal float operand forces true division even in Python 2.
    has_float_operand = any(
        isinstance(operand, astroid.Const) and isinstance(operand.value, float)
        for operand in (node.left, node.right)
    )
    if not has_float_operand:
        self.add_message("old-division", node=node)
def _check_cmp_argument(self, node):
    # Check that the `cmp` argument is used: ``cmp`` was removed from
    # ``list.sort`` and ``sorted`` in Python 3.
    kwargs = []
    if isinstance(node.func, astroid.Attribute) and node.func.attrname == "sort":
        inferred = utils.safe_infer(node.func.expr)
        if not inferred:
            return

        builtins_list = "{}.list".format(bases.BUILTINS)
        # Only the builtin list.sort takes cmp; skip user-defined .sort().
        if isinstance(inferred, astroid.List) or inferred.qname() == builtins_list:
            kwargs = node.keywords

    elif isinstance(node.func, astroid.Name) and node.func.name == "sorted":
        inferred = utils.safe_infer(node.func)
        if not inferred:
            return

        builtins_sorted = "{}.sorted".format(bases.BUILTINS)
        # Only warn for the real builtin sorted, not a shadowing definition.
        if inferred.qname() == builtins_sorted:
            kwargs = node.keywords

    for kwarg in kwargs or []:
        if kwarg.arg == "cmp":
            self.add_message("using-cmp-argument", node=node)
            return
@staticmethod
def _is_constant_string_or_name(node):
    """Return True for a constant-string node or any ``Name`` node."""
    is_string_const = isinstance(node, astroid.Const) and isinstance(node.value, str)
    return is_string_const or isinstance(node, astroid.Name)
@staticmethod
def _is_none(node):
    # True only for a literal ``None`` constant node.
    return isinstance(node, astroid.Const) and node.value is None
@staticmethod
def _has_only_n_positional_args(node, number_of_args):
    # Exactly ``number_of_args`` positional arguments, all truthy nodes,
    # and no keyword arguments at all.
    return len(node.args) == number_of_args and all(node.args) and not node.keywords
@staticmethod
def _could_be_string(inferred_types):
    """Return a confidence level if all inferred types could be strings.

    Returns None as soon as a definitely-non-string type is seen; otherwise
    INFERENCE when concrete types were inferred, or INFERENCE_FAILURE when
    the set was empty or contained Uninferable.
    """
    confidence = INFERENCE if inferred_types else INFERENCE_FAILURE
    for inferred_type in inferred_types:
        if inferred_type is astroid.Uninferable:
            confidence = INFERENCE_FAILURE
        elif not (
            isinstance(inferred_type, astroid.Const)
            and isinstance(inferred_type.value, str)
        ):
            return None
    return confidence
def visit_call(self, node):
    """Run the Python-3 porting checks that apply to call sites."""
    self._check_cmp_argument(node)

    if isinstance(node.func, astroid.Attribute):
        # Method-style call: collect the possible receiver types while
        # also checking for deprecated module attributes and dict methods.
        inferred_types = set()
        try:
            for inferred_receiver in node.func.expr.infer():
                if inferred_receiver is astroid.Uninferable:
                    continue
                inferred_types.add(inferred_receiver)
                if isinstance(inferred_receiver, astroid.Module):
                    self._warn_if_deprecated(
                        node,
                        inferred_receiver.name,
                        {node.func.attrname},
                        report_on_modules=False,
                    )
                if (
                    _inferred_value_is_dict(inferred_receiver)
                    and node.func.attrname in DICT_METHODS
                ):
                    # dict.keys()/values()/items() return views in Python 3;
                    # only fine when immediately iterated.
                    if not _in_iterating_context(node):
                        checker = "dict-{}-not-iterating".format(node.func.attrname)
                        self.add_message(checker, node=node)
        except astroid.InferenceError:
            pass
        if node.args:
            is_str_confidence = self._could_be_string(inferred_types)
            if is_str_confidence:
                if (
                    node.func.attrname in ("encode", "decode")
                    and len(node.args) >= 1
                    and node.args[0]
                ):
                    first_arg = node.args[0]
                    self._validate_encoding(first_arg, node)
                if (
                    node.func.attrname == "translate"
                    and self._has_only_n_positional_args(node, 2)
                    and self._is_none(node.args[0])
                    and self._is_constant_string_or_name(node.args[1])
                ):
                    # The above statement looking for calls of the form:
                    #
                    # foo.translate(None, 'abc123')
                    #
                    # or
                    #
                    # foo.translate(None, some_variable)
                    #
                    # This check is somewhat broad and _may_ have some false positives, but
                    # after checking several large codebases it did not have any false
                    # positives while finding several real issues. This call pattern seems
                    # rare enough that the trade off is worth it.
                    self.add_message(
                        "deprecated-str-translate-call",
                        node=node,
                        confidence=is_str_confidence,
                    )
            # Calls with positional args cannot match the remaining checks.
            return
        if node.keywords:
            return
        if node.func.attrname == "next":
            # obj.next() became next(obj) in Python 3.
            self.add_message("next-method-called", node=node)
        else:
            if node.func.attrname in ("iterkeys", "itervalues", "iteritems"):
                self.add_message("dict-iter-method", node=node)
            elif node.func.attrname in ("viewkeys", "viewvalues", "viewitems"):
                self.add_message("dict-view-method", node=node)
    elif isinstance(node.func, astroid.Name):
        found_node = node.func.lookup(node.func.name)[0]
        # Only warn when the name really resolves to the builtin.
        if _is_builtin(found_node):
            if node.func.name in ("filter", "map", "range", "zip"):
                if not _in_iterating_context(node):
                    checker = "{}-builtin-not-iterating".format(node.func.name)
                    self.add_message(checker, node=node)
            if node.func.name == "open" and node.keywords:
                kwargs = node.keywords
                for kwarg in kwargs or []:
                    if kwarg.arg == "encoding":
                        self._validate_encoding(kwarg.value, node)
                        break
def _validate_encoding(self, encoding, node):
    """Report string codecs that are invalid in Python 3."""
    if not isinstance(encoding, astroid.Const):
        return
    if encoding.value in self._invalid_encodings:
        self.add_message("invalid-str-codec", node=node)
@utils.check_messages("indexing-exception")
def visit_subscript(self, node):
    """ Look for indexing exceptions. """
    try:
        for inferred in node.value.infer():
            if not isinstance(inferred, astroid.Instance):
                continue
            # Indexing an exception instance (``err[0]``) is Python 2 only.
            if utils.inherit_from_std_ex(inferred):
                self.add_message("indexing-exception", node=node)
    except astroid.InferenceError:
        return
def visit_assignattr(self, node):
    # Augmented assignment (``obj.attr += x``) also reads the attribute,
    # so run the removed-attribute checks on it.
    if isinstance(node.assign_type(), astroid.AugAssign):
        self.visit_attribute(node)
def visit_delattr(self, node):
    # ``del obj.attr`` references the attribute; reuse the attribute checks.
    self.visit_attribute(node)
@utils.check_messages("exception-message-attribute", "xreadlines-attribute")
def visit_attribute(self, node):
    """Look for removed attributes"""
    if node.attrname == "xreadlines":
        # file.xreadlines() was removed in Python 3.
        self.add_message("xreadlines-attribute", node=node)
        return

    exception_message = "message"
    try:
        for inferred in node.expr.infer():
            if isinstance(inferred, astroid.Instance) and utils.inherit_from_std_ex(
                inferred
            ):
                if node.attrname == exception_message:
                    # Exceptions with .message clearly defined are an exception
                    if exception_message in inferred.instance_attrs:
                        continue
                    self.add_message("exception-message-attribute", node=node)
            if isinstance(inferred, astroid.Module):
                # Attribute access on a module: check for removed members.
                self._warn_if_deprecated(
                    node, inferred.name, {node.attrname}, report_on_modules=False
                )
    except astroid.InferenceError:
        return
@utils.check_messages("unpacking-in-except", "comprehension-escape")
def visit_excepthandler(self, node):
    """Visit an except handler block and check for exception unpacking."""

    def _is_used_in_except_block(node):
        # True when ``node`` lives inside some except block (other than as
        # that handler's exception type), where using the name is safe.
        scope = node.scope()
        current = node
        while (
            current
            and current != scope
            and not isinstance(current, astroid.ExceptHandler)
        ):
            current = current.parent
        return isinstance(current, astroid.ExceptHandler) and current.type != node

    if isinstance(node.name, (astroid.Tuple, astroid.List)):
        # ``except E, (a, b):`` tuple unpacking was removed in Python 3.
        self.add_message("unpacking-in-except", node=node)
        return

    if not node.name:
        return

    # Find any names
    scope = node.parent.scope()
    scope_names = scope.nodes_of_class(astroid.Name, skip_klass=astroid.FunctionDef)
    scope_names = list(scope_names)
    # Uses of the bound exception name after the handler: in Python 3 that
    # name is deleted when the except block exits.
    potential_leaked_names = [
        scope_name
        for scope_name in scope_names
        if scope_name.name == node.name.name
        and scope_name.lineno > node.lineno
        and not _is_used_in_except_block(scope_name)
    ]
    reassignments_for_same_name = {
        assign_name.lineno
        for assign_name in scope.nodes_of_class(
            astroid.AssignName, skip_klass=astroid.FunctionDef
        )
        if assign_name.name == node.name.name
    }
    for leaked_name in potential_leaked_names:
        # A rebinding between the handler and the later use makes it safe.
        if any(
            node.lineno < elem < leaked_name.lineno
            for elem in reassignments_for_same_name
        ):
            continue
        self.add_message("exception-escape", node=leaked_name)
@utils.check_messages("backtick")
def visit_repr(self, node):
    # Backtick repr syntax (`x`) was removed in Python 3.
    self.add_message("backtick", node=node)
@utils.check_messages("raising-string", "old-raise-syntax")
def visit_raise(self, node):
    """Visit a raise statement and check for raising
    strings or old-raise-syntax.
    """
    # Ignore empty raise.
    if node.exc is None:
        return
    expr = node.exc
    if self._check_raise_value(node, expr):
        return
    try:
        value = next(astroid.unpack_infer(expr))
    except astroid.InferenceError:
        return
    # Also check the inferred value, e.g. ``raise some_string_variable``.
    self._check_raise_value(node, value)
def _check_raise_value(self, node, expr):
    """Emit ``raising-string`` for ``raise "..."``; return True when reported."""
    if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
        self.add_message("raising-string", node=node)
        return True
    return None
class Python3TokenChecker(checkers.BaseTokenChecker):
    """Token-level checks for literal syntax that was removed in Python 3."""

    __implements__ = interfaces.ITokenChecker
    name = "python3"
    enabled = False

    msgs = {
        "E1606": (
            "Use of long suffix",
            "long-suffix",
            'Used when "l" or "L" is used to mark a long integer. '
            "This will not work in Python 3, since `int` and `long` "
            "types have merged.",
            {"maxversion": (3, 0)},
        ),
        "E1607": (
            "Use of the <> operator",
            "old-ne-operator",
            'Used when the deprecated "<>" operator is used instead '
            'of "!=". This is removed in Python 3.',
            {"maxversion": (3, 0), "old_names": [("W0331", "old-ne-operator")]},
        ),
        "E1608": (
            "Use of old octal literal",
            "old-octal-literal",
            "Used when encountering the old octal syntax, "
            "removed in Python 3. To use the new syntax, "
            "prepend 0o on the number.",
            {"maxversion": (3, 0)},
        ),
        "E1610": (
            "Non-ascii bytes literals not supported in 3.x",
            "non-ascii-bytes-literal",
            "Used when non-ascii bytes literals are found in a program. "
            "They are no longer supported in Python 3.",
            {"maxversion": (3, 0)},
        ),
    }

    def process_tokens(self, tokens):
        """Scan the raw token stream for Python-2-only literal syntax."""
        for idx, (tok_type, token, start, _, _) in enumerate(tokens):
            if tok_type == tokenize.NUMBER:
                if token.lower().endswith("l"):
                    # This has a different semantic than lowercase-l-suffix.
                    self.add_message("long-suffix", line=start[0])
                elif _is_old_octal(token):
                    self.add_message("old-octal-literal", line=start[0])
            if tokens[idx][1] == "<>":
                self.add_message("old-ne-operator", line=tokens[idx][2][0])
            if tok_type == tokenize.STRING and token.startswith("b"):
                # Bytes literals may only contain ASCII in Python 3.
                if any(elem for elem in token if ord(elem) > 127):
                    self.add_message("non-ascii-bytes-literal", line=start[0])
def register(linter):
    """Register both Python 3 porting checkers with ``linter``."""
    linter.register_checker(Python3Checker(linter))
    linter.register_checker(Python3TokenChecker(linter))
|
ekwoodrich/python-dvrip
|
env/lib/python3.5/site-packages/pylint/checkers/python3.py
|
Python
|
mit
| 51,928
|
[
"VisIt"
] |
3692e4c58a7f54f4df17436361b80ebd11b76ae61f5058cc092cf927fcb6d659
|
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, get_function_code, string_types
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
    """Marker exception used to report a test as skipped."""
    pass
import __future__
# add more flags ??
# Compiler flag for true division; presumably passed to compile()/exec when
# running test code — confirm at the call sites outside this chunk.
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
    """
    Add the given number of space characters to the beginning of
    every non-blank line in ``s``, and return the result.
    If the string ``s`` is Unicode, it is encoded using the stdout
    encoding and the ``backslashreplace`` error handler.
    """
    # After a 2to3 run the below code is bogus, so wrap it with a version check
    if not PY3:
        if isinstance(s, unicode):
            s = s.encode(pdoctest._encoding, 'backslashreplace')

    # This regexp matches the start of non-blank lines:
    return re.sub('(?m)^(?!$)', indent*' ', s)

# Replace doctest's own _indent so output encoding is handled consistently.
pdoctest._indent = _indent
# override the doctest reporter to keep failure output working on Windows and Python 3
def _report_failure(self, out, test, example, got):
    """
    Report that the given example failed.
    """
    s = self._checker.output_difference(example, got, self.optionflags)
    # Drop characters that cannot round-trip through the console encoding.
    s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
    out(self._failure_header(test, example) + s)

# Only patch doctest's reporter on Python 3 under Windows, where the default
# reporter's output can fail to encode.
if PY3 and IS_WINDOWS:
    DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
    """
    Convert a list of '/'-separated paths into native (``os.sep``-separated)
    paths, lowercasing them when the filesystem is case insensitive.
    """
    converted = []
    for path in lst:
        native = os.path.join(*path.split("/"))
        if sys.platform == "win32":
            # on windows the slash after the drive colon is dropped by the
            # split/join above, so restore it
            colon = native.find(':')
            if colon != -1 and native[colon + 1] != '\\':
                native = native[:colon + 1] + '\\' + native[colon + 1:]
        converted.append(sys_normcase(native))
    return converted
def get_sympy_dir():
    """
    Returns the root sympy directory and set the global value
    indicating whether the system is case sensitive or not.
    """
    global sys_case_insensitive

    this_file = os.path.abspath(__file__)
    sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
    sympy_dir = os.path.normpath(sympy_dir)
    # Case insensitivity is detected by probing the same directory under
    # different capitalizations: all three checks pass only when the
    # filesystem folds case.
    sys_case_insensitive = (os.path.isdir(sympy_dir) and
                            os.path.isdir(sympy_dir.lower()) and
                            os.path.isdir(sympy_dir.upper()))
    return sys_normcase(sympy_dir)
def sys_normcase(f):
    # Lowercase the path on case-insensitive filesystems so comparisons work.
    # NOTE: calling this before get_sympy_dir() has run raises NameError.
    if sys_case_insensitive:  # global defined after call to get_sympy_dir()
        return f.lower()
    return f
def setup_pprint():
    """Configure sympy printing so doctest output is stable and ASCII-only."""
    from sympy import pprint_use_unicode, init_printing

    # force pprint to be in ascii mode in doctests
    pprint_use_unicode(False)

    # hook our nice, hash-stable strprinter
    init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
                                              function_kwargs=None, command=sys.executable,
                                              module='sympy.utilities.runtests', force=False):
    """
    Run a function in a Python subprocess with hash randomization enabled.

    If hash randomization is not supported by the version of Python given, it
    returns False.  Otherwise, it returns the exit value of the command.  The
    function is passed to sys.exit(), so the return value of the function will
    be the return value.

    The environment variable PYTHONHASHSEED is used to seed Python's hash
    randomization.  If it is set, this function will return False, because
    starting a new subprocess is unnecessary in that case.  If it is not set,
    one is set at random, and the tests are run.  Note that if this
    environment variable is set when Python starts, hash randomization is
    automatically enabled.  To force a subprocess to be created even if
    PYTHONHASHSEED is set, pass ``force=True``.  This flag will not force a
    subprocess in Python versions that do not support hash randomization (see
    below), because those versions of Python do not support the ``-R`` flag.

    ``function`` should be a string name of a function that is importable from
    the module ``module``, like "_test".  The default for ``module`` is
    "sympy.utilities.runtests".  ``function_args`` and ``function_kwargs``
    should be a repr-able tuple and dict, respectively.  The default Python
    command is sys.executable, which is the currently running Python command.

    This function is necessary because the seed for hash randomization must be
    set by the environment variable before Python starts.  Hence, in order to
    use a predetermined seed for tests, we must start Python in a separate
    subprocess.

    Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
    3.1.5, and 3.2.3, and is enabled by default in all Python versions after
    and including 3.3.0.

    Examples
    ========

    >>> from sympy.utilities.runtests import (
    ... run_in_subprocess_with_hash_randomization)
    >>> # run the core tests in verbose mode
    >>> run_in_subprocess_with_hash_randomization("_test",
    ... function_args=("core",),
    ... function_kwargs={'verbose': True}) # doctest: +SKIP
    # Will return 0 if sys.executable supports hash randomization and tests
    # pass, 1 if they fail, and False if it does not support hash
    # randomization.
    """
    # Note, we must return False everywhere, not None, as subprocess.call will
    # sometimes return None.

    # None (not a mutable {}) is the default so the dict is not shared
    # between calls.
    if function_kwargs is None:
        function_kwargs = {}

    # First check if the Python version supports hash randomization
    # If it doesn't have this support, it won't recognize the -R flag
    p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode != 0:
        return False

    hash_seed = os.getenv("PYTHONHASHSEED")
    if not hash_seed:
        os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
    else:
        if not force:
            # The seed is already fixed for this process; no subprocess needed.
            return False
    # Now run the command
    commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
                     (module, function, function, repr(function_args),
                      repr(function_kwargs)))
    try:
        return subprocess.call([command, "-R", "-c", commandstring])
    finally:
        # Put the environment variable back, so that it reads correctly for
        # the current Python process.
        if hash_seed is None:
            del os.environ["PYTHONHASHSEED"]
        else:
            os.environ["PYTHONHASHSEED"] = hash_seed
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
                  doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
    """
    Run all tests.

    Right now, this runs the regular tests (bin/test), the doctests
    (bin/doctest), the examples (examples/all.py), and the sage tests (see
    sympy/external/tests/test_sage.py).

    This is what ``setup.py test`` uses.

    You can pass arguments and keyword arguments to the test functions that
    support them (for now, test, doctest, and the examples).  See the
    docstrings of those functions for a description of the available options.

    For example, to run the solvers tests with colors turned off:

    >>> from sympy.utilities.runtests import run_all_tests
    >>> run_all_tests(test_args=("solvers",),
    ... test_kwargs={"colors": False}) # doctest: +SKIP
    """
    # NOTE(review): the {} defaults are mutable but are only read here, so
    # they are not shared-state bugs in practice.
    tests_successful = True

    try:
        # Regular tests
        if not test(*test_args, **test_kwargs):
            # some regular test fails, so set the tests_successful
            # flag to false and continue running the doctests
            tests_successful = False

        # Doctests
        print()
        if not doctest(*doctest_args, **doctest_kwargs):
            tests_successful = False

        # Examples
        print()
        sys.path.append("examples")
        from all import run_examples  # examples/all.py
        if not run_examples(*examples_args, **examples_kwargs):
            tests_successful = False

        # Sage tests
        if not (sys.platform == "win32" or PY3):
            # run Sage tests; Sage currently doesn't support Windows or Python 3
            dev_null = open(os.devnull, 'w')
            if subprocess.call("sage -v", shell=True, stdout=dev_null,
                               stderr=dev_null) == 0:
                if subprocess.call("sage -python bin/test "
                                   "sympy/external/tests/test_sage.py", shell=True) != 0:
                    tests_successful = False

        if tests_successful:
            return
        else:
            # Return nonzero exit code
            sys.exit(1)
    except KeyboardInterrupt:
        print()
        print("DO *NOT* COMMIT!")
        sys.exit(1)
def test(*paths, **kwargs):
    """
    Run tests in the specified test_*.py files.

    Tests in a particular test_*.py file are run if any of the given strings
    in ``paths`` matches a part of the test file's path. If ``paths=[]``,
    tests in all test_*.py files are run.

    Notes:

    - If sort=False, tests are run in random order (not default).
    - Paths can be entered in native system format or in unix,
      forward-slash format.

    **Explanation of test results**

    ======  ===============================================================
    Output  Meaning
    ======  ===============================================================
    .       passed
    F       failed
    X       XPassed (expected to fail but passed)
    f       XFAILed (expected to fail and indeed failed)
    s       skipped
    w       slow
    T       timeout (e.g., when ``--timeout`` is used)
    K       KeyboardInterrupt (when running the slow tests with ``--slow``,
            you can interrupt one of them without killing the test runner)
    ======  ===============================================================

    Colors have no additional meaning and are used just to facilitate
    interpreting the output.

    Examples
    ========

    >>> import sympy

    Run all tests:

    >>> sympy.test()    # doctest: +SKIP

    Run one file:

    >>> sympy.test("sympy/core/tests/test_basic.py")    # doctest: +SKIP
    >>> sympy.test("_basic")    # doctest: +SKIP

    Run all tests in sympy/functions/ and some particular file:

    >>> sympy.test("sympy/core/tests/test_basic.py",
    ...            "sympy/functions")    # doctest: +SKIP

    Run all tests in sympy/core and sympy/utilities:

    >>> sympy.test("/core", "/util")    # doctest: +SKIP

    Run specific test from a file:

    >>> sympy.test("sympy/core/tests/test_basic.py",
    ...            kw="test_equality")    # doctest: +SKIP

    Run specific test from any file:

    >>> sympy.test(kw="subs")    # doctest: +SKIP

    Run the tests with verbose mode on:

    >>> sympy.test(verbose=True)    # doctest: +SKIP

    Don't sort the test output:

    >>> sympy.test(sort=False)    # doctest: +SKIP

    Turn on post-mortem pdb:

    >>> sympy.test(pdb=True)    # doctest: +SKIP

    Turn off colors:

    >>> sympy.test(colors=False)    # doctest: +SKIP

    Force colors, even when the output is not to a terminal (this is useful,
    e.g., if you are piping to ``less -r`` and you still want colors)

    >>> sympy.test(force_colors=True)    # doctest: +SKIP

    The traceback verboseness can be set to "short" or "no" (default is
    "short")

    >>> sympy.test(tb='no')    # doctest: +SKIP

    The ``split`` option can be passed to split the test run into parts. The
    split currently only splits the test files, though this may change in the
    future. ``split`` should be a string of the form 'a/b', which will run
    part ``a`` of ``b``. For instance, to run the first half of the test suite:

    >>> sympy.test(split='1/2')  # doctest: +SKIP

    You can disable running the tests in a separate subprocess using
    ``subprocess=False``.  This is done to support seeding hash randomization,
    which is enabled by default in the Python versions where it is supported.
    If subprocess=False, hash randomization is enabled/disabled according to
    whether it has been enabled or not in the calling Python process.
    However, even if it is enabled, the seed cannot be printed unless it is
    called from a new Python process.

    Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
    3.1.5, and 3.2.3, and is enabled by default in all Python versions after
    and including 3.3.0.

    If hash randomization is not supported ``subprocess=False`` is used
    automatically.

    >>> sympy.test(subprocess=False)     # doctest: +SKIP

    To set the hash randomization seed, set the environment variable
    ``PYTHONHASHSEED`` before running the tests.  This can be done from within
    Python using

    >>> import os
    >>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP

    Or from the command line using

    $ PYTHONHASHSEED=42 ./bin/test

    If the seed is not set, a random seed will be chosen.

    Note that to reproduce the same hash values, you must use both the same
    seed as well as the same architecture (32-bit vs. 64-bit).
    """
    # NOTE(review): this local deliberately shadows the ``subprocess`` module
    # inside this function.
    subprocess = kwargs.pop("subprocess", True)
    if subprocess:
        # Prefer a fresh subprocess so a random PYTHONHASHSEED can take effect.
        ret = run_in_subprocess_with_hash_randomization("_test",
            function_args=paths, function_kwargs=kwargs)
        if ret is not False:
            return not bool(ret)
    # Hash randomization unsupported (or subprocess disabled): run in-process.
    return not bool(_test(*paths, **kwargs))
def _test(*paths, **kwargs):
    """
    Internal function that actually runs the tests.

    All keyword arguments from ``test()`` are passed to this function except for
    ``subprocess``.

    Returns 0 if tests passed and 1 if they failed.  See the docstring of
    ``test()`` for more information.
    """
    verbose = kwargs.get("verbose", False)
    tb = kwargs.get("tb", "short")
    kw = kwargs.get("kw", "")
    post_mortem = kwargs.get("pdb", False)
    colors = kwargs.get("colors", True)
    force_colors = kwargs.get("force_colors", False)
    sort = kwargs.get("sort", True)
    seed = kwargs.get("seed", None)
    if seed is None:
        # Pick a random ordering seed when none was supplied.
        seed = random.randrange(100000000)
    timeout = kwargs.get("timeout", False)
    slow = kwargs.get("slow", False)
    enhance_asserts = kwargs.get("enhance_asserts", False)
    split = kwargs.get('split', None)

    r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
                       force_colors=force_colors, split=split)
    t = SymPyTests(r, kw, post_mortem, seed)

    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False

    # Show deprecation warnings
    import warnings
    warnings.simplefilter("error", SymPyDeprecationWarning)

    test_files = t.get_test_files('sympy')

    if len(paths) == 0:
        matched = test_files
    else:
        paths = convert_to_native_paths(paths)
        matched = []
        for f in test_files:
            basename = os.path.basename(f)
            for p in paths:
                # match a path substring anywhere, or a glob on the file name
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break

    if split:
        matched = split_list(matched, split)

    t._testfiles.extend(matched)

    return int(not t.test(sort=sort, timeout=timeout,
        slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
    r"""
    Runs doctests in all \*.py files in the sympy directory which match
    any of the given strings in ``paths`` or all tests if paths=[].

    Notes:

    - Paths can be entered in native system format or in unix,
      forward-slash format.
    - Files that are on the blacklist can be tested by providing
      their path; they are only excluded if no paths are given.

    Examples
    ========

    >>> import sympy

    Run all tests:

    >>> sympy.doctest() # doctest: +SKIP

    Run one file:

    >>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
    >>> sympy.doctest("polynomial.rst") # doctest: +SKIP

    Run all tests in sympy/functions/ and some particular file:

    >>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP

    Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
    sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:

    >>> sympy.doctest("polynomial") # doctest: +SKIP

    The ``split`` option can be passed to split the test run into parts. The
    split currently only splits the test files, though this may change in the
    future. ``split`` should be a string of the form 'a/b', which will run
    part ``a`` of ``b``. Note that the regular doctests and the Sphinx
    doctests are split independently. For instance, to run the first half of
    the test suite:

    >>> sympy.doctest(split='1/2')  # doctest: +SKIP

    The ``subprocess`` and ``verbose`` options are the same as with the function
    ``test()``.  See the docstring of that function for more information.
    """
    # The raw (r"") docstring prefix is required: ``\*`` is an invalid escape
    # sequence in an ordinary string literal and warns on modern Python.
    # NOTE: this local deliberately shadows the ``subprocess`` module here.
    subprocess = kwargs.pop("subprocess", True)
    if subprocess:
        # Prefer a fresh subprocess so a random PYTHONHASHSEED can take effect.
        ret = run_in_subprocess_with_hash_randomization("_doctest",
            function_args=paths, function_kwargs=kwargs)
        if ret is not False:
            return not bool(ret)
    # Hash randomization unsupported (or subprocess disabled): run in-process.
    return not bool(_doctest(*paths, **kwargs))
def _doctest(*paths, **kwargs):
    """
    Internal function that actually runs the doctests.

    All keyword arguments from ``doctest()`` are passed to this function
    except for ``subprocess``.

    Returns 0 if tests passed and 1 if they failed.  See the docstrings of
    ``doctest()`` and ``test()`` for more information.
    """
    normal = kwargs.get("normal", False)
    verbose = kwargs.get("verbose", False)
    blacklist = kwargs.get("blacklist", [])
    split = kwargs.get('split', None)
    # NOTE(review): a caller-supplied blacklist is mutated in place by the
    # extend() calls below.
    blacklist.extend([
        "doc/src/modules/mpmath",  # needs to be fixed upstream
        "sympy/mpmath",  # needs to be fixed upstream
        "doc/src/modules/plotting.rst",  # generates live plots
        "sympy/statistics",  # prints a deprecation
        "doc/src/modules/statistics.rst",  # warning (the module is deprecated)
        "sympy/utilities/compilef.py"  # needs tcc
    ])

    # Skip doctests that need optional third-party packages when those
    # packages are not installed.
    if import_module('numpy') is None:
        blacklist.extend([
            "sympy/plotting/experimental_lambdify.py",
            "sympy/plotting/plot_implicit.py",
            "examples/advanced/autowrap_integrators.py",
            "examples/advanced/autowrap_ufuncify.py",
            "examples/intermediate/sample.py",
            "examples/intermediate/mplot2d.py",
            "examples/intermediate/mplot3d.py",
            "doc/src/modules/numeric-computation.rst"
        ])
    else:
        if import_module('matplotlib') is None:
            blacklist.extend([
                "examples/intermediate/mplot2d.py",
                "examples/intermediate/mplot3d.py"
            ])
        else:
            # don't display matplotlib windows
            from sympy.plotting.plot import unset_show
            unset_show()

    if import_module('pyglet') is None:
        blacklist.extend(["sympy/plotting/pygletplot"])

    if import_module('theano') is None:
        blacklist.extend(["doc/src/modules/numeric-computation.rst"])

    # disabled because of doctest failures in asmeurer's bot
    blacklist.extend([
        "sympy/utilities/autowrap.py",
        "examples/advanced/autowrap_integrators.py",
        "examples/advanced/autowrap_ufuncify.py"
    ])

    # pytest = import_module('pytest')
    # py = import_module('py')
    # if py is None or pytest is None:
    #     blacklist.extend([
    #         "sympy/conftest.py",
    #         "sympy/utilities/benchmarking.py"
    #     ])

    # blacklist these modules until issue 4840 is resolved
    blacklist.extend([
        "sympy/conftest.py",
        "sympy/utilities/benchmarking.py"
    ])

    blacklist = convert_to_native_paths(blacklist)

    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False

    # Show deprecation warnings
    import warnings
    warnings.simplefilter("error", SymPyDeprecationWarning)

    r = PyTestReporter(verbose, split=split)
    t = SymPyDocTests(r, normal)

    test_files = t.get_test_files('sympy')
    test_files.extend(t.get_test_files('examples', init_only=False))

    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]

    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # take only what was requested...but not blacklisted items
        # and allow for partial match anywhere or fnmatch of name
        paths = convert_to_native_paths(paths)
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break

    if split:
        matched = split_list(matched, split)

    t._testfiles.extend(matched)

    # run the tests and record the result for this *py portion of the tests
    if t._testfiles:
        failed = not t.test()
    else:
        failed = False

    # N.B.
    # --------------------------------------------------------------------
    # Here we test *.rst files at or below doc/src. Code from these must
    # be self supporting in terms of imports since there is no importing
    # of necessary modules by doctest.testfile. If you try to pass *.py
    # files through this they might fail because they will lack the needed
    # imports and smarter parsing that can be done with source code.
    #
    test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
    test_files.sort()

    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]

    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # Take only what was requested as long as it's not on the blacklist.
        # Paths were already made native in *py tests so don't repeat here.
        # There's no chance of having a *py file slip through since we
        # only have *rst files in test_files.
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break

    if split:
        matched = split_list(matched, split)

    setup_pprint()
    first_report = True
    for rst_file in matched:
        if not os.path.isfile(rst_file):
            continue
        old_displayhook = sys.displayhook
        try:
            # out = pdoctest.testfile(
            #     rst_file, module_relative=False, encoding='utf-8',
            #     optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE)
            out = sympytestfile(
                rst_file, module_relative=False, encoding='utf-8',
                optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
                pdoctest.IGNORE_EXCEPTION_DETAIL)
        finally:
            # make sure we return to the original displayhook in case some
            # doctest has changed that
            sys.displayhook = old_displayhook

        rstfailed, tested = out
        if tested:
            failed = rstfailed or failed
            if first_report:
                first_report = False
                msg = 'rst doctests start'
                if not t._testfiles:
                    r.start(msg=msg)
                else:
                    r.write_center(msg)
                    print()
            # use as the id, everything past the first 'sympy'
            file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
            print(file_id, end=" ")
            # get at least the name out so it is know who is being tested
            wid = r.terminal_width - len(file_id) - 1  # update width
            test_file = '[%s]' % (tested)
            report = '[%s]' % (rstfailed or 'OK')
            print(''.join(
                [test_file, ' '*(wid - len(test_file) - len(report)), report])
            )

    # the doctests for *py will have printed this message already if there was
    # a failure, so now only print it if there was intervening reporting by
    # testing the *rst as evidenced by first_report no longer being True.
    if not first_report and failed:
        print()
        print("DO *NOT* COMMIT!")

    return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')

def split_list(l, split):
    """
    Return part ``a`` of ``b`` of the list ``l``.

    ``split`` should be a string of the form 'a/b'.  For instance, '1/3'
    would give the first of three parts.  If the length of the list is not
    divisible by the number of splits, the last split absorbs the remainder.

    >>> from sympy.utilities.runtests import split_list
    >>> a = list(range(10))
    >>> split_list(a, '1/3')
    [0, 1, 2]
    >>> split_list(a, '2/3')
    [3, 4, 5]
    >>> split_list(a, '3/3')
    [6, 7, 8, 9]
    """
    match = sp.match(split)
    if match is None:
        raise ValueError("split must be a string of the form a/b where a and b are ints")
    part, total = (int(group) for group in match.groups())
    size = len(l)
    return l[(part - 1)*size//total:part*size//total]
from collections import namedtuple
# Result pair returned by sympytestfile(): the number of failed and the
# number of attempted doctest examples (mirrors doctest.TestResults).
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
                  globs=None, verbose=None, report=True, optionflags=0,
                  extraglobs=None, raise_on_error=False,
                  parser=pdoctest.DocTestParser(), encoding=None):
    """
    Test examples in the given file.  Return (#failures, #tests).

    This mirrors ``doctest.testfile`` but runs the examples through
    ``SymPyDocTestRunner``/``SymPyOutputChecker``.

    Optional keyword arg ``module_relative`` specifies how filenames
    should be interpreted:

    - If ``module_relative`` is True (the default), then ``filename``
      specifies a module-relative path.  By default, this path is
      relative to the calling module's directory; but if the
      ``package`` argument is specified, then it is relative to that
      package.  To ensure os-independence, ``filename`` should use
      "/" characters to separate path segments, and should not
      be an absolute path (i.e., it may not begin with "/").

    - If ``module_relative`` is False, then ``filename`` specifies an
      os-specific path.  The path may be absolute or relative (to
      the current working directory).

    Optional keyword arg ``name`` gives the name of the test; by default
    use the file's basename.

    Optional keyword argument ``package`` is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify ``package`` if ``module_relative`` is False.

    Optional keyword arg ``globs`` gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg ``extraglobs`` gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg ``verbose`` prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg ``report`` prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg ``optionflags`` or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

    - DONT_ACCEPT_TRUE_FOR_1
    - DONT_ACCEPT_BLANKLINE
    - NORMALIZE_WHITESPACE
    - ELLIPSIS
    - SKIP
    - IGNORE_EXCEPTION_DETAIL
    - REPORT_UDIFF
    - REPORT_CDIFF
    - REPORT_NDIFF
    - REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg ``raise_on_error`` raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg ``parser`` specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg ``encoding`` specifies an encoding that should
    be used to convert the file to unicode.
    """
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path
    if not PY3:
        text, filename = pdoctest._load_testfile(
            filename, package, module_relative)
        if encoding is not None:
            # Python 2 hands back bytes; decode explicitly.
            text = text.decode(encoding)
    else:
        text, filename = pdoctest._load_testfile(
            filename, package, module_relative, encoding)

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'

    if raise_on_error:
        runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
        # SymPy's checker compares floats in the output numerically.
        runner._checker = SymPyOutputChecker()

    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test, compileflags=future_flags)

    if report:
        runner.summarize()

    # Merge results into the module-global master runner, as doctest does.
    if pdoctest.master is None:
        pdoctest.master = runner
    else:
        pdoctest.master.merge(runner)

    return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
    """
    Collects and executes SymPy's ``test_*.py`` test files.

    Files are registered in ``self._testfiles`` (see ``get_test_files``)
    and executed by ``test``/``test_file``; progress and results are
    forwarded to the ``reporter`` object passed to the constructor.
    """

    def __init__(self, reporter, kw="", post_mortem=False,
                 seed=None):
        self._post_mortem = post_mortem
        self._kw = kw                # keyword filter for test names
        self._count = 0
        self._root_dir = sympy_dir
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        self._testfiles = []
        # A fixed seed makes the shuffled test order reproducible.
        self._seed = seed if seed is not None else random.random()

    def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
        """
        Runs the tests returning True if all tests pass, otherwise False.

        If sort=False run tests in random order.
        """
        if sort:
            self._testfiles.sort()
        else:
            from random import shuffle
            random.seed(self._seed)
            shuffle(self._testfiles)
        self._reporter.start(self._seed)
        for f in self._testfiles:
            try:
                self.test_file(f, sort, timeout, slow, enhance_asserts)
            except KeyboardInterrupt:
                print(" interrupted by user")
                self._reporter.finish()
                raise
        return self._reporter.finish()

    def _enhance_asserts(self, source):
        """
        Rewrite ``assert`` statements with comparisons so that, on failure,
        the message shows the actual compared values (via an AST transform).
        """
        from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
            Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)

        ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
               "Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
               "In": 'in', "NotIn": 'not in'}

        class Transform(NodeTransformer):
            def visit_Assert(self, stmt):
                if isinstance(stmt.test, Compare):
                    compare = stmt.test
                    values = [compare.left] + compare.comparators
                    names = [ "_%s" % i for i, _ in enumerate(values) ]
                    names_store = [ Name(n, Store()) for n in names ]
                    names_load = [ Name(n, Load()) for n in names ]
                    target = Tuple(names_store, Store())
                    value = Tuple(values, Load())
                    assign = Assign([target], value)
                    new_compare = Compare(names_load[0], compare.ops, names_load[1:])
                    msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
                    msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
                    test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
                    return [assign, test]
                else:
                    return stmt

        tree = parse(source)
        new_tree = Transform().visit(tree)
        return fix_missing_locations(new_tree)

    def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
        """
        Execute a single test file: exec it in a fresh namespace, collect its
        ``test_*`` functions, and run each one, reporting the outcome.
        """
        funcs = []
        try:
            clear_cache()
            self._count += 1
            gl = {'__file__': filename}
            random.seed(self._seed)
            try:
                if PY3:
                    open_file = lambda: open(filename, encoding="utf8")
                else:
                    open_file = lambda: open(filename)

                with open_file() as f:
                    source = f.read()
                    if enhance_asserts:
                        try:
                            source = self._enhance_asserts(source)
                        except ImportError:
                            pass
                    code = compile(source, filename, "exec")
                    exec_(code, gl)
            except (SystemExit, KeyboardInterrupt):
                raise
            except ImportError:
                self._reporter.import_error(filename, sys.exc_info())
                return
            pytestfile = ""
            if "XFAIL" in gl:
                pytestfile = inspect.getsourcefile(gl["XFAIL"])
            pytestfile2 = ""
            if "slow" in gl:
                pytestfile2 = inspect.getsourcefile(gl["slow"])
            disabled = gl.get("disabled", False)
            if not disabled:
                # we need to filter only those functions that begin with 'test_'
                # that are defined in the testing file or in the file where
                # is defined the XFAIL decorator
                funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
                    (inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
                    (inspect.getsourcefile(gl[f]) == filename or
                     inspect.getsourcefile(gl[f]) == pytestfile or
                     inspect.getsourcefile(gl[f]) == pytestfile2)]
                if slow:
                    funcs = [f for f in funcs if getattr(f, '_slow', False)]
                # Sorting of XFAILed functions isn't fixed yet :-(
                funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
                i = 0
                while i < len(funcs):
                    if isgeneratorfunction(funcs[i]):
                        # some tests can be generators, that return the actual
                        # test functions. We unpack it below:
                        f = funcs.pop(i)
                        for fg in f():
                            func = fg[0]
                            args = fg[1:]
                            # BUGFIX: bind func/args as default arguments.
                            # A plain ``lambda: func(*args)`` closes over the
                            # loop variables, so every wrapper would end up
                            # calling the *last* generated test.
                            fgw = lambda func=func, args=args: func(*args)
                            funcs.insert(i, fgw)
                            i += 1
                    else:
                        i += 1
                # drop functions that are not selected with the keyword expression:
                funcs = [x for x in funcs if self.matches(x)]

            if not funcs:
                return
        except Exception:
            self._reporter.entering_filename(filename, len(funcs))
            raise

        self._reporter.entering_filename(filename, len(funcs))
        if not sort:
            random.shuffle(funcs)

        for f in funcs:
            self._reporter.entering_test(f)
            try:
                if getattr(f, '_slow', False) and not slow:
                    raise Skipped("Slow")
                if timeout:
                    self._timeout(f, timeout)
                else:
                    random.seed(self._seed)
                    f()
            except KeyboardInterrupt:
                if getattr(f, '_slow', False):
                    self._reporter.test_skip("KeyboardInterrupt")
                else:
                    raise
            except Exception:
                if timeout:
                    signal.alarm(0)  # Disable the alarm. It could not be handled before.
                t, v, tr = sys.exc_info()
                if t is AssertionError:
                    self._reporter.test_fail((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
                elif t.__name__ == "Skipped":
                    self._reporter.test_skip(v)
                elif t.__name__ == "XFail":
                    self._reporter.test_xfail()
                elif t.__name__ == "XPass":
                    self._reporter.test_xpass(v)
                else:
                    self._reporter.test_exception((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()

    def _timeout(self, function, timeout):
        """Run ``function``, skipping it (via SIGALRM) after ``timeout`` seconds."""
        def callback(x, y):
            signal.alarm(0)
            raise Skipped("Timeout")
        signal.signal(signal.SIGALRM, callback)
        signal.alarm(timeout)  # Set an alarm with a given timeout
        function()
        signal.alarm(0)  # Disable the alarm

    def matches(self, x):
        """
        Does the keyword expression self._kw match "x"? Returns True/False.

        Always returns True if self._kw is "".
        """
        if self._kw == "":
            return True
        return x.__name__.find(self._kw) != -1

    def get_test_files(self, dir, pat='test_*.py'):
        """
        Returns the list of test_*.py (default) files at or below directory
        ``dir`` relative to the sympy home directory.
        """
        dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])

        g = []
        for path, folders, files in os.walk(dir):
            g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])

        return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
    """
    Collects and runs doctests from the docstrings of SymPy source files.

    Files are registered in ``self._testfiles`` (see ``get_test_files``)
    and executed by ``test``/``test_file``; results are forwarded to the
    ``reporter`` object passed to the constructor.
    """

    def __init__(self, reporter, normal):
        self._count = 0
        self._root_dir = sympy_dir
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        # When "normal", doctests keep their module globals; otherwise each
        # doctest must do its own imports (see test_file below).
        self._normal = normal

        self._testfiles = []

    def test(self):
        """
        Runs the tests and returns True if all tests pass, otherwise False.
        """
        self._reporter.start()
        for f in self._testfiles:
            try:
                self.test_file(f)
            except KeyboardInterrupt:
                print(" interrupted by user")
                self._reporter.finish()
                raise
        return self._reporter.finish()

    def test_file(self, filename):
        """
        Find and run all doctests in ``filename``, reporting each result.
        """
        clear_cache()

        from sympy.core.compatibility import StringIO

        rel_name = filename[len(self._root_dir) + 1:]
        dirname, file = os.path.split(filename)
        module = rel_name.replace(os.sep, '.')[:-3]

        if rel_name.startswith("examples"):
            # Examples files do not have __init__.py files,
            # So we have to temporarily extend sys.path to import them
            sys.path.insert(0, dirname)
            module = file[:-3]  # remove ".py"
        setup_pprint()
        try:
            module = pdoctest._normalize_module(module)
            tests = SymPyDocTestFinder().find(module)
        except (SystemExit, KeyboardInterrupt):
            raise
        except ImportError:
            self._reporter.import_error(filename, sys.exc_info())
            return
        finally:
            # Undo the sys.path manipulation for "examples" files.
            if rel_name.startswith("examples"):
                del sys.path[0]

        tests = [test for test in tests if len(test.examples) > 0]
        # By default tests are sorted by alphabetical order by function name.
        # We sort by line number so one can edit the file sequentially from
        # bottom to top. However, if there are decorated functions, their line
        # numbers will be too large and for now one must just search for these
        # by text and function name.
        tests.sort(key=lambda x: -x.lineno)

        if not tests:
            return
        self._reporter.entering_filename(filename, len(tests))
        for test in tests:
            assert len(test.examples) != 0

            # check if there are external dependencies which need to be met
            if '_doctest_depends_on' in test.globs:
                if not self._process_dependencies(test.globs['_doctest_depends_on']):
                    self._reporter.test_skip()
                    continue

            runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
                    pdoctest.NORMALIZE_WHITESPACE |
                    pdoctest.IGNORE_EXCEPTION_DETAIL)
            runner._checker = SymPyOutputChecker()
            # Capture the doctest's output so it can be shown on failure.
            old = sys.stdout
            new = StringIO()
            sys.stdout = new
            # If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # then must run on their own; all imports must be explicit within
            # a function's docstring. Once imported that import will be
            # available to the rest of the tests in a given function's
            # docstring (unless clear_globs=True below).
            if not self._normal:
                test.globs = {}
                # if this is uncommented then all the test would get is what
                # comes by default with a "from sympy import *"
                #exec('from sympy import *') in test.globs
            test.globs['print_function'] = print_function
            try:
                f, t = runner.run(test, compileflags=future_flags,
                    out=new.write, clear_globs=False)
            except KeyboardInterrupt:
                raise
            finally:
                sys.stdout = old
            if f > 0:
                self._reporter.doctest_fail(test.name, new.getvalue())
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()

    def get_test_files(self, dir, pat='*.py', init_only=True):
        """
        Returns the list of \*.py files (default) from which docstrings
        will be tested which are at or below directory ``dir``. By default,
        only those that have an __init__.py in their parent directory
        and do not start with ``test_`` will be included.
        """
        def importable(x):
            """
            Checks if given pathname x is an importable module by checking for
            __init__.py file.

            Returns True/False.

            Currently we only test if the __init__.py file exists in the
            directory with the file "x" (in theory we should also test all the
            parent dirs).
            """
            init_py = os.path.join(os.path.dirname(x), "__init__.py")
            return os.path.exists(init_py)

        dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])

        g = []
        for path, folders, files in os.walk(dir):
            g.extend([os.path.join(path, f) for f in files
                      if not f.startswith('test_') and fnmatch(f, pat)])

        if init_only:
            # skip files that are not importable (i.e. missing __init__.py)
            g = [x for x in g if importable(x)]

        return [sys_normcase(gi) for gi in g]

    def _process_dependencies(self, deps):
        """
        Returns ``False`` if some dependencies are not met and the test should be
        skipped otherwise returns ``True``.

        Recognized keys in ``deps``: 'exe' (required executables), 'modules'
        (required importable modules), 'disable_viewers' (viewer commands to
        stub out on PATH), and 'pyglet'.
        """
        executables = deps.get('exe', None)
        moduledeps = deps.get('modules', None)
        viewers = deps.get('disable_viewers', None)
        pyglet = deps.get('pyglet', None)

        if executables is not None:
            for ex in executables:
                found = find_executable(ex)
                if found is None:
                    return False
        if moduledeps is not None:
            for extmod in moduledeps:
                if extmod == 'matplotlib':
                    matplotlib = import_module(
                        'matplotlib',
                        __import__kwargs={'fromlist':
                                          ['pyplot', 'cm', 'collections']},
                        min_module_version='1.0.0', catch=(RuntimeError,))
                    if matplotlib is not None:
                        pass
                    else:
                        return False
                else:
                    # TODO min version support
                    mod = import_module(extmod)
                    if mod is not None:
                        version = "unknown"
                        if hasattr(mod, '__version__'):
                            version = mod.__version__
                    else:
                        return False
        if viewers is not None:
            # Shadow the listed viewer programs with do-nothing scripts in a
            # temp dir prepended to PATH, so doctests never open a viewer.
            import tempfile
            tempdir = tempfile.mkdtemp()
            os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])

            if PY3:
                vw = '#!/usr/bin/env python3\n' \
                     'import sys\n' \
                     'if len(sys.argv) <= 1:\n' \
                     ' exit("wrong number of args")\n'
            else:
                vw = '#!/usr/bin/env python\n' \
                     'import sys\n' \
                     'if len(sys.argv) <= 1:\n' \
                     ' exit("wrong number of args")\n'

            for viewer in viewers:
                with open(os.path.join(tempdir, viewer), 'w') as fh:
                    fh.write(vw)

                # make the file executable
                os.chmod(os.path.join(tempdir, viewer),
                         stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
        if pyglet:
            # monkey-patch pyglet s.t. it does not open a window during
            # doctesting
            import pyglet

            class DummyWindow(object):
                def __init__(self, *args, **kwargs):
                    self.has_exit = True
                    self.width = 600
                    self.height = 400

                def set_vsync(self, x):
                    pass

                def switch_to(self):
                    pass

                def push_handlers(self, x):
                    pass

                def close(self):
                    pass

            pyglet.window.Window = DummyWindow
        return True
class SymPyDocTestFinder(DocTestFinder):
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.

    Modified from doctest's version by looking harder for code in the
    case that it looks like the code comes from a different module.
    In the case of decorated functions (e.g. @vectorize) they appear
    to come from a different module (e.g. multidimensional) even though
    their code is not there.
    """

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to ``tests``.
        """
        if self._verbose:
            print('Finding tests in %s' % name)

        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Make sure we don't run doctests for classes outside of sympy, such
        # as in numpy or scipy.
        if inspect.isclass(obj):
            if obj.__module__.split('.')[0] != 'sympy':
                return

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        if not self._recurse:
            return

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj):
            for rawname, val in obj.__dict__.items():
                # Recurse to functions & classes.
                if inspect.isfunction(val) or inspect.isclass(val):
                    # Make sure we don't run doctests functions or classes
                    # from different modules
                    if val.__module__ != module.__name__:
                        continue

                    assert self._from_module(module, val), \
                        "%s is not in module %s (rawname %s)" % (val, module, rawname)

                    try:
                        valname = '%s.%s' % (name, rawname)
                        self._find(tests, val, valname, module,
                                   source_lines, globs, seen)
                    except KeyboardInterrupt:
                        raise
                    except ValueError:
                        raise
                    except Exception:
                        # Objects that cannot be introspected are skipped.
                        pass

            # Look for tests in a module's __test__ dictionary.
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, string_types):
                    raise ValueError("SymPyDocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, string_types)):
                    raise ValueError("SymPyDocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj):
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__

                # Recurse to methods, properties, and nested classes.
                if (inspect.isfunction(val) or
                        inspect.isclass(val) or
                        isinstance(val, property)):
                    # Make sure we don't run doctests functions or classes
                    # from different modules
                    if isinstance(val, property):
                        if val.fget.__module__ != module.__name__:
                            continue
                    else:
                        if val.__module__ != module.__name__:
                            continue

                    assert self._from_module(module, val), \
                        "%s is not in module %s (valname %s)" % (val, module, valname)

                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        lineno = None

        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, string_types):
            # obj is a string in the case for objects in the polys package.
            # Note that source_lines is a binary string (compiled polys
            # modules), which can't be handled by _find_lineno so determine
            # the line number here.

            docstring = obj

            matches = re.findall("line \d+", name)
            assert len(matches) == 1, \
                "string '%s' does not contain lineno " % name

            # NOTE: this is not the exact linenumber but its better than no
            # lineno ;)
            lineno = int(matches[0][5:])

        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, string_types):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # check that properties have a docstring because _find_lineno
        # assumes it
        if isinstance(obj, property):
            if obj.fget.__doc__ is None:
                return None

        # Find the docstring's location in the file.
        if lineno is None:
            # handling of properties is not implemented in _find_lineno so do
            # it here
            tobj = obj if not isinstance(obj, property) else obj.fget
            lineno = self._find_lineno(tobj, source_lines)

        assert lineno is not None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            if filename[-4:] in (".pyc", ".pyo"):
                # Point at the .py source, not the compiled file.
                filename = filename[:-1]

        if hasattr(obj, '_doctest_depends_on'):
            globs['_doctest_depends_on'] = obj._doctest_depends_on
        else:
            globs['_doctest_depends_on'] = {}

        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The ``run`` method is used to process a single DocTest case.  It
    returns a tuple ``(f, t)``, where ``t`` is the number of test cases
    tried, and ``f`` is the number of test cases that failed.

    Modified from the doctest version to not reset the sys.displayhook (see
    issue 5140).

    See the docstring of the original DocTestRunner for more information.
    """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in ``test``, and display the results using the
        writer function ``out``.

        The examples are run in the namespace ``test.globs``.  If
        ``clear_globs`` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use ``clear_globs=False``.

        ``compileflags`` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to ``globs``.

        The output of each example is checked using
        ``SymPyDocTestRunner.check_output``, and the results are
        formatted by the ``SymPyDocTestRunner.report_*`` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = pdoctest._extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = pdoctest.linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        try:
            test.globs['print_function'] = print_function
            return self.__run(test, compileflags, out)
        finally:
            # Restore everything patched above, in reverse.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
# We have to override the name mangled methods.
# ``SymPyDocTestRunner.run`` calls ``self.__run`` etc., which Python mangles
# to ``_SymPyDocTestRunner__run``; the implementations only exist on the base
# class under the ``_DocTestRunner`` prefix, so alias them here explicitly.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
    DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
    DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
    """
    Compared to the OutputChecker from the stdlib our OutputChecker class
    supports numerical comparison of floats occurring in the output of the
    doctest examples (tolerance 1e-5), so tiny formatting differences in
    printed floats do not fail a doctest.
    """

    def __init__(self):
        # NOTE OutputChecker is an old-style class with no __init__ method,
        # so we can't call the base class version of __init__ here
        got_floats = r'(\d+\.\d*|\.\d+)'

        # floats in the 'want' string may contain ellipses
        want_floats = got_floats + r'(\.{3})?'

        front_sep = r'\s|\+|\-|\*|,'
        back_sep = front_sep + r'|j|e'

        fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
        fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
        self.num_got_rgx = re.compile(r'(%s|%s)' % (fbeg, fmidend))

        fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
        fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
        self.num_want_rgx = re.compile(r'(%s|%s)' % (fbeg, fmidend))

    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # TODO parse integers as well ?

        # Parse floats and compare them. If some of the parsed floats contain
        # ellipses, skip the comparison.
        matches = self.num_got_rgx.finditer(got)
        numbers_got = [match.group(1) for match in matches]  # list of strs
        matches = self.num_want_rgx.finditer(want)
        numbers_want = [match.group(1) for match in matches]  # list of strs
        if len(numbers_got) != len(numbers_want):
            return False

        if len(numbers_got) > 0:
            nw_ = []
            for ng, nw in zip(numbers_got, numbers_want):
                if '...' in nw:
                    # An ellipsis in the wanted number matches anything;
                    # keep the got value so the textual comparison passes.
                    nw_.append(ng)
                    continue
                else:
                    nw_.append(nw)

                if abs(float(ng)-float(nw)) > 1e-5:
                    return False

            # Substitute the (numerically equal) wanted numbers back into
            # `got`, so the remaining comparison is purely textual.
            got = self.num_got_rgx.sub(r'%s', got)
            got = got % tuple(nw_)

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        # (Raw strings below: '\s' in a non-raw literal is an invalid
        # escape sequence, deprecated since Python 3.6.)
        if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & pdoctest.NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & pdoctest.ELLIPSIS:
            if pdoctest._ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False
class Reporter(object):
    """Abstract parent of all reporter classes; defines no behavior itself."""
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
    def root_dir(self, dir):
        # Remember the project root directory; used when printing file paths.
        self._root_dir = dir
    @property
    def terminal_width(self):
        """
        The detected terminal width in columns, cached after first use.

        Detection order: Windows console API (ctypes), then ``stty -a``
        output on POSIX; falls back to ``self._default_width`` for pipes
        or when detection fails.
        """
        if self._terminal_width is not None:
            return self._terminal_width

        def findout_terminal_width():
            if sys.platform == "win32":
                # Windows support is based on:
                #
                # http://code.activestate.com/recipes/
                # 440694-determine-size-of-console-window-on-windows/
                from ctypes import windll, create_string_buffer

                h = windll.kernel32.GetStdHandle(-12)
                csbi = create_string_buffer(22)
                res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)

                if res:
                    import struct
                    (_, _, _, _, _, left, _, right, _, _, _) = \
                        struct.unpack("hhhhHhhhhhh", csbi.raw)
                    return right - left
                else:
                    return self._default_width

            if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
                return self._default_width  # leave PIPEs alone

            try:
                process = subprocess.Popen(['stty', '-a'],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                stdout = process.stdout.read()
                if PY3:
                    stdout = stdout.decode("utf-8")
            except (OSError, IOError):
                pass
            else:
                # We support the following output formats from stty:
                #
                # 1) Linux -> columns 80
                # 2) OS X -> 80 columns
                # 3) Solaris -> columns = 80
                re_linux = r"columns\s+(?P<columns>\d+);"
                re_osx = r"(?P<columns>\d+)\s*columns;"
                re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
                for regex in (re_linux, re_osx, re_solaris):
                    match = re.search(regex, stdout)
                    if match is not None:
                        columns = match.group('columns')
                        try:
                            return int(columns)
                        except ValueError:
                            pass

            return self._default_width

        width = findout_terminal_width()
        # Cache the result so detection runs at most once.
        self._terminal_width = width

        return width
    def write(self, text, color="", align="left", width=None,
              force_colors=False):
        """
        Prints a text on the screen.

        It uses sys.stdout.write(), so no readline library is necessary.

        Parameters
        ==========

        color : choose from the colors below, "" means default color
        align : "left"/"right", "left" is a normal print, "right" is aligned on
                the right-hand side of the screen, filled with spaces if
                necessary
        width : the screen width

        NOTE(review): the ``force_colors`` parameter appears unused here;
        the body consults ``self._force_colors`` instead — confirm intent.
        """
        color_templates = (
            ("Black", "0;30"),
            ("Red", "0;31"),
            ("Green", "0;32"),
            ("Brown", "0;33"),
            ("Blue", "0;34"),
            ("Purple", "0;35"),
            ("Cyan", "0;36"),
            ("LightGray", "0;37"),
            ("DarkGray", "1;30"),
            ("LightRed", "1;31"),
            ("LightGreen", "1;32"),
            ("Yellow", "1;33"),
            ("LightBlue", "1;34"),
            ("LightPurple", "1;35"),
            ("LightCyan", "1;36"),
            ("White", "1;37"),
        )

        colors = {}
        for name, value in color_templates:
            colors[name] = value
        c_normal = '\033[0m'
        c_color = '\033[%sm'

        if width is None:
            width = self.terminal_width

        if align == "right":
            if self._write_pos + len(text) > width:
                # we don't fit on the current line, create a new line
                self.write("\n")
            # Pad with spaces so the text ends at the right screen edge.
            self.write(" "*(width - self._write_pos - len(text)))

        if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
                sys.stdout.isatty():
            # the stdout is not a terminal, this for example happens if the
            # output is piped to less, e.g. "bin/test | less". In this case,
            # the terminal control sequences would be printed verbatim, so
            # don't use any colors.
            color = ""
        elif sys.platform == "win32":
            # Windows consoles don't support ANSI escape sequences
            color = ""
        elif not self._colors:
            color = ""

        if self._line_wrap:
            if text[0] != "\n":
                sys.stdout.write("\n")

        # Avoid UnicodeEncodeError when printing out test failures
        if PY3 and IS_WINDOWS:
            text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
        elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
            text = text.encode(sys.stdout.encoding, 'backslashreplace'
                               ).decode(sys.stdout.encoding)

        if color == "":
            sys.stdout.write(text)
        else:
            sys.stdout.write("%s%s%s" %
                             (c_color % colors[color], text, c_normal))
        sys.stdout.flush()
        # Track the cursor column after the last newline (if any), and flag
        # a pending wrap when the cursor reached the screen edge.
        l = text.rfind("\n")
        if l == -1:
            self._write_pos += len(text)
        else:
            self._write_pos = len(text) - l - 1
        self._line_wrap = self._write_pos >= width
        self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
    def write_exception(self, e, val, tb):
        """Format and write an exception (type *e*, value *val*, traceback *tb*)
        through self.write, dropping the first traceback frame.
        """
        t = traceback.extract_tb(tb)
        # remove the first item, as that is always runtests.py
        t = t[1:]
        t = traceback.format_list(t)
        self.write("".join(t))
        t = traceback.format_exception_only(e, val)
        self.write("".join(t))
    def start(self, seed=None, msg="test process starts"):
        """Print the run header (interpreter, architecture, cache, ground
        types, random seed, hash randomization, split) and record the
        start time used by finish() for the timing summary.
        """
        self.write_center(msg)
        executable = sys.executable
        v = tuple(sys.version_info)
        python_version = "%s.%s.%s-%s-%s" % v
        implementation = platform.python_implementation()
        if implementation == 'PyPy':
            implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
        self.write("executable: %s (%s) [%s]\n" %
            (executable, python_version, implementation))
        from .misc import ARCH
        self.write("architecture: %s\n" % ARCH)
        from sympy.core.cache import USE_CACHE
        self.write("cache: %s\n" % USE_CACHE)
        from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
        version = ''
        if GROUND_TYPES =='gmpy':
            # HAS_GMPY distinguishes gmpy (1) from gmpy2 (2).
            if HAS_GMPY == 1:
                import gmpy
            elif HAS_GMPY == 2:
                import gmpy2 as gmpy
            version = gmpy.version()
        self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
        if seed is not None:
            self.write("random seed: %d\n" % seed)
        from .misc import HASH_RANDOMIZATION
        self.write("hash randomization: ")
        # An unset/zero PYTHONHASHSEED means hashing is not randomized.
        hash_seed = os.getenv("PYTHONHASHSEED") or '0'
        if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
            self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
        else:
            self.write("off\n")
        if self._split:
            self.write("split: %s\n" % self._split)
        self.write('\n')
        self._t_start = clock()
    def finish(self):
        """Print the end-of-run report: detail sections for xpassed tests,
        exceptions, failures and doctest failures, then the centered
        summary line. Returns True iff there were no failures, doctest
        failures or exceptions.
        """
        self._t_end = clock()
        self.write("\n")
        # The summary line is accumulated in module-level globals so the
        # nested add_text() helper below can wrap it across terminal lines.
        global text, linelen
        text = "tests finished: %d passed, " % self._passed
        linelen = len(text)
        def add_text(mytext):
            global text, linelen
            """Break new text if too long."""
            if linelen + len(mytext) > self.terminal_width:
                text += '\n'
                linelen = 0
            text += mytext
            linelen += len(mytext)
        if len(self._failed) > 0:
            add_text("%d failed, " % len(self._failed))
        if len(self._failed_doctest) > 0:
            add_text("%d failed, " % len(self._failed_doctest))
        if self._skipped > 0:
            add_text("%d skipped, " % self._skipped)
        if self._xfailed > 0:
            add_text("%d expected to fail, " % self._xfailed)
        if len(self._xpassed) > 0:
            add_text("%d expected to fail but passed, " % len(self._xpassed))
        if len(self._exceptions) > 0:
            add_text("%d exceptions, " % len(self._exceptions))
        add_text("in %.2f seconds" % (self._t_end - self._t_start))
        if len(self._xpassed) > 0:
            self.write_center("xpassed tests", "_")
            for e in self._xpassed:
                self.write("%s: %s\n" % (e[0], e[1]))
            self.write("\n")
        # Detail sections are suppressed when tracebacks are disabled ("no").
        if self._tb_style != "no" and len(self._exceptions) > 0:
            #self.write_center("These tests raised an exception", "_")
            for e in self._exceptions:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                # f is None for module-level (import-time) exceptions.
                if f is None:
                    s = "%s" % filename
                else:
                    s = "%s:%s" % (filename, f.__name__)
                self.write_center(s, "_")
                self.write_exception(t, val, tb)
            self.write("\n")
        if self._tb_style != "no" and len(self._failed) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                self.write_center("%s:%s" % (filename, f.__name__), "_")
                self.write_exception(t, val, tb)
            self.write("\n")
        if self._tb_style != "no" and len(self._failed_doctest) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed_doctest:
                filename, msg = e
                self.write_center("", "_")
                self.write_center("%s" % filename, "_")
                self.write(msg)
            self.write("\n")
        self.write_center(text)
        ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
            len(self._failed_doctest) == 0
        if not ok:
            self.write("DO *NOT* COMMIT!\n")
        return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
# Root directory of the sympy checkout, used to relativize test filenames.
# (get_sympy_dir is defined earlier in this file — outside this excerpt.)
sympy_dir = get_sympy_dir()
|
ojengwa/sympy
|
sympy/utilities/runtests.py
|
Python
|
bsd-3-clause
| 77,702
|
[
"VisIt"
] |
2275f137bdd6cb746b73c5d0a32d72bd1a361b27a938da1a1bc10e29c8555f61
|
# Python 2.7
# Requires splinter and phantomjs
from splinter import Browser
import os, zipfile
from safesetup import SafeSetup
browser = Browser('phantomjs')
#browser = Browser() # Debugging - runs in Firefox
class SafeSync:
    """Upload files to a SAFE site through the module-level splinter browser.
    All methods share the single headless ``browser`` session created at
    import time; credentials and URL come from SafeSetup.
    """
    # Class-level placeholders; real values are filled in by get_config().
    url = ''
    username = ''
    password = ''
    def __init__(self):
        # Load credentials/URL, then authenticate immediately.
        self.get_config()
        self.login()
    def get_config(self):
        """Read username, password and target URL from SafeSetup."""
        setup = SafeSetup()
        (self.username, self.password) = setup.get_login()
        self.url = setup.get_safe_url()
    def login(self):
        """Log in to the SAFE site; raises ValueError when the post-login
        marker text is absent.
        """
        browser.visit(self.url)
        # A 302 status indicates a redirect to the login form.
        if browser.status_code == 302:
            browser.fill('username', self.username)
            browser.fill('password', self.password)
            button = browser.find_by_name('submit')
            button.click()
        if not browser.is_text_present('Files submitted on time'):
            raise ValueError('Authentication failed')
    def submit_file(self, filepath):
        """Upload a single file and verify it appears on the page."""
        browser.attach_file('File', filepath)
        button = browser.find_by_css('input.button:nth-child(1)')
        button.click()
        self.submit_check(filepath)
    def submit_check(self, filepath):
        # The page lists uploaded files by basename; absence means failure.
        if not browser.is_text_present(os.path.basename(filepath)):
            raise Exception('File failed to be uploaded')
    def get_files(self, dirpath, absolute=True):
        """Return the regular files in *dirpath* (absolute paths or names)."""
        if absolute:
            return [os.path.join(dirpath, f) for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath,f))]
        else:
            return [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath,f))]
    def zip_dir(self, zf, path):
        """Recursively add *path* to zipfile *zf*, skipping '.safe' subtrees."""
        for root, dirs, files in os.walk(path):
            if '/.safe' not in root:
                for file in files:
                    zf.write(os.path.join(root, file))
    def submit_directory(self, dirpath):
        """Upload every file in *dirpath* one by one."""
        files = self.get_files(dirpath)
        for f in files:
            self.submit_file(f)
    def submit_directory_zip(self, dirpath, zip_name='dir.zip'):
        """Zip *dirpath* into ./.safe/<zip_name>, upload it, then delete it."""
        zip_path = os.path.abspath('./.safe/'+zip_name)
        zf = zipfile.ZipFile(zip_path, 'w')
        self.zip_dir(zf, dirpath)
        zf.close()
        assert(os.path.isfile(zip_path))
        self.submit_file(zip_path)
        os.remove(zip_path)
def main():
    """Zip the current directory and submit the archive to SAFE."""
    SafeSync().submit_directory_zip('.')
if __name__ == '__main__':
    main()
|
BenElgar/SafeSync
|
safesync.py
|
Python
|
gpl-2.0
| 2,358
|
[
"VisIt"
] |
8cad0d98229c8b12d34919bbe955b2ec82e9771f35017d3d48580fab7f999819
|
import numpy as np
def correlated_timeseries_example(N=10000, tau=5.0, seed=None):
    """Generate synthetic timeseries data with known correlation time.
    Parameters
    ----------
    N : int, optional
        length (in number of samples) of timeseries to generate
    tau : float, optional
        correlation time (in number of samples) for timeseries
    seed : int, optional
        If not None, specify the numpy random number seed.
    Returns
    -------
    A_n : np.ndarray, shape=(N,), dtype=np.float32
        The synthetic timeseries; A_n[n] is the observable at sample n.
        (Previous docstring described dihedral angles from an unrelated
        function; corrected here.)
    Notes
    -----
    Synthetic timeseries generated using bivariate Gaussian process described
    by Janke (Eq. 41 of Ref. [1]).
    As noted in Eq. 45-46 of Ref. [1], the true integrated autocorrelation time will be given by
    tau_int = (1/2) coth(1 / 2 tau) = (1/2) (1+rho)/(1-rho)
    which, for tau >> 1, is approximated by
    tau_int = tau + 1/(12 tau) + O(1/tau^3)
    So for tau >> 1, tau_int is approximately the given exponential tau.
    References
    ----------
    .. [1] Janke W. Statistical analysis of simulations: Data correlations and error estimation. In 'Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms'. NIC Series, VOl. 10, pages 423-445, 2002.
    Examples
    --------
    Generate a timeseries of length 10000 with correlation time of 10.
    >>> A_t = correlated_timeseries_example(N=10000, tau=10.0)
    Generate an uncorrelated timeseries of length 1000.
    >>> A_t = correlated_timeseries_example(N=1000, tau=1.0)
    Generate a correlated timeseries with correlation time longer than the length.
    >>> A_t = correlated_timeseries_example(N=1000, tau=2000.0)
    """
    # Set random number generator into a known state for reproducibility.
    random = np.random.RandomState(seed)
    # AR(1) correlation coefficient rho, 0 <= rho < 1, and matching noise scale.
    rho = np.exp(-1.0 / tau)
    sigma = np.sqrt(1.0 - rho * rho)
    # Generate uncorrelated Gaussian variates.
    e_n = random.randn(N)
    # Generate correlated signal via the AR(1) recursion. Kept as an explicit
    # loop (rather than e.g. scipy.signal.lfilter) so the float32 accumulation
    # — and therefore previously generated results — is bit-for-bit unchanged.
    A_n = np.zeros([N], np.float32)
    A_n[0] = e_n[0]
    for n in range(1, N):
        A_n[n] = rho * A_n[n - 1] + sigma * e_n[n]
    return A_n
|
kyleabeauchamp/pymbar
|
pymbar/testsystems/timeseries.py
|
Python
|
lgpl-2.1
| 2,409
|
[
"Gaussian"
] |
84304d4f7b5240164751afcea1fe16f4beb60541950c084599afae631cf19384
|
# Smoke-test script for DIRAC's LoggerClient: exercises each query method and
# prints the first few records. Python 2 print-statement syntax; requires a
# configured DIRAC environment and a reachable logging service.
import DIRAC
from DIRAC import gLogger
from DIRAC.Core.Base.Script import parseCommandLine
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.FrameworkSystem.Client.LoggerClient import LoggerClient
# Standard DIRAC command-line initialisation (loads local configuration).
parseCommandLine()
LClient = LoggerClient()
# Every client call returns a DIRAC result dict: 'OK' plus 'Value'/'Message'.
retval = LClient.getSites()
if not retval['OK']: print retval['Message']
else: print retval['Value'][0:2]
# Messages filtered by system name and a start date.
conditions = { 'SystemName': ['Framework/SecurityLog', 'DataManagement/TransferDBMonitoring']}
retval = LClient.getMessages( conds = conditions, beginDate = '2008-10-06' )
if not retval['OK']: print retval['Message']
else:
    print retval['Value']['ParameterNames']
    print retval['Value']['Records'][0:2]
retval = LClient.getSystems()
if not retval['OK']: print retval['Message']
else: print retval['Value'][0:2]
retval = LClient.getSubSystems()
if not retval['OK']: print retval['Message']
else: print retval['Value'][0:2]
retval = LClient.getGroups()
if not retval['OK']: print retval['Message']
else: print retval['Value'][0:2]
retval = LClient.getFixedTextStrings()
if not retval['OK']: print retval['Message']
else: print retval['Value'][0:2]
# Look up messages by their fixed (deduplicated) text string.
retval = LClient.getMessagesByFixedText( 'File not found!' )
if not retval['OK']: print retval['Message']
else:
    print retval['Value']['ParameterNames']
    print retval['Value']['Records'][0:4]
# Grouped query: selected fields, filtered and ordered, grouped by system.
showFields= [ 'SystemName', 'SubSystemName', 'OwnerDN' ]
conditions={ 'SystemName': [ 'WorkloadManagement/Matcher', 'Framework/ProxyManager' ],
             'LogLevel': 'ERROR' }
orderFields = [ [ 'OwnerDN', 'ASC' ], ['SystemName', 'ASC' ] ]
retval = LClient.getGroupedMessages( fieldList = showFields, conds = conditions,
                                     beginDate = '2008-09-18',endDate = '2008-09-20',
                                     groupField = 'SystemName', orderList = orderFields )
if not retval['OK']: print retval['Message']
else:
    print retval['Value']['ParameterNames']
    print retval['Value']['Records'][0:4]
# Top-10 most frequent fixed-text strings by record count.
orderFields = [ [ 'recordCount', 'DESC' ] ]
retval = LClient.getGroupedMessages( groupField = 'FixedTextString', orderList = orderFields,
                                     maxRecords = 10 )
if not retval['OK']: print retval['Message']
else:
    print retval['Value']['ParameterNames']
    print retval['Value']['Records'][0:4]
|
sposs/DIRAC
|
FrameworkSystem/test/testLoggerClient.py
|
Python
|
gpl-3.0
| 2,273
|
[
"DIRAC"
] |
c3fd4f8730f077134592abad689ede5fcd30291e76ce129b761c4388178fd981
|
import unittest
from .cgen import *
class TestModule(unittest.TestCase):
    """Code/fix generation tests for Module nodes."""
    def setUp(self):
        self.gen = CodeGenerator()
        self.fix = FixGenerator()
    def testEmptyModule(self):
        p = Module()
        p = self.fix.visit(p)
        self.assertEqual(self.gen.generate(p), '')
    def testEmptyMainModule(self):
        # A main module must contain an executable __main__ guard.
        p = Module(main=True)
        p = self.fix.generate(p)
        code = self.gen.generate(p)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue('__main__' in code)
        self.assertTrue(' pass' in code)
    def testContent(self):
        p = Module()
        p.content.append("a")
        self.assertTrue('a' in self.gen.generate(p))
        p = self.fix.generate(p)
        self.assertTrue('a' in self.gen.generate(p))
    def testContentExtended(self):
        p = Module()
        p.content.append(Function("function"))
        p = self.fix.generate(p)
        self.assertTrue('function' in self.gen.generate(p))
class TestForLoop(unittest.TestCase):
    """Code generation tests for ForLoop nodes."""
    def setUp(self):
        self.gen = CodeGenerator()
        self.fix = FixGenerator()
    def testEmptyForLoop(self):
        n = ForLoop("i", ["xrange(10)"], [])
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("for i" in code)
        self.assertTrue("xrange(10):" in code)
        self.assertTrue(" pass" in code)
    def testStringForLoop(self):
        n = ForLoop("i", ["xrange(10)"], ['pass'])
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        self.assertTrue("for i" in code)
        self.assertTrue("xrange(10):" in code)
        self.assertTrue(" pass" in code)
class TestIfStatement(unittest.TestCase):
    """Code generation tests for IfStatement nodes."""
    def setUp(self):
        self.gen = CodeGenerator()
        self.fix = FixGenerator()
    def testEmptyIfStatement(self):
        n = IfStatement("i", [])
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("if i:" in code)
        self.assertTrue("pass" in code)
        # With an (empty) else body, an else clause must be emitted too.
        n = IfStatement("i", [], [])
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        self.assertTrue("if i:" in code)
        self.assertTrue("pass" in code)
        self.assertTrue("else:" in code)
class TestFunction(unittest.TestCase):
    """Code generation tests for Function nodes and their content types."""
    def setUp(self):
        self.gen = CodeGenerator()
        self.fix = FixGenerator()
    def testEmptyFunction(self):
        n = Function("test")
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("def test" in code)
        self.assertTrue("pass" in code)
    def testFunction(self):
        # Function bodies may mix Statement objects, plain strings and
        # callables; all three must appear in the generated code.
        class TestStatement(Statement):
            def get(self):
                return "TestStatement"
            def fix(self):
                return self.get()
        def CallableStatement():
            return "CallableStatement"
        n = Function("test")
        n.content.append(TestStatement())
        n.content.append("StringStatement")
        n.content.append(CallableStatement)
        code = self.gen.generate(n)
        self.assertTrue("def test" in code)
        self.assertTrue("pass" not in code)
        self.assertTrue("TestStatement" in code)
        self.assertTrue("StringStatement" in code)
        self.assertTrue("CallableStatement" in code)
        # Now fix all statements
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        self.assertTrue("def test" in code)
        self.assertTrue("pass" not in code)
        self.assertTrue("TestStatement" in code)
        self.assertTrue("StringStatement" in code)
        self.assertTrue("CallableStatement" in code)
class TestClass(unittest.TestCase):
    """Code generation tests for Class and Method nodes."""
    def setUp(self):
        self.gen = CodeGenerator()
        self.fix = FixGenerator()
    def testEmptyClass(self):
        n = Class('Test')
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("class Test" in code)
        self.assertTrue("(object)" in code)
        self.assertTrue("pass" in code)
    def testClass(self):
        n = Class('Test')
        n.content.append('x = 5')
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        self.assertTrue("class Test" in code)
        self.assertTrue("(object)" in code)
        self.assertTrue("pass" not in code)
    def testEmptyMethod(self):
        n = Method('test')
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        self.assertTrue("def test" in code)
        self.assertTrue("(self)" in code)
        self.assertTrue("pass" in code)
    def testEmptyMethodWithAssignment(self):
        n = Method('test')
        n.content.append('x = 5')
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        self.assertTrue("def test" in code)
        self.assertTrue("(self)" in code)
        self.assertTrue("pass" not in code)
class TestCallStatement(unittest.TestCase):
    """Code generation tests for CallStatement nodes."""
    def setUp(self):
        self.gen = CodeGenerator()
        self.fix = FixGenerator()
    def testCallStatement(self):
        func = Function("test")
        n = CallStatement(func, ['arg1', 'arg2'])
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("test" in code)
        self.assertTrue("(arg1, arg2)" in code)
class TestAssignment(unittest.TestCase):
    """Code generation tests for Assignment nodes."""
    def setUp(self):
        self.gen = CodeGenerator()
        self.fix = FixGenerator()
    def testAssignment(self):
        def CallableStatement():
            return "CallableStatement"
        n = Assignment("target", "=", [CallableStatement])
        code = self.gen.generate(n)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("target = CallableStatement" in code)
        # Fixing the node must not change the generated assignment.
        n = self.fix.generate(n)
        code = self.gen.generate(n)
        self.assertTrue("target = CallableStatement" in code)
class TestFixGenerator(unittest.TestCase):
    """Tests for FixGenerator visiting behaviour."""
    def setUp(self):
        self.gen = FixGenerator()
    def testEmptyGenerator(self):
        # The old try/self.fail()/bare-except pattern swallowed the
        # AssertionError raised by fail(), so the test passed even when
        # generate(None) did NOT raise; assertRaises checks this correctly.
        self.assertRaises(Exception, self.gen.generate, None)
    def testStatement(self):
        class TestStatement(Statement):
            def get(self):
                return "a"
            def fix(self):
                return "a"
        fixed = self.gen.visit(TestStatement())
        self.assertEqual(fixed, "a")
    def testVisitArgs(self):
        class TestStatement(Statement):
            def get(self):
                return "a"
            def fix(self):
                return "a"
        def func():
            return "c"
        fixed = self.gen.visit_args([func, 'b', TestStatement()])
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("a" in fixed)
        self.assertTrue("b" in fixed)
        self.assertTrue("c" in fixed)
        # Nested lists are passed through as lists.
        fixed = self.gen.visit_args([['b']])
        self.assertTrue(isinstance(fixed[0], list))
class TestCodeGenerator(unittest.TestCase):
    """Tests for CodeGenerator output."""
    def setUp(self):
        self.gen = CodeGenerator()
    def testEmptyGenerator(self):
        # Replaces try/self.fail()/bare-except, which swallowed the
        # AssertionError from fail() and so could never actually fail.
        self.assertRaises(Exception, self.gen.generate, None)
    def testCodeFunction(self):
        self.assertEqual(self.gen.code(4, 'a'), (' '*4) + 'a')
    def testStatement(self):
        class TestStatement(Statement):
            def get(self):
                return "a"
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
        self.assertTrue("a" in self.gen.generate(TestStatement()))
    def testVisitArgs(self):
        code = self.gen.visit_args(['b'])
        self.assertTrue('b' in code)
        code = self.gen.visit_args([['b']])
        self.assertTrue('b' in code)
        def func():
            return "c"
        code = self.gen.visit_args([func])
        self.assertTrue('c' in code)
if __name__ == "__main__":
    unittest.main()
|
myint/pyfuzz
|
pygen/tests.py
|
Python
|
bsd-3-clause
| 7,572
|
[
"VisIt"
] |
db43d437549f3a606c2bce2ee0734412baa8afb4099c6eaf8e093d5e705bf640
|
#!/usr/bin/env python3
"""
extract reads from a bam file and a list
write a fasta file
useful benchmark:
https://timoast.github.io/blog/2015-10-12-extractreads/
"""
import pysam
def extract_reads(options):
    """Extract the reads listed in *options.names* from *options.bam* and
    write every alignment hit to *options.out* as FASTA.
    Fixes over the previous version: ``name_indexed.find(name)`` was called
    twice per read name (once to probe, once to iterate), and the output
    file handle leaked if an exception occurred mid-write.
    """
    with open(options.names, "r") as f:
        # One read name per line; strip trailing newlines/whitespace.
        names = [line.rstrip() for line in f]
    bamfile = pysam.AlignmentFile(options.bam, 'rb')
    # Build an in-memory index from read name -> alignments (all hits).
    name_indexed = pysam.IndexedReads(bamfile)
    name_indexed.build()
    with open(options.out, "w") as f_out:
        for name in names:
            try:
                hits = name_indexed.find(name)
            except KeyError:
                # Read name absent from the bam: silently skip it
                # (preserves the original best-effort behaviour).
                continue
            for x in hits:
                # Header encodes name, reference, 1-based start and CIGAR.
                f_out.write(f">{x.query_name}_{x.reference_name}_{x.reference_start+1}_{x.cigarstring}\n")
                f_out.write(x.query_alignment_sequence + "\n")
if __name__ == "__main__":
    # Command-line entry point: -b bam, -n name list, -o output fasta.
    from argparse import ArgumentParser
    parser = ArgumentParser(description = "Extract reads by read name from the bam (all hits) and write to fasta")
    parser.add_argument("-b", "--bam", help = "bam file", required = True)
    parser.add_argument("-n", "--names", help = "list of read names to extract", required = True)
    parser.add_argument("-o", "--out", help = "output.fasta", required = True)
    options = parser.parse_args()
    extract_reads(options)
|
naumenko-sa/bioscripts
|
crispr/extract_reads.py
|
Python
|
mit
| 1,381
|
[
"pysam"
] |
c48e21b94c02c9c081fa5f9df9400a7902f572e5195a128a42237038badddf06
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to perform analyses of
the local environments (e.g., finding near neighbors)
of single sites in molecules and structures.
"""
import json
import math
import os
import warnings
from bisect import bisect_left
from collections import defaultdict, namedtuple
from copy import deepcopy
from functools import lru_cache
from math import acos, asin, atan2, cos, exp, fabs, pi, pow, sin, sqrt
from typing import List, Optional, Union, Dict, Any
import numpy as np
from monty.dev import requires
from monty.serialization import loadfn
from scipy.spatial import Voronoi
from pymatgen import yaml
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import IStructure, Structure
from pymatgen.analysis.bond_valence import BV_PARAMS, BVAnalyzer
from pymatgen.analysis.molecule_structure_comparator import CovalentRadius
from pymatgen.core.sites import PeriodicSite, Site
from pymatgen.core.structure import PeriodicNeighbor
try:
from openbabel import openbabel as ob
except Exception:
ob = None
__author__ = (
"Shyue Ping Ong, Geoffroy Hautier, Sai Jayaraman," + " Nils E. R. Zimmermann, Bharat Medasani, Evan Spotte-Smith"
)
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Nils E. R. Zimmermann"
__email__ = "nils.e.r.zimmermann@gmail.com"
__status__ = "Production"
__date__ = "August 17, 2017"
_directory = os.path.join(os.path.dirname(__file__))
with open(os.path.join(_directory, "op_params.yaml"), "rt") as f:
default_op_params = yaml.safe_load(f)
with open(os.path.join(_directory, "cn_opt_params.yaml"), "r") as f:
cn_opt_params = yaml.safe_load(f)
with open(os.path.join(_directory, "ionic_radii.json"), "r") as fp:
_ion_radii = json.load(fp)
class ValenceIonicRadiusEvaluator:
    """
    Computes site valences and ionic radii for a structure using bond valence
    analyzer
    """
    def __init__(self, structure):
        """
        Args:
            structure: pymatgen.core.structure.Structure
        """
        # Work on a copy so the caller's structure is never mutated here;
        # _get_valences may replace the copy with an oxi-state-decorated one.
        self._structure = structure.copy()
        self._valences = self._get_valences()
        self._ionic_radii = self._get_ionic_radii()
    @property
    def radii(self):
        """
        List of ionic radii of elements in the order of sites.
        """
        # NOTE(review): keyed by species string, so duplicate species at
        # different sites collapse to one entry — confirm intended.
        el = [site.species_string for site in self._structure.sites]
        radii_dict = dict(zip(el, self._ionic_radii))
        # print radii_dict
        return radii_dict
    @property
    def valences(self):
        """
        List of oxidation states of elements in the order of sites.
        """
        el = [site.species_string for site in self._structure.sites]
        valence_dict = dict(zip(el, self._valences))
        return valence_dict
    @property
    def structure(self):
        """
        Returns oxidation state decorated structure.
        """
        return self._structure.copy()
    def _get_ionic_radii(self):
        """
        Computes ionic radii of elements for all sites in the structure.
        If valence is zero, atomic radius is used.
        """
        radii = []
        vnn = VoronoiNN()
        def nearest_key(sorted_vals, skey):
            # Return the entry of sorted_vals closest to skey (ties favor
            # the smaller value).
            n = bisect_left(sorted_vals, skey)
            if n == len(sorted_vals):
                return sorted_vals[-1]
            if n == 0:
                return sorted_vals[0]
            before = sorted_vals[n - 1]
            after = sorted_vals[n]
            if after - skey < skey - before:
                return after
            return before
        for i in range(len(self._structure.sites)):
            site = self._structure.sites[i]
            # Plain Element (no oxidation state): fall back to atomic radius.
            if isinstance(site.specie, Element):
                radius = site.specie.atomic_radius
                # Handle elements with no atomic_radius
                # by using calculated values instead.
                if radius is None:
                    radius = site.specie.atomic_radius_calculated
                if radius is None:
                    raise ValueError("cannot assign radius to element {}".format(site.specie))
                radii.append(radius)
                continue
            el = site.specie.symbol
            oxi_state = int(round(site.specie.oxi_state))
            coord_no = int(round(vnn.get_cn(self._structure, i)))
            try:
                # Look up the tabulated radius for the nearest tabulated
                # oxidation state at the Voronoi coordination number.
                tab_oxi_states = sorted(map(int, _ion_radii[el].keys()))
                oxi_state = nearest_key(tab_oxi_states, oxi_state)
                radius = _ion_radii[el][str(oxi_state)][str(coord_no)]
            except KeyError:
                # Exact CN not tabulated: first try the adjacent integer CN
                # in the direction of the fractional Voronoi CN.
                if vnn.get_cn(self._structure, i) - coord_no > 0:
                    new_coord_no = coord_no + 1
                else:
                    new_coord_no = coord_no - 1
                try:
                    radius = _ion_radii[el][str(oxi_state)][str(new_coord_no)]
                    coord_no = new_coord_no
                except Exception:
                    # Still missing: interpolate between the two tabulated
                    # CNs bracketing coord_no, or clamp to the table's ends.
                    # NOTE(review): the cursor below reuses the loop index
                    # `i`; harmless only because the outer for reassigns it.
                    tab_coords = sorted(map(int, _ion_radii[el][str(oxi_state)].keys()))
                    new_coord_no = nearest_key(tab_coords, coord_no)
                    i = 0
                    for val in tab_coords:
                        if val > coord_no:
                            break
                        i = i + 1
                    if i == len(tab_coords):
                        key = str(tab_coords[-1])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    elif i == 0:
                        key = str(tab_coords[0])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    else:
                        key = str(tab_coords[i - 1])
                        radius1 = _ion_radii[el][str(oxi_state)][key]
                        key = str(tab_coords[i])
                        radius2 = _ion_radii[el][str(oxi_state)][key]
                        radius = (radius1 + radius2) / 2
            # implement complex checks later
            radii.append(radius)
        return radii
    def _get_valences(self):
        """
        Computes ionic valences of elements for all sites in the structure.
        """
        try:
            bv = BVAnalyzer()
            self._structure = bv.get_oxi_state_decorated_structure(self._structure)
            valences = bv.get_valences(self._structure)
        except Exception:
            # Retry without symmetry tolerance before giving up on BV.
            try:
                bv = BVAnalyzer(symm_tol=0.0)
                self._structure = bv.get_oxi_state_decorated_structure(self._structure)
                valences = bv.get_valences(self._structure)
            except Exception:
                # Last resort: each element's most common oxidation state.
                valences = []
                for site in self._structure.sites:
                    if len(site.specie.common_oxidation_states) > 0:
                        valences.append(site.specie.common_oxidation_states[0])
                    # Handle noble gas species
                    # which have no entries in common_oxidation_states.
                    else:
                        valences.append(0)
                # NOTE(review): when any guessed valence is nonzero the list
                # is reset to zeros, and the structure is decorated only when
                # all guesses are zero — this looks inverted; confirm against
                # upstream pymatgen.
                if sum(valences):
                    valences = [0] * self._structure.num_sites
                else:
                    self._structure.add_oxidation_state_by_site(valences)
                # raise
        # el = [site.specie.symbol for site in self._structure.sites]
        # el = [site.species_string for site in self._structure.sites]
        # el = [site.specie for site in self._structure.sites]
        # valence_dict = dict(zip(el, valences))
        # print valence_dict
        return valences
class NearNeighbors:
"""
Base class to determine near neighbors that typically include nearest
neighbors and others that are within some tolerable distance.
"""
def __eq__(self, other):
if isinstance(other, type(self)):
return self.__dict__ == other.__dict__
return False
    def __hash__(self):
        # NOTE(review): hashes only the NUMBER of attributes, so instances
        # with different parameter values collide heavily; presumably a
        # deliberate trade-off to keep mutable instances hashable — confirm.
        return len(self.__dict__.items())
@property
def structures_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Structure
objects?
"""
raise NotImplementedError("structures_allowed" " is not defined!")
@property
def molecules_allowed(self):
"""
Boolean property: can this NearNeighbors class be used with Molecule
objects?
"""
raise NotImplementedError("molecules_allowed" " is not defined!")
@property
def extend_structure_molecules(self):
"""
Boolean property: Do Molecules need to be converted to Structures to use
this NearNeighbors class? Note: this property is not defined for classes
for which molecules_allowed == False.
"""
raise NotImplementedError("extend_structures_molecule" " is not defined!")
def get_cn(self, structure, n, use_weights=False):
"""
Get coordination number, CN, of site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine CN.
use_weights (boolean): flag indicating whether (True)
to use weights for computing the coordination number
or not (False, default: each coordinated site has equal
weight).
Returns:
cn (integer or float): coordination number.
"""
siw = self.get_nn_info(structure, n)
return sum([e["weight"] for e in siw]) if use_weights else len(siw)
def get_cn_dict(self, structure, n, use_weights=False):
"""
Get coordination number, CN, of each element bonded to site with index n in structure
Args:
structure (Structure): input structure
n (integer): index of site for which to determine CN.
use_weights (boolean): flag indicating whether (True)
to use weights for computing the coordination number
or not (False, default: each coordinated site has equal
weight).
Returns:
cn (dict): dictionary of CN of each element bonded to site
"""
siw = self.get_nn_info(structure, n)
cn_dict = {}
for i in siw:
site_element = i["site"].species_string
if site_element not in cn_dict:
if use_weights:
cn_dict[site_element] = i["weight"]
else:
cn_dict[site_element] = 1
else:
if use_weights:
cn_dict[site_element] += i["weight"]
else:
cn_dict[site_element] += 1
return cn_dict
def get_nn(self, structure, n):
"""
Get near neighbors of site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site in structure for which to determine
neighbors.
Returns:
sites (list of Site objects): near neighbors.
"""
return [e["site"] for e in self.get_nn_info(structure, n)]
def get_weights_of_nn_sites(self, structure, n):
"""
Get weight associated with each near neighbor of site with
index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine the weights.
Returns:
weights (list of floats): near-neighbor weights.
"""
return [e["weight"] for e in self.get_nn_info(structure, n)]
def get_nn_images(self, structure, n):
"""
Get image location of all near neighbors of site with index n in
structure.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine the image
location of near neighbors.
Returns:
images (list of 3D integer array): image locations of
near neighbors.
"""
return [e["image"] for e in self.get_nn_info(structure, n)]
def get_nn_info(self, structure, n):
"""
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
information.
Returns:
siw (list of dicts): each dictionary provides information
about a single near neighbor, where key 'site' gives
access to the corresponding Site object, 'image' gives
the image location, and 'weight' provides the weight
that a given near-neighbor site contributes
to the coordination number (1 or smaller), 'site_index'
gives index of the corresponding site in
the original structure.
"""
raise NotImplementedError("get_nn_info(structure, n)" " is not defined!")
def get_all_nn_info(self, structure):
"""Get a listing of all neighbors for all sites in a structure
Args:
structure (Structure): Input structure
Return:
List of NN site information for each site in the structure. Each
entry has the same format as `get_nn_info`
"""
return [self.get_nn_info(structure, n) for n in range(len(structure))]
    def get_nn_shell_info(self, structure, site_idx, shell):
        """Get a certain nearest neighbor shell for a certain site.
        Determines all non-backtracking paths through the neighbor network
        computed by `get_nn_info`. The weight is determined by multiplying
        the weight of the neighbor at each hop through the network. For
        example, a 2nd-nearest-neighbor that has a weight of 1 from its
        1st-nearest-neighbor and weight 0.5 from the original site will
        be assigned a weight of 0.5.
        As this calculation may involve computing the nearest neighbors of
        atoms multiple times, the calculation starts by computing all of the
        neighbor info and then calling `_get_nn_shell_info`. If you are likely
        to call this method for more than one site, consider calling `get_all_nn`
        first and then calling this protected method yourself.
        Args:
            structure (Structure): Input structure
            site_idx (int): index of site for which to determine neighbor
                information.
            shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
        Returns:
            list of dictionaries. Each entry in the list is information about
                a certain neighbor in the structure, in the same format as
                `get_nn_info`.
        """
        all_nn_info = self.get_all_nn_info(structure)
        sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)
        # Update the site positions
        # Did not do this during NN options because that can be slower
        output = []
        for info in sites:
            # Rebuild each neighbor's Site at its accumulated periodic image
            # (the recursion only tracks images, not actual coordinates).
            orig_site = structure[info["site_index"]]
            info["site"] = PeriodicSite(
                orig_site.species,
                np.add(orig_site.frac_coords, info["image"]),
                structure.lattice,
                properties=orig_site.properties,
            )
            output.append(info)
        return output
def _get_nn_shell_info(
    self,
    structure,
    all_nn_info,
    site_idx,
    shell,
    _previous_steps=frozenset(),
    _cur_image=(0, 0, 0),
):
    """Private method for computing the neighbor shell information.

    Recursively walks the neighbor network, multiplying edge weights along
    each non-backtracking path and merging paths that terminate on the same
    (site, image) pair.

    Args:
        structure (Structure) - Structure being assessed
        all_nn_info ([[dict]]) - Results from `get_all_nn_info`
        site_idx (int) - index of site for which to determine neighbor
            information.
        shell (int) - Which neighbor shell to retrieve (1 == 1st NN shell)
        _previous_steps ({(site_idx, image}) - Internal use only: Set of
            sites that have already been traversed.
        _cur_image (tuple) - Internal use only Image coordinates of current atom
    Returns:
        list of dictionaries. Each entry in the list is information about
        a certain neighbor in the structure, in the same format as
        `get_nn_info`. Does not update the site positions
    """
    if shell <= 0:
        raise ValueError("Shell must be positive")

    # Append this site to the list of previously-visited sites.
    # NOTE: the frozenset() default is safe as a mutable-default would not be:
    # `union` returns a *new* set, never mutating the shared default object.
    _previous_steps = _previous_steps.union({(site_idx, _cur_image)})

    # Get all the neighbors of this site
    possible_steps = list(all_nn_info[site_idx])
    for i, step in enumerate(possible_steps):
        # Update the image information
        # Note: We do not update the site position yet, as making a
        # PeriodicSite for each intermediate step is too costly.
        # A shallow copy is taken so the cached all_nn_info dicts are not
        # mutated across recursive calls.
        step = dict(step)
        step["image"] = tuple(np.add(step["image"], _cur_image).tolist())
        possible_steps[i] = step

    # Get only the non-backtracking steps
    allowed_steps = [x for x in possible_steps if (x["site_index"], x["image"]) not in _previous_steps]

    # If we are the last step (i.e., shell == 1), done!
    if shell == 1:
        # No further work needed, just package these results
        return allowed_steps

    # If not, Get the N-1 NNs of these allowed steps
    terminal_neighbors = [
        self._get_nn_shell_info(
            structure,
            all_nn_info,
            x["site_index"],
            shell - 1,
            _previous_steps,
            x["image"],
        )
        for x in allowed_steps
    ]

    # Each allowed step results in many terminal neighbors
    # And, different first steps might results in the same neighbor
    # Now, we condense those neighbors into a single entry per neighbor
    all_sites = dict()
    for first_site, term_sites in zip(allowed_steps, terminal_neighbors):
        for term_site in term_sites:
            key = (term_site["site_index"], tuple(term_site["image"]))

            # The weight for this site is equal to the weight of the
            # first step multiplied by the weight of the terminal neighbor
            term_site["weight"] *= first_site["weight"]

            # Check if this site is already known
            value = all_sites.get(key)
            if value is not None:
                # If so, add to its weight
                value["weight"] += term_site["weight"]
            else:
                # If not, prepare to add it
                value = term_site
                all_sites[key] = value

    return list(all_sites.values())
@staticmethod
def _get_image(structure, site):
    """Private convenience method for get_nn_info,
    gives lattice image from provided PeriodicSite and Structure.

    Image is defined as displacement from original site in structure to a given site.
    i.e. if structure has a site at (-0.1, 1.0, 0.3), then (0.9, 0, 2.3) -> jimage = (1, -1, 2).
    Note that this method takes O(number of sites) due to searching an original site.

    Args:
        structure: Structure Object
        site: PeriodicSite Object
    Returns:
        image: ((int)*3) Lattice image
    """
    root_idx = NearNeighbors._get_original_site(structure, site)
    # Rounding the fractional-coordinate displacement yields the integer
    # lattice translation separating `site` from its root-cell image.
    frac_shift = np.subtract(site.frac_coords, structure[root_idx].frac_coords)
    return tuple(np.around(frac_shift).astype(int))
@staticmethod
def _get_original_site(structure, site):
"""Private convenience method for get_nn_info,
gives original site index from ProvidedPeriodicSite."""
for i, s in enumerate(structure):
if site.is_periodic_image(s):
return i
raise Exception("Site not found!")
def get_bonded_structure(self, structure, decorate=False, weights=True):
    """
    Obtain a StructureGraph object using this NearNeighbor
    class. Requires the optional dependency networkx
    (pip install networkx).

    Args:
        structure: Structure object.
        decorate (bool): whether to annotate site properties
            with order parameters using neighbors determined by
            this NearNeighbor class
        weights (bool): whether to include edge weights from
            NearNeighbor class in StructureGraph

    Returns: a pymatgen.analysis.graphs.StructureGraph object
    """
    # Imported locally because networkx is an optional dependency.
    from pymatgen.analysis.graphs import StructureGraph

    if decorate:
        # Tag every site with the order parameters of its local motif so
        # the resulting graph carries coordination-pattern information.
        ops = [self.get_local_order_parameters(structure, idx) for idx in range(len(structure))]
        structure.add_site_property("order_parameters", ops)

    return StructureGraph.with_local_env_strategy(structure, self, weights=weights)
def get_local_order_parameters(self, structure, n):
    """
    Calculate those local structure order parameters for
    the given site whose ideal CN corresponds to the
    underlying motif (e.g., CN=4, then calculate the
    square planar, tetrahedral, see-saw-like,
    rectangular see-saw-like order parameters).

    Args:
        structure: Structure object
        n (int): site index.

    Returns (Dict[str, float]):
        A dict of order parameters (values) and the
        underlying motif type (keys; for example, tetrahedral).
        None if the site's CN has no registered motifs.
    """
    # code from @nisse3000, moved here from graphs to avoid circular
    # import, also makes sense to have this as a general NN method
    cn = self.get_cn(structure, n)
    if cn not in [int(k_cn) for k_cn in cn_opt_params.keys()]:
        return None

    # Assemble the order-parameter types and optional parameter sets
    # registered for this coordination number.
    motif_names = list(cn_opt_params[cn].keys())
    op_types = []
    op_params = []
    for motif in motif_names:
        spec = cn_opt_params[cn][motif]
        op_types.append(spec[0])
        op_params.append(spec[1] if len(spec) > 1 else None)

    calculator = LocalStructOrderParams(op_types, parameters=op_params)
    # Site of interest first, followed by its neighbors.
    neighborhood = [structure[n]] + self.get_nn(structure, n)
    op_values = calculator.get_order_parameters(neighborhood, 0, indices_neighs=list(range(1, cn + 1)))

    results = {}
    for i, op_value in enumerate(op_values):
        results[motif_names[i]] = op_value
    return results
class VoronoiNN(NearNeighbors):
    """
    Uses a Voronoi algorithm to determine near neighbors for each site in a
    structure. Neighbors are the sites sharing a Voronoi facet with the
    central site; facet statistics (solid angle, area, etc.) act as weights.
    """

    def __init__(
        self,
        tol=0,
        targets=None,
        cutoff=13.0,
        allow_pathological=False,
        weight="solid_angle",
        extra_nn_info=True,
        compute_adj_neighbors=True,
    ):
        """
        Args:
            tol (float): tolerance parameter for near-neighbor finding. Faces that are
                smaller than `tol` fraction of the largest face are not included in the
                tessellation. (default: 0).
            targets (Element or list of Elements): target element(s).
            cutoff (float): cutoff radius in Angstrom to look for near-neighbor
                atoms. Defaults to 13.0.
            allow_pathological (bool): whether to allow infinite vertices in
                determination of Voronoi coordination.
            weight (string) - Statistic used to weigh neighbors (see the statistics
                available in get_voronoi_polyhedra)
            extra_nn_info (bool) - Add all polyhedron info to `get_nn_info`
            compute_adj_neighbors (bool) - Whether to compute which neighbors are
                adjacent. Turn off for faster performance
        """
        super().__init__()
        self.tol = tol
        self.cutoff = cutoff
        self.allow_pathological = allow_pathological
        self.targets = targets
        self.weight = weight
        self.extra_nn_info = extra_nn_info
        self.compute_adj_neighbors = compute_adj_neighbors

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return False

    def get_voronoi_polyhedra(self, structure, n):
        """
        Gives a weighted polyhedra around a site.

        See ref: A Proposed Rigorous Definition of Coordination Number,
        M. O'Keeffe, Acta Cryst. (1979). A35, 772-775

        Args:
            structure (Structure): structure for which to evaluate the
                coordination environment.
            n (integer): site index.

        Returns:
            A dict of sites sharing a common Voronoi facet with the site
            n mapped to a dictionary containing statistics about the facet:
                - solid_angle - Solid angle subtended by face
                - angle_normalized - Solid angle normalized such that the
                    faces with the largest
                - area - Area of the facet
                - face_dist - Distance between site n and the facet
                - volume - Volume of Voronoi cell for this face
                - n_verts - Number of vertices on the facet
        """
        # Assemble the list of neighbors used in the tessellation
        # Gets all atoms within a certain radius
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets

        center = structure[n]

        cutoff = self.cutoff

        # max cutoff is the longest diagonal of the cell + room for noise
        corners = [[1, 1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, -1]]
        d_corners = [np.linalg.norm(structure.lattice.get_cartesian_coords(c)) for c in corners]
        max_cutoff = max(d_corners) + 0.01

        # Retry loop: a too-small cutoff sphere can make qvoronoi fail, so on
        # RuntimeError the cutoff is doubled (capped at max_cutoff) and the
        # tessellation is attempted again.
        while True:
            try:
                neighbors = structure.get_sites_in_sphere(center.coords, cutoff)
                # Sort by distance so the central site is entry 0
                neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]

                # Run the Voronoi tessellation
                qvoronoi_input = [s.coords for s in neighbors]

                voro = Voronoi(qvoronoi_input)  # can give seg fault if cutoff is too small

                # Extract data about the site in question
                cell_info = self._extract_cell_info(structure, 0, neighbors, targets, voro, self.compute_adj_neighbors)
                break

            except RuntimeError as e:
                if cutoff >= max_cutoff:
                    if e.args and "vertex" in e.args[0]:
                        # pass through the error raised by _extract_cell_info
                        raise e
                    raise RuntimeError("Error in Voronoi neighbor finding; " "max cutoff exceeded")
                cutoff = min(cutoff * 2, max_cutoff + 0.001)
        return cell_info

    def get_all_voronoi_polyhedra(self, structure):
        """Get the Voronoi polyhedra for all site in a simulation cell

        Args:
            structure (Structure): Structure to be evaluated
        Returns:
            A dict of sites sharing a common Voronoi facet with the site
            n mapped to a dictionary containing statistics about the facet:
                - solid_angle - Solid angle subtended by face
                - angle_normalized - Solid angle normalized such that the
                    faces with the largest
                - area - Area of the facet
                - face_dist - Distance between site n and the facet
                - volume - Volume of Voronoi cell for this face
                - n_verts - Number of vertices on the facet
        """
        # Special case: For atoms with 1 site, the atom in the root image is not
        # included in the get_all_neighbors output. Rather than creating logic to add
        # that atom to the neighbor list, which requires detecting whether it will be
        # translated to reside within the unit cell before neighbor detection, it is
        # less complex to just call the one-by-one operation
        if len(structure) == 1:
            return [self.get_voronoi_polyhedra(structure, 0)]

        # Assemble the list of neighbors used in the tessellation
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets

        # Initialize the list of sites with the atoms in the origin unit cell
        # The `get_all_neighbors` function returns neighbors for each site's image in
        # the original unit cell. We start off with these central atoms to ensure they
        # are included in the tessellation
        sites = [x.to_unit_cell() for x in structure]
        indices = [(i, 0, 0, 0) for i, _ in enumerate(structure)]

        # Get all neighbors within a certain cutoff
        # Record both the list of these neighbors, and the site indices
        all_neighs = structure.get_all_neighbors(self.cutoff, include_index=True, include_image=True)
        for neighs in all_neighs:
            sites.extend([x[0] for x in neighs])
            indices.extend([(x[2],) + x[3] for x in neighs])

        # Get the non-duplicates (using the site indices for numerical stability)
        indices = np.array(indices, dtype=np.int_)
        indices, uniq_inds = np.unique(indices, return_index=True, axis=0)
        sites = [sites[i] for i in uniq_inds]

        # Sort array such that atoms in the root image are first
        # Exploit the fact that the array is sorted by the unique operation such that
        # the images associated with atom 0 are first, followed by atom 1, etc.
        (root_images,) = np.nonzero(np.abs(indices[:, 1:]).max(axis=1) == 0)

        del indices  # Save memory (tessellations can be costly)

        # Run the tessellation
        qvoronoi_input = [s.coords for s in sites]
        voro = Voronoi(qvoronoi_input)

        # Get the information for each neighbor
        return [
            self._extract_cell_info(structure, i, sites, targets, voro, self.compute_adj_neighbors)
            for i in root_images.tolist()
        ]

    def _extract_cell_info(self, structure, site_idx, sites, targets, voro, compute_adj_neighbors=False):
        """Get the information about a certain atom from the results of a tessellation

        Args:
            structure (Structure) - Structure being assessed
            site_idx (int) - Index of the atom in question
            sites ([Site]) - List of all sites in the tessellation
            targets ([Element]) - Target elements
            voro - Output of qvoronoi
            compute_adj_neighbors (boolean) - Whether to compute which neighbors are adjacent
        Returns:
            A dict of sites sharing a common Voronoi facet. Key is facet id
             (not useful) and values are dictionaries containing statistics
             about the facet:
                - site: Pymatgen site
                - solid_angle - Solid angle subtended by face
                - angle_normalized - Solid angle normalized such that the
                    faces with the largest
                - area - Area of the facet
                - face_dist - Distance between site n and the facet
                - volume - Volume of Voronoi cell for this face
                - n_verts - Number of vertices on the facet
                - adj_neighbors - Facet id's for the adjacent neighbors
        """
        # Get the coordinates of every vertex
        all_vertices = voro.vertices

        # Get the coordinates of the central site
        center_coords = sites[site_idx].coords

        # Iterate through all the faces in the tessellation
        results = {}
        for nn, vind in voro.ridge_dict.items():
            # Get only those that include the site in question
            if site_idx in nn:
                other_site = nn[0] if nn[1] == site_idx else nn[1]
                if -1 in vind:
                    # -1 indices correspond to the Voronoi cell
                    #  missing a face
                    if self.allow_pathological:
                        continue

                    raise RuntimeError(
                        "This structure is pathological," " infinite vertex in the voronoi " "construction"
                    )

                # Get the solid angle of the face
                facets = [all_vertices[i] for i in vind]
                angle = solid_angle(center_coords, facets)

                # Compute the volume of associated with this face
                volume = 0
                # qvoronoi returns vertices in CCW order, so I can break
                # the face up in to segments (0,1,2), (0,2,3), ... to compute
                # its area where each number is a vertex size
                for j, k in zip(vind[1:], vind[2:]):
                    volume += vol_tetra(
                        center_coords,
                        all_vertices[vind[0]],
                        all_vertices[j],
                        all_vertices[k],
                    )

                # Compute the distance of the site to the face
                face_dist = np.linalg.norm(center_coords - sites[other_site].coords) / 2

                # Compute the area of the face (knowing V=Ad/3)
                face_area = 3 * volume / face_dist

                # Compute the normal of the facet
                normal = np.subtract(sites[other_site].coords, center_coords)
                normal /= np.linalg.norm(normal)

                # Store by face index
                results[other_site] = {
                    "site": sites[other_site],
                    "normal": normal,
                    "solid_angle": angle,
                    "volume": volume,
                    "face_dist": face_dist,
                    "area": face_area,
                    "n_verts": len(vind),
                }

                # If we are computing which neighbors are adjacent, store the vertices
                if compute_adj_neighbors:
                    results[other_site]["verts"] = vind

        # all sites should have at least two connected ridges in periodic system
        if not results:
            raise ValueError("No Voronoi neighbours found for site - try increasing cutoff")

        # Get only target elements
        resultweighted = {}
        for nn_index, nstats in results.items():
            # Check if this is a target site
            nn = nstats["site"]
            if nn.is_ordered:
                if nn.specie in targets:
                    resultweighted[nn_index] = nstats
            else:  # is nn site is disordered
                for disordered_sp in nn.species.keys():
                    if disordered_sp in targets:
                        resultweighted[nn_index] = nstats

        # If desired, determine which neighbors are adjacent
        if compute_adj_neighbors:
            # Initialize storage for the adjacent neighbors
            adj_neighbors = dict((i, []) for i in resultweighted.keys())

            # Find the neighbors that are adjacent by finding those
            #  that contain exactly two vertices
            for a_ind, a_nninfo in resultweighted.items():
                # Get the indices for this site
                a_verts = set(a_nninfo["verts"])

                # Loop over all neighbors that have an index lower that this one
                # The goal here is to exploit the fact that neighbor adjacency is
                #  symmetric (if A is adj to B, B is adj to A)
                for b_ind, b_nninfo in resultweighted.items():
                    if b_ind > a_ind:
                        continue
                    if len(a_verts.intersection(b_nninfo["verts"])) == 2:
                        adj_neighbors[a_ind].append(b_ind)
                        adj_neighbors[b_ind].append(a_ind)

            # Store the results in the nn_info
            for key, neighbors in adj_neighbors.items():
                resultweighted[key]["adj_neighbors"] = neighbors

        return resultweighted

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure
        using Voronoi decomposition.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        # Run the tessellation
        nns = self.get_voronoi_polyhedra(structure, n)

        # Extract the NN info
        return self._extract_nn_info(structure, nns)

    def get_all_nn_info(self, structure):
        """
        Args:
            structure (Structure): input structure.

        Returns:
            All nn info for all sites.
        """
        all_voro_cells = self.get_all_voronoi_polyhedra(structure)
        return [self._extract_nn_info(structure, cell) for cell in all_voro_cells]

    def _extract_nn_info(self, structure, nns):
        """Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors

        Args:
            structure (Structure): Structure being evaluated
            nns ([dicts]): Nearest neighbor information for a structure
        Returns:
            (list of tuples (Site, array, float)): See nn_info
        """
        # Get the target information
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets

        # Extract the NN info; weights are normalized by the largest value of
        # the chosen statistic (self.weight), and faces below tol * max are dropped.
        siw = []
        max_weight = max(nn[self.weight] for nn in nns.values())
        for nstats in nns.values():
            site = nstats["site"]
            if nstats[self.weight] > self.tol * max_weight and _is_in_targets(site, targets):
                nn_info = {
                    "site": site,
                    "image": self._get_image(structure, site),
                    "weight": nstats[self.weight] / max_weight,
                    "site_index": self._get_original_site(structure, site),
                }

                if self.extra_nn_info:
                    # Add all the information about the site
                    # (note: this removes "site" from the stats dict in place)
                    poly_info = nstats
                    del poly_info["site"]
                    nn_info["poly_info"] = poly_info
                siw.append(nn_info)
        return siw
class IsayevNN(VoronoiNN):
    """
    Uses the algorithm defined in 10.1038/ncomms15679.

    Sites are considered neighbors if (i) they share a Voronoi facet and (ii) the
    bond distance is less than the sum of the Cordero covalent radii + 0.25 Å.
    """

    def __init__(
        self,
        tol: float = 0.25,
        targets: Optional[Union[Element, List[Element]]] = None,
        cutoff: float = 13.0,
        allow_pathological: bool = False,
        extra_nn_info: bool = True,
        compute_adj_neighbors: bool = True,
    ):
        """
        Args:
            tol: Tolerance in Å for bond distances that are considered coordinated.
            targets: Target element(s).
            cutoff: Cutoff radius in Angstrom to look for near-neighbor atoms.
            allow_pathological: Whether to allow infinite vertices in Voronoi
                coordination.
            extra_nn_info: Add all polyhedron info to `get_nn_info`.
            compute_adj_neighbors: Whether to compute which neighbors are adjacent. Turn
                off for faster performance.
        """
        # Parent init sets the VoronoiNN defaults (including weight="solid_angle");
        # the attributes below then override the ones this class customizes.
        super().__init__()
        self.tol = tol
        self.cutoff = cutoff
        self.allow_pathological = allow_pathological
        self.targets = targets
        self.extra_nn_info = extra_nn_info
        self.compute_adj_neighbors = compute_adj_neighbors

    def get_nn_info(self, structure: Structure, n: int) -> List[Dict[str, Any]]:
        """
        Get all near-neighbor site information.

        Gets the the associated image locations and weights of the site with index n
        in structure using Voronoi decomposition and distance cutoff.

        Args:
            structure: Input structure.
            n: Index of site for which to determine near-neighbor sites.

        Returns:
            List of dicts containing the near-neighbor information. Each dict has the
            keys:

            - "site": The near-neighbor site.
            - "image": The periodic image of the near-neighbor site.
            - "weight": The face weight of the Voronoi decomposition.
            - "site_index": The index of the near-neighbor site in the original
              structure.
        """
        nns = self.get_voronoi_polyhedra(structure, n)
        return self._filter_nns(structure, n, nns)

    def get_all_nn_info(self, structure: Structure) -> List[List[Dict[str, Any]]]:
        """
        Args:
            structure (Structure): input structure.

        Returns:
            List of near neighbor information for each site. See get_nn_info for the
            format of the data for each site.
        """
        all_nns = self.get_all_voronoi_polyhedra(structure)
        return [self._filter_nns(structure, n, nns) for n, nns in enumerate(all_nns)]

    def _filter_nns(self, structure: Structure, n: int, nns: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Extract and filter the NN info into the format needed by NearestNeighbors.

        Args:
            structure: The structure.
            n: The central site index.
            nns: Nearest neighbor information for the structure.

        Returns:
            See get_nn_info for the format of the returned data.
        """
        # Get the target information
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets

        site = structure[n]

        # Extract the NN info; weights are the facet areas normalized by the
        # largest facet area of this coordination polyhedron.
        siw = []
        max_weight = max(nn["area"] for nn in nns.values())
        for nstats in nns.values():
            # NOTE: pop() removes "site" from the stats dict in place, so the
            # "poly_info" payload below does not duplicate the Site object.
            nn = nstats.pop("site")

            # use the Cordero radius if it is available, otherwise the atomic radius
            cov_distance = _get_default_radius(site) + _get_default_radius(nn)
            nn_distance = np.linalg.norm(site.coords - nn.coords)

            # by default VoronoiNN only returns neighbors which share a Voronoi facet
            # therefore we don't need do to additional filtering based on the weight
            if _is_in_targets(nn, targets) and nn_distance <= cov_distance + self.tol:
                nn_info = {
                    "site": nn,
                    "image": self._get_image(structure, nn),
                    "weight": nstats["area"] / max_weight,
                    "site_index": self._get_original_site(structure, nn),
                }

                if self.extra_nn_info:
                    nn_info["poly_info"] = nstats
                siw.append(nn_info)
        return siw
def _is_in_targets(site, targets):
    """
    Test whether a site contains elements in the target list

    Args:
        site (Site): Site to assess
        targets ([Element]) List of elements
    Returns:
         (boolean) Whether this site contains a certain list of elements
    """
    # True only if every element on the site appears in the target list.
    return all(elem in targets for elem in _get_elements(site))
def _get_elements(site):
    """
    Get the list of elements for a Site

    Args:
        site (Site): Site to assess
    Returns:
        [Element]: List of elements
    """
    try:
        specie = site.specie
        # Coerce a bare symbol/Species into an Element when necessary
        return [specie] if isinstance(specie, Element) else [Element(specie)]
    except Exception:
        # Disordered sites have no single .specie; fall back to the species list
        return site.species.elements
class JmolNN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using an emulation
    of Jmol's default autoBond() algorithm. This version of the algorithm
    does not take into account any information regarding known charge
    states.
    """

    def __init__(self, tol=0.45, min_bond_distance=0.4, el_radius_updates=None):
        """
        Args:
            tol (float): tolerance parameter for bond determination
                (default: 0.45).
            min_bond_distance (float): minimum distance in Angstrom for a
                contact to be counted as a bond (default: 0.4); shorter
                contacts are rejected as unphysical overlaps.
            el_radius_updates: (dict) symbol->float to override default atomic
                radii table values
        """
        self.tol = tol
        self.min_bond_distance = min_bond_distance

        # Load elemental radii table
        bonds_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bonds_jmol_ob.yaml")
        with open(bonds_file, "r") as f:
            self.el_radius = yaml.safe_load(f)

        # Update any user preference elemental radii
        if el_radius_updates:
            self.el_radius.update(el_radius_updates)

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True

    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return True

    def get_max_bond_distance(self, el1_sym, el2_sym):
        """
        Use Jmol algorithm to determine bond length from atomic parameters

        Args:
            el1_sym: (str) symbol of atom 1
            el2_sym: (str) symbol of atom 2

        Returns: (float) max bond length
        """
        # The Jmol criterion is simply the sum of the two elemental radii
        # plus the tolerance. The previous sqrt((r1 + r2 + tol) ** 2)
        # round-trip was a no-op for these always-positive quantities.
        return self.el_radius[el1_sym] + self.el_radius[el2_sym] + self.tol

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n using the bond identification
        algorithm underlying Jmol.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a neighbor site, its image location,
                and its weight.
        """
        site = structure[n]

        # Determine relevant bond lengths based on atomic radii table
        bonds = {}
        for el in structure.composition.elements:
            bonds[site.specie, el] = self.get_max_bond_distance(site.specie.symbol, el.symbol)

        # Search for neighbors up to max bond length + tolerance
        max_rad = max(bonds.values()) + self.tol
        min_rad = min(bonds.values())

        siw = []
        for nn in structure.get_neighbors(site, max_rad):
            dist = nn.nn_distance
            # Confirm neighbor based on bond length specific to atom pair,
            # rejecting unphysically short contacts
            if dist <= (bonds[(site.specie, nn.specie)]) and (nn.nn_distance > self.min_bond_distance):
                # Weight is the shortest tabulated bond length divided by the
                # actual distance (closer neighbors weigh more)
                weight = min_rad / dist
                siw.append(
                    {
                        "site": nn,
                        "image": self._get_image(structure, nn),
                        "weight": weight,
                        "site_index": self._get_original_site(structure, nn),
                    }
                )
        return siw
class MinimumDistanceNN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using the
    nearest neighbor(s) at distance, d_min, plus all neighbors
    within a distance (1 + tol) * d_min, where tol is a
    (relative) distance tolerance parameter.
    """

    def __init__(self, tol=0.1, cutoff=10.0, get_all_sites=False):
        """
        Args:
            tol (float): tolerance parameter for neighbor identification
                (default: 0.1).
            cutoff (float): cutoff radius in Angstrom to look for trial
                near-neighbor sites (default: 10.0).
            get_all_sites (boolean): If this is set to True then the neighbor
                sites are only determined by the cutoff radius, tol is ignored
        """
        self.tol = tol
        self.cutoff = cutoff
        self.get_all_sites = get_all_sites

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True

    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return True

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n using the closest neighbor
        distance-based method.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a neighbor site, its image location,
                and its weight.
        """
        center = structure[n]
        candidates = structure.get_neighbors(center, self.cutoff)

        def _entry(neighbor, weight):
            # Package a single neighbor in the standard nn_info format
            return {
                "site": neighbor,
                "image": self._get_image(structure, neighbor),
                "weight": weight,
                "site_index": self._get_original_site(structure, neighbor),
            }

        if self.get_all_sites:
            # Every site inside the cutoff counts; the raw distance is the weight
            return [_entry(nbr, nbr.nn_distance) for nbr in candidates]

        # Otherwise keep only sites within (1 + tol) of the shortest distance,
        # weighting each one by its closeness relative to that minimum
        d_min = min([nbr.nn_distance for nbr in candidates])
        return [
            _entry(nbr, d_min / nbr.nn_distance)
            for nbr in candidates
            if nbr.nn_distance < (1.0 + self.tol) * d_min
        ]
class OpenBabelNN(NearNeighbors):
    """
    Determine near-neighbor sites and bond orders using OpenBabel API.

    NOTE: This strategy is only appropriate for molecules, and not for
    structures.
    """

    @requires(
        ob,
        "BabelMolAdaptor requires openbabel to be installed with "
        "Python bindings. Please get it at http://openbabel.org "
        "(version >=3.0.0).",
    )
    def __init__(self, order=True):
        """
        Args:
            order (bool): True if bond order should be returned as a weight, False
                if bond length should be used as a weight.
        """
        self.order = order

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return False

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True

    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return False

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites and weights (orders) of bonds for a given
        atom.

        Args:
            structure: Molecule object.
            n: index of site for which to determine near neighbors.

        Returns:
            (dict): representing a neighboring site and the type of
            bond present between site n and the neighboring site.
        """
        from pymatgen.io.babel import BabelMolAdaptor

        obmol = BabelMolAdaptor(structure).openbabel_mol

        siw = []

        # Get only the atom of interest.
        # NOTE(review): matching by exact float equality of coordinates assumes
        # BabelMolAdaptor preserves them bit-for-bit — confirm if this breaks.
        site_atom = [
            a
            for i, a in enumerate(ob.OBMolAtomDFSIter(obmol))
            if [a.GetX(), a.GetY(), a.GetZ()] == list(structure[n].coords)
        ][0]

        # Fix: perceive bond orders once for the whole molecule up front.
        # Previously PerceiveBondOrders() was called inside the neighbor loop,
        # redundantly re-running perception for every single neighbor.
        if self.order:
            obmol.PerceiveBondOrders()

        for neighbor in ob.OBAtomAtomIter(site_atom):
            coords = [neighbor.GetX(), neighbor.GetY(), neighbor.GetZ()]
            site = [a for a in structure if list(a.coords) == coords][0]
            index = structure.index(site)

            bond = site_atom.GetBond(neighbor)

            # Weight is either the perceived bond order or the bond length
            if self.order:
                weight = bond.GetBondOrder()
            else:
                weight = bond.GetLength()

            siw.append(
                {
                    "site": site,
                    "image": (0, 0, 0),
                    "weight": weight,
                    "site_index": index,
                }
            )
        return siw

    def get_bonded_structure(self, structure, decorate=False):
        """
        Obtain a MoleculeGraph object using this NearNeighbor
        class. Requires the optional dependency networkx
        (pip install networkx).

        Args:
            structure: Molecule object.
            decorate (bool): whether to annotate site properties
                with order parameters using neighbors determined by
                this NearNeighbor class

        Returns: a pymatgen.analysis.graphs.MoleculeGraph object
        """
        # requires optional dependency which is why it's not a top-level import
        from pymatgen.analysis.graphs import MoleculeGraph

        if decorate:
            # Decorate all sites in the underlying structure
            # with site properties that provides information on the
            # coordination number and coordination pattern based
            # on the (current) structure of this graph.
            order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
            structure.add_site_property("order_parameters", order_parameters)

        mg = MoleculeGraph.with_local_env_strategy(structure, self)
        return mg

    def get_nn_shell_info(self, structure, site_idx, shell):
        """Get a certain nearest neighbor shell for a certain site.

        Determines all non-backtracking paths through the neighbor network
        computed by `get_nn_info`. The weight is determined by multiplying
        the weight of the neighbor at each hop through the network. For
        example, a 2nd-nearest-neighbor that has a weight of 1 from its
        1st-nearest-neighbor and weight 0.5 from the original site will
        be assigned a weight of 0.5.

        As this calculation may involve computing the nearest neighbors of
        atoms multiple times, the calculation starts by computing all of the
        neighbor info and then calling `_get_nn_shell_info`. If you are likely
        to call this method for more than one site, consider calling `get_all_nn`
        first and then calling this protected method yourself.

        Args:
            structure (Molecule): Input structure
            site_idx (int): index of site for which to determine neighbor
                information.
            shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)

        Returns:
            list of dictionaries. Each entry in the list is information about
                a certain neighbor in the structure, in the same format as
                `get_nn_info`.
        """
        all_nn_info = self.get_all_nn_info(structure)
        sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)

        # Update the site positions
        # Did not do this during NN options because that can be slower
        output = []
        for info in sites:
            orig_site = structure[info["site_index"]]
            info["site"] = Site(orig_site.species, orig_site._coords, properties=orig_site.properties)
            output.append(info)

        return output
class CovalentBondNN(NearNeighbors):
    """
    Determine near-neighbor sites and bond orders using built-in
    pymatgen.Molecule CovalentBond functionality.

    NOTE: This strategy is only appropriate for molecules, and not for
    structures.
    """

    def __init__(self, tol=0.2, order=True):
        """
        Args:
            tol (float): Tolerance for covalent bond checking.
            order (bool): If True (default), this class will compute bond
                orders. If False, bond lengths will be computed.
        """
        self.tol = tol
        self.order = order
        # Populated lazily by get_nn_info with the covalent bonds of the
        # most recently analyzed molecule.
        self.bonds = None

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return False

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True

    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return False

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites and weights (orders) of bonds for a given
        atom.

        :param structure: input Molecule.
        :param n: index of site for which to determine near neighbors.
        :return: [dict] representing a neighboring site and the type of
            bond present between site n and the neighboring site.
        """
        # This is unfortunately inefficient, but is the best way to fit the
        # current NearNeighbors scheme
        self.bonds = structure.get_covalent_bonds(tol=self.tol)
        siw = []
        for bond in self.bonds:
            capture_bond = False
            # A bond is relevant if either endpoint equals site n; the
            # neighbor is then the opposite endpoint.
            if bond.site1 == structure[n]:
                site = bond.site2
                capture_bond = True
            elif bond.site2 == structure[n]:
                site = bond.site1
                capture_bond = True
            if capture_bond:
                index = structure.index(site)
                # Weight is either the estimated bond order or the raw bond
                # length, depending on how this instance was configured.
                if self.order:
                    weight = bond.get_bond_order()
                else:
                    weight = bond.length
                siw.append(
                    {
                        "site": site,
                        # Molecules are non-periodic, so the image is always
                        # the trivial (0, 0, 0) one.
                        "image": (0, 0, 0),
                        "weight": weight,
                        "site_index": index,
                    }
                )
        return siw

    def get_bonded_structure(self, structure, decorate=False):
        """
        Obtain a MoleculeGraph object using this NearNeighbor
        class.

        Args:
            structure: Molecule object.
            decorate (bool): whether to annotate site properties
                with order parameters using neighbors determined by
                this NearNeighbor class

        Returns: a pymatgen.analysis.graphs.MoleculeGraph object
        """
        # requires optional dependency which is why it's not a top-level import
        from pymatgen.analysis.graphs import MoleculeGraph

        if decorate:
            # Decorate all sites in the underlying structure
            # with site properties that provides information on the
            # coordination number and coordination pattern based
            # on the (current) structure of this graph.
            order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
            structure.add_site_property("order_parameters", order_parameters)
        mg = MoleculeGraph.with_local_env_strategy(structure, self)
        return mg

    def get_nn_shell_info(self, structure, site_idx, shell):
        """Get a certain nearest neighbor shell for a certain site.

        Determines all non-backtracking paths through the neighbor network
        computed by `get_nn_info`. The weight is determined by multiplying
        the weight of the neighbor at each hop through the network. For
        example, a 2nd-nearest-neighbor that has a weight of 1 from its
        1st-nearest-neighbor and weight 0.5 from the original site will
        be assigned a weight of 0.5.

        As this calculation may involve computing the nearest neighbors of
        atoms multiple times, the calculation starts by computing all of the
        neighbor info and then calling `_get_nn_shell_info`. If you are likely
        to call this method for more than one site, consider calling
        `get_all_nn_info` first and then calling the protected
        `_get_nn_shell_info` method yourself.

        Args:
            structure (Molecule): Input structure
            site_idx (int): index of site for which to determine neighbor
                information.
            shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)

        Returns:
            list of dictionaries. Each entry in the list is information about
            a certain neighbor in the structure, in the same format as
            `get_nn_info`.
        """
        all_nn_info = self.get_all_nn_info(structure)
        sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)
        # Update the site positions
        # Did not do this during NN options because that can be slower
        # NOTE(review): each neighbor is rebuilt as a plain Site from the
        # private `_coords` attribute, carrying over species and properties.
        output = []
        for info in sites:
            orig_site = structure[info["site_index"]]
            info["site"] = Site(orig_site.species, orig_site._coords, properties=orig_site.properties)
            output.append(info)
        return output
class MinimumOKeeffeNN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using the
    neighbor(s) at closest relative distance, d_min_OKeffee, plus some
    relative tolerance, where bond valence parameters from O'Keeffe's
    bond valence method (J. Am. Chem. Soc. 1991, 3226-3229) are used
    to calculate relative distances.
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        """
        Args:
            tol (float): tolerance parameter for neighbor identification
                (default: 0.1).
            cutoff (float): cutoff radius in Angstrom to look for trial
                near-neighbor sites (default: 10.0).
        """
        self.tol = tol
        self.cutoff = cutoff

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True

    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return True

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n using the closest relative
        neighbor distance-based method with O'Keeffe parameters.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a neighbor site, its image location,
                and its weight.
        """
        site = structure[n]
        neighs_dists = structure.get_neighbors(site, self.cutoff)
        # Fall back to the species string when `specie.element` is not
        # available (e.g. for disordered sites).
        try:
            eln = site.specie.element
        except Exception:
            eln = site.species_string
        # Relative distance = actual distance divided by the O'Keeffe
        # bond-valence length prediction for this element pair.
        reldists_neighs = []
        for nn in neighs_dists:
            neigh = nn
            dist = nn.nn_distance
            try:
                el2 = neigh.specie.element
            except Exception:
                el2 = neigh.species_string
            reldists_neighs.append([dist / get_okeeffe_distance_prediction(eln, el2), neigh])
        siw = []
        min_reldist = min([reldist for reldist, neigh in reldists_neighs])
        # Keep every neighbor within (1 + tol) of the closest relative
        # distance; the weight decays as min_reldist / reldist (<= 1).
        for reldist, s in reldists_neighs:
            if reldist < (1.0 + self.tol) * min_reldist:
                w = min_reldist / reldist
                siw.append(
                    {
                        "site": s,
                        "image": self._get_image(structure, s),
                        "weight": w,
                        "site_index": self._get_original_site(structure, s),
                    }
                )
        return siw
class MinimumVIRENN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using the
    neighbor(s) at closest relative distance, d_min_VIRE, plus some
    relative tolerance, where atom radii from the
    ValenceIonicRadiusEvaluator (VIRE) are used
    to calculate relative distances.
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        """
        Args:
            tol (float): tolerance parameter for neighbor identification
                (default: 0.1).
            cutoff (float): cutoff radius in Angstrom to look for trial
                near-neighbor sites (default: 10.0).
        """
        self.tol = tol
        self.cutoff = cutoff

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return False

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n using the closest relative
        neighbor distance-based method with VIRE atomic/ionic radii.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a neighbor site, its image location,
                and its weight.
        """
        # _get_vire caches the ValenceIonicRadiusEvaluator for the most
        # recently seen structure, so repeated calls on the same structure
        # do not recompute valences.
        vire = _get_vire(structure)
        site = vire.structure[n]
        neighs_dists = vire.structure.get_neighbors(site, self.cutoff)
        rn = vire.radii[vire.structure[n].species_string]
        # Relative distance = actual distance divided by the sum of the
        # two VIRE radii for the central and neighboring species.
        reldists_neighs = []
        for nn in neighs_dists:
            reldists_neighs.append([nn.nn_distance / (vire.radii[nn.species_string] + rn), nn])
        siw = []
        min_reldist = min([reldist for reldist, neigh in reldists_neighs])
        # Keep every neighbor within (1 + tol) of the closest relative
        # distance; the weight decays as min_reldist / reldist (<= 1).
        for reldist, s in reldists_neighs:
            if reldist < (1.0 + self.tol) * min_reldist:
                w = min_reldist / reldist
                siw.append(
                    {
                        "site": s,
                        "image": self._get_image(vire.structure, s),
                        "weight": w,
                        "site_index": self._get_original_site(vire.structure, s),
                    }
                )
        return siw
def _get_vire(structure: Union[Structure, IStructure]):
    """Return a (cached) ValenceIonicRadiusEvaluator for ``structure``.

    Args:
        structure: A structure, either mutable or immutable.

    Returns:
        Output of `ValenceIonicRadiusEvaluator(structure)`
    """
    # Mutable Structure objects are not hashable and therefore cannot be
    # used as lru_cache keys; freeze them into an IStructure first.
    frozen = IStructure.from_sites(structure) if isinstance(structure, Structure) else structure
    return _get_vire_istructure(frozen)
@lru_cache(maxsize=1)
def _get_vire_istructure(structure: IStructure):
    """Get the ValenceIonicRadiusEvaluator object for an immutable structure
    taking advantage of caching.

    Args:
        structure: A structure.

    Returns:
        Output of `ValenceIonicRadiusEvaluator(structure)`
    """
    # maxsize=1: only the most recently evaluated structure is cached,
    # which covers the common pattern of many successive queries against
    # the same structure without keeping older evaluators alive.
    return ValenceIonicRadiusEvaluator(structure)
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    The polygonal facet defined by ``coords`` is fanned into triangles
    (coords[0], coords[i], coords[i+1]) and each triangle's contribution is
    evaluated with the arctan-based tetrahedron formula.

    Args:
        center (3x1 array): Center to measure solid angle from.
        coords (Nx3 array): List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    # Displacement vectors from the center and their magnitudes.
    vecs = [np.subtract(c, center) for c in coords]
    norms = [np.linalg.norm(v) for v in vecs]
    v0, n0 = vecs[0], norms[0]

    # Sum the solid angle of each tetrahedron in the triangle fan.
    # Following: https://en.wikipedia.org/wiki/Solid_angle#Tetrahedron
    total = 0
    for idx in range(1, len(vecs) - 1):
        vi, vj = vecs[idx], vecs[idx + 1]
        ni, nj = norms[idx], norms[idx + 1]
        numerator = np.abs(np.dot(v0, np.cross(vi, vj)))
        denominator = (
            n0 * ni * nj
            + nj * np.dot(v0, vi)
            + ni * np.dot(v0, vj)
            + n0 * np.dot(vi, vj)
        )
        if denominator == 0:
            # Degenerate denominator: the half-angle is exactly +/- pi/2.
            half = 0.5 * pi if numerator > 0 else -0.5 * pi
        else:
            half = np.arctan(numerator / denominator)
        # Fold negative branches back into [0, pi) before doubling.
        total += 2 * (half if half > 0 else half + np.pi)
    return total
def vol_tetra(vt1, vt2, vt3, vt4):
    """
    Calculate the volume of a tetrahedron, given the four vertices of vt1,
    vt2, vt3 and vt4.

    Uses the scalar triple product of the three edges meeting at vt4:
    V = |(a - d) . ((b - d) x (c - d))| / 6.

    Args:
        vt1 (array-like): coordinates of vertex 1.
        vt2 (array-like): coordinates of vertex 2.
        vt3 (array-like): coordinates of vertex 3.
        vt4 (array-like): coordinates of vertex 4.

    Returns:
        (float): volume of the tetrahedron.
    """
    edge_a = vt1 - vt4
    edge_b = vt2 - vt4
    edge_c = vt3 - vt4
    return np.abs(np.dot(edge_a, np.cross(edge_b, edge_c))) / 6
def get_okeeffe_params(el_symbol):
    """
    Returns the elemental parameters related to atom size and
    electronegativity which are used for estimating bond-valence
    parameters (bond length) of pairs of atoms on the basis of data
    provided in 'Atoms Sizes and Bond Lengths in Molecules and Crystals'
    (O'Keeffe & Brese, 1991).

    Args:
        el_symbol (str): element symbol.

    Returns:
        (dict): atom-size ('r') and electronegativity-related ('c')
            parameter.

    Raises:
        RuntimeError: if no parameters are tabulated for the element.
    """
    el = Element(el_symbol)
    # Membership test directly on the dict (no need to materialize keys).
    if el not in BV_PARAMS:
        raise RuntimeError(
            # Fixed message: missing space and "dictonary" typo.
            "Could not find O'Keeffe parameters for element"
            ' "{}" in "BV_PARAMS" dictionary'
            " provided by pymatgen".format(el_symbol)
        )
    return BV_PARAMS[el]
def get_okeeffe_distance_prediction(el1, el2):
    """
    Returns an estimate of the bond valence parameter (bond length) using
    the derived parameters from 'Atoms Sizes and Bond Lengths in Molecules
    and Crystals' (O'Keeffe & Brese, 1991). The estimate is based on two
    experimental parameters: r and c. The value for r is based off radius,
    while c is (usually) the Allred-Rochow electronegativity. Values used
    are *not* generated from pymatgen, and are found in
    'okeeffe_params.json'.

    Args:
        el1, el2 (Element): two Element objects

    Returns:
        a float value of the predicted bond length
    """
    params1 = get_okeeffe_params(el1)
    params2 = get_okeeffe_params(el2)
    r1, c1 = params1["r"], params1["c"]
    r2, c2 = params2["r"], params2["c"]
    # O'Keeffe & Brese correction term subtracted from the sum of radii.
    correction = r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / (c1 * r1 + c2 * r2)
    return r1 + r2 - correction
def get_neighbors_of_site_with_index(struct, n, approach="min_dist", delta=0.1, cutoff=10.0):
    """
    Returns the neighbors of a given site using a specific neighbor-finding
    method.

    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.

    Returns: neighbor sites.

    Raises:
        RuntimeError: if ``approach`` is not one of the supported names.
    """
    # Dispatch table mapping approach names to NearNeighbors classes.
    finder_classes = {
        "min_dist": MinimumDistanceNN,
        "voronoi": VoronoiNN,
        "min_OKeeffe": MinimumOKeeffeNN,
        "min_VIRE": MinimumVIRENN,
    }
    if approach not in finder_classes:
        raise RuntimeError("unsupported neighbor-finding method ({}).".format(approach))
    return finder_classes[approach](tol=delta, cutoff=cutoff).get_nn(struct, n)
def site_is_of_motif_type(struct, n, approach="min_dist", delta=0.1, cutoff=10.0, thresh=None):
    """
    Returns the motif type of the site with index n in structure struct;
    currently featuring "tetrahedral", "octahedral", "bcc", and "cp"
    (close-packed: fcc and hcp) as well as "square pyramidal" and
    "trigonal bipyramidal". If the site is not recognized,
    "unrecognized" is returned. If a site should be assigned to two
    different motifs, "multiple assignments" is returned.

    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.
        thresh (dict): thresholds for motif criteria (currently, required
            keys and their default values are "qtet": 0.5,
            "qoct": 0.5, "qbcc": 0.5, "q6": 0.4; "qtribipyr": 0.8 and
            "qsqpyr": 0.8 are also used).

    Returns: motif type (str).
    """
    if thresh is None:
        thresh = {
            "qtet": 0.5,
            "qoct": 0.5,
            "qbcc": 0.5,
            "q6": 0.4,
            "qtribipyr": 0.8,
            "qsqpyr": 0.8,
        }
    # opvals indices below follow this OP-type ordering:
    # 0: cn, 1: tet, 2: oct, 3: bcc, 4: q6, 5: sq_pyr, 6: tri_bipyr.
    ops = LocalStructOrderParams(["cn", "tet", "oct", "bcc", "q6", "sq_pyr", "tri_bipyr"])
    neighs_cent = get_neighbors_of_site_with_index(struct, n, approach=approach, delta=delta, cutoff=cutoff)
    # The central site is appended last so it can serve as the "center"
    # (index len-1) with all preceding entries as its neighbors.
    neighs_cent.append(struct.sites[n])
    opvals = ops.get_order_parameters(
        neighs_cent,
        len(neighs_cent) - 1,
        indices_neighs=list(range(len(neighs_cent) - 1)),
    )
    cn = int(opvals[0] + 0.5)  # round the float coordination number to int
    motif_type = "unrecognized"
    nmotif = 0  # counts how many motif criteria matched
    if cn == 4 and opvals[1] > thresh["qtet"]:
        motif_type = "tetrahedral"
        nmotif += 1
    if cn == 5 and opvals[5] > thresh["qsqpyr"]:
        motif_type = "square pyramidal"
        nmotif += 1
    if cn == 5 and opvals[6] > thresh["qtribipyr"]:
        motif_type = "trigonal bipyramidal"
        nmotif += 1
    if cn == 6 and opvals[2] > thresh["qoct"]:
        motif_type = "octahedral"
        nmotif += 1
    if cn == 8 and (opvals[3] > thresh["qbcc"] and opvals[1] < thresh["qtet"]):
        motif_type = "bcc"
        nmotif += 1
    if cn == 12 and (
        opvals[4] > thresh["q6"] and opvals[1] < thresh["q6"] and opvals[2] < thresh["q6"] and opvals[3] < thresh["q6"]
    ):
        motif_type = "cp"
        nmotif += 1
    # Ambiguity: more than one criterion fired for this site.
    if nmotif > 1:
        motif_type = "multiple assignments"
    return motif_type
def gramschmidt(vin, uin):
    """
    Returns that part of the first input vector
    that is orthogonal to the second input vector.
    The output vector is not normalized.

    Args:
        vin (numpy array):
            first input vector
        uin (numpy array):
            second input vector

    Raises:
        ValueError: if ``uin`` has zero (or numerically negative)
            squared norm, so no projection can be formed.
    """
    uin_sq = np.inner(uin, uin)
    if uin_sq <= 0.0:
        raise ValueError("Zero or negative inner product!")
    # Subtract the projection of vin onto uin.
    projection_coeff = np.inner(vin, uin) / uin_sq
    return vin - projection_coeff * uin
class LocalStructOrderParams:
    """
    This class permits the calculation of various types of local
    structure order parameters.
    """

    # Name-mangled class-level whitelist of all order-parameter type
    # strings accepted by the constructor (validated in __init__; see the
    # __init__ docstring for what each type means).
    __supported_types = (
        "cn",
        "sgl_bd",
        "bent",
        "tri_plan",
        "tri_plan_max",
        "reg_tri",
        "sq_plan",
        "sq_plan_max",
        "pent_plan",
        "pent_plan_max",
        "sq",
        "tet",
        "tet_max",
        "tri_pyr",
        "sq_pyr",
        "sq_pyr_legacy",
        "tri_bipyr",
        "sq_bipyr",
        "oct",
        "oct_legacy",
        "pent_pyr",
        "hex_pyr",
        "pent_bipyr",
        "hex_bipyr",
        "T",
        "cuboct",
        "cuboct_max",
        "see_saw_rect",
        "bcc",
        "q2",
        "q4",
        "q6",
        "oct_max",
        "hex_plan_max",
        "sq_face_cap_trig_pris",
    )
    def __init__(self, types, parameters=None, cutoff=-10.0):
        """
        Args:
            types ([string]): list of strings representing the types of
                order parameters to be calculated. Note that multiple
                mentions of the same type may occur. Currently available
                types recognize following environments:

                  "cn": simple coordination number---normalized
                        if desired;
                  "sgl_bd": single bonds;
                  "bent": bent (angular) coordinations
                          (Zimmermann & Jain, in progress, 2017);
                  "T": T-shape coordinations;
                  "see_saw_rect": see saw-like coordinations;
                  "tet": tetrahedra
                         (Zimmermann et al., submitted, 2017);
                  "oct": octahedra
                         (Zimmermann et al., submitted, 2017);
                  "bcc": body-centered cubic environments (Peters,
                         J. Chem. Phys., 131, 244103, 2009);
                  "tri_plan": trigonal planar environments;
                  "sq_plan": square planar environments;
                  "pent_plan": pentagonal planar environments;
                  "tri_pyr": trigonal pyramids (coordinated atom is in
                             the center of the basal plane);
                  "sq_pyr": square pyramids;
                  "pent_pyr": pentagonal pyramids;
                  "hex_pyr": hexagonal pyramids;
                  "tri_bipyr": trigonal bipyramids;
                  "sq_bipyr": square bipyramids;
                  "pent_bipyr": pentagonal bipyramids;
                  "hex_bipyr": hexagonal bipyramids;
                  "cuboct": cuboctahedra;
                  "q2": motif-unspecific bond orientational order
                        parameter (BOOP) of weight l=2 (Steinhardt
                        et al., Phys. Rev. B, 28, 784-805, 1983);
                  "q4": BOOP of weight l=4;
                  "q6": BOOP of weight l=6.
                  "reg_tri": regular triangle with varying height
                             to basal plane;
                  "sq": square coordination (cf., "reg_tri");
                  "oct_legacy": original Peters-style OP recognizing
                                octahedral coordination environments
                                (Zimmermann et al., J. Am. Chem. Soc.,
                                137, 13352-13361, 2015) that can, however,
                                produce small negative values sometimes.
                  "sq_pyr_legacy": square pyramids (legacy);
            parameters ([dict]): list of dictionaries
                that store float-type parameters associated with the
                definitions of the different order parameters
                (length of list = number of OPs). If an entry
                is None, default values are used that are read from
                the op_params.yaml file. With few exceptions, 9 different
                parameters are used across all OPs:

                  "norm": normalizing constant (used in "cn"
                          (default value: 1)).
                  "TA": target angle (TA) in fraction of 180 degrees
                        ("bent" (1), "tet" (0.6081734479693927),
                        "tri_plan" (0.66666666667), "pent_plan" (0.6),
                        "sq_pyr_legacy" (0.5)).
                  "IGW_TA": inverse Gaussian width (IGW) for penalizing
                            angles away from the target angle in inverse
                            fractions of 180 degrees to ("bent" and "tet" (15),
                            "tri_plan" (13.5), "pent_plan" (18),
                            "sq_pyr_legacy" (30)).
                  "IGW_EP": IGW for penalizing angles away from the
                            equatorial plane (EP) at 90 degrees ("T", "see_saw_rect",
                            "oct", "sq_plan", "tri_pyr", "sq_pyr", "pent_pyr",
                            "hex_pyr", "tri_bipyr", "sq_bipyr", "pent_bipyr",
                            "hex_bipyr", and "oct_legacy" (18)).
                  "fac_AA": factor applied to azimuth angle (AA) in cosine
                            term ("T", "tri_plan", and "sq_plan" (1), "tet",
                            "tri_pyr", and "tri_bipyr" (1.5), "oct", "sq_pyr",
                            "sq_bipyr", and "oct_legacy" (2), "pent_pyr"
                            and "pent_bipyr" (2.5), "hex_pyr" and
                            "hex_bipyr" (3)).
                  "exp_cos_AA": exponent applied to cosine term of AA
                                ("T", "tet", "oct", "tri_plan", "sq_plan",
                                "tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr",
                                "tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr",
                                and "oct_legacy" (2)).
                  "min_SPP": smallest angle (in radians) to consider
                             a neighbor to be
                             at South pole position ("see_saw_rect", "oct", "bcc",
                             "sq_plan", "tri_bipyr", "sq_bipyr", "pent_bipyr",
                             "hex_bipyr", "cuboct", and "oct_legacy"
                             (2.792526803190927)).
                  "IGW_SPP": IGW for penalizing angles away from South
                             pole position ("see_saw_rect", "oct", "bcc", "sq_plan",
                             "tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr",
                             "cuboct", and "oct_legacy" (15)).
                  "w_SPP": weight for South pole position relative to
                           equatorial positions ("see_saw_rect" and "sq_plan" (1),
                           "cuboct" (1.8), "tri_bipyr" (2), "oct",
                           "sq_bipyr", and "oct_legacy" (3), "pent_bipyr" (4),
                           "hex_bipyr" (5), "bcc" (6)).
            cutoff (float): Cutoff radius to determine which nearest
                neighbors are supposed to contribute to the order
                parameters. If the value is negative the neighboring
                sites found by distance and cutoff radius are further
                pruned using the get_nn method from the
                VoronoiNN class.
        """
        # Reject any unknown OP type up front.
        for t in types:
            if t not in LocalStructOrderParams.__supported_types:
                raise ValueError("Unknown order parameter type (" + t + ")!")
        self._types = tuple(types)
        self._comp_azi = False
        # Per-OP parameter dicts, aligned index-for-index with _types;
        # deep-copied so later mutation cannot corrupt the defaults.
        self._params = []
        for i, t in enumerate(self._types):
            d = deepcopy(default_op_params[t]) if default_op_params[t] is not None else None
            if parameters is None:
                self._params.append(d)
            elif parameters[i] is None:
                self._params.append(d)
            else:
                self._params.append(deepcopy(parameters[i]))
        # Computation flags; which intermediate quantities the requested
        # OP types require (neighbor vectors rij, second-shell vectors
        # rjk, geometric OPs, and bond orientational OPs).
        self._computerijs = self._computerjks = self._geomops = False
        self._geomops2 = self._boops = False
        self._max_trig_order = -1
        # Add here any additional flags to be used during calculation.
        if "sgl_bd" in self._types:
            self._computerijs = True
        if not set(self._types).isdisjoint(
            [
                "tet",
                "oct",
                "bcc",
                "sq_pyr",
                "sq_pyr_legacy",
                "tri_bipyr",
                "sq_bipyr",
                "oct_legacy",
                "tri_plan",
                "sq_plan",
                "pent_plan",
                "tri_pyr",
                "pent_pyr",
                "hex_pyr",
                "pent_bipyr",
                "hex_bipyr",
                "T",
                "cuboct",
                "oct_max",
                "tet_max",
                "tri_plan_max",
                "sq_plan_max",
                "pent_plan_max",
                "cuboct_max",
                "bent",
                "see_saw_rect",
                "hex_plan_max",
                "sq_face_cap_trig_pris",
            ]
        ):
            self._computerijs = self._geomops = True
        if "sq_face_cap_trig_pris" in self._types:
            self._comp_azi = True
        if not set(self._types).isdisjoint(["reg_tri", "sq"]):
            self._computerijs = self._computerjks = self._geomops2 = True
        if not set(self._types).isdisjoint(["q2", "q4", "q6"]):
            self._computerijs = self._boops = True
        # Highest BOOP weight requested determines how many powers of
        # sin/cos must be precomputed (later checks override earlier ones).
        if "q2" in self._types:
            self._max_trig_order = 2
        if "q4" in self._types:
            self._max_trig_order = 4
        if "q6" in self._types:
            self._max_trig_order = 6
        # Finish parameter treatment.
        if cutoff < 0.0:
            # Negative cutoff: use |cutoff| and additionally prune the
            # neighbor list with a Voronoi analysis.
            self._cutoff = -cutoff
            self._voroneigh = True
        elif cutoff > 0.0:
            self._cutoff = cutoff
            self._voroneigh = False
        else:
            raise ValueError("Cutoff radius is zero!")
        # Further variable definitions.
        self._last_nneigh = -1  # -1 = no OP calculation performed yet
        # Caches for powers of sin/cos(theta) and sin/cos(n*phi), filled
        # by compute_trigonometric_terms.
        self._pow_sin_t = {}
        self._pow_cos_t = {}
        self._sin_n_p = {}
        self._cos_n_p = {}
@property
def num_ops(self):
"""
Returns:
int: the number of different order parameters that are targeted
to be calculated.
"""
return len(self._types)
@property
def last_nneigh(self):
"""
Returns:
int: the number of neighbors encountered during the most
recent order parameter calculation. A value of -1 indicates
that no such calculation has yet been performed for this
instance.
"""
return len(self._last_nneigh)
def compute_trigonometric_terms(self, thetas, phis):
"""
Computes trigonometric terms that are required to
calculate bond orientational order parameters using
internal variables.
Args:
thetas ([float]): polar angles of all neighbors in radians.
phis ([float]): azimuth angles of all neighbors in radians.
The list of
azimuth angles of all neighbors in radians. The list of
azimuth angles is expected to have the same size as the
list of polar angles; otherwise, a ValueError is raised.
Also, the two lists of angles have to be coherent in
order. That is, it is expected that the order in the list
of azimuth angles corresponds to a distinct sequence of
neighbors. And, this sequence has to equal the sequence
of neighbors in the list of polar angles.
"""
if len(thetas) != len(phis):
raise ValueError("List of polar and azimuthal angles have to be" " equal!")
self._pow_sin_t.clear()
self._pow_cos_t.clear()
self._sin_n_p.clear()
self._cos_n_p.clear()
self._pow_sin_t[1] = [sin(float(t)) for t in thetas]
self._pow_cos_t[1] = [cos(float(t)) for t in thetas]
self._sin_n_p[1] = [sin(float(p)) for p in phis]
self._cos_n_p[1] = [cos(float(p)) for p in phis]
for i in range(2, self._max_trig_order + 1):
self._pow_sin_t[i] = [e[0] * e[1] for e in zip(self._pow_sin_t[i - 1], self._pow_sin_t[1])]
self._pow_cos_t[i] = [e[0] * e[1] for e in zip(self._pow_cos_t[i - 1], self._pow_cos_t[1])]
self._sin_n_p[i] = [sin(float(i) * float(p)) for p in phis]
self._cos_n_p[i] = [cos(float(i) * float(p)) for p in phis]
def get_q2(self, thetas=None, phis=None):
    """
    Calculates the value of the bond orientational order parameter of
    weight l=2. If the function is called with non-empty lists of
    polar and azimuthal angles the corresponding trigonometric terms
    are computed afresh. Otherwise, it is expected that the
    compute_trigonometric_terms function has been just called.

    Args:
        thetas ([float]): polar angles of all neighbors in radians.
        phis ([float]): azimuth angles of all neighbors in radians.

    Returns:
        float: bond orientational order parameter of weight l=2
            corresponding to the input angles thetas and phis.
    """
    if thetas is not None and phis is not None:
        self.compute_trigonometric_terms(thetas, phis)
    nnn = len(self._pow_sin_t[1])
    nnn_range = range(nnn)
    # Prefactors of the spherical harmonics Y_2^m expressed via powers of
    # sin(theta) and cos(theta) (precomputed in _pow_sin_t/_pow_cos_t).
    sqrt_15_2pi = sqrt(15.0 / (2.0 * pi))
    sqrt_5_pi = sqrt(5.0 / pi)
    pre_y_2_2 = [0.25 * sqrt_15_2pi * val for val in self._pow_sin_t[2]]
    pre_y_2_1 = [0.5 * sqrt_15_2pi * val[0] * val[1] for val in zip(self._pow_sin_t[1], self._pow_cos_t[1])]
    # acc accumulates |sum_i Y_2^m(theta_i, phi_i)|^2 over m = -2 ... 2.
    acc = 0.0
    # Y_2_-2
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_2_2[i] * self._cos_n_p[2][i]
        imag -= pre_y_2_2[i] * self._sin_n_p[2][i]
    acc += real * real + imag * imag
    # Y_2_-1
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_2_1[i] * self._cos_n_p[1][i]
        imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
    acc += real * real + imag * imag
    # Y_2_0 (purely real; no azimuthal dependence)
    real = imag = 0.0
    for i in nnn_range:
        real += 0.25 * sqrt_5_pi * (3.0 * self._pow_cos_t[2][i] - 1.0)
    acc += real * real
    # Y_2_1 (sign differences vs. Y_2_-1 reflect the +/-m phase relation)
    real = imag = 0.0
    for i in nnn_range:
        real -= pre_y_2_1[i] * self._cos_n_p[1][i]
        imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
    acc += real * real + imag * imag
    # Y_2_2
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_2_2[i] * self._cos_n_p[2][i]
        imag += pre_y_2_2[i] * self._sin_n_p[2][i]
    acc += real * real + imag * imag
    # Steinhardt q_l with l=2: sqrt(4*pi/(2l+1) * sum_m |<Y_l^m>|^2),
    # where <.> is the average over the nnn neighbors.
    q2 = sqrt(4.0 * pi * acc / (5.0 * float(nnn * nnn)))
    return q2
def get_q4(self, thetas=None, phis=None):
    """
    Calculates the value of the bond orientational order parameter of
    weight l=4. If the function is called with non-empty lists of
    polar and azimuthal angles the corresponding trigonometric terms
    are computed afresh. Otherwise, it is expected that the
    compute_trigonometric_terms function has been just called.

    Args:
        thetas ([float]): polar angles of all neighbors in radians.
        phis ([float]): azimuth angles of all neighbors in radians.

    Returns:
        float: bond orientational order parameter of weight l=4
            corresponding to the input angles thetas and phis.
    """
    if thetas is not None and phis is not None:
        self.compute_trigonometric_terms(thetas, phis)
    nnn = len(self._pow_sin_t[1])
    nnn_range = range(nnn)
    # Prefactors of the spherical harmonics Y_4^m expressed via powers of
    # sin(theta) and cos(theta) (precomputed in _pow_sin_t/_pow_cos_t).
    i16_3 = 3.0 / 16.0
    i8_3 = 3.0 / 8.0
    sqrt_35_pi = sqrt(35.0 / pi)
    sqrt_35_2pi = sqrt(35.0 / (2.0 * pi))
    sqrt_5_pi = sqrt(5.0 / pi)
    sqrt_5_2pi = sqrt(5.0 / (2.0 * pi))
    sqrt_1_pi = sqrt(1.0 / pi)
    pre_y_4_4 = [i16_3 * sqrt_35_2pi * val for val in self._pow_sin_t[4]]
    pre_y_4_3 = [i8_3 * sqrt_35_pi * val[0] * val[1] for val in zip(self._pow_sin_t[3], self._pow_cos_t[1])]
    pre_y_4_2 = [
        i8_3 * sqrt_5_2pi * val[0] * (7.0 * val[1] - 1.0) for val in zip(self._pow_sin_t[2], self._pow_cos_t[2])
    ]
    pre_y_4_1 = [
        i8_3 * sqrt_5_pi * val[0] * (7.0 * val[1] - 3.0 * val[2])
        for val in zip(self._pow_sin_t[1], self._pow_cos_t[3], self._pow_cos_t[1])
    ]
    # acc accumulates |sum_i Y_4^m(theta_i, phi_i)|^2 over m = -4 ... 4.
    acc = 0.0
    # Y_4_-4
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_4_4[i] * self._cos_n_p[4][i]
        imag -= pre_y_4_4[i] * self._sin_n_p[4][i]
    acc += real * real + imag * imag
    # Y_4_-3
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_4_3[i] * self._cos_n_p[3][i]
        imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
    acc += real * real + imag * imag
    # Y_4_-2
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_4_2[i] * self._cos_n_p[2][i]
        imag -= pre_y_4_2[i] * self._sin_n_p[2][i]
    acc += real * real + imag * imag
    # Y_4_-1
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_4_1[i] * self._cos_n_p[1][i]
        imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
    acc += real * real + imag * imag
    # Y_4_0 (purely real; no azimuthal dependence)
    real = imag = 0.0
    for i in nnn_range:
        real += i16_3 * sqrt_1_pi * (35.0 * self._pow_cos_t[4][i] - 30.0 * self._pow_cos_t[2][i] + 3.0)
    acc += real * real
    # Y_4_1 (sign differences vs. Y_4_-1 reflect the +/-m phase relation)
    real = imag = 0.0
    for i in nnn_range:
        real -= pre_y_4_1[i] * self._cos_n_p[1][i]
        imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
    acc += real * real + imag * imag
    # Y_4_2
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_4_2[i] * self._cos_n_p[2][i]
        imag += pre_y_4_2[i] * self._sin_n_p[2][i]
    acc += real * real + imag * imag
    # Y_4_3
    real = imag = 0.0
    for i in nnn_range:
        real -= pre_y_4_3[i] * self._cos_n_p[3][i]
        imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
    acc += real * real + imag * imag
    # Y_4_4
    real = imag = 0.0
    for i in nnn_range:
        real += pre_y_4_4[i] * self._cos_n_p[4][i]
        imag += pre_y_4_4[i] * self._sin_n_p[4][i]
    acc += real * real + imag * imag
    # Steinhardt q_l with l=4: sqrt(4*pi/(2l+1) * sum_m |<Y_l^m>|^2),
    # where <.> is the average over the nnn neighbors.
    q4 = sqrt(4.0 * pi * acc / (9.0 * float(nnn * nnn)))
    return q4
def get_q6(self, thetas=None, phis=None):
    """
    Compute the Steinhardt bond orientational order parameter of
    weight l=6. When both angle lists are supplied, the underlying
    trigonometric terms are recomputed via
    compute_trigonometric_terms; otherwise the terms cached by the
    most recent call to that method are reused.

    Args:
        thetas ([float]): polar angles of all neighbors in radians.
        phis ([float]): azimuth angles of all neighbors in radians.

    Returns:
        float: bond orientational order parameter of weight l=6
            corresponding to the input angles thetas and phis.
    """
    if thetas is not None and phis is not None:
        self.compute_trigonometric_terms(thetas, phis)
    nnn = len(self._pow_sin_t[1])
    nnn_range = range(nnn)

    i64 = 1.0 / 64.0
    i32 = 1.0 / 32.0
    i32_3 = 3.0 / 32.0
    i16 = 1.0 / 16.0
    sqrt_3003_pi = sqrt(3003.0 / pi)
    sqrt_1001_pi = sqrt(1001.0 / pi)
    sqrt_91_2pi = sqrt(91.0 / (2.0 * pi))
    sqrt_1365_pi = sqrt(1365.0 / pi)
    sqrt_273_2pi = sqrt(273.0 / (2.0 * pi))
    sqrt_13_pi = sqrt(13.0 / pi)

    # Theta-dependent prefactors of Y_6_m for m = 1..6; the +m and -m
    # harmonics share the same prefactor.
    pre = {
        6: [i64 * sqrt_3003_pi * s for s in self._pow_sin_t[6]],
        5: [i32_3 * sqrt_1001_pi * s * c for s, c in zip(self._pow_sin_t[5], self._pow_cos_t[1])],
        4: [i32_3 * sqrt_91_2pi * s * (11.0 * c2 - 1.0) for s, c2 in zip(self._pow_sin_t[4], self._pow_cos_t[2])],
        3: [
            i32 * sqrt_1365_pi * s * (11.0 * c3 - 3.0 * c1)
            for s, c3, c1 in zip(self._pow_sin_t[3], self._pow_cos_t[3], self._pow_cos_t[1])
        ],
        2: [
            i64 * sqrt_1365_pi * s * (33.0 * c4 - 18.0 * c2 + 1.0)
            for s, c4, c2 in zip(self._pow_sin_t[2], self._pow_cos_t[4], self._pow_cos_t[2])
        ],
        1: [
            i16 * sqrt_273_2pi * s * (33.0 * c5 - 30.0 * c3 + 5.0 * c1)
            for s, c5, c3, c1 in zip(
                self._pow_sin_t[1],
                self._pow_cos_t[5],
                self._pow_cos_t[3],
                self._pow_cos_t[1],
            )
        ],
    }

    # |sum_i Y_6_m(i)|^2 for m = 1..6. The real/imaginary parts of the
    # +m and -m sums differ only by overall signs, which vanish upon
    # squaring, so each magnitude is computed once and added twice below.
    mag2 = {}
    for m in range(1, 7):
        re_part = im_part = 0.0
        for i in nnn_range:
            re_part += pre[m][i] * self._cos_n_p[m][i]
            im_part += pre[m][i] * self._sin_n_p[m][i]
        mag2[m] = re_part * re_part + im_part * im_part

    # m = 0 term (purely real).
    re_part = 0.0
    for i in nnn_range:
        re_part += (
            i32
            * sqrt_13_pi
            * (231.0 * self._pow_cos_t[6][i] - 315.0 * self._pow_cos_t[4][i] + 105.0 * self._pow_cos_t[2][i] - 5.0)
        )
    mag2[0] = re_part * re_part

    # Accumulate in the original order m = -6, -5, ..., 0, ..., 5, 6 so
    # floating-point summation matches the reference implementation.
    acc = 0.0
    for m in (6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6):
        acc += mag2[m]
    q6 = sqrt(4.0 * pi * acc / (13.0 * float(nnn * nnn)))
    return q6
def get_type(self, index):
    """
    Return the short-string identifier of the order parameter stored
    at position ``index``.

    Args:
        index (int): index of order parameter for which type is
            to be returned.

    Returns:
        str: OP type.

    Raises:
        ValueError: if ``index`` is outside the list of configured OPs.
    """
    if not 0 <= index < len(self._types):
        raise ValueError("Index for getting order parameter type out-of-bounds!")
    return self._types[index]
def get_parameters(self, index):
    """
    Return the parameter list associated with the order parameter that
    was configured at position ``index``.

    Attention: the parameters do not need to equal those originally
    inputted because of processing out of efficiency reasons.

    Args:
        index (int): index of order parameter for which associated
            parameters are to be returned.

    Returns:
        [float]: parameters of a given OP.

    Raises:
        ValueError: if ``index`` is outside the list of configured OPs.
    """
    if not 0 <= index < len(self._types):
        raise ValueError("Index for getting parameters associated with order parameter calculation out-of-bounds!")
    return self._params[index]
def get_order_parameters(self, structure, n, indices_neighs=None, tol=0.0, target_spec=None):
    """
    Compute all order parameters of site n.

    Args:
        structure (Structure): input structure.
        n (int): index of site in input structure,
            for which OPs are to be
            calculated. Note that we do not use the sites iterator
            here, but directly access sites via struct[index].
        indices_neighs ([int]): list of indices of those neighbors
            in Structure object
            structure that are to be considered for OP computation.
            This optional argument overwrites the way neighbors are
            to be determined as defined in the constructor (i.e.,
            Voronoi coordination finder via negative cutoff radius
            vs constant cutoff radius if cutoff was positive).
            We do not use information about the underlying
            structure lattice if the neighbor indices are explicitly
            provided. This has two important consequences. First,
            the input Structure object can, in fact, be a
            simple list of Site objects. Second, no nearest images
            of neighbors are determined when providing an index list.
            Note furthermore that this neighbor
            determination type ignores the optional target_spec
            argument.
        tol (float): threshold of weight
            (= solid angle / maximal solid angle)
            to determine if a particular pair is
            considered neighbors; this is relevant only in the case
            when Voronoi polyhedra are used to determine coordination
        target_spec (Species): target species to be considered
            when calculating the order
            parameters of site n; None includes all species of input
            structure.

    Returns:
        [floats]: representing order parameters. Should it not be
        possible to compute a given OP for a conceptual reason, the
        corresponding entry is None instead of a float. For Steinhardt
        et al.'s bond orientational OPs and the other geometric OPs
        ("tet", "oct", "bcc", etc.),
        this can happen if there is a single
        neighbor around site n in the structure because that
        does not permit calculation of angles between multiple
        neighbors.
    """
    # Do error-checking and initialization.
    if n < 0:
        raise ValueError("Site index smaller zero!")
    if n >= len(structure):
        raise ValueError("Site index beyond maximum!")
    if indices_neighs is not None:
        for index in indices_neighs:
            if index >= len(structure):
                raise ValueError("Neighbor site index beyond maximum!")
    if tol < 0.0:
        raise ValueError("Negative tolerance for weighted solid angle!")
    # left_of_unity guards acos against vectors (anti)parallel to z;
    # very_small guards against degenerate (zero-length) axis vectors.
    left_of_unity = 1.0 - 1.0e-12
    # The following threshold has to be adapted to non-Angstrom units.
    very_small = 1.0e-12
    # 1/exp(-0.5): rescales the bcc angle term so its peak value is 1
    # (tmp * exp(-0.5*tmp*tmp) peaks at exp(-0.5) when tmp == 1).
    fac_bcc = 1.0 / exp(-0.5)
    # Find central site and its neighbors.
    # Note that we adopt the same way of accessing sites here as in
    # VoronoiNN; that is, not via the sites iterator.
    centsite = structure[n]
    if indices_neighs is not None:
        neighsites = [structure[index] for index in indices_neighs]
    elif self._voroneigh:
        vnn = VoronoiNN(tol=tol, targets=target_spec)
        neighsites = vnn.get_nn(structure, n)
    else:
        # Structure.get_sites_in_sphere --> also other periodic images
        neighsitestmp = [i[0] for i in structure.get_sites_in_sphere(centsite.coords, self._cutoff)]
        neighsites = []
        if centsite not in neighsitestmp:
            raise ValueError("Could not find center site!")
        neighsitestmp.remove(centsite)
        if target_spec is None:
            neighsites = list(neighsitestmp)
        else:
            neighsites[:] = [site for site in neighsitestmp if site.specie.symbol == target_spec]
    nneigh = len(neighsites)
    self._last_nneigh = nneigh
    # Prepare angle calculations, if applicable.
    # rij: center->neighbor vectors; rjk: neighbor->neighbor vectors;
    # *norm are the corresponding unit vectors.
    rij = []
    rjk = []
    rijnorm = []
    rjknorm = []
    dist = []
    distjk_unique = []
    distjk = []
    centvec = centsite.coords
    if self._computerijs:
        for j, neigh in enumerate(neighsites):
            rij.append((neigh.coords - centvec))
            dist.append(np.linalg.norm(rij[j]))
            rijnorm.append((rij[j] / dist[j]))
    if self._computerjks:
        for j, neigh in enumerate(neighsites):
            rjk.append([])
            rjknorm.append([])
            distjk.append([])
            kk = 0
            for k, neigh_2 in enumerate(neighsites):
                if j != k:
                    rjk[j].append(neigh_2.coords - neigh.coords)
                    distjk[j].append(np.linalg.norm(rjk[j][kk]))
                    if k > j:
                        distjk_unique.append(distjk[j][kk])
                    rjknorm[j].append(rjk[j][kk] / distjk[j][kk])
                    kk = kk + 1
    # Initialize OP list and, then, calculate OPs.
    ops = [0.0 for t in self._types]
    # norms = [[[] for j in range(nneigh)] for t in self._types]
    # First, coordination number and distance-based OPs.
    for i, t in enumerate(self._types):
        if t == "cn":
            ops[i] = nneigh / self._params[i]["norm"]
        elif t == "sgl_bd":
            # 1 - (shortest / second-shortest distance): ~1 for an
            # isolated short bond, ~0 for two near-equal bonds.
            dist_sorted = sorted(dist)
            if len(dist_sorted) == 1:
                ops[i] = 1.0
            elif len(dist_sorted) > 1:
                ops[i] = 1.0 - dist_sorted[0] / dist_sorted[1]
    # Then, bond orientational OPs based on spherical harmonics
    # according to Steinhardt et al., Phys. Rev. B, 28, 784-805, 1983.
    if self._boops:
        thetas = []
        phis = []
        for j, vec in enumerate(rijnorm):
            # z is North pole --> theta between vec and (0, 0, 1)^T.
            # Because vec is normalized, dot product is simply vec[2].
            thetas.append(acos(max(-1.0, min(vec[2], 1.0))))
            tmpphi = 0.0
            # Compute phi only if it is not (almost) perfectly
            # aligned with z-axis.
            if -left_of_unity < vec[2] < left_of_unity:
                # x is prime meridian --> phi between projection of vec
                # into x-y plane and (1, 0, 0)^T
                tmpphi = acos(
                    max(
                        -1.0,
                        min(vec[0] / (sqrt(vec[0] * vec[0] + vec[1] * vec[1])), 1.0),
                    )
                )
                if vec[1] < 0.0:
                    tmpphi = -tmpphi
            phis.append(tmpphi)
        # Note that None flags that we have too few neighbors
        # for calculating BOOPS.
        for i, t in enumerate(self._types):
            if t == "q2":
                ops[i] = self.get_q2(thetas, phis) if len(thetas) > 0 else None
            elif t == "q4":
                ops[i] = self.get_q4(thetas, phis) if len(thetas) > 0 else None
            elif t == "q6":
                ops[i] = self.get_q6(thetas, phis) if len(thetas) > 0 else None
    # Then, deal with the Peters-style OPs that are tailor-made
    # to recognize common structural motifs
    # (Peters, J. Chem. Phys., 131, 244103, 2009;
    # Zimmermann et al., J. Am. Chem. Soc., under revision, 2015).
    if self._geomops:
        gaussthetak = [0.0 for t in self._types]  # not used by all OPs
        # qsptheta[i][j][kc] accumulates the angular score of OP i for
        # north pole j and prime-meridian choice kc; norms holds the
        # matching normalization weights.
        qsptheta = [[[] for j in range(nneigh)] for t in self._types]
        norms = [[[] for j in range(nneigh)] for t in self._types]
        ipi = 1.0 / pi
        piover2 = pi / 2.0
        onethird = 1.0 / 3.0
        twothird = 2.0 / 3.0
        for j in range(nneigh):  # Neighbor j is put to the North pole.
            zaxis = rijnorm[j]
            kc = 0
            for k in range(nneigh):  # From neighbor k, we construct
                if j != k:  # the prime meridian.
                    for i in range(len(self._types)):
                        qsptheta[i][j].append(0.0)
                        norms[i][j].append(0)
                    tmp = max(-1.0, min(np.inner(zaxis, rijnorm[k]), 1.0))
                    thetak = acos(tmp)
                    # In-plane x axis: component of rijnorm[k] orthogonal
                    # to zaxis; degenerate if j and k are (anti)parallel.
                    xaxis = gramschmidt(rijnorm[k], zaxis)
                    if np.linalg.norm(xaxis) < very_small:
                        flag_xaxis = True
                    else:
                        xaxis = xaxis / np.linalg.norm(xaxis)
                        flag_xaxis = False
                    if self._comp_azi:
                        flag_yaxis = True
                        yaxis = np.cross(zaxis, xaxis)
                        if np.linalg.norm(yaxis) > very_small:
                            yaxis = yaxis / np.linalg.norm(yaxis)
                            flag_yaxis = False
                    # Contributions of j-i-k angles, where i represents the
                    # central atom and j and k two of the neighbors.
                    for i, t in enumerate(self._types):
                        if t in ["bent", "sq_pyr_legacy"]:
                            tmp = self._params[i]["IGW_TA"] * (thetak * ipi - self._params[i]["TA"])
                            qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
                            norms[i][j][kc] += 1
                        elif t in ["tri_plan", "tri_plan_max", "tet", "tet_max"]:
                            tmp = self._params[i]["IGW_TA"] * (thetak * ipi - self._params[i]["TA"])
                            gaussthetak[i] = exp(-0.5 * tmp * tmp)
                            if t in ["tri_plan_max", "tet_max"]:
                                qsptheta[i][j][kc] += gaussthetak[i]
                                norms[i][j][kc] += 1
                        elif t in ["T", "tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr"]:
                            tmp = self._params[i]["IGW_EP"] * (thetak * ipi - 0.5)
                            qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
                            norms[i][j][kc] += 1
                        elif t in [
                            "sq_plan",
                            "oct",
                            "oct_legacy",
                            "cuboct",
                            "cuboct_max",
                        ]:
                            if thetak >= self._params[i]["min_SPP"]:
                                tmp = self._params[i]["IGW_SPP"] * (thetak * ipi - 1.0)
                                qsptheta[i][j][kc] += self._params[i]["w_SPP"] * exp(-0.5 * tmp * tmp)
                                norms[i][j][kc] += self._params[i]["w_SPP"]
                        elif t in [
                            "see_saw_rect",
                            "tri_bipyr",
                            "sq_bipyr",
                            "pent_bipyr",
                            "hex_bipyr",
                            "oct_max",
                            "sq_plan_max",
                            "hex_plan_max",
                        ]:
                            if thetak < self._params[i]["min_SPP"]:
                                tmp = (
                                    self._params[i]["IGW_EP"] * (thetak * ipi - 0.5)
                                    if t != "hex_plan_max"
                                    else self._params[i]["IGW_TA"]
                                    * (fabs(thetak * ipi - 0.5) - self._params[i]["TA"])
                                )
                                qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
                                norms[i][j][kc] += 1
                        elif t in ["pent_plan", "pent_plan_max"]:
                            tmp = 0.4 if thetak <= self._params[i]["TA"] * pi else 0.8
                            tmp2 = self._params[i]["IGW_TA"] * (thetak * ipi - tmp)
                            gaussthetak[i] = exp(-0.5 * tmp2 * tmp2)
                            if t == "pent_plan_max":
                                qsptheta[i][j][kc] += gaussthetak[i]
                                norms[i][j][kc] += 1
                        elif t == "bcc" and j < k:
                            if thetak >= self._params[i]["min_SPP"]:
                                tmp = self._params[i]["IGW_SPP"] * (thetak * ipi - 1.0)
                                qsptheta[i][j][kc] += self._params[i]["w_SPP"] * exp(-0.5 * tmp * tmp)
                                norms[i][j][kc] += self._params[i]["w_SPP"]
                        elif t == "sq_face_cap_trig_pris":
                            if thetak < self._params[i]["TA3"]:
                                tmp = self._params[i]["IGW_TA1"] * (thetak * ipi - self._params[i]["TA1"])
                                qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
                                norms[i][j][kc] += 1
                    # NOTE(review): below, `i` and `t` are the leftover values
                    # from the enumerate loop above (i.e., the LAST configured
                    # OP type). This appears intentional only when a single OP
                    # type is configured per instance — confirm before relying
                    # on multi-type calls for the south-pole terms.
                    for m in range(nneigh):
                        if (m != j) and (m != k) and (not flag_xaxis):
                            tmp = max(-1.0, min(np.inner(zaxis, rijnorm[m]), 1.0))
                            thetam = acos(tmp)
                            xtwoaxistmp = gramschmidt(rijnorm[m], zaxis)
                            # Length of the Gram-Schmidt residual for neighbor m.
                            l = np.linalg.norm(xtwoaxistmp)
                            if l < very_small:
                                flag_xtwoaxis = True
                            else:
                                xtwoaxis = xtwoaxistmp / l
                                phi = acos(max(-1.0, min(np.inner(xtwoaxis, xaxis), 1.0)))
                                flag_xtwoaxis = False
                                if self._comp_azi:
                                    phi2 = atan2(
                                        np.dot(xtwoaxis, yaxis),
                                        np.dot(xtwoaxis, xaxis),
                                    )
                            # South pole contributions of m.
                            if t in [
                                "tri_bipyr",
                                "sq_bipyr",
                                "pent_bipyr",
                                "hex_bipyr",
                                "oct_max",
                                "sq_plan_max",
                                "hex_plan_max",
                                "see_saw_rect",
                            ]:
                                if thetam >= self._params[i]["min_SPP"]:
                                    tmp = self._params[i]["IGW_SPP"] * (thetam * ipi - 1.0)
                                    qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
                                    norms[i][j][kc] += 1
                            # Contributions of j-i-m angle and
                            # angles between plane j-i-k and i-m vector.
                            if not flag_xaxis and not flag_xtwoaxis:
                                for i, t in enumerate(self._types):
                                    if t in [
                                        "tri_plan",
                                        "tri_plan_max",
                                        "tet",
                                        "tet_max",
                                    ]:
                                        tmp = self._params[i]["IGW_TA"] * (thetam * ipi - self._params[i]["TA"])
                                        tmp2 = cos(self._params[i]["fac_AA"] * phi) ** self._params[i]["exp_cos_AA"]
                                        tmp3 = 1 if t in ["tri_plan_max", "tet_max"] else gaussthetak[i]
                                        qsptheta[i][j][kc] += tmp3 * exp(-0.5 * tmp * tmp) * tmp2
                                        norms[i][j][kc] += 1
                                    elif t in ["pent_plan", "pent_plan_max"]:
                                        tmp = 0.4 if thetam <= self._params[i]["TA"] * pi else 0.8
                                        tmp2 = self._params[i]["IGW_TA"] * (thetam * ipi - tmp)
                                        tmp3 = cos(phi)
                                        tmp4 = 1 if t == "pent_plan_max" else gaussthetak[i]
                                        qsptheta[i][j][kc] += tmp4 * exp(-0.5 * tmp2 * tmp2) * tmp3 * tmp3
                                        norms[i][j][kc] += 1
                                    elif t in [
                                        "T",
                                        "tri_pyr",
                                        "sq_pyr",
                                        "pent_pyr",
                                        "hex_pyr",
                                    ]:
                                        tmp = cos(self._params[i]["fac_AA"] * phi) ** self._params[i]["exp_cos_AA"]
                                        tmp3 = self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
                                        qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp3 * tmp3)
                                        norms[i][j][kc] += 1
                                    elif t in ["sq_plan", "oct", "oct_legacy"]:
                                        if (
                                            thetak < self._params[i]["min_SPP"]
                                            and thetam < self._params[i]["min_SPP"]
                                        ):
                                            tmp = (
                                                cos(self._params[i]["fac_AA"] * phi)
                                                ** self._params[i]["exp_cos_AA"]
                                            )
                                            tmp2 = self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
                                            qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
                                            if t == "oct_legacy":
                                                qsptheta[i][j][kc] -= tmp * self._params[i][6] * self._params[i][7]
                                            norms[i][j][kc] += 1
                                    elif t in [
                                        "tri_bipyr",
                                        "sq_bipyr",
                                        "pent_bipyr",
                                        "hex_bipyr",
                                        "oct_max",
                                        "sq_plan_max",
                                        "hex_plan_max",
                                    ]:
                                        if thetam < self._params[i]["min_SPP"]:
                                            if thetak < self._params[i]["min_SPP"]:
                                                tmp = (
                                                    cos(self._params[i]["fac_AA"] * phi)
                                                    ** self._params[i]["exp_cos_AA"]
                                                )
                                                tmp2 = (
                                                    self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
                                                    if t != "hex_plan_max"
                                                    else self._params[i]["IGW_TA"]
                                                    * (fabs(thetam * ipi - 0.5) - self._params[i]["TA"])
                                                )
                                                qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
                                                norms[i][j][kc] += 1
                                    elif t == "bcc" and j < k:
                                        if thetak < self._params[i]["min_SPP"]:
                                            if thetak > piover2:
                                                fac = 1.0
                                            else:
                                                fac = -1.0
                                            tmp = (thetam - piover2) / asin(1 / 3)
                                            qsptheta[i][j][kc] += (
                                                fac * cos(3.0 * phi) * fac_bcc * tmp * exp(-0.5 * tmp * tmp)
                                            )
                                            norms[i][j][kc] += 1
                                    elif t == "see_saw_rect":
                                        if thetam < self._params[i]["min_SPP"]:
                                            if thetak < self._params[i]["min_SPP"] and phi < 0.75 * pi:
                                                tmp = (
                                                    cos(self._params[i]["fac_AA"] * phi)
                                                    ** self._params[i]["exp_cos_AA"]
                                                )
                                                tmp2 = self._params[i]["IGW_EP"] * (thetam * ipi - 0.5)
                                                qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
                                                norms[i][j][kc] += 1.0
                                    elif t in ["cuboct", "cuboct_max"]:
                                        if (
                                            thetam < self._params[i]["min_SPP"]
                                            and self._params[i][4] < thetak < self._params[i][2]
                                        ):
                                            if self._params[i][4] < thetam < self._params[i][2]:
                                                tmp = cos(phi)
                                                tmp2 = self._params[i][5] * (thetam * ipi - 0.5)
                                                qsptheta[i][j][kc] += tmp * tmp * exp(-0.5 * tmp2 * tmp2)
                                                norms[i][j][kc] += 1.0
                                            elif thetam < self._params[i][4]:
                                                tmp = 0.0556 * (cos(phi - 0.5 * pi) - 0.81649658)
                                                tmp2 = self._params[i][6] * (thetam * ipi - onethird)
                                                qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) * exp(
                                                    -0.5 * tmp2 * tmp2
                                                )
                                                norms[i][j][kc] += 1.0
                                            elif thetam > self._params[i][2]:
                                                tmp = 0.0556 * (cos(phi - 0.5 * pi) - 0.81649658)
                                                tmp2 = self._params[i][6] * (thetam * ipi - twothird)
                                                qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) * exp(
                                                    -0.5 * tmp2 * tmp2
                                                )
                                                norms[i][j][kc] += 1.0
                                    elif t == "sq_face_cap_trig_pris" and not flag_yaxis:
                                        if thetak < self._params[i]["TA3"]:
                                            if thetam < self._params[i]["TA3"]:
                                                tmp = (
                                                    cos(self._params[i]["fac_AA1"] * phi2)
                                                    ** self._params[i]["exp_cos_AA1"]
                                                )
                                                tmp2 = self._params[i]["IGW_TA1"] * (
                                                    thetam * ipi - self._params[i]["TA1"]
                                                )
                                            else:
                                                tmp = (
                                                    cos(
                                                        self._params[i]["fac_AA2"]
                                                        * (phi2 + self._params[i]["shift_AA2"])
                                                    )
                                                    ** self._params[i]["exp_cos_AA2"]
                                                )
                                                tmp2 = self._params[i]["IGW_TA2"] * (
                                                    thetam * ipi - self._params[i]["TA2"]
                                                )
                                            qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
                                            norms[i][j][kc] += 1
                    # Advance the per-(j, k) column index.
                    kc += 1
        # Normalize Peters-style OPs.
        for i, t in enumerate(self._types):
            # Averaging-type OPs: sum all contributions and divide by the
            # accumulated normalization weight.
            if t in [
                "tri_plan",
                "tet",
                "bent",
                "sq_plan",
                "oct",
                "oct_legacy",
                "cuboct",
                "pent_plan",
            ]:
                ops[i] = tmp_norm = 0.0
                for j in range(nneigh):
                    ops[i] += sum(qsptheta[i][j])
                    tmp_norm += float(sum(norms[i][j]))
                ops[i] = ops[i] / tmp_norm if tmp_norm > 1.0e-12 else None
            # Maximum-type OPs: report the best-scoring (j, k) frame.
            elif t in [
                "T",
                "tri_pyr",
                "see_saw_rect",
                "sq_pyr",
                "tri_bipyr",
                "sq_bipyr",
                "pent_pyr",
                "hex_pyr",
                "pent_bipyr",
                "hex_bipyr",
                "oct_max",
                "tri_plan_max",
                "tet_max",
                "sq_plan_max",
                "pent_plan_max",
                "cuboct_max",
                "hex_plan_max",
                "sq_face_cap_trig_pris",
            ]:
                ops[i] = None
                if nneigh > 1:
                    for j in range(nneigh):
                        for k in range(len(qsptheta[i][j])):
                            qsptheta[i][j][k] = (
                                qsptheta[i][j][k] / norms[i][j][k] if norms[i][j][k] > 1.0e-12 else 0.0
                            )
                        ops[i] = max(qsptheta[i][j]) if j == 0 else max(ops[i], max(qsptheta[i][j]))
            elif t == "bcc":
                ops[i] = 0.0
                for j in range(nneigh):
                    ops[i] += sum(qsptheta[i][j])
                ops[i] = (
                    ops[i] / float(0.5 * float(nneigh * (6 + (nneigh - 2) * (nneigh - 3)))) if nneigh > 3 else None
                )
            elif t == "sq_pyr_legacy":
                if nneigh > 1:
                    dmean = np.mean(dist)
                    acc = 0.0
                    for d in dist:
                        tmp = self._params[i][2] * (d - dmean)
                        acc = acc + exp(-0.5 * tmp * tmp)
                    for j in range(nneigh):
                        ops[i] = max(qsptheta[i][j]) if j == 0 else max(ops[i], max(qsptheta[i][j]))
                    ops[i] = acc * ops[i] / float(nneigh)
                    # nneigh * (nneigh - 1))
                else:
                    ops[i] = None
    # Then, deal with the new-style OPs that require vectors between
    # neighbors.
    if self._geomops2:
        # Compute all (unique) angles and sort the resulting list.
        aij = []
        for ir, r in enumerate(rijnorm):
            for j in range(ir + 1, len(rijnorm)):
                aij.append(acos(max(-1.0, min(np.inner(r, rijnorm[j]), 1.0))))
        aijs = sorted(aij)
        # Compute height, side and diagonal length estimates.
        neighscent = np.array([0.0, 0.0, 0.0])
        for j, neigh in enumerate(neighsites):
            neighscent = neighscent + neigh.coords
        if nneigh > 0:
            neighscent = neighscent / float(nneigh)
        # h: distance from the central site to the neighbor centroid.
        h = np.linalg.norm(neighscent - centvec)
        b = min(distjk_unique) if len(distjk_unique) > 0 else 0
        dhalf = max(distjk_unique) / 2.0 if len(distjk_unique) > 0 else 0
        for i, t in enumerate(self._types):
            if t in ("reg_tri", "sq"):
                if nneigh < 3:
                    ops[i] = None
                else:
                    ops[i] = 1.0
                    if t == "reg_tri":
                        a = 2.0 * asin(b / (2.0 * sqrt(h * h + (b / (2.0 * cos(3.0 * pi / 18.0))) ** 2.0)))
                        nmax = 3
                    elif t == "sq":
                        a = 2.0 * asin(b / (2.0 * sqrt(h * h + dhalf * dhalf)))
                        nmax = 4
                    for j in range(min([nneigh, nmax])):
                        ops[i] = ops[i] * exp(-0.5 * ((aijs[j] - a) * self._params[i][0]) ** 2)
    return ops
class BrunnerNN_reciprocal(NearNeighbors):
    """
    Coordination-number determination via Brunner's algorithm: neighbors
    are the atoms lying inside the largest gap found in the sorted
    interatomic distances. This variant locates the largest gap in the
    *reciprocal* distances (1/d).
    """

    def __init__(self, tol=1.0e-4, cutoff=8.0):
        """
        Args:
            tol (float): tolerance parameter for bond determination
                (default: 1E-4).
            cutoff (float): cutoff radius in Angstrom to look for near-neighbor
                atoms. Defaults to 8.0.
        """
        self.tol = tol
        self.cutoff = cutoff

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return False

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        site = structure[n]
        neighbors = structure.get_neighbors(site, self.cutoff)
        distances = sorted(nb.nn_distance for nb in neighbors)
        # Gap in reciprocal space between consecutive sorted distances.
        gaps = [1.0 / distances[k] - 1.0 / distances[k + 1] for k in range(len(distances) - 1)]
        # Cut at the distance just below the largest reciprocal gap.
        d_max = distances[gaps.index(max(gaps))]
        d_min = distances[0]
        siw = []
        for nb in neighbors:
            if nb.nn_distance < d_max + self.tol:
                siw.append(
                    {
                        "site": nb,
                        "image": self._get_image(structure, nb),
                        # Weight by shortest-bond ratio.
                        "weight": d_min / nb.nn_distance,
                        "site_index": self._get_original_site(structure, nb),
                    }
                )
        return siw
class BrunnerNN_relative(NearNeighbors):
    """
    Coordination-number determination via Brunner's algorithm: neighbors
    are the atoms lying inside the largest gap found in the sorted
    interatomic distances. This variant locates the largest *relative*
    gap (ratio of consecutive distances).
    """

    def __init__(self, tol=1.0e-4, cutoff=8.0):
        """
        Args:
            tol (float): tolerance parameter for bond determination
                (default: 1E-4).
            cutoff (float): cutoff radius in Angstrom to look for near-neighbor
                atoms. Defaults to 8.0.
        """
        self.tol = tol
        self.cutoff = cutoff

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return False

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        site = structure[n]
        neighbors = structure.get_neighbors(site, self.cutoff)
        distances = sorted(nb.nn_distance for nb in neighbors)
        # Ratio of each consecutive pair of sorted distances.
        gaps = [distances[k + 1] / distances[k] for k in range(len(distances) - 1)]
        # Cut at the distance just below the largest relative gap.
        d_max = distances[gaps.index(max(gaps))]
        d_min = distances[0]
        siw = []
        for nb in neighbors:
            if nb.nn_distance < d_max + self.tol:
                siw.append(
                    {
                        "site": nb,
                        "image": self._get_image(structure, nb),
                        # Weight by shortest-bond ratio.
                        "weight": d_min / nb.nn_distance,
                        "site_index": self._get_original_site(structure, nb),
                    }
                )
        return siw
class BrunnerNN_real(NearNeighbors):
    """
    Coordination-number determination via Brunner's algorithm: neighbors
    are the atoms lying inside the largest gap found in the sorted
    interatomic distances. This variant locates the largest *absolute*
    gap in real-space distances.
    """

    def __init__(self, tol=1.0e-4, cutoff=8.0):
        """
        Args:
            tol (float): tolerance parameter for bond determination
                (default: 1E-4).
            cutoff (float): cutoff radius in Angstrom to look for near-neighbor
                atoms. Defaults to 8.0.
        """
        self.tol = tol
        self.cutoff = cutoff

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return False

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        site = structure[n]
        neighbors = structure.get_neighbors(site, self.cutoff)
        distances = sorted(nb.nn_distance for nb in neighbors)
        # Difference between each consecutive pair of sorted distances.
        gaps = [distances[k + 1] - distances[k] for k in range(len(distances) - 1)]
        # Cut at the distance just below the largest gap.
        d_max = distances[gaps.index(max(gaps))]
        d_min = distances[0]
        siw = []
        for nb in neighbors:
            if nb.nn_distance < d_max + self.tol:
                siw.append(
                    {
                        "site": nb,
                        "image": self._get_image(structure, nb),
                        # Weight by shortest-bond ratio.
                        "weight": d_min / nb.nn_distance,
                        "site_index": self._get_original_site(structure, nb),
                    }
                )
        return siw
class EconNN(NearNeighbors):
    """
    Determines the average effective coordination number (ECoN) for each
    cation in a given structure using Hoppe's algorithm.

    This method follows the procedure outlined in:

    Hoppe, Rudolf. "Effective coordination numbers (ECoN) and mean fictive ionic
    radii (MEFIR)." Zeitschrift für Kristallographie-Crystalline Materials
    150.1-4 (1979): 23-52.
    """

    def __init__(
        self,
        tol: float = 0.2,
        cutoff: float = 10.0,
        cation_anion: bool = False,
        use_fictive_radius: bool = False,
    ):
        """
        Args:
            tol: Tolerance parameter for bond determination.
            cutoff: Cutoff radius in Angstrom to look for near-neighbor atoms.
            cation_anion: If set to True, will restrict bonding targets to
                sites with opposite or zero charge. Requires an oxidation states
                on all sites in the structure.
            use_fictive_radius: Whether to use the fictive radius in the
                EcoN calculation. If False, the bond distance will be used.
        """
        self.tol = tol
        self.cutoff = cutoff
        self.cation_anion = cation_anion
        self.use_fictive_radius = use_fictive_radius

    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True

    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True

    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return True

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        site = structure[n]
        neighbors = structure.get_neighbors(site, self.cutoff)

        if self.cation_anion and hasattr(site.specie, "oxi_state"):
            # Filter out neighbors of like charge (neutral sites always pass).
            if site.specie.oxi_state >= 0:
                neighbors = [nb for nb in neighbors if nb.oxi_state <= 0]
            else:
                neighbors = [nb for nb in neighbors if nb.oxi_state >= 0]

        if self.use_fictive_radius:
            # Calculate fictive ionic radii.
            firs = [_get_fictive_ionic_radius(site, neighbor) for neighbor in neighbors]
        else:
            # Just use the bond distance.
            firs = [neighbor.nn_distance for neighbor in neighbors]

        # Iteratively solve the mean fictive ionic radius (MEFIR);
        # follows equation 4 in Hoppe's ECoN paper. This is guaranteed
        # to converge.
        mefir = _get_mean_fictive_ionic_radius(firs)
        previous = float("inf")
        while abs(previous - mefir) > 1e-4:
            previous = mefir
            mefir = _get_mean_fictive_ionic_radius(firs, minimum_fir=mefir)

        siw = []
        for neighbor, fir in zip(neighbors, firs):
            if neighbor.nn_distance < self.cutoff:
                # Hoppe's exponential bond weight.
                weight = exp(1 - (fir / mefir) ** 6)
                if weight > self.tol:
                    siw.append(
                        {
                            "site": neighbor,
                            "image": self._get_image(structure, neighbor),
                            "weight": weight,
                            "site_index": self._get_original_site(structure, neighbor),
                        }
                    )
        return siw
def _get_fictive_ionic_radius(site: Site, neighbor: PeriodicNeighbor) -> float:
    """
    Get the fictive ionic radius for a central site / neighbor pair.

    Follows equation 1 of:

    Hoppe, Rudolf. "Effective coordination numbers (ECoN) and mean fictive ionic
    radii (MEFIR)." Zeitschrift für Kristallographie-Crystalline Materials
    150.1-4 (1979): 23-52.

    Args:
        site: The central site.
        neighbor: The neighboring site.

    Returns:
        Hoppe's fictive ionic radius.
    """
    # Fall back to the default (tabulated) radius whenever the specific
    # radius lookup yields zero.
    r_site = _get_radius(site)
    if r_site == 0:
        r_site = _get_default_radius(site)
    r_neigh = _get_radius(neighbor)
    if r_neigh == 0:
        r_neigh = _get_default_radius(neighbor)
    return neighbor.nn_distance * (r_site / (r_site + r_neigh))
def _get_mean_fictive_ionic_radius(
fictive_ionic_radii: List[float],
minimum_fir: Optional[float] = None,
) -> float:
"""
Returns the mean fictive ionic radius.
Follows equation 2:
Hoppe, Rudolf. "Effective coordination numbers (ECoN) and mean fictive ionic
radii (MEFIR)." Zeitschrift für Kristallographie-Crystalline Materials
150.1-4 (1979): 23-52.
Args:
fictive_ionic_radii: List of fictive ionic radii for a center site
and its neighbors.
minimum_fir: Minimum fictive ionic radius to use.
Returns:
Hoppe's mean fictive ionic radius.
"""
if not minimum_fir:
minimum_fir = min(fictive_ionic_radii)
weighted_sum = 0.0
total_sum = 0.0
for fir in fictive_ionic_radii:
weighted_sum += fir * exp(1 - (fir / minimum_fir) ** 6)
total_sum += exp(1 - (fir / minimum_fir) ** 6)
return weighted_sum / total_sum
class CrystalNN(NearNeighbors):
    """
    This is custom near neighbor method intended for use in all kinds of
    periodic structures (metals, minerals, porous structures, etc). It is based
    on a Voronoi algorithm and uses the solid angle weights to determine the
    probability of various coordination environments. The algorithm can also
    modify probability using smooth distance cutoffs as well as Pauling
    electronegativity differences. The output can either be the most probable
    coordination environment or a weighted list of coordination environments.
    """
    # Result container for get_nn_data():
    #   all_nninfo - every candidate neighbor with its final weight
    #   cn_weights - dict of coordination number -> probability weight
    #   cn_nninfo  - dict of coordination number -> neighbor-info dicts
    NNData = namedtuple("NNData", ["all_nninfo", "cn_weights", "cn_nninfo"])
    def __init__(
        self,
        weighted_cn=False,
        cation_anion=False,
        distance_cutoffs=(0.5, 1),
        x_diff_weight=3.0,
        porous_adjustment=True,
        search_cutoff=7,
        fingerprint_length=None,
    ):
        """
        Initialize CrystalNN with desired parameters. Default parameters assume
        "chemical bond" type behavior is desired. For geometric neighbor
        finding (e.g., structural framework), set (i) distance_cutoffs=None,
        (ii) x_diff_weight=0.0 and (optionally) (iii) porous_adjustment=False
        which will disregard the atomic identities and perform best for a purely
        geometric match.
        Args:
            weighted_cn: (bool) if set to True, will return fractional weights
                for each potential near neighbor.
            cation_anion: (bool) if set True, will restrict bonding targets to
                sites with opposite or zero charge. Requires an oxidation states
                on all sites in the structure.
            distance_cutoffs: ([float, float]) - if not None, penalizes neighbor
                distances greater than sum of covalent radii plus
                distance_cutoffs[0]. Distances greater than covalent radii sum
                plus distance_cutoffs[1] are enforced to have zero weight.
            x_diff_weight: (float) - if multiple types of neighbor elements are
                possible, this sets preferences for targets with higher
                electronegativity difference.
            porous_adjustment: (bool) - if True, readjusts Voronoi weights to
                better describe layered / porous structures
            search_cutoff: (float) cutoff in Angstroms for initial neighbor
                search; this will be adjusted if needed internally
            fingerprint_length: (int) if a fixed_length CN "fingerprint" is
                desired from get_nn_data(), set this parameter
        """
        self.weighted_cn = weighted_cn
        self.cation_anion = cation_anion
        self.distance_cutoffs = distance_cutoffs
        # an explicit None is treated the same as "no electronegativity weighting"
        self.x_diff_weight = x_diff_weight if x_diff_weight is not None else 0
        self.search_cutoff = search_cutoff
        self.porous_adjustment = porous_adjustment
        self.fingerprint_length = fingerprint_length
    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True
    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return False
    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor information.
        Args:
            structure: (Structure) pymatgen Structure
            n: (int) index of target site
        Returns:
            siw (list of dicts): each dictionary provides information
                about a single near neighbor, where key 'site' gives
                access to the corresponding Site object, 'image' gives
                the image location, and 'weight' provides the weight
                that a given near-neighbor site contributes
                to the coordination number (1 or smaller), 'site_index'
                gives index of the corresponding site in
                the original structure.
        """
        nndata = self.get_nn_data(structure, n)
        if not self.weighted_cn:
            # unweighted mode: pick the most probable CN and give each of
            # its neighbors a weight of exactly 1
            max_key = max(nndata.cn_weights, key=lambda k: nndata.cn_weights[k])
            nn = nndata.cn_nninfo[max_key]
            for entry in nn:
                entry["weight"] = 1
            return nn
        # weighted mode: a neighbor's weight is the summed probability of
        # every CN assignment that includes it
        for entry in nndata.all_nninfo:
            weight = 0
            for cn in nndata.cn_nninfo:
                for cn_entry in nndata.cn_nninfo[cn]:
                    if entry["site"] == cn_entry["site"]:
                        weight += nndata.cn_weights[cn]
            entry["weight"] = weight
        return nndata.all_nninfo
    def get_nn_data(self, structure, n, length=None):
        """
        The main logic of the method to compute near neighbor.
        Args:
            structure: (Structure) enclosing structure object
            n: (int) index of target site to get NN info for
            length: (int) if set, will return a fixed range of CN numbers
        Returns:
            a namedtuple (NNData) object that contains:
                - all near neighbor sites with weights
                - a dict of CN -> weight
                - a dict of CN -> associated near neighbor sites
        """
        length = length or self.fingerprint_length
        # determine possible bond targets
        target = None
        if self.cation_anion:
            target = []
            m_oxi = structure[n].specie.oxi_state
            for site in structure:
                if site.specie.oxi_state * m_oxi <= 0:  # opposite charge
                    target.append(site.specie)
            if not target:
                raise ValueError("No valid targets for site within cation_anion constraint!")
        # get base VoronoiNN targets
        cutoff = self.search_cutoff
        vnn = VoronoiNN(weight="solid_angle", targets=target, cutoff=cutoff)
        nn = vnn.get_nn_info(structure, n)
        # solid angle weights can be misleading in open / porous structures
        # adjust weights to correct for this behavior
        if self.porous_adjustment:
            for x in nn:
                x["weight"] *= x["poly_info"]["solid_angle"] / x["poly_info"]["area"]
        # adjust solid angle weight based on electronegativity difference
        if self.x_diff_weight > 0:
            for entry in nn:
                X1 = structure[n].specie.X
                X2 = entry["site"].specie.X
                if math.isnan(X1) or math.isnan(X2):
                    # missing electronegativity data: leave weight unchanged
                    chemical_weight = 1
                else:
                    # note: 3.3 is max deltaX between 2 elements
                    chemical_weight = 1 + self.x_diff_weight * math.sqrt(abs(X1 - X2) / 3.3)
                entry["weight"] = entry["weight"] * chemical_weight
        # sort nearest neighbors from highest to lowest weight
        nn = sorted(nn, key=lambda x: x["weight"], reverse=True)
        if nn[0]["weight"] == 0:
            # no neighbor has any weight left: report CN = 0 with certainty
            return self.transform_to_length(self.NNData([], {0: 1.0}, {0: []}), length)
        # renormalize weights so the highest weight is 1.0
        highest_weight = nn[0]["weight"]
        for entry in nn:
            entry["weight"] = entry["weight"] / highest_weight
        # adjust solid angle weights based on distance
        if self.distance_cutoffs:
            r1 = _get_radius(structure[n])
            for entry in nn:
                r2 = _get_radius(entry["site"])
                if r1 > 0 and r2 > 0:
                    d = r1 + r2
                else:
                    warnings.warn(
                        "CrystalNN: cannot locate an appropriate radius, "
                        "covalent or atomic radii will be used, this can lead "
                        "to non-optimal results."
                    )
                    d = _get_default_radius(structure[n]) + _get_default_radius(entry["site"])
                dist = np.linalg.norm(structure[n].coords - entry["site"].coords)
                dist_weight = 0
                # smooth (cosine) taper from full weight at d + cutoffs[0]
                # down to zero weight at d + cutoffs[1]
                cutoff_low = d + self.distance_cutoffs[0]
                cutoff_high = d + self.distance_cutoffs[1]
                if dist <= cutoff_low:
                    dist_weight = 1
                elif dist < cutoff_high:
                    dist_weight = (math.cos((dist - cutoff_low) / (cutoff_high - cutoff_low) * math.pi) + 1) * 0.5
                entry["weight"] = entry["weight"] * dist_weight
        # sort nearest neighbors from highest to lowest weight
        nn = sorted(nn, key=lambda x: x["weight"], reverse=True)
        if nn[0]["weight"] == 0:
            return self.transform_to_length(self.NNData([], {0: 1.0}, {0: []}), length)
        for entry in nn:
            entry["weight"] = round(entry["weight"], 3)
            del entry["poly_info"]  # trim
        # remove entries with no weight
        nn = [x for x in nn if x["weight"] > 0]
        # get the transition distances, i.e. all distinct weights
        dist_bins = []
        for entry in nn:
            if not dist_bins or dist_bins[-1] != entry["weight"]:
                dist_bins.append(entry["weight"])
        dist_bins.append(0)
        # main algorithm to determine fingerprint from bond weights
        cn_weights = {}  # CN -> score for that CN
        cn_nninfo = {}  # CN -> list of nearneighbor info for that CN
        for idx, val in enumerate(dist_bins):
            if val != 0:
                # each distinct weight defines a candidate CN (all neighbors
                # with weight >= that value); its probability is a semicircle
                # quadrature over the adjacent weight interval
                nn_info = []
                for entry in nn:
                    if entry["weight"] >= val:
                        nn_info.append(entry)
                cn = len(nn_info)
                cn_nninfo[cn] = nn_info
                cn_weights[cn] = self._semicircle_integral(dist_bins, idx)
        # add zero coord
        cn0_weight = 1.0 - sum(cn_weights.values())
        if cn0_weight > 0:
            cn_nninfo[0] = []
            cn_weights[0] = cn0_weight
        return self.transform_to_length(self.NNData(nn, cn_weights, cn_nninfo), length)
    def get_cn(self, structure, n, use_weights=False):
        """
        Get coordination number, CN, of site with index n in structure.
        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine CN.
            use_weights (boolean): flag indicating whether (True)
                to use weights for computing the coordination number
                or not (False, default: each coordinated site has equal
                weight).
        Returns:
            cn (integer or float): coordination number.
        Raises:
            ValueError: if use_weights does not match self.weighted_cn.
        """
        if self.weighted_cn != use_weights:
            raise ValueError("The weighted_cn parameter and use_weights " "parameter should match!")
        return super().get_cn(structure, n, use_weights)
    def get_cn_dict(self, structure, n, use_weights=False):
        """
        Get coordination number, CN, of each element bonded to site with index n in structure
        Args:
            structure (Structure): input structure
            n (integer): index of site for which to determine CN.
            use_weights (boolean): flag indicating whether (True)
                to use weights for computing the coordination number
                or not (False, default: each coordinated site has equal
                weight).
        Returns:
            cn (dict): dictionary of CN of each element bonded to site
        Raises:
            ValueError: if use_weights does not match self.weighted_cn.
        """
        if self.weighted_cn != use_weights:
            raise ValueError("The weighted_cn parameter and use_weights " "parameter should match!")
        return super().get_cn_dict(structure, n, use_weights)
    @staticmethod
    def _semicircle_integral(dist_bins, idx):
        """
        An internal method to get an integral between two bounds of a unit
        semicircle. Used in algorithm to determine bond probabilities.
        Args:
            dist_bins: (float) list of all possible bond weights
            idx: (float) index of starting bond weight
        Returns:
            (float) integral of portion of unit semicircle
        """
        r = 1
        x1 = dist_bins[idx]
        x2 = dist_bins[idx + 1]
        # closed-form antiderivative of sqrt(r^2 - x^2), evaluated at the
        # two bin edges; the x1 == 1 case avoids atan(1/0) at the endpoint
        if dist_bins[idx] == 1:
            area1 = 0.25 * math.pi * r ** 2
        else:
            area1 = 0.5 * ((x1 * math.sqrt(r ** 2 - x1 ** 2)) + (r ** 2 * math.atan(x1 / math.sqrt(r ** 2 - x1 ** 2))))
        area2 = 0.5 * ((x2 * math.sqrt(r ** 2 - x2 ** 2)) + (r ** 2 * math.atan(x2 / math.sqrt(r ** 2 - x2 ** 2))))
        # normalize by the quarter-circle area so all bins sum to at most 1
        return (area1 - area2) / (0.25 * math.pi * r ** 2)
    @staticmethod
    def transform_to_length(nndata, length):
        """
        Given NNData, transforms data to the specified fingerprint length
        by padding missing coordination numbers with zero weight.
        Args:
            nndata: (NNData)
            length: (int) desired length of NNData
        Returns:
            NNData with cn_weights/cn_nninfo entries for all CN in range(length).
        """
        if length is None:
            return nndata
        if length:
            for cn in range(length):
                if cn not in nndata.cn_weights:
                    nndata.cn_weights[cn] = 0
                    nndata.cn_nninfo[cn] = []
        return nndata
def _get_default_radius(site):
"""
An internal method to get a "default" covalent/element radius
Args:
site: (Site)
Returns:
Covalent radius of element on site, or Atomic radius if unavailable
"""
try:
return CovalentRadius.radius[site.specie.symbol]
except Exception:
return site.specie.atomic_radius
def _get_radius(site):
"""
An internal method to get the expected radius for a site with
oxidation state.
Args:
site: (Site)
Returns:
Oxidation-state dependent radius: ionic, covalent, or atomic.
Returns 0 if no oxidation state or appropriate radius is found.
"""
if hasattr(site.specie, "oxi_state"):
el = site.specie.element
oxi = site.specie.oxi_state
if oxi == 0:
return _get_default_radius(site)
if oxi in el.ionic_radii:
return el.ionic_radii[oxi]
# e.g., oxi = 2.667, average together 2+ and 3+ radii
if int(math.floor(oxi)) in el.ionic_radii and int(math.ceil(oxi)) in el.ionic_radii:
oxi_low = el.ionic_radii[int(math.floor(oxi))]
oxi_high = el.ionic_radii[int(math.ceil(oxi))]
x = oxi - int(math.floor(oxi))
return (1 - x) * oxi_low + x * oxi_high
if oxi > 0 and el.average_cationic_radius > 0:
return el.average_cationic_radius
if el.average_anionic_radius > 0 > oxi:
return el.average_anionic_radius
else:
warnings.warn(
"No oxidation states specified on sites! For better results, set "
"the site oxidation states in the structure."
)
return 0
class CutOffDictNN(NearNeighbors):
    """
    A very basic NN class using a dictionary of fixed
    cut-off distances. Can also be used with no dictionary
    defined for a Null/Empty NN class.
    """
    def __init__(self, cut_off_dict=None):
        """
        Args:
            cut_off_dict (Dict[str, float]): a dictionary
                of cut-off distances, e.g. {('Fe','O'): 2.0} for
                a maximum Fe-O bond length of 2.0 Angstroms.
                Note that if your structure is oxidation state
                decorated, the cut-off distances will have to
                explicitly include the oxidation state, e.g.
                {('Fe2+', 'O2-'): 2.0}
        """
        self.cut_off_dict = cut_off_dict or {}
        # for convenience
        # _max_dist is the largest cut-off; it bounds the initial
        # neighbor search radius in get_nn_info()
        self._max_dist = 0.0
        # _lookup_dict maps sp1 -> {sp2: cutoff}, entered in both
        # directions so (A, B) and (B, A) resolve to the same cut-off
        lookup_dict = defaultdict(dict)
        for (sp1, sp2), dist in self.cut_off_dict.items():
            lookup_dict[sp1][sp2] = dist
            lookup_dict[sp2][sp1] = dist
            if dist > self._max_dist:
                self._max_dist = dist
        self._lookup_dict = lookup_dict
    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True
    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True
    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return True
    @staticmethod
    def from_preset(preset):
        """
        Initialise a CutOffDictNN according to a preset set of cut-offs.
        Args:
            preset (str): A preset name. The list of supported presets are:
                - "vesta_2019": The distance cut-offs used by the VESTA
                  visualisation program.
        Returns:
            A CutOffDictNN using the preset cut-off dictionary.
        Raises:
            ValueError: if the preset name is not recognised.
        """
        if preset == "vesta_2019":
            cut_offs = loadfn(os.path.join(_directory, "vesta_cutoffs.yaml"))
            return CutOffDictNN(cut_off_dict=cut_offs)
        raise ValueError("Unrecognised preset: {}".format(preset))
    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure.
        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.
        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        site = structure[n]
        neighs_dists = structure.get_neighbors(site, self._max_dist)
        nn_info = []
        for nn in neighs_dists:
            n_site = nn
            dist = nn.nn_distance
            # species pairs without an entry default to cut-off 0.0,
            # i.e. they are never considered bonded
            neigh_cut_off_dist = self._lookup_dict.get(site.species_string, {}).get(n_site.species_string, 0.0)
            if dist < neigh_cut_off_dist:
                nn_info.append(
                    {
                        "site": n_site,
                        # NOTE(review): "weight" is set to the raw bond
                        # distance here, not a normalized weight — confirm
                        # downstream consumers expect this convention.
                        "image": self._get_image(structure, n_site),
                        "weight": dist,
                        "site_index": self._get_original_site(structure, n_site),
                    }
                )
        return nn_info
class Critic2NN(NearNeighbors):
    """
    Performs a topological analysis using critic2 to obtain
    neighbor information, using a sum of atomic charge
    densities. If an actual charge density is available
    (e.g. from a VASP CHGCAR), see Critic2Caller directly
    instead.
    """
    def __init__(self):
        """
        Init for Critic2NN.
        """
        # we cache the last-used structure, in case user
        # calls get_nn_info() repeatedly for different
        # sites in the same structure to save redundant
        # computations
        self.__last_structure = None
        self.__last_bonded_structure = None
    @property
    def structures_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Structure
        objects?
        """
        return True
    @property
    def molecules_allowed(self):
        """
        Boolean property: can this NearNeighbors class be used with Molecule
        objects?
        """
        return True
    @property
    def extend_structure_molecules(self):
        """
        Boolean property: Do Molecules need to be converted to Structures to use
        this NearNeighbors class? Note: this property is not defined for classes
        for which molecules_allowed == False.
        """
        return True
    def get_bonded_structure(self, structure, decorate=False):
        """
        :param structure: Input structure
        :param decorate: Whether to decorate the structure
        :return: Bonded structure
        """
        # not a top-level import because critic2 is an optional
        # dependency, only want to raise an import error if
        # Critic2NN() is used
        from pymatgen.command_line.critic2_caller import Critic2Caller
        # reuse the cached result when the same structure is queried again;
        # otherwise run critic2 (expensive external call) and refresh cache
        if structure == self.__last_structure:
            sg = self.__last_bonded_structure
        else:
            c2_output = Critic2Caller(structure).output
            sg = c2_output.structure_graph()
            self.__last_structure = structure
            self.__last_bonded_structure = sg
        if decorate:
            order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
            sg.structure.add_site_property("order_parameters", order_parameters)
        return sg
    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure.
        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.
        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        sg = self.get_bonded_structure(structure)
        # translate graph edges into the standard NearNeighbors dict format
        return [
            {
                "site": connected_site.site,
                "image": connected_site.jimage,
                "weight": connected_site.weight,
                "site_index": connected_site.index,
            }
            for connected_site in sg.get_connected_sites(n)
        ]
|
davidwaroquiers/pymatgen
|
pymatgen/analysis/local_env.py
|
Python
|
mit
| 169,341
|
[
"Gaussian",
"Jmol",
"VASP",
"pymatgen"
] |
025cb9b39f00b7943ac3870d6cb947bcedc154af4cdc81ab91d4d72246f2b601
|
# vi: ts=8 sts=4 sw=4 et
#
# check.py: form checking
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
from draco2.form.exception import *
from draco2.form.control import Control, ScalarControl
from draco2.form.visit import FormVisitor
class CheckVisitor(FormVisitor):
    """Form checker.
    This visitor checks all nodes in a form.
    """
    # NOTE: this module uses Python 2-only syntax ("raise Exc, msg" and
    # the basestring builtin) and will not run under Python 3.
    def visit_control(self, control):
        # Validate a single control definition: it must be a Control with
        # a string "name"; "label", "type" and "default" are optional but
        # type-checked when present. Raises FormDefinitionError otherwise.
        # NOTE(review): issubclass() implies controls are passed as classes,
        # yet the message says "instance" — confirm which is intended.
        if not issubclass(control, Control):
            m = 'Not a Control instance: %s' % control
            raise FormDefinitionError, m
        if control.name is None:
            m = 'Property "name" not set for control %s' % control
            raise FormDefinitionError, m
        if not isinstance(control.name, basestring):
            m = 'Property "name" not string in control %s.' % control
            raise FormDefinitionError, m
        if control.label is not None and not \
                isinstance(control.label, basestring):
            m = 'Property "label" not a string in control %s' % control
            raise FormDefinitionError, m
        if isinstance(control, ScalarControl) and control.type is not None \
                and not isinstance(control.type, type):
            m = 'Property "type" not a type in control %s.' % control
            raise FormDefinitionError, m
        if isinstance(control, ScalarControl) and control.default is not None \
                and not isinstance(control.default, basestring):
            m = 'Property "default" not a string in control %s.' % control
            raise FormDefinitionError, m
    def visit_form(self, form):
        # Control names must be unique within the inputs and within the
        # outputs (the two namespaces are checked independently).
        names = set()
        for co in form.inputs:
            if co.name in names:
                m = 'Duplicate input control name %s in form %s.' % (co.name, form)
                raise FormDefinitionError, m
            names.add(co.name)
        names = set()
        for co in form.outputs:
            if co.name in names:
                m = 'Duplicate output control name %s in form %s.' % (co.name, form)
                raise FormDefinitionError, m
            names.add(co.name)
|
geertj/draco2
|
draco2/form/check.py
|
Python
|
mit
| 2,382
|
[
"VisIt"
] |
6a01bed99254c135f556f1497be4c90021e207cbb8310ad97b47712818e171d6
|
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Create the data ############################################################
import numpy as np
# 200^3 grid over the cube [-0.5, 0.5]^3 (ogrid keeps the three axes
# as broadcastable 1-D arrays rather than full 3-D arrays)
x, y, z = np.ogrid[- .5:.5:200j, - .5:.5:200j, - .5:.5:200j]
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
# Generalized Laguerre polynomial (3, 2)
L = - r ** 3 / 6 + 5. / 2 * r ** 2 - 10 * r + 6
# Spherical harmonic (3, 2)
Y = (x + y * 1j) ** 2 * z / r ** 3
# Complex-valued orbital-like field; magnitude and phase are
# visualized separately below
Phi = L * Y * np.exp(- r) * r ** 2
# Plot it ####################################################################
from mayavi import mlab
mlab.figure(1, fgcolor=(1, 1, 1), bgcolor=(0, 0, 0))
# We create a scalar field with the module of Phi as the scalar
src = mlab.pipeline.scalar_field(np.abs(Phi))
# And we add the phase of Phi as an additional array
# This is a tricky part: the layout of the new array needs to be the same
# as the existing dataset, and no checks are performed. The shape needs
# to be the same, and so should the data. Failure to do so can result in
# segfaults.
# (.T.ravel() flattens in the ordering the VTK image data expects)
src.image_data.point_data.add_array(np.angle(Phi).T.ravel())
# We need to give a name to our new dataset.
src.image_data.point_data.get_array(1).name = 'angle'
# Make sure that the dataset is up to date with the different arrays:
src.update()
# We select the 'scalar' attribute, ie the norm of Phi
src2 = mlab.pipeline.set_active_attribute(src, point_scalars='scalar')
# Cut isosurfaces of the norm
contour = mlab.pipeline.contour(src2)
# Now we select the 'angle' attribute, ie the phase of Phi
contour2 = mlab.pipeline.set_active_attribute(contour,
                                              point_scalars='angle')
# And we display the surface. The colormap is the current attribute: the phase.
mlab.pipeline.surface(contour2, colormap='hsv')
mlab.colorbar(title='Phase', orientation='vertical', nb_labels=3)
# blocks until the interactive window is closed
mlab.show()
|
s0vereign/Ahti
|
utils/pyvis/spherical.py
|
Python
|
gpl-3.0
| 1,885
|
[
"Mayavi"
] |
f5cb2261ae9ab45e35c9d9166aecd11638af11abe72cf28e49c492d64d8f706f
|
#### PATTERN | DE | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2012 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for German word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - attributive and predicative of adjectives,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX German morphology word forms):
# 75% for gender()
# 72% for pluralize()
# 84% for singularize() (for nominative)
# 87% for Verbs.find_lemma()
# 87% for Verbs.find_lexeme()
# 98% for predicative
from __future__ import unicode_literals
from __future__ import division
from builtins import str, bytes, dict, int
from builtins import map, zip, filter
from builtins import object, range
import os
import sys
import re
try:
    # Directory containing this module, used below to locate data files
    # (e.g. de-verbs.txt) and to extend sys.path.
    MODULE = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # __file__ is undefined in some embedded/frozen interpreters; fall back
    # to the working directory. (Narrowed from a bare "except:", which would
    # also have swallowed unrelated errors such as KeyboardInterrupt.)
    MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
INDICATIVE, IMPERATIVE, SUBJUNCTIVE,
PROGRESSIVE,
PARTICIPLE, GERUND
)
sys.path.pop(0)
# Penn-Treebank-style part-of-speech tags used throughout this module.
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"
VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)  # case-insensitive vowel match
is_vowel = lambda ch: ch in VOWELS  # note: case-sensitive, unlike re_vowel
#### ARTICLE #######################################################################################
# German inflection depends on gender, role and number + the determiner (if any).
# Inflection gender.
# Masculine is the most common, so it is the default for all functions.
MASCULINE, FEMININE, NEUTER, PLURAL = \
MALE, FEMALE, NEUTRAL, PLURAL = \
M, F, N, PL = "m", "f", "n", "p"
# Inflection role.
# - nom = subject, "Der Hund bellt" (the dog barks).
# - acc = object, "Das Mädchen küsst den Hund" (the girl kisses the dog).
# - dat = object (indirect), "Der Mann gibt einen Knochen zum Hund" (the man gives the dog a bone).
# - gen = property, "die Knochen des Hundes" (the dog's bone).
NOMINATIVE, ACCUSATIVE, DATIVE, GENITIVE = SUBJECT, OBJECT, INDIRECT, PROPERTY = \
"nominative", "accusative", "dative", "genitive"
# Lookup tables keyed on (gender initial, role prefix), as produced by
# definite_article() / indefinite_article() below.
article_definite = {
    ("m", "nom"): "der", ("f", "nom"): "die", ("n", "nom"): "das", ("p", "nom"): "die",
    ("m", "acc"): "den", ("f", "acc"): "die", ("n", "acc"): "das", ("p", "acc"): "die",
    ("m", "dat"): "dem", ("f", "dat"): "der", ("n", "dat"): "dem", ("p", "dat"): "den",
    ("m", "gen"): "des", ("f", "gen"): "der", ("n", "gen"): "des", ("p", "gen"): "der",
}
article_indefinite = {
    ("m", "nom"): "ein" , ("f", "nom"): "eine" , ("n", "nom"): "ein" , ("p", "nom"): "eine",
    ("m", "acc"): "einen", ("f", "acc"): "eine" , ("n", "acc"): "ein" , ("p", "acc"): "eine",
    ("m", "dat"): "einem", ("f", "dat"): "einer", ("n", "dat"): "einem", ("p", "dat"): "einen",
    ("m", "gen"): "eines", ("f", "gen"): "einer", ("n", "gen"): "eines", ("p", "gen"): "einer",
}
def definite_article(word, gender=MALE, role=SUBJECT):
    """ Returns the definite article (der/die/das/die) for a given word.
        Only the first letter of the gender and the first three letters
        of the role are significant; None is returned for unknown keys.
    """
    key = (gender[:1].lower(), role[:3].lower())
    return article_definite.get(key)
def indefinite_article(word, gender=MALE, role=SUBJECT):
    """ Returns the indefinite article (ein) for a given word.
        Only the first letter of the gender and the first three letters
        of the role are significant; None is returned for unknown keys.
    """
    key = (gender[:1].lower(), role[:3].lower())
    return article_indefinite.get(key)
# Selector values for article(); also reused by referenced() below.
DEFINITE = "definite"
INDEFINITE = "indefinite"
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
    """
    # An explicit conditional replaces the error-prone "cond and a or b"
    # idiom, which would silently fall through to the indefinite article
    # whenever definite_article() returned a falsy value.
    if function == DEFINITE:
        return definite_article(word, gender, role)
    return indefinite_article(word, gender, role)
# Alias that survives shadowing of "article" by the parameter name in
# referenced() below.
_article = article
def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns a string with the article + the word.
    """
    determiner = _article(word, article, gender, role)
    return "%s %s" % (determiner, word)
#### GENDER #########################################################################################
# Suffix tables used by gender() below: a word ending in one of these
# suffixes is assigned the corresponding grammatical gender.
gender_masculine = (
    "ant", "ast", "ich", "ig", "ismus", "ling", "or", "us"
)
gender_feminine = (
    "a", "anz", "ei", "enz", "heit", "ie", "ik", "in", "keit", "schaf", "sion", "sis",
    "tät", "tion", "ung", "ur"
)
gender_neuter = (
    "chen", "icht", "il", "it", "lein", "ma", "ment", "tel", "tum", "um", "al", "an", "ar",
    "ät", "ent", "ett", "ier", "iv", "o", "on", "nis", "sal"
)
# Fallback: two-letter endings assigned to the gender they most often
# carry ("majority vote"), consulted when no suffix above matches.
gender_majority_vote = {
    MASCULINE: (
        "ab", "af", "ag", "ak", "am", "an", "ar", "at", "au", "ch", "ck", "eb", "ef", "eg",
        "el", "er", "es", "ex", "ff", "go", "hn", "hs", "ib", "if", "ig", "ir", "kt", "lf",
        "li", "ll", "lm", "ls", "lt", "mi", "nd", "nk", "nn", "nt", "od", "of", "og", "or",
        "pf", "ph", "pp", "ps", "rb", "rd", "rf", "rg", "ri", "rl", "rm", "rr", "rs", "rt",
        "rz", "ss", "st", "tz", "ub", "uf", "ug", "uh", "un", "us", "ut", "xt", "zt"
    ),
    FEMININE: (
        "be", "ce", "da", "de", "dt", "ee", "ei", "et", "eu", "fe", "ft", "ge", "he", "hr",
        "ht", "ia", "ie", "ik", "in", "it", "iz", "ka", "ke", "la", "le", "me", "na", "ne",
        "ng", "nz", "on", "pe", "ra", "re", "se", "ta", "te", "ue", "ur", "ve", "ze"
    ),
    NEUTER: (
        "ad", "al", "as", "do", "ed", "eh", "em", "en", "hl", "id", "il", "im", "io", "is",
        "iv", "ix", "ld", "lk", "lo", "lz", "ma", "md", "mm", "mt", "no", "ns", "ol", "om",
        "op", "os", "ot", "pt", "rk", "rn", "ro", "to", "tt", "ul", "um", "uz"
    )
}
def gender(word, pos=NOUN):
    """ Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote).
        Returns None for words that are not nouns.
    """
    if pos != NOUN:
        return None
    w = word.lower()
    # Suffix-based rules first (baseline = 32%), in the original
    # masculine -> feminine -> neuter precedence.
    for suffixes, g in ((gender_masculine, MASCULINE),
                        (gender_feminine, FEMININE),
                        (gender_neuter, NEUTER)):
        if w.endswith(suffixes):
            return g
    # Fall back to the two-letter-ending majority vote.
    for g in gender_majority_vote:
        if w.endswith(gender_majority_vote[g]):
            return g
#### PLURALIZE ######################################################################################
# (singular suffix, plural suffix) pairs; pluralize() scans this list in
# order and applies the first entry whose left side matches the word end.
plural_inflections = [
    ("aal", "äle"   ), ("aat", "aaten"), ("abe", "aben" ), ("ach", "ächer"), ("ade", "aden"  ),
    ("age", "agen"  ), ("ahn", "ahnen"), ("ahr", "ahre" ), ("akt", "akte" ), ("ale", "alen"  ),
    ("ame", "amen"  ), ("amt", "ämter"), ("ane", "anen" ), ("ang", "änge" ), ("ank", "änke"  ),
    ("ann", "änner" ), ("ant", "anten"), ("aph", "aphen"), ("are", "aren" ), ("arn", "arne"  ),
    ("ase", "asen"  ), ("ate", "aten" ), ("att", "ätter"), ("atz", "ätze" ), ("aum", "äume"  ),
    ("aus", "äuser" ), ("bad", "bäder"), ("bel", "bel"  ), ("ben", "ben"  ), ("ber", "ber"   ),
    ("bot", "bote"  ), ("che", "chen" ), ("chs", "chse" ), ("cke", "cken" ), ("del", "del"   ),
    ("den", "den"   ), ("der", "der"  ), ("ebe", "ebe"  ), ("ede", "eden" ), ("ehl", "ehle"  ),
    ("ehr", "ehr"   ), ("eil", "eile" ), ("eim", "eime" ), ("eis", "eise" ), ("eit", "eit"   ),
    ("ekt", "ekte"  ), ("eld", "elder"), ("ell", "elle" ), ("ene", "enen" ), ("enz", "enzen" ),
    ("erd", "erde"  ), ("ere", "eren" ), ("erk", "erke" ), ("ern", "erne" ), ("ert", "erte"  ),
    ("ese", "esen"  ), ("ess", "esse" ), ("est", "este" ), ("etz", "etze" ), ("eug", "euge"  ),
    ("eur", "eure"  ), ("fel", "fel"  ), ("fen", "fen"  ), ("fer", "fer"  ), ("ffe", "ffen"  ),
    ("gel", "gel"   ), ("gen", "gen"  ), ("ger", "ger"  ), ("gie", "gie"  ), ("hen", "hen"   ),
    ("her", "her"   ), ("hie", "hien" ), ("hle", "hlen" ), ("hme", "hmen" ), ("hne", "hnen"  ),
    ("hof", "höfe"  ), ("hre", "hren" ), ("hrt", "hrten"), ("hse", "hsen" ), ("hte", "hten"  ),
    ("ich", "iche"  ), ("ick", "icke" ), ("ide", "iden" ), ("ieb", "iebe" ), ("ief", "iefe"  ),
    ("ieg", "iege"  ), ("iel", "iele" ), ("ien", "ium"  ), ("iet", "iete" ), ("ife", "ifen"  ),
    ("iff", "iffe"  ), ("ift", "iften"), ("ige", "igen" ), ("ika", "ikum" ), ("ild", "ilder" ),
    ("ilm", "ilme"  ), ("ine", "inen" ), ("ing", "inge" ), ("ion", "ionen"), ("ise", "isen"  ),
    ("iss", "isse"  ), ("ist", "isten"), ("ite", "iten" ), ("itt", "itte" ), ("itz", "itze"  ),
    ("ium", "ium"   ), ("kel", "kel"  ), ("ken", "ken"  ), ("ker", "ker"  ), ("lag", "läge"  ),
    ("lan", "läne"  ), ("lar", "lare" ), ("lei", "leien"), ("len", "len"  ), ("ler", "ler"   ),
    ("lge", "lgen"  ), ("lie", "lien" ), ("lle", "llen" ), ("mel", "mel"  ), ("mer", "mer"   ),
    ("mme", "mmen"  ), ("mpe", "mpen" ), ("mpf", "mpfe" ), ("mus", "mus"  ), ("mut", "mut"   ),
    ("nat", "nate"  ), ("nde", "nden" ), ("nen", "nen"  ), ("ner", "ner"  ), ("nge", "ngen"  ),
    ("nie", "nien"  ), ("nis", "nisse"), ("nke", "nken" ), ("nkt", "nkte" ), ("nne", "nnen"  ),
    ("nst", "nste"  ), ("nte", "nten" ), ("nze", "nzen" ), ("ock", "öcke" ), ("ode", "oden"  ),
    ("off", "offe"  ), ("oge", "ogen" ), ("ohn", "öhne" ), ("ohr", "ohre" ), ("olz", "ölzer" ),
    ("one", "onen"  ), ("oot", "oote" ), ("opf", "öpfe" ), ("ord", "orde" ), ("orm", "ormen" ),
    ("orn", "örner" ), ("ose", "osen" ), ("ote", "oten" ), ("pel", "pel"  ), ("pen", "pen"   ),
    ("per", "per"   ), ("pie", "pien" ), ("ppe", "ppen" ), ("rag", "räge" ), ("rau", "raün"  ),
    ("rbe", "rben"  ), ("rde", "rden" ), ("rei", "reien"), ("rer", "rer"  ), ("rie", "rien"  ),
    ("rin", "rinnen"), ("rke", "rken" ), ("rot", "rote" ), ("rre", "rren" ), ("rte", "rten"  ),
    ("ruf", "rufe"  ), ("rzt", "rzte" ), ("sel", "sel"  ), ("sen", "sen"  ), ("ser", "ser"   ),
    ("sie", "sien"  ), ("sik", "sik"  ), ("sse", "ssen" ), ("ste", "sten" ), ("tag", "tage"  ),
    ("tel", "tel"   ), ("ten", "ten"  ), ("ter", "ter"  ), ("tie", "tien" ), ("tin", "tinnen"),
    ("tiv", "tive"  ), ("tor", "toren"), ("tte", "tten" ), ("tum", "tum"  ), ("tur", "turen" ),
    ("tze", "tzen"  ), ("ube", "uben" ), ("ude", "uden" ), ("ufe", "ufen" ), ("uge", "ugen"  ),
    ("uhr", "uhren" ), ("ule", "ulen" ), ("ume", "umen" ), ("ung", "ungen"), ("use", "usen"  ),
    ("uss", "üsse"  ), ("ute", "uten" ), ("utz", "utz"  ), ("ver", "ver"  ), ("weg", "wege"  ),
    ("zer", "zer"   ), ("zug", "züge" ), ("ück", "ücke" )
]
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom=None):
    """ Returns the plural of a given word.
        The inflection is based on probability rather than gender and role
        (the gender and role parameters are accepted for API symmetry with
        the other inflection functions but are not used by the rules below).
        A custom {word: plural} dictionary takes precedence over all rules.
    """
    # A None sentinel replaces the shared mutable default argument ({}),
    # which is evaluated only once at function-definition time.
    if custom is None:
        custom = {}
    if word in custom:
        return custom[word]
    w = word.lower().capitalize()
    if pos == NOUN:
        # Trigram table lookup: first matching word-final suffix wins.
        for a, b in plural_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rules (baseline = 69%).
        if w.startswith("ge"):
            return w
        if w.endswith("gie"):
            return w
        if w.endswith("e"):
            return w + "n"
        if w.endswith("ien"):
            return w[:-2] + "um"
        if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", "tät", "tik", "tum", "u")):
            return w
        if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
            return w + "en"
        if w.endswith("in"):
            return w + "nen"
        if w.endswith("nis"):
            return w + "se"
        if w.endswith(("eld", "ild", "ind")):
            return w + "er"
        if w.endswith("o"):
            return w + "s"
        if w.endswith("a"):
            return w[:-1] + "en"
        # Inflect common umlaut vowels: Kopf => Köpfe.
        if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
            umlaut = w[-3]
            umlaut = umlaut.replace("a", "ä")
            umlaut = umlaut.replace("o", "ö")
            umlaut = umlaut.replace("u", "ü")
            return w[:-3] + umlaut + w[-2:] + "e"
        for a, b in (
                ("ag", "äge"),
                ("ann", "änner"),
                ("aum", "äume"),
                ("aus", "äuser"),
                ("zug", "züge")):
            if w.endswith(a):
                return w[:-len(a)] + b
        return w + "e"
    # Non-nouns are returned capitalized but otherwise unchanged.
    return w
#### SINGULARIZE ###################################################################################
# (plural suffix, singular suffix) pairs; singularize() scans this list in
# order and applies the first entry whose left side matches the word end.
singular_inflections = [
    ( "innen", "in" ), ( "täten", "tät"), ( "ahnen", "ahn"), ( "enten", "ent"), ( "räser", "ras"),
    ( "hrten", "hrt"), ( "ücher", "uch"), ( "örner", "orn"), ( "änder", "and"), ( "ürmer", "urm"),
    ( "ahlen", "ahl"), ( "uhren", "uhr"), ( "ätter", "att"), ( "suren", "sur"), ( "chten", "cht"),
    ( "kuren", "kur"), ( "erzen", "erz"), ( "güter", "gut"), ( "soren", "sor"), ( "änner", "ann"),
    ( "äuser", "aus"), ( "taten", "tat"), ( "isten", "ist"), ( "bäder", "bad"), ( "ämter", "amt"),
    ( "eiten", "eit"), ( "raten", "rat"), ( "ormen", "orm"), ( "ionen", "ion"), ( "nisse", "nis"),
    ( "ölzer", "olz"), ( "ungen", "ung"), ( "läser", "las"), ( "ächer", "ach"), ( "urten", "urt"),
    ( "enzen", "enz"), ( "aaten", "aat"), ( "aphen", "aph"), ( "öcher", "och"), ( "türen", "tür"),
    ( "sonen", "son"), ( "ühren", "ühr"), ( "ühner", "uhn"), ( "toren", "tor"), ( "örter", "ort"),
    ( "anten", "ant"), ( "räder", "rad"), ( "turen", "tur"), ( "äuler", "aul"), ( "änze", "anz"),
    ( "tten", "tte"), ( "mben", "mbe"), ( "ädte", "adt"), ( "llen", "lle"), ( "ysen", "yse"),
    ( "rben", "rbe"), ( "hsen", "hse"), ( "raün", "rau"), ( "rven", "rve"), ( "rken", "rke"),
    ( "ünge", "ung"), ( "üten", "üte"), ( "usen", "use"), ( "tien", "tie"), ( "läne", "lan"),
    ( "iben", "ibe"), ( "ifen", "ife"), ( "ssen", "sse"), ( "gien", "gie"), ( "eten", "ete"),
    ( "rden", "rde"), ( "öhne", "ohn"), ( "ärte", "art"), ( "ncen", "nce"), ( "ünde", "und"),
    ( "uben", "ube"), ( "lben", "lbe"), ( "üsse", "uss"), ( "agen", "age"), ( "räge", "rag"),
    ( "ogen", "oge"), ( "anen", "ane"), ( "sken", "ske"), ( "eden", "ede"), ( "össe", "oss"),
    ( "ürme", "urm"), ( "ggen", "gge"), ( "üren", "üre"), ( "nten", "nte"), ( "ühle", "ühl"),
    ( "änge", "ang"), ( "mmen", "mme"), ( "igen", "ige"), ( "nken", "nke"), ( "äcke", "ack"),
    ( "oden", "ode"), ( "oben", "obe"), ( "ähne", "ahn"), ( "änke", "ank"), ( "inen", "ine"),
    ( "seen", "see"), ( "äfte", "aft"), ( "ulen", "ule"), ( "äste", "ast"), ( "hren", "hre"),
    ( "öcke", "ock"), ( "aben", "abe"), ( "öpfe", "opf"), ( "ugen", "uge"), ( "lien", "lie"),
    ( "ände", "and"), ( "ücke", "ück"), ( "asen", "ase"), ( "aden", "ade"), ( "dien", "die"),
    ( "aren", "are"), ( "tzen", "tze"), ( "züge", "zug"), ( "üfte", "uft"), ( "hien", "hie"),
    ( "nden", "nde"), ( "älle", "all"), ( "hmen", "hme"), ( "ffen", "ffe"), ( "rmen", "rma"),
    ( "olen", "ole"), ( "sten", "ste"), ( "amen", "ame"), ( "höfe", "hof"), ( "üste", "ust"),
    ( "hnen", "hne"), ( "ähte", "aht"), ( "umen", "ume"), ( "nnen", "nne"), ( "alen", "ale"),
    ( "mpen", "mpe"), ( "mien", "mie"), ( "rten", "rte"), ( "rien", "rie"), ( "äute", "aut"),
    ( "uden", "ude"), ( "lgen", "lge"), ( "ngen", "nge"), ( "iden", "ide"), ( "ässe", "ass"),
    ( "osen", "ose"), ( "lken", "lke"), ( "eren", "ere"), ( "üche", "uch"), ( "lüge", "lug"),
    ( "hlen", "hle"), ( "isen", "ise"), ( "ären", "äre"), ( "töne", "ton"), ( "onen", "one"),
    ( "rnen", "rne"), ( "üsen", "üse"), ( "haün", "hau"), ( "pien", "pie"), ( "ihen", "ihe"),
    ( "ürfe", "urf"), ( "esen", "ese"), ( "ätze", "atz"), ( "sien", "sie"), ( "läge", "lag"),
    ( "iven", "ive"), ( "ämme", "amm"), ( "äufe", "auf"), ( "ppen", "ppe"), ( "enen", "ene"),
    ( "lfen", "lfe"), ( "äume", "aum"), ( "nien", "nie"), ( "unen", "une"), ( "cken", "cke"),
    ( "oten", "ote"), ( "mie", "mie"), ( "rie", "rie"), ( "sis", "sen"), ( "rin", "rin"),
    ( "ein", "ein"), ( "age", "age"), ( "ern", "ern"), ( "ber", "ber"), ( "ion", "ion"),
    ( "inn", "inn"), ( "ben", "ben"), ( "äse", "äse"), ( "eis", "eis"), ( "hme", "hme"),
    ( "iss", "iss"), ( "hen", "hen"), ( "fer", "fer"), ( "gie", "gie"), ( "fen", "fen"),
    ( "her", "her"), ( "ker", "ker"), ( "nie", "nie"), ( "mer", "mer"), ( "ler", "ler"),
    ( "men", "men"), ( "ass", "ass"), ( "ner", "ner"), ( "per", "per"), ( "rer", "rer"),
    ( "mus", "mus"), ( "abe", "abe"), ( "ter", "ter"), ( "ser", "ser"), ( "äle", "aal"),
    ( "hie", "hie"), ( "ger", "ger"), ( "tus", "tus"), ( "gen", "gen"), ( "ier", "ier"),
    ( "ver", "ver"), ( "zer", "zer"),
]
singular = {
"Löwen": "Löwe",
}
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom=None):
    """ Returns the singular of a given word.
        The inflection is based on probability rather than gender and role.
        custom is an optional {plural: singular} dictionary of exceptions,
        checked (against the word as given) before the built-in rules.
    """
    # Fix: a mutable default argument ({}) is shared across calls; use a
    # None sentinel instead. Behavior is unchanged for all callers.
    if custom is None:
        custom = {}
    w = word.lower().capitalize()
    if word in custom:
        return custom[word]
    if word in singular:
        return singular[word]
    if pos == NOUN:
        # Most specific suffix mapping wins (e.g., "ände" => "and").
        for a, b in singular_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rule: strip known plural suffixes (baseline = 51%).
        for suffix in ("nen", "en", "n", "e", "er", "s"):
            if w.endswith(suffix):
                w = w[:-len(suffix)]
                break
        # Corrections (these add about 1% accuracy):
        if w.endswith(("rr", "rv", "nz")):
            return w + "e"
        return w
    return w
#### VERB CONJUGATION ##############################################################################
# The verb table was trained on CELEX and contains the top 2000 most frequent verbs.

# Inseparable prefixes: verbs starting with these take no "ge-" in the past
# participle (see Verbs.find_lexeme()).
prefix_inseparable = (
    "be", "emp", "ent", "er", "ge", "miss", "über", "unter", "ver", "voll", "wider", "zer"
)
# Separable prefixes: split off in inflected forms ("auswirkte" <=> "wirkte aus");
# see Verbs.find_lexeme() and Verbs.tenses().
prefix_separable = (
    "ab", "an", "auf", "aus", "bei", "durch", "ein", "fort", "mit", "nach", "vor", "weg",
    "zurück", "zusammen", "zu", "dabei", "daran", "da", "empor", "entgegen", "entlang",
    "fehl", "fest", "gegenüber", "gleich", "herab", "heran", "herauf", "heraus", "herum",
    "her", "hinweg", "hinzu", "hin", "los", "nieder", "statt", "umher", "um", "weg",
    "weiter", "wieder", "zwischen"
) + ( # There are many more...
    "dort", "fertig", "frei", "gut", "heim", "hoch", "klein", "klar", "nahe", "offen", "richtig"
)
# All known prefixes, inseparable first.
prefixes = prefix_inseparable + prefix_separable
def encode_sz(s):
    """ Replaces the Eszett ligature "ß" with "ss" (e.g., "Straße" => "Strasse"). """
    return s.translate(str.maketrans({"ß": "ss"}))
def decode_sz(s):
    """ Restores the Eszett ligature: every "ss" becomes "ß" (e.g., "Strasse" => "Straße"). """
    return "ß".join(s.split("ss"))
class Verbs(_Verbs):
    """ Lexicon of German verb inflections loaded from de-verbs.txt,
        with rule-based fallbacks for unknown verbs.
    """

    def __init__(self):
        # NOTE(review): format appears to select the columns of de-verbs.txt
        # kept for each tense slot, and default fills missing slots from
        # related ones (e.g., slot 6 from slot 4) — confirm against _Verbs.
        _Verbs.__init__(self, os.path.join(MODULE, "de-verbs.txt"),
            language = "de",
            format = [0, 1, 2, 3, 4, 5, 8, 17, 18, 19, 20, 21, 24, 52, 54, 53, 55, 56, 58, 59, 67, 68, 70, 71],
            default = {6: 4, 22: 20, 57: 55, 60: 58, 69: 67, 72: 70}
        )

    def find_lemma(self, verb):
        """ Returns the base form (infinitive) of the given inflected verb,
            using a rule-based approach.
        """
        v = verb.lower()
        # Common prefixes: be-finden and emp-finden probably inflect like finden.
        if not (v.startswith("ge") and v.endswith("t")): # Probably gerund.
            for prefix in prefixes:
                if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                    return prefix + self.inflections[v[len(prefix):]]
        # Common suffixes: setze nieder => niedersetzen.
        # A two-word form carries a separable prefix as its second word.
        b, suffix = " " in v and v.split()[:2] or (v, "")
        # Infinitive -ln: trommeln.
        if b.endswith(("ln", "rn")):
            return b
        # Lemmatize regular inflections (longest suffix first).
        for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
            if b.endswith(x):
                b = b[:-len(x)]; break
        # Subjunctive: hielte => halten, schnitte => schneiden.
        for x, y in (
            ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"),
            ("ien", "ein"), ("iess", "ass"), ( "ieß", "aß" ), ( "iff", "eif" ), ("iss", "eiss"),
            ( "iß", "eiß"), ( "it", "eid"), ( "oss", "iess"), ( "öss", "iess")):
            if b.endswith(x):
                b = b[:-len(x)] + y; break
        # Repair sequences produced by the subjunctive mappings above.
        b = b.replace("eeiss", "eiss")
        b = b.replace("eeid", "eit")
        # Subjunctive: wechselte => wechseln
        if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
            b = b + "e"
        # abknallst != abknalln => abknallen
        if b.endswith(("hl", "ll", "ul", "eil")):
            b = b + "e"
        # Strip ge- from (likely) gerund:
        if b.startswith("ge") and v.endswith("t"):
            b = b[2:]
        # Corrections (these add about 1.5% accuracy):
        if b.endswith(("lnde", "rnde")):
            b = b[:-3]
        if b.endswith(("ae", "al", "öe", "üe")):
            b = b.rstrip("e") + "te"
        if b.endswith("äl"):
            b = b + "e"
        return suffix + b + "n"

    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the inflected forms,
            using a rule-based approach.
        """
        v = verb.lower()
        # Stem = infinitive minus -en, -ln, -rn.
        b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
        # Split common prefixes.
        x, x1, x2 = "", "", ""
        for prefix in prefix_separable:
            if v.startswith(prefix):
                b, x = b[len(prefix):], prefix
                x1 = (" " + x).rstrip()  # " ab": appended to finite forms below.
                x2 = x + "ge"            # NOTE(review): x2 is unused in this method.
                break
        # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
        pl = b.endswith("el") and b[:-2] + "l" or b
        # Present tense 1pl -el: handeln => wir handeln
        pw = v.endswith(("ln", "rn")) and v or b + "en"
        # Present tense ending in -d or -t gets -e:
        pr = b.endswith(("d", "t")) and b + "e" or b
        # Present tense 2sg gets -st, unless stem ends with -s or -z.
        p2 = pr.endswith(("s", "z")) and pr + "t" or pr + "st"
        # Present participle: spiel + -end, arbeiten + -d:
        pp = v.endswith(("en", "ln", "rn")) and v + "d" or v + "end"
        # Past tense regular:
        pt = encode_sz(pr) + "t"
        # Past participle: haushalten => hausgehalten
        ge = (v.startswith(prefix_inseparable) or b.endswith(("r", "t"))) and pt or "ge" + pt
        ge = x and x + "ge" + pt or ge
        # Present subjunctive: stem + -e, -est, -en, -et:
        s1 = encode_sz(pl)
        # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
        s2 = encode_sz(pt)
        # Construct the lexeme:
        lexeme = a = [
            v,
            pl + "e" + x1, p2 + x1, pr + "t" + x1, pw + x1, pr + "t" + x1, pp, # present
            pt + "e" + x1, pt + "est" + x1, pt + "e" + x1, pt + "en" + x1, pt + "et" + x1, ge, # past
            b + "e" + x1, pr + "t" + x1, x + pw, # imperative
            s1 + "e" + x1, s1 + "est" + x1, s1 + "en" + x1, s1 + "et" + x1, # subjunctive I
            s2 + "e" + x1, s2 + "est" + x1, s2 + "en" + x1, s2 + "et" + x1 # subjunctive II
        ]
        # Encode Eszett (ß) and attempt to retrieve from the lexicon.
        # Decode Eszett for present and imperative.
        if encode_sz(v) in self:
            a = self[encode_sz(v)]
            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:]
        # Since the lexicon does not contain imperative for all verbs, don't simply return it.
        # Instead, update the rule-based lexeme with inflections from the lexicon.
        return [a[i] or lexeme[i] for i in range(len(a))]

    def tenses(self, verb, parse=True):
        """ Returns a list of possible tenses for the given inflected verb.
        """
        tenses = _Verbs.tenses(self, verb, parse)
        if len(tenses) == 0:
            # auswirkte => wirkte aus
            # Retry with the separable prefix moved behind the verb.
            for prefix in prefix_separable:
                if verb.startswith(prefix):
                    tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse)
                    break
        return tenses
# Shared lexicon instance + module-level shortcuts to its bound methods.
verbs = Verbs()

conjugate, lemma, lexeme, tenses = \
    verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#### ATTRIBUTIVE & PREDICATIVE #####################################################################
# Adjective endings, keyed by (gender, case):
# gender in ("m", "f", "n", "p") = masculine, feminine, neuter, plural;
# case in ("nom", "acc", "dat", "gen").

# Strong inflection: no article.
adjectives_strong = {
    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "e",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "e",
    ("m", "dat"): "em", ("f", "dat"): "er", ("n", "dat"): "em", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "er", ("n", "gen"): "en", ("p", "gen"): "er",
}

# Mixed inflection: after indefinite article ein & kein and possessive determiners.
adjectives_mixed = {
    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}

# Weak inflection: after definite article.
adjectives_weak = {
    ("m", "nom"): "e", ("f", "nom"): "e" , ("n", "nom"): "e", ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "e", ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}

# Uninflected + exceptions.
adjective_attributive = {
    "etwas" : "etwas",
    "genug" : "genug",
    "viel"  : "viel",
    "wenig" : "wenig"
}
def attributive(adjective, gender=MALE, role=SUBJECT, article=None):
    """ For a predicative adjective, returns the attributive form (lowercase).
        The ending (-e, -em, -en, -er or -es) is taken from the declension
        tables, based on gender (m/f/n/p), case (nom/acc/dat/gen) and the
        preceding article: none => strong, ein/kein/possessive => mixed,
        definite article/demonstrative => weak.
    """
    adj = adjective.lower()
    gen = gender[:1].lower()
    cas = role[:3].lower()
    art = article.lower() if article else None
    # Words that never inflect.
    if adj in adjective_attributive:
        return adjective_attributive[adj]
    # No article, or an article-like word that behaves as none: strong.
    if art is None or art in ("mir", "dir", "ihm", "ein", "etwas", "mehr") \
            or art.startswith(("all", "mehrer", "wenig", "viel")):
        return adj + adjectives_strong.get((gen, cas), "")
    # Indefinite article or possessive determiner: mixed.
    if art.startswith(("ein", "kein", "mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
        return adj + adjectives_mixed.get((gen, cas), "")
    # Definite article or demonstrative/quantifier: weak.
    if art in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
            or art.startswith(("derselb", "derjenig", "jed", "jeglich", "jen", "manch",
                               "dies", "solch", "welch")):
        return adj + adjectives_weak.get((gen, cas), "")
    # Default to strong inflection.
    return adj + adjectives_strong.get((gen, cas), "")
def predicative(adjective):
    """ Returns the predicative adjective (lowercase).
        In German, the attributive form preceding a noun is always used:
        "ein kleiner Junge" => strong, masculine, nominative,
        "eine schöne Frau" => mixed, feminine, nominative,
        "der kleine Prinz" => weak, masculine, nominative, etc.
        The predicative is useful for lemmatization.
    """
    w = adjective.lower()
    if len(w) <= 3:
        return w
    for ending in ("em", "en", "er", "es", "e"):
        if not w.endswith(ending):
            continue
        # Strip the inflection, but never shorten below three characters.
        stem = w[:max(-len(ending), 3 - len(w))]
        if stem.endswith("bl"):    # plausibles => plausibel
            stem = stem[:-1] + "el"
        elif stem.endswith("pr"):  # propres => proper
            stem = stem[:-1] + "er"
        return stem
    return w
#### COMPARATIVE & SUPERLATIVE #####################################################################

# Suffixes inserted by grade(): "schön" => "schöner", "schönst".
COMPARATIVE = "er"
SUPERLATIVE = "st"
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given (inflected)
        adjective, e.g. grade("schönes", SUPERLATIVE) => "schönstes".
    """
    stem = predicative(adjective)
    # Stems ending in a sibilant drop the superlative "s": groß => großt.
    if suffix == SUPERLATIVE and stem.endswith(("s", "ß")):
        suffix = suffix[1:]
    # Insert the suffix between the stem and the original inflection:
    # große => großere, schönes => schöneres.
    return adjective[:len(stem)] + suffix + adjective[len(stem):]
def comparative(adjective):
    """ Returns the comparative form of the given adjective ("-er"). """
    return grade(adjective, suffix=COMPARATIVE)
def superlative(adjective):
    """ Returns the superlative form of the given adjective ("-st"). """
    return grade(adjective, suffix=SUPERLATIVE)
#print(comparative("schönes"))
#print(superlative("schönes"))
#print(superlative("große"))
|
clips/pattern
|
pattern/text/de/inflect.py
|
Python
|
bsd-3-clause
| 29,028
|
[
"ASE"
] |
5974a8a6881715581dc47711f7230a387cedaa8fdc283416393405c55af9753f
|
# -*- coding: utf-8 -*-
from sympy.matrices import Matrix
from sympy.core import Add, diff, Symbol
from sympy.simplify import simplify
from tensor_analysis.arraypy import Arraypy, TensorArray, matrix2arraypy, \
matrix2tensor, list2arraypy, list2tensor
from tensor_analysis.tensor_methods import is_symmetric
from tensor_analysis.helper_functions import check_vector_of_arguments, \
check_metric_tensor, check_the_vector_field, replace_index_to_k, \
check_the_christoffel_symbols_2
"""Module riemannian_geometry contains functions for work with tensor fields:
- the calculation of the scalar product;
- the Christoffel symbols of the first and second kind;
- the covariant derivative of the curvature tensor;
- the Ricci tensor;
- scalar and sectional curvature;
- the covariant derivative the tensor field;
- the covariant divergence of a tensor field;
- the Riemann curvature tensor and sectional curvature for left-invariant metric;
- the product of Kulkarni-Nomizu;
- the Gaussian curvature;
- the second quadratic form.
The implementation uses the matrices module and the tensor classes
Arraypy and TensorArray. All functions accept arguments of type list,
Matrix, Arraypy or TensorArray.
Some functions have an optional parameter selecting the type of the result.
The starting index of an Arraypy or TensorArray argument is arbitrary
and defaults to 0. Each function detects the index range of its input
and returns an object with the same index range.
The functions work with the multidimensional arrays Arraypy and tensors;
these classes and their methods are contained in the module arraypy.
"""
def scal_prod(X, Y, g):
    """Compute the scalar product g(X, Y) of two vectors (or vector fields).

    X, Y -- vectors: list, one-dimensional Arraypy or TensorArray with
        index valence (+1).
    g -- metric tensor: symmetric Matrix, Arraypy or TensorArray with
        index valence (-1, -1).
    Returns the symbolic contraction sum(g[i, j] * X[i] * Y[j]).
    Raises ValueError on a non-symmetric metric or mismatched lengths.
    """
    # Validate the metric and normalize it to a Matrix.
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        g = g.to_matrix()
    if not g.is_symmetric():
        raise ValueError('The metric tensor must be symmetric.')
    # Validate both vector fields and normalize them to plain lists.
    check_the_vector_field(X)
    if isinstance(X, (TensorArray, Arraypy)):
        X = X.to_list()
    check_the_vector_field(Y)
    if isinstance(Y, (TensorArray, Arraypy)):
        Y = Y.to_list()
    # Consistency checks on the dimensions.
    if not len(X) == len(Y):
        raise ValueError('The vectors must be identical length')
    elif len(X) != g.rows:
        raise ValueError(
            'The vector fields and dimension of metric tensor must be identical length')
    # Contract the metric with both vectors.
    dim = range(len(X))
    return sum(g[i, j] * X[i] * Y[j] for i in dim for j in dim)
def christoffel_1(g, var, type_output='t'):
    """Return the Christoffel symbols of the first kind: a (-1, -1, -1)
    tensor representing the Levi-Civita connection of the metric g.

    g -- symmetric metric tensor: Matrix, Arraypy or TensorArray with
        index valence (-1, -1).
    var -- coordinate variables: list, one-dimensional Arraypy or
        TensorArray with index valence (+1).
    type_output -- 't' (default) for a TensorArray result, 'a' for Arraypy.
    """
    # Normalize the coordinate list.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    n = len(var)
    # Validate the metric and determine the common start index.
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        if not (g.start_index[0] == g.start_index[1]):
            raise ValueError(
                'The starting indices of metric tensor must be identical')
        idx_start = g.start_index[0]
    elif isinstance(g, Matrix):
        if not g.is_symmetric():
            raise ValueError('The metric tensor must be symmetric.')
        idx_start = 0
    # The metric dimension must match the number of coordinates.
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    indices = range(idx_start, idx_start + n)
    # Ch[i, j, k] = (d_i g_jk + d_j g_ik - d_k g_ij) / 2
    Ch = Arraypy([3, n, idx_start])
    for i in indices:
        for j in indices:
            for k in indices:
                Ch[i, j, k] = (diff(g[j, k], var[i - idx_start]) +
                               diff(g[i, k], var[j - idx_start]) -
                               diff(g[i, j], var[k - idx_start])) / 2
    # Pack the result in the requested container type.
    if type_output in (str('t'), Symbol('t')):
        result = Ch.to_tensor((-1, -1, -1))
    elif type_output in (str('a'), Symbol('a')):
        result = Ch
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    return result
def christoffel_2(g, var, type_output='t'):
    """Return the Christoffel symbols of the second kind: a (1, -1, -1)
    tensor representing the Levi-Civita connection of the metric g.

    g -- symmetric metric tensor: Matrix, Arraypy or TensorArray with
        index valence (-1, -1).
    var -- coordinate variables: list, one-dimensional Arraypy or
        TensorArray with index valence (+1).
    type_output -- 't' (default) for a TensorArray result, 'a' for Arraypy.
    """
    # Normalize the coordinate list.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    n = len(var)
    # Validate the metric, determine the start index and invert the metric.
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        if not (g.start_index[0] == g.start_index[1]):
            raise ValueError(
                'The starting indices of metric tensor must be identical')
        idx_start = g.start_index[0]
        g_inv = (g.to_matrix()).inv()
    elif isinstance(g, Matrix):
        if not g.is_symmetric():
            raise ValueError('The metric tensor must be symmetric.')
        idx_start = 0
        g_inv = g.inv()
    # The metric dimension must match the number of coordinates.
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    indices = range(idx_start, idx_start + n)
    # Ch[i, j, k] = g^{kl} * (d_i g_jl + d_j g_il - d_l g_ij) / 2,
    # i.e. the last index of Ch is the upper one.
    Ch = Arraypy([3, n, idx_start])
    for i in indices:
        for j in indices:
            for k in indices:
                Ch[i, j, k] = Add(*[
                    g_inv[k - idx_start, l - idx_start] *
                    (diff(g[j, l], var[i - idx_start]) +
                     diff(g[i, l], var[j - idx_start]) -
                     diff(g[i, j], var[l - idx_start])) / 2
                    for l in indices])
    # Pack the result in the requested container type.
    if type_output in (str('t'), Symbol('t')):
        result = Ch.to_tensor((1, -1, -1))
    elif type_output in (str('a'), Symbol('a')):
        result = Ch
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    return result
def covar_der(X, g, var, type_output='t'):
    """Return the covariant derivative of the vector field X with respect
    to the Levi-Civita connection of the metric g, as a (1, -1) tensor.

    X -- vector field: list, one-dimensional Arraypy or TensorArray with
        index valence (+1).
    g -- symmetric metric tensor: Matrix, Arraypy or TensorArray with
        index valence (-1, -1).
    var -- coordinate variables: list, one-dimensional Arraypy or
        TensorArray with index valence (+1).
    type_output -- 't' (default) for a TensorArray result, 'a' for Arraypy.
    """
    # Normalize the coordinate list.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    n = len(var)
    # Validate the metric and record its start index.
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        if not (g.start_index[0] == g.start_index[1]):
            raise ValueError(
                'The starting indices of metric tensor must be identical')
        idx_g = g.start_index[0]
    elif isinstance(g, Matrix):
        if not g.is_symmetric():
            raise ValueError('The metric tensor must be symmetric.')
        idx_g = 0
    # Validate the vector field and record its start index.
    check_the_vector_field(X)
    if isinstance(X, (Arraypy, TensorArray)):
        idx_X = X.start_index[0]
    elif isinstance(X, list):
        idx_X = 0
    # Consistency checks on dimensions and start indices.
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    if (idx_g != idx_X):
        raise ValueError(
            'The start index of the metric tensor and vector field must be equal')
    else:
        idx_start = idx_g
    indices = range(idx_start, idx_start + n)
    ch_2 = christoffel_2(g, var)
    # cov[i, j] = d_i X^j + sum_k Ch[k, i, j] * X^k  (j is the upper index).
    cov = Arraypy([2, n, idx_start])
    for i in indices:
        for j in indices:
            cov[i, j] = diff(X[j], var[i - idx_start]) + \
                Add(*[ch_2[k, i, j] * X[k] for k in indices])
    # Pack the result in the requested container type.
    if type_output in (str('t'), Symbol('t')):
        result = cov.to_tensor((1, -1))
    elif type_output in (str('a'), Symbol('a')):
        result = cov
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    return result
def covar_der_xy(X, Y, g, var, type_output='t'):
    """Return the covariant derivative of the vector field X along the
    vector field Y, for the Levi-Civita connection of the metric g.

    X, Y -- vector fields: list, one-dimensional Arraypy or TensorArray
        with index valence (+1).
    g -- symmetric metric tensor: Matrix, Arraypy or TensorArray with
        index valence (-1, -1).
    var -- coordinate variables: list, one-dimensional Arraypy or
        TensorArray with index valence (+1).
    type_output -- 't' (default) for a TensorArray result, 'a' for Arraypy.
    Raises ValueError on inconsistent dimensions or start indices.
    """
    # Normalize the coordinate list.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    n = len(var)
    # Validate the metric and record its start index.
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        if not (g.start_index[0] == g.start_index[1]):
            raise ValueError(
                'The starting indices of metric tensor must be identical')
        idx_g = g.start_index[0]
    elif isinstance(g, Matrix):
        if not g.is_symmetric():
            raise ValueError('The metric tensor must be symmetric.')
        idx_g = 0
    # Validate both vector fields and record their start indices.
    check_the_vector_field(X)
    if isinstance(X, (Arraypy, TensorArray)):
        idx_X = X.start_index[0]
    elif isinstance(X, list):
        idx_X = 0
    check_the_vector_field(Y)
    if isinstance(Y, (Arraypy, TensorArray)):
        idx_Y = Y.start_index[0]
    elif isinstance(Y, list):
        idx_Y = 0
    # Consistency checks on lengths and start indices.
    [n1, n2] = g.shape
    if not len(X) == len(Y):
        raise ValueError('The vectors must be identical length')
    elif not idx_X == idx_Y:
        raise ValueError('The start index of vector fields must be equal')
    elif not(idx_g == idx_X):
        raise ValueError(
            'The start index of the metric tensor and vector field must be equal')
    else:
        idx_start = idx_g
    if len(X) != n1:
        raise ValueError(
            'The vector fields and dimension of metric tensor must be identical length')
    if not n == n1:
        # Fix: the original message misspelled "coincide" as "concide";
        # now consistent with the sibling functions in this module.
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    indices = range(idx_start, idx_start + n)
    # nabla_XY[j] = sum_i nabla_X[i, j] * Y[i].
    nabla_X = covar_der(X, g, var)
    nabla_XY = Arraypy([1, n, idx_start])
    for j in indices:
        nabla_XY[j] = sum([nabla_X[i, j] * Y[i] for i in indices])
    # Pack the result in the requested container type.
    if type_output == str('t') or type_output == Symbol('t'):
        cov_der_XY = nabla_XY.to_tensor((1))
    elif type_output == str('a') or type_output == Symbol('a'):
        cov_der_XY = nabla_XY
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    return cov_der_XY
def riemann(g, var, type_output='t'):
    """Return the Riemann curvature tensor of valence (1, -1, -1, -1)
    for the given metric tensor.

    g -- symmetric metric tensor: Matrix, Arraypy or TensorArray with
        index valence (-1, -1).
    var -- coordinate variables: list, one-dimensional Arraypy or
        TensorArray with index valence (+1).
    type_output -- 't' (default) for a TensorArray result, 'a' for Arraypy.
    """
    # Normalize the coordinate list.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    n = len(var)
    # Validate the metric and determine the start index.
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        if not (g.start_index[0] == g.start_index[1]):
            raise ValueError(
                'The starting indices of metric tensor must be identical')
        idx_start = g.start_index[0]
    elif isinstance(g, Matrix):
        if not g.is_symmetric():
            raise ValueError('The metric tensor must be symmetric.')
        idx_start = 0
    # The metric dimension must match the number of coordinates.
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    indices = range(idx_start, idx_start + n)
    ch_2 = christoffel_2(g, var)
    # R[i, j, k, l] = d_i Ch[j, k, l] - d_j Ch[i, k, l]
    #                 + sum_p (Ch[i, p, l] * Ch[j, k, p] - Ch[j, p, l] * Ch[i, k, p]),
    # where the last index of Ch (and of R) is the upper one.
    R = Arraypy([4, n, idx_start])
    for i in indices:
        for j in indices:
            for k in indices:
                for l in indices:
                    quadratic = sum([ch_2[i, p, l] * ch_2[j, k, p] -
                                     ch_2[j, p, l] * ch_2[i, k, p]
                                     for p in indices])
                    R[i, j, k, l] = diff(ch_2[j, k, l], var[i - idx_start]) - \
                        diff(ch_2[i, k, l], var[j - idx_start]) + quadratic
    # Pack the result in the requested container type.
    if type_output in (str('t'), Symbol('t')):
        result = R.to_tensor((1, -1, -1, -1))
    elif type_output in (str('a'), Symbol('a')):
        result = R
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    return result
def ricci(riemann, var, type_output='t'):
    """Return the Ricci tensor of valence (-1, -1), obtained by contracting
    the given Riemann curvature tensor.

    riemann -- Riemann curvature tensor: Arraypy or TensorArray with
        index valence (1, -1, -1, -1), as produced by riemann().
    var -- coordinate variables: list, one-dimensional Arraypy or
        TensorArray with index valence (+1).
    type_output -- 't' (default) for a TensorArray result, 'a' for Arraypy.
    """
    # Normalize the coordinate list.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    n = len(var)
    # Validate the Riemann tensor and determine the start index.
    if not isinstance(riemann, (Matrix, Arraypy, TensorArray)):
        raise TypeError(
            'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray')
    if isinstance(riemann, (Arraypy, TensorArray)):
        if isinstance(riemann, TensorArray):
            if not riemann.type_pq == (1, 3):
                raise ValueError(
                    'The valence of Riemann curvature tensor must be (1, -1, -1, -1)')
            if not (
                riemann.start_index.count(
                    riemann.start_index[0]) == 4):
                raise ValueError(
                    'The starting indices of Riemann curvature tensor must be identical')
        idx_start = riemann.start_index[0]
    else:
        idx_start = 0
    # The tensor dimension must match the number of coordinates.
    [n1, n2, n3, n4] = riemann.shape
    if not n == n1:
        raise ValueError(
            'The rank of the Riemann curvature tensor does not coincide with the number of variables.')
    indices = range(idx_start, idx_start + n)
    # Ri[j, k] = sum_i riemann[i, j, k, i]: contraction over the first and
    # last index of the curvature tensor.
    Ri = Arraypy([2, n, idx_start])
    for j in indices:
        for k in indices:
            Ri[j, k] = sum([riemann[i, j, k, i] for i in indices])
    # Pack the result in the requested container type.
    if type_output in (str('t'), Symbol('t')):
        result = Ri.to_tensor((-1, -1))
    elif type_output in (str('a'), Symbol('a')):
        result = Ri
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    return result
def scal_curv(g, ricci, var):
    """Return the scalar curvature (the Ricci scalar), the simplest curvature
    invariant of a Riemannian manifold: the full contraction of the Ricci
    tensor with the inverse metric, S = g^{ij} R_{ij}.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import scal_curv, ricci, riemann
    >>> from tensor_analysis.arraypy import Arraypy, TensorArray
    >>> from sympy import symbols, cos
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional
    arraypy or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    g it's a metric tensor must be symmetric matrix, arraypy or tensor
    with valence indices (-1, -1):
    >>> A = Arraypy((2,2))
    >>> g = TensorArray(A,(-1,-1))
    >>> g[0,0] = cos(x2)**2
    >>> g[0,1] = 0
    >>> g[1,0] = 0
    >>> g[1,1] = 1
    riemann it's a Riemann curvature tensor must be symmetric matrix,
    arraypy or tensor with valences indices (-1, -1, -1, 1):
    >>> cur = riemann(g, var, 't')
    ricci it's Ricci tensor must be a matrix, arraypy or valences with
    tensor indices (-1, -1):
    >>> r = ricci(cur, var, 't')
    The scalar curvature (for this unit-sphere metric it equals 2):
    >>> sc_c = scal_curv(g, r, var)
    >>> print(sc_c)
    2
    """
    # Handling of input vector of arguments - var
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Definition of number of variables
    n = len(var)
    # Handling of a input argument - metric tensor g
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        g = g.to_matrix()
    if not g.is_symmetric():
        raise ValueError('The metric tensor must be symmetric.')
    # The definition of inverse matrix of the metric tensor
    g_inv = g.inv()
    # Handling of a input argument tensor Ricci - ricci
    if not isinstance(ricci, (Matrix, Arraypy, TensorArray)):
        raise TypeError(
            'The type of tensor Ricci must be Matrix, TensorArray or Arraypy')
    else:
        if isinstance(ricci, (Arraypy, TensorArray)):
            if isinstance(ricci, TensorArray):
                if not ricci.type_pq == (0, 2):
                    raise ValueError(
                        'The valence of tensor Ricci must be (-1,-1)')
            ricci = ricci.to_matrix()
    if not ricci.is_symmetric():
        raise ValueError('The Ricci tensor must be symmetric.')
    if not (g.shape == ricci.shape):
        raise ValueError(
            'The rank of the metric tensor does not coincide with the rank of tensor Ricci.')
    # The definition of diapason changes in an index
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    # Calculation: accumulate the double contraction S = g^{ij} * R_{ij}.
    # BUGFIX: the previous version assigned instead of summing, so only the
    # last (i, j) term of the contraction survived.
    indices = range(n)
    scal_curv = 0
    for i in indices:
        for j in indices:
            scal_curv += g_inv[i, j] * ricci[i, j]
    # Output
    return scal_curv
def k_sigma(X, Y, R, g, var):
    """Return Sectional curvature of thу Riemannian space
    in the direction за two-dimensional area formed by
    vectors X, Y for the given metric tensor.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import k_sigma, riemann
    >>> from tensor_analysis.arraypy import Arraypy, TensorArray
    >>> from sympy import symbols, cos
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional
    arraypy or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    X, Y it's a vector or a vector field. They can be a list, one-dimensional
    arraypy or tensor with valence of indices (+1):
    >>> X = [1, 2]
    >>> Y = [3, 4]
    g it's a metric tensor must be symmetric matrix, arraypy or tensor
    with valence indices (-1, -1):
    >>> A = Arraypy((2, 2))
    >>> g = TensorArray(A,(-1, -1))
    >>> g[0,0] = cos(x2)**2
    >>> g[0,1] = 0
    >>> g[1,0] = 0
    >>> g[1,1] = 1
    R it's a Riemann curvature tensor must be symmetric matrix, arraypy or tensor
    with valences indices (1, -1, -1, -1):
    >>> R = riemann(g, var)
    The sectional curvature:
    >>> k_sig = k_sigma(X, Y, R, g, var)
    >>> print(k_sig)
    1
    """
    # Handling of input vector of arguments - var
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Definition of number of variables
    n = len(var)
    # Handling of a input argument - metric tensor g
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        g = g.to_matrix()
    if not g.is_symmetric():
        raise ValueError('The metric tensor must be symmetric.')
    # Handling of a input arguments - vector or vector fields X
    check_the_vector_field(X)
    if isinstance(X, (TensorArray, Arraypy)):
        X = X.to_list()
    # Handling of a input arguments - vector or vector fields Y
    check_the_vector_field(Y)
    if isinstance(Y, (TensorArray, Arraypy)):
        Y = Y.to_list()
    if not len(X) == len(Y):
        raise ValueError('The vectors must be identical length')
    elif len(X) != g.rows:
        raise ValueError(
            'The vector fields and dimension of metric tensor must be identical length')
    # Handling of a input argument Riemann curvature tensor - R
    if not isinstance(R, (Matrix, Arraypy, TensorArray)):
        raise TypeError(
            'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray')
    else:
        if isinstance(R, (Arraypy, TensorArray)):
            if isinstance(R, TensorArray):
                if not R.type_pq == (1, 3):
                    raise ValueError(
                        'The valence of Riemann curvature tensor must be (1, -1,- 1, -1)')
            if not (R.start_index[0] == R.start_index[1]):
                raise ValueError(
                    'The starting indices of Riemann curtivate tensor must be identical')
            idx_R = R.start_index[0]
        else:
            # BUGFIX: R passed as a plain Matrix previously left idx_R
            # unassigned, raising NameError at the numerator below.
            # A Matrix is 0-indexed, mirroring the ricci() convention.
            idx_R = 0
    # The definition of diapason changes in an index
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    [n1, n2, n3, n4] = R.shape
    if not n == n1:
        raise ValueError(
            'The rank of the Riemann curvature tensor does not concide with the number of variables.')
    indices = range(len(X))
    # Calculation: area of the parallelogram spanned by X and Y (squared).
    Sc_pr = scal_prod(X, X, g) * scal_prod(Y, Y, g) - scal_prod(X, Y, g)**2
    if (Sc_pr == 0):
        raise ValueError('The two-dimensional area is a degenerate!')
    # Numerator: <R(X, Y)Y, X> with the first index of R lowered by g.
    numerator = sum([g[r, s] * R[i + idx_R, j + idx_R, k + idx_R, r + idx_R] * X[i] * Y[j] * Y[k] * X[s] for i in indices
                     for j in indices
                     for k in indices
                     for r in indices
                     for s in indices])
    k_sigma = simplify(numerator / Sc_pr)
    # Output
    return k_sigma
def nabla(T, ch_2, var):
    """Return the covariant derivative the tensor field.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import nabla
    >>> from tensor_analysis.arraypy import Arraypy
    >>> from sympy import symbols, cos, sin
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional arraypy
    or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    T it's a tensor field must be tensor:
    >>> T = Arraypy([2, 2, 0]).to_tensor((1, -1))
    >>> T[0,0] = x2
    >>> T[0,1] = -x2
    >>> T[1,0] = -x1
    >>> T[1,1] = x1
    ch_2 it's a Christoffel symbol of second kind must be arraypy or tensor
    with valence indices (1, -1, -1):
    >>> ch_2 = Arraypy([3, 2, 0]).to_tensor((1, -1, -1))
    >>> ch_2[0,0,0] = 0
    >>> ch_2[0,0,1] = sin(x2)*cos(x2)
    >>> ch_2[0,1,1] = 0
    >>> ch_2[1,1,1] = 0
    >>> ch_2[1,0,1] = 0
    >>> ch_2[1,1,0] = 0
    >>> ch_2[1,0,0] = -sin(x2)*cos(x2)
    >>> ch_2[0,1,0] = -sin(x2)*cos(x2)
    The covariant derivative of tensor field:
    >>> nabla_t = nabla(T, ch_2, var)
    >>> print(nabla_t)
    -x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) 0
    x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) x2*sin(x2)*cos(x2) - 1
    -x1*sin(x2)*cos(x2) - x2*sin(x2)*cos(x2) -x1*sin(x2)*cos(x2) - 1
    -x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) 0
    """
    # Handling of a input argument - var
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Handling of a input argument - Christoffel symbol of second kind
    check_the_christoffel_symbols_2(ch_2)
    idx_ch = ch_2.start_index[0]
    # Handling of a input argument - tensor field T
    if not isinstance(T, TensorArray):
        raise TypeError(
            'The type of tensor field must be TensorArray')
    idx_start_T = T.start_index[0]
    if (idx_start_T != idx_ch):
        raise ValueError(
            'The start index of the tensor field and Christoffel symbol \
            of second kind must be equal')
    # The definition of diapason changes in an index
    # The number of upper indices
    p = T.type_pq[0]
    # The dimension of the input array
    n = T.shape[0]
    # The rank of the input array
    rank_T = len(T.shape)
    # The definition of the start index
    idx_char_T = T.ind_char
    # The result carries one extra covariant (derivative) index at the end.
    idx_char_nabla_T = list(idx_char_T) + [-1]
    # upper_idx_numbers it is a list with the positions on which are the upper
    # indices
    upper_idx_numbers = [
        k for k in range(len(idx_char_T)) if idx_char_T[k] == 1]
    # low_idx_numbers it is a list with the positions on which are the lower
    # indices
    low_idx_numbers = [
        k for k in range(len(idx_char_T)) if idx_char_T[k] == -1]
    # Creating the output array in accordance with the start index
    nabla_T = Arraypy([rank_T + 1, n, idx_start_T]).to_tensor(idx_char_nabla_T)
    index_nabla_T = nabla_T.index_list
    # Calculation: for each multi-index of the output, build the partial
    # derivative term plus/minus the Christoffel correction terms.
    for index in index_nabla_T:
        # Strip the trailing derivative index to recover an index into T.
        # NOTE(review): `del index_T[n]` removes position n (the dimension),
        # but the appended derivative index sits at position rank_T; these
        # coincide only when rank_T == n (true in the doctest) — confirm.
        index_T = list(index)
        del index_T[n]
        index_T = tuple(index_T)
        # s is the *value* of the derivative index.
        s = index[rank_T]
        # NOTE(review): `var[index[s]]` re-uses s as a *position* into the
        # multi-index; the straightforward reading of the formula would be
        # var[s] — verify against the intended ∂T/∂x^s term.
        dt = diff(T[index_T], var[index[s]])
        k = idx_start_T
        nabla_T_up = 0
        nabla_T_lo = 0
        # Sum the Christoffel corrections over the contracted index k.
        while k < n + idx_start_T:
            for i in upper_idx_numbers:
                # Replace the i-th index value by k and contract with Γ.
                index_T_ik = replace_index_to_k(index_T, i, k)
                nabla_T_up += T[index_T_ik] * ch_2[index_T[i], s, k]
            for j in low_idx_numbers:
                # NOTE(review): for a lower index the usual correction is
                # Γ^k_{s b} T_{...k...}, i.e. ch_2[k, s, index_T[j]]; here the
                # same index order as the upper case is used — confirm.
                index_T_jk = replace_index_to_k(index_T, j, k)
                nabla_T_lo += T[index_T_jk] * ch_2[index_T[j], s, k]
            k = k + 1
        # ∇T = ∂T + (upper corrections) - (lower corrections)
        nabla_T[index] = dt + nabla_T_up - nabla_T_lo
    # Output
    return nabla_T
def nabla_x(T, ch_2, X, var):
    """Return the covariant derivative the tensor field along another vector field.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import nabla_x
    >>> from tensor_analysis.arraypy import Arraypy
    >>> from sympy import symbols, cos, sin
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional arraypy
    or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    T it's a tensor field must be tensor:
    >>> T = Arraypy([2, 2, 0]).to_tensor((1, -1))
    >>> T[0,0] = x2
    >>> T[0,1] = -x2
    >>> T[1,0] = -x1
    >>> T[1,1] = x1
    ch_2 it's a Christoffel symbol of second kind must be arraypy or tensor
    with valence indices (1, -1, -1):
    >>> ch_2 = Arraypy([3, 2, 0]).to_tensor((1, -1, -1))
    >>> ch_2[0,0,0] = 0
    >>> ch_2[0,0,1] = sin(x2)*cos(x2)
    >>> ch_2[0,1,1] = 0
    >>> ch_2[1,1,1] = 0
    >>> ch_2[1,0,1] = 0
    >>> ch_2[1,1,0] = 0
    >>> ch_2[1,0,0] = -sin(x2)*cos(x2)
    >>> ch_2[0,1,0] = -sin(x2)*cos(x2)
    X it's vector field can be a list, one-dimensional arraypy, or one-dimensional
    tensor with valences of indices (+1):
    >>> X = [x1 * x2**3, x1 - cos(x2)]
    The covariant derivative of tensor field along another vector field:
    >>> nabla_xt = nabla_x(T, ch_2, X, var)
    >>> print(nabla_xt)
    x1*x2**3*(-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2)) x1*x2**3*(x1*sin(x2)*cos(x2) + \
    x2*sin(x2)*cos(x2)) + (x1 - cos(x2))*(x2*sin(x2)*cos(x2) - 1)
    x1*x2**3*(-x1*sin(x2)*cos(x2) - x2*sin(x2)*cos(x2)) + \
    (x1 - cos(x2))*(-x1*sin(x2)*cos(x2) - 1) x1*x2**3*(-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2))
    """
    # Normalize the list of coordinate variables.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Christoffel symbols of the second kind and their start index.
    check_the_christoffel_symbols_2(ch_2)
    idx_ch = ch_2.start_index[0]
    # The vector field X and its start index (0 for a plain list).
    check_the_vector_field(X)
    if isinstance(X, (Arraypy, TensorArray)):
        idx_X = X.start_index[0]
    elif isinstance(X, list):
        idx_X = 0
    # The differentiated field must be a TensorArray.
    if not isinstance(T, TensorArray):
        raise TypeError(
            'The type of tensor field must be TensorArray')
    idx_start_T = T.start_index[0]
    if (idx_start_T != idx_ch != idx_X):
        raise ValueError(
            'The start index of the tensor field and Christoffel symbol \
            of second kind and vector field must be equal')
    # Shape bookkeeping: upper-index count, dimension, rank, valence.
    p = T.type_pq[0]
    n = T.shape[0]
    rank_T = len(T.shape)
    idx_char_T = T.ind_char
    # The result has the same valence and start index as T itself.
    nabla_TX = Arraypy([rank_T, n, idx_start_T]).to_tensor(idx_char_T)
    # Full covariant derivative of T: one extra lower index at the end.
    cov_T = nabla(T, ch_2, var)
    # Contract the extra derivative index with the components of X.
    for idx in nabla_TX.index_list:
        for k in range(idx_start_T, n + idx_start_T):
            nabla_TX[idx] += cov_T[tuple(list(idx) + [k])] * X[k]
    # Output
    return nabla_TX
def delta(T, g, ch_2, var):
    """Return the covariant divergence of a tensor field T.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import delta
    >>> from tensor_analysis.arraypy import Arraypy
    >>> from sympy import symbols, cos, sin
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional arraypy
    or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    T it's a tensor field must be tensor:
    >>> T = Arraypy([2, 2, 0]).to_tensor((1, -1))
    >>> T[0,0] = x2
    >>> T[0,1] = -x2
    >>> T[1,0] = -x1
    >>> T[1,1] = x1
    ch_2 it's a Christoffel symbol of second kind must be arraypy or tensor
    with valence indices (1, -1, -1):
    >>> ch_2 = Arraypy([3, 2, 0]).to_tensor((1, -1, -1))
    >>> ch_2[0,0,0] = 0
    >>> ch_2[0,0,1] = sin(x2)*cos(x2)
    >>> ch_2[0,1,1] = 0
    >>> ch_2[1,1,1] = 0
    >>> ch_2[1,0,1] = 0
    >>> ch_2[1,1,0] = 0
    >>> ch_2[1,0,0] = -sin(x2)*cos(x2)
    >>> ch_2[0,1,0] = -sin(x2)*cos(x2)
    g it's a metric tensor must be symmetric matrix, arraypy or tensor
    with valence indices (-1, -1):
    >>> g = Arraypy((2, 2)).to_tensor((-1, -1))
    >>> g[0,0] = cos(x2)**2
    >>> g[0,1] = 0
    >>> g[1,0] = 0
    >>> g[1,1] = 1
    The covariant divergence of a tensor field:
    >>> delta_T = delta(T, g, ch_2, var)
    >>> print(delta_T)
    x1*sin(x2)*cos(x2) + 1 0
    """
    # Handling of a input argument - var
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Handling of a input argument - metric tensor g
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        g = g.to_matrix()
    if not g.is_symmetric():
        raise ValueError('The metric tensor must be symmetric.')
    # Handling of a input argument - Christoffel symbol of second kind
    check_the_christoffel_symbols_2(ch_2)
    idx_ch = ch_2.start_index[0]
    # Handling of a input argument - tensor field T
    if not isinstance(T, TensorArray):
        raise TypeError(
            'The type of vector field must be TensorArray')
    idx_start_T = T.start_index[0]
    # The definition of inverse matrix of the metric tensor
    g_inv = g.inv()
    # The definition of diapason changes in an index
    # The dimension of the input array
    n = T.shape[0]
    # The rank of the input array
    rank_T = len(T.shape)
    index_T = T.index_list
    # The divergence has rank one less than T; all indices covariant here.
    idx_char_delta_T = [(-1) for i in range(rank_T - 1)]
    # Full covariant derivative of T (extra lower index appended).
    nabla_T = nabla(T, ch_2, var)
    # Creating the output array in accordance with the start index
    delta_T = Arraypy([rank_T - 1, n, idx_start_T]).to_tensor(idx_char_delta_T)
    # Calculation
    # NOTE(review): the assignment below *overwrites* delta_T on every (k, j)
    # iteration instead of accumulating, so only the last term of the
    # contraction survives; a divergence would normally be a sum.  Also
    # idx_nabla_T depends on k but not on j, while g_inv[k, j] varies with j.
    # Both look suspicious — confirm against the intended formula before use.
    for index in index_T:
        k = idx_start_T
        while k < n + idx_start_T:
            for j in range(n):
                # Index into nabla_T: T's index with the derivative index k.
                idx_nabla_T = tuple(list(index) + [k])
                # The output index drops the first index of T.
                idx_delta_T = list(index)
                del idx_delta_T[0]
                idx_delta_T = tuple(idx_delta_T)
                delta_T[idx_delta_T] = (-1) * \
                    nabla_T[idx_nabla_T] * g_inv[k, j]
            k = k + 1
    # Output
    return delta_T
def riemann_li(C, g, var, type_output='t'):
    """Return the Riemann curvature tensor of type (1, -1, -1, -1)
    for the given left-invariant metric tensor.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import riemann_li
    >>> from tensor_analysis.arraypy import Arraypy
    >>> from sympy import symbols, cos, sin
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional arraypy
    or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    C it's a structural constant must be tensor with valence indices (1,-1,-1):
    >>> C = Arraypy([3, 2, 0]).to_tensor((1, -1, -1))
    >>> C[0,0,0] = 0
    >>> C[0,0,1] = sin(x2)*cos(x2)
    >>> C[0,1,1] = 0
    >>> C[1,1,1] = 0
    >>> C[1,0,1] = 0
    >>> C[1,1,0] = 0
    >>> C[1,0,0] = -sin(x2)*cos(x2)
    >>> C[0,1,0] = -sin(x2)*cos(x2)
    g it's a left-invariant metric tensor must be symmetric matrix, arraypy or
    tensor with valence indices (-1, -1):
    >>> g = Arraypy((2, 2)).to_tensor((-1, -1))
    >>> g[0,0] = cos(x2)**2
    >>> g[0,1] = 0
    >>> g[1,0] = 0
    >>> g[1,1] = 1
    type_output it's optional parameter function, indicating the type of calculation
    result and receiving the character or string value:
    - symbol 't' means that the type of the result will match TensorArray;
    - symbol 'a' means that the type of the result will be Arraypy;
    - default function takes a parameter 't', so that the result will be a TensorArray.
    The curvature tensor:
    >>> r_li = riemann_li(C, g, var, 'a')
    >>> print(r_li)
    -0.25*sin(x2)**2*cos(x2)**2 0
    0 0
    0 0
    0 0
    0 0
    0 0
    0 0
    0 0
    """
    # Handling of input vector arguments var
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Definition of number of variables
    n = len(var)
    # Handling of a input argument - metric tensor g
    check_metric_tensor(g)
    if isinstance(g, (Arraypy, TensorArray)):
        if not (g.start_index[0] == g.start_index[1]):
            raise ValueError(
                'The starting indices of metric tensor must be identical')
        idx_g = g.start_index[0]
        g_inv = (g.to_matrix()).inv()
    elif isinstance(g, Matrix):
        if not g.is_symmetric():
            raise ValueError('The metric tensor must be symmetric.')
        idx_g = 0
        g_inv = g.inv()
    # Handling of a input argument - structure constant
    if not isinstance(C, TensorArray):
        raise TypeError(
            'The type of must be TensorArray')
    else:
        if isinstance(C, TensorArray):
            if not C.type_pq == (1, 2):
                raise ValueError(
                    'The valence or ind_char of must be (1,-1,-1)')
            idx_c = C.start_index[0]
    # The definition of diapason changes in an index
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    if (idx_g != idx_c):
        raise ValueError(
            'The start index of the tensor field and Christoffel symbol \
            of second kind must be equal')
    else:
        idx_start = idx_g
    indices = range(idx_start, idx_start + n)
    # Connection coefficients of the left-invariant metric.
    # NOTE(review): the body of the s/k loops *assigns* gamma[p, i, j] on
    # every (s, k) iteration instead of accumulating, so only the last
    # (s, k) term survives; the inner indices look like contraction indices
    # that should be summed — confirm the intended formula.
    gamma = Arraypy([3, n, idx_start])
    for p in indices:
        for i in indices:
            for j in indices:
                for s in indices:
                    for k in indices:
                        gamma[p, i, j] = 0.5 * (C[p, i, j] + g[j, s] * C[s, k, i] * g_inv[
                            k, p] + g[i, s] * C[s, k, j] * g_inv[k, p])
    # Creating the output array in accordance with the start index
    R = Arraypy([4, n, idx_start])
    # Calculation
    # NOTE(review): as above, R[i, j, k, s] is reassigned for each p rather
    # than summed over p (p appears twice in each product, i.e. as a
    # contraction index) — confirm before relying on these components.
    for s in indices:
        for i in indices:
            for j in indices:
                for k in indices:
                    for p in indices:
                        R[i, j, k, s] = gamma[s, i, p] * gamma[p, j, k] - gamma[s, j, p] * gamma[p, i, k] - \
                            gamma[s, p, k] * gamma[p, i, j]
    # Handling of an output array
    if type_output == str('t') or type_output == Symbol('t'):
        riemann = R.to_tensor((1, -1, -1, -1))
    elif type_output == str('a') or type_output == Symbol('a'):
        riemann = R
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    # Output
    return riemann
def k_sigma_li(R, g, var):
    """Return Sectional curvature in the direction of coordinate areas.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import k_sigma_li, riemann_li
    >>> from tensor_analysis.arraypy import Arraypy, TensorArray
    >>> from sympy import symbols, cos, sin
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional
    arraypy or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    g it's a metric tensor must be symmetric matrix, arraypy or tensor
    with valence indices (-1, -1):
    >>> g = Arraypy((2, 2)).to_tensor((-1, -1))
    >>> g[0,0] = cos(x2)**2
    >>> g[0,1] = 0
    >>> g[1,0] = 0
    >>> g[1,1] = 1
    C it's a structural constant must be tensor with valence indices (1,-1,-1):
    >>> C = Arraypy([3, 2, 0]).to_tensor((1, -1, -1))
    >>> C[0,0,0] = 0
    >>> C[0,0,1] = sin(x2)
    >>> C[0,1,1] = cos(x2)
    >>> C[1,1,1] = cos(x2)
    >>> C[1,0,1] = cos(x2)
    >>> C[1,1,0] = 0
    >>> C[1,0,0] = -sin(x2)
    >>> C[0,1,0] = -sin(x2)
    R it's a Riemann curvature tensor must be symmetric matrix, arraypy or tensor
    with valences indices (1, -1, -1, -1):
    >>> R = riemann_li(C, g, var, 't')
    The sectional curvature:
    >>> k_sig_li = k_sigma_li(R, g, var)
    >>> print(k_sig_li)
    Division by zero!
    """
    # Handling of input vector arguments var
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Definition of number of variables
    n = len(var)
    # Handling of a input argument - metric tensor g
    if isinstance(g, (Arraypy, TensorArray)):
        if not (g.start_index[0] == g.start_index[1]):
            raise ValueError(
                'The starting indices of metric tensor must be identical')
        idx_start = g.start_index[0]
    elif isinstance(g, Matrix):
        if not g.is_symmetric():
            raise ValueError('The metric tensor must be symmetric.')
        idx_start = 0
    # Handling of a input argument Riemann curvature tensor - R
    if not isinstance(R, (Matrix, Arraypy, TensorArray)):
        raise TypeError(
            'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray')
    else:
        if isinstance(R, (Arraypy, TensorArray)):
            if isinstance(R, TensorArray):
                if not R.type_pq == (1, 3):
                    raise ValueError(
                        'The valence or ind_char of Riemann curvature tensor must be (-1,-1,-1,+1)')
            if not (R.start_index[0] == R.start_index[1]):
                raise ValueError(
                    'The starting indices of Riemann curtivate tensor must be identical')
            idx_R = R.start_index[0]
    # The definition of diapason changes in an index
    [n1, n2] = g.shape
    if not n == n1:
        raise ValueError(
            'The rank of the metric tensor does not coincide with the number of variables.')
    [n1, n2, n3, n4] = R.shape
    if not n == n1:
        raise ValueError(
            'The rank of the Riemann curvature tensor does not concide with the number of variables.')
    indices = range(n)
    # Output placeholder.
    # NOTE(review): this Arraypy is immediately rebound to a scalar in the
    # loop below, so the array is never actually filled — likely the intent
    # was k_sig_li[i, j] = ... per coordinate plane; confirm.
    k_sig_li = Arraypy([2, n, idx_start])
    # Calculation
    # NOTE(review): the degeneracy guard tests g[i,j]*g[j,j] - g[i,j]**2
    # while the denominator uses g[i,i]*g[j,j] - g[i,j]**2 — the two
    # expressions differ, so the guard does not protect the division.
    # Also sum(...) is applied to a single SymPy expression, not an
    # iterable of terms over k — confirm the intended summation.
    for i in indices:
        for j in indices:
            for k in indices:
                if (g[i, j] * g[j, j] - g[i, j]**2) == 0:
                    raise ValueError('Division by zero!')
                else:
                    k_sig_li = sum(
                        (g[k, i] * R[k, i, j, j]) / (g[i, i] * g[j, j] - g[i, j]**2))
    # Output
    return k_sig_li
def kulkarni_nomizu(h, k, var, type_output='t'):
    """Return the product of Kulkarni-Nomizu of type (-1, -1, -1, -1)
    for the given two symmetric tensor.
    Examples:
    =========
    >>> from tensor_analysis.riemannian_geometry import kulkarni_nomizu
    >>> from tensor_analysis.arraypy import Arraypy
    >>> from sympy import symbols, cos
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional arraypy
    or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    h,k it's a tensor must be symmetric arraypy or tensor
    with valence indices (-1, -1):
    >>> h = Arraypy((2, 2)).to_tensor((-1, -1))
    >>> h[0,0] = x1
    >>> h[0,1] = 0
    >>> h[1,0] = 0
    >>> h[1,1] = x2
    >>> k = Arraypy((2, 2)).to_tensor((-1, -1))
    >>> k[0,0] = x2
    >>> k[0,1] = 0
    >>> k[1,0] = 0
    >>> k[1,1] = x1
    type_output it's optional parameter function, indicating the type of calculation
    result and receiving the character or string value:
    - symbol 't' means that the type of the result will match TensorArray;
    - symbol 'a' means that the type of the result will be Arraypy;
    - default function takes a parameter 't', so that the result will be a TensorArray.
    The curvature tensor:
    >>> k_n = kulkarni_nomizu(h, k, var, 'a')
    >>> print(k_n)
    0 0
    0 0
    0 x1**2 + x2**2
    -x1**2 - x2**2 0
    0 -x1**2 - x2**2
    x1**2 + x2**2 0
    0 0
    0 0
    """
    # Normalize the list of coordinate variables.
    check_vector_of_arguments(var)
    if isinstance(var, (TensorArray, Arraypy)):
        var = var.to_list()
    # Both operands must be symmetric (0, 2)-tensors; validate h first,
    # then k, exactly as before.
    for operand in (h, k):
        if not isinstance(operand, TensorArray):
            raise TypeError(
                'The type of input tensor must be a TensorArray')
        if not operand.type_pq == (0, 2):
            raise ValueError(
                'The valence or ind_char of tensor must be (-1,-1)')
        if not (operand.to_matrix()).is_symmetric():
            raise ValueError('The tensor must be symmetric.')
    # The operands must share one common start index.
    if (h.start_index[0] != k.start_index[0]):
        raise ValueError(
            'The start index of the tensors must be equal')
    idx_start = h.start_index[0]
    # Number of variables fixes the dimension of the output.
    n = len(var)
    kul_nom = Arraypy([4, n, idx_start])
    rng = range(idx_start, idx_start + n)
    # (h ^ k)_{abcd} = h_ac*k_bd - h_ad*k_bc - h_bc*k_ad + h_bd*k_ac
    for a in rng:
        for b in rng:
            for c in rng:
                for d in rng:
                    kul_nom[a, b, c, d] = (
                        h[a, c] * k[b, d] - h[a, d] * k[b, c]
                        - (h[b, c] * k[a, d] - h[b, d] * k[a, c]))
    # Wrap the result according to the requested output type.
    if type_output == str('t') or type_output == Symbol('t'):
        K = kul_nom.to_tensor((-1, -1, -1, -1))
    elif type_output == str('a') or type_output == Symbol('a'):
        K = kul_nom
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
    # Output
    return K
def second_surf(surf, var, type_output='t'):
    """Return the second quadratic form.
    Examples:
    =========
    >>> from sympy import symbols
    >>> from tensor_analysis.riemannian_geometry import second_surf
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional arraypy
    or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    surf it's list of functions, must be consist of one or three functions.
    type_output it's optional parameter function, indicating the type of calculation
    result and receiving the character or string value:
    - symbol 't' means that the type of the result will match TensorArray;
    - symbol 'a' means that the type of the result will be Arraypy;
    - default function takes a parameter 't', so that the result will be a TensorArray.
    The the second quadratic form.
    >>> surf3 = [x1+x2, 2*x1**2-3*x2, (1+x2)*x1+x2-4]
    >>> print(second_surf(surf3, var, 't'))
    (-x1 + x2)/(3*x1) -(4*x1 + 3)/((x1 + 1)*(x2 + 1))
    -(4*x1 + 3)/((x1 + 1)*(x2 + 1)) 0
    >>> surf1 = [x1 + 4*x2**2]
    >>> print(second_surf(surf1, var, 't'))
    0 0
    0 8
    """
    # The definition symbols i, j, k — used as symbolic basis vectors so a
    # 3-vector can be represented as a single SymPy expression a*i + b*j + c*k.
    i = Symbol('i')
    j = Symbol('j')
    k = Symbol('k')
    # 2x2 container for the coefficients of the second fundamental form.
    b = Arraypy((2, 2))
    # Calculation
    if (len(surf) == 1):
        # Graph surface z = f(x1, x2): the coefficients are the plain
        # second partial derivatives of f.
        b[0, 0] = diff(diff(surf[0], var[0]), var[0])
        b[0, 1] = b[1, 0] = diff((diff(surf[0], var[0])), var[1])
        b[1, 1] = diff((diff(surf[0], var[1])), var[1])
    elif (len(surf) == 3):
        # Parametric surface r(u, v) = (surf[0], surf[1], surf[2]).
        # The first partial derivatives
        r_u = diff(surf[0], var[0]) * i + diff(surf[1], var[0]) * j +\
            diff(surf[2], var[0]) * k
        r_v = diff(surf[0], var[1]) * i + diff(surf[1], var[1]) * j +\
            diff(surf[2], var[1]) * k
        # The vector product
        vect_prod = (r_u.coeff(j) * r_v.coeff(k) - r_v.coeff(j) * r_u.coeff(k)) * i - \
            (r_u.coeff(k) * r_v.coeff(i) - r_v.coeff(k) * r_u.coeff(i)) * j + \
            (r_u.coeff(i) * r_v.coeff(j) - r_v.coeff(i) * r_u.coeff(j)) * k
        # The length of vector product
        # NOTE(review): this is a *component-wise* product of r_u and r_v,
        # not the Euclidean norm |r_u x r_v|; the "normal" below is then
        # normalized component-by-component rather than by a scalar length.
        # The doctest output depends on this — confirm intent before changing.
        len_r_uv = r_u.coeff(i) * r_v.coeff(i) * i + r_u.coeff(j) * r_v.coeff(j) * j + \
            r_u.coeff(k) * r_v.coeff(k) * k
        if (len_r_uv == 0):
            raise ValueError('The two-dimensional area is a degenerate!')
        # The components of the normal vector
        n = (simplify(vect_prod.coeff(i) / len_r_uv.coeff(i)) * i +
             simplify(vect_prod.coeff(j) / len_r_uv.coeff(j)) * j +
             simplify(vect_prod.coeff(k) / len_r_uv.coeff(k)) * k)
        # The second partial derivatives
        r_uu = diff(r_u.coeff(i), var[0]) * i + diff(r_u.coeff(j), var[0]) * j + \
            diff(r_u.coeff(k), var[0]) * k
        r_uv = diff(r_u.coeff(i), var[1]) * i + diff(r_u.coeff(j), var[1]) * j + \
            diff(r_u.coeff(k), var[1]) * k
        r_vv = diff(r_v.coeff(i), var[1]) * i + diff(r_v.coeff(j), var[1]) * j + \
            diff(r_v.coeff(k), var[1]) * k
        # Coefficients b_ab = <r_ab, n> (dot products with the normal).
        b[0, 0] = r_uu.coeff(i) * n.coeff(i) + r_uu.coeff(j) * n.coeff(j) + \
            r_uu.coeff(k) * n.coeff(k)
        b[0, 1] = b[1, 0] = r_uv.coeff(i) * n.coeff(i) + r_uv.coeff(j) * n.coeff(j) + \
            r_uv.coeff(k) * n.coeff(k)
        b[1, 1] = r_vv.coeff(i) * n.coeff(i) + r_vv.coeff(j) * n.coeff(j) + \
            r_vv.coeff(k) * n.coeff(k)
    else:
        raise ValueError(
            "The argument surf must be consist one function or three functions")
    # Handling of an output array
    if type_output == str('t') or type_output == Symbol('t'):
        b = b.to_tensor((-1, -1))
    elif type_output == str('a') or type_output == Symbol('a'):
        b = b
    elif type_output == str('m') or type_output == Symbol('m'):
        b = b.to_matrix()
    else:
        raise ValueError(
            "The parameter of type output result must 'a' - Arraypy or 'm' - Matrix\
            't' and None - TensorArray.")
    # Output
    return b
def k_surf(surf, var):
    """Return the Gaussian curvature.
    Examples:
    =========
    >>> from sympy import symbols
    >>> from tensor_analysis.riemannian_geometry import k_surf
    >>> x1, x2 = symbols('x1, x2')
    var it's a list of symbolic arguments. May be a list, one-dimensional arraypy
    or one-dimensional tensor with valence of indices (+1):
    >>> var = [x1, x2]
    surf it's list of functions, must be consist of one or three functions.
    The Gaussian curvature:
    >>> surf3 = [x1+x2, 2*x1**2-3*x2, (1+x2)*x1+x2-4]
    >>> print(k_surf(surf3, var))
    -(4*x1 + 3)**2/((x1 + 1)**2*(x2 + 1)**2*(((x1 + 1)**2 + 10)* \
    (16*x1**2 + (x2 + 1)**2 + 1) - (-12*x1 + (x1 + 1)*(x2 + 1) + 1)**2))
    >>> surf1 = [x1 + 4*x2**2]
    >>> print(k_surf(surf1, var))
    0
    """
    # Calculation
    if (len(surf) == 1):
        # Graph surface z = f(x1, x2):
        #   K = (f_11*f_22 - f_12**2) / (1 + f_1**2 + f_2**2)**2
        # BUGFIX: the whole numerator must be divided by the denominator;
        # previously only the f_12**2 term was, due to operator precedence.
        K = (diff(diff(surf[0], var[0]), var[0]) * diff(diff(surf[0], var[1]), var[1]) -
             (diff(diff(surf[0], var[0]), var[1]))**2) / \
            (1 + diff(surf[0], var[0])**2 + diff(surf[0], var[1])**2)**2
    elif (len(surf) == 3):
        # Parametric surface: K = det(II) / det(I).
        # First fundamental form g_ab = <r_a, r_b>.
        g = Arraypy((2, 2))
        g[0, 0] = diff(surf[0], var[0])**2 + \
            diff(surf[1], var[0])**2 + diff(surf[2], var[0])**2
        g[0, 1] = g[1, 0] = diff(surf[0], var[0]) * diff(surf[0], var[1]) + diff(
            surf[1], var[0]) * diff(surf[1], var[1]) + diff(surf[2], var[0]) * diff(surf[2], var[1])
        g[1, 1] = diff(surf[0], var[1])**2 + \
            diff(surf[1], var[1])**2 + diff(surf[2], var[1])**2
        # Second fundamental form of the *input* surface.
        # BUGFIX: this previously referenced the doctest variable name
        # `surf3`, a NameError for any caller other than that doctest.
        b = second_surf(surf, var, 't')
        K = simplify(
            (b[0, 0] * b[1, 1] - b[0, 1]**2) / (g[0, 0] * g[1, 1] - g[0, 1]**2))
    else:
        raise ValueError(
            "The argument surf must be consist one function or three functions")
    # Output
    return K
|
AunShiLord/Tensor-analysis
|
tensor_analysis/riemannian_geometry.py
|
Python
|
mit
| 61,932
|
[
"Gaussian"
] |
cf83c2c2cf495d37eddb0825996187a0680733761d25807a0d4faa347e0d96f6
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
r"""Module to define a class :py:class:`~BasisFamily` that associates
fitting basis sets to an orbital basis and to provide functions to
query appropriate fitting bases for any orbital basis distributed
with Psi4.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
# Module-level registry of BasisFamily objects; presumably populated by
# loader code later in this module — confirm against its callers.
basisfamily_list = []
class BasisFamily(object):
    """Associates an orbital basis set with its auxiliary fitting bases.

    For an ornately named orbital basis *ornate* (e.g., aug-cc-pVTZ),
    stores the gbs file names in which the orbital basis *orbital*
    (usually the coded form of *ornate*) and the *jfit*, *jkfit*,
    *rifit*, and *dualfit* auxiliary bases can be found.
    """

    def __init__(self, ornate, orbital=None, zeta=None):
        """Build a family from the literature basis name *ornate*;
        *orbital* overrides the derived gbs file name, *zeta* records
        the cardinal number of the basis when known.
        """
        # Literature name of orbital basis set, e.g., aug-cc-pVTZ or 6-31+G*
        self.ornate = ornate
        # gbs file name of orbital basis set, e.g., aug-cc-pvtz or 6-31pgs
        if orbital is None:
            self.orbital = sanitize_basisname(ornate)
        else:
            self.orbital = sanitize_basisname(orbital)
        # gbs file names of the fitting/helper bases, set via the add_*()
        # methods below; None until registered.
        self.jkfit = None
        self.jfit = None
        self.rifit = None
        self.dualfit = None
        # DECON basis defaults to the orbital basis itself
        self.decon = self.orbital
        # NOTE: the jdef/jkdef/ridef fallback attributes are created only
        # when the corresponding add_*_default() method is called.
        # Zeta level (cardinal number) of the basis, if known
        self.zeta = zeta

    def __str__(self):
        pieces = [
            """ ==> %s Family <==\n\n""" % (self.ornate),
            """ Orbital basis: %s\n""" % (self.orbital),
            """ JK auxiliary basis: %s\n""" % (self.jkfit),
            """ MP2 auxiliary basis: %s\n""" % (self.rifit),
            """ DUAL auxiliary basis: %s\n""" % (self.dualfit),
            """ DECON auxiliary basis:%s\n""" % (self.decon),
            """ Zeta: %s\n""" % ('(unset)' if self.zeta is None else str(self.zeta)),
            """\n""",
        ]
        return ''.join(pieces)

    def name(self):
        """Return the ornate name of the orbital basis,
        e.g., 6-311++G** for 6-311ppgss.
        """
        return self.ornate

    def add_jkfit(self, fit):
        """Register basis *fit* (sanitized) as this family's JKFIT basis."""
        self.jkfit = sanitize_basisname(fit)

    def add_rifit(self, fit):
        """Register basis *fit* (sanitized) as this family's RIFIT basis."""
        self.rifit = sanitize_basisname(fit)

    def add_dualfit(self, fit):
        """Register basis *fit* (sanitized) as this family's DUAL helper basis."""
        self.dualfit = sanitize_basisname(fit)

    def add_jfit(self, fit):
        """Register basis *fit* (sanitized) as this family's JFIT basis."""
        self.jfit = sanitize_basisname(fit)

    def add_jfit_default(self, fit):
        """Register basis *fit* (sanitized) as the fallback JFIT basis."""
        self.jdef = sanitize_basisname(fit)

    def add_jkfit_default(self, fit):
        """Register basis *fit* (sanitized) as the fallback JKFIT basis."""
        self.jkdef = sanitize_basisname(fit)

    def add_rifit_default(self, fit):
        """Register basis *fit* (sanitized) as the fallback RIFIT basis."""
        self.ridef = sanitize_basisname(fit)
def sanitize_basisname(name):
    """Return *name* in coded, filename-safe form.

    Lowercases the name and maps characters that confuse filenames:
    ``+`` -> ``p``, ``*`` -> ``s``, and each of ``(``, ``)``, ``,`` -> ``_``.
    """
    coded = name.lower()
    for symbol, stand_in in (('+', 'p'), ('*', 's'),
                             ('(', '_'), (')', '_'), (',', '_')):
        coded = coded.replace(symbol, stand_in)
    return coded
def load_basis_families():
    """Populate ``basisfamily_list`` with BasisFamily objects for all of
    Psi4's standard installed bases and return the list.

    Idempotent: the loaders run only the first time, when the module-level
    list is still empty.
    """
    from .basislistdunning import load_basfam_dunning
    from .basislistother import load_basfam_other

    if not basisfamily_list:
        load_basfam_dunning()
        load_basfam_other()
    return basisfamily_list
def print_basis_families():
    """Return a formatted summary of every BasisFamily in
    ``basisfamily_list``, by default all of Psi4's standard installed
    bases.
    """
    return ''.join('%s' % fam for fam in load_basis_families())
def corresponding_zeta(name):
    """Return the zeta level of the basis family whose ornate name
    matches *name* (accepted in coded or ornate form), or ``None`` when
    no family matches.
    """
    target = sanitize_basisname(name)
    for fam in load_basis_families():
        if sanitize_basisname(fam.ornate) == target:
            return fam.zeta
def corresponding_basis(name, role='BASIS'):
    """Function to validate if the orbital basis *name* in coded or
    ornate form is in Psi4's standard installed bases list. ``None``
    is returned if the orbital basis is not found.
    Return triplet of name for mol hash key, gbs file, post-processing function.
    """
    from .libmintsbasisset import BasisSet

    role = role.upper()
    basisfamily_list = load_basis_families()

    for fam in basisfamily_list:
        # A '-decon' suffix on *name* requests a decontracted variant of
        # the family's fitting basis rather than the basis itself.
        if sanitize_basisname(name).endswith('-decon'):
            if sanitize_basisname(fam.ornate + '-decon') == sanitize_basisname(name):
                if role == 'JKFIT':
                    # NOTE(review): assumes fam.jkfit is set for this family;
                    # a family without a JKFIT partner would raise TypeError
                    # on the string concatenation -- confirm upstream data.
                    return fam.jkfit + '-decon', fam.jkfit, BasisSet.decontract
        if sanitize_basisname(fam.ornate) == sanitize_basisname(name):
            if role == 'ORNATE':
                return fam.ornate, fam.orbital, None  # is fam.orbital right for 2nd posn? it's the corresponding gbs
            elif role in ['BASIS', 'ORBITAL']:
                return fam.orbital, fam.orbital, None
            elif role == 'JFIT':
                return fam.jfit, fam.jfit, None
            elif role == 'JKFIT':
                return fam.jkfit, fam.jkfit, None
            elif role == 'RIFIT':
                return fam.rifit, fam.rifit, None
            elif role == 'DUALFIT':
                return fam.dualfit, fam.dualfit, None
            elif role == 'DECON':
                return fam.decon + '-decon', fam.decon, BasisSet.decontract
    # catches the decontract signal when *name* is not in any BasisFamily entry
    if role == 'DECON':
        return sanitize_basisname(name) + '-decon', sanitize_basisname(name), BasisSet.decontract
    return None, None, None
|
amjames/psi4
|
psi4/driver/qcdb/basislist.py
|
Python
|
lgpl-3.0
| 8,266
|
[
"Psi4"
] |
6eb11f2ade7244cb454f96f0a25c96e50039a102f8ad654bd551a86dfa7166d8
|
import random
import string
import pickle
import cherrypy
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy import sparse
import re
import os
from jinja2 import Environment, FileSystemLoader
# Directory containing this file; static assets are served relative to it.
path = os.path.abspath(os.path.dirname(__file__))

# CherryPy global + per-path configuration: proxy-aware, listen on all
# interfaces at port 7071, and serve /css and /fonts as static directories.
config = {
    'global' : {
        'tools.proxy.on':True,
        'server.socket_host' : '0.0.0.0',
        'server.socket_port' : 7071,
        'server.thread_pool' : 8
    },
    '/' : {'tools.staticdir.root':path},
    '/css' : {
        'tools.staticdir.on' : True,
        'tools.staticdir.dir' : os.path.join(path, 'css')
    },
    '/fonts' : {
        'tools.staticdir.on' : True,
        'tools.staticdir.dir' : os.path.join(path, 'fonts')
    }
}

# Jinja2 environment rooted at the local templates/ directory.
env = Environment(loader=FileSystemLoader(os.path.join(path, 'templates')))
class SKTFIDFCompare(object):
    """Rank corpus documents by similarity to one of its own articles.

    Holds a scikit-learn-style sparse TF-IDF matrix (documents x terms),
    its vocabulary table, and per-article metadata indexed by article id.
    Similarity is the raw dot product of TF-IDF rows.
    """

    # dot-product similarity of every document vs. the last queried article
    similarity = None

    @classmethod
    def from_hdf(cls, fname):
        """Build an instance from the HDF5 store *fname*.

        Expects keys 'meta', 'tfidf_matrix' (COO triplets: row, col,
        tfidf) and 'vocabulary'.
        """
        meta = pd.read_hdf(fname, 'meta')
        df_matrix = pd.read_hdf(fname, 'tfidf_matrix')
        # rebuild the sparse docs-by-terms matrix from its COO triplets
        matrix = sparse.coo_matrix((df_matrix.tfidf.values,
                                    (df_matrix.row, df_matrix.col.values))).tocsr()
        vocabulary = pd.read_hdf(fname, 'vocabulary')
        return cls(matrix, vocabulary, meta)

    def __init__(self, matrix, vocabulary, meta):
        self.matrix = matrix          # sparse CSR matrix, documents x terms
        self.vocabulary = vocabulary  # DataFrame of corpus terms
        self.meta = meta              # article metadata; index = article id
    def get_doc_vector(self, article_id):
        """Return the TF-IDF row for *article_id*; also records the
        per-word weights in the vocabulary table."""
        self.cur_article_id = article_id
        paper_id = self.meta.index.get_loc(article_id)
        doc_vector = self.matrix[paper_id]
        # .A densifies the 1 x n_terms sparse row; squeeze drops the axis
        self.vocabulary['cur_word_weight'] = np.squeeze(doc_vector.A)
        return doc_vector

    def compare_paper(self, article_id):
        """Compute the similarity of every document to *article_id*.

        Stores the raw similarities in ``self.similarity`` and a
        most-to-least-similar index ordering in ``self.ranked_similarity``.
        """
        self.cur_article_id = article_id
        doc_vector = self.get_doc_vector(article_id)
        self.similarity = np.squeeze((self.matrix * doc_vector.T).A)
        # argsort ascending, reversed -> indices from most to least similar
        self.ranked_similarity = np.argsort(self.similarity)[::-1]
        return self.similarity
#
class DeepThought(object):
    """CherryPy application exposing TF-IDF similarity search over an
    arXiv corpus that is loaded from an HDF5 dataset at startup.
    """

    # HDF5 store with the precomputed TF-IDF matrix, vocabulary and metadata
    DATASET_FNAME = 'dt_201806_tfidf_skl.h5'

    def __init__(self):
        print('Loading Dataset')
        self.dt_tfidf = SKTFIDFCompare.from_hdf(self.DATASET_FNAME)
        print('Loaded Dataset')

    @cherrypy.expose
    def index(self):
        """Serve the landing page."""
        template = env.get_template('index.html')
        return template.render()

    @cherrypy.expose
    def arxiv_search(self, identifier='1207.4481'):
        """Render the 50 articles most similar to arXiv *identifier*,
        or an unknown-id page when the identifier is not in the corpus.
        """
        identifier = identifier.strip()
        template = env.get_template('arxiv_search')
        if identifier not in self.dt_tfidf.meta.index:
            return template.render(identifier=identifier, unknown_id=True)
        else:
            similarity = self.dt_tfidf.compare_paper(identifier)
            # order the metadata table by descending similarity
            data_table = self.dt_tfidf.meta.copy().iloc[self.dt_tfidf.ranked_similarity]
            data_table['similarity'] = similarity[self.dt_tfidf.ranked_similarity]
            data_table['identifier'] = data_table.index
            data_table['link'] = ['https://arxiv.org/abs/{0}'.format(identifier) for identifier in data_table['identifier']]
            return template.render(identifier=identifier, data_table=data_table.iloc[:50].to_dict('records'))
# Mount the application at /deepthought and start the blocking CherryPy server.
dt_app = DeepThought()
cherrypy.quickstart(dt_app, '/deepthought', config=config)
|
wkerzendorf/deepthought_web
|
deepthought_web.py
|
Python
|
bsd-3-clause
| 4,003
|
[
"Galaxy"
] |
60481182a6bbae3a7ac94b1bbd125f5397747b95e70d3430d88ba5662bb520ce
|
## Generating simulated data for variations in the object parameters:
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import random
### User-defined functions
def rigid_fixed(K_robot, K_rf):
    """Generate simulated contact data for a rigid, fixed object.

    The robot tip (linear spring of stiffness ``K_robot``) tracks a
    ramped equilibrium-point trajectory while pressing on a fixed object
    modelled as a spring of stiffness ``K_rf``.  Returns the tuple
    ``(pos, force)``: flat lists of tip position (m) and sensed force (N)
    sampled every 10 ms over 1.21 s.
    """
    tip = 0.60                            # tip position at first contact (m)
    t = np.arange(0.00, 1.21, 0.01)       # sampling grid (s)

    # Equilibrium-point command: ramp toward 0.75 m, then on toward 0.90 m.
    cmd = np.concatenate((np.arange(tip, 0.75, 0.01),
                          np.arange(0.75 + 0.01, 0.90, 0.01)), axis=0)

    applied = np.zeros((len(cmd), 1))     # force commanded by the robot spring
    deform = np.zeros((len(cmd), 1))      # resulting object deformation
    force = np.zeros((len(t), 1))         # force sensed at the tip
    pos = np.zeros((len(t), 1))           # tip position trace
    pos[:] = tip

    for i in range(1, len(cmd)):
        applied[i] = K_robot * (cmd[i] - tip)
        deform[i] = applied[i] / K_rf
        # Advance the tip by the incremental deformation, clamped so it
        # does not pass this step's commanded point.
        if tip + deform[i] <= cmd[i]:
            tip = tip + (deform[i] if i == 1 else deform[i] - deform[i - 1])
        else:
            tip = tip + ((cmd[i] - tip) if i == 1 else (cmd[i] - cmd[i - 1]))
        force[i] = K_rf * (tip - 0.60)
        pos[i] = tip

    # Hold the final state for the remainder of the sampling window.
    for i in range(len(cmd), len(t)):
        force[i] = force[i - 1]
        pos[i] = tip

    return pos.ravel().tolist(), force.ravel().tolist()
def soft_fixed(K_robot, K_sf):
    """Generate simulated contact data for a soft, fixed object.

    Same kinematics as the rigid-fixed case but with a compliant object
    spring ``K_sf``.  Returns ``(pos, force)`` as flat lists of probe
    position (m) and sensed force (N) sampled every 10 ms over 1.21 s.
    """
    probe = 0.60                         # probe position at first contact (m)
    t = np.arange(0.00, 1.21, 0.01)      # sampling grid (s)

    # two-stage equilibrium-point ramp: toward 0.75 m, then toward 0.90 m
    ramp_near = np.arange(probe, 0.75, 0.01)
    ramp_far = np.arange(0.75 + 0.01, 0.90, 0.01)
    goal = np.concatenate((ramp_near, ramp_far), axis=0)

    push = np.zeros((len(goal), 1))      # robot-spring force
    squash = np.zeros((len(goal), 1))    # object deformation
    sensed = np.zeros((len(t), 1))       # sensed-force trace
    trace = np.zeros((len(t), 1))        # probe-position trace
    for idx in range(len(trace)):
        trace[idx] = probe

    for idx in range(1, len(goal)):
        push[idx] = K_robot * (goal[idx] - probe)
        squash[idx] = push[idx] / K_sf
        if idx == 1:
            # first step: move by the full deformation, clamped to the goal
            if probe + squash[idx] <= goal[idx]:
                probe = probe + squash[idx]
            else:
                probe = probe + (goal[idx] - probe)
        else:
            # later steps: move by the deformation increment, clamped to
            # the goal increment
            if probe + squash[idx] <= goal[idx]:
                probe = probe + squash[idx] - squash[idx - 1]
            else:
                probe = probe + (goal[idx] - goal[idx - 1])
        sensed[idx] = K_sf * (probe - 0.60)
        trace[idx] = probe

    # hold the final state out to the end of the sampling window
    for idx in range(len(goal), len(t)):
        sensed[idx] = sensed[idx - 1]
        trace[idx] = probe

    pos = sum(trace.tolist(), [])
    force = sum(sensed.tolist(), [])
    return pos, force
def rigid_movable(K_robot, K_rm, Mass_rm, mu_static_rigid, mu_dynamic_rigid):
    """Simulated (position, force) traces for a rigid, movable object.

    Spring phase: while the robot's applied force stays below the static
    friction limit, the object only deforms and the sensed force grows as
    ``K_rm * deformation``.  Slip phase: once static friction is
    exceeded, the object slides; the sensed force saturates at the
    kinetic-friction level and the object's motion is integrated with
    explicit Euler steps.  Returns ``(pos, force)`` as flat lists
    sampled every 10 ms over 1.21 s.
    """
    Robot_Home_Position = 0.00   # unused in this function
    Robot_Current_Position = 0.60
    time = np.arange(0.00, 1.21, 0.01)
    g = 9.81
    eqbm_point_1 = 0.50          # unused in this function
    eqbm_point_2 = 0.75
    eqbm_point_3 = 0.90
    # commanded equilibrium-point trajectory: two ramps toward 0.75 then 0.90
    dist_eqbm_pt2 = np.arange(Robot_Current_Position, eqbm_point_2, 0.01)
    dist_eqbm_pt3 = np.arange(eqbm_point_2 + 0.01, eqbm_point_3, 0.01)
    dist = np.concatenate((dist_eqbm_pt2, dist_eqbm_pt3), axis=0)
    applied_force_rm = np.zeros((len(dist), 1))
    deform_rm = np.zeros((len(dist), 1))
    acc_rm = np.zeros((len(dist), 1))
    vel_rm = np.zeros((len(dist), 1))
    pos_rm = np.zeros((len(time), 1))
    sensed_force_rm = np.zeros((len(time), 1))
    robot_pos_rm = np.zeros((len(time), 1))
    for i in range(len(robot_pos_rm)):
        robot_pos_rm[i] = Robot_Current_Position
    stat_force = Mass_rm*g*mu_static_rigid   # breakaway (static friction) force
    index = 1                                # 1 until the object starts sliding
    for i in range(1, len(dist)):
        if index == 1:
            # pre-slip: robot spring loads the object's own spring
            applied_force_rm[i] = K_robot*(dist[i] - Robot_Current_Position)
            deform_rm[i] = applied_force_rm[i]/K_rm
            if i == 1:
                if (Robot_Current_Position + deform_rm[i]) <= dist[i]:
                    Robot_Current_Position = Robot_Current_Position + deform_rm[i]
                else:
                    Robot_Current_Position = Robot_Current_Position + (dist[i] - Robot_Current_Position)
            else:
                if (Robot_Current_Position + deform_rm[i]) <= dist[i]:
                    Robot_Current_Position = Robot_Current_Position + deform_rm[i] - deform_rm[i-1]
                else:
                    Robot_Current_Position = Robot_Current_Position + (dist[i] - dist[i-1])
            sensed_force_rm[i] = K_rm*(Robot_Current_Position - 0.60)
        else:
            # post-slip: force transmitted through the object spring
            applied_force_rm[i] = K_rm*(dist[i] - Robot_Current_Position)
        if (applied_force_rm[i] <= stat_force) and (index == 1):
            # still stuck: keep the spring-phase force; no rigid-body motion
            sensed_force_rm[i] = sensed_force_rm[i]
            acc_rm[i] = 0
            vel_rm[i] = 0
            pos_rm[i] = 0
            Robot_Current_Position = Robot_Current_Position
        else:
            # sliding: sensed force saturates at the kinetic-friction level
            net_force_rm = applied_force_rm[i] - Mass_rm*g*mu_dynamic_rigid
            sensed_force_rm[i] = Mass_rm*g*mu_dynamic_rigid
            if net_force_rm < 0:
                # applied force fell below kinetic friction: no acceleration
                net_force_rm = 0
                acc_rm[i] = 0
                vel_rm[i] = 0
                pos_rm[i] = 0
                Robot_Current_Position = Robot_Current_Position
            else:
                # explicit Euler integration at dt = 0.01 s
                acc_rm[i] = net_force_rm/Mass_rm
                vel_rm[i] = vel_rm[i-1]+acc_rm[i]*0.01
                pos_rm[i] = pos_rm[i-1]+vel_rm[i]*0.01
                # robot follows the object, clamped to the commanded point
                if (Robot_Current_Position + pos_rm[i] - pos_rm[i-1]) <= dist[i]:
                    Robot_Current_Position = Robot_Current_Position + pos_rm[i] - pos_rm[i-1]
                else:
                    Robot_Current_Position = Robot_Current_Position + dist[i] - dist[i-1]
                index = index+1
        robot_pos_rm[i] = Robot_Current_Position
    # hold the final state for the rest of the sampling window
    for i in range(len(dist), len(time)):
        sensed_force_rm[i] = sensed_force_rm[i-1]
        pos_rm[i] = pos_rm[i-1]
        robot_pos_rm[i] = Robot_Current_Position
    force = sum(sensed_force_rm.tolist(), [])
    pos = sum(robot_pos_rm.tolist(), [])
    return pos, force
def soft_movable(K_robot, K_sm, Mass_sm, mu_static_soft, mu_dynamic_soft):
    """Simulated (position, force) traces for a soft, movable object.

    Identical stick/slip model to :func:`rigid_movable` but with the
    soft-object parameter set: spring phase until the applied force
    exceeds static friction, then sliding with the sensed force
    saturated at the kinetic-friction level.  Returns ``(pos, force)``
    as flat lists sampled every 10 ms over 1.21 s.
    """
    Robot_Home_Position = 0.00   # unused in this function
    Robot_Current_Position = 0.60
    time = np.arange(0.00, 1.21, 0.01)
    g = 9.81
    eqbm_point_1 = 0.50          # unused in this function
    eqbm_point_2 = 0.75
    eqbm_point_3 = 0.90
    # commanded equilibrium-point trajectory: two ramps toward 0.75 then 0.90
    dist_eqbm_pt2 = np.arange(Robot_Current_Position, eqbm_point_2, 0.01)
    dist_eqbm_pt3 = np.arange(eqbm_point_2 + 0.01, eqbm_point_3, 0.01)
    dist = np.concatenate((dist_eqbm_pt2, dist_eqbm_pt3), axis=0)
    applied_force_sm = np.zeros((len(dist), 1))
    deform_sm = np.zeros((len(dist), 1))
    acc_sm = np.zeros((len(dist), 1))
    vel_sm = np.zeros((len(dist), 1))
    pos_sm = np.zeros((len(time), 1))
    sensed_force_sm = np.zeros((len(time), 1))
    robot_pos_sm = np.zeros((len(time), 1))
    for i in range(len(robot_pos_sm)):
        robot_pos_sm[i] = Robot_Current_Position
    stat_force = Mass_sm*g*mu_static_soft    # breakaway (static friction) force
    index = 1                                # 1 until the object starts sliding
    for i in range(1, len(dist)):
        if index == 1:
            # pre-slip: robot spring loads the object's own spring
            applied_force_sm[i] = K_robot*(dist[i] - Robot_Current_Position)
            deform_sm[i] = applied_force_sm[i]/K_sm
            if i == 1:
                if (Robot_Current_Position + deform_sm[i]) <= dist[i]:
                    Robot_Current_Position = Robot_Current_Position + deform_sm[i]
                else:
                    Robot_Current_Position = Robot_Current_Position + (dist[i] - Robot_Current_Position)
            else:
                if (Robot_Current_Position + deform_sm[i]) <= dist[i]:
                    Robot_Current_Position = Robot_Current_Position + deform_sm[i] - deform_sm[i-1]
                else:
                    Robot_Current_Position = Robot_Current_Position + (dist[i] - dist[i-1])
            sensed_force_sm[i] = K_sm*(Robot_Current_Position - 0.60)
        else:
            # post-slip: force transmitted through the object spring
            applied_force_sm[i] = K_sm*(dist[i] - Robot_Current_Position)
        if (applied_force_sm[i] <= stat_force) and (index == 1):
            # still stuck: keep the spring-phase force; no rigid-body motion
            sensed_force_sm[i] = sensed_force_sm[i]
            acc_sm[i] = 0
            vel_sm[i] = 0
            pos_sm[i] = 0
            Robot_Current_Position = Robot_Current_Position
        else:
            # sliding: sensed force saturates at the kinetic-friction level
            net_force_sm = applied_force_sm[i] - Mass_sm*g*mu_dynamic_soft
            sensed_force_sm[i] = Mass_sm*g*mu_dynamic_soft
            if net_force_sm < 0:
                # applied force fell below kinetic friction: no acceleration
                net_force_sm = 0
                acc_sm[i] = 0
                vel_sm[i] = 0
                pos_sm[i] = 0
                Robot_Current_Position = Robot_Current_Position
            else:
                # explicit Euler integration at dt = 0.01 s
                acc_sm[i] = net_force_sm/Mass_sm
                vel_sm[i] = vel_sm[i-1]+acc_sm[i]*0.01
                pos_sm[i] = pos_sm[i-1]+vel_sm[i]*0.01
                # robot follows the object, clamped to the commanded point
                if (Robot_Current_Position + pos_sm[i] - pos_sm[i-1]) <= dist[i]:
                    Robot_Current_Position = Robot_Current_Position + pos_sm[i] - pos_sm[i-1]
                else:
                    Robot_Current_Position = Robot_Current_Position + dist[i] - dist[i-1]
                index = index+1
        robot_pos_sm[i] = Robot_Current_Position
    # hold the final state for the rest of the sampling window
    for i in range(len(dist), len(time)):
        sensed_force_sm[i] = sensed_force_sm[i-1]
        pos_sm[i] = pos_sm[i-1]
        robot_pos_sm[i] = Robot_Current_Position
    force = sum(sensed_force_sm.tolist(), [])
    pos = sum(robot_pos_sm.tolist(), [])
    return pos, force
### Main Program
if __name__ == '__main__':

    time = np.arange(0.00, 1.21, 0.01)
    samples = len(time)          # 121 samples at 10 ms
    K_robot = 100                # robot spring constant shared by all sweeps

    # --- Rigid-Fixed: sweep object stiffness K_rf = 100 .. 100000 ---
    K_rf = np.zeros((1000, 1))
    for i in range(1000):
        K_rf[i] = 100*(i+1)
    trials_rf = np.size(K_rf, 0)
    robot_pos_rf = np.zeros((trials_rf, samples))
    sensed_force_rf = np.zeros((trials_rf, samples))
    for i in range(trials_rf):
        robot_pos_rf[i, :], sensed_force_rf[i, :] = rigid_fixed(K_robot, K_rf[i])

    # --- Soft-Fixed: sweep object stiffness K_sf = 0.05 .. 50 ---
    K_sf = np.zeros((1000, 1))
    for i in range(1000):
        K_sf[i] = 0.05*(i+1)
    trials_sf = np.size(K_sf, 0)
    robot_pos_sf = np.zeros((trials_sf, samples))
    sensed_force_sf = np.zeros((trials_sf, samples))
    for i in range(trials_sf):
        robot_pos_sf[i, :], sensed_force_sf[i, :] = soft_fixed(K_robot, K_sf[i])

    # --- Rigid-Movable: sweep stiffness x mass x friction coefficients ---
    K_rm = np.zeros((50, 1))
    Mass_rm = [2.0, 2.1, 2.2, 2.3, 2.4]
    mu_static_rigid = [0.45, 0.55]
    mu_dynamic_rigid = [0.15, 0.2]
    for i in range(50):
        K_rm[i] = 500*(i+1)
    trials_rm = (np.size(K_rm, 0) * len(Mass_rm) *
                 len(mu_static_rigid) * len(mu_dynamic_rigid))
    robot_pos_rm = np.zeros((trials_rm, samples))
    sensed_force_rm = np.zeros((trials_rm, samples))
    p = 0
    for j in range(np.size(K_rm, 0)):
        for k in range(len(Mass_rm)):
            for m in range(len(mu_static_rigid)):
                for n in range(len(mu_dynamic_rigid)):
                    robot_pos_rm[p, :], sensed_force_rm[p, :] = rigid_movable(
                        K_robot, K_rm[j], Mass_rm[k],
                        mu_static_rigid[m], mu_dynamic_rigid[n])
                    p = p + 1

    # --- Soft-Movable: sweep stiffness x mass x friction coefficients ---
    K_sm = np.zeros((50, 1))
    Mass_sm = [0.3, 0.35, 0.4, 0.45, 0.5]
    mu_static_soft = [0.15, 0.35]
    mu_dynamic_soft = [0.05, 0.1]
    for i in range(50):
        K_sm[i] = 100*(i+1)
    trials_sm = (np.size(K_sm, 0) * len(Mass_sm) *
                 len(mu_static_soft) * len(mu_dynamic_soft))
    robot_pos_sm = np.zeros((trials_sm, samples))
    sensed_force_sm = np.zeros((trials_sm, samples))
    p = 0
    for j in range(np.size(K_sm, 0)):
        for k in range(len(Mass_sm)):
            for m in range(len(mu_static_soft)):
                for n in range(len(mu_dynamic_soft)):
                    # BUG FIX: this sweep previously called rigid_movable(),
                    # so the "soft movable" training data was generated
                    # with the rigid-object model.
                    robot_pos_sm[p, :], sensed_force_sm[p, :] = soft_movable(
                        K_robot, K_sm[j], Mass_sm[k],
                        mu_static_soft[m], mu_dynamic_soft[n])
                    p = p + 1

    # Store data: one MATLAB file per object class, consumed downstream
    rf_data = {'sensed_force_rf': sensed_force_rf, 'robot_pos_rf': robot_pos_rf}
    rm_data = {'sensed_force_rm': sensed_force_rm, 'robot_pos_rm': robot_pos_rm}
    sf_data = {'sensed_force_sf': sensed_force_sf, 'robot_pos_sf': robot_pos_sf}
    sm_data = {'sensed_force_sm': sensed_force_sm, 'robot_pos_sm': robot_pos_sm}
    scipy.io.savemat('rigid_fixed_object_training.mat', rf_data)
    scipy.io.savemat('rigid_movable_object_training.mat', rm_data)
    scipy.io.savemat('soft_fixed_object_training.mat', sf_data)
    scipy.io.savemat('soft_movable_object_training.mat', sm_data)

    # Load data back (round-trips through the .mat files that the
    # downstream classifiers actually consume)
    data_rf = scipy.io.loadmat('rigid_fixed_object_training.mat')
    data_sf = scipy.io.loadmat('soft_fixed_object_training.mat')
    data_rm = scipy.io.loadmat('rigid_movable_object_training.mat')
    data_sm = scipy.io.loadmat('soft_movable_object_training.mat')
    dataforce_rf = np.transpose(data_rf['sensed_force_rf'])
    dataforce_sf = np.transpose(data_sf['sensed_force_sf'])
    dataforce_rm = np.transpose(data_rm['sensed_force_rm'])
    dataforce_sm = np.transpose(data_sm['sensed_force_sm'])
    datamotion_rf = np.transpose(data_rf['robot_pos_rf'])
    datamotion_sf = np.transpose(data_sf['robot_pos_sf'])
    datamotion_rm = np.transpose(data_rm['robot_pos_rm'])
    datamotion_sm = np.transpose(data_sm['robot_pos_sm'])

    # Plot data
    titles = ['Rigid Fixed', 'Soft Fixed', 'Rigid Movable', 'Soft Movable']
    forces = [dataforce_rf, dataforce_sf, dataforce_rm, dataforce_sm]
    motions = [datamotion_rf, datamotion_sf, datamotion_rm, datamotion_sm]

    def _quad_plot(fignum, xs, xlabel, xlim):
        """Draw force traces for the four object classes as 2x2 subplots."""
        mpu.figure(fignum)
        for idx in range(4):
            pp.subplot(2, 2, idx + 1)
            pp.title(titles[idx], fontsize='24')
            pp.xlabel(xlabel, fontsize='24')
            pp.ylabel('Force (N)', fontsize='24')
            pp.plot(xs[idx], forces[idx], linewidth=3.0)
            pp.xlim(xlim)
            pp.grid(True)

    # Force vs. time
    _quad_plot(1, [time] * 4, 'Time (s)', (0.0, 1.3))
    # Force vs. position (BUG FIX: the rigid-fixed subplot's x-label read
    # 'Position (s)'; all four subplots now use 'Position (m)')
    _quad_plot(2, motions, 'Position (m)', (0.5, 1.0))
    pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/simulation_results/comparision_with_kNN_PCA/Combined/object_training/gen_data_object_training.py
|
Python
|
mit
| 17,336
|
[
"Mayavi"
] |
f42df2c53a5e451554062c08da09557e1ec404f37612859bb51c26545551bab6
|
# -*- coding: utf-8 -*-
import base64
import datetime
import json
import time
import mock
from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from pyquery import PyQuery as pq
from urlparse import urlparse
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import mail
from django.db.models import Q
from django.test.client import (FakePayload, encode_multipart,
BOUNDARY, CONTENT_TYPE_RE, MULTIPART_CONTENT)
from django.test.utils import override_settings
from django.http import Http404
from django.utils.encoding import smart_str
from constance import config
from jingo.helpers import urlparams
from waffle.models import Flag, Switch
from kuma.attachments.models import Attachment
from kuma.attachments.utils import make_test_file
from kuma.authkeys.models import Key
from kuma.core.cache import memcache as cache
from kuma.core.models import IPBan
from kuma.core.tests import post, get, override_constance_settings
from kuma.core.urlresolvers import reverse
from kuma.users.tests import UserTestCase, user
from ..content import get_seo_description
from ..events import EditDocumentEvent
from ..forms import MIDAIR_COLLISION
from ..models import (Document, Revision, RevisionIP, DocumentZone,
DocumentTag, DocumentDeletionLog)
from ..views import _get_seo_parent_title
from . import (doc_rev, document, new_document_data, revision,
normalize_html, create_template_test_users,
make_translation, WikiTestCase, FakeResponse)
class RedirectTests(UserTestCase, WikiTestCase):
    """Tests for the REDIRECT wiki directive"""
    localizing_client = True

    def test_redirect_suppression(self):
        """The document view shouldn't redirect when passed redirect=no."""
        redirect, _ = doc_rev('REDIRECT <a class="redirect" '
                              'href="/en-US/docs/blah">smoo</a>')
        url = redirect.get_absolute_url() + '?redirect=no'
        response = self.client.get(url, follow=True)
        # the raw REDIRECT markup is rendered rather than followed
        self.assertContains(response, 'REDIRECT ')

    def test_redirects_only_internal(self):
        """Ensures redirects cannot be used to link to other sites"""
        # protocol-relative external URL: must be shown, not followed
        redirect, _ = doc_rev('REDIRECT <a class="redirect" '
                              'href="//davidwalsh.name">DWB</a>')
        url = redirect.get_absolute_url()
        response = self.client.get(url, follow=True)
        self.assertContains(response, 'DWB')

    def test_redirects_only_internal_2(self):
        """Ensures redirects cannot be used to link to other sites"""
        # absolute external URL: must be shown, not followed
        redirect, _ = doc_rev('REDIRECT <a class="redirect" '
                              'href="http://davidwalsh.name">DWB</a>')
        url = redirect.get_absolute_url()
        response = self.client.get(url, follow=True)
        self.assertContains(response, 'DWB')

    def test_self_redirect_suppression(self):
        """The document view shouldn't redirect to itself."""
        slug = 'redirdoc'
        html = ('REDIRECT <a class="redirect" href="/en-US/docs/%s">smoo</a>' %
                slug)
        doc = document(title='blah', slug=slug, html=html, save=True,
                       locale=settings.WIKI_DEFAULT_LANGUAGE)
        revision(document=doc, content=html, is_approved=True, save=True)
        response = self.client.get(doc.get_absolute_url(), follow=True)
        eq_(200, response.status_code)
        response_html = pq(response.content)
        article_body = response_html.find('#wikiArticle').html()
        # the article body still contains the (unfollowed) redirect markup
        self.assertHTMLEqual(html, article_body)
class LocaleRedirectTests(UserTestCase, WikiTestCase):
    """Tests for fallbacks to en-US and such for slug lookups."""
    # Some of these may fail or be invalid if your WIKI_DEFAULT_LANGUAGE is de.
    localizing_client = True

    def test_fallback_to_translation(self):
        """If a slug isn't found in the requested locale but is in the default
        locale and if there is a translation of that default-locale document to
        the requested locale, the translation should be served."""
        en_doc, de_doc = self._create_en_and_de_docs()
        response = self.client.get(reverse('wiki.document',
                                           args=(en_doc.slug,),
                                           locale='de'),
                                   follow=True)
        self.assertRedirects(response, de_doc.get_absolute_url())

    def test_fallback_with_query_params(self):
        """The query parameters should be passed along to the redirect."""
        en_doc, de_doc = self._create_en_and_de_docs()
        url = reverse('wiki.document', args=[en_doc.slug], locale='de')
        response = self.client.get(url + '?x=y&x=z', follow=True)
        self.assertRedirects(response, de_doc.get_absolute_url() + '?x=y&x=z')

    def test_redirect_with_no_slug(self):
        """Bug 775241: Fix exception in redirect for URL with ui-locale"""
        loc = settings.WIKI_DEFAULT_LANGUAGE
        url = '/%s/docs/%s/' % (loc, loc)
        try:
            self.client.get(url, follow=True)
        except Http404, e:
            # a plain 404 is the acceptable outcome here
            pass
        except Exception, e:
            self.fail("The only exception should be a 404, not this: %s" % e)

    def _create_en_and_de_docs(self):
        # shared fixture: an en-US document with an approved de translation
        en = settings.WIKI_DEFAULT_LANGUAGE
        en_doc = document(locale=en, slug='english-slug', save=True)
        de_doc = document(locale='de', parent=en_doc, save=True)
        revision(document=de_doc, is_approved=True, save=True)
        return en_doc, de_doc
class ViewTests(UserTestCase, WikiTestCase):
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
localizing_client = True
    @attr('bug875349')
    def test_json_view(self):
        """wiki.json (by title) and wiki.json_slug (by slug) expose the
        document's tags and review tags; the slug endpoint also sends a
        CORS header when the application_ACAO switch is on."""
        expected_tags = sorted(['foo', 'bar', 'baz'])
        expected_review_tags = sorted(['tech', 'editorial'])

        # tag the fixture document and its current revision
        doc = Document.objects.get(pk=1)
        doc.tags.set(*expected_tags)
        doc.current_revision.review_tags.set(*expected_review_tags)

        # lookup by title
        url = reverse('wiki.json', locale=settings.WIKI_DEFAULT_LANGUAGE)
        resp = self.client.get(url, {'title': 'an article title'})
        eq_(200, resp.status_code)
        data = json.loads(resp.content)
        eq_('article-title', data['slug'])
        result_tags = sorted([str(x) for x in data['tags']])
        eq_(expected_tags, result_tags)
        result_review_tags = sorted([str(x) for x in data['review_tags']])
        eq_(expected_review_tags, result_review_tags)

        # lookup by slug, with CORS enabled
        url = reverse('wiki.json_slug', args=('article-title',),
                      locale=settings.WIKI_DEFAULT_LANGUAGE)
        Switch.objects.create(name='application_ACAO', active=True)
        resp = self.client.get(url)
        ok_('Access-Control-Allow-Origin' in resp)
        eq_('*', resp['Access-Control-Allow-Origin'])
        eq_(200, resp.status_code)
        data = json.loads(resp.content)
        eq_('an article title', data['title'])
        ok_('translations' in data)
        result_tags = sorted([str(x) for x in data['tags']])
        eq_(expected_tags, result_tags)
        result_review_tags = sorted([str(x) for x in data['review_tags']])
        eq_(expected_review_tags, result_review_tags)
    def test_history_view(self):
        """The revision-history view renders, and ?limit=all is
        restricted to authenticated users (403 for anonymous)."""
        slug = 'history-view-test-doc'
        html = 'history view test doc'
        doc = document(title='History view test doc', slug=slug,
                       html=html, save=True,
                       locale=settings.WIKI_DEFAULT_LANGUAGE)
        # 50 approved revisions so the history is longer than one page
        for i in xrange(1, 51):
            revision(document=doc, content=html,
                     comment='Revision %s' % i,
                     is_approved=True, save=True)
        url = reverse('wiki.document_revisions', args=(slug,),
                      locale=settings.WIKI_DEFAULT_LANGUAGE)
        resp = self.client.get(url)
        eq_(200, resp.status_code)
        all_url = urlparams(reverse('wiki.document_revisions', args=(slug,),
                                    locale=settings.WIKI_DEFAULT_LANGUAGE),
                            limit='all')
        # anonymous users may not request the full history
        resp = self.client.get(all_url)
        eq_(403, resp.status_code)
        self.client.login(username='testuser', password='testpass')
        resp = self.client.get(all_url)
        eq_(200, resp.status_code)
    def test_toc_view(self):
        """wiki.toc returns the document's table of contents as nested
        ordered lists, with a CORS header behind the application_ACAO
        switch."""
        slug = 'toc_test_doc'
        html = '<h2>Head 2</h2><h3>Head 3</h3>'
        doc = document(title='blah', slug=slug, html=html, save=True,
                       locale=settings.WIKI_DEFAULT_LANGUAGE)
        revision(document=doc, content=html, is_approved=True, save=True)
        url = reverse('wiki.toc', args=[slug],
                      locale=settings.WIKI_DEFAULT_LANGUAGE)
        Switch.objects.create(name='application_ACAO', active=True)
        resp = self.client.get(url)
        ok_('Access-Control-Allow-Origin' in resp)
        eq_('*', resp['Access-Control-Allow-Origin'])
        # h3 nests inside h2 in the generated outline
        self.assertHTMLEqual(
            resp.content, '<ol><li><a href="#Head_2" rel="internal">Head 2</a>'
            '<ol><li><a href="#Head_3" rel="internal">Head 3</a>'
            '</ol></li></ol>')
    @attr('bug875349')
    def test_children_view(self):
        """wiki.get_children returns the subpage tree as JSON, honoring
        ?expand and ?depth, sorting children by title, and returning an
        error object for unknown documents."""
        test_content = '<p>Test <a href="http://example.com">Summary</a></p>'

        def _make_doc(title, slug, parent=None, is_redir=False):
            # helper: create a document (optionally a redirect) with one
            # revision whose SEO summary is derived from test_content
            doc = document(title=title,
                           slug=slug,
                           save=True,
                           is_redirect=is_redir)
            if is_redir:
                content = 'REDIRECT <a class="redirect" href="/en-US/blah">Blah</a>'
            else:
                content = test_content
            # NOTE(review): the revision always receives test_content,
            # even for redirects; doc.html is overwritten below -- confirm
            # this is intended.
            revision(document=doc,
                     content=test_content,
                     summary=get_seo_description(
                         test_content,
                         strip_markup=False),
                     save=True)
            doc.html = content
            if parent:
                doc.parent_topic = parent
            doc.save()
            return doc

        root_doc = _make_doc('Root', 'Root')
        child_doc_1 = _make_doc('Child 1', 'Root/Child_1', root_doc)
        _make_doc('Grandchild 1', 'Root/Child_1/Grandchild_1', child_doc_1)
        grandchild_doc_2 = _make_doc('Grandchild 2',
                                     'Root/Child_1/Grandchild_2',
                                     child_doc_1)
        _make_doc('Great Grandchild 1',
                  'Root/Child_1/Grandchild_2/Great_Grand_Child_1',
                  grandchild_doc_2)
        _make_doc('Child 2', 'Root/Child_2', root_doc)
        # redirect child: excluded from the subpage listing below
        _make_doc('Child 3', 'Root/Child_3', root_doc, True)

        Switch.objects.create(name='application_ACAO', active=True)
        for expand in (True, False):
            url = reverse('wiki.get_children', args=['Root'],
                          locale=settings.WIKI_DEFAULT_LANGUAGE)
            if expand:
                url = '%s?expand' % url
            resp = self.client.get(url)
            ok_('Access-Control-Allow-Origin' in resp)
            eq_('*', resp['Access-Control-Allow-Origin'])
            json_obj = json.loads(resp.content)

            # Basic structure creation testing
            eq_(json_obj['slug'], 'Root')
            if not expand:
                ok_('summary' not in json_obj)
            else:
                eq_(json_obj['summary'],
                    'Test <a href="http://example.com">Summary</a>')
                ok_('tags' in json_obj)
                ok_('review_tags' in json_obj)
            # only 2 of the 3 children: the redirect is filtered out
            eq_(len(json_obj['subpages']), 2)
            eq_(len(json_obj['subpages'][0]['subpages']), 2)
            eq_(json_obj['subpages'][0]['subpages'][1]['title'],
                'Grandchild 2')

        # Depth parameter testing
        def _depth_test(depth, aught):
            # assert the number of subpages visible at 'Grandchild 2'
            # for a given ?depth
            url = reverse('wiki.get_children', args=['Root'],
                          locale=settings.WIKI_DEFAULT_LANGUAGE) + '?depth=' + str(depth)
            resp = self.client.get(url)
            json_obj = json.loads(resp.content)
            eq_(len(json_obj['subpages'][0]['subpages'][1]['subpages']), aught)

        _depth_test(2, 0)
        _depth_test(3, 1)
        _depth_test(6, 1)

        # Sorting test
        sort_root_doc = _make_doc('Sort Root', 'Sort_Root')
        _make_doc('B Child', 'Sort_Root/B_Child', sort_root_doc)
        _make_doc('A Child', 'Sort_Root/A_Child', sort_root_doc)
        resp = self.client.get(reverse('wiki.get_children', args=['Sort_Root'],
                                       locale=settings.WIKI_DEFAULT_LANGUAGE))
        json_obj = json.loads(resp.content)
        # children come back alphabetically by title, not creation order
        eq_(json_obj['subpages'][0]['title'], 'A Child')

        # Test if we are serving an error json if document does not exist
        no_doc_url = reverse('wiki.get_children', args=['nonexistentDocument'],
                             locale=settings.WIKI_DEFAULT_LANGUAGE)
        resp = self.client.get(no_doc_url)
        result = json.loads(resp.content)
        eq_(result, {'error': 'Document does not exist.'})
def test_summary_view(self):
    """The ?summary option should restrict document view to summary"""
    # Two paragraphs: only the first one's inline markup is the summary.
    doc, _ = doc_rev("""
        <p>Foo bar <a href="http://example.com">baz</a></p>
        <p>Quux xyzzy</p>
        """)
    summary_url = '%s?raw&summary' % doc.get_absolute_url()
    response = self.client.get(summary_url)
    # The second paragraph must not appear in the summary rendering.
    eq_(response.content, 'Foo bar <a href="http://example.com">baz</a>')
@override_settings(CELERY_ALWAYS_EAGER=True)
@mock.patch('waffle.flag_is_active')
@mock.patch('kuma.wiki.jobs.DocumentContributorsJob.get')
def test_footer_contributors(self, get_contributors, flag_is_active):
    """The document footer renders one link per contributor.

    DocumentContributorsJob.get is mocked so the view never touches the
    real contributor-lookup job; waffle flags are forced active.
    """
    get_contributors.return_value = [
        {'id': 1, 'username': 'ringo', 'email': 'ringo@apple.co.uk'},
        {'id': 2, 'username': 'john', 'email': 'lennon@apple.co.uk'},
    ]
    flag_is_active.return_value = True
    d, r = doc_rev('some content')
    resp = self.client.get(d.get_absolute_url())
    page = pq(resp.content)
    # Locate the footer section by its heading text.
    contributors = (page.find(":contains('Contributors to this page')")
                    .parent())
    # just checking if the contributor link is rendered
    eq_(len(contributors.find('a')), 2)
def test_revision_view_bleached_content(self):
    """Bug 821988: Revision content should be cleaned with bleach"""
    # Content contains deliberate XSS vectors: onload handlers and a
    # nested <svg> element that must be stripped before display.
    d, r = doc_rev("""
        <a href="#" onload=alert(3)>Hahaha</a>
        <svg><svg onload=alert(3);>
        """)
    resp = self.client.get(r.get_absolute_url())
    page = pq(resp.content)
    ct = page.find('#wikiArticle').html()
    # The svg tags must be gone and the anchor must survive with its
    # onload attribute removed.
    ok_('<svg>' not in ct)
    ok_('<a href="#">Hahaha</a>' in ct)
def test_raw_css_view(self):
    """The raw source for a document can be requested"""
    self.client.login(username='admin', password='testpass')
    # A Template:CustomSampleCSS document holding CSS source.
    doc = document(title='Template:CustomSampleCSS',
                   slug='Template:CustomSampleCSS',
                   save=True)
    revision(
        save=True,
        is_approved=True,
        document=doc,
        content="""
        /* CSS here */
        body {
            padding: 0;
            margin: 0;
        }
        svg:not(:root) {
            display:block;
        }
        """)
    response = self.client.get('%s?raw=true' %
                               reverse('wiki.document', args=[doc.slug]))
    # The raw view should serve the CSS template with a CSS content
    # type rather than text/html.
    ok_('text/css' in response['Content-Type'])
class PermissionTests(UserTestCase, WikiTestCase):
    """Permission checks around creating, editing, and reverting
    Template: documents."""
    localizing_client = True

    def setUp(self):
        """Set up the permissions, groups, and users needed for the tests"""
        super(PermissionTests, self).setUp()
        self.perms, self.groups, self.users, self.superuser = (
            create_template_test_users())

    def test_template_revert_permission(self):
        """Reverting a template revision requires the change permission."""
        locale = 'en-US'
        slug = 'Template:test-revert-perm'
        doc = document(save=True, slug=slug, title=slug, locale=locale)
        rev = revision(save=True, document=doc)

        # Revision template should not show revert button
        url = reverse('wiki.revision', args=([doc.slug, rev.id]))
        resp = self.client.get(url)
        ok_('Revert' not in resp.content)

        # Revert POST should give permission denied to user without perm
        username = self.users['none'].username
        self.client.login(username=username, password='testpass')
        url = reverse('wiki.revert_document',
                      args=([doc.slug, rev.id]))
        resp = self.client.post(url, {'comment': 'test'})
        eq_(403, resp.status_code)

        # Revert POST should give success to user with perm
        username = self.users['change'].username
        self.client.login(username=username, password='testpass')
        url = reverse('wiki.revert_document',
                      args=([doc.slug, rev.id]))
        resp = self.client.post(url, {'comment': 'test'}, follow=True)
        eq_(200, resp.status_code)

    def test_template_permissions(self):
        """Only users with the template perms may create/edit Template: docs.

        Plain slugs are open to everyone; Template: slugs require the
        'add' permission for creation and 'change' for editing.
        """
        msg = ('edit', 'create')  # indexed by is_add: False->edit, True->create
        for is_add in (True, False):
            slug_trials = (
                # Non-template slugs: every user may create and edit.
                ('test_for_%s', (
                    (True, self.superuser),
                    (True, self.users['none']),
                    (True, self.users['all']),
                    (True, self.users['add']),
                    (True, self.users['change']),
                )),
                # Template: slugs gate on the matching permission.
                ('Template:test_for_%s', (
                    (True, self.superuser),
                    (False, self.users['none']),
                    (True, self.users['all']),
                    (is_add, self.users['add']),
                    (not is_add, self.users['change']),
                ))
            )
            for slug_tmpl, trials in slug_trials:
                for expected, tmp_user in trials:
                    username = tmp_user.username
                    slug = slug_tmpl % username
                    locale = settings.WIKI_DEFAULT_LANGUAGE

                    # Start from a clean slate for this slug.
                    Document.objects.filter(slug=slug).delete()
                    if not is_add:
                        # The edit case needs a pre-existing document.
                        doc = document(save=True, slug=slug, title=slug,
                                       locale=locale)
                        revision(save=True, document=doc)

                    self.client.login(username=username, password='testpass')

                    data = new_document_data()
                    data.update({"title": slug, "slug": slug})
                    if is_add:
                        url = reverse('wiki.new_document', locale=locale)
                        resp = self.client.post(url, data, follow=False)
                    else:
                        data['form'] = 'rev'
                        url = reverse('wiki.edit_document', args=(slug,),
                                      locale=locale)
                        resp = self.client.post(url, data, follow=False)

                    if expected:
                        # BUG FIX: the failure messages previously
                        # referenced an undefined name `user`, which
                        # raised NameError on any assertion failure.
                        eq_(302, resp.status_code,
                            "%s should be able to %s %s" %
                            (username, msg[is_add], slug))
                        Document.objects.filter(slug=slug).delete()
                    else:
                        eq_(403, resp.status_code,
                            "%s should not be able to %s %s" %
                            (username, msg[is_add], slug))
class ConditionalGetTests(UserTestCase, WikiTestCase):
    """Tests for conditional GET on document view"""
    localizing_client = True

    def test_last_modified(self):
        """Ensure the last-modified stamp of a document is cached"""
        doc, rev = doc_rev()
        get_url = reverse('wiki.document',
                          args=[doc.slug],
                          locale=settings.WIKI_DEFAULT_LANGUAGE)

        # There should be a last-modified date cached for this document already
        cache_key = doc.last_modified_cache_key
        ok_(cache.get(cache_key))

        # Now, try a request, and ensure that the last-modified header is
        # present.
        response = self.client.get(get_url, follow=False)
        ok_(response.has_header('last-modified'))
        last_mod = response['last-modified']

        # Try another request, using If-Modified-Since. This should be a 304
        response = self.client.get(get_url, follow=False,
                                   HTTP_IF_MODIFIED_SINCE=last_mod)
        eq_(304, response.status_code)

        # Finally, ensure that the last-modified was cached.
        cached_last_mod = cache.get(cache_key)
        eq_(doc.modified.strftime('%s'), cached_last_mod)

        # Let the clock tick, so the last-modified will change on edit.
        time.sleep(1.0)

        # Edit the document, ensure the last-modified has been invalidated.
        revision(document=doc, content="New edits", save=True)
        ok_(cache.get(cache_key) != cached_last_mod)

        # The document changed after last_mod, so the same
        # If-Modified-Since now yields a full 200 response with fresh
        # last-modified values in both the response and the cache.
        response = self.client.get(get_url, follow=False,
                                   HTTP_IF_MODIFIED_SINCE=last_mod)
        eq_(200, response.status_code)
        ok_(last_mod != response['last-modified'])
        ok_(cached_last_mod != cache.get(cache_key))

    def test_deletion_clears_last_modified(self):
        """Deleting a page clears any last-modified caching"""
        # Setup mostly the same as previous test, to get a doc and set
        # last-modified info.
        doc, rev = doc_rev()
        self.url = reverse('wiki.document',
                           args=[doc.slug],
                           locale=settings.WIKI_DEFAULT_LANGUAGE)
        cache_key = doc.last_modified_cache_key
        last_mod = cache.get(cache_key)
        ok_(last_mod)  # exists already because pre-filled
        self.client.get(self.url, follow=False)
        # A plain GET must not disturb the cached stamp.
        ok_(cache.get(cache_key) == last_mod)

        # Now delete the doc and make sure there's no longer
        # last-modified data in the cache for it afterward.
        doc.delete()
        ok_(not cache.get(cache_key))

    def test_deleted_doc_returns_404(self):
        """Requesting a deleted doc returns 404"""
        doc, rev = doc_rev()
        doc.delete()
        # A deletion log entry exists, but the page is still a 404.
        DocumentDeletionLog.objects.create(locale=doc.locale, slug=doc.slug,
                                           user=rev.creator, reason="test")
        response = self.client.get(doc.get_absolute_url(), follow=False)
        eq_(404, response.status_code)
class ReadOnlyTests(UserTestCase, WikiTestCase):
    """Tests readonly scenarios"""
    fixtures = UserTestCase.fixtures + ['wiki/documents.json']
    localizing_client = True

    def setUp(self):
        # NOTE(review): self.kumaediting_flag is referenced by the tests
        # but not created here -- presumably set up by a base class or
        # fixture; TODO confirm.
        super(ReadOnlyTests, self).setUp()
        self.d, r = doc_rev()
        self.edit_url = reverse('wiki.edit_document', args=[self.d.slug])

    def test_everyone(self):
        """ kumaediting: everyone, kumabanned: none """
        self.kumaediting_flag.everyone = True
        self.kumaediting_flag.save()

        self.client.login(username='testuser', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(200, resp.status_code)

    def test_superusers_only(self):
        """ kumaediting: superusers, kumabanned: none """
        self.kumaediting_flag.everyone = None
        self.kumaediting_flag.superusers = True
        self.kumaediting_flag.save()

        # Regular users see the read-only message.
        self.client.login(username='testuser', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(403, resp.status_code)
        ok_('The wiki is in read-only mode.' in resp.content)
        self.client.logout()

        # Superusers can still edit.
        self.client.login(username='admin', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(200, resp.status_code)

    def test_banned_users(self):
        """ kumaediting: everyone, kumabanned: testuser2 """
        self.kumaediting_flag.everyone = True
        self.kumaediting_flag.save()
        # ban testuser2
        kumabanned = Flag.objects.create(name='kumabanned')
        kumabanned.users = self.user_model.objects.filter(username='testuser2')
        kumabanned.save()

        # testuser can still access
        self.client.login(username='testuser', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(200, resp.status_code)
        self.client.logout()

        # testuser2 cannot
        self.client.login(username='testuser2', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(403, resp.status_code)
        ok_('Your profile has been banned from making edits.' in resp.content)

        # ban testuser01 and testuser2
        kumabanned.users = self.user_model.objects.filter(
            Q(username='testuser2') | Q(username='testuser01'))
        kumabanned.save()

        # testuser can still access
        self.client.login(username='testuser', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(200, resp.status_code)
        self.client.logout()

        # testuser2 cannot access
        self.client.login(username='testuser2', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(403, resp.status_code)
        ok_('Your profile has been banned from making edits.' in resp.content)

        # testuser01 cannot access
        self.client.login(username='testuser01', password='testpass')
        resp = self.client.get(self.edit_url)
        eq_(403, resp.status_code)
        ok_('Your profile has been banned from making edits.' in resp.content)
class BannedIPTests(UserTestCase, WikiTestCase):
    """Tests that banned IPs may read but not edit wiki documents."""
    fixtures = UserTestCase.fixtures + ['wiki/documents.json']
    localizing_client = True

    def setUp(self):
        super(BannedIPTests, self).setUp()
        # Ban the address every test request will come from.
        self.ip = '127.0.0.1'
        self.ip_ban = IPBan.objects.create(ip=self.ip)
        self.doc, rev = doc_rev()
        self.edit_url = reverse('wiki.edit_document',
                                args=[self.doc.slug])

    def tearDown(self):
        cache.clear()

    def test_banned_ip_cant_get_edit(self):
        """GET on the edit view from a banned IP is forbidden."""
        self.client.login(username='testuser', password='testpass')
        response = self.client.get(self.edit_url, REMOTE_ADDR=self.ip)
        eq_(403, response.status_code)

    def test_banned_ip_cant_post_edit(self):
        """POST on the edit view from a banned IP is forbidden."""
        self.client.login(username='testuser', password='testpass')
        # BUG FIX: this test previously issued a GET (an exact duplicate
        # of test_banned_ip_cant_get_edit), leaving the POST path untested.
        response = self.client.post(self.edit_url, REMOTE_ADDR=self.ip)
        eq_(403, response.status_code)

    def test_banned_ip_can_still_get_articles(self):
        """Read-only document views remain accessible to banned IPs."""
        response = self.client.get(self.doc.get_absolute_url(),
                                   REMOTE_ADDR=self.ip)
        eq_(200, response.status_code)
class KumascriptIntegrationTests(UserTestCase, WikiTestCase):
    """
    Tests for usage of the kumascript service.

    Note that these tests really just check whether or not the service was
    used, and are not integration tests meant to exercise the real service.
    """
    localizing_client = True

    def setUp(self):
        super(KumascriptIntegrationTests, self).setUp()
        self.d, self.r = doc_rev()
        self.r.content = "TEST CONTENT"
        self.r.save()
        self.d.tags.set('foo', 'bar', 'baz')
        self.url = reverse('wiki.document',
                           args=(self.d.slug,),
                           locale=self.d.locale)

        # TODO: upgrade mock to 0.8.0 so we can do this.
        # self.mock_kumascript_get = (
        #     mock.patch('kuma.wiki.kumascript.get'))
        # self.mock_kumascript_get.return_value = self.d.html

    def tearDown(self):
        super(KumascriptIntegrationTests, self).tearDown()

        # TODO: upgrade mock to 0.8.0 so we can do this.
        # self.mock_kumascript_get.stop()

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_basic_view(self, mock_kumascript_get):
        """When kumascript timeout is non-zero, the service should be used"""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get(self.url, follow=False)
        ok_(mock_kumascript_get.called,
            "kumascript should have been used")

    @override_constance_settings(KUMASCRIPT_TIMEOUT=0.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_disabled(self, mock_kumascript_get):
        """When disabled, the kumascript service should not be used"""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get(self.url, follow=False)
        ok_(not mock_kumascript_get.called,
            "kumascript not should have been used")

    @override_constance_settings(KUMASCRIPT_TIMEOUT=0.0)
    @mock.patch('kuma.wiki.kumascript.get')
    @override_settings(CELERY_ALWAYS_EAGER=True)
    def test_disabled_rendering(self, mock_kumascript_get):
        """When disabled, the kumascript service should not be used
        in rendering"""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.d.schedule_rendering('max-age=0')
        ok_(not mock_kumascript_get.called,
            "kumascript not should have been used")

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_nomacros(self, mock_kumascript_get):
        """?nomacros suppresses macro evaluation."""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get('%s?nomacros' % self.url, follow=False)
        ok_(not mock_kumascript_get.called,
            "kumascript should not have been used")

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_raw(self, mock_kumascript_get):
        """?raw serves the source without macro evaluation."""
        mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get('%s?raw' % self.url, follow=False)
        ok_(not mock_kumascript_get.called,
            "kumascript should not have been used")

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_raw_macros(self, mock_kumascript_get):
        """?raw&macros forces macro evaluation even for raw source."""
        mock_kumascript_get.return_value = (self.d.html, None)
        # BUG FIX: the query string had been corrupted by an HTML-entity
        # decode ("&macr" became the macron character), so the macros
        # flag was never actually sent and the assertion below could not
        # pass for the intended reason.
        self.client.get('%s?raw&macros' % self.url, follow=False)
        ok_(mock_kumascript_get.called,
            "kumascript should have been used")

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
                                 KUMASCRIPT_MAX_AGE=1234)
    @mock.patch('requests.get')
    def test_ua_max_age_zero(self, mock_requests_get):
        """Authenticated users can request a zero max-age for kumascript"""
        trap = {}

        def my_requests_get(url, headers=None, timeout=None):
            # Capture the headers the view sends to kumascript.
            trap['headers'] = headers
            return FakeResponse(status_code=200,
                                headers={}, text='HELLO WORLD')

        mock_requests_get.side_effect = my_requests_get

        # Anonymous: the client Cache-Control is ignored in favor of the
        # configured KUMASCRIPT_MAX_AGE.
        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='max-age=0')
        eq_('max-age=1234', trap['headers']['Cache-Control'])

        # Authenticated: max-age=0 is passed through to kumascript.
        # BUG FIX: this test had been a byte-for-byte copy of
        # test_ua_no_cache (sending "no-cache"), so the max-age=0 path
        # named in the docstring was never exercised.
        self.client.login(username='admin', password='testpass')
        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='max-age=0')
        eq_('max-age=0', trap['headers']['Cache-Control'])

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
                                 KUMASCRIPT_MAX_AGE=1234)
    @mock.patch('requests.get')
    def test_ua_no_cache(self, mock_requests_get):
        """Authenticated users can request no-cache for kumascript"""
        trap = {}

        def my_requests_get(url, headers=None, timeout=None):
            # Capture the headers the view sends to kumascript.
            trap['headers'] = headers
            return FakeResponse(status_code=200,
                                headers={}, text='HELLO WORLD')

        mock_requests_get.side_effect = my_requests_get

        # Anonymous: no-cache is ignored; the configured max-age wins.
        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='no-cache')
        eq_('max-age=1234', trap['headers']['Cache-Control'])

        # Authenticated: no-cache is passed through to kumascript.
        self.client.login(username='admin', password='testpass')
        self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='no-cache')
        eq_('no-cache', trap['headers']['Cache-Control'])

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
                                 KUMASCRIPT_MAX_AGE=1234)
    @mock.patch('requests.get')
    def test_conditional_get(self, mock_requests_get):
        """Ensure conditional GET in requests to kumascript work as expected"""
        expected_etag = "8675309JENNY"
        expected_modified = "Wed, 14 Mar 2012 22:29:17 GMT"
        expected_content = "HELLO THERE, WORLD"
        trap = dict(req_cnt=0)

        def my_requests_get(url, headers=None, timeout=None):
            # Serve fresh content (200) on the first two requests and a
            # 304 Not Modified afterwards.
            trap['req_cnt'] += 1
            trap['headers'] = headers
            if trap['req_cnt'] in [1, 2]:
                return FakeResponse(
                    status_code=200, text=expected_content,
                    headers={
                        "etag": expected_etag,
                        "last-modified": expected_modified,
                        "age": 456
                    })
            else:
                return FakeResponse(
                    status_code=304, text='',
                    headers={
                        "etag": expected_etag,
                        "last-modified": expected_modified,
                        "age": 123
                    })

        mock_requests_get.side_effect = my_requests_get

        # First request to let the view cache etag / last-modified
        response = self.client.get(self.url)

        # Clear rendered_html to force another request.
        self.d.rendered_html = ''
        self.d.save()

        # Second request to verify the view sends them back
        response = self.client.get(self.url)
        eq_(expected_etag, trap['headers']['If-None-Match'])
        eq_(expected_modified, trap['headers']['If-Modified-Since'])

        # Third request to verify content was cached and served on a 304
        response = self.client.get(self.url)
        ok_(expected_content in response.content)

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
                                 KUMASCRIPT_MAX_AGE=600)
    @mock.patch('requests.get')
    def test_error_reporting(self, mock_requests_get):
        """Kumascript reports errors in HTTP headers, Kuma should display"""
        # Make sure we have enough log messages to ensure there are more than
        # 10 lines of Base64 in headers. This ensures that there'll be a
        # failure if the view sorts FireLogger sequence number alphabetically
        # instead of numerically.
        expected_errors = {
            "logs": [
                {"level": "debug",
                 "message": "Message #1",
                 "args": ['TestError', {}, {'name': 'SomeMacro', 'token': {'args': 'arguments here'}}],
                 "time": "12:32:03 GMT-0400 (EDT)",
                 "timestamp": "1331829123101000"},
                {"level": "warning",
                 "message": "Message #2",
                 "args": ['TestError', {}, {'name': 'SomeMacro2'}],
                 "time": "12:33:58 GMT-0400 (EDT)",
                 "timestamp": "1331829238052000"},
                {"level": "info",
                 "message": "Message #3",
                 "args": ['TestError'],
                 "time": "12:34:22 GMT-0400 (EDT)",
                 "timestamp": "1331829262403000"},
                {"level": "debug",
                 "message": "Message #4",
                 "time": "12:32:03 GMT-0400 (EDT)",
                 "timestamp": "1331829123101000"},
                {"level": "warning",
                 "message": "Message #5",
                 "time": "12:33:58 GMT-0400 (EDT)",
                 "timestamp": "1331829238052000"},
                {"level": "info",
                 "message": "Message #6",
                 "time": "12:34:22 GMT-0400 (EDT)",
                 "timestamp": "1331829262403000"},
            ]
        }

        # Pack it up, get ready to ship it out.
        d_json = json.dumps(expected_errors)
        d_b64 = base64.encodestring(d_json)
        d_lines = [x for x in d_b64.split("\n") if x]

        # Headers are case-insensitive, so let's just drive that point home
        p = ['firelogger', 'FIRELOGGER', 'FireLogger']
        fl_uid = 8675309
        headers_out = {}
        for i, line in enumerate(d_lines):
            headers_out['%s-%s-%s' % (p[i % len(p)], fl_uid, i)] = line

        # Now, trap the request from the view.
        trap = {}

        def my_requests_get(url, headers=None, timeout=None):
            trap['headers'] = headers
            return FakeResponse(
                status_code=200,
                text='HELLO WORLD',
                headers=headers_out
            )

        mock_requests_get.side_effect = my_requests_get

        # Finally, fire off the request to the view and ensure that the log
        # messages were received and displayed on the page. But, only for a
        # logged in user.
        self.client.login(username='admin', password='testpass')
        response = self.client.get(self.url)
        eq_(trap['headers']['X-FireLogger'], '1.2')
        for error in expected_errors['logs']:
            ok_(error['message'] in response.content)
        eq_(response.status_code, 200)

    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
                                 KUMASCRIPT_MAX_AGE=600)
    @mock.patch('requests.post')
    def test_preview_nonascii(self, mock_post):
        """POSTing non-ascii to kumascript should encode to utf8"""
        content = u'Français'
        trap = {}

        def my_post(url, timeout=None, headers=None, data=None):
            trap['data'] = data
            return FakeResponse(status_code=200, headers={},
                                text=content.encode('utf8'))

        mock_post.side_effect = my_post

        self.client.login(username='admin', password='testpass')
        self.client.post(reverse('wiki.preview'), {'content': content})
        # The posted body must decode cleanly as UTF-8.
        try:
            trap['data'].decode('utf8')
        except UnicodeDecodeError:
            self.fail("Data wasn't posted as utf8")
class DocumentSEOTests(UserTestCase, WikiTestCase):
    """Tests for the document seo logic"""
    localizing_client = True

    @attr('bug1190212')
    def test_get_seo_parent_doesnt_throw_404(self):
        """_get_seo_parent_title must tolerate a missing parent document."""
        slug_dict = {'seo_root': 'Root/Does/Not/Exist'}
        try:
            _get_seo_parent_title(slug_dict, 'bn-BD')
        except Http404:
            self.fail('Missing parent should not cause 404 from '
                      '_get_seo_parent_title')

    def test_seo_title(self):
        """<title> combines doc title, SEO parent title, and ' | MDN'."""
        self.client.login(username='admin', password='testpass')

        # Utility to make a quick doc
        def _make_doc(title, aught_titles, slug):
            # Create doc + revision, then assert the rendered <title>
            # is one of the acceptable variants in aught_titles.
            doc = document(save=True, slug=slug, title=title,
                           locale=settings.WIKI_DEFAULT_LANGUAGE)
            revision(save=True, document=doc)
            response = self.client.get(reverse('wiki.document', args=[slug],
                                               locale=settings.WIKI_DEFAULT_LANGUAGE))
            page = pq(response.content)

            ok_(page.find('title').text() in aught_titles)

        # Test nested document titles
        _make_doc('One', ['One | MDN'], 'one')
        _make_doc('Two', ['Two - One | MDN'], 'one/two')
        _make_doc('Three', ['Three - One | MDN'], 'one/two/three')
        _make_doc(u'Special Φ Char',
                  [u'Special \u03a6 Char - One | MDN',
                   u'Special \xce\xa6 Char - One | MDN'],
                  'one/two/special_char')

        # Additional tests for /Web/* changes
        _make_doc('Firefox OS', ['Firefox OS | MDN'], 'firefox_os')
        _make_doc('Email App', ['Email App - Firefox OS | MDN'],
                  'firefox_os/email_app')
        _make_doc('Web', ['Web | MDN'], 'Web')
        _make_doc('HTML', ['HTML | MDN'], 'Web/html')
        _make_doc('Fieldset', ['Fieldset - HTML | MDN'], 'Web/html/fieldset')
        _make_doc('Legend', ['Legend - HTML | MDN'],
                  'Web/html/fieldset/legend')

    def test_seo_script(self):
        """The meta-description is derived from document content."""
        self.client.login(username='admin', password='testpass')

        def make_page_and_compare_seo(slug, content, aught_preview):
            # Create the doc
            data = new_document_data()
            data.update({'title': 'blah', 'slug': slug, 'content': content})
            response = self.client.post(reverse('wiki.new_document',
                                                locale=settings.WIKI_DEFAULT_LANGUAGE),
                                        data)
            eq_(302, response.status_code)

            # Connect to newly created page
            response = self.client.get(reverse('wiki.document', args=[slug],
                                               locale=settings.WIKI_DEFAULT_LANGUAGE))
            page = pq(response.content)
            meta_content = page.find('meta[name=description]').attr('content')
            eq_(str(meta_content).decode('utf-8'),
                str(aught_preview).decode('utf-8'))

        # Test pages - very basic
        good = 'This is the content which should be chosen, man.'
        make_page_and_compare_seo('one', '<p>' + good + '</p>', good)
        # No content, no seo
        make_page_and_compare_seo('two', 'blahblahblahblah<br />', None)
        # No summary, no seo
        make_page_and_compare_seo('three', '<div><p>You cant see me</p></div>',
                                  None)
        # Warning paragraph ignored
        make_page_and_compare_seo('four',
                                  '<div class="geckoVersion">'
                                  '<p>No no no</p></div><p>yes yes yes</p>',
                                  'yes yes yes')
        # Warning paragraph ignored, first one chosen if multiple matches
        make_page_and_compare_seo('five',
                                  '<div class="geckoVersion"><p>No no no</p>'
                                  '</div><p>yes yes yes</p>'
                                  '<p>ignore ignore ignore</p>',
                                  'yes yes yes')
        # Don't take legacy crumbs
        make_page_and_compare_seo('six', u'<p>« CSS</p><p>I am me!</p>',
                                  'I am me!')
        # Take the seoSummary class'd element
        make_page_and_compare_seo('seven',
                                  u'<p>I could be taken</p>'
                                  '<p class="seoSummary">I should be though</p>',
                                  'I should be though')
        # Two summaries append
        make_page_and_compare_seo('eight',
                                  u'<p>I could be taken</p>'
                                  '<p class="seoSummary">a</p>'
                                  '<p class="seoSummary">b</p>',
                                  'a b')
        # No brackets
        make_page_and_compare_seo('nine',
                                  u'<p>I <em>am</em> awesome.'
                                  ' <a href="blah">A link</a> is also <cool></p>',
                                  u'I am awesome. A link is also cool')
class DocumentEditingTests(UserTestCase, WikiTestCase):
"""Tests for the document-editing view"""
localizing_client = True
def test_noindex_post(self):
    """The new-document page must carry an X-Robots-Tag: noindex header."""
    self.client.login(username='admin', password='testpass')
    # Fetch the page for creating a new document and inspect the header.
    new_doc_url = reverse('wiki.new_document', args=[],
                          locale=settings.WIKI_DEFAULT_LANGUAGE)
    resp = self.client.get(new_doc_url)
    eq_(resp['X-Robots-Tag'], 'noindex')
@attr('bug821986')
def test_editor_safety_filter(self):
    """Safety filter should be applied before rendering editor"""
    self.client.login(username='admin', password='testpass')
    # Revision content carries an XSS vector that must not survive into
    # the editor textarea.
    r = revision(save=True, content="""
        <svg><circle onload=confirm(3)>
        """)
    args = [r.document.slug]
    # Both the edit view and the translate view prefill the editor.
    urls = (
        reverse('wiki.edit_document', args=args),
        '%s?tolocale=%s' % (reverse('wiki.translate', args=args), 'fr')
    )
    for url in urls:
        page = pq(self.client.get(url).content)
        editor_src = page.find('#id_content').text()
        ok_('onload' not in editor_src)
def test_create_on_404(self):
    """Visiting a missing page redirects editors to the create-page form,
    except when kumascript/raw-API query parameters request a real 404."""
    self.client.login(username='admin', password='testpass')

    # Create the parent page.
    d, r = doc_rev()

    # Establish attribs of child page.
    locale = settings.WIKI_DEFAULT_LANGUAGE
    local_slug = 'Some_New_Title'
    slug = '%s/%s' % (d.slug, local_slug)
    url = reverse('wiki.document', args=[slug], locale=locale)

    # Ensure redirect to create new page on attempt to visit non-existent
    # child page.
    resp = self.client.get(url)
    eq_(302, resp.status_code)
    ok_('docs/new' in resp['Location'])
    ok_('?slug=%s' % local_slug in resp['Location'])

    # Ensure real 404 for visit to non-existent page with params common to
    # kumascript and raw content API.
    for p_name in ('raw', 'include', 'nocreate'):
        sub_url = '%s?%s=1' % (url, p_name)
        resp = self.client.get(sub_url)
        eq_(404, resp.status_code)

    # Ensure root level documents work, not just children
    response = self.client.get(reverse('wiki.document',
                                       args=['noExist'], locale=locale))
    eq_(302, response.status_code)

    response = self.client.get(reverse('wiki.document',
                                       args=['Template:NoExist'],
                                       locale=locale))
    eq_(302, response.status_code)
def test_new_document_comment(self):
    """Creating a new document with a revision comment saves the comment"""
    self.client.login(username='admin', password='testpass')
    rev_comment = 'I am the revision comment'
    doc_slug = 'Test-doc-comment'

    # Submit the creation form with a revision comment attached.
    form_data = new_document_data()
    form_data.update({'slug': doc_slug, 'comment': rev_comment})
    self.client.post(reverse('wiki.new_document'), form_data)

    # The comment should land on the new document's current revision.
    created = Document.objects.get(slug=doc_slug,
                                   locale=settings.WIKI_DEFAULT_LANGUAGE)
    eq_(rev_comment, created.current_revision.comment)
@attr('toc')
def test_toc_initial(self):
    """The new-document form preselects the default ToC depth.

    The option carrying the "selected" attribute in the ToC depth
    <select> must have the TOC_DEPTH_H4 value.
    """
    self.client.login(username='admin', password='testpass')

    resp = self.client.get(reverse('wiki.new_document'))
    eq_(200, resp.status_code)

    page = pq(resp.content)
    toc_select = page.find('#id_toc_depth')
    toc_options = toc_select.find('option')
    # BUG FIX: the flag was previously re-initialized inside the loop,
    # so a selected option followed by any unselected option was
    # forgotten and the test raised a spurious AssertionError.
    found_selected = False
    for option in toc_options:
        opt_element = pq(option)
        if opt_element.attr('selected'):
            found_selected = True
            eq_(str(Revision.TOC_DEPTH_H4), opt_element.attr('value'))
    if not found_selected:
        raise AssertionError("No ToC depth initially selected.")
@attr('retitle')
def test_retitling_solo_doc(self):
    """ Editing just title of non-parent doc:
        * Changes title
        * Doesn't cause errors
        * Doesn't create redirect
    """
    # Not testing slug changes separately; the model tests cover those plus
    # slug+title changes. If title changes work in the view, the rest
    # should also.
    self.client.login(username='admin', password='testpass')

    new_title = 'Some New Title'
    d, r = doc_rev()
    old_title = d.title
    data = new_document_data()
    data.update({'title': new_title,
                 'form': 'rev'})
    # Empty slug -- presumably means "keep the existing slug"; the
    # lookup below relies on the slug being unchanged. TODO confirm.
    data['slug'] = ''
    url = reverse('wiki.edit_document', args=[d.slug])
    self.client.post(url, data)
    # The document (found by its unchanged slug) carries the new title.
    eq_(new_title,
        Document.objects.get(slug=d.slug, locale=d.locale).title)
    # And no document (e.g. a redirect) exists under the old title.
    try:
        Document.objects.get(title=old_title)
        self.fail("Should not find doc by old title after retitling.")
    except Document.DoesNotExist:
        pass
@attr('retitle')
def test_retitling_parent_doc(self):
    """ Editing just title of parent doc:
        * Changes title
        * Doesn't cause errors
        * Doesn't create redirect
    """
    # Not testing slug changes separately; the model tests cover those plus
    # slug+title changes. If title changes work in the view, the rest
    # should also.
    self.client.login(username='admin', password='testpass')

    # create parent doc & rev along with child doc & rev
    d = document(title='parent', save=True)
    revision(document=d, content='parent', save=True)
    d2 = document(title='child', parent_topic=d, save=True)
    revision(document=d2, content='child', save=True)

    old_title = d.title
    new_title = 'Some New Title'
    data = new_document_data()
    data.update({'title': new_title,
                 'form': 'rev'})
    # Empty slug -- presumably means "keep the existing slug"; the
    # lookup below relies on the slug being unchanged. TODO confirm.
    data['slug'] = ''
    url = reverse('wiki.edit_document', args=[d.slug])
    self.client.post(url, data)
    # The parent (found by its unchanged slug) carries the new title.
    eq_(new_title,
        Document.objects.get(slug=d.slug, locale=d.locale).title)
    # And no document (e.g. a redirect) exists under the old title.
    try:
        Document.objects.get(title=old_title)
        self.fail("Should not find doc by old title after retitling.")
    except Document.DoesNotExist:
        pass
def test_slug_change_ignored_for_iframe(self):
    """When a document is edited via an iframe, a submitted slug change
    is ignored and no redirect document is created.

    (The test posts a changed slug -- not a changed title -- with
    ?iframe=1 and verifies the stored slug is untouched.)
    """
    self.client.login(username='admin', password='testpass')
    new_slug = 'some_new_slug'
    d, r = doc_rev()
    old_slug = d.slug
    data = new_document_data()
    data.update({'title': d.title,
                 'slug': new_slug,
                 'form': 'rev'})
    self.client.post('%s?iframe=1' % reverse('wiki.edit_document',
                                             args=[d.slug]), data)
    # Slug is unchanged...
    eq_(old_slug, Document.objects.get(slug=d.slug,
                                       locale=d.locale).slug)
    # ...and no redirect stub was written in its place.
    assert "REDIRECT" not in Document.objects.get(slug=old_slug).html
@attr('clobber')
def test_slug_collision_errors(self):
    """When an attempt is made to retitle an article and another with that
    title already exists, there should be form errors"""
    self.client.login(username='admin', password='testpass')

    exist_slug = "existing-doc"

    # Create a new doc.
    data = new_document_data()
    data.update({"slug": exist_slug})
    resp = self.client.post(reverse('wiki.new_document'), data)
    eq_(302, resp.status_code)

    # Create another new doc.
    data = new_document_data()
    data.update({"slug": 'some-new-title'})
    resp = self.client.post(reverse('wiki.new_document'), data)
    eq_(302, resp.status_code)

    # Now, post an update with duplicate slug
    data.update({
        'form': 'rev',
        'slug': exist_slug
    })
    resp = self.client.post(reverse('wiki.edit_document',
                                    args=['some-new-title']), data)
    # Collision: the edit form is re-rendered (200) with errors instead
    # of redirecting (302).
    eq_(200, resp.status_code)
    p = pq(resp.content)

    ok_(p.find('.errorlist').length > 0)
    ok_(p.find('.errorlist a[href="#id_slug"]').length > 0)
@attr('clobber')
def test_redirect_can_be_clobbered(self):
    """When an attempt is made to retitle an article, and another article
    with that title exists but is a redirect, there should be no errors and
    the redirect should be replaced."""
    self.client.login(username='admin', password='testpass')

    exist_title = "Existing doc"
    exist_slug = "existing-doc"

    changed_title = 'Changed title'
    changed_slug = 'changed-title'

    # Create a new doc.
    data = new_document_data()
    data.update({"title": exist_title, "slug": exist_slug})
    resp = self.client.post(reverse('wiki.new_document'), data)
    eq_(302, resp.status_code)

    # Change title and slug (this leaves a redirect at the old slug).
    data.update({'form': 'rev',
                 'title': changed_title,
                 'slug': changed_slug})
    resp = self.client.post(reverse('wiki.edit_document',
                                    args=[exist_slug]),
                            data)
    eq_(302, resp.status_code)

    # Change title and slug back to originals, clobbering the redirect
    data.update({'form': 'rev',
                 'title': exist_title,
                 'slug': exist_slug})
    resp = self.client.post(reverse('wiki.edit_document',
                                    args=[changed_slug]),
                            data)
    # A 302 (not a form error page) means the redirect was clobbered.
    eq_(302, resp.status_code)
def test_invalid_slug(self):
    """New-document slugs may not contain slashes, "$", "?", "%",
    quotes, or whitespace.

    NOTE(review): an earlier docstring claimed "/" was allowed, but the
    invalid_slugs list below treats slashes as invalid too.
    """
    self.client.login(username='admin', password='testpass')
    data = new_document_data()

    # A plain alphanumeric slug is accepted and redirects to the doc.
    data['title'] = 'valid slug'
    data['slug'] = 'valid'
    response = self.client.post(reverse('wiki.new_document'), data)
    self.assertRedirects(response,
                         reverse('wiki.document', args=[data['slug']],
                                 locale=settings.WIKI_DEFAULT_LANGUAGE))

    new_url = reverse('wiki.new_document')
    invalid_slugs = [
        'va/lid',  # slashes
        'inva$lid',  # dollar signs
        'inva?lid',  # question marks
        'inva%lid',  # percentage sign
        '"invalid\'',  # quotes
        'in valid',  # whitespace
    ]
    # Each invalid slug re-renders the form with a validation error.
    for invalid_slug in invalid_slugs:
        data['title'] = 'invalid with %s' % invalid_slug
        data['slug'] = invalid_slug
        response = self.client.post(new_url, data)
        self.assertContains(response, 'The slug provided is not valid.')
def test_invalid_reserved_term_slug(self):
    """Slugs should not collide with reserved URL patterns"""
    self.client.login(username='admin', password='testpass')
    data = new_document_data()

    # TODO: This is info derived from urls.py, but unsure how to DRY it
    reserved_slugs = (
        'ckeditor_config.js',
        'watch-ready-for-review',
        'unwatch-ready-for-review',
        'watch-approved',
        'unwatch-approved',
        '.json',
        'new',
        'all',
        'preview-wiki-content',
        'category/10',
        'needs-review/technical',
        'needs-review/',
        'feeds/atom/all/',
        'feeds/atom/needs-review/technical',
        'feeds/atom/needs-review/',
        'tag/tasty-pie'
    )

    # Every reserved term must be rejected by slug validation.
    for term in reserved_slugs:
        data['title'] = 'invalid with %s' % term
        data['slug'] = term
        response = self.client.post(reverse('wiki.new_document'), data)
        self.assertContains(response, 'The slug provided is not valid.')
def test_slug_revamp(self):
    """Exercise slug handling across creation, editing and translation of a
    document, its child and its grandchild, then verify that identical
    "specific" slugs at different hierarchy levels validate correctly."""
    self.client.login(username='admin', password='testpass')

    def _createAndRunTests(slug):
        # Create some vars
        locale = settings.WIKI_DEFAULT_LANGUAGE
        foreign_locale = 'es'
        new_doc_url = reverse('wiki.new_document')
        invalid_slug = "some/thing"
        invalid_slugs = [
            "some/thing",
            "some?thing",
            "some thing",
            "some%thing",
            "$omething",
        ]

        child_slug = 'kiddy'
        grandchild_slug = 'grandkiddy'

        # Create the document data
        doc_data = new_document_data()
        doc_data['title'] = slug + ' Doc'
        doc_data['slug'] = slug
        doc_data['content'] = 'This is the content'
        doc_data['is_localizable'] = True

        """ NEW DOCUMENT CREATION, CHILD CREATION """

        # Create the document, validate it exists
        response = self.client.post(new_doc_url, doc_data)
        eq_(302, response.status_code)  # 302 = good, forward to new page
        ok_(slug in response['Location'])
        self.assertRedirects(response, reverse('wiki.document',
                                               locale=locale, args=[slug]))
        doc_url = reverse('wiki.document', locale=locale, args=[slug])
        eq_(self.client.get(doc_url).status_code, 200)
        doc = Document.objects.get(locale=locale, slug=slug)
        eq_(doc.slug, slug)
        # No redirect document should have been created.
        # NOTE(review): other checks in this test use ' Redirect 1' (with a
        # leading space); this filter concatenates 'Redirect' without one --
        # confirm the intended title pattern.
        eq_(0, len(Document.objects.filter(title=doc_data['title'] + 'Redirect')))

        # Create child document data
        child_data = new_document_data()
        child_data['title'] = slug + ' Child Doc'
        child_data['slug'] = invalid_slug
        child_data['content'] = 'This is the content'
        child_data['is_localizable'] = True

        # Attempt to create the child with invalid slug, validate it fails
        def test_invalid_slug(inv_slug, url, data, doc):
            # Posting an invalid slug re-renders the form (HTTP 200) with
            # the bad slug intact and a validation error displayed.
            data['slug'] = inv_slug
            response = self.client.post(url, data)
            page = pq(response.content)
            eq_(200, response.status_code)  # 200 = bad, invalid data
            # Slug doesn't add parent
            eq_(inv_slug, page.find('input[name=slug]')[0].value)
            eq_(doc.get_absolute_url(),
                page.find('.metadataDisplay').attr('href'))
            self.assertContains(response,
                                'The slug provided is not valid.')

        for invalid_slug in invalid_slugs:
            test_invalid_slug(invalid_slug,
                              new_doc_url + '?parent=' + str(doc.id),
                              child_data, doc)

        # Attempt to create the child with *valid* slug,
        # should succeed and redirect
        child_data['slug'] = child_slug
        full_child_slug = slug + '/' + child_data['slug']
        response = self.client.post(new_doc_url + '?parent=' + str(doc.id),
                                    child_data)
        eq_(302, response.status_code)
        self.assertRedirects(response, reverse('wiki.document',
                                               locale=locale,
                                               args=[full_child_slug]))
        child_doc = Document.objects.get(locale=locale,
                                         slug=full_child_slug)
        eq_(child_doc.slug, full_child_slug)
        eq_(0, len(Document.objects.filter(
            title=child_data['title'] + ' Redirect 1',
            locale=locale)))

        # Create grandchild data
        grandchild_data = new_document_data()
        grandchild_data['title'] = slug + ' Grandchild Doc'
        grandchild_data['slug'] = invalid_slug
        grandchild_data['content'] = 'This is the content'
        grandchild_data['is_localizable'] = True

        # Attempt to create the grandchild with invalid slug,
        # validate it fails
        response = self.client.post(
            new_doc_url + '?parent=' + str(child_doc.id), grandchild_data)
        page = pq(response.content)
        eq_(200, response.status_code)  # 200 = bad, invalid data
        # Slug doesn't add parent
        eq_(invalid_slug, page.find('input[name=slug]')[0].value)
        eq_(child_doc.get_absolute_url(),
            page.find('.metadataDisplay').attr('href'))
        self.assertContains(response, 'The slug provided is not valid.')

        # Attempt to create the grandchild with *valid* slug,
        # should succeed and redirect
        grandchild_data['slug'] = grandchild_slug
        full_grandchild_slug = (full_child_slug + '/' +
                                grandchild_data['slug'])
        response = self.client.post(
            new_doc_url + '?parent=' + str(child_doc.id),
            grandchild_data)
        eq_(302, response.status_code)
        self.assertRedirects(response,
                             reverse('wiki.document', locale=locale,
                                     args=[full_grandchild_slug]))
        grandchild_doc = Document.objects.get(locale=locale,
                                              slug=full_grandchild_slug)
        eq_(grandchild_doc.slug, full_grandchild_slug)
        missing_title = grandchild_data['title'] + ' Redirect 1'
        eq_(0, len(Document.objects.filter(title=missing_title,
                                           locale=locale)))

        # NOTE(review): _run_edit_tests (and its inner
        # test_invalid_slug_edit) is defined but never invoked in this
        # chunk -- confirm whether calls were lost in an edit.
        def _run_edit_tests(edit_slug, edit_data, edit_doc,
                            edit_parent_path):
            """EDIT DOCUMENT TESTING"""
            # Load "Edit" page for the root doc, ensure no "/" in the slug
            # Also ensure the 'parent' link is not present
            response = self.client.get(reverse('wiki.edit_document',
                                               args=[edit_doc.slug], locale=locale))
            eq_(200, response.status_code)
            page = pq(response.content)
            eq_(edit_data['slug'], page.find('input[name=slug]')[0].value)
            eq_(edit_parent_path,
                page.find('.metadataDisplay').attr('href'))

            # Attempt an invalid edit of the root,
            # ensure the slug stays the same (i.e. no parent prepending)
            def test_invalid_slug_edit(inv_slug, url, data):
                data['slug'] = inv_slug
                data['form'] = 'rev'
                response = self.client.post(url, data)
                eq_(200, response.status_code)  # 200 = bad, invalid data
                page = pq(response.content)
                # Slug doesn't add parent
                eq_(inv_slug, page.find('input[name=slug]')[0].value)
                eq_(edit_parent_path,
                    page.find('.metadataDisplay').attr('href'))
                self.assertContains(response,
                                    'The slug provided is not valid.')
                # Ensure no redirect
                redirect_title = data['title'] + ' Redirect 1'
                eq_(0, len(Document.objects.filter(title=redirect_title,
                                                   locale=locale)))

            # Push a valid edit, without changing the slug
            edit_data['slug'] = edit_slug
            edit_data['form'] = 'rev'
            response = self.client.post(reverse('wiki.edit_document',
                                                args=[edit_doc.slug],
                                                locale=locale),
                                        edit_data)
            eq_(302, response.status_code)
            # Ensure no redirect
            redirect_title = edit_data['title'] + ' Redirect 1'
            eq_(0, len(Document.objects.filter(title=redirect_title,
                                               locale=locale)))
            self.assertRedirects(response,
                                 reverse('wiki.document',
                                         locale=locale,
                                         args=[edit_doc.slug]))

        def _run_translate_tests(translate_slug, translate_data,
                                 translate_doc):
            """TRANSLATION DOCUMENT TESTING"""
            foreign_url = (reverse('wiki.translate',
                                   args=[translate_doc.slug],
                                   locale=locale) +
                           '?tolocale=' + foreign_locale)
            foreign_doc_url = reverse('wiki.document',
                                      args=[translate_doc.slug],
                                      locale=foreign_locale)

            # Verify translate page form is populated correctly
            response = self.client.get(foreign_url)
            eq_(200, response.status_code)
            page = pq(response.content)
            eq_(translate_data['slug'],
                page.find('input[name=slug]')[0].value)

            # Attempt an invalid translation,
            # ensure the slug stays the same (i.e. no parent prepending)
            def test_invalid_slug_translate(inv_slug, url, data):
                data['slug'] = inv_slug
                data['form'] = 'both'
                response = self.client.post(url, data)
                eq_(200, response.status_code)  # 200 = bad, invalid data
                page = pq(response.content)
                # Slug doesn't add parent
                eq_(inv_slug, page.find('input[name=slug]')[0].value)
                self.assertContains(response,
                                    'The slug provided is not valid.')
                # Ensure no redirect
                eq_(0, len(Document.objects.filter(title=data['title'] +
                                                   ' Redirect 1',
                                                   locale=foreign_locale)))

            # Push a valid translation
            translate_data['slug'] = translate_slug
            translate_data['form'] = 'both'
            response = self.client.post(foreign_url, translate_data)
            eq_(302, response.status_code)
            # Ensure no redirect
            redirect_title = translate_data['title'] + ' Redirect 1'
            eq_(0, len(Document.objects.filter(title=redirect_title,
                                               locale=foreign_locale)))
            self.assertRedirects(response, foreign_doc_url)

            # Hand the created translation back so callers can chain tests.
            return Document.objects.get(locale=foreign_locale,
                                        slug=translate_doc.slug)

        _run_translate_tests(slug, doc_data, doc)
        _run_translate_tests(child_slug, child_data, child_doc)
        _run_translate_tests(grandchild_slug, grandchild_data,
                             grandchild_doc)

        # NOTE(review): _run_translate_edit_tests is defined but never
        # invoked in this chunk -- confirm whether calls were lost.
        def _run_translate_edit_tests(edit_slug, edit_data, edit_doc):
            """TEST BASIC EDIT OF TRANSLATION"""
            # Hit the initial URL
            response = self.client.get(reverse('wiki.edit_document',
                                               args=[edit_doc.slug],
                                               locale=foreign_locale))
            eq_(200, response.status_code)
            page = pq(response.content)
            eq_(edit_data['slug'], page.find('input[name=slug]')[0].value)

            # Attempt an invalid edit of the translation, ensure the slug
            # stays the same (i.e. no parent prepending)
            edit_data['slug'] = invalid_slug
            edit_data['form'] = 'both'
            response = self.client.post(reverse('wiki.edit_document',
                                                args=[edit_doc.slug],
                                                locale=foreign_locale),
                                        edit_data)
            eq_(200, response.status_code)  # 200 = bad, invalid data
            page = pq(response.content)
            # Slug doesn't add parent
            eq_(invalid_slug, page.find('input[name=slug]')[0].value)
            self.assertContains(response, page.find('ul.errorlist li'
                                                    ' a[href="#id_slug"]').
                                text())
            # Ensure no redirect
            eq_(0, len(Document.objects.filter(title=edit_data['title'] +
                                               ' Redirect 1',
                                               locale=foreign_locale)))

            # Push a valid edit, without changing the slug
            edit_data['slug'] = edit_slug
            response = self.client.post(reverse('wiki.edit_document',
                                                args=[edit_doc.slug],
                                                locale=foreign_locale),
                                        edit_data)
            eq_(302, response.status_code)
            # Ensure no redirect
            eq_(0, len(Document.objects.filter(title=edit_data['title'] +
                                               ' Redirect 1',
                                               locale=foreign_locale)))
            self.assertRedirects(response, reverse('wiki.document',
                                                   locale=foreign_locale,
                                                   args=[edit_doc.slug]))

        """ TEST EDITING SLUGS AND TRANSLATIONS """
        # NOTE(review): _run_slug_edit_tests is defined but never invoked
        # in this chunk -- confirm whether calls were lost.
        def _run_slug_edit_tests(edit_slug, edit_data, edit_doc, loc):
            # Rename the slug and expect exactly one redirect document.
            edit_data['slug'] = edit_data['slug'] + '_Updated'
            edit_data['form'] = 'rev'
            response = self.client.post(reverse('wiki.edit_document',
                                                args=[edit_doc.slug],
                                                locale=loc),
                                        edit_data)
            eq_(302, response.status_code)
            # HACK: the es doc gets a 'Redirigen 1' if locale/ is updated
            # Ensure *1* redirect
            eq_(1,
                len(Document.objects.filter(
                    title__contains=edit_data['title'] + ' Redir',
                    locale=loc)))
            self.assertRedirects(response,
                                 reverse('wiki.document',
                                         locale=loc,
                                         args=[edit_doc.slug.replace(
                                             edit_slug,
                                             edit_data['slug'])]))

    # Run all of the tests
    _createAndRunTests("parent")

    # Test that slugs with the same "specific" slug but at different
    # levels in the hierarchy validate properly upon submission

    # Create base doc
    parent_doc = document(title='Length',
                          slug='length',
                          is_localizable=True,
                          locale=settings.WIKI_DEFAULT_LANGUAGE)
    parent_doc.save()
    r = revision(document=parent_doc)
    r.save()

    # Create child, try to use same slug, should work
    child_data = new_document_data()
    child_data['title'] = 'Child Length'
    child_data['slug'] = 'length'
    child_data['content'] = 'This is the content'
    child_data['is_localizable'] = True
    child_url = (reverse('wiki.new_document') +
                 '?parent=' +
                 str(parent_doc.id))
    response = self.client.post(child_url, child_data)
    eq_(302, response.status_code)
    self.assertRedirects(response,
                         reverse('wiki.document',
                                 args=['length/length'],
                                 locale=settings.WIKI_DEFAULT_LANGUAGE))

    # Editing "length/length" document doesn't cause errors
    child_data['form'] = 'rev'
    child_data['slug'] = ''
    edit_url = reverse('wiki.edit_document', args=['length/length'],
                       locale=settings.WIKI_DEFAULT_LANGUAGE)
    response = self.client.post(edit_url, child_data)
    eq_(302, response.status_code)
    self.assertRedirects(response, reverse('wiki.document',
                                           args=['length/length'],
                                           locale=settings.WIKI_DEFAULT_LANGUAGE))

    # Creating a new translation of "length" and "length/length"
    # doesn't cause errors
    child_data['form'] = 'both'
    child_data['slug'] = 'length'
    translate_url = reverse('wiki.document', args=[child_data['slug']],
                            locale=settings.WIKI_DEFAULT_LANGUAGE)
    response = self.client.post(translate_url + '$translate?tolocale=es',
                                child_data)
    eq_(302, response.status_code)
    self.assertRedirects(response, reverse('wiki.document',
                                           args=[child_data['slug']],
                                           locale='es'))

    translate_url = reverse('wiki.document', args=['length/length'],
                            locale=settings.WIKI_DEFAULT_LANGUAGE)
    response = self.client.post(translate_url + '$translate?tolocale=es',
                                child_data)
    eq_(302, response.status_code)
    slug = 'length/' + child_data['slug']
    self.assertRedirects(response, reverse('wiki.document',
                                           args=[slug],
                                           locale='es'))
def test_translate_keeps_topical_parent(self):
    """Editing a translated child document keeps both its translation
    parent and its topical parent intact."""
    self.client.login(username='admin', password='testpass')
    en_doc, de_doc = make_translation()

    # Build an English child under en_doc and its German translation
    # under de_doc.
    en_child_doc = document(parent_topic=en_doc, slug='en-child',
                            save=True)
    en_child_rev = revision(document=en_child_doc, save=True)
    de_child_doc = document(parent_topic=de_doc, locale='de',
                            slug='de-child', parent=en_child_doc,
                            save=True)
    revision(document=de_child_doc, save=True)

    # Post a new revision of the German child.
    post_data = {}
    post_data['slug'] = de_child_doc.slug
    post_data['title'] = 'New title'
    post_data['form'] = 'both'
    post_data['content'] = 'New translation'
    post_data['tolocale'] = 'de'
    post_data['toc_depth'] = 0
    post_data['based_on'] = en_child_rev.id
    post_data['parent_id'] = en_child_doc.id
    translate_url = reverse('wiki.edit_document',
                            args=[de_child_doc.slug],
                            locale='de')
    self.client.post(translate_url, post_data)

    # Both parent relationships and the new content must survive the edit.
    de_child_doc = Document.objects.get(locale='de', slug='de-child')
    eq_(en_child_doc, de_child_doc.parent)
    eq_(de_doc, de_child_doc.parent_topic)
    eq_('New translation', de_child_doc.current_revision.content)
def test_translate_keeps_toc_depth(self):
    """Creating a translation preserves the source revision's toc_depth."""
    self.client.login(username='admin', password='testpass')
    locale = settings.WIKI_DEFAULT_LANGUAGE
    original_slug = 'eng-doc'
    foreign_locale = 'es'
    foreign_slug = 'es-doc'

    # English source document with a non-default toc_depth.
    en_doc = document(title='Eng Doc', slug=original_slug,
                      is_localizable=True, locale=locale)
    en_doc.save()
    r = revision(document=en_doc, toc_depth=1)
    r.save()

    # Translate it to Spanish, echoing the source toc_depth in the form.
    post_data = new_document_data()
    post_data['title'] = 'ES Doc'
    post_data['slug'] = foreign_slug
    post_data['content'] = 'This is the content'
    post_data['is_localizable'] = True
    post_data['form'] = 'both'
    post_data['toc_depth'] = r.toc_depth
    translate_url = reverse('wiki.document', args=[original_slug],
                            locale=settings.WIKI_DEFAULT_LANGUAGE)
    translate_url += '$translate?tolocale=' + foreign_locale
    response = self.client.post(translate_url, post_data)
    self.assertRedirects(response, reverse('wiki.document',
                                           args=[foreign_slug],
                                           locale=foreign_locale))

    # The stored translation revision keeps the source's toc_depth.
    es_d = Document.objects.get(locale=foreign_locale, slug=foreign_slug)
    eq_(r.toc_depth, es_d.current_revision.toc_depth)
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
def test_translate_rebuilds_source_json(self):
    """Creating a translation regenerates the English source document's
    JSON so it lists the new translation."""
    self.client.login(username='admin', password='testpass')
    # Create an English original and a Spanish translation.
    en_slug = 'en-doc'
    es_locale = 'es'
    es_slug = 'es-doc'
    en_doc = document(title='EN Doc',
                      slug=en_slug,
                      is_localizable=True,
                      locale=settings.WIKI_DEFAULT_LANGUAGE)
    en_doc.save()
    en_doc.render()

    # Re-fetch and sanity-check that the rendered doc has parseable JSON.
    en_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                  slug=en_slug)
    json.loads(en_doc.json)

    r = revision(document=en_doc)
    r.save()
    translation_data = new_document_data()
    translation_data['title'] = 'ES Doc'
    translation_data['slug'] = es_slug
    translation_data['content'] = 'This is the content'
    translation_data['is_localizable'] = False
    translation_data['form'] = 'both'
    translate_url = reverse('wiki.document', args=[en_slug],
                            locale=settings.WIKI_DEFAULT_LANGUAGE)
    translate_url += '$translate?tolocale=' + es_locale
    response = self.client.post(translate_url, translation_data)
    # Sanity to make sure the translate succeeded.
    self.assertRedirects(response, reverse('wiki.document',
                                           args=[es_slug],
                                           locale=es_locale))
    es_doc = Document.objects.get(locale=es_locale,
                                  slug=es_slug)
    es_doc.render()

    # The English doc's regenerated JSON must list the new translation,
    # with a last_edit matching the translation's revision timestamp.
    new_en_json = json.loads(Document.objects.get(pk=en_doc.pk).json)
    ok_('translations' in new_en_json)
    ok_(translation_data['title'] in [t['title'] for t in
                                      new_en_json['translations']])
    es_translation_json = [t for t in new_en_json['translations'] if
                           t['title'] == translation_data['title']][0]
    eq_(es_translation_json['last_edit'],
        es_doc.current_revision.created.isoformat())
def test_slug_translate(self):
    """Editing a translated doc keeps the correct slug"""
    self.client.login(username='admin', password='testpass')

    # Settings
    original_slug = 'eng-doc'
    child_slug = 'child-eng-doc'
    foreign_locale = 'es'
    foreign_slug = 'es-doc'
    foreign_child_slug = 'child-es-doc'

    # Create the one-level English Doc
    en_doc = document(title='Eng Doc',
                      slug=original_slug,
                      is_localizable=True,
                      locale=settings.WIKI_DEFAULT_LANGUAGE)
    en_doc.save()
    r = revision(document=en_doc)
    r.save()

    # Translate to ES
    parent_data = new_document_data()
    parent_data['title'] = 'ES Doc'
    parent_data['slug'] = foreign_slug
    parent_data['content'] = 'This is the content'
    parent_data['is_localizable'] = True
    parent_data['form'] = 'both'
    translate_url = reverse('wiki.document', args=[original_slug],
                            locale=settings.WIKI_DEFAULT_LANGUAGE)
    translate_url += '$translate?tolocale=' + foreign_locale
    response = self.client.post(translate_url, parent_data)
    self.assertRedirects(response, reverse('wiki.document',
                                           args=[foreign_slug],
                                           locale=foreign_locale))

    # Go to edit the translation, ensure the slug is correct
    response = self.client.get(reverse('wiki.edit_document',
                                       args=[foreign_slug],
                                       locale=foreign_locale))
    page = pq(response.content)
    eq_(page.find('input[name=slug]')[0].value, foreign_slug)

    # Create an English child now
    en_doc = document(title='Child Eng Doc',
                      slug=original_slug + '/' + child_slug,
                      is_localizable=True,
                      locale=settings.WIKI_DEFAULT_LANGUAGE,
                      parent_topic=en_doc)
    en_doc.save()
    r = revision(document=en_doc)
    r.save()

    # Translate the child to ES; the translation's full slug should be
    # the foreign parent slug plus the specific child slug.
    child_data = new_document_data()
    child_data['title'] = 'ES Child Doc'
    child_data['slug'] = foreign_child_slug
    child_data['content'] = 'This is the content'
    child_data['is_localizable'] = True
    child_data['form'] = 'both'

    translate_url = reverse('wiki.document',
                            args=[original_slug + '/' + child_slug],
                            locale=settings.WIKI_DEFAULT_LANGUAGE)
    translate_url += '$translate?tolocale=' + foreign_locale
    response = self.client.post(translate_url, child_data)
    slug = foreign_slug + '/' + child_data['slug']
    self.assertRedirects(response, reverse('wiki.document',
                                           args=[slug],
                                           locale=foreign_locale))
def test_clone(self):
    """Loading the new-document form with ?clone=<id> pre-populates the
    form with the source document's title, slug and content."""
    self.client.login(username='admin', password='testpass')
    slug = None
    title = None
    content = '<p>Hello!</p>'

    # Source document to clone from (no explicit title/slug).
    test_revision = revision(save=True, title=title, slug=slug,
                             content=content)
    document = test_revision.document

    response = self.client.get(reverse('wiki.new_document',
                                       args=[],
                                       locale=settings.WIKI_DEFAULT_LANGUAGE) + '?clone=' + str(document.id))
    page = pq(response.content)

    # Form fields mirror the cloned document's values.
    eq_(page.find('input[name=title]')[0].value, title)
    eq_(page.find('input[name=slug]')[0].value, slug)
    self.assertHTMLEqual(page.find('textarea[name=content]')[0].value, content)
def test_localized_based_on(self):
    """Editing a localized article 'based on' an older revision of the
    localization is OK."""
    self.client.login(username='admin', password='testpass')

    # English revision with a French translation based on it.
    en_r = revision(save=True)
    fr_d = document(parent=en_r.document, locale='fr', save=True)
    fr_r = revision(document=fr_d, based_on=en_r, save=True)

    url = reverse('wiki.new_revision_based_on',
                  locale='fr', args=(fr_d.slug, fr_r.pk,))
    response = self.client.get(url)

    # The form's based_on field should point at the English revision.
    input = pq(response.content)('#id_based_on')[0]
    eq_(int(input.value), en_r.pk)
def test_restore_translation_source(self):
    """Edit a localized article without an English parent allows user to
    set translation parent."""
    # Create english doc
    self.client.login(username='admin', password='testpass')
    data = new_document_data()
    self.client.post(reverse('wiki.new_document'), data)
    en_d = Document.objects.get(locale=data['locale'], slug=data['slug'])

    # Create french doc (intentionally with no translation parent).
    data.update({'locale': 'fr',
                 'title': 'A Tést Articlé',
                 'content': "C'ést bon."})
    self.client.post(reverse('wiki.new_document', locale='fr'), data)
    fr_d = Document.objects.get(locale=data['locale'], slug=data['slug'])

    # Check edit doc page for choose parent box
    url = reverse('wiki.edit_document', args=[fr_d.slug], locale='fr')
    response = self.client.get(url)
    ok_(pq(response.content)('li.metadata-choose-parent'))

    # Set the parent
    data.update({'form': 'rev', 'parent_id': en_d.id})
    resp = self.client.post(url, data)
    eq_(302, resp.status_code)
    ok_('fr/docs/a-test-article' in resp['Location'])

    # Check the languages drop-down lists the English source.
    resp = self.client.get(resp['Location'])
    translations = pq(resp.content)('ul#translations li')
    ok_('A Test Article' in translations.html())
    ok_('English (US)' in translations.text())
def test_translation_source(self):
    """Allow users to change "translation source" settings"""
    self.client.login(username='admin', password='testpass')

    # Create a parent document.
    data = new_document_data()
    self.client.post(reverse('wiki.new_document'), data)
    parent = Document.objects.get(locale=data['locale'], slug=data['slug'])

    # Create a second document pointing at the first via parent_id.
    data.update({'title': 'Another Test Article',
                 'content': "Yahoooo!",
                 'parent_id': parent.id})
    self.client.post(reverse('wiki.new_document'), data)
    child = Document.objects.get(locale=data['locale'], slug=data['slug'])

    # The edit page should offer the choose-parent control and reference
    # the parent's id.
    url = reverse('wiki.edit_document', args=[child.slug])
    response = self.client.get(url)
    content = pq(response.content)
    ok_(content('li.metadata-choose-parent'))
    ok_(str(parent.id) in content.html())
@attr('tags')
@mock.patch.object(Site.objects, 'get_current')
def test_document_tags(self, get_current):
    """Document tags can be edited through revisions.

    Creates a document with one set of tags, verifies they show up in the
    document view, tag listings and feeds, then replaces them via a new
    revision and verifies the swap.
    """
    data = new_document_data()
    locale = data['locale']
    slug = data['slug']
    path = slug
    ts1 = ('JavaScript', 'AJAX', 'DOM')
    ts2 = ('XML', 'JSON')
    get_current.return_value.domain = 'su.mo.com'
    self.client.login(username='admin', password='testpass')

    def assert_tag_state(yes_tags, no_tags):
        """Assert yes_tags are present and no_tags absent everywhere."""
        # Ensure the tags are found for the Documents
        doc = Document.objects.get(locale=locale, slug=slug)
        doc_tags = [x.name for x in doc.tags.all()]
        for t in yes_tags:
            ok_(t in doc_tags)
        for t in no_tags:
            ok_(t not in doc_tags)

        # Ensure the tags are found in the Document view.
        # BUG FIX: the two failure messages below were swapped -- the
        # expected-present check said "should NOT appear" and vice versa.
        response = self.client.get(reverse('wiki.document',
                                           args=[doc.slug]), data)
        page = pq(response.content)
        for t in yes_tags:
            eq_(1, page.find('.tags li a:contains("%s")' % t).length,
                '%s should appear in document view tags' % t)
        for t in no_tags:
            eq_(0, page.find('.tags li a:contains("%s")' % t).length,
                '%s should NOT appear in document view tags' % t)

        # Check for the document slug (title in feeds) in the tag listing
        for t in yes_tags:
            response = self.client.get(reverse('wiki.tag', args=[t]))
            self.assertContains(response, doc.slug, msg_prefix=t)
            response = self.client.get(reverse('wiki.feeds.recent_documents',
                                               args=['atom', t]))
            self.assertContains(response, doc.title)
        for t in no_tags:
            response = self.client.get(reverse('wiki.tag', args=[t]))
            ok_(doc.slug not in response.content.decode('utf-8'))
            response = self.client.get(reverse('wiki.feeds.recent_documents',
                                               args=['atom', t]))
            self.assertNotContains(response, doc.title)

    # Create a new doc with tags
    data.update({'slug': slug, 'tags': ','.join(ts1)})
    self.client.post(reverse('wiki.new_document'), data)
    assert_tag_state(ts1, ts2)

    # Now, update the tags.
    data.update({'form': 'rev', 'tags': ', '.join(ts2)})
    self.client.post(reverse('wiki.edit_document',
                             args=[path]), data)
    assert_tag_state(ts2, ts1)
@attr('review_tags')
@mock.patch.object(Site.objects, 'get_current')
def test_review_tags(self, get_current):
    """Review tags can be managed on document revisions"""
    get_current.return_value.domain = 'su.mo.com'
    self.client.login(username='admin', password='testpass')

    # Create a new doc with one review tag
    data = new_document_data()
    data.update({'review_tags': ['technical']})
    response = self.client.post(reverse('wiki.new_document'), data)

    # Ensure there's now a doc with that expected tag in its newest
    # revision
    doc = Document.objects.get(slug="a-test-article")
    rev = doc.revisions.order_by('-id').all()[0]
    review_tags = [x.name for x in rev.review_tags.all()]
    eq_(['technical'], review_tags)

    # Now, post an update with two tags
    data.update({
        'form': 'rev',
        'review_tags': ['editorial', 'technical'],
    })
    response = self.client.post(reverse('wiki.edit_document',
                                        args=[doc.slug]), data)

    # Ensure the doc's newest revision has both tags.
    doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                               slug="a-test-article")
    rev = doc.revisions.order_by('-id').all()[0]
    review_tags = [x.name for x in rev.review_tags.all()]
    review_tags.sort()
    eq_(['editorial', 'technical'], review_tags)

    # Now, ensure that warning boxes appear for the review tags.
    response = self.client.get(reverse('wiki.document',
                                       args=[doc.slug]), data)
    page = pq(response.content)
    eq_(2, page.find('.warning.warning-review').length)

    # Ensure the page appears on the listing pages
    response = self.client.get(reverse('wiki.list_review'))
    eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
                                     doc.title).length)
    response = self.client.get(reverse('wiki.list_review_tag',
                                       args=('technical',)))
    eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
                                     doc.title).length)
    response = self.client.get(reverse('wiki.list_review_tag',
                                       args=('editorial',)))
    eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
                                     doc.title).length)

    # Also, ensure that the page appears in the proper feeds
    # HACK: Too lazy to parse the XML. Lazy lazy.
    response = self.client.get(reverse('wiki.feeds.list_review',
                                       args=('atom',)))
    ok_('<entry><title>%s</title>' % doc.title in response.content)
    response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                       args=('atom', 'technical', )))
    ok_('<entry><title>%s</title>' % doc.title in response.content)
    response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                       args=('atom', 'editorial', )))
    ok_('<entry><title>%s</title>' % doc.title in response.content)

    # Post an edit that removes one of the tags.
    data.update({
        'form': 'rev',
        'review_tags': ['editorial', ]
    })
    response = self.client.post(reverse('wiki.edit_document',
                                        args=[doc.slug]), data)

    # Ensure only one of the tags' warning boxes appears, now.
    response = self.client.get(reverse('wiki.document',
                                       args=[doc.slug]), data)
    page = pq(response.content)
    eq_(1, page.find('.warning.warning-review').length)

    # Ensure the page appears on the listing pages: still on the general
    # review list and the editorial list, but gone from technical.
    response = self.client.get(reverse('wiki.list_review'))
    eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
                                     doc.title).length)
    response = self.client.get(reverse('wiki.list_review_tag',
                                       args=('technical',)))
    eq_(0, pq(response.content).find("ul.document-list li a:contains('%s')" %
                                     doc.title).length)
    response = self.client.get(reverse('wiki.list_review_tag',
                                       args=('editorial',)))
    eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
                                     doc.title).length)

    # Also, ensure that the page appears in the proper feeds
    # HACK: Too lazy to parse the XML. Lazy lazy.
    response = self.client.get(reverse('wiki.feeds.list_review',
                                       args=('atom',)))
    ok_('<entry><title>%s</title>' % doc.title in response.content)
    response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                       args=('atom', 'technical', )))
    ok_('<entry><title>%s</title>' % doc.title not in response.content)
    response = self.client.get(reverse('wiki.feeds.list_review_tag',
                                       args=('atom', 'editorial', )))
    ok_('<entry><title>%s</title>' % doc.title in response.content)
@attr('review-tags')
def test_quick_review(self):
    """Test the quick-review button."""
    self.client.login(username='admin', password='testpass')

    # Each case: which approvals to post, the review tags expected to
    # remain afterwards, a slug suffix, and strings the resulting
    # revision's summary/comment must contain.
    test_data = [
        {
            'params': {'approve_technical': 1},
            'expected_tags': ['editorial'],
            'name': 'technical',
            'message_contains': ['Technical review completed.']
        },
        {
            'params': {'approve_editorial': 1},
            'expected_tags': ['technical'],
            'name': 'editorial',
            'message_contains': ['Editorial review completed.']
        },
        {
            'params': {
                'approve_technical': 1,
                'approve_editorial': 1
            },
            'expected_tags': [],
            'name': 'editorial-technical',
            'message_contains': [
                'Technical review completed.',
                'Editorial review completed.',
            ]
        }
    ]

    for data_dict in test_data:
        # Create a doc carrying both review tags.
        slug = 'test-quick-review-%s' % data_dict['name']
        data = new_document_data()
        data.update({'review_tags': ['editorial', 'technical'],
                     'slug': slug})
        resp = self.client.post(reverse('wiki.new_document'), data)

        doc = Document.objects.get(slug=slug)
        rev = doc.revisions.order_by('-id').all()[0]
        review_url = reverse('wiki.quick_review',
                             args=[doc.slug])

        # Post the quick-review approval(s) against the latest revision.
        params = dict(data_dict['params'], revision_id=rev.id)
        resp = self.client.post(review_url, params)
        eq_(302, resp.status_code)

        # The newest revision should record the completion message(s) and
        # retain only the still-pending review tags.
        doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                   slug=slug)
        rev = doc.revisions.order_by('-id').all()[0]
        review_tags = [x.name for x in rev.review_tags.all()]
        review_tags.sort()
        for expected_str in data_dict['message_contains']:
            ok_(expected_str in rev.summary)
            ok_(expected_str in rev.comment)
        eq_(data_dict['expected_tags'], review_tags)
@attr('midair')
def test_edit_midair_collision(self):
    """Two concurrent editors: the second save based on a stale
    current_rev must get a mid-air collision warning, not a save."""
    self.client.login(username='admin', password='testpass')

    # Post a new document.
    data = new_document_data()
    resp = self.client.post(reverse('wiki.new_document'), data)
    doc = Document.objects.get(slug=data['slug'])

    # Edit #1 starts...
    resp = self.client.get(reverse('wiki.edit_document',
                                   args=[doc.slug]))
    page = pq(resp.content)
    rev_id1 = page.find('input[name="current_rev"]').attr('value')

    # Edit #2 starts...
    resp = self.client.get(reverse('wiki.edit_document',
                                   args=[doc.slug]))
    page = pq(resp.content)
    rev_id2 = page.find('input[name="current_rev"]').attr('value')

    # Edit #2 submits successfully
    data.update({
        'form': 'rev',
        'content': 'This edit got there first',
        'current_rev': rev_id2
    })
    resp = self.client.post(reverse('wiki.edit_document',
                                    args=[doc.slug]), data)
    eq_(302, resp.status_code)

    # Edit #1 submits, but receives a mid-aired notification because its
    # current_rev is now stale.
    data.update({
        'form': 'rev',
        'content': 'This edit gets mid-aired',
        'current_rev': rev_id1
    })
    resp = self.client.post(reverse('wiki.edit_document',
                                    args=[doc.slug]), data)
    eq_(200, resp.status_code)
    ok_(unicode(MIDAIR_COLLISION).encode('utf-8') in resp.content,
        "Midair collision message should appear")
@attr('toc')
def test_toc_toggle_off(self):
    """Toggling of table of contents in revisions"""
    self.client.login(username='admin', password='testpass')
    d, _ = doc_rev()
    data = new_document_data()

    # Precondition: the document currently shows a table of contents.
    ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc)

    # Post a revision with toc_depth 0 to switch the TOC off.
    data['form'] = 'rev'
    data['toc_depth'] = 0
    data['slug'] = d.slug
    data['title'] = d.title
    self.client.post(reverse('wiki.edit_document',
                             args=[d.slug]),
                     data)
    doc = Document.objects.get(slug=d.slug, locale=d.locale)
    eq_(0, doc.current_revision.toc_depth)
@attr('toc')
def test_toc_toggle_on(self):
    """Toggling of table of contents in revisions"""
    self.client.login(username='admin', password='testpass')
    d, r = doc_rev()

    # Start from a revision with the TOC switched off.
    new_r = revision(document=d, content=r.content, toc_depth=0,
                     is_approved=True)
    new_r.save()
    ok_(not Document.objects.get(slug=d.slug, locale=d.locale).show_toc)

    # Posting a new revision without toc_depth=0 turns the TOC back on
    # (new_document_data supplies the default toc_depth).
    data = new_document_data()
    data['form'] = 'rev'
    data['slug'] = d.slug
    data['title'] = d.title
    self.client.post(reverse('wiki.edit_document',
                             args=[d.slug]),
                     data)
    ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc)
def test_parent_topic(self):
    """Selection of a parent topic when creating a document."""
    self.client.login(username='admin', password='testpass')

    # Existing document to act as the topical parent.
    d = document(title='HTML8')
    d.save()
    r = revision(document=d)
    r.save()

    # Create a new document with parent_topic pointing at it.
    data = new_document_data()
    data['title'] = 'Replicated local storage'
    data['parent_topic'] = d.id
    resp = self.client.post(reverse('wiki.new_document'), data)
    eq_(302, resp.status_code)

    # The new document shows up as the parent's only child.
    ok_(d.children.count() == 1)
    ok_(d.children.all()[0].title == 'Replicated local storage')
    def test_repair_breadcrumbs(self):
        # Build a three-level English topic hierarchy (top -> mid -> bottom)...
        english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
                               title='English top',
                               save=True)
        english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
                               title='English mid',
                               parent_topic=english_top,
                               save=True)
        english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                  title='English bottom',
                                  parent_topic=english_mid,
                                  save=True)
        # ...and French translations. Note french_top deliberately has NO
        # parent_topic, so french_bottom's breadcrumb chain is broken.
        french_top = document(locale='fr',
                              title='French top',
                              parent=english_top,
                              save=True)
        french_mid = document(locale='fr',
                              title='French mid',
                              parent=english_mid,
                              parent_topic=english_mid,
                              save=True)
        french_bottom = document(locale='fr',
                                 title='French bottom',
                                 parent=english_bottom,
                                 parent_topic=english_bottom,
                                 save=True)
        self.client.login(username='admin', password='testpass')
        # Invoke the repair view; it should redirect back to the document.
        resp = self.client.get(reverse('wiki.repair_breadcrumbs',
                                       args=[french_bottom.slug],
                                       locale='fr'))
        eq_(302, resp.status_code)
        ok_(french_bottom.get_absolute_url() in resp['Location'])
        # After repair, the French bottom document's topic parents point at
        # the French documents, not the English ones it was created with.
        french_bottom_fixed = Document.objects.get(locale='fr',
                                                   title=french_bottom.title)
        eq_(french_mid.id, french_bottom_fixed.parent_topic.id)
        eq_(french_top.id, french_bottom_fixed.parent_topic.parent_topic.id)
def test_translate_on_edit(self):
d1 = document(title="Doc1", locale=settings.WIKI_DEFAULT_LANGUAGE,
save=True)
revision(document=d1, save=True)
d2 = document(title="TransDoc1", locale='de', parent=d1, save=True)
revision(document=d2, save=True)
self.client.login(username='admin', password='testpass')
url = reverse('wiki.edit_document', args=(d2.slug,), locale=d2.locale)
resp = self.client.get(url)
eq_(200, resp.status_code)
    def test_discard_location(self):
        """Testing that the 'discard' HREF goes to the correct place when it's
        explicitely and implicitely set"""
        self.client.login(username='admin', password='testpass')

        def _create_doc(slug, locale):
            # Helper: create a localizable document with one revision.
            doc = document(slug=slug, is_localizable=True, locale=locale)
            doc.save()
            r = revision(document=doc)
            r.save()
            return doc

        # Test that the 'discard' button on an edit goes to the original page
        doc = _create_doc('testdiscarddoc', settings.WIKI_DEFAULT_LANGUAGE)
        response = self.client.get(reverse('wiki.edit_document',
                                           args=[doc.slug], locale=doc.locale))
        eq_(pq(response.content).find('.btn-discard').attr('href'),
            reverse('wiki.document', args=[doc.slug], locale=doc.locale))

        # Test that the 'discard button on a new translation goes
        # to the en-US page'
        response = self.client.get(reverse('wiki.translate',
                                           args=[doc.slug], locale=doc.locale) + '?tolocale=es')
        eq_(pq(response.content).find('.btn-discard').attr('href'),
            reverse('wiki.document', args=[doc.slug], locale=doc.locale))

        # Test that the 'discard' button on an existing translation goes
        # to the 'es' page
        foreign_doc = _create_doc('testdiscarddoc', 'es')
        response = self.client.get(reverse('wiki.edit_document',
                                           args=[foreign_doc.slug],
                                           locale=foreign_doc.locale))
        eq_(pq(response.content).find('.btn-discard').attr('href'),
            reverse('wiki.document', args=[foreign_doc.slug],
                    locale=foreign_doc.locale))

        # Test new
        response = self.client.get(reverse('wiki.new_document',
                                           locale=settings.WIKI_DEFAULT_LANGUAGE))
        eq_(pq(response.content).find('.btn-discard').attr('href'),
            reverse('wiki.new_document',
                    locale=settings.WIKI_DEFAULT_LANGUAGE))
    @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_revert(self, mock_kumascript_get):
        """Reverting a document restores an older revision's content and
        triggers kumascript re-rendering."""
        self.client.login(username='admin', password='testpass')
        mock_kumascript_get.return_value = (
            'lorem ipsum dolor sit amet', None)

        # Create a document, then edit it so there is something to revert.
        data = new_document_data()
        data['title'] = 'A Test Article For Reverting'
        data['slug'] = 'test-article-for-reverting'
        response = self.client.post(reverse('wiki.new_document'), data)
        doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                   slug='test-article-for-reverting')
        rev = doc.revisions.order_by('-id').all()[0]
        data['content'] = 'Not lorem ipsum anymore'
        data['comment'] = 'Nobody likes Latin anyway'
        response = self.client.post(reverse('wiki.edit_document',
                                            args=[doc.slug]), data)

        # Reset the mock's call flag so we can tell the revert re-rendered.
        mock_kumascript_get.called = False
        response = self.client.post(reverse('wiki.revert_document',
                                            args=[doc.slug, rev.id]),
                                    {'revert': True, 'comment': 'Blah blah'})
        ok_(mock_kumascript_get.called,
            "kumascript should have been used")
        ok_(302 == response.status_code)
        # The newest revision now carries the original content plus the
        # revert comment supplied in the POST.
        rev = doc.revisions.order_by('-id').all()[0]
        ok_('lorem ipsum dolor sit amet' == rev.content)
        ok_('Blah blah' in rev.comment)

        # Revert again without a comment; no ': <comment>' suffix expected.
        mock_kumascript_get.called = False
        rev = doc.revisions.order_by('-id').all()[1]
        response = self.client.post(reverse('wiki.revert_document',
                                            args=[doc.slug, rev.id]),
                                    {'revert': True})
        ok_(302 == response.status_code)
        rev = doc.revisions.order_by('-id').all()[0]
        ok_(': ' not in rev.comment)
        ok_(mock_kumascript_get.called,
            "kumascript should have been used")
    def test_store_revision_ip(self):
        """Revision IPs are recorded only while the waffle switch is on."""
        self.client.login(username='testuser', password='testpass')
        data = new_document_data()
        slug = 'test-article-for-storing-revision-ip'
        data.update({'title': 'A Test Article For Storing Revision IP',
                     'slug': slug})
        self.client.post(reverse('wiki.new_document'), data)
        doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
                                   slug=slug)

        # Switch off (default): editing must not create a RevisionIP row.
        data.update({'form': 'rev',
                     'content': 'This revision should NOT record IP',
                     'comment': 'This revision should NOT record IP'})
        self.client.post(reverse('wiki.edit_document', args=[doc.slug]),
                         data)
        eq_(0, RevisionIP.objects.all().count())

        # Turn the switch on: the next edit records the client IP.
        Switch.objects.create(name='store_revision_ips', active=True)
        data.update({'content': 'Store the IP address for the revision.',
                     'comment': 'Store the IP address for the revision.'})
        self.client.post(reverse('wiki.edit_document', args=[doc.slug]),
                         data)
        eq_(1, RevisionIP.objects.all().count())
        rev = doc.revisions.order_by('-id').all()[0]
        rev_ip = RevisionIP.objects.get(revision=rev)
        # The Django test client always connects from 127.0.0.1.
        eq_('127.0.0.1', rev_ip.ip)
    @mock.patch.object(Site.objects, 'get_current')
    def test_email_for_first_edits(self, get_current):
        """An email is sent for each user's FIRST edit, not subsequent ones."""
        get_current.return_value.domain = 'dev.mo.org'
        self.client.login(username='testuser', password='testpass')
        data = new_document_data()
        slug = 'test-article-for-storing-revision-ip'
        data.update({'title': 'A Test Article For First Edit Emails',
                     'slug': slug})
        # testuser's first edit (document creation) -> one email.
        self.client.post(reverse('wiki.new_document'), data)
        eq_(1, len(mail.outbox))

        doc = Document.objects.get(
            locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug)

        # testuser's second edit -> no additional email.
        data.update({'form': 'rev',
                     'content': 'This edit should not send an email',
                     'comment': 'This edit should not send an email'})
        self.client.post(reverse('wiki.edit_document',
                                 args=[doc.slug]),
                         data)
        eq_(1, len(mail.outbox))

        # admin's first edit -> a second email.
        self.client.login(username='admin', password='testpass')
        data.update({'content': 'Admin first edit should send an email',
                     'comment': 'Admin first edit should send an email'})
        self.client.post(reverse('wiki.edit_document',
                                 args=[doc.slug]),
                         data)
        eq_(2, len(mail.outbox))

        def _check_message_for_headers(message, username):
            # Each notification names the editor and carries the custom
            # X-Kuma-* headers pointing at the edited document.
            ok_("%s made their first edit" % username in message.subject)
            eq_({'X-Kuma-Document-Url': "https://dev.mo.org%s" % doc.get_absolute_url(),
                 'X-Kuma-Editor-Username': username}, message.extra_headers)

        testuser_message = mail.outbox[0]
        admin_message = mail.outbox[1]
        _check_message_for_headers(testuser_message, 'testuser')
        _check_message_for_headers(admin_message, 'admin')
class DocumentWatchTests(UserTestCase, WikiTestCase):
    """Tests for un/subscribing to document edit notifications."""
    localizing_client = True

    def setUp(self):
        super(DocumentWatchTests, self).setUp()
        # One document with a revision, and a logged-in regular user.
        self.document, self.r = doc_rev()
        self.client.login(username='testuser', password='testpass')

    def test_watch_GET_405(self):
        """Watch document with HTTP GET results in 405."""
        response = get(self.client, 'wiki.subscribe_document',
                       args=[self.document.slug])
        eq_(405, response.status_code)

    def test_unwatch_GET_405(self):
        """Unwatch document with HTTP GET results in 405."""
        response = get(self.client, 'wiki.subscribe_document',
                       args=[self.document.slug])
        eq_(405, response.status_code)

    def test_watch_unwatch(self):
        """Watch and unwatch a document."""
        user = self.user_model.objects.get(username='testuser')
        # Subscribe
        response = post(self.client, 'wiki.subscribe_document', args=[self.document.slug])
        eq_(200, response.status_code)
        assert EditDocumentEvent.is_notifying(user, self.document), \
            'Watch was not created'
        # Unsubscribe: the same endpoint toggles the subscription off.
        response = post(self.client, 'wiki.subscribe_document', args=[self.document.slug])
        eq_(200, response.status_code)
        assert not EditDocumentEvent.is_notifying(user, self.document), \
            'Watch was not destroyed'
class SectionEditingResourceTests(UserTestCase, WikiTestCase):
    """Tests for section-level document access (?section=...&raw=...),
    section editing, and mid-air collision handling between sections."""
    localizing_client = True

    def test_raw_source(self):
        """The raw source for a document can be requested"""
        self.client.login(username='admin', password='testpass')
        d, r = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        expected = """
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """
        # ACAO switch should add a wildcard CORS header on the raw response.
        Switch.objects.create(name='application_ACAO', active=True)
        response = self.client.get('%s?raw=true' %
                                   reverse('wiki.document', args=[d.slug]),
                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        ok_('Access-Control-Allow-Origin' in response)
        eq_('*', response['Access-Control-Allow-Origin'])
        eq_(normalize_html(expected),
            normalize_html(response.content))

    @attr('bug821986')
    def test_raw_editor_safety_filter(self):
        """Safety filter should be applied before rendering editor"""
        self.client.login(username='admin', password='testpass')
        # Content containing event-handler attributes that must be stripped.
        d, r = doc_rev("""
            <p onload=alert(3)>FOO</p>
            <svg><circle onload=confirm(3)>HI THERE</circle></svg>
        """)
        response = self.client.get('%s?raw=true' %
                                   reverse('wiki.document', args=[d.slug]),
                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        ok_('<p onload=' not in response.content)
        ok_('<circle onload=' not in response.content)

    def test_raw_with_editing_links_source(self):
        """The raw source for a document can be requested, with section editing
        links"""
        self.client.login(username='admin', password='testpass')
        d, r = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        # Each section heading gains an "Edit" anchor pointing at the
        # section-scoped raw and edit URLs.
        expected = """
            <h1 id="s1"><a class="edit-section" data-section-id="s1" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s1" href="/en-US/docs/%(slug)s$edit?section=s1&edit_links=true" title="Edit section">Edit</a>s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2"><a class="edit-section" data-section-id="s2" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s2" href="/en-US/docs/%(slug)s$edit?section=s2&edit_links=true" title="Edit section">Edit</a>s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3"><a class="edit-section" data-section-id="s3" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s3" href="/en-US/docs/%(slug)s$edit?section=s3&edit_links=true" title="Edit section">Edit</a>s3</h1>
            <p>test</p>
            <p>test</p>
        """ % {'slug': d.slug}
        response = self.client.get('%s?raw=true&edit_links=true' %
                                   reverse('wiki.document', args=[d.slug]),
                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(normalize_html(expected),
            normalize_html(response.content))

    def test_raw_section_source(self):
        """The raw source for a document section can be requested"""
        self.client.login(username='admin', password='testpass')
        d, r = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        # Only the requested section (s2) should come back.
        expected = """
            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>
        """
        response = self.client.get('%s?section=s2&raw=true' %
                                   reverse('wiki.document',
                                           args=[d.slug]),
                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(normalize_html(expected),
            normalize_html(response.content))

    @attr('midair')
    @attr('rawsection')
    def test_raw_section_edit(self):
        """Posting raw content for a single section replaces only that
        section in the full document."""
        self.client.login(username='admin', password='testpass')
        d, r = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        replace = """
            <h1 id="s2">s2</h1>
            <p>replace</p>
        """
        expected = """
            <h1 id="s2">s2</h1>
            <p>replace</p>
        """
        # The section-scoped POST responds with just the edited section.
        response = self.client.post('%s?section=s2&raw=true' %
                                    reverse('wiki.edit_document',
                                            args=[d.slug]),
                                    {"form": "rev",
                                     "slug": d.slug,
                                     "content": replace},
                                    follow=True,
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(normalize_html(expected),
            normalize_html(response.content))
        # The full document keeps s1/s3 and has s2 replaced.
        expected = """
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>replace</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """
        response = self.client.get('%s?raw=true' %
                                   reverse('wiki.document',
                                           args=[d.slug]),
                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(normalize_html(expected),
            normalize_html(response.content))

    @attr('midair')
    def test_midair_section_merge(self):
        """If a page was changed while someone was editing, but the changes
        didn't affect the specific section being edited, then ignore the midair
        warning"""
        self.client.login(username='admin', password='testpass')
        doc, rev = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        replace_1 = """
            <h1 id="replace1">replace1</h1>
            <p>replace</p>
        """
        replace_2 = """
            <h1 id="replace2">replace2</h1>
            <p>replace</p>
        """
        expected = """
            <h1 id="replace1">replace1</h1>
            <p>replace</p>

            <h1 id="replace2">replace2</h1>
            <p>replace</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """
        data = {
            'form': 'rev',
            'content': rev.content,
            'slug': ''
        }
        # Edit #1 starts... capture the current_rev it was based on.
        resp = self.client.get('%s?section=s1' %
                               reverse('wiki.edit_document',
                                       args=[doc.slug]),
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        page = pq(resp.content)
        rev_id1 = page.find('input[name="current_rev"]').attr('value')
        # Edit #2 starts...
        resp = self.client.get('%s?section=s2' %
                               reverse('wiki.edit_document',
                                       args=[doc.slug]),
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        page = pq(resp.content)
        rev_id2 = page.find('input[name="current_rev"]').attr('value')
        # Edit #2 submits successfully
        data.update({
            'form': 'rev',
            'content': replace_2,
            'current_rev': rev_id2,
            'slug': doc.slug
        })
        resp = self.client.post('%s?section=s2&raw=true' %
                                reverse('wiki.edit_document',
                                        args=[doc.slug]),
                                data,
                                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(302, resp.status_code)
        # Edit #1 submits, but since it's a different section, there's no
        # mid-air collision
        data.update({
            'form': 'rev',
            'content': replace_1,
            'current_rev': rev_id1
        })
        resp = self.client.post('%s?section=s1&raw=true' %
                                reverse('wiki.edit_document', args=[doc.slug]),
                                data,
                                HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # No conflict, but we should get a 205 Reset as an indication that the
        # page needs a refresh.
        eq_(205, resp.status_code)
        # Finally, make sure that all the edits landed
        response = self.client.get('%s?raw=true' %
                                   reverse('wiki.document',
                                           args=[doc.slug]),
                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(normalize_html(expected),
            normalize_html(response.content))
        # Also, ensure that the revision is slipped into the headers
        eq_(unicode(Document.objects.get(slug=doc.slug, locale=doc.locale)
                                    .current_revision.id),
            unicode(response['x-kuma-revision']))

    @attr('midair')
    def test_midair_section_collision(self):
        """If both a revision and the edited section has changed, then a
        section edit is a collision."""
        self.client.login(username='admin', password='testpass')
        doc, rev = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        # Both edits target the SAME section (s2), unlike the merge test.
        replace_1 = """
            <h1 id="s2">replace</h1>
            <p>replace</p>
        """
        replace_2 = """
            <h1 id="s2">first replace</h1>
            <p>first replace</p>
        """
        data = {
            'form': 'rev',
            'content': rev.content
        }
        # Edit #1 starts...
        resp = self.client.get('%s?section=s2' %
                               reverse('wiki.edit_document',
                                       args=[doc.slug]),
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        page = pq(resp.content)
        rev_id1 = page.find('input[name="current_rev"]').attr('value')
        # Edit #2 starts...
        resp = self.client.get('%s?section=s2' %
                               reverse('wiki.edit_document',
                                       args=[doc.slug]),
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        page = pq(resp.content)
        rev_id2 = page.find('input[name="current_rev"]').attr('value')
        # Edit #2 submits successfully
        data.update({
            'form': 'rev',
            'content': replace_2,
            'slug': doc.slug,
            'current_rev': rev_id2
        })
        resp = self.client.post('%s?section=s2&raw=true' %
                                reverse('wiki.edit_document',
                                        args=[doc.slug]),
                                data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(302, resp.status_code)
        # Edit #1 submits, but since it's the same section, there's a collision
        data.update({
            'form': 'rev',
            'content': replace_1,
            'current_rev': rev_id1
        })
        resp = self.client.post('%s?section=s2&raw=true' %
                                reverse('wiki.edit_document',
                                        args=[doc.slug]),
                                data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # With the raw API, we should get a 409 Conflict on collision.
        eq_(409, resp.status_code)

    def test_raw_include_option(self):
        """?raw&include strips elements marked class="noinclude"."""
        doc_src = u"""
            <div class="noinclude">{{ XULRefAttr() }}</div>
            <dl>
              <dt>{{ XULAttr(&quot;maxlength&quot;) }}</dt>
              <dd>Type: <em>integer</em></dd>
              <dd>Przykłady 例 예제 示例</dd>
            </dl>
            <div class="noinclude">
              <p>{{ languages( { &quot;ja&quot;: &quot;ja/XUL/Attribute/maxlength&quot; } ) }}</p>
            </div>
        """
        doc, rev = doc_rev(doc_src)
        expected = u"""
            <dl>
              <dt>{{ XULAttr(&quot;maxlength&quot;) }}</dt>
              <dd>Type: <em>integer</em></dd>
              <dd>Przykłady 例 예제 示例</dd>
            </dl>
        """
        resp = self.client.get('%s?raw&include' %
                               reverse('wiki.document', args=[doc.slug]),
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(normalize_html(expected),
            normalize_html(resp.content.decode('utf-8')))

    def test_section_edit_toc(self):
        """show_toc is preserved in section editing."""
        self.client.login(username='admin', password='testpass')
        doc, rev = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        rev.toc_depth = 1
        rev.save()
        replace = """
            <h1 id="s2">s2</h1>
            <p>replace</p>
        """
        self.client.post('%s?section=s2&raw=true' %
                         reverse('wiki.edit_document', args=[doc.slug]),
                         {"form": "rev", "slug": doc.slug, "content": replace},
                         follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # A new revision was created, and it inherits toc_depth.
        changed = Document.objects.get(pk=doc.id).current_revision
        ok_(rev.id != changed.id)
        eq_(1, changed.toc_depth)

    def test_section_edit_review_tags(self):
        """review tags are preserved in section editing."""
        self.client.login(username='admin', password='testpass')
        doc, rev = doc_rev("""
            <h1 id="s1">s1</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s2">s2</h1>
            <p>test</p>
            <p>test</p>

            <h1 id="s3">s3</h1>
            <p>test</p>
            <p>test</p>
        """)
        tags_to_save = ['bar', 'foo']
        rev.save()
        rev.review_tags.set(*tags_to_save)
        replace = """
            <h1 id="s2">s2</h1>
            <p>replace</p>
        """
        self.client.post('%s?section=s2&raw=true' %
                         reverse('wiki.edit_document', args=[doc.slug]),
                         {"form": "rev", "slug": doc.slug, "content": replace},
                         follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # A new revision was created, and it carries the same review tags.
        changed = Document.objects.get(pk=doc.id).current_revision
        ok_(rev.id != changed.id)
        eq_(set(tags_to_save),
            set([t.name for t in changed.review_tags.all()]))
class MindTouchRedirectTests(UserTestCase, WikiTestCase):
    """
    Test that we appropriately redirect old-style MindTouch URLs to
    new-style kuma URLs.
    """
    # A note on these tests: we could try to use assertRedirects on
    # these, but for the most part we're just constructing a URL
    # similar enough to the wiki app's own built-in redirects that
    # it'll pick up the request and do what we want with it. But it
    # may end up issuing its own redirects, which are tricky to sort
    # out from the ones the legacy MindTouch handling will emit, so
    # instead we just test that A) we did issue a redirect and B) the
    # URL we constructed is enough for the document views to go on.
    localizing_client = True

    server_prefix = 'http://testserver/%s/docs' % settings.WIKI_DEFAULT_LANGUAGE
    # Old MindTouch namespace path -> expected new kuma URL.
    namespace_urls = (
        # One for each namespace.
        {'mindtouch': '/Help:Foo',
         'kuma': '%s/Help:Foo' % server_prefix},
        {'mindtouch': '/Help_talk:Foo',
         'kuma': '%s/Help_talk:Foo' % server_prefix},
        {'mindtouch': '/Project:En/MDC_editor_guide',
         'kuma': '%s/Project:MDC_editor_guide' % server_prefix},
        {'mindtouch': '/Project_talk:En/MDC_style_guide',
         'kuma': '%s/Project_talk:MDC_style_guide' % server_prefix},
        {'mindtouch': '/Special:Foo',
         'kuma': '%s/Special:Foo' % server_prefix},
        {'mindtouch': '/Talk:en/Foo',
         'kuma': '%s/Talk:Foo' % server_prefix},
        {'mindtouch': '/Template:Foo',
         'kuma': '%s/Template:Foo' % server_prefix},
        {'mindtouch': '/User:Foo',
         'kuma': '%s/User:Foo' % server_prefix},
    )

    # Documents with legacy MindTouch locale codes and their expected
    # redirected kuma locale paths.
    documents = (
        {'title': 'XHTML', 'mt_locale': 'cn', 'kuma_locale': 'zh-CN',
         'expected': '/zh-CN/docs/XHTML'},
        {'title': 'JavaScript', 'mt_locale': 'zh_cn', 'kuma_locale': 'zh-CN',
         'expected': '/zh-CN/docs/JavaScript'},
        {'title': 'XHTML6', 'mt_locale': 'zh_tw', 'kuma_locale': 'zh-CN',
         'expected': '/zh-TW/docs/XHTML6'},
        {'title': 'HTML7', 'mt_locale': 'fr', 'kuma_locale': 'fr',
         'expected': '/fr/docs/HTML7'},
    )

    def test_namespace_urls(self):
        """Each MindTouch namespace URL 301s to its kuma equivalent."""
        new_doc = document()
        new_doc.title = 'User:Foo'
        new_doc.slug = 'User:Foo'
        new_doc.save()
        for namespace_test in self.namespace_urls:
            resp = self.client.get(namespace_test['mindtouch'], follow=False)
            eq_(301, resp.status_code)
            eq_(namespace_test['kuma'], resp['Location'])

    def test_trailing_slash(self):
        """A legacy URL with a trailing slash still redirects."""
        d = document()
        d.locale = 'zh-CN'
        d.slug = 'foofoo'
        d.title = 'FooFoo'
        d.save()
        mt_url = '/cn/%s/' % (d.slug,)
        resp = self.client.get(mt_url)
        eq_(301, resp.status_code)
        eq_('http://testserver%s' % d.get_absolute_url(), resp['Location'])

    def test_document_urls(self):
        """Legacy locale codes redirect to the mapped kuma locale path."""
        for doc in self.documents:
            d = document()
            d.title = doc['title']
            d.slug = doc['title']
            d.locale = doc['kuma_locale']
            d.save()
            mt_url = '/%s' % '/'.join([doc['mt_locale'], doc['title']])
            resp = self.client.get(mt_url)
            eq_(301, resp.status_code)
            eq_('http://testserver%s' % doc['expected'], resp['Location'])

    def test_view_param(self):
        """A MindTouch ?view=edit URL redirects to the kuma $edit URL."""
        d = document()
        d.locale = settings.WIKI_DEFAULT_LANGUAGE
        d.slug = 'HTML/HTML5'
        d.title = 'HTML 5'
        d.save()
        mt_url = '/en-US/%s?view=edit' % (d.slug,)
        resp = self.client.get(mt_url)
        eq_(301, resp.status_code)
        expected_url = 'http://testserver%s$edit' % d.get_absolute_url()
        eq_(expected_url, resp['Location'])
class AutosuggestDocumentsTests(WikiTestCase):
    """
    Test the we're properly filtering out the Redirects from the document list
    """
    localizing_client = True

    def test_autosuggest_no_term(self):
        """Requesting autosuggest without a ?term= is a 400."""
        url = reverse('wiki.autosuggest_documents',
                      locale=settings.WIKI_DEFAULT_LANGUAGE)
        resp = self.client.get(url)
        eq_(400, resp.status_code)

    def test_document_redirects(self):
        """Redirect documents are excluded from autosuggest results."""

        # All contain "e", so that will be the search term
        invalid_documents = (
            {
                'title': 'Something Redirect 8',
                'html': 'REDIRECT <a class="redirect" href="/blah">Something Redirect</a>',
                'is_redirect': 1
            },
        )
        valid_documents = (
            {'title': 'e 6', 'html': '<p>Blah text Redirect'},
            {'title': 'e 7', 'html': 'AppleTalk'},
            {'title': 'Response.Redirect'},
        )

        for doc in invalid_documents + valid_documents:
            d = document()
            d.title = doc['title']
            if 'html' in doc:
                d.html = doc['html']
            if 'slug' in doc:
                d.slug = doc['slug']
            if 'is_redirect' in doc:
                d.is_redirect = 1
            d.save()

        url = reverse('wiki.autosuggest_documents',
                      locale=settings.WIKI_DEFAULT_LANGUAGE) + '?term=e'
        # The ACAO switch should add a wildcard CORS header here too.
        Switch.objects.create(name='application_ACAO', active=True)
        resp = self.client.get(url)
        ok_('Access-Control-Allow-Origin' in resp)
        eq_('*', resp['Access-Control-Allow-Origin'])

        eq_(200, resp.status_code)
        data = json.loads(resp.content)
        eq_(len(data), len(valid_documents))

        # Ensure that the valid docs found are all in the valid list
        for d in data:
            found = False
            for v in valid_documents:
                if v['title'] in d['title']:
                    found = True
                    break
            eq_(True, found)

    def test_list_no_redirects(self):
        """Redirects and Template: docs are excluded from the all-docs list."""
        Document.objects.all().delete()

        invalid_documents = [
            {
                'title': 'Something Redirect 8',
                'slug': 'xx',
                'html': 'REDIRECT <a class="redirect" href="%s">yo</a>' % settings.SITE_URL
            },
            {
                'title': 'My Template',
                'slug': 'Template:Something',
                'html': 'blah',
            },
        ]
        valid_documents = [
            {'title': 'A Doc', 'slug': 'blah', 'html': 'Blah blah blah'}
        ]
        for doc in invalid_documents + valid_documents:
            document(save=True, slug=doc['slug'],
                     title=doc['title'], html=doc['html'])

        resp = self.client.get(reverse('wiki.all_documents',
                                       locale=settings.WIKI_DEFAULT_LANGUAGE))
        # Only the valid documents should be listed.
        eq_(len(valid_documents), len(pq(resp.content).find('.document-list li')))
class CodeSampleViewTests(UserTestCase, WikiTestCase):
    """Tests for the live code-sample view and its host restrictions."""
    localizing_client = True

    @override_constance_settings(
        KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver')
    def test_code_sample_1(self):
        """The raw source for a document can be requested"""
        d, r = doc_rev("""
            <p>This is a page. Deal with it.</p>
            <div id="sample1" class="code-sample">
                <pre class="brush: html">Some HTML</pre>
                <pre class="brush: css">.some-css { color: red; }</pre>
                <pre class="brush: js">window.alert("HI THERE")</pre>
            </div>
            <p>test</p>
        """)
        # The view should assemble the three brushes into a standalone page.
        expecteds = (
            '<style type="text/css">.some-css { color: red; }</style>',
            'Some HTML',
            '<script type="text/javascript">window.alert("HI THERE")</script>',
        )
        Switch.objects.create(name='application_ACAO', active=True)
        response = self.client.get(reverse('wiki.code_sample',
                                           args=[d.slug, 'sample1']),
                                   HTTP_HOST='testserver')
        ok_('Access-Control-Allow-Origin' in response)
        eq_('*', response['Access-Control-Allow-Origin'])
        eq_(200, response.status_code)
        normalized = normalize_html(response.content)

        # Content checks
        ok_('<!DOCTYPE html>' in response.content)
        for item in expecteds:
            ok_(item in normalized)

    @override_constance_settings(
        KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver')
    def test_code_sample_host_restriction(self):
        """Only hosts matching the allowed-hosts regex may serve samples."""
        d, r = doc_rev("""
            <p>This is a page. Deal with it.</p>
            <div id="sample1" class="code-sample">
                <pre class="brush: html">Some HTML</pre>
                <pre class="brush: css">.some-css { color: red; }</pre>
                <pre class="brush: js">window.alert("HI THERE")</pre>
            </div>
            <p>test</p>
        """)

        # 'testserver' is not in the allowed list -> forbidden.
        response = self.client.get(reverse('wiki.code_sample',
                                           args=[d.slug, 'sample1']),
                                   HTTP_HOST='testserver')
        eq_(403, response.status_code)

        # 'sampleserver' matches the regex -> served.
        response = self.client.get(reverse('wiki.code_sample',
                                           args=[d.slug, 'sample1']),
                                   HTTP_HOST='sampleserver')
        eq_(200, response.status_code)

    @override_constance_settings(
        KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver')
    def test_code_sample_iframe_embed(self):
        """iframes pointing at disallowed hosts get their src stripped."""
        slug = 'test-code-embed'
        embed_url = ('https://sampleserver/%s/docs/%s$samples/sample1' %
                     (settings.WIKI_DEFAULT_LANGUAGE, slug))

        doc_src = """
            <p>This is a page. Deal with it.</p>
            <div id="sample1" class="code-sample">
                <pre class="brush: html">Some HTML</pre>
                <pre class="brush: css">.some-css { color: red; }</pre>
                <pre class="brush: js">window.alert("HI THERE")</pre>
            </div>
            <iframe id="if1" src="%(embed_url)s"></iframe>
            <iframe id="if2" src="http://testserver"></iframe>
            <iframe id="if3" src="https://some.alien.site.com"></iframe>
            <p>test</p>
        """ % dict(embed_url=embed_url)

        slug = 'test-code-doc'
        d, r = doc_rev()
        revision(save=True, document=d, title="Test code doc", slug=slug,
                 content=doc_src)

        response = self.client.get(reverse('wiki.document', args=(d.slug,)))
        eq_(200, response.status_code)

        page = pq(response.content)

        # if1's src matches the allowed-hosts regex, so it survives.
        if1 = page.find('#if1')
        eq_(if1.length, 1)
        eq_(if1.attr('src'), embed_url)

        # if2 and if3 point at disallowed hosts; their src is emptied.
        if2 = page.find('#if2')
        eq_(if2.length, 1)
        eq_(if2.attr('src'), '')

        if3 = page.find('#if3')
        eq_(if3.length, 1)
        eq_(if3.attr('src'), '')
class CodeSampleViewFileServingTests(UserTestCase, WikiTestCase):
    """Tests for serving attachment files referenced from code samples."""

    @override_constance_settings(
        KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver',
        WIKI_ATTACHMENT_ALLOWED_TYPES='text/plain')
    @override_settings(ATTACHMENT_HOST='testserver')
    def test_code_sample_file_serving(self):
        self.client.login(username='admin', password='testpass')
        # first let's upload a file
        file_for_upload = make_test_file(content='Something something unique')
        post_data = {
            'title': 'An uploaded file',
            'description': 'A unique experience for your file serving needs.',
            'comment': 'Yadda yadda yadda',
            'file': file_for_upload,
        }
        response = self.client.post(reverse('attachments.new_attachment'),
                                    data=post_data)
        eq_(response.status_code, 302)

        # then build the document and revision we need to test
        attachment = Attachment.objects.get(title='An uploaded file')
        filename = attachment.current_revision.filename()
        # Relative CSS url() pointing at the attachment-serving view.
        url_css = 'url("files/%(attachment_id)s/%(filename)s")' % {
            'attachment_id': attachment.id,
            'filename': filename,
        }
        doc, rev = doc_rev("""
            <p>This is a page. Deal with it.</p>
            <div id="sample1" class="code-sample">
                <pre class="brush: html">Some HTML</pre>
                <pre class="brush: css">.some-css { background: %s }</pre>
                <pre class="brush: js">window.alert("HI THERE")</pre>
            </div>
            <p>test</p>
        """ % url_css)

        # then see of the code sample view has successfully found the sample
        response = self.client.get(reverse('wiki.code_sample',
                                           args=[doc.slug, 'sample1'],
                                           locale='en-US'))
        eq_(response.status_code, 200)
        normalized = normalize_html(response.content)
        ok_(url_css in normalized)

        # and then we try if a redirect by the file serving view redirects
        # to the main file serving view
        response = self.client.get(reverse('wiki.raw_code_sample_file',
                                           args=[doc.slug,
                                                 'sample1',
                                                 attachment.id,
                                                 filename],
                                           locale='en-US'))
        eq_(response.status_code, 302)
        eq_(response['Location'], attachment.get_file_url())
class DeferredRenderingViewTests(UserTestCase, WikiTestCase):
"""Tests for the deferred rendering system and interaction with views"""
localizing_client = True
    def setUp(self):
        super(DeferredRenderingViewTests, self).setUp()
        # Distinct raw vs rendered content so assertions can tell which one
        # a view served.
        self.rendered_content = 'HELLO RENDERED CONTENT'
        self.raw_content = 'THIS IS RAW CONTENT'
        self.d, self.r = doc_rev(self.raw_content)
        # Disable TOC, makes content inspection easier.
        self.r.toc_depth = 0
        self.r.save()
        self.d.html = self.raw_content
        self.d.rendered_html = self.rendered_content
        self.d.save()
        self.url = reverse('wiki.document',
                           args=(self.d.slug,),
                           locale=self.d.locale)
        # Enable kumascript rendering for these tests (reset in tearDown).
        config.KUMASCRIPT_TIMEOUT = 5.0
        config.KUMASCRIPT_MAX_AGE = 600
    def tearDown(self):
        super(DeferredRenderingViewTests, self).tearDown()
        # Restore the kumascript settings overridden in setUp so other test
        # cases are unaffected.
        config.KUMASCRIPT_TIMEOUT = 0
        config.KUMASCRIPT_MAX_AGE = 0
    @mock.patch('kuma.wiki.kumascript.get')
    def test_rendered_content(self, mock_kumascript_get):
        """Document view should serve up rendered content when available"""
        mock_kumascript_get.return_value = (self.rendered_content, None)
        resp = self.client.get(self.url, follow=False)
        p = pq(resp.content)
        txt = p.find('#wikiArticle').text()
        # Rendered content is shown; raw content and both warning banners
        # are absent.
        ok_(self.rendered_content in txt)
        ok_(self.raw_content not in txt)
        eq_(0, p.find('#doc-rendering-in-progress').length)
        eq_(0, p.find('#doc-render-raw-fallback').length)
    def test_rendering_in_progress_warning(self):
        """Document view should serve up rendered content when available"""
        # Make the document look like there's a rendering in progress.
        self.d.render_started_at = datetime.datetime.now()
        self.d.save()

        resp = self.client.get(self.url, follow=False)
        p = pq(resp.content)
        txt = p.find('#wikiArticle').text()

        # Even though a rendering looks like it's in progress, ensure the
        # last-known render is displayed.
        ok_(self.rendered_content in txt)
        ok_(self.raw_content not in txt)
        # Anonymous visitors never see the in-progress warning.
        eq_(0, p.find('#doc-rendering-in-progress').length)

        # Only for logged-in users, ensure the render-in-progress warning is
        # displayed.
        self.client.login(username='testuser', password='testpass')
        resp = self.client.get(self.url, follow=False)
        p = pq(resp.content)
        eq_(1, p.find('#doc-rendering-in-progress').length)
    @mock.patch('kuma.wiki.kumascript.get')
    def test_raw_content_during_initial_render(self, mock_kumascript_get):
        """Raw content should be displayed during a document's initial
        deferred rendering"""
        mock_kumascript_get.return_value = (self.rendered_content, None)

        # Make the document look like there's no rendered content, but that a
        # rendering is in progress.
        self.d.html = self.raw_content
        self.d.rendered_html = ''
        self.d.render_started_at = datetime.datetime.now()
        self.d.save()

        # Now, ensure that raw content is shown in the view.
        resp = self.client.get(self.url, follow=False)
        p = pq(resp.content)
        txt = p.find('#wikiArticle').text()
        ok_(self.rendered_content not in txt)
        ok_(self.raw_content in txt)
        # Anonymous visitors get no fallback warning banner.
        eq_(0, p.find('#doc-render-raw-fallback').length)

        # Only for logged-in users, ensure that a warning is displayed about
        # the fallback
        self.client.login(username='testuser', password='testpass')
        resp = self.client.get(self.url, follow=False)
        p = pq(resp.content)
        eq_(1, p.find('#doc-render-raw-fallback').length)
@attr('schedule_rendering')
@mock.patch.object(Document, 'schedule_rendering')
@mock.patch('kuma.wiki.kumascript.get')
def test_schedule_rendering(self, mock_kumascript_get,
mock_document_schedule_rendering):
"""Both editing and translating a document should schedule a
deferred re-render via Document.schedule_rendering."""
mock_kumascript_get.return_value = (self.rendered_content, None)
self.client.login(username='testuser', password='testpass')
data = new_document_data()
data.update({
'form': 'rev',
'content': 'This is an update',
})
edit_url = reverse('wiki.edit_document', args=[self.d.slug])
resp = self.client.post(edit_url, data)
# Successful edit redirects and must have scheduled a render.
eq_(302, resp.status_code)
ok_(mock_document_schedule_rendering.called)
mock_document_schedule_rendering.reset_mock()
data.update({
'form': 'both',
'content': 'This is a translation',
})
# Translating to another locale should also schedule a render.
translate_url = (reverse('wiki.translate', args=[data['slug']],
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?tolocale=fr')
response = self.client.post(translate_url, data)
eq_(302, response.status_code)
ok_(mock_document_schedule_rendering.called)
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch('requests.post')
def test_alternate_bleach_whitelist(self, mock_requests_post,
                                    mock_kumascript_get):
    """Content should be filtered through the alternate Bleach
    whitelist only when a logged-in user requests the bleach_new
    flag; anonymous users and unflagged requests get the current
    rules.

    FIX: the request URL previously read '%s?raw¯os%s' — an
    HTML-entity mangling ('&macr;' -> '¯') of the intended
    '?raw&macros' query string; restored the correct parameters.
    """
    # Some test content with contentious tags.
    test_content = """
        <p id="foo">
        <a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a>
        <textarea name="foo"></textarea>
        </p>
    """
    # Expected result filtered through old/current Bleach rules:
    # everything survives, including the 'position' style.
    expected_content_old = """
        <p id="foo">
        <a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a>
        <textarea name="foo"></textarea>
        </p>
    """
    # Expected result filtered through the alternate whitelist: only
    # the 'border' style survives.
    expected_content_new = """
        <p id="foo">
        <a style="border: 1px;" href="http://example.com">This is a test</a>
        <textarea name="foo"></textarea>
        </p>
    """
    # Set up an alternate set of whitelists...
    config.BLEACH_ALLOWED_TAGS = json.dumps([
        "a", "p"
    ])
    config.BLEACH_ALLOWED_ATTRIBUTES = json.dumps({
        "a": ['href', 'style'],
        "p": ['id']
    })
    config.BLEACH_ALLOWED_STYLES = json.dumps([
        "border"
    ])
    config.KUMASCRIPT_TIMEOUT = 100
    # Rig up a mocked response from KumaScript GET method
    mock_kumascript_get.return_value = (test_content, None)
    # Rig up a mocked response from KumaScript POST service.
    # Digging a little deeper into the stack, so that the rest of
    # kumascript.post processing happens.
    from StringIO import StringIO
    m_resp = mock.Mock()
    m_resp.status_code = 200
    m_resp.text = test_content
    m_resp.read = StringIO(test_content).read
    mock_requests_post.return_value = m_resp
    d, r = doc_rev(test_content)
    # (logged in?, extra query param, expected filtered content)
    trials = (
        (False, '', expected_content_old),
        (False, '&bleach_new', expected_content_old),
        (True, '', expected_content_old),
        (True, '&bleach_new', expected_content_new),
    )
    for trial in trials:
        do_login, param, expected = trial
        if do_login:
            self.client.login(username='testuser', password='testpass')
        else:
            self.client.logout()
        url = ('%s?raw&macros%s' % (
            reverse('wiki.document', args=(d.slug,), locale=d.locale),
            param))
        resp = self.client.get(url, follow=True)
        eq_(normalize_html(expected),
            normalize_html(resp.content),
            "Should match? %s %s %s %s" %
            (do_login, param, expected, resp.content))
class APITests(UserTestCase, WikiTestCase):
"""Tests for the HTTP PUT document API: create, update, section
edits, content-type handling, auth-key history and ETag conflict
detection."""
localizing_client = True
def setUp(self):
super(APITests, self).setUp()
# Create a user plus an API key pair used for Basic auth below.
self.username = 'tester23'
self.password = 'trustno1'
self.email = 'tester23@example.com'
self.user = user(username=self.username,
email=self.email,
password=self.password,
save=True)
self.key = Key(user=self.user, description='Test Key 1')
self.secret = self.key.generate_secret()
self.key_id = self.key.key
self.key.save()
# NOTE(review): base64.encodestring is deprecated (encodebytes in
# Python 3); kept for this legacy Python 2 codebase.
auth = '%s:%s' % (self.key_id, self.secret)
self.basic_auth = 'Basic %s' % base64.encodestring(auth)
# A three-section document to exercise whole-doc and section PUTs.
self.d, self.r = doc_rev("""
<h3 id="S1">Section 1</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S2">Section 2</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S3">Section 3</h3>
<p>This is a page. Deal with it.</p>
""")
self.r.tags = "foo, bar, baz"
self.r.review_tags.set('technical', 'editorial')
self.url = self.d.get_absolute_url()
def tearDown(self):
super(APITests, self).tearDown()
# Remove everything created by setUp and by the tests themselves.
Document.objects.filter(current_revision__creator=self.user).delete()
Revision.objects.filter(creator=self.user).delete()
Key.objects.filter(user=self.user).delete()
self.user.delete()
def test_put_existing(self):
"""PUT API should allow overwrite of existing document content"""
data = dict(
summary="Look, I made an edit!",
content="""
<p>This is an edit to the page. We've dealt with it.</p>
""",
)
# No auth key leads to a 403 Forbidden
resp = self._put(self.url, data)
eq_(403, resp.status_code)
# But, this should work, given a proper auth key
resp = self._put(self.url, data,
HTTP_AUTHORIZATION=self.basic_auth)
# 205 Reset Content signals a successful update of an existing doc.
eq_(205, resp.status_code)
# Verify the edit happened.
curr_d = Document.objects.get(pk=self.d.pk)
eq_(normalize_html(data['content'].strip()),
normalize_html(Document.objects.get(pk=self.d.pk).html))
# Also, verify that this resulted in a new revision.
curr_r = curr_d.current_revision
ok_(self.r.pk != curr_r.pk)
eq_(data['summary'], curr_r.summary)
# Review tags are carried over from the previous revision.
r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all()))
eq_('editorial,technical', r_tags)
def test_put_section_edit(self):
"""PUT API should allow overwrite of a specific section of an existing
document"""
data = dict(
content="""
<h3 id="S2">Section 2</h3>
<p>This is an edit to the page. We've dealt with it.</p>
""",
# Along with the section, let's piggyback in some other metadata
# edits just for good measure. They're not tied to section edit
# though.
title="Hahah this is a new title!",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put('%s?section=S2' % self.url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
# Sections S1 and S3 must survive untouched; only S2 is replaced.
expected = """
<h3 id="S1">Section 1</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S2">Section 2</h3>
<p>This is an edit to the page. We've dealt with it.</p>
<h3 id="S3">Section 3</h3>
<p>This is a page. Deal with it.</p>
"""
# Verify the section edit happened.
curr_d = Document.objects.get(pk=self.d.pk)
eq_(normalize_html(expected.strip()),
normalize_html(curr_d.html))
eq_(data['title'], curr_d.title)
d_tags = ','.join(sorted(t.name for t in curr_d.tags.all()))
eq_(data['tags'], d_tags)
# Also, verify that this resulted in a new revision.
curr_r = curr_d.current_revision
ok_(self.r.pk != curr_r.pk)
r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all()))
eq_(data['review_tags'], r_tags)
def test_put_new_root(self):
"""PUT API should allow creation of a document whose path would place
it at the root of the topic hierarchy."""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
# 201 Created signals a brand-new document.
eq_(201, resp.status_code)
def test_put_new_child(self):
"""PUT API should allow creation of a document whose path would make it
a child of an existing parent."""
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
# This first attempt should fail; the proposed parent does not exist.
url = '%s/nonexistent/newchild' % self.url
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(404, resp.status_code)
# TODO: I suppose we could rework this part to create the chain of
# missing parents with stub content, but currently this demands
# that API users do that themselves.
# Now, fill in the parent gap...
p_doc = document(slug='%s/nonexistent' % self.d.slug,
locale=settings.WIKI_DEFAULT_LANGUAGE,
parent_topic=self.d)
p_doc.save()
p_rev = revision(document=p_doc,
slug='%s/nonexistent' % self.d.slug,
title='I EXIST NOW', save=True)
p_rev.save()
# The creation should work, now.
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
# The new document should be linked under the filled-in parent.
new_slug = '%s/nonexistent/newchild' % self.d.slug
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=new_slug)
eq_(p_doc.pk, new_doc.parent_topic.pk)
def test_put_unsupported_content_type(self):
"""PUT API should complain with a 400 Bad Request on an unsupported
content type submission"""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = "I don't even know what this content is."
resp = self._put(url, json.dumps(data),
content_type='x-super-happy-fun-text',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(400, resp.status_code)
def test_put_json(self):
"""PUT API should handle application/json requests"""
slug = 'new-root-json-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, json.dumps(data),
content_type='application/json',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
# Title and content should be extracted from the JSON body.
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(data['title'], new_doc.title)
eq_(normalize_html(data['content']), normalize_html(new_doc.html))
def test_put_simple_html(self):
"""PUT API should handle text/html requests"""
slug = 'new-root-html-doc-1'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
html = """
<p>This is a new page, hooray!</p>
"""
resp = self._put(url, html, content_type='text/html',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(normalize_html(html), normalize_html(new_doc.html))
def test_put_complex_html(self):
"""PUT API should handle text/html requests with complex HTML documents
and extract document fields from the markup"""
slug = 'new-root-html-doc-2'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title='This is a complex document',
content="""
<p>This is a new page, hooray!</p>
""",
)
html = """
<html>
<head>
<title>%(title)s</title>
</head>
<body>%(content)s</body>
</html>
""" % data
resp = self._put(url, html, content_type='text/html',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
# Title comes from <title>, content from <body>.
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(data['title'], new_doc.title)
eq_(normalize_html(data['content']), normalize_html(new_doc.html))
# TODO: Anything else useful to extract from HTML?
# Extract tags from head metadata?
def test_put_track_authkey(self):
"""Revisions modified by PUT API should track the auth key used"""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
# The key's history should record the creation...
last_log = self.key.history.order_by('-pk').all()[0]
eq_('created', last_log.action)
data['title'] = 'New title for old page'
resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
# ...and the subsequent update.
last_log = self.key.history.order_by('-pk').all()[0]
eq_('updated', last_log.action)
def test_put_etag_conflict(self):
"""A PUT request with an if-match header throws a 412 Precondition
Failed if the underlying document has been changed."""
resp = self.client.get(self.url)
orig_etag = resp['ETag']
content1 = """
<h2 id="s1">Section 1</h2>
<p>New section 1</p>
<h2 id="s2">Section 2</h2>
<p>New section 2</p>
"""
# First update should work.
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=orig_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
# Get the new etag, ensure it doesn't match the original.
resp = self.client.get(self.url)
new_etag = resp['ETag']
ok_(orig_etag != new_etag)
# But, the ETag should have changed, so this update shouldn't work.
# Using the old ETag suggests a mid-air edit collision happened.
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=orig_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(412, resp.status_code)
# Just for good measure, switching to the new ETag should work
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=new_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
def _put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""django.test.client.put() does the wrong thing, here. This does
better, based on post().

NOTE(review): data={} is a mutable default argument; harmless here
because it is never mutated, but worth cleaning up.
"""
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
post_data = smart_str(data, encoding=charset)
# Build a raw WSGI request dict, mirroring Client.post() internals.
parsed = urlparse(path)
params = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self.client._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'PUT',
'wsgi.input': FakePayload(post_data),
}
params.update(extra)
response = self.client.request(**params)
if follow:
response = self.client._handle_redirects(response, **extra)
return response
class PageMoveTests(UserTestCase, WikiTestCase):
"""Tests for the page-move view."""
localizing_client = True
def setUp(self):
super(PageMoveTests, self).setUp()
# Page moves are gated behind a waffle flag granted to superusers.
page_move_flag = Flag.objects.create(name='page_move')
page_move_flag.users = self.user_model.objects.filter(is_superuser=True)
page_move_flag.save()
def test_move_conflict(self):
"""Moving a tree onto a destination slug that already exists should
re-display the move form (200) rather than perform the move."""
parent = revision(title='Test page move views',
slug='test-page-move-views',
is_approved=True,
save=True)
parent_doc = parent.document
child = revision(title='Child of page-move view test',
slug='page-move/test-views',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = parent.document
child_doc.save()
# Pre-create a document at the destination slug to force a conflict.
revision(title='Conflict for page-move view',
slug='moved/test-page-move-views/test-views',
is_approved=True,
save=True)
data = {'slug': 'moved/test-page-move-views'}
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('wiki.move',
args=(parent_doc.slug,),
locale=parent_doc.locale),
data=data)
eq_(200, resp.status_code)
class DocumentZoneTests(UserTestCase, WikiTestCase):
"""Tests for DocumentZone styles and the stylesheet links injected
into child document views."""
localizing_client = True
def setUp(self):
super(DocumentZoneTests, self).setUp()
# Build a three-level topic hierarchy: root -> middle -> sub.
root_rev = revision(title='ZoneRoot', slug='ZoneRoot',
content='This is the Zone Root',
is_approved=True, save=True)
self.root_doc = root_rev.document
middle_rev = revision(title='middlePage', slug='middlePage',
content='This is a middlepage',
is_approved=True, save=True)
self.middle_doc = middle_rev.document
self.middle_doc.parent_topic = self.root_doc
self.middle_doc.save()
sub_rev = revision(title='SubPage', slug='SubPage',
content='This is a subpage',
is_approved=True, save=True)
self.sub_doc = sub_rev.document
self.sub_doc.parent_topic = self.middle_doc
self.sub_doc.save()
# Zones (each with custom CSS) are attached to root and middle only.
self.root_zone = DocumentZone(document=self.root_doc)
self.root_zone.styles = """
article { color: blue; }
"""
self.root_zone.save()
self.middle_zone = DocumentZone(document=self.middle_doc)
self.middle_zone.styles = """
article { font-weight: bold; }
"""
self.middle_zone.save()
def test_zone_styles(self):
"""Ensure CSS styles for a zone can be fetched"""
url = reverse('wiki.styles', args=(self.root_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(self.root_zone.styles, response.content)
url = reverse('wiki.styles', args=(self.middle_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(self.middle_zone.styles, response.content)
# A document without its own zone has no styles resource.
url = reverse('wiki.styles', args=(self.sub_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(404, response.status_code)
def test_zone_styles_links(self):
"""Ensure link to zone style appears in child document views"""
url = reverse('wiki.document', args=(self.sub_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
# The subpage should link the stylesheets of BOTH ancestor zones.
styles_url = reverse('wiki.styles', args=(self.root_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
root_expected = ('<link rel="stylesheet" type="text/css" href="%s"' %
styles_url)
ok_(root_expected in response.content)
styles_url = reverse('wiki.styles', args=(self.middle_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
middle_expected = ('<link rel="stylesheet" type="text/css" href="%s"' %
styles_url)
ok_(middle_expected in response.content)
class ListDocumentTests(UserTestCase, WikiTestCase):
"""Tests for list_documents view"""
localizing_client = True
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
def test_case_insensitive_tags(self):
"""
Bug 976071 - Tags should be case insensitive
https://bugzil.la/976071
"""
lower_tag = DocumentTag.objects.create(name='foo', slug='foo')
lower_tag.save()
doc = Document.objects.get(pk=1)
doc.tags.set(lower_tag)
# Both the lowercase and the capitalized tag URL should list the doc.
response = self.client.get(reverse('wiki.tag', args=['foo']))
ok_(doc.slug in response.content.decode('utf-8'))
response = self.client.get(reverse('wiki.tag', args=['Foo']))
ok_(doc.slug in response.content.decode('utf-8'))
|
varunkamra/kuma
|
kuma/wiki/tests/test_views.py
|
Python
|
mpl-2.0
| 167,376
|
[
"VisIt"
] |
1803705fbc538d294001f92ec5d84b84731b8dafebe7a166c123cabfc124ad8d
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
import pyscf.pbc
from pyscf import ao2mo, gto
from pyscf.pbc import gto as pgto
from pyscf.pbc import scf as pscf
from pyscf.pbc.df import rsdf
# Small H2 test system: minimal s+p basis in a 3 Angstrom cubic cell.
cell = pgto.Cell(
atom="H 0 0 0; H 0.75 0 0",
a = numpy.eye(3)*3,
basis={"H": [[0,(0.5,1.)],[1,(0.3,1.)]]},
)
cell.verbose = 0
cell.max_memory = 1000
cell.build()
# Arbitrary fractional k-point shift used by the *_shiftedcenter tests.
scaled_center = numpy.array([0.392, 0.105, 0.872])
def tearDownModule():
"""Release the module-level cell once all tests in this module ran."""
global cell
del cell
class KnownValues(unittest.TestCase):
"""Regression tests pinning RSDF (range-separated density fitting)
SCF total energies for the H2 cell against previously computed
reference values (7 decimal places)."""
def test_h2_gamma(self):
# Gamma-point KRHF with RS density fitting.
mf = pscf.KRHF(cell).rs_density_fit()
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -1.0430635249356706, 7)
def test_h2_kpt1_shiftedcenter(self):
# Single k-point shifted away from Gamma.
kpts = cell.make_kpts([1,1,1], scaled_center=scaled_center)
mf = pscf.KRHF(cell, kpts).rs_density_fit()
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -0.9961857465459392, 7)
def test_h2_jonly_k211(self):
# KRKS/PBE on a 2x1x1 k-mesh; name suggests this exercises the
# J-only (no exact exchange) code path — pure functional used.
kpts = cell.make_kpts([2,1,1])
mf = pscf.KRKS(cell,kpts).rs_density_fit()
mf.xc = "pbe"
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -1.0021023542499443, 7)
def test_h2_jonly_k211_shiftedcenter(self):
kpts = cell.make_kpts([2,1,1],scaled_center=scaled_center)
mf = pscf.KRKS(cell,kpts).rs_density_fit()
mf.xc = "pbe"
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -1.0047041613565, 7)
def test_h2_jk_k211(self):
# KRHF on the same mesh exercises both J and K builds.
kpts = cell.make_kpts([2,1,1])
mf = pscf.KRHF(cell,kpts).rs_density_fit()
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -0.9822344249942677, 7)
def test_h2_jk_k211_shiftedcenter(self):
kpts = cell.make_kpts([2,1,1],scaled_center=scaled_center)
mf = pscf.KRHF(cell,kpts).rs_density_fit()
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -0.9840980585857037, 7)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
print("Full Tests for rsdf scf")
unittest.main()
|
sunqm/pyscf
|
pyscf/pbc/df/test/test_rsdf_scf.py
|
Python
|
apache-2.0
| 2,579
|
[
"PySCF"
] |
84bc229cb5386a32849cdb757a34a370420621f94f557a57611e4c24c37e5792
|
''' <h1>Library for combined x-ray and neutrons simulations.</h1>
<p>The neutron simulations is capable of handling non-magnetic,
magnetic non-spin flip as well as neutron spin-flip reflectivity. </p>
<h2>Classes</h2>
<h3>Layer</h3>
<code> Layer(b = 0.0, d = 0.0, f = 0.0+0.0J, dens = 1.0, magn_ang = 0.0, magn = 0.0, sigma = 0.0)</code>
<dl>
<dt><code><b>b</b></code></dt>
<dd>The neutron scattering length per formula unit in fm (fermi meter = 1e-15m)</dd>
<dt><code><b>d</b></code></dt>
<dd>The thickness of the layer in AA (Angstroms = 1e-10m)</dd>
<dt><code><b>f</b></code></dt>
<dd>The x-ray scattering length per formula unit in electrons. To be strict it is the
number of Thompson scattering lengths for each formula unit.</dd>
<dt><code><b>dens</b></code></dt>
<dd>The density of formula units in units per Angstroms. Note the units!</dd>
<dt><code><b>magn_ang</b></code></dt>
<dd>The angle of the magnetic moment in degrees. 0 degrees corresponds to
a moment collinear with the neutron spin.</dd>
<dt><code><b>magn</b></code></dt>
<dd>The magnetic moment per formula unit (same formula unit as b and dens refer to)</dd>
<dt><code><b>sigma</b></code></dt>
<dd>The root mean square roughness of the top interface of the layer in Angstroms.</dd>
<dt><code><b>xs_ai</b></code></dt>
<dd>The sum of the absorption cross section and the incoherent scattering cross section
in barns for neutrons</dd>
</dl>
<h3>Stack</h3>
<code> Stack(Layers = [], Repetitions = 1)</code>
<dl>
<dt><code><b>Layers</b></code></dt>
<dd>A <code>list</code> consisting of <code>Layer</code>s in the stack;
the first item is the layer closest to the bottom</dd>
<dt><code><b>Repetitions</b></code></dt>
<dd>The number of repetitions of the stack</dd>
</dl>
<h3>Sample</h3>
<code> Sample(Stacks = [], Ambient = Layer(), Substrate = Layer())</code>
<dl>
<dt><code><b>Stacks</b></code></dt>
<dd>A <code>list</code> consisting of <code>Stack</code>s in the sample;
the first item is the stack closest to the bottom</dd>
<dt><code><b>Ambient</b></code></dt>
<dd>A <code>Layer</code> describing the ambient (environment above the sample).
Only the scattering lengths and density of the layer are used.</dd>
<dt><code><b>Substrate</b></code></dt>
<dd>A <code>Layer</code> describing the substrate (environment below the sample).
Only the scattering lengths, density and roughness of the layer are used.</dd>
</dl>
<h3>Instrument</h3>
<code>Instrument(probe = 'x-ray', wavelength = 1.54, coords = 'tth',
I0 = 1.0 res = 0.001, restype = 'no conv', respoints = 5, resintrange = 2,
beamw = 0.01, footype = 'no corr', samplelen = 10.0, incangle = 0.0, pol = 'uu')</code>
<dl>
<dt><code><b>probe</b></code></dt>
<dd>Describes the radiation and measurements used; is one of:
'x-ray', 'neutron', 'neutron pol', 'neutron pol spin flip', 'neutron tof', 'neutron pol tof'
or the respective
number 0, 1, 2, 3, 4, 5, 6. The calculations for x-rays uses <code>f</code> for the scattering
length for neutrons <code>b</code> for 'neutron pol', 'neutron pol spin flip' and
'neutron pol tof' alternatives the <code>magn</code>
is used in the calculations. Note that the angle of magnetization <code>magn_ang</code>
is only used in the last alternative.</dd>
<dt><code><b>wavelength</b></code></dt>
<dd>The wavelength of the radiation given in AA (Angstroms)</dd>
<dt><code><b>coords</b></code></dt>
<dd>The coordinates of the data given to the SimSpecular function.
The available alternatives are: 'q' or 'tth'. Alternatively the numbers
0 (q) or 1 (tth) can be used.</dd>
<dt><code><b>I0</b></code></dt>
<dd>The incident intensity (a scaling factor)</dd>
<dt><code><b>Ibkg</b></code></dt>
<dd>The background intensity. Added as a constant value to the calculated
reflectivity</dd>
<dt><code><b>res</b></code></dt>
<dd>The resolution of the instrument given in the coordinates of
<code>coords</code>. This assumes a gaussian resolution function and
<code>res</code> is the standard deviation of that gaussian.
If <code>restype</code> has (dx/x) in its name the gaussian standard deviation is given by res*x where x is
either in tth or q.</dd>
<dt><code><b>restype</b></code></dt>
<dd>Describes the type of the resolution calculated. One of the alternatives:
'no conv', 'fast conv', 'full conv and varying res.', 'fast conv + varying res.',
'full conv and varying res. (dx/x)', 'fast conv + varying res. (dx/x)'.
The respective numbers 0-3 also work. Note that fast convolution only allows
a single value for res, whereas the others can also take an array with the
same length as the x-data (varying resolution)</dd>
<dt><code><b>respoints</b></code></dt>
<dd>The number of points to include in the resolution calculation. This is only
used for 'full conv and vaying res.', 'fast conv + varying res', 'full conv and varying res. (dx/x)' and
'fast conv + varying res. (dx/x)'.</dd>
<dt><code><b>resintrange</b></code></dt>
<dd>Number of standard deviatons to integrate the resolution function times
the reflectivity over</dd>
<dt><code><b>footype</b></code></dt>
<dd>Which type of footprint correction is to be applied to the simulation.
One of: 'no corr', 'gauss beam' or 'square beam'. Alternatively,
the numbers 0-2 are also valid. The different choices are self-explanatory.</dd>
<dt><code><b>beamw</b></code></dt>
<dd>The width of the beam given in mm. For 'gauss beam' it should be
the standard deviation. For 'square beam' it is the full width of the beam.</dd>
<dt><code><b>samplelen</b></code></dt>
<dd>The length of the sample given in mm</dd>
<dt><code><b>incangle</b></code></dt>
<dd>The incident angle of the neutrons, only valid in tof mode</dd>
<dt><code><b>pol</b></code></dt>
<dd>The measured polarization of the instrument. Valid options are:
'uu','dd', 'ud', 'du' or 'ass' the respective number 0-3 also works.</dd>
'''
from numpy import *
try:
import genx.models.lib.paratt_cython as Paratt
except Exception as S:
print('Not using inline c code for reflectivity calcs - can not import module')
print(S)
import genx.models.lib.paratt as Paratt
import genx.models.lib.neutron_refl as MatrixNeutron
from genx.models.lib.instrument import *
import genx.models.lib.refl as refl
# Preamble to define the parameters needed for the models outlined below:
#import genx.models.lib.paratt as slow_paratt
ModelID='SpecNX'
#InstrumentParameters={'Wavelength':1.54, 'Coordinates':1, 'I0':1.0, 'Sim': 0,\
# 'Res':0.001, 'Restype':0, 'Respoints':5, 'Resintrange':2, 'Beaw':0.01,\
# 'Footype':0.0, 'Samlen':10.0, 'Incangle':0.0}
__pars__ = ['Layer', 'Stack', 'Sample', 'Instrument']
# Allowed string values for each string-typed Instrument parameter; the
# index of each string doubles as its numeric code.
instrument_string_choices = {'probe': ['x-ray', 'neutron', 'neutron pol',
'neutron pol spin flip', 'neutron tof', 'neutron pol tof'], 'coords': ['q', 'tth'],
'restype': ['no conv', 'fast conv',
'full conv and varying res.', 'fast conv + varying res.',
'full conv and varying res. (dx/x)', 'fast conv + varying res. (dx/x)'],
'footype': ['no corr', 'gauss beam', 'square beam'],
'pol': ['uu', 'dd', 'ud', 'ass', 'du']}
# Default values for every Instrument parameter.
InstrumentParameters = {'probe':'x-ray', 'wavelength':1.54, 'coords':'tth',
'I0':1.0, 'res':0.001,
'restype':'no conv', 'respoints':5, 'resintrange':2, 'beamw':0.01,
'footype': 'no corr', 'samplelen':10.0, 'incangle':0.0, 'pol': 'uu',
'Ibkg': 0.0, 'tthoff':0.0}
# GUI grouping of the Instrument parameters.
InstrumentGroups = [('General', ['wavelength', 'coords', 'I0', 'Ibkg', 'tthoff']),
('Resolution', ['restype', 'res', 'respoints', 'resintrange']),
('Neutron', ['probe', 'pol', 'incangle']),
('Footprint', ['footype', 'beamw', 'samplelen',]),
]
# Display units for the Instrument parameters.
InstrumentUnits = {'probe':'', 'wavelength': 'AA', 'coords':'',
'I0': 'arb.', 'res': '[coord]',
'restype':'', 'respoints':'pts.', 'resintrange':'[coord]', 'beamw':'mm',\
'footype': '', 'samplelen':'mm', 'incangle':'deg.', 'pol': '',\
'Ibkg': 'arb.', 'tthoff':'deg.'}
# Coordinates=1 or 'tth' => twothetainput
# Coordinates=0 or 'q'=> Q input
# probe: Type of simulation
# 'x-ray' or 0: X-rays (One output)
# 'neutron'or 1: Neutrons (One output, ignoring magn, magn_ang)
# 'neutron pol' or 2: Neutrons polarized (Two outputs Ruu,Rdd)
# 'neutron pol spin flip' or 3: Neutrons polarized with spin-flip
# (Three outputs Ruu,Rdd,Rud=Rdu, ignoring sigma!)
# 'neutron tof' or 4: Neutrons non-polarized TOF, Inc Angle must be set
# 'neutron pol tof'or 5: Neutrons polarized TOF (non-spin flip),
# Inc Angle must be set
#
# res stddev of resolution
# restype 0 'none': No resolution convlution
# 1 or 'fast': Fast convolution
# 2 or 'full': Full Convolution +varying resolution
# 3 or 'ffull': Fast convolution varying resolution
# respoints Number of points for the convolution only valid for ResolutionType=2
# resintrange Number of standard deviatons to integrate over default 2
# Parameters for footprint coorections
# footype: 0 or 'no corr': No corections for footprint
# 1 or 'gauss beam': Correction for Gaussian beam => Beaw given in mm and stddev
# 2 or 'square beam': Correction for square profile => Beaw given in full width mm
# samlen= Samplelength in mm.
# Per-layer defaults and units; see the module docstring for meanings.
LayerParameters={'sigma':0.0, 'dens':1.0, 'd':0.0, 'f':(1.0+1.0j)*1e-20,
'b': 0.0 + 0.0J, 'xs_ai': 0.0, 'magn':0.0, 'magn_ang':0.0}
LayerUnits = {'sigma': 'AA', 'dens': 'at./AA', 'd': 'AA', 'f':'el./at.',
'b': 'fm/at.', 'xs_ai': 'barn/at.', 'magn': 'mu_B/at.', 'magn_ang': 'deg.'}
LayerGroups = [('Standard',['f','dens','d','sigma']),
('Neutron', ['b', 'xs_ai', 'magn', 'magn_ang'])]
StackParameters={'Layers':[], 'Repetitions':1}
SampleParameters={'Stacks':[], 'Ambient':None, 'Substrate':None}
AA_to_eV = 12398.5
''' Conversion from Angstrom to eV E = AA_to_eV/lamda.'''
q_limit = 1e-10
''' Minimum allowed q-value '''
# A buffer to save previous calculations for spin-flip calculations
class Buffer:
# Cached reflectivities from the most recent spin-flip calculation,
# together with the parameters/TwoThetaQz they were computed for, so
# repeated channel evaluations can reuse them.
Ruu = 0
Rdd = 0
Rdu = 0
Rud = 0
parameters = None
TwoThetaQz = None
def footprintcorr(Q, instrument):
    """Return the footprint correction factor(s) for scattering vector Q.

    Converts Q to the incidence angle theta (degrees) using the
    instrument wavelength and applies the footprint model selected by
    instrument.getFootype(): gaussian beam, square beam or none.

    Raises ValueError for an unrecognised footype.
    """
    foocor = 1.0
    footype = instrument.getFootype()
    beamw = instrument.getBeamw()
    samlen = instrument.getSamplelen()
    theta = arcsin(Q * instrument.getWavelength() / 4.0 / pi) * 180 / pi
    if footype == 1 or footype == instrument_string_choices['footype'][1]:
        foocor = GaussIntensity(theta, samlen / 2.0, samlen / 2.0, beamw)
    elif footype == 2 or footype == instrument_string_choices['footype'][2]:
        foocor = SquareIntensity(theta, samlen, beamw)
    elif footype == 0 or footype == instrument_string_choices['footype'][0]:
        pass
    else:
        # FIX: the adjacent string literals concatenated without a
        # space, producing "footype,is WRONG" in the message.
        raise ValueError('The choice of footprint correction, footype, '
                         'is WRONG')
    return foocor
def resolutioncorr(R, TwoThetaQz, foocor, instrument, weight):
    """Do the convolution of the reflectivity to account for resolution effects.

    Applies the footprint correction foocor and then the convolution
    selected by instrument.getRestype(); weight is the resolution
    weight vector produced by resolution_init for the full-convolution
    variants. Raises ValueError for an unrecognised restype.
    """
    restype = instrument.getRestype()
    if restype == instrument_string_choices['restype'][1] or restype == 1:
        # Fast convolution with a single, constant resolution.
        R = ConvoluteFast(TwoThetaQz, R[:] * foocor, instrument.getRes(), \
                          range=instrument.getResintrange())
    elif (restype == instrument_string_choices['restype'][2] or restype == 2 or
          restype == instrument_string_choices['restype'][4] or restype == 4):
        # Full convolution using the precomputed resolution vector.
        R = ConvoluteResolutionVector(TwoThetaQz, R[:] * foocor, weight)
    elif restype == instrument_string_choices['restype'][3] or restype == 3:
        # Fast convolution with point-wise varying resolution.
        R = ConvoluteFastVar(TwoThetaQz, R[:] * foocor, instrument.getRes(), range=instrument.getResintrange())
    elif restype == instrument_string_choices['restype'][5] or restype == 5:
        # Fast convolution with resolution proportional to x (dx/x).
        R = ConvoluteFastVar(TwoThetaQz, R[:] * foocor, instrument.getRes()*TwoThetaQz,
                             range=instrument.getResintrange())
    elif restype == instrument_string_choices['restype'][0] or restype == 0:
        R = R[:] * foocor
    else:
        # FIX: the adjacent string literals concatenated without a
        # space, producing "restype,is WRONG" in the message.
        raise ValueError('The choice of resolution type, restype, '
                         'is WRONG')
    return R
def resolution_init(TwoThetaQz, instrument):
''' Inits the dependent variable with regards to coordinates and resolution.

Returns (Q, TwoThetaQz, weight): the scattering vector, the possibly
expanded x-coordinates and the resolution weight vector (0 when the
chosen restype does not use one).
'''
restype = instrument.getRestype()
weight = 0
if restype == 2 or restype == instrument_string_choices['restype'][2]:
# Full convolution: expand each x-point into a resolution vector.
(TwoThetaQz, weight) = ResolutionVector(TwoThetaQz[:],
instrument.getRes(), instrument.getRespoints(),
range=instrument.getResintrange())
elif restype == 4 or restype == instrument_string_choices['restype'][4]:
# Same, with resolution scaling proportionally to x (dx/x).
(TwoThetaQz, weight) = ResolutionVector(TwoThetaQz[:],
instrument.getRes()*TwoThetaQz, instrument.getRespoints(),
range=instrument.getResintrange())
# TTH values given as x
if instrument.getCoords() == instrument_string_choices['coords'][1] \
or instrument.getCoords() == 1:
Q = 4 * pi / instrument.getWavelength() * sin((TwoThetaQz + instrument.getTthoff()) * pi / 360.0)
# Q vector given....
elif instrument.getCoords() == instrument_string_choices['coords'][0] \
or instrument.getCoords() == 0:
# NOTE(review): tthoff*pi/360 converts a two-theta offset to a theta
# offset in radians before re-evaluating Q — presumably intentional
# to mirror the tth branch above; confirm.
Q = 4 * pi / instrument.getWavelength() * sin(
arcsin(TwoThetaQz * instrument.getWavelength() / 4 / pi) + instrument.getTthoff() * pi / 360.)
else:
raise ValueError('The value for coordinates, coords, is WRONG! should be q(0) or tth(1).')
return Q, TwoThetaQz, weight
def neutron_sld(abs_xs, dens, fb, wl):
    """Return the complex neutron scattering length density.

    abs_xs -- absorption cross section term
    dens   -- number density
    fb     -- scattering length
    wl     -- neutron wavelength
    """
    scattering = wl ** 2 / 2 / pi * fb
    absorption = 1.0J * abs_xs * wl / 4 / pi
    return dens * (scattering - absorption)
def Specular(TwoThetaQz, sample, instrument):
    """ Simulate the specular signal from sample when probed with instrument

    The probe type selects between x-ray, neutron, polarized neutron
    (with and without spin flip) and time-of-flight calculations.
    Returns the reflectivity scaled by I0 with the constant background added.

    # BEGIN Parameters
    TwoThetaQz data.x
    # END Parameters
    """
    # preamble to get it working with my class interface
    restype = instrument.getRestype()
    Q, TwoThetaQz, weight = resolution_init(TwoThetaQz, instrument)
    if any(Q < q_limit):
        raise ValueError('The q vector has to be above %.1e'%q_limit)

    type = instrument.getProbe()
    pol = instrument.getPol()
    parameters = sample.resolveLayerParameters()
    if type == instrument_string_choices['probe'][0] or type == 0:
        # X-rays: energy-dependent scattering factors
        e = AA_to_eV/instrument.getWavelength()
        fb = refl.cast_to_array(parameters['f'], e)
    else:
        # Neutrons: scattering lengths and absorption cross sections
        fb = array(parameters['b'], dtype=complex128)*1e-5
        abs_xs = array(parameters['xs_ai'], dtype=complex128)*(1e-4)**2

    dens = array(parameters['dens'], dtype=complex64)
    d = array(parameters['d'], dtype=float64)
    magn = array(parameters['magn'], dtype=float64)
    # Transform to radians
    magn_ang = array(parameters['magn_ang'], dtype=float64)*pi/180.0
    sigma = array(parameters['sigma'], dtype=float64)

    if type == instrument_string_choices['probe'][0] or type == 0:
        sld = dens*fb*instrument.getWavelength()**2/2/pi
    else:
        wl = instrument.getWavelength()
        sld = neutron_sld(abs_xs, dens, fb, wl)

    # Ordinary Paratt X-rays
    if type == instrument_string_choices['probe'][0] or type == 0:
        R = Paratt.ReflQ(Q, instrument.getWavelength(), 1.0-2.82e-5*sld, d, sigma)
    # Ordinary Paratt Neutrons
    elif type == instrument_string_choices['probe'][1] or type == 1:
        R = Paratt.ReflQ(Q, instrument.getWavelength(), 1.0-sld, d, sigma)
    # Ordinary Paratt but with magnetization
    elif type == instrument_string_choices['probe'][2] or type == 2:
        msld = 2.645e-5*magn*dens*instrument.getWavelength()**2/2/pi
        # Polarization uu or ++
        if pol == instrument_string_choices['pol'][0] or pol == 0:
            R = Paratt.ReflQ(Q, instrument.getWavelength(),
                             1.0-sld-msld, d, sigma)
        # Polarization dd or --
        elif pol == instrument_string_choices['pol'][1] or pol == 1:
            R = Paratt.ReflQ(Q, instrument.getWavelength(),
                             1.0-sld+msld, d, sigma)
        # Spin asymmetry (R+ - R-)/(R+ + R-)
        elif pol == instrument_string_choices['pol'][3] or pol == 3:
            Rp = Paratt.ReflQ(Q, instrument.getWavelength(), 1.0-sld-msld, d, sigma)
            Rm = Paratt.ReflQ(Q, instrument.getWavelength(), 1.0-sld+msld, d, sigma)
            R = (Rp - Rm)/(Rp + Rm)
        else:
            raise ValueError('The value of the polarization is WRONG.'
                             ' It should be uu(0) or dd(1)')
    # Spin flip
    elif type == instrument_string_choices['probe'][3] or type == 3:
        # Check if we have calculated the same sample previously:
        # BUGFIX: Q_ok could previously be unbound when Buffer.TwoThetaQz
        # was None; default to False so a fresh calculation is forced.
        Q_ok = False
        if Buffer.TwoThetaQz is not None:
            Q_ok = Buffer.TwoThetaQz.shape == Q.shape
            if Q_ok:
                # BUGFIX: this used to be any(not_equal(...)), i.e. True when
                # the arrays DIFFER, which inverted the cache test below so
                # stale results could be reused for a changed Q and identical
                # inputs were recalculated. Q_ok now means "Q is unchanged".
                Q_ok = not any(not_equal(Buffer.TwoThetaQz, Q))
        if Buffer.parameters != parameters or not Q_ok:
            msld = 2.645e-5*magn*dens*instrument.getWavelength()**2/2/pi
            np = 1.0-sld-msld
            nm = 1.0-sld+msld
            Vp = (2*pi/instrument.getWavelength())**2*(1-np**2)
            Vm = (2*pi/instrument.getWavelength())**2*(1-nm**2)
            (Ruu, Rdd, Rud, Rdu) = MatrixNeutron.Refl(Q, Vp, Vm, d, magn_ang, sigma)
            Buffer.Ruu = Ruu; Buffer.Rdd = Rdd; Buffer.Rud = Rud
            Buffer.parameters = parameters.copy()
            Buffer.TwoThetaQz = Q.copy()
        # Polarization uu or ++
        if pol == instrument_string_choices['pol'][0] or pol == 0:
            R = Buffer.Ruu
        # Polarization dd or --
        elif pol == instrument_string_choices['pol'][1] or pol == 1:
            R = Buffer.Rdd
        # Polarization ud or +- (spin flip)
        elif (pol == instrument_string_choices['pol'][2] or pol == 2 or
              pol == instrument_string_choices['pol'][4] or pol == 4):
            R = Buffer.Rud
        # Calculating the asymmetry ass
        elif pol == instrument_string_choices['pol'][3] or pol == 3:
            R = (Buffer.Ruu - Buffer.Rdd)/(Buffer.Ruu + Buffer.Rdd + 2*Buffer.Rud)
        else:
            raise ValueError('The value of the polarization is WRONG.'
                             ' It should be uu(0), dd(1) or ud(2)')
    # tof
    elif type == instrument_string_choices['probe'][4] or type == 4:
        wl = 4*pi*sin(instrument.getIncangle()*pi/180)/Q
        sld = neutron_sld(abs_xs[:, newaxis], dens[:, newaxis], fb[:, newaxis], wl)
        R = Paratt.Refl_nvary2(instrument.getIncangle()*ones(Q.shape),
                               (4*pi*sin(instrument.getIncangle()*pi/180)/Q),
                               1.0-sld, d, sigma)
    # tof spin polarized
    elif type == instrument_string_choices['probe'][5] or type == 5:
        wl = 4*pi*sin(instrument.getIncangle()*pi/180)/Q
        sld = neutron_sld(abs_xs[:, newaxis], dens[:, newaxis], fb[:, newaxis], wl)
        msld = 2.645e-5*magn[:, newaxis]*dens[:, newaxis] \
               *(4*pi*sin(instrument.getIncangle()*pi/180)/Q)**2/2/pi
        # polarization uu or ++
        if pol == instrument_string_choices['pol'][0] or pol == 0:
            R = Paratt.Refl_nvary2(instrument.getIncangle()*ones(Q.shape),
                                   (4*pi*sin(instrument.getIncangle()*pi/180)/Q),
                                   1.0-sld-msld, d, sigma)
        # polarization dd or --
        elif pol == instrument_string_choices['pol'][1] or pol == 1:
            R = Paratt.Refl_nvary2(instrument.getIncangle()*ones(Q.shape),
                                   (4*pi*sin(instrument.getIncangle()*pi/180)/Q),
                                   1.0-sld+msld, d, sigma)
        # Calculating the asymmetry
        elif pol == instrument_string_choices['pol'][3] or pol == 3:
            Rd = Paratt.Refl_nvary2(instrument.getIncangle()*ones(Q.shape),
                                    (4*pi*sin(instrument.getIncangle()*pi/180)/Q),
                                    1.0-sld+msld, d, sigma)
            Ru = Paratt.Refl_nvary2(instrument.getIncangle()*ones(Q.shape),
                                    (4*pi*sin(instrument.getIncangle()*pi/180)/Q),
                                    1.0-sld-msld, d, sigma)
            R = (Ru - Rd)/(Ru + Rd)
        else:
            raise ValueError('The value of the polarization is WRONG.'
                             ' It should be uu(0) or dd(1) or ass')
    else:
        raise ValueError('The choice of probe is WRONG')
    # FootprintCorrections
    foocor = footprintcorr(Q, instrument)
    # Resolution corrections
    R = resolutioncorr(R, TwoThetaQz, foocor, instrument, weight)

    return R*instrument.getI0() + instrument.getIbkg()
def EnergySpecular(Energy, TwoThetaQz,sample,instrument):
    ''' Simulate the specular signal from sample when probed with instrument. Energy should be in eV.

    Only the x-ray probe is supported (any other probe choice raises a
    ValueError below), and only restype 0 ("no resolution") is allowed.

    # BEGIN Parameters
    Energy data.x
    TwoThetaQz 3.0
    # END Parameters
    '''
    # preamble to get it working with my class interface
    restype = instrument.getRestype()
    #TODO: Fix so that resolution can be included.
    if restype != 0 and restype != instrument_string_choices['restype'][0]:
        raise ValueError('Only no resolution is allowed for energy scans.')

    # Wavelength (AA) for every energy point of the scan.
    wl = AA_to_eV/Energy

    # TTH values given as x
    if instrument.getCoords() == instrument_string_choices['coords'][1] \
            or instrument.getCoords() == 1:
        theta = TwoThetaQz/2.0
    # Q vector given....
    elif instrument.getCoords() == instrument_string_choices['coords'][0] \
            or instrument.getCoords() == 0:
        theta = arcsin(TwoThetaQz * wl / 4 / pi)*180.0/pi
    else:
        raise ValueError('The value for coordinates, coords, is WRONG!'
                         'should be q(0) or tth(1).')
    # Apply the two-theta offset when converting back to Q.
    Q = 4 * pi / wl * sin((2*theta + instrument.getTthoff()) * pi / 360.0)

    type = instrument.getProbe()
    parameters = sample.resolveLayerParameters()
    if type == instrument_string_choices['probe'][0] or type==0:
        # X-rays: energy-dependent scattering factors, one column per energy.
        fb = refl.cast_to_array(parameters['f'], Energy)
    else:
        fb = array(parameters['b'], dtype = complex64)*1e-5
        abs_xs = array(parameters['xs_ai'], dtype = complex64)*(1e-4)**2

    dens = array(parameters['dens'], dtype = complex64)
    d = array(parameters['d'], dtype = float64)
    sigma = array(parameters['sigma'], dtype = float64)

    if type == instrument_string_choices['probe'][0] or type == 0:
        # 2D sld: layers along axis 0, energies along axis 1.
        sld = dens[:, newaxis]*fb*wl**2/2/pi
    else:
        # NOTE(review): this neutron branch is unreachable in practice since
        # non-x-ray probes raise 'The choice of probe is WRONG' below.
        wl = instrument.getWavelength()
        sld = dens*(wl**2/2/pi*sqrt(fb**2 - (abs_xs/2.0/wl)**2) - 1.0J*abs_xs*wl/4/pi)
    # Ordinary Paratt X-rays
    if type == instrument_string_choices['probe'][0] or type == 0:
        #R = Paratt.ReflQ(Q,instrument.getWavelength(),1.0-2.82e-5*sld,d,sigma)
        R = Paratt.Refl_nvary2(theta, wl, 1.0 - 2.82e-5*sld, d, sigma)
    else:
        raise ValueError('The choice of probe is WRONG')
    #TODO: Fix corrections
    #FootprintCorrections
    #foocor = footprintcorr(Q, instrument)
    #Resolution corrections
    #R = resolutioncorr(R, TwoThetaQz, foocor, instrument, weight)

    return R*instrument.getI0() + instrument.getIbkg()
def OffSpecular(TwoThetaQz,ThetaQx,sample,instrument):
    ''' Function that simulates the off-specular signal (not implemented)

    # BEGIN Parameters
    TwoThetaQz 1.0
    ThetaQx data.x
    # END Parameters
    '''
    # Off-specular scattering is not handled by this model; raise directly.
    # (The unreachable "return" that used to follow this raise is removed,
    # and the typo "insteads" in the message is fixed.)
    raise NotImplementedError('Not implemented use model interdiff instead')
def SLD_calculations(z, item, sample, inst):
    ''' Calculates the scatteringlength density as at the positions z

    if item is None or "all" the function returns a dictonary of values.
    Otherwise it returns the item as identified by its string.

    # BEGIN Parameters
    z data.x
    item 'Re'
    # END Parameters
    '''
    parameters = sample.resolveLayerParameters()
    dens = array(parameters['dens'], dtype = complex64)
    e = AA_to_eV/inst.getWavelength()
    f = refl.cast_to_array(parameters['f'], e)
    b = array(parameters['b'], dtype=complex64)*1e-5
    abs_xs = array(parameters['xs_ai'], dtype=complex64)*(1e-4)**2
    wl = inst.getWavelength()
    type = inst.getProbe()
    magnetic = False
    mag_sld = 0
    sld_unit = 'r_{e}/\AA^{3}'
    if type == instrument_string_choices['probe'][0] or type == 0:
        # X-rays: sld in classical electron radii per cubic AA
        sld = dens*f
    elif type == instrument_string_choices['probe'][1] or type == 1 or\
            type == instrument_string_choices['probe'][4] or type == 4:
        # Unpolarized neutrons
        sld = dens*(wl**2/2/pi*b - 1.0J*abs_xs*wl/4/pi)/1e-5/(wl**2/2/pi)
        sld_unit = 'fm/\AA^{3}'
    else:
        # Polarized neutrons: also compute the magnetic sld components
        magnetic = True
        sld = dens*(wl**2/2/pi*b - 1.0J*abs_xs*wl/4/pi)/1e-5/(wl**2/2/pi)
        magn = array(parameters['magn'], dtype=float64)
        #Transform to radians
        magn_ang = array(parameters['magn_ang'], dtype=float64)*pi/180.0
        mag_sld = 2.645*magn*dens
        mag_sld_x = mag_sld*cos(magn_ang)
        mag_sld_y = mag_sld*sin(magn_ang)
        sld_unit = 'fm/\AA^{3}'

    d = array(parameters['d'], dtype=float64)
    d = d[1:-1]
    # Include one extra element - the zero pos (substrate/film interface)
    int_pos = cumsum(r_[0,d])
    sigma = array(parameters['sigma'], dtype = float64)[:-1] + 1e-7
    # BUGFIX: use "is None" - comparing a numpy array to None with ==
    # yields an element-wise array, which is invalid in a boolean context.
    if z is None:
        z = arange(-sigma[0]*5, int_pos.max()+sigma[-1]*5, 0.5)
    if not magnetic:
        rho = sum((sld[:-1] - sld[1:])*(0.5 -\
            0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sld[-1]
        dic = {'Re': real(rho), 'Im': imag(rho), 'z':z,
               'SLD unit': sld_unit}
    else:
        sld_p = sld + mag_sld
        sld_m = sld - mag_sld
        rho_p = sum((sld_p[:-1] - sld_p[1:])*(0.5 -\
            0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sld_p[-1]
        rho_m = sum((sld_m[:-1] - sld_m[1:])*(0.5 -\
            0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sld_m[-1]
        rho_mag_x = sum((mag_sld_x[:-1] - mag_sld_x[1:])*
                        (0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + mag_sld_x[-1]
        rho_mag_y = sum((mag_sld_y[:-1] - mag_sld_y[1:])*
                        (0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + mag_sld_y[-1]
        rho_nucl = (rho_p + rho_m)/2.
        dic = {'Re non-mag': real(rho_nucl), 'Im non-mag': imag(rho_nucl),\
               'mag': real(rho_p - rho_m)/2, 'z':z, 'mag_x': rho_mag_x, 'mag_y': rho_mag_y,
               'SLD unit': sld_unit}
    # BUGFIX: "item is None" instead of "== None", and catch only the failing
    # dictionary lookup instead of a bare except that masked all errors.
    if item is None or item == 'all':
        return dic
    try:
        return dic[item]
    except KeyError:
        raise ValueError('The chosen item, %s, does not exist'%item)
# Dispatch table consumed by refl.MakeClasses: maps the simulation names
# exposed to the GUI onto the functions defined above.
SimulationFunctions={'Specular':Specular,
                     'OffSpecular':OffSpecular,
                     'SLD': SLD_calculations,
                     'EnergySpecular': EnergySpecular,
                     }

# Generate the model's Instrument/Layer/Stack/Sample classes from the
# parameter dictionaries and the simulation dispatch table.
(Instrument, Layer, Stack, Sample) = refl.MakeClasses(InstrumentParameters,\
    LayerParameters, StackParameters, SampleParameters, SimulationFunctions,\
    ModelID)

if __name__=='__main__':
    # The module has no standalone behaviour; it is imported by the framework.
    pass
|
haozhangphd/genx-py3
|
genx/models/spec_nx.py
|
Python
|
gpl-3.0
| 28,153
|
[
"Gaussian"
] |
db5654cc455f7403f8b9e94373f303a06c81636249c90536bb7f5179ee1c23d9
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Modules for handling vtkRenderWindowInteractor events"""
from __future__ import division
from vistrails.core.modules.vistrails_module import Module, NotCacheable
from vistrails.gui.modules.source_configure import SourceConfigurationWidget
from vistrails.gui.modules.python_source_configure import PythonEditor
import urllib
################################################################################
class HandlerConfigurationWidget(SourceConfigurationWidget):
    """Configuration widget for editing a vtkInteractionHandler's code.

    Reuses the PythonSource-style configuration dialog (with a Python
    editor) but binds it to the 'Handler' input port instead of 'source'.
    """

    def __init__(self, module, controller, parent=None):
        """ HandlerConfigurationWidget(module: Module,
                                       controller: VistrailController,
                                       parent: QWidget)
                                       -> HandlerConfigurationWidget
        Setup the dialog to similar to PythonSource but with a
        different name

        """
        # The two False flags and portName='Handler' are forwarded to the
        # SourceConfigurationWidget base class unchanged.
        SourceConfigurationWidget.__init__(self, module, controller,
                                           PythonEditor, False, False, parent,
                                           portName='Handler')
class vtkInteractionHandler(NotCacheable, Module):
    """
    vtkInteractionHandler allow users to insert callback code for interacting
    with the vtkRenderWindowInteractor InteractionEvent

    The user-supplied 'Handler' source is URL-quoted Python code defining
    functions named after VTK events ('InteractionEvent' ->
    'interactionHandler', etc.); an observer is registered for every event
    whose handler function is present in the source.
    """
    _settings={'configureWidgetType': HandlerConfigurationWidget}
    _input_ports = [('Observer', 'vtkInteractorObserver'),
                    ('Handler', 'basic:String', True),
                    ('SharedData', 'basic:Variant')]
    _output_ports =[('Instance', 'vtkInteractionHandler')]

    # Since vtkCommand is not wrapped in Python, we need to hardcoded all events
    # string from vtkCommand.h
    vtkEvents = [
        'AnyEvent',
        'DeleteEvent',
        'StartEvent',
        'EndEvent',
        'RenderEvent',
        'ProgressEvent',
        'PickEvent',
        'StartPickEvent',
        'EndPickEvent',
        'AbortCheckEvent',
        'ExitEvent',
        'LeftButtonPressEvent',
        'LeftButtonReleaseEvent',
        'MiddleButtonPressEvent',
        'MiddleButtonReleaseEvent',
        'RightButtonPressEvent',
        'RightButtonReleaseEvent',
        'EnterEvent',
        'LeaveEvent',
        'KeyPressEvent',
        'KeyReleaseEvent',
        'CharEvent',
        'ExposeEvent',
        'ConfigureEvent',
        'TimerEvent',
        'MouseMoveEvent',
        'MouseWheelForwardEvent',
        'MouseWheelBackwardEvent',
        'ResetCameraEvent',
        'ResetCameraClippingRangeEvent',
        'ModifiedEvent',
        'WindowLevelEvent',
        'StartWindowLevelEvent',
        'EndWindowLevelEvent',
        'ResetWindowLevelEvent',
        'SetOutputEvent',
        'ErrorEvent',
        'WarningEvent',
        'StartInteractionEvent',
        'InteractionEvent',
        'EndInteractionEvent',
        'EnableEvent',
        'DisableEvent',
        'CreateTimerEvent',
        'DestroyTimerEvent',
        'PlacePointEvent',
        'PlaceWidgetEvent',
        'CursorChangedEvent',
        'ExecuteInformationEvent',
        'RenderWindowMessageEvent',
        'WrongTagEvent',
        'StartAnimationCueEvent',
        'AnimationCueTickEvent',
        'EndAnimationCueEvent',
        'VolumeMapperRenderEndEvent',
        'VolumeMapperRenderProgressEvent',
        'VolumeMapperRenderStartEvent',
        'VolumeMapperComputeGradientsEndEvent',
        'VolumeMapperComputeGradientsProgressEvent',
        'VolumeMapperComputeGradientsStartEvent',
        'WidgetModifiedEvent',
        'WidgetValueChangedEvent',
        'WidgetActivateEvent',
        'ConnectionCreatedEvent',
        'ConnectionClosedEvent',
        'DomainModifiedEvent',
        'PropertyModifiedEvent',
        'UpdateEvent',
        'RegisterEvent',
        'UnRegisterEvent',
        'UpdateInformationEvent']

    def __init__(self):
        Module.__init__(self)
        # Cached copies of the input-port values, populated in compute().
        self.observer = None
        self.handler = None
        self.shareddata = None

    def compute(self):
        """ compute() -> None
        Actually compute nothing

        Reads the input ports and registers self.eventHandler as a VTK
        observer for every event whose handler function is defined in the
        user-supplied source.
        """
        self.observer = self.force_get_input('Observer')
        self.handler = self.force_get_input('Handler', '')
        self.shareddata = self.force_get_input_list('SharedData')
        # A single SharedData value is unwrapped from the list for convenience.
        if len(self.shareddata)==1:
            self.shareddata = self.shareddata[0]
        if self.observer:
            # The Handler port carries URL-quoted source code.
            source = urllib.unquote(self.handler)
            observer = self.observer.vtkInstance
            for e in vtkInteractionHandler.vtkEvents:
                # Map event name to handler name, e.g.
                # 'LeftButtonPressEvent' -> 'leftButtonPressHandler'.
                f = e[0].lower() + e[1:]
                f = f.replace('Event', 'Handler')
                source += ('\nif locals().has_key("%s"):\n' % f +
                           '\tobserver.AddObserver("%s", ' % e +
                           'self.eventHandler)\n')
            # NOTE(review): executes user-provided workflow code; only safe
            # because VisTrails workflows are trusted input by design.
            exec(source)
            if hasattr(self.observer.vtkInstance, 'PlaceWidget'):
                self.observer.vtkInstance.PlaceWidget()
        self.set_output('Instance', self)

    def eventHandler(self, obj, event):
        """ eventHandler(obj: vtkObject, event: str) -> None
        A proxy for all vtk events to direct to the correct calls

        Re-executes the handler source and, if a function matching the
        event name exists, calls it with (obj, self.shareddata).
        """
        if self.handler!='':
            source = urllib.unquote(self.handler)
            f = event[0].lower() + event[1:]
            f = f.replace('Event', 'Handler')
            myGlobals = globals()
            myGlobals.update({'self':self})
            exec(source + ('\nif locals().has_key("%s"):\n' % f)+
                 ('\t%s(obj, self.shareddata)' % f)) in myGlobals, locals()

    def clear(self):
        """ clear() -> None
        Remove event handler so the object can be freed correctly

        """
        # Remove all observers
        if self.observer:
            for e in vtkInteractionHandler.vtkEvents:
                self.observer.vtkInstance.RemoveObservers(e)
        Module.clear(self)

    def repaintCells(self):
        """ repaintCells() -> None
        Redraw all cells on the current sheet

        """
        # Imported lazily to avoid a hard dependency on the spreadsheet
        # package at module-load time.
        from vistrails.packages.spreadsheet.spreadsheet_controller \
            import spreadsheetController
        from vistrails.packages.spreadsheet.spreadsheet_event \
            import RepaintCurrentSheetEvent
        spreadsheetController.postEventToSpreadsheet(RepaintCurrentSheetEvent())
# Modules exported by this file; picked up by the VisTrails package loader.
_modules = [vtkInteractionHandler]
|
VisTrails/VisTrails
|
vistrails/packages/vtk/vtkhandler.py
|
Python
|
bsd-3-clause
| 8,298
|
[
"VTK"
] |
c6c917bd3f5b3e1c2093922705607b517bb85896f72e46c8c2fc5e0e1d469b15
|
"""
chapter6.py
==========
Models from Chapter 6 of [G&L 2012].
- Model REG = Regional Model (Country divided into North and South regions.)
[G&L 2012] "Monetary Economics: An Integrated Approach to credit, Money, Income, Production
and Wealth; Second Edition", by Wynne Godley and Marc Lavoie, Palgrave Macmillan, 2012.
ISBN 978-0-230-30184-9
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sfc_models.gl_book import GL_book_model
from sfc_models.objects import *
class REG(GL_book_model):
    """
    Implements Model REG from Chapter 6 of G&L. REG = "Regional."

    This could have been attempted with two countries (which have the same
    currency), but there's only a single central bank and Treasury. Splitting
    into two countries would mean that we would need to aggregate two different
    government sectors.
    """

    def build_model(self):
        # Assemble the two-region economy: a single government (Treasury +
        # central bank) plus duplicated household/business/goods/labour
        # sectors for the North (_N) and South (_S).
        country = self.Country
        # As before, there's only one copy of the governmental sectors
        tre = Treasury(country, 'TRE', 'Treasury')
        cb = CentralBank(country, 'CB', 'Central Bank', treasury=tre)
        # Now we split
        hh_n = Household(country, 'HH_N', 'Household - North', alpha_income=.6, alpha_fin=.4,
                         labour_name='LAB_N', consumption_good_name='GOOD_N')
        hh_s = Household(country, 'HH_S', 'Household - South', alpha_income=.7, alpha_fin=.3,
                         labour_name='LAB_S', consumption_good_name='GOOD_S')
        goods_n = Market(country, 'GOOD_N', 'Goods market - North')
        goods_s = Market(country, 'GOOD_S', 'Goods market - South')
        goods_n.AddVariable('MU', 'Propensity to import', '0.18781')
        goods_s.AddVariable('MU', 'Propensity to import', '0.18781')
        # A literally non-profit business sector
        bus_n = FixedMarginBusinessMultiOutput(country, 'BUS_N', market_list=[goods_n, goods_s],
                                               profit_margin=0.0, labour_input_name='LAB_N')
        bus_s = FixedMarginBusinessMultiOutput(country, 'BUS_S', market_list=[goods_n, goods_s],
                                               profit_margin=0.0, labour_input_name='LAB_S')
        # Create the linkages between sectors - tax flow, markets - labour ('LAB'), goods ('GOOD')
        tax = TaxFlow(country, 'TF', 'TaxFlow', taxrate=.2, taxes_paid_to='TRE')
        labour_s = Market(country, 'LAB_S', 'Labour market')
        labour_n = Market(country, 'LAB_N', 'Labour market')
        mm = MoneyMarket(country, issuer_short_code='CB')
        dep = DepositMarket(country, issuer_short_code='TRE')
        # Create the goods demand function
        Y_N = hh_n.GetVariableName('INC')
        Y_S = hh_s.GetVariableName('INC')
        # Imports: each region's goods market is partly supplied by the OTHER
        # region's business sector, in proportion (MU) to local income.
        goods_n.AddSupplier(bus_s, 'MU*{0}'.format(Y_N))
        goods_n.AddSupplier(bus_n)
        goods_s.AddSupplier(bus_s)
        goods_s.AddSupplier(bus_n, 'MU*{0}'.format(Y_S))
        # Create the demand for deposits. ('MON' is the residual asset.)
        hh_n.AddVariable('L0', 'lambda_0: share of bills in wealth', '0.635')
        hh_n.AddVariable('L1', 'lambda_1: parameter related to interest rate', '5.')
        hh_n.AddVariable('L2', 'lambda_2: parameter related to disposable income', '.01')
        # Generate the equation. Need to get the name of the interest rate variable
        r = dep.GetVariableName('r')
        # The format() call will replace '{0}' with the contents of the 'r' variable.
        eqn = 'L0 + L1 * {0} - L2 * (AfterTax/F)'.format(r)
        hh_n.GenerateAssetWeighting([('DEP', eqn)], 'MON')
        # Create the demand for deposits. ('MON' is the residual asset.)
        hh_s.AddVariable('L0', 'lambda_0: share of bills in wealth', '0.67')
        hh_s.AddVariable('L1', 'lambda_1: parameter related to interest rate', '6.')
        hh_s.AddVariable('L2', 'lambda_2: parameter related to disposable income', '.07')
        # Generate the equation. Need to get the name of the interest rate variable
        r = dep.GetVariableName('r')
        # The format() call will replace '{0}' with the contents of the 'r' variable.
        eqn = 'L0 + L1 * {0} - L2 * (AfterTax/F)'.format(r)
        hh_s.GenerateAssetWeighting([('DEP', eqn)], 'MON')
        # Add a decorative equation: Government Fiscal Balance
        # = Primary Balance - Interest expense + Central Bank Dividend (= interest
        # received by the central bank).
        tre.AddVariable('FISCBAL', 'Fiscal Balance', 'PRIM_BAL - INTDEP + CB__INTDEP')
        # Total government demand for goods is split North/South.
        tre.SetEquationRightHandSide('DEM_GOOD', 'DEM_GOOD_N + DEM_GOOD_S')
        tre.AddVariable('DEM_GOOD_N', 'Demand for goods in the North', '')
        tre.AddVariable('DEM_GOOD_S', 'Demand for goods in the South', '')
        if self.UseBookExogenous:
            # Need to set the exogenous variable - Government demand for Goods ("G" in economist symbology)
            tre.SetExogenous('DEM_GOOD_N', '[20.,] * 105')
            tre.SetExogenous('DEM_GOOD_S', '[20.,] * 105')
            dep.SetExogenous('r', '[.025,]*105')
            # The book's shock: the South's import propensity rises at k=5.
            goods_s.SetExogenous('MU', [0.18781] * 5 + [0.20781] * 105)
            # NOTE:
            # Initial conditions are only partial; there may be issues with some
            # variables.
            self.Model.AddInitialCondition('HH_N', 'AfterTax', 86.486)
            self.Model.AddInitialCondition('HH_S', 'AfterTax', 86.486)
            self.Model.AddInitialCondition('HH_N', 'F', 86.486)
            self.Model.AddInitialCondition('HH_N', 'DEM_DEP', 64.865)
            self.Model.AddInitialCondition('HH_S', 'F', 86.486)
            self.Model.AddInitialCondition('HH_S', 'DEM_DEP', 64.865)
            self.Model.AddInitialCondition('TRE', 'F', 2. * -86.486)
            self.Model.AddGlobalEquation('t', 'decorated time axis', '1955. + k')
        return self.Model

    # noinspection PyPep8,PyPep8,PyPep8,PyPep8,PyPep8
    def expected_output(self):
        """
        Expected output for the model (using default input).

        Based on EViews output using code from Gennaro Zezza (from sfcmodels.net)
        NOTE: A spreadsheet at sfcmodels.net gives different output; income is changing during the
        same period as the rate change.

        We ignore value at t=0
        :return: list
        """
        out = [
            ('t', [None, 1956., 1957., 1958., ]),
            ('TRE__DEM_GOOD', [None, 40., 40., 40., 40.]),  # G
            ('DEP__r', [0.025, ] * 10),
            ('HH_N__WGT_DEP', [None, 0.75, 0.75, 0.75, 0.75, ]),
            # Weight of deposits (bills)
            ('HH_N__AfterTax',
             '86.49\t86.49\t86.49\t86.49\t86.49\t88.27\t88.57\t88.79\t88.96\t89.09\t89.19\t89.26\t89.31\t89.35'),
            # YD
            # ('TRE_T', ), # T
            ('HH_N__DEM_GOOD_N',
             'None\t86.48667\t86.48656\t86.48655\t86.48654\t87.55877\t88.02118\t88.37395\t88.64268\t88.84701\t89.00206'),
            ('HH_N__SUP_LAB_N',
             'None\t106.4866\t106.4866\t106.4866\t106.4865\t108.7204\t109.0749\t109.3441\t109.5482\t109.7027\t109.8192\t109.9068\t109.9724\t110.0213\t110.0575\t110.0841\t110.1035'),
            ('HH_S__AfterTax',
             '86.48666\t86.48656\t86.48655\t86.48654\t86.48654\t84.37456\t84.20819\t84.07316\t83.96609\t83.88098\t83.81313\t83.75889\t83.7154\t83.68043\t83.65222\t83.62939\t83.61085\t83.59574\t83.58338\t83.57325\t83.5649\t83.55801\t83.5523\t83.54755\t83.5436\t83.54028\t83.53751\t83.53517\t83.5332\t83.53154\t83.53013\t83.52893'),
            ('HH_N__DEM_MON',
             'None\t21.62\t21.62\t21.62\t21.62\t21.81\t21.95\t22.05\t22.13\t22.19\t22.23\t22.26\t22.29'),
            # high-powered money (H)
        ]
        return out
class REG2(GL_book_model):  # pragma: no cover
    """
    Implements Model REG from Chapter 6 of G&L. REG = "Regional."

    This version of REG splits the model into three "countries."
    - Central government sector
    - Region (Province) #1 - The North
    - Region (Province) #2 - The South

    Ignores any existing model that is passed in; the entire Model object is built
    from scratch.
    """

    def build_country(self, model, paramz):
        """
        Builds a country object.

        Creates one Region holding a household, a goods market, a business
        sector and a labour market, with parameters taken from paramz.
        (The consumption alphas in paramz are currently unused here —
        TODO confirm whether they should be passed to Household.)

        :param model: Model
        :param paramz: dict
        :return: None
        """
        country_name = paramz['Country Name']
        country = Region(model, code=paramz['Country'], long_name=country_name)
        self.Country = country
        hh = Household(country, code='HH', long_name='Household ' + country_name)
        goods = Market(country, 'GOOD', 'Goods market ' + country_name)
        bus = FixedMarginBusinessMultiOutput(country, 'BUS', 'Business Sector', market_list=[goods, ])
        goods.AddSupplier(bus)
        goods.AddVariable('MU', 'Propensity to import', paramz['mu'])
        labour = Market(country, 'LAB', 'Labour market: ' + country_name)
        # Create the goods demand function
        # I normally would not commit a file in a half-finished state, but I want to make sure
        # that I upload a lot of key changes to GitHub. The work in this class should have been
        # done in a different branch; oops.
        # Create the demand for deposits. ('MON' is the residual asset.)
        hh.AddVariable('L0', 'lambda_0: share of bills in wealth', paramz['L0'])
        hh.AddVariable('L1', 'lambda_1: parameter related to interest rate', paramz['L1'])
        hh.AddVariable('L2', 'lambda_2: parameter related to disposable income', paramz['L2'])
        # Generate the equation. Need to get the name of the interest rate variable
        r = model['GOV']['DEP'].GetVariableName('r')
        # The format() call will replace '{0}' with the contents of the 'r' variable.
        eqn = 'L0 + L1 * {0} - L2 * (AfterTax/F)'.format(r)
        hh.GenerateAssetWeighting([('DEP', eqn)], 'MON')

    def other_country(self, country):
        # Two-region helper: maps 'N' <-> 'S'.
        if country == 'N':
            return 'S'
        return 'N'

    def generate_supply_allocation(self, mod, country):
        # Wire up imports: this country's goods market gets a supplier from
        # the other region's business sector, scaled by local income (MU*Y).
        Y = mod[country]['HH'].GetVariableName('INC')
        other = self.other_country(country)
        market = mod[country]['GOOD']
        market.AddSupplier(mod[other]['BUS'], 'MU*{0}'.format(Y))
        mod[other]['BUS'].AddMarket(market)

    def build_model(self):
        """
        Build the three-region model: central government plus the North
        and South regions, then cross-link the goods markets.

        :return: Model
        """
        model = Model()
        central_gov = Region(model, code='GOV', long_name='Central Government Sector')
        tre = Treasury(central_gov, 'TRE', 'Treasury')
        cb = CentralBank(central_gov, 'CB', 'Central Bank', tre)
        mm = MoneyMarket(central_gov,issuer_short_code='CB')
        dep = DepositMarket(central_gov, issuer_short_code='TRE')
        tax = TaxFlow(central_gov, 'TF', 'TaxFlow', taxrate=.2, taxes_paid_to='TRE')
        # Total government goods demand is split across the two regions.
        tre.SetEquationRightHandSide('DEM_GOOD','DEM_N_GOOD + DEM_S_GOOD')
        tre.AddVariable('DEM_N_GOOD', 'Demand for goods in the North', '')
        tre.AddVariable('DEM_S_GOOD', 'Demand for goods in the South', '')
        paramz = {
            'Country': 'N',
            'Country Name': 'North',
            'alpha_income': .6,
            'alpha_fin': .4,
            'mu': '0.18761',
            'L0': '0.635',
            'L1': '5.',
            'L2': '.01',
        }
        self.build_country(model, paramz)
        paramz = {
            'Country': 'S',
            'Country Name': 'South',
            'alpha_income': .7,
            'alpha_fin': .3,
            'mu': '0.18761',
            'L0': '0.67',
            'L1': '6.',
            'L2': '.07',
        }
        self.build_country(model, paramz)
        self.generate_supply_allocation(model, 'N')
        self.generate_supply_allocation(model, 'S')
        self.Model = model
        if self.UseBookExogenous:
            # Need to set the exogenous variable - Government demand for Goods ("G" in economist symbology)
            tre.SetExogenous('DEM_N_GOOD', '[20.,] * 105')
            tre.SetExogenous('DEM_S_GOOD', '[20.,] * 105')
            dep.SetExogenous('r', '[.025,]*105')
            # The book's shock: the South's import propensity rises at k=5.
            model['S']['GOOD'].SetExogenous('MU', [0.18781] * 5 + [0.20781] * 105)
            # NOTE:
            # Initial conditions are only partial; there may be issues with some
            # variables.
            self.Model.AddInitialCondition('N_HH', 'AfterTax', 86.486)
            self.Model.AddInitialCondition('S_HH', 'AfterTax', 86.486)
            self.Model.AddInitialCondition('N_HH', 'F', 86.486)
            self.Model.AddInitialCondition('N_HH', 'DEM_DEP', 64.865)
            self.Model.AddInitialCondition('S_HH', 'F', 86.486)
            self.Model.AddInitialCondition('S_HH', 'DEM_DEP', 64.865)
            self.Model.AddInitialCondition('GOV_TRE', 'F', 2. * -86.486)
            self.Model.AddGlobalEquation('t', 'decorated time axis', '1955. + k')
        return self.Model

    # noinspection PyPep8,PyPep8,PyPep8,PyPep8,PyPep8
    def expected_output(self):
        """
        Expected output for the model (using default input).

        Based on EViews output using code from Gennaro Zezza (from sfcmodels.net)
        NOTE: A spreadsheet at sfcmodels.net gives different output; income is changing during the
        same period as the rate change.

        We ignore value at t=0
        :return: list
        """
        out = [
            ('t', [None, 1956., 1957., 1958., ]),
            ('GOV_TRE__DEM_GOOD', [None, 40., 40., 40., 40.]),  # G
            ('GOV_DEP__r', [0.025, ] * 10),
            ('N_HH__WGT_DEP', [None, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, ]),
            # Weight of deposits (bills)
            ('N_HH__AfterTax',
             '86.49\t86.49\t86.49\t86.49\t86.49\t88.27\t88.57\t88.79\t88.96\t89.09\t89.19\t89.26\t89.31\t89.35'),
            # YD
            # ('TRE_T', ), # T
            ('N_HH__DEM_GOOD',
             'None\t86.48667\t86.48656\t86.48655\t86.48654\t87.55877\t88.02118\t88.37395\t88.64268\t88.84701\t89.00206'),
            ('N_HH__SUP_LAB',
             'None\t106.4866\t106.4866\t106.4866\t106.4865\t108.7204\t109.0749\t109.3441\t109.5482\t109.7027\t109.8192\t109.9068\t109.9724\t110.0213\t110.0575\t110.0841\t110.1035'),
            ('S_HH__AfterTax',
             '86.48666\t86.48656\t86.48655\t86.48654\t86.48654\t84.37456\t84.20819\t84.07316\t83.96609\t83.88098\t83.81313\t83.75889\t83.7154\t83.68043\t83.65222\t83.62939\t83.61085\t83.59574\t83.58338\t83.57325\t83.5649\t83.55801\t83.5523\t83.54755\t83.5436\t83.54028\t83.53751\t83.53517\t83.5332\t83.53154\t83.53013\t83.52893'),
            ('N_HH__DEM_MON',
             'None\t21.62\t21.62\t21.62\t21.62\t21.81\t21.95\t22.05\t22.13\t22.19\t22.23\t22.26\t22.29'),
            # high-powered money (H)
        ]
        return out
class OPENG(GL_book_model): # pragma: no cover
    """
    Implements Model OPENG from Chapter 6 of G&L. OPENG = "Open, with G adjustment"
    Ignores any existing model that is passed in; the entire Model object is built
    from scratch.
    NOTE: Still under development.
    """
    def build_country(self, model, paramz):
        """
        Builds a country object.
        :param model: Model
        :param paramz: dict
        :return: None
        """
        country_name = paramz['Country Name']
        country = Country(model, code=paramz['Country'], long_name=country_name)
        self.Country = country
        # These locals (tre, cb, mm, tax, labour) are not referenced again here;
        # presumably their constructors register the sectors with `country` —
        # verify against the sfc_models framework.
        tre = Treasury(country, 'TRE', 'Treasury')
        cb = GoldStandardCentralBank(country, 'CB', 'Central Bank', tre)
        mm = MoneyMarket(country)
        dep = DepositMarket(country)
        tax = TaxFlow(country, 'TF', 'TaxFlow', .2)
        hh = Household(country, code='HH', long_name='Household ' + country_name)
        goods = Market(country, 'GOOD', 'Goods market ' + country_name)
        bus = FixedMarginBusinessMultiOutput(country, 'BUS', 'Business Sector', [goods, ])
        goods.AddSupplier(bus)
        goods.AddVariable('MU', 'Propensity to import', paramz['mu'])
        labour = Market(country, 'LAB', 'Labour market: ' + country_name)
        # Create the goods demand function
        # I normally would not commit a file in a half-finished state, but I want to make sure
        # that I upload a lot of key changes to GitHub. The work in this class should have been
        # done in a different branch; oops.
        # Create the demand for deposits. ('MON' is the residual asset.)
        hh.AddVariable('L0', 'lambda_0: share of bills in wealth', paramz['L0'])
        hh.AddVariable('L1', 'lambda_1: parameter related to interest rate', paramz['L1'])
        hh.AddVariable('L2', 'lambda_2: parameter related to disposable income', paramz['L2'])
        # Generate the equation. Need to get the name of the interest rate variable
        r = dep.GetVariableName('r')
        # The format() call will replace '{0}' with the contents of the 'r' variable.
        eqn = 'L0 + L1 * {0} - L2 * (AfterTax/F)'.format(r)
        hh.GenerateAssetWeighting([('DEP', eqn)], 'MON')
    def other_country(self, country):
        # Two-country model: given one country code, return the other.
        if country == 'N':
            return 'S'
        return 'N'
    def generate_supply_allocation(self, mod, country):
        # Route a share (MU) of this country's household income to the OTHER
        # country's business sector, i.e. imports.
        Y = mod[country]['HH'].GetVariableName('INC')
        other = self.other_country(country)
        market = mod[country]['GOOD']
        market.AddSupplier(mod[other]['BUS'], 'MU*{0}'.format(Y))
        mod[other]['BUS'].AddMarket(market)
    def build_model(self):
        """
        :return: Model
        """
        model = Model()
        ExternalSector(model)
        # Parameters for the "North" country.
        paramz = {
            'Country': 'N',
            'Country Name': 'North',
            'alpha_income': .6,
            'alpha_fin': .4,
            'mu': '0.18761',
            'L0': '0.635',
            'L1': '5.',
            'L2': '.01',
        }
        self.build_country(model, paramz)
        # Parameters for the "South" country.
        paramz = {
            'Country': 'S',
            'Country Name': 'South',
            'alpha_income': .7,
            'alpha_fin': .3,
            'mu': '0.18761',
            'L0': '0.67',
            'L1': '6.',
            'L2': '.07',
        }
        self.build_country(model, paramz)
        self.generate_supply_allocation(model, 'N')
        self.generate_supply_allocation(model, 'S')
        self.Model = model
        if self.UseBookExogenous:
            # Need to set the exogenous variable - Government demand for Goods ("G" in economist symbology)
            model['N']['TRE'].SetExogenous('DEM_GOOD', '[20.,] * 105')
            model['S']['TRE'].SetExogenous('DEM_GOOD', '[20.,] * 105')
            model['N']['DEP'].SetExogenous('r', '[.025,]*105')
            model['S']['DEP'].SetExogenous('r', '[.025,]*105')
            # NOTE(review): 0.18781 below differs from the '0.18761' mu parameter
            # used in build_country above — confirm which value matches G&L.
            model['S']['GOOD'].SetExogenous('MU', [0.18781] * 5 + [0.20781] * 105)
            # NOTE:
            # Initial conditions are only partial; there may be issues with some
            # variables.
            self.Model.AddInitialCondition('N_HH', 'AfterTax', 86.486)
            self.Model.AddInitialCondition('S_HH', 'AfterTax', 86.486)
            self.Model.AddInitialCondition('N_HH', 'F', 86.486)
            self.Model.AddInitialCondition('N_HH', 'DEM_DEP', 64.865)
            self.Model.AddInitialCondition('S_HH', 'F', 86.486)
            self.Model.AddInitialCondition('S_HH', 'DEM_DEP', 64.865)
            self.Model.AddInitialCondition('N_TRE', 'F', -86.486)
            self.Model.AddInitialCondition('S_TRE', 'F', -86.486)
        self.Model.AddGlobalEquation('t', 'decorated time axis', '1955. + k')
        return self.Model
    # noinspection PyPep8,PyPep8,PyPep8,PyPep8,PyPep8
    def expected_output(self):
        """
        Expected output for the model (using default input).
        Based on EViews output using code from Gennaro Zezza (from sfcmodels.net)
        NOTE: A spreadsheet at sfcmodels.net gives different output; income is changing during the
        same period as the rate change.
        We ignore value at t=0
        :return: list
        """
        out = [
            ('t', [None, 1956., 1957., 1958., ]),
            ('GOV_TRE__DEM_GOOD', [None, 40., 40., 40., 40.]), # G
            ('GOV_DEP__r', [0.025, ] * 10),
            ('N_HH__WGT_DEP', [None, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, ]),
            # Weight of deposits (bills)
            ('N_HH__AfterTax',
             '86.49\t86.49\t86.49\t86.49\t86.49\t88.27\t88.57\t88.79\t88.96\t89.09\t89.19\t89.26\t89.31\t89.35'),
            # YD
            # ('TRE_T', ), # T
            ('N_HH__DEM_GOOD',
             'None\t86.48667\t86.48656\t86.48655\t86.48654\t87.55877\t88.02118\t88.37395\t88.64268\t88.84701\t89.00206'),
            ('N_HH__SUP_LAB',
             'None\t106.4866\t106.4866\t106.4866\t106.4865\t108.7204\t109.0749\t109.3441\t109.5482\t109.7027\t109.8192\t109.9068\t109.9724\t110.0213\t110.0575\t110.0841\t110.1035'),
            ('S_HH__AfterTax',
             '86.48666\t86.48656\t86.48655\t86.48654\t86.48654\t84.37456\t84.20819\t84.07316\t83.96609\t83.88098\t83.81313\t83.75889\t83.7154\t83.68043\t83.65222\t83.62939\t83.61085\t83.59574\t83.58338\t83.57325\t83.5649\t83.55801\t83.5523\t83.54755\t83.5436\t83.54028\t83.53751\t83.53517\t83.5332\t83.53154\t83.53013\t83.52893'),
            ('N_HH__DEM_MON',
             'None\t21.62\t21.62\t21.62\t21.62\t21.81\t21.95\t22.05\t22.13\t22.19\t22.23\t22.26\t22.29'),
            # high-powered money (H)
        ]
        return out
|
brianr747/SFC_models
|
sfc_models/gl_book/chapter6.py
|
Python
|
apache-2.0
| 21,855
|
[
"Brian"
] |
f16daa04b222f08e4f5f4123390626035920b06db71e876b66dde7a7b200a577
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
from bigdl.orca.ray.utils import is_local
class ProcessInfo(object):
    """Value object describing one launched process group.

    Captures the captured stdout/stderr text, the process-group id, the pids
    belonging to that group, the shell return code, an arbitrary tag, and the
    ip of the node that ran the command. ``master_addr`` starts as ``None``
    and is presumably filled in later by the Ray bootstrap code — verify
    against the callers.
    """
    def __init__(self, out, err, errorcode, pgid, tag="default", pids=None, node_ip=None):
        # str() around .strip() coerces bytes-like inputs to their string repr.
        self.out = str(out.strip())
        self.err = str(err.strip())
        self.pgid = pgid
        self.pids = pids
        self.errorcode = errorcode
        self.tag = tag
        self.master_addr = None
        self.node_ip = node_ip
    def __str__(self):
        # Human-readable dump used in error messages raised by ProcessMonitor.
        return "node_ip: {} tag: {}, pgid: {}, pids: {}, returncode: {}, \
            master_addr: {}, \n {} {}".format(self.node_ip, self.tag, self.pgid,
                                              self.pids,
                                              self.errorcode,
                                              self.master_addr,
                                              self.out,
                                              self.err)
def pids_from_gpid(gpid):
    """Return the pids of all live processes whose process-group id is *gpid*.

    :param gpid: process-group id to match (as returned by ``os.getpgid``).
    :return: list of matching pids (possibly empty).
    """
    import psutil
    result = []
    for proc in psutil.process_iter():
        try:
            if os.getpgid(proc.pid) == gpid:
                result.append(proc.pid)
        except (OSError, psutil.Error):
            # The process can exit between enumeration and the getpgid()
            # call (ProcessLookupError), or we may lack permission.  The
            # original caught bare Exception; narrow to the races that can
            # actually occur so real bugs are not silently swallowed.
            pass
    return result
def session_execute(command, env=None, tag=None, fail_fast=False, timeout=120):
    """Run a shell command in its own session and capture its output.

    The child is started with ``os.setsid`` so it (and anything it spawns)
    forms a fresh process group that can later be killed as a unit.

    :param command: shell command string (executed with ``shell=True``).
    :param env: optional environment mapping for the child.
    :param tag: label stored on the returned ProcessInfo.
    :param fail_fast: if True, raise on a non-zero exit code instead of
        just printing stderr.
    :param timeout: seconds to wait in ``communicate`` before raising.
    :return: ProcessInfo describing the finished command.
    """
    pro = subprocess.Popen(
        command,
        shell=True,
        env=env,
        cwd=None,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        preexec_fn=os.setsid)
    pgid = os.getpgid(pro.pid)
    out, err = pro.communicate(timeout=timeout)
    out = out.decode("utf-8")
    err = err.decode("utf-8")
    errorcode = pro.returncode
    # Bug fix: the original printed both streams unconditionally and then
    # printed one of them AGAIN in the branches below, duplicating every
    # line of log output.  Print each stream exactly once.
    if errorcode != 0:
        if fail_fast:
            raise Exception(err)
        print(err)
    else:
        print(out)
    return ProcessInfo(out=out,
                       err=err,
                       errorcode=errorcode,
                       pgid=pgid,
                       pids=pids_from_gpid(pgid),
                       tag=tag)
class ProcessMonitor:
    """Tracks the ProcessInfo records of a launched Ray cluster.

    Splits the records into the single master and the worker (slave)
    processes, records their pgids and node ips, and — when running on a
    real cluster — immediately surfaces any remote launch failure.
    """
    def __init__(self, process_infos, sc, ray_rdd, raycontext, verbose=False):
        self.sc = sc
        self.raycontext = raycontext
        self.verbose = verbose
        self.ray_rdd = ray_rdd
        self.process_infos = process_infos
        # Per-record bookkeeping, preserving the input order.
        self.pgids = [info.pgid for info in process_infos]
        self.node_ips = [info.node_ip for info in process_infos]
        # Partition into master (has master_addr set) and everything else.
        self.master = [info for info in process_infos if info.master_addr]
        self.slaves = [info for info in process_infos if not info.master_addr]
        assert len(self.master) == 1, \
            "We should got 1 master only, but we got {}".format(len(self.master))
        self.master = self.master[0]
        if not is_local(self.sc):
            self.print_ray_remote_err_out()
    def print_ray_remote_err_out(self):
        """Raise on any failed remote process; optionally dump all of them."""
        if self.master.errorcode != 0:
            raise Exception(str(self.master))
        for worker in self.slaves:
            if worker.errorcode != 0:
                raise Exception(str(worker))
        if self.verbose:
            print(self.master)
            for worker in self.slaves:
                print(worker)
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/ray/process.py
|
Python
|
apache-2.0
| 3,914
|
[
"ORCA"
] |
bb0f185d4b31a93c0ecdca08e977f5be2020a3d153bc308a9ee8f2ccba6675c3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Packaging script for pandas-charm: a small library for converting
# character matrices (alignments) to and from pandas structures.
from setuptools import setup, find_packages
from os.path import join, dirname
from io import open
setup(
    name='pandas-charm',
    version='0.3.0',
    description=(
        'A small Python library for getting character matrices '
        '(alignments) into and out of pandas'),
    # The README next to this file doubles as the PyPI long description.
    long_description=open(
        join(dirname(__file__), 'README.rst'), encoding='utf-8').read(),
    packages=find_packages(exclude=['docs', 'tests*']),
    # Single-module distribution; the actual code lives in pandascharm.py.
    py_modules=['pandascharm'],
    install_requires=['pandas>=0.21'],
    # `pip install pandas-charm[testing]` pulls in the test-only extras.
    extras_require={'testing': [
        'coverage', 'pytest', 'biopython', 'dendropy']},
    author='Markus Englund',
    author_email='jan.markus.englund@gmail.com',
    url='https://github.com/jmenglund/pandas-charm',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords=['alignment', 'BioPython', 'DendroPy', 'pandas'],
)
|
jmenglund/pandas-charm
|
setup.py
|
Python
|
mit
| 1,371
|
[
"Biopython"
] |
1d547aad339f8206030c243a4cfab247f7ad542f748f0fc60b3d2f70c66324b4
|
# coding: utf-8
"""
Acceptance tests for Studio's Setting pages
"""
from __future__ import unicode_literals
from nose.plugins.attrib import attr
from base_studio_test import StudioCourseTest
from bok_choy.promise import EmptyPromise
from ...fixtures.course import XBlockFixtureDesc
from ..helpers import create_user_partition_json
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.settings import SettingsPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.lms.courseware import CoursewarePage
from textwrap import dedent
from xmodule.partitions.partitions import Group
class ContentGroupConfigurationTest(StudioCourseTest):
    """
    Tests for content groups in the Group Configurations Page.
    There are tests for the experiment groups in test_studio_split_test.
    """
    # NOTE: this suite uses Python 2 idioms (e.g. old-style super()) and
    # bok-choy page objects; each test drives a real browser session.
    def setUp(self):
        super(ContentGroupConfigurationTest, self).setUp()
        self.group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
    def populate_course_fixture(self, course_fixture):
        """
        Populates test course with chapter, sequential, and 1 problems.
        The problem is visible only to Group "alpha".
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            )
        )
    def create_and_verify_content_group(self, name, existing_groups):
        """
        Creates a new content group and verifies that it was properly created.

        :param name: display name for the new content group.
        :param existing_groups: number of groups expected BEFORE creation
            (0 triggers the "create first" UI path).
        :return: the page object for the created group.
        """
        self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups))
        if existing_groups == 0:
            self.group_configurations_page.create_first_content_group()
        else:
            self.group_configurations_page.add_content_group()
        config = self.group_configurations_page.content_groups[existing_groups]
        config.name = name
        # Save the content group
        self.assertEqual(config.get_text('.action-primary'), "Create")
        self.assertFalse(config.delete_button_is_present)
        config.save()
        self.assertIn(name, config.name)
        return config
    def test_no_content_groups_by_default(self):
        """
        Scenario: Ensure that message telling me to create a new content group is
        shown when no content groups exist.
        Given I have a course without content groups
        When I go to the Group Configuration page in Studio
        Then I see "You have not created any content groups yet." message
        """
        self.group_configurations_page.visit()
        self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present)
        self.assertIn(
            "You have not created any content groups yet.",
            self.group_configurations_page.no_content_groups_message_text
        )
    def test_can_create_and_edit_content_groups(self):
        """
        Scenario: Ensure that the content groups can be created and edited correctly.
        Given I have a course without content groups
        When I click button 'Add your first Content Group'
        And I set new the name and click the button 'Create'
        Then I see the new content is added and has correct data
        And I click 'New Content Group' button
        And I set the name and click the button 'Create'
        Then I see the second content group is added and has correct data
        When I edit the second content group
        And I change the name and click the button 'Save'
        Then I see the second content group is saved successfully and has the new name
        """
        self.group_configurations_page.visit()
        self.create_and_verify_content_group("New Content Group", 0)
        second_config = self.create_and_verify_content_group("Second Content Group", 1)
        # Edit the second content group
        second_config.edit()
        second_config.name = "Updated Second Content Group"
        self.assertEqual(second_config.get_text('.action-primary'), "Save")
        second_config.save()
        self.assertIn("Updated Second Content Group", second_config.name)
    def test_cannot_delete_used_content_group(self):
        """
        Scenario: Ensure that the user cannot delete used content group.
        Given I have a course with 1 Content Group
        And I go to the Group Configuration page
        When I try to delete the Content Group with name "New Content Group"
        Then I see the delete button is disabled.
        """
        # Install a cohort user partition with a single "alpha" group
        # directly on the course xblock.
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Configuration alpha,',
                        'Content Group Partition',
                        [Group("0", 'alpha')],
                        scheme="cohort"
                    )
                ],
            },
        })
        problem_data = dedent("""
            <problem markdown="Simple Problem" max_attempts="" weight="">
              <p>Choose Yes.</p>
              <choiceresponse>
                <checkboxgroup>
                  <choice correct="true">Yes</choice>
                </checkboxgroup>
              </choiceresponse>
            </problem>
        """)
        vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        # group_access ties the problem to partition 0 / group 0, making the
        # content group "in use" and therefore undeletable.
        self.course_fixture.create_xblock(
            vertical.locator,
            XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}),
        )
        self.group_configurations_page.visit()
        config = self.group_configurations_page.content_groups[0]
        self.assertTrue(config.delete_button_is_disabled)
    def test_can_delete_unused_content_group(self):
        """
        Scenario: Ensure that the user can delete unused content group.
        Given I have a course with 1 Content Group
        And I go to the Group Configuration page
        When I delete the Content Group with name "New Content Group"
        Then I see that there is no Content Group
        When I refresh the page
        Then I see that the content group has been deleted
        """
        self.group_configurations_page.visit()
        config = self.create_and_verify_content_group("New Content Group", 0)
        self.assertTrue(config.delete_button_is_present)
        self.assertEqual(len(self.group_configurations_page.content_groups), 1)
        # Delete content group
        config.delete()
        self.assertEqual(len(self.group_configurations_page.content_groups), 0)
        self.group_configurations_page.visit()
        self.assertEqual(len(self.group_configurations_page.content_groups), 0)
    def test_must_supply_name(self):
        """
        Scenario: Ensure that validation of the content group works correctly.
        Given I have a course without content groups
        And I create new content group without specifying a name click the button 'Create'
        Then I see error message "Content Group name is required."
        When I set a name and click the button 'Create'
        Then I see the content group is saved successfully
        """
        self.group_configurations_page.visit()
        self.group_configurations_page.create_first_content_group()
        config = self.group_configurations_page.content_groups[0]
        config.save()
        self.assertEqual(config.mode, 'edit')
        self.assertEqual("Group name is required", config.validation_message)
        config.name = "Content Group Name"
        config.save()
        self.assertIn("Content Group Name", config.name)
    def test_can_cancel_creation_of_content_group(self):
        """
        Scenario: Ensure that creation of a content group can be canceled correctly.
        Given I have a course without content groups
        When I click button 'Add your first Content Group'
        And I set new the name and click the button 'Cancel'
        Then I see that there is no content groups in the course
        """
        self.group_configurations_page.visit()
        self.group_configurations_page.create_first_content_group()
        config = self.group_configurations_page.content_groups[0]
        config.name = "Content Group"
        config.cancel()
        self.assertEqual(0, len(self.group_configurations_page.content_groups))
    def test_content_group_empty_usage(self):
        """
        Scenario: When content group is not used, ensure that the link to outline page works correctly.
        Given I have a course without content group
        And I create new content group
        Then I see a link to the outline page
        When I click on the outline link
        Then I see the outline page
        """
        self.group_configurations_page.visit()
        config = self.create_and_verify_content_group("New Content Group", 0)
        config.toggle()
        config.click_outline_anchor()
        # Waiting for the page load and verify that we've landed on course outline page
        EmptyPromise(
            lambda: self.outline_page.is_browser_on_page(), "loaded page {!r}".format(self.outline_page),
            timeout=30
        ).fulfill()
class AdvancedSettingsValidationTest(StudioCourseTest):
    """
    Tests for validation feature in Studio's advanced settings tab
    """
    # NOTE: uses dict.iteritems(), a Python 2 idiom.
    def setUp(self):
        super(AdvancedSettingsValidationTest, self).setUp()
        self.advanced_settings = AdvancedSettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # One representative field of each settings type (see
        # get_settings_fields_of_each_type for the type mapping).
        self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
                            'Maximum Attempts', 'Course Announcement Date']
        # Before every test, make sure to visit the page first
        self.advanced_settings.visit()
        self.assertTrue(self.advanced_settings.is_browser_on_page())
    def test_modal_shows_one_validation_error(self):
        """
        Test that advanced settings don't save if there's a single wrong input,
        and that it shows the correct error message in the modal.
        """
        # Feed an integer value for String field.
        # .set method saves automatically after setting a value
        course_display_name = self.advanced_settings.get('Course Display Name')
        self.advanced_settings.set('Course Display Name', 1)
        self.advanced_settings.wait_for_modal_load()
        # Test Modal
        self.check_modal_shows_correct_contents(['Course Display Name'])
        self.advanced_settings.refresh_and_wait_for_load()
        self.assertEquals(
            self.advanced_settings.get('Course Display Name'),
            course_display_name,
            'Wrong input for Course Display Name must not change its value'
        )
    def test_modal_shows_multiple_validation_errors(self):
        """
        Test that advanced settings don't save with multiple wrong inputs
        """
        # Save original values and feed wrong inputs
        original_values_map = self.get_settings_fields_of_each_type()
        self.set_wrong_inputs_to_fields()
        self.advanced_settings.wait_for_modal_load()
        # Test Modal
        self.check_modal_shows_correct_contents(self.type_fields)
        self.advanced_settings.refresh_and_wait_for_load()
        for key, val in original_values_map.iteritems():
            self.assertEquals(
                self.advanced_settings.get(key),
                val,
                'Wrong input for Advanced Settings Fields must not change its value'
            )
    def test_undo_changes(self):
        """
        Test that undo changes button in the modal resets all settings changes
        """
        # Save original values and feed wrong inputs
        original_values_map = self.get_settings_fields_of_each_type()
        self.set_wrong_inputs_to_fields()
        # Let modal popup
        self.advanced_settings.wait_for_modal_load()
        # Click Undo Changes button
        self.advanced_settings.undo_changes_via_modal()
        # Check that changes are undone
        for key, val in original_values_map.iteritems():
            self.assertEquals(
                self.advanced_settings.get(key),
                val,
                'Undoing Should revert back to original value'
            )
    def test_manual_change(self):
        """
        Test that manual changes button in the modal keeps settings unchanged
        """
        # NOTE: this dict mirrors the values used by set_wrong_inputs_to_fields;
        # keep the two in sync.
        inputs = {"Course Display Name": 1,
                  "Advanced Module List": 1,
                  "Discussion Topic Mapping": 1,
                  "Maximum Attempts": '"string"',
                  "Course Announcement Date": '"string"',
                  }
        self.set_wrong_inputs_to_fields()
        self.advanced_settings.wait_for_modal_load()
        self.advanced_settings.trigger_manual_changes()
        # Check that the validation modal went away.
        self.assertFalse(self.advanced_settings.is_validation_modal_present())
        # Iterate through the wrong values and make sure they're still displayed
        for key, val in inputs.iteritems():
            self.assertEquals(
                str(self.advanced_settings.get(key)),
                str(val),
                'manual change should keep: ' + str(val) + ', but is: ' + str(self.advanced_settings.get(key))
            )
    def check_modal_shows_correct_contents(self, wrong_settings_list):
        """
        Helper function that checks if the validation modal contains correct
        error messages.
        """
        # Check presence of modal
        self.assertTrue(self.advanced_settings.is_validation_modal_present())
        # List of wrong settings item & what is presented in the modal should be the same
        error_item_names = self.advanced_settings.get_error_item_names()
        self.assertEqual(set(wrong_settings_list), set(error_item_names))
        error_item_messages = self.advanced_settings.get_error_item_messages()
        self.assertEqual(len(error_item_names), len(error_item_messages))
    def get_settings_fields_of_each_type(self):
        """
        Get one of each field type:
            - String: Course Display Name
            - List: Advanced Module List
            - Dict: Discussion Topic Mapping
            - Integer: Maximum Attempts
            - Date: Course Announcement Date
        """
        return {
            "Course Display Name": self.advanced_settings.get('Course Display Name'),
            "Advanced Module List": self.advanced_settings.get('Advanced Module List'),
            "Discussion Topic Mapping": self.advanced_settings.get('Discussion Topic Mapping'),
            "Maximum Attempts": self.advanced_settings.get('Maximum Attempts'),
            "Course Announcement Date": self.advanced_settings.get('Course Announcement Date'),
        }
    def set_wrong_inputs_to_fields(self):
        """
        Set wrong values for the chosen fields
        """
        self.advanced_settings.set_values(
            {
                "Course Display Name": 1,
                "Advanced Module List": 1,
                "Discussion Topic Mapping": 1,
                "Maximum Attempts": '"string"',
                "Course Announcement Date": '"string"',
            }
        )
    def test_only_expected_fields_are_displayed(self):
        """
        Scenario: The Advanced Settings screen displays settings/fields not specifically hidden from
        view by a developer.
        Given I have a set of CourseMetadata fields defined for the course
        When I view the Advanced Settings screen for the course
        The total number of fields displayed matches the number I expect
        And the actual fields displayed match the fields I expect to see
        """
        expected_fields = self.advanced_settings.expected_settings_names
        displayed_fields = self.advanced_settings.displayed_settings_names
        self.assertEquals(set(displayed_fields), set(expected_fields))
@attr('shard_1')
class ContentLicenseTest(StudioCourseTest):
    """
    Tests for course-level licensing (that is, setting the license,
    for an entire course's content, to All Rights Reserved or Creative Commons)
    """
    def setUp(self):  # pylint: disable=arguments-differ
        super(ContentLicenseTest, self).setUp()
        self.outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.lms_courseware = CoursewarePage(
            self.browser,
            self.course_id,
        )
        # Every test starts from the Studio settings page.
        self.settings_page.visit()
    def test_empty_license(self):
        """
        When I visit the Studio settings page,
        I see that the course license is "All Rights Reserved" by default.
        Then I visit the LMS courseware page,
        and I see that the default course license is displayed.
        """
        self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
        self.lms_courseware.visit()
        self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
    def test_arr_license(self):
        """
        When I visit the Studio settings page,
        and I set the course license to "All Rights Reserved",
        and I refresh the page,
        I see that the course license is "All Rights Reserved".
        Then I visit the LMS courseware page,
        and I see that the course license is "All Rights Reserved".
        """
        self.settings_page.course_license = "All Rights Reserved"
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
        self.lms_courseware.visit()
        self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
    def test_cc_license(self):
        """
        When I visit the Studio settings page,
        and I set the course license to "Creative Commons",
        and I refresh the page,
        I see that the course license is "Creative Commons".
        Then I visit the LMS courseware page,
        and I see that the course license is "Some Rights Reserved".
        """
        self.settings_page.course_license = "Creative Commons"
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.assertEqual(self.settings_page.course_license, "Creative Commons")
        self.lms_courseware.visit()
        # The course_license text will include a bunch of screen reader text to explain
        # the selected options
        self.assertIn("Some Rights Reserved", self.lms_courseware.course_license)
|
xingyepei/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings.py
|
Python
|
agpl-3.0
| 19,688
|
[
"VisIt"
] |
c3aa5696fb9975f637924decc73f941ac811474e022b30584b1a6969288a7431
|
import sys
import os
import random
import numpy as np
from numpy import arange, sin, pi
from .constants import AVOGADRO, E_CHARGE
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True when a and b are equal within the given tolerances.

    Mirrors the semantics of math.isclose (PEP 485): the allowed difference
    is the larger of rel_tol scaled by the bigger magnitude, and the
    absolute floor abs_tol.
    """
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
def getMeanData(data):
    """Average a mapping of ``{twin_id: {key: value}}`` across twins.

    :param data: non-empty dict whose values are dicts sharing the same keys
        (the key set is taken from the first entry, as in the original).
    :return: dict of ``{key: mean_value}``.

    Fixed for Python 3 compatibility: the original used ``data.values()[0]``,
    which fails on Python 3 because ``dict.values()`` returns a view that
    does not support indexing; ``next(iter(...))`` works on both 2 and 3.
    """
    sums = {}
    n_twin = 0
    first_data = next(iter(data.values()))
    for s in first_data.keys():
        sums[s] = 0.0
    for current_data in data.values():
        n_twin += 1
        for s in current_data.keys():
            sums[s] += current_data[s]
    for s in sums.keys():
        sums[s] = sums[s] / n_twin
    return sums
def getInfluxRate(neuron_curr, surf_area, vol):
    """Convert a membrane Ca2+ current density to a molar influx rate.

    :param neuron_curr: current density (SI units, per the data files).
    :param surf_area: membrane surface area.
    :param vol: compartment volume (SI; multiplied by 1e3 to get litres).
    :return: influx in mol per second per litre.
    """
    # Total current over the whole surface (data files store SI units).
    total_curr = neuron_curr * surf_area
    # For Ca2+, I = Q/t = N2e/t, so N/t = I/2e, where e is the elementary
    # charge; then divide by volume in litres (vol * 1e3) and AVOGADRO.
    # The division sequence is kept identical to preserve float results.
    return total_curr / 2.0 / E_CHARGE / vol / AVOGADRO / 1e3
def readData(data_file):
    """Parse a whitespace-separated simulation data file.

    A header line starting with ``#Entries:`` defines ``dataset["Entries"]``
    (the column names) and resets ``dataset["Data"]``; every other line is a
    row of floats appended to ``dataset["Data"]``.

    :param data_file: path of the file to read.
    :return: dict with "Entries" (list of str) and "Data" (list of float rows).

    Fixed: the original opened the file without ever closing it (resource
    leak); a ``with`` block now closes it deterministically.
    """
    dataset = {}
    with open(data_file, 'r') as data_fh:
        for line in data_fh:
            line_secs = line.split()
            # entry info
            if line_secs[0] == '#Entries:':
                dataset["Entries"] = line_secs[1:]
                dataset["Data"] = []
            else:
                data_line = [float(value) for value in line_secs]
                dataset["Data"].append(data_line)
    return dataset
def SI2NEURON(dataset):
    """Rescale a dataset from SI units to the units NEURON expects.

    Column 0 (time) is multiplied by 1000; every remaining column by 1e6.
    NOTE(review): the factors suggest s->ms and base-unit->micro conversions
    — confirm against the NEURON model that consumes this.
    """
    converted = {"Entries": dataset["Entries"], "Data": []}
    for row in dataset["Data"]:
        scaled = [row[0] * 1000] + [v * 1e6 for v in row[1:]]
        converted["Data"].append(scaled)
    return converted
def genCaInfluxProfile(dataset, sur_areas, vols, start_time, end_time, time_win):
    """Bin the recorded currents into windows of length time_win and convert
    each window's mean absolute current per ROI into a Ca2+ influx rate.

    :param dataset: dict with "Entries" (time + ROI names) and "Data" rows.
    :param sur_areas: dict mapping ROI name -> surface area.
    :param vols: dict mapping ROI name -> volume.
    :param start_time: time at which binning starts (earlier rows skipped).
    :param end_time: time at which processing stops.
    :param time_win: width of each averaging window.
    :return: dict in the same {"Entries", "Data"} layout, one row per window.
    """
    # Column 0 is time; the rest are per-ROI currents.
    roi_entries = dataset["Entries"][1:]
    n_entries = len(roi_entries)
    curr_start_time = start_time
    curr_end_time = start_time + time_win
    n_win = 0
    sum_curr = [0.0] * n_entries
    influx_profile = {}
    influx_profile["Entries"] = dataset["Entries"]
    influx_profile["Data"] = []
    for data in dataset["Data"]:
        time = data[0]
        curr_data = data[1:]
        if start_time > time:
            continue
        # Window boundary reached (float-tolerant comparison): emit one row
        # of mean influx rates for the window just completed, then reset the
        # accumulators for the next window.
        if isclose(time, curr_end_time):
            influx_data = []
            influx_data.append(curr_start_time)
            for i in range(n_entries):
                mean_curr = sum_curr[i] / n_win
                influx_rate = getInfluxRate(mean_curr, sur_areas[roi_entries[i]], vols[roi_entries[i]])
                influx_data.append(influx_rate)
            influx_profile["Data"].append(influx_data)
            sum_curr = [0.0] * n_entries
            n_win = 0
            curr_start_time = curr_end_time
            curr_end_time = curr_start_time + time_win
            if isclose(time, end_time):
                break
        # Accumulate absolute currents for the current window; note the
        # boundary sample itself is counted toward the NEXT window.
        for i in range(n_entries):
            sum_curr[i] += abs(curr_data[i])
        n_win += 1
    return influx_profile
|
CNS-OIST/STEPS_Example
|
publication_models/API_2/Chen_FNeuroinf__2017/purkinje_model/extra/data_presets.py
|
Python
|
gpl-2.0
| 3,135
|
[
"Avogadro"
] |
6f18dad425bb28b91d7ae322b0e019265673e811938663172db7552cbf889ab2
|
"""
Proteomics Datatypes
"""
import binascii
import logging
import re
from galaxy.datatypes import data
from galaxy.datatypes.binary import Binary
from galaxy.datatypes.data import Text
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.xml import GenericXml
from galaxy.util import nice_size
log = logging.getLogger(__name__)
class Wiff(Binary):
    """Class for wiff files."""
    # Composite datatype: a .wiff file plus an optional .wiff.scan spectra file.
    file_ext = 'wiff'
    allow_datatype_change = False
    composite_type = 'auto_primary_file'
    def __init__(self, **kwd):
        Binary.__init__(self, **kwd)
        self.add_composite_file(
            'wiff',
            description='AB SCIEX files in .wiff format. This can contain all needed information or only metadata.',
            is_binary=True)
        self.add_composite_file(
            'wiff_scan',
            description='AB SCIEX spectra file (wiff.scan), if the corresponding .wiff file only contains metadata.',
            optional='True', is_binary=True)
    def generate_primary_file(self, dataset=None):
        """Build the HTML index page that links the component files.

        NOTE: uses dict.iteritems(), a Python 2 idiom.
        """
        rval = ['<html><head><title>Wiff Composite Dataset </title></head><p/>']
        rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
        for composite_name, composite_file in self.get_composite_files(dataset=dataset).iteritems():
            fn = composite_name
            opt_text = ''
            if composite_file.optional:
                opt_text = ' (optional)'
            if composite_file.get('description'):
                rval.append('<li><a href="%s" type="text/plain">%s (%s)</a>%s</li>' % (fn, fn, composite_file.get('description'), opt_text))
            else:
                rval.append('<li><a href="%s" type="text/plain">%s</a>%s</li>' % (fn, fn, opt_text))
        rval.append('</ul></div></html>')
        return "\n".join(rval)
# Register Wiff so Galaxy's binary sniffer can recognize .wiff uploads.
Binary.register_sniffable_binary_format("wiff", "wiff", Wiff )
class PepXmlReport(Tabular):
    """pepxml converted to tabular report"""
    file_ext = "tsv"
    def __init__(self, **kwd):
        Tabular.__init__(self, **kwd)
        # Fixed column layout produced by the pepxml-to-tabular converter.
        self.column_names = ['Protein', 'Peptide', 'Assumed Charge', 'Neutral Pep Mass (calculated)', 'Neutral Mass', 'Retention Time', 'Start Scan', 'End Scan', 'Search Engine', 'PeptideProphet Probability', 'Interprophet Probabaility']
    def display_peek(self, dataset):
        """Returns formated html of peek"""
        return Tabular.make_html_table(self, dataset, column_names=self.column_names)
class ProtXmlReport(Tabular):
    """protxml converted to tabular report"""
    file_ext = "tsv"
    # The converter emits one header line before the data rows.
    comment_lines = 1
    def __init__(self, **kwd):
        Tabular.__init__(self, **kwd)
        # Fixed column layout produced by the protxml-to-tabular converter.
        self.column_names = [
            "Entry Number", "Group Probability",
            "Protein", "Protein Link", "Protein Probability",
            "Percent Coverage", "Number of Unique Peptides",
            "Total Independent Spectra", "Percent Share of Spectrum ID's",
            "Description", "Protein Molecular Weight", "Protein Length",
            "Is Nondegenerate Evidence", "Weight", "Precursor Ion Charge",
            "Peptide sequence", "Peptide Link", "NSP Adjusted Probability",
            "Initial Probability", "Number of Total Termini",
            "Number of Sibling Peptides Bin", "Number of Instances",
            "Peptide Group Designator", "Is Evidence?"]
    def display_peek(self, dataset):
        """Returns formated html of peek"""
        return Tabular.make_html_table(self, dataset, column_names=self.column_names)
class ProteomicsXml(GenericXml):
    """ An enhanced XML datatype used to reuse code across several
    proteomic/mass-spec datatypes.

    Subclasses set ``root`` (a regex fragment matching the document's root
    element), ``blurb`` and ``file_ext``.
    """
    def sniff(self, filename):
        """ Determines whether the file is the correct XML type. """
        with open(filename, 'r') as contents:
            while True:
                line = contents.readline()
                # readline() returns '' at EOF (never None, as the original
                # checked); skip XML declarations / processing instructions
                # such as '<?xml ...?>' until the first real element line.
                if not line or not line.startswith('<?'):
                    break
            # pattern match <root or <ns:root for any ns string.
            # Raw string fixes the invalid '\w' escape of the original.
            pattern = r'^<(\w*:)?%s' % self.root
            return bool(line) and re.match(pattern, line) is not None
    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
            dataset.blurb = self.blurb
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
class PepXml(ProteomicsXml):
    """pepXML data"""
    file_ext = "pepxml"
    blurb = 'pepXML data'
    # Root element matched by ProteomicsXml.sniff().
    root = "msms_pipeline_analysis"
class MzML(ProteomicsXml):
    """mzML data"""
    file_ext = "mzml"
    edam_format = "format_3244"
    blurb = 'mzML Mass Spectrometry data'
    # Accept both plain and indexed mzML root elements.
    root = "(mzML|indexedmzML)"
class ProtXML(ProteomicsXml):
    """protXML data"""
    file_ext = "protxml"
    blurb = 'prot XML Search Results'
    # Root element matched by ProteomicsXml.sniff().
    root = "protein_summary"
class MzXML(ProteomicsXml):
    """mzXML data"""
    file_ext = "mzxml"
    blurb = "mzXML Mass Spectrometry data"
    # Root element matched by ProteomicsXml.sniff().
    root = "mzXML"
class MzIdentML(ProteomicsXml):
    """mzIdentML identification results."""
    file_ext = "mzid"
    edam_format = "format_3247"
    blurb = "XML identified peptides and proteins."
    root = "MzIdentML"
class TraML(ProteomicsXml):
    """TraML transition lists."""
    file_ext = "traml"
    edam_format = "format_3246"
    blurb = "TraML transition list"
    root = "TraML"
class MzQuantML(ProteomicsXml):
    """mzQuantML quantification data."""
    file_ext = "mzq"
    edam_format = "format_3248"
    blurb = "XML quantification data"
    root = "MzQuantML"
class ConsensusXML(ProteomicsXml):
    """OpenMS consensusXML map alignment data."""
    file_ext = "consensusxml"
    blurb = "OpenMS multiple LC-MS map alignment file"
    root = "consensusXML"
class FeatureXML(ProteomicsXml):
    """OpenMS featureXML data."""
    file_ext = "featurexml"
    blurb = "OpenMS feature file"
    root = "featureMap"
class IdXML(ProteomicsXml):
    """OpenMS idXML identification data."""
    file_ext = "idxml"
    blurb = "OpenMS identification file"
    root = "IdXML"
class TandemXML(ProteomicsXml):
    """X!Tandem search results."""
    file_ext = "tandem"
    blurb = "X!Tandem search results file"
    root = "bioml"
class UniProtXML(ProteomicsXml):
    """UniProt proteome XML."""
    file_ext = "uniprotxml"
    blurb = "UniProt Proteome file"
    root = "uniprot"
class Mgf(Text):
    """Mascot Generic Format data"""
    file_ext = "mgf"
    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
            dataset.blurb = 'mgf Mascot Generic Format'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
    def sniff(self, filename):
        """Scan the first ~100 lines for a 'BEGIN IONS' marker.

        Returns False when no marker is found, including for files that
        end before the line limit is reached.
        """
        mgf_begin_ions = "BEGIN IONS"
        max_lines = 100
        with open(filename) as handle:
            for i, line in enumerate(handle):
                line = line.rstrip()
                if line == mgf_begin_ions:
                    return True
                if i > max_lines:
                    return False
        # Short file without a marker: explicitly not MGF (the old code
        # fell off the end and returned None here).
        return False
class MascotDat(Text):
    """Mascot search results """
    file_ext = "mascotdat"
    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
            dataset.blurb = 'mascotdat Mascot Search Results'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
    def sniff(self, filename):
        """Look for the Mascot MIME-Version banner in the first ~10 lines.

        Returns False when the banner is absent, including for files that
        end before the line limit is reached.
        """
        mime_version = "MIME-Version: 1.0 (Generated by Mascot version 1.0)"
        max_lines = 10
        with open(filename) as handle:
            for i, line in enumerate(handle):
                line = line.rstrip()
                if line == mime_version:
                    return True
                if i > max_lines:
                    return False
        # Short file without the banner: explicitly not Mascot output (the
        # old code fell off the end and returned None here).
        return False
class ThermoRAW(Binary):
    """Class describing a Thermo Finnigan binary RAW file"""
    file_ext = "raw"
    def sniff(self, filename):
        # Thermo Finnigan RAW format is proprietary and hence not well documented.
        # Files start with 2 bytes that seem to differ followed by F\0i\0n\0n\0i\0g\0a\0n
        # This combination represents 17 bytes, but to play safe we read 20 bytes from
        # the start of the file.
        try:
            # Read in binary mode: the magic is raw bytes, and the binascii
            # functions require bytes on Python 3. The with-block also
            # guarantees the handle is closed (the old code leaked it).
            with open(filename, 'rb') as handle:
                header = handle.read(20)
            hexheader = binascii.b2a_hex(header)
            finnigan = binascii.hexlify(b'F\0i\0n\0n\0i\0g\0a\0n')
            return hexheader.find(finnigan) != -1
        except Exception:
            # Unreadable/short file: treat as "not this format" rather than
            # propagating; narrowed from a bare except so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return False
    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text."""
        if not dataset.dataset.purged:
            dataset.peek = "Thermo Finnigan RAW file"
            dataset.blurb = nice_size(dataset.get_size())
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
    def display_peek(self, dataset):
        try:
            return dataset.peek
        except Exception:
            return "Thermo Finnigan RAW file (%s)" % (nice_size(dataset.get_size()))
# Register the Thermo RAW datatype so Galaxy's sniffers can auto-detect uploads.
Binary.register_sniffable_binary_format("thermo.raw", "raw", ThermoRAW )
class Msp(Text):
    """ Output of NIST MS Search Program chemdata.nist.gov/mass-spc/ftp/mass-spc/PepLib.pdf """
    file_ext = "msp"
    @staticmethod
    def next_line_starts_with(contents, prefix):
        """Read one line from an open file and test its prefix.

        Note: readline() returns '' at EOF (never None), so the None check
        is vestigial; '' fails the startswith test, which keeps the result
        correct for truncated files.
        """
        next_line = contents.readline()
        return next_line is not None and next_line.startswith(prefix)
    def sniff(self, filename):
        """ Determines whether the file is a NIST MSP output file."""
        with open(filename, 'r') as f:
            # Only look at the first 1 KiB; MSP records start immediately.
            begin_contents = f.read(1024)
            if "\n" not in begin_contents:
                return False
            lines = begin_contents.splitlines()
            if len(lines) < 2:
                return False
            # MSP records open with a Name: line followed by MW:.
            return lines[0].startswith("Name:") and lines[1].startswith("MW:")
class SPLibNoIndex( Text ):
    """SPlib without index file """
    file_ext = "splib_noindex"
    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            dataset.blurb = 'Spectral Library without index files'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
class SPLib(Msp):
    """SpectraST Spectral Library. Closely related to msp format"""
    file_ext = "splib"
    composite_type = 'auto_primary_file'
    def __init__(self, **kwd):
        Msp.__init__(self, **kwd)
        # The dataset is a triple of files: the library itself plus two indexes.
        self.add_composite_file('library.splib',
                                description='Spectral Library. Contains actual library spectra',
                                is_binary=False)
        self.add_composite_file('library.spidx',
                                description='Spectrum index', is_binary=False)
        self.add_composite_file('library.pepidx',
                                description='Peptide index', is_binary=False)
    def generate_primary_file(self, dataset=None):
        """Build the HTML index page linking to the composite files."""
        rval = ['<html><head><title>Spectral Library Composite Dataset </title></head><p/>']
        rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
        # items() works on both Python 2 and 3; iteritems() is Python 2 only.
        for composite_name, composite_file in self.get_composite_files(dataset=dataset).items():
            fn = composite_name
            opt_text = ''
            if composite_file.optional:
                opt_text = ' (optional)'
            if composite_file.get('description'):
                rval.append('<li><a href="%s" type="text/plain">%s (%s)</a>%s</li>' % (fn, fn, composite_file.get('description'), opt_text))
            else:
                rval.append('<li><a href="%s" type="text/plain">%s</a>%s</li>' % (fn, fn, opt_text))
        rval.append('</ul></div></html>')
        return "\n".join(rval)
    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
            dataset.blurb = 'splib Spectral Library Format'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
    def sniff(self, filename):
        """ Determines whether the file is a SpectraST generated file.
        """
        with open(filename, 'r') as contents:
            return Msp.next_line_starts_with(contents, "Name:") and Msp.next_line_starts_with(contents, "LibID:")
class Ms2(Text):
    """MS2 spectra with 'H\\t'-prefixed header lines."""
    file_ext = "ms2"
    def sniff(self, filename):
        """ Determines whether the file is a valid ms2 file.

        Collects the leading 'H\\t' header lines and requires all four
        mandatory header fields to be present.
        """
        with open(filename, 'r') as contents:
            header_lines = []
            while True:
                line = contents.readline()
                if not line:
                    # EOF: readline() returns '' here, never None. Breaking
                    # avoids the infinite loop the old 'pass' caused when a
                    # file ended while still reading headers.
                    break
                elif line.startswith('H\t'):
                    header_lines.append(line)
                else:
                    break
            for header_field in ['CreationDate', 'Extractor', 'ExtractorVersion', 'ExtractorOptions']:
                if not any(h.startswith('H\t%s' % header_field) for h in header_lines):
                    return False
        return True
# unsniffable binary format, should do something about this
class XHunterAslFormat(Binary):
    """ Annotated Spectra in the HLF format http://www.thegpm.org/HUNTER/format_2006_09_15.html """
    file_ext = "hlf"
class Sf3(Binary):
    """Class describing a Scaffold SF3 files"""
    file_ext = "sf3"
|
icaoberg/cellorganizer-galaxy-tools
|
datatypes/proteomics.py
|
Python
|
gpl-3.0
| 13,937
|
[
"Galaxy",
"OpenMS"
] |
037c7f3466ac6d2eb609237294e5a690dc6fa5c448fcb145c1bcc70bb3cec3f4
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
# make simulation deterministic
np.random.seed(42)
# Import the ferrofluid benchmark script with reduced iteration counts and a
# fixed (tune=False) P3M parameter set so the test runs quickly and
# reproducibly. skipIfMissingFeatures skips the test when the required
# ESPResSo features are not compiled in.
benchmark, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
    "@BENCHMARKS_DIR@/ferrofluid.py", measurement_steps=100, n_iterations=2,
    cmd_arguments=["--particles_per_core", "400"], min_skin=0.225, max_skin=0.225,
    dp3m_params={'prefactor': 1, 'accuracy': 1e-4, 'cao': 7, 'r_cut': 5.193,
    'mesh': [20, 20, 20], 'alpha': 0.64788, 'tune': False})
@skipIfMissingFeatures
class Sample(ut.TestCase):
    # Importing the benchmark already ran it; holding a reference to its
    # system here is all this smoke test checks.
    system = benchmark.system
if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/scripts/benchmarks/test_ferrofluid.py
|
Python
|
gpl-3.0
| 1,335
|
[
"ESPResSo"
] |
2c1925a96746866c564f3f14695a37befced6983b8cf65d08d94d3914efaabdb
|
#!/usr/bin/env python
import functools
from typing import Union
import metpy.calc
import numpy as np
import numpy.ma
import scipy.signal as spsignal
import seawater
import xarray as xr
from metpy.units import units
from pint import UnitRegistry
_ureg = UnitRegistry()
# All functions in this file (that do not start with an underscore) will be
# available to the parser.
# NumPy ufunc aliases exposed under conventional math names for parser
# expressions. `abs` intentionally shadows the builtin in this namespace.
sin = np.sin
cos = np.cos
tan = np.tan
asin = np.arcsin
acos = np.arccos
atan = np.arctan
atan2 = np.arctan2
ln = np.log
log = np.log10
log2 = np.log2
abs = np.abs
def max(arg):
    """Return the largest element of *arg* after flattening it to 1-D."""
    flattened = np.ravel(arg)
    return flattened.max()
def min(arg):
    """Return the smallest element of *arg* after flattening it to 1-D."""
    flattened = np.ravel(arg)
    return flattened.min()
def magnitude(a, b):
    """
    Element-wise magnitude of a and b: np.sqrt(a ** 2 + b ** 2).
    See: https://en.wikipedia.org/wiki/Hadamard_product_(matrices)

    Parameters:
        a: ndarray
        b: ndarray
    Returns:
        np.ndarray -- magnitude of a and b
    """
    return np.sqrt(np.square(a) + np.square(b))
def bearing(north_vel: xr.DataArray, east_vel: xr.DataArray) -> xr.DataArray:
    """
    Calculates the bearing (degrees clockwise positive from North) from
    component East and North vectors.

    Returns:
        xr.DataArray -- bearing of east_vel and north_vel
    """
    east_vel = np.squeeze(east_vel)
    north_vel = np.squeeze(north_vel)
    # Mathematical angle: counter-clockwise from East, in radians.
    bearing = np.arctan2(north_vel, east_vel)
    # Convert to compass convention: clockwise from North.
    bearing = np.pi / 2.0 - bearing
    bearing = xr.where(bearing < 0, bearing + 2 * np.pi, bearing)
    bearing *= 180.0 / np.pi
    # Deal with undefined angles (where velocity is 0 or very close)
    inds = np.where(np.logical_and(np.abs(east_vel) < 10e-6, np.abs(north_vel) < 10e-6))
    # NOTE(review): the .values in-place assignment assumes the inputs are
    # xr.DataArray (as annotated), not plain ndarrays -- confirm at callers.
    bearing.values[inds] = np.nan
    return bearing
def unstaggered_speed(u_vel, v_vel):
    """Calculate the speed of seawater current from u and v velocity component
    array that are on the u and v points of an Arakawa-C staggered grid;
    see https://en.wikipedia.org/wiki/Arakawa_grids

    To correctly calculate the speed of the current, the velocity components have to be
    "unstaggered" by interpolating their values to the T-grid points at the centres of the
    grid cells. Here that is accomplished by averaging u(i-1) and u(i) values to get u values
    at the T-points. Likewise, v(j-1) and v(j) values are averaged to get v values at the T-points.

    With those arrays of unstaggered values, the speed of the current is calculated as the
    element-wise magnitude of u and v:
        np.sqrt(u ** 2 + v ** 2)
    See: https://en.wikipedia.org/wiki/Hadamard_product_(matrices)

    We assume that the dimension order of the velocity component arrays is (t, depth, y, x)
    or (t, y, x). So, we can pick out the dimensions that we need to shift along to average the
    velocity components to the T-points by indexing to the appropriate one of the final two
    dimensions to get its name.

    Parameters:
        u_vel: ndarray
        v_vel: ndarray
    Returns:
        ndarray -- speed of current
    """
    # Use indices here rather than hard coding dimension name strings.
    x_dim = u_vel.dims[-1]
    y_dim = v_vel.dims[-2]
    # shift() moves values one cell along the axis, so each average pairs a
    # point with its predecessor (the first row/column becomes NaN).
    u_t_grid = (u_vel + u_vel.shift({x_dim: 1})) / 2
    v_t_grid = (v_vel + v_vel.shift({y_dim: 1})) / 2
    return numpy.sqrt(u_t_grid**2 + v_t_grid**2)
def __calc_pressure(depth, latitude):
    """Convert depth(s) in meters to pressure via seawater.pres.

    Accepts either an iterable of depths or a single scalar depth.
    """
    try:
        result = [seawater.pres(d, latitude) for d in depth]
    except TypeError:
        # Scalar depth is not iterable; compute directly.
        result = seawater.pres(depth, latitude)
    return np.array(result)
def __validate_depth_lat_temp_sal(depth, latitude, temperature, salinity):
    """Coerce all four inputs to ndarrays and squeeze singleton axes off
    temperature and salinity."""
    depth, latitude, temperature, salinity = (
        arg if type(arg) is np.ndarray else np.array(arg)
        for arg in (depth, latitude, temperature, salinity)
    )
    return depth, latitude, np.squeeze(temperature), np.squeeze(salinity)
def __find_depth_index_of_min_value(data: np.ndarray, depth_axis=0) -> np.ndarray:
    """Index of the minimum along depth_axis, with NaNs masked out."""
    if np.ma.is_masked(data):
        masked = data
    else:
        # Mask NaNs so argmin does not blow up / propagate them.
        # TODO: could we use a .view() here instead of copying stuff?
        masked = np.ma.masked_array(data, np.isnan(data))
    return np.argmin(masked, axis=depth_axis)
def __find_depth_index_of_max_value(data: np.ndarray, depth_axis=0) -> np.ndarray:
    """Index of the maximum along depth_axis, with NaNs masked out."""
    if np.ma.is_masked(data):
        masked = data
    else:
        # Mask NaNs so argmax does not blow up / propagate them.
        # TODO: could we use a .view() here instead of copying stuff?
        masked = np.ma.masked_array(data, np.isnan(data))
    return np.argmax(masked, axis=depth_axis)
def oxygensaturation(temperature: np.ndarray, salinity: np.ndarray) -> np.ndarray:
    """
    Calculate the solubility (saturation) of
    Oxygen (O2) in seawater.

    Required Arguments:
        * temperature: temperature values in Celsius.
        * salinity: salinity values.
    """
    # Note: seawater.satO2 takes (salinity, temperature) -- reversed order.
    return seawater.satO2(salinity, temperature)
def nitrogensaturation(temperature: np.ndarray, salinity: np.ndarray) -> np.ndarray:
    """
    Calculate the solubility (saturation) of
    Nitrogen (N2) in seawater.

    Required Arguments:
        * temperature: temperature values in Celsius.
        * salinity: salinity values.
    """
    # Note: seawater.satN2 takes (salinity, temperature) -- reversed order.
    return seawater.satN2(salinity, temperature)
def sspeed(
    depth: Union[np.ndarray, xr.Variable],
    latitude: np.ndarray,
    temperature: np.ndarray,
    salinity: np.ndarray,
) -> np.ndarray:
    """
    Calculates the speed of sound.

    Required Arguments:
        * depth: The depth(s) in meters
        * latitude: The latitude(s) in degrees North
        * temperature: The temperatures(s) in Celsius
        * salinity: The salinity (unitless)
    """
    depth, latitude, temperature, salinity = __validate_depth_lat_temp_sal(
        depth, latitude, temperature, salinity
    )
    press = __calc_pressure(depth, latitude)
    if salinity.shape != press.shape:
        # Need to pad press so it can broadcast against temperature and salinity.
        # eg. if using GIOPS and salinity has shape (3, 50, 3, 12) then press has
        # shape (50, 3). This logic pads press to give shape (1, 50, 3, 1).
        for ax, val in enumerate(salinity.shape):
            if ax > press.ndim - 1 or press.shape[ax] != val:
                press = np.expand_dims(press, axis=ax)
    speed = seawater.svel(salinity, temperature, press)
    # Drop any singleton axes introduced by the broadcasting above.
    return np.squeeze(speed)
def density(depth, latitude, temperature, salinity) -> np.ndarray:
    """
    Calculates the density of sea water.

    Parameters:
        depth: The depth(s) in meters
        latitude: The latitude(s) in degrees North
        temperature: The temperatures(s) in Celsius
        salinity: The salinity (unitless)
    """
    pressure = __calc_pressure(depth, latitude)
    return np.array(seawater.dens(salinity, temperature, pressure))
def heatcap(depth, latitude, temperature, salinity) -> np.ndarray:
    """
    Calculates the heat capacity of sea water.

    Parameters:
        depth: The depth(s) in meters
        latitude: The latitude(s) in degrees North
        temperature: The temperatures(s) in Celsius
        salinity: The salinity (unitless)
    """
    pressure = __calc_pressure(depth, latitude)
    return np.array(seawater.cp(salinity, temperature, pressure))
def tempgradient(depth, latitude, temperature, salinity) -> np.ndarray:
    """
    Calculates the adiabatic temp gradient of sea water.

    Required Arguments:
        * depth: Depth in meters
        * latitude: Latitude in degrees North
        * temperature: Temperatures in Celsius
        * salinity: Salinity
    """
    depth, latitude, temperature, salinity = __validate_depth_lat_temp_sal(
        depth, latitude, temperature, salinity
    )
    pressure = __calc_pressure(depth, latitude)
    return np.array(seawater.adtg(salinity, temperature, pressure))
def __get_soniclayerdepth_mask(
    soundspeed: np.ndarray, min_depth_indices: np.ndarray
) -> np.ndarray:
    """
    Create mask which masks out values BELOW deep sound channel.
    """
    depth_range = np.arange(soundspeed.shape[0])
    # Broadcast each minimum index against the full depth axis, then fold
    # the flat comparison back into the sound-speed array's shape.
    flat_mask = min_depth_indices.ravel()[..., np.newaxis] < depth_range
    return flat_mask.T.reshape(soundspeed.shape)
def __soniclayerdepth_from_sound_speed(
    soundspeed: np.ndarray, depth: np.ndarray
) -> np.ndarray:
    """Depth of the sound-speed maximum above the deep sound channel.

    Note: mutates *soundspeed* in place (writes NaN below the channel).
    """
    min_indices = __find_depth_index_of_min_value(soundspeed)
    # Discard everything below the deep sound channel minimum so the argmax
    # below only sees the water column above it.
    mask = __get_soniclayerdepth_mask(soundspeed, min_indices)
    soundspeed[mask] = np.nan
    # Find sonic layer depth indices
    max_indices = __find_depth_index_of_max_value(soundspeed)
    data = depth[max_indices]
    # Mask out surface depths, since sonic layer depth cannot physically
    # be present at the surface. Using np.nan will make the main map have
    # transparent spots when the surface is masked out.
    data[data == depth[0]] = np.nan
    return data
def soniclayerdepth(depth, latitude, temperature, salinity) -> np.ndarray:
    """
    Find and return the depth of the maximum value of the speed
    of sound ABOVE the deep sound channel.

    Required Arguments:
        * depth: Depth in meters
        * latitude: Latitude in degrees North
        * temperature: Temperatures in Celsius
        * salinity: Salinity
    """
    depth, latitude, temperature, salinity = __validate_depth_lat_temp_sal(
        depth, latitude, temperature, salinity
    )
    sound_speed = sspeed(depth, latitude, temperature, salinity)
    # (time, depth, y, x) -> (depth, time, y, x): depth must be axis 0.
    if sound_speed.ndim > 3:
        sound_speed = np.swapaxes(sound_speed, 0, 1)
    return __soniclayerdepth_from_sound_speed(sound_speed, depth)
def deepsoundchannel(depth, latitude, temperature, salinity) -> np.ndarray:
    """
    Find and return the depth of the minimum value of the
    speed of sound.

    https://en.wikipedia.org/wiki/SOFAR_channel

    Required Arguments:
        * depth: Depth in meters
        * latitude: Latitude in degrees North
        * temperature: Temperatures in Celsius
        * salinity: Salinity
    """
    depth, latitude, temperature, salinity = __validate_depth_lat_temp_sal(
        depth, latitude, temperature, salinity
    )
    sound_speed = sspeed(depth, latitude, temperature, salinity)
    # (time, depth, y, x) -> (depth, time, y, x): depth must be axis 0.
    if sound_speed.ndim > 3:
        sound_speed = np.swapaxes(sound_speed, 0, 1)
    channel_depth = depth[__find_depth_index_of_min_value(sound_speed)]
    # Mask out depth values above 500 meters since the deep sound channel
    # cannot in general occur above this.
    channel_depth[channel_depth < 500] = np.nan
    return channel_depth
def deepsoundchannelbottom(depth, latitude, temperature, salinity) -> np.ndarray:
    """
    Find and return the deep sound channel bottom (the second depth where
    the speed of sound is equal to the speed at the sonic layer depth).

    Note: Nearest Neighbour interpolation is used to find the depth value
    with closest sound speed value to the sonic layer depth.

    Required Arguments:
        * depth: Depth in meters
        * latitude: Latitude in degrees North
        * temperature: Temperatures in Celsius
        * salinity: Salinity
    """
    depth, latitude, temperature, salinity = __validate_depth_lat_temp_sal(
        depth, latitude, temperature, salinity
    )
    # Use masked array to quickly enable/disable data (see below)
    sound_speed = np.ma.array(
        sspeed(depth, latitude, temperature, salinity), fill_value=np.nan
    )
    if len(sound_speed.shape) > 3:  # if true dims are (time, depth, y, x)
        sound_speed = np.swapaxes(
            sound_speed, 0, 1
        )  # swap time and depth dims to ensure depth is 0th
    min_indices = __find_depth_index_of_min_value(sound_speed)
    # Hide everything below the deep sound channel so the argmax below only
    # sees the water column above it.
    sound_speed.mask = __get_soniclayerdepth_mask(sound_speed, min_indices)
    # Find sonic layer depth indices
    max_indices = __find_depth_index_of_max_value(sound_speed)
    # Extract sound speed values for later comparison.
    sound_speed_values_at_sonic_layer_depth = np.squeeze(
        np.take_along_axis(
            sound_speed,
            max_indices[np.newaxis, :],  # pad to equate number of dims to sound_speed
            0,  # apply along depth axis
        )
    )
    # Flip the mask since we actually want to examine the values BELOW the sonic
    # layer depth.
    sound_speed.mask = ~sound_speed.mask
    # Nearest neighbour
    # numpy broadcasting handles subtraction between 3D and 2D arrays
    min_difference = np.abs(
        sound_speed - sound_speed_values_at_sonic_layer_depth
    ).argmin(
        axis=0
    )  # We can use argmin here because the fill_value of the masked arrays is np.nan
    # Finito...LOOK MOM! NO LOOPS!!!
    return depth[min_difference]
def depthexcess(depth, latitude, temperature, salinity, bathy) -> np.ndarray:
    """
    Difference between the Deep Sound Channel Bottom and the Ocean Bottom.

    Required Arguments:
        * depth: Depth in meters
        * latitude: Latitude in degrees North
        * temperature: Temperatures in Celsius
        * salinity: Salinity
        * bathy: bathymetry (ocean bottom depth) with a .data attribute
    """
    channel_bottom = deepsoundchannelbottom(depth, latitude, temperature, salinity)
    return channel_bottom - bathy.data
def calculate_del_C(
    depth: np.ndarray,
    soundspeed: np.ndarray,
    minima: np.ndarray,
    maxima: np.ndarray,
    freq_cutoff: float,
) -> np.ndarray:
    """
    Calculate ΔC from a given sound profile and freq cutoff

    Required Arguments:
        * depth: The depth(s) in meters
        * soundspeed: Speed of sound in m/s
        * minima: Minima ndarray of Speed of sound, which contains the index where the minima occurs
        * maxima: Maxima ndarray of Speed of sound, which contains the index where the maxima occurs
        * freq_cutoff: Desired frequency cutoff in Hz

    Returns the value of ΔC, which will later be used inside the PSSC detection method
    """
    # Getting Cmin from the sound speed profile
    # Each element of `minima` holds an array of indices; take the first
    # minimum per profile, or -1 as a sentinel for "no minimum found".
    first_minimum = np.empty_like(minima, dtype="int64")
    # TODO: need to look at alternative for the following operation
    it = np.nditer(minima, flags=["refs_ok", "multi_index"])
    for x in it:
        array_size = x.tolist().size
        first_minimum[it.multi_index] = x.tolist()[0] if array_size > 0 else -1
    Cmin = np.squeeze(
        np.take_along_axis(soundspeed, first_minimum[np.newaxis, :, :], axis=0)
    )
    Cmin[first_minimum == -1] = np.nan
    # calculating delZ
    first_maximum = np.empty_like(maxima, dtype="int64")
    it = np.nditer(maxima, flags=["refs_ok", "multi_index"])
    for x in it:
        array_size = x.tolist().size
        first_maximum[it.multi_index] = x.tolist()[0] if array_size > 0 else -1
    channel_start_depth = depth[first_maximum]
    channel_start_depth[first_maximum == -1] = np.nan
    Cmax = np.squeeze(
        np.take_along_axis(soundspeed, first_maximum[np.newaxis, :, :], axis=0)
    )
    # NOTE(review): masks Cmax where the *minima* lookup failed; should this
    # be `first_maximum == -1`? Verify intent before changing.
    Cmax[first_minimum == -1] = np.nan
    # channel_end_depth = np.apply_along_axis(np.interp,0, Cmax,soundspeed,depth)
    # Interpolate, per profile, the depth where the speed returns to Cmax.
    channel_end_depth = np.empty_like(Cmax, dtype="float")
    it = np.nditer(Cmax, flags=["refs_ok", "multi_index"])
    for x in it:
        channel_end_depth[it.multi_index] = np.interp(
            x, soundspeed[:, it.multi_index[0], it.multi_index[1]], depth
        )
    del_Z = channel_end_depth - channel_start_depth
    numerator = freq_cutoff * del_Z
    denominator = 0.2652 * Cmin
    final_denom = numerator / denominator
    final_denom = np.power(final_denom, 2)
    delC = Cmin / final_denom
    # print(delC)
    return delC
def potentialsubsurfacechannel(
    depth, latitude, temperature, salinity, freq_cutoff=2755.03
) -> np.ndarray:
    """
    Detect if there is sub-surface channel.

    Required Arguments:
        * depth: Depth in meters
        * latitude: Latitude in degrees North
        * temperature: Temperatures in Celsius
        * salinity: Salinity
        * freq_cutoff: Desired frequency cutoff in Hz

    Returns 1 if the profile has a sub-surface channel, 0 if the profile does not have a sub-surface channel
    """
    depth, latitude, temperature, salinity = __validate_depth_lat_temp_sal(
        depth, latitude, temperature, salinity
    )
    # Trimming the profile considering the depth above 1000m
    depth = depth[depth < 1000]
    depth_length = len(depth)
    temp = temperature[0:depth_length, :, :]
    sal = salinity[0:depth_length, :, :]
    sound_speed = sspeed(depth, latitude, temp, sal)
    # Per-profile extrema indices along the depth axis (object arrays of
    # index arrays, one entry per (y, x) profile).
    minima = np.apply_along_axis(spsignal.find_peaks, 0, -sound_speed)[0]
    maxima = np.apply_along_axis(spsignal.find_peaks, 0, sound_speed)[0]
    delC = calculate_del_C(depth, sound_speed, minima, maxima, freq_cutoff)
    hasPSSC = np.zeros_like(minima, dtype="float")
    it = np.nditer(minima, flags=["refs_ok", "multi_index"])
    for minima_array in it:
        minima_list = minima_array.tolist()
        maxima_list = maxima[it.multi_index].tolist()
        # A sub-surface channel needs at least two minima bracketing a
        # maximum: p1/p3 are bounding maxima, p2 the channel minimum.
        if len(minima_list) >= 2:
            p1 = 0
            p2 = minima[it.multi_index].tolist()[0]
            if len(maxima_list) >= 2:
                p1 = maxima_list[0]
                p3 = maxima_list[1]
            else:
                p3 = maxima_list[0]
            if (
                p3 > p2
            ):  # if the only maximum is not higher in the water column than the minima
                p1_sound_speed = sound_speed[p1, it.multi_index[0], it.multi_index[1]]
                p2_sound_speed = sound_speed[p2, it.multi_index[0], it.multi_index[1]]
                p3_sound_speed = sound_speed[p3, it.multi_index[0], it.multi_index[1]]
                # Channel exists when both speed contrasts exceed ΔC.
                c1 = abs(p1_sound_speed - p2_sound_speed)
                c2 = abs(p3_sound_speed - p2_sound_speed)
                if c1 > delC[it.multi_index] and c2 > delC[it.multi_index]:
                    hasPSSC[it.multi_index] = 1
    return hasPSSC
def _metpy(func, data, lat, lon, dim):
    """Wrapper for MetPy functions

    Parameters:
    func -- the MetPy function
    data -- the xarray or netcdf variable (already sliced)
    lat -- an array of latitudes, the shape must match that of data
    lon -- an array of longitudes, the shape must match that of data
    dim -- the dimension to return, a string, x or y
    """
    # xarray exposes .dims, netCDF4 exposes .dimensions.
    if hasattr(data, "dims"):
        dims = data.dims
    else:
        dims = data.dimensions
    dx, dy = metpy.calc.lat_lon_grid_deltas(np.array(lon), np.array(lat))
    dim_order = "".join([d for d in dims if d in "yx"])
    if dim_order == "yx":
        deltas = [dy, dx]
    else:
        deltas = [dx, dy]
    if len(dims) > 2:
        # Move the spatial (y, x) axes to the end so the remaining leading
        # axes (e.g. time, depth) can be flattened and iterated over.
        axes = list(range(0, len(dims)))
        new_axes = list(axes)
        new_dims = list(dims)
        if dim_order == "yx":
            new_axes += [new_axes.pop(new_dims.index("y"))]
            new_dims += [new_dims.pop(new_dims.index("y"))]
            new_axes += [new_axes.pop(new_dims.index("x"))]
            new_dims += [new_dims.pop(new_dims.index("x"))]
            restore_axes = [x for _, x in sorted(zip(new_axes, range(0, len(dims))))]
        else:
            new_axes += [new_axes.pop(new_dims.index("x"))]
            new_dims += [new_dims.pop(new_dims.index("x"))]
            new_axes += [new_axes.pop(new_dims.index("y"))]
            new_dims += [new_dims.pop(new_dims.index("y"))]
            restore_axes = [x for _, x in sorted(zip(new_axes, range(0, len(dims))))]
        data = np.transpose(np.array(data), new_axes)
        oshape = data.shape
        extra_axes = data.shape[:-2]
        data = np.reshape(
            data, (functools.reduce(np.multiply, extra_axes), *data.shape[-2:])
        )
        # Apply the MetPy function slice-by-slice over the flattened axes.
        result = []
        for j in range(0, len(data)):
            result.append(
                func(np.array(data[j]), deltas=deltas, dim_order=dim_order)[
                    dim_order.index(dim)
                ].magnitude
            )
        result = np.array(result)
        # Restore the original shape and axis order.
        result = np.reshape(result, oshape)
        result = np.transpose(result, restore_axes)
        return result
    else:
        return func(np.array(data), deltas=deltas, dim_order=dim_order)[
            dim_order.index(dim)
        ].magnitude
def _metpy_uv(func, u, v, lat, lon):
    """Wrapper for MetPy vector functions

    Parameters:
    func -- the MetPy function
    u -- the u-component xarray or netcdf variable (already sliced)
    v -- the v-component xarray or netcdf variable (already sliced)
    lat -- an array of latitudes, the shape must match that of data
    lon -- an array of longitudes, the shape must match that of data
    """
    # xarray exposes .dims, netCDF4 exposes .dimensions.
    if hasattr(u, "dims"):
        dims = u.dims
    else:
        dims = u.dimensions
    dx, dy = metpy.calc.lat_lon_grid_deltas(np.array(lon), np.array(lat))
    dim_order = "".join([d for d in dims if d in "yx"])
    if len(dims) > 2:
        # Move the spatial (y, x) axes to the end so the remaining leading
        # axes (e.g. time, depth) can be flattened and iterated over.
        axes = list(range(0, len(dims)))
        new_axes = list(axes)
        new_dims = list(dims)
        if dim_order == "yx":
            new_axes += [new_axes.pop(new_dims.index("y"))]
            new_dims += [new_dims.pop(new_dims.index("y"))]
            new_axes += [new_axes.pop(new_dims.index("x"))]
            new_dims += [new_dims.pop(new_dims.index("x"))]
            restore_axes = [x for _, x in sorted(zip(new_axes, range(0, len(dims))))]
        else:
            new_axes += [new_axes.pop(new_dims.index("x"))]
            new_dims += [new_dims.pop(new_dims.index("x"))]
            new_axes += [new_axes.pop(new_dims.index("y"))]
            new_dims += [new_dims.pop(new_dims.index("y"))]
            restore_axes = [x for _, x in sorted(zip(new_axes, range(0, len(dims))))]
        u = np.transpose(np.array(u), new_axes)
        v = np.transpose(np.array(v), new_axes)
        oshape = u.shape
        extra_axes = u.shape[:-2]
        u = np.reshape(u, (functools.reduce(np.multiply, extra_axes), *u.shape[-2:]))
        v = np.reshape(v, (functools.reduce(np.multiply, extra_axes), *v.shape[-2:]))
        # Apply the MetPy function slice-by-slice over the flattened axes.
        result = []
        for j in range(0, len(u)):
            result.append(
                func(
                    np.array(u[j]) * units.meter / units.second,
                    np.array(v[j]) * units.meter / units.second,
                    dx,
                    dy,
                    dim_order=dim_order,
                ).magnitude
            )
        result = np.array(result)
        # Restore the original shape and axis order.
        result = np.reshape(result, oshape)
        result = np.transpose(result, restore_axes)
        return result
    else:
        u = np.array(u) * units.meter / units.second
        v = np.array(v) * units.meter / units.second
        return func(u, v, dx, dy, dim_order=dim_order).magnitude
def geostrophic_x(h, lat, lon):
    """Calculates the X component of geostrophic currents

    Parameters:
    h -- Sea Surface Height, xarray or netcdf variable, already sliced
    lat -- an array of latitudes, the shape must match that of h
    lon -- an array of longitudes, the shape must match that of h
    """
    if isinstance(lat, xr.Variable):
        lat = lat.values
    # xarray exposes .dims, netCDF4 exposes .dimensions.
    if hasattr(h, "dims"):
        dims = h.dims
    else:
        dims = h.dimensions
    dim_order = "".join([d for d in dims if d in "yx"])
    def f(heights, **kwargs):
        # Coriolis parameter varies with latitude.
        c = metpy.calc.coriolis_parameter(lat * _ureg.degrees)
        if dim_order == "yx":
            dy, dx = kwargs["deltas"]
        else:
            dx, dy = kwargs["deltas"]
        return metpy.calc.geostrophic_wind(
            xr.DataArray(heights), c, dx, dy, dim_order=kwargs["dim_order"]
        )
    # NOTE(review): dim_order[0] is 'y' when dims are ordered (y, x); verify
    # this selects the intended component (compare geostrophic_y).
    return _metpy(f, h, lat, lon, dim_order[0])
def geostrophic_y(h, lat, lon):
    """Calculates the Y component of geostrophic currents

    Parameters:
    h -- Sea Surface Height, xarray or netcdf variable, already sliced
    lat -- an array of latitudes, the shape must match that of h
    lon -- an array of longitudes, the shape must match that of h
    """
    if isinstance(lat, xr.Variable):
        lat = lat.values
    # xarray exposes .dims, netCDF4 exposes .dimensions.
    if hasattr(h, "dims"):
        dims = h.dims
    else:
        dims = h.dimensions
    dim_order = "".join([d for d in dims if d in "yx"])
    def f(heights, **kwargs):
        # Coriolis parameter varies with latitude.
        c = metpy.calc.coriolis_parameter(lat * _ureg.degrees)
        if dim_order == "yx":
            dy, dx = kwargs["deltas"]
        else:
            dx, dy = kwargs["deltas"]
        return metpy.calc.geostrophic_wind(
            xr.DataArray(heights), c, dx, dy, dim_order=kwargs["dim_order"]
        )
    # NOTE(review): dim_order[1] is 'x' when dims are ordered (y, x); verify
    # this selects the intended component (compare geostrophic_x).
    return _metpy(f, h, lat, lon, dim_order[1])
def vorticity(u, v, lat, lon):
    """Calculates the vorticity

    Parameters:
    u -- u component of the current, xarray or netcdf variable, already sliced
    v -- v component of the current, xarray or netcdf variable, already sliced
    lat -- an array of latitudes, the shape must match that of u and v
    lon -- an array of longitudes, the shape must match that of u and v
    """
    # Thin wrapper: all slicing/axis handling is done by _metpy_uv.
    return _metpy_uv(metpy.calc.vorticity, u, v, lat, lon)
def divergence(u, v, lat, lon):
    """Calculates the divergence

    Parameters:
    u -- u component of the current, xarray or netcdf variable, already sliced
    v -- v component of the current, xarray or netcdf variable, already sliced
    lat -- an array of latitudes, the shape must match that of u and v
    lon -- an array of longitudes, the shape must match that of u and v
    """
    # Thin wrapper: all slicing/axis handling is done by _metpy_uv.
    return _metpy_uv(metpy.calc.divergence, u, v, lat, lon)
def gradient_x(d, lat, lon):
    """Calculates the X component of the gradient of a variable

    Parameters:
    d -- xarray or netcdf variable, already sliced
    lat -- an array of latitudes, the shape must match that of d
    lon -- an array of longitudes, the shape must match that of d
    """
    # Thin wrapper: all slicing/axis handling is done by _metpy.
    return _metpy(metpy.calc.gradient, d, lat, lon, "x")
def gradient_y(d, lat, lon):
    """Compute the Y component of the gradient of a variable.

    Parameters:
    d -- xarray or netcdf variable, already sliced
    lat -- an array of latitudes, the shape must match that of d
    lon -- an array of longitudes, the shape must match that of d
    """
    gradient_func = metpy.calc.gradient
    return _metpy(gradient_func, d, lat, lon, "y")
|
DFO-Ocean-Navigator/Ocean-Data-Map-Project
|
data/calculated_parser/functions.py
|
Python
|
gpl-3.0
| 26,417
|
[
"NetCDF"
] |
d47cf5ef74c525e3c4054cfcaa7752bb75468de18c6ce68f21a7cc952b048d02
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import sys
import unittest
if not '..' in sys.path:
sys.path.insert(0, '..')
from octopus.backends import Backend, ProjectsIterator, ReleasesIterator
# Backends for unit tests
class FakeBackend(Backend):
    """Minimal Backend subclass that supplies a fixed name, for tests."""

    def __init__(self):
        Backend.__init__(self, 'FakeBackend')
class UnnamedBackend(Backend):
    """Backend subclass that omits the required name, used to assert failure."""

    def __init__(self):
        Backend.__init__(self)
class TestBackend(unittest.TestCase):
    # Unit tests for the Backend base class contract.

    def test_backend(self):
        # The name given at construction time is exposed as `name`.
        backend = FakeBackend()
        self.assertEqual('FakeBackend', backend.name)

    def test_unnamed_backend(self):
        # Constructing a Backend without a name must raise TypeError.
        self.assertRaises(TypeError, UnnamedBackend)

    def test_readonly_properties(self):
        # `name` is read-only: assignment raises AttributeError and the
        # original value is preserved.
        backend = FakeBackend()
        self.assertRaises(AttributeError, setattr, backend, 'name', 'ErrorName')
        self.assertEqual('FakeBackend', backend.name)
class TestProjectsIterator(unittest.TestCase):
    # Checks that ProjectsIterator satisfies the iterable protocol.

    def test_is_iterable(self):
        # NOTE(review): collections.Iterable is deprecated since Python 3.3
        # and removed in 3.10 in favour of collections.abc.Iterable --
        # confirm the supported Python versions before changing it.
        import collections
        iterator = ProjectsIterator()
        self.assertIsInstance(iterator, collections.Iterable)
class TestReleasesIterator(unittest.TestCase):
    # Checks that ReleasesIterator satisfies the iterable protocol.

    def test_is_iterable(self):
        # NOTE(review): see TestProjectsIterator -- collections.Iterable is
        # deprecated in modern Python; collections.abc.Iterable is preferred.
        import collections
        iterator = ReleasesIterator()
        self.assertIsInstance(iterator, collections.Iterable)
if __name__ == "__main__":
    # Run all test cases in this module when executed as a script.
    unittest.main()
|
MetricsGrimoire/Octopus
|
tests/test_backend.py
|
Python
|
gpl-3.0
| 2,139
|
[
"Octopus"
] |
9f87cf926e55a541f2b3a94b29b4e0fc86e37eb758694ce04af20e145afbe33d
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
import time
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 142:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def feature_vector_diff(Zt1,Zt2,i): # For 1.2 Seconds (Wipe Container_Movable: All Trials)
    # Builds one feature-vector column from two time series, sampling every
    # 3rd time step over a window of 121 steps starting at index i.
    # Zt1 -- time-varying data array; columns 3 and 4 are used
    # Zt2 -- time-varying tracking array; column 1 is used
    # Returns a stacked column vector [Fvec_a; Fvec_b; Fvec_c].
    data_matrix = np.array([0,0,0])  # dummy first row, dropped via [1:,...] below
    n = i+121
    while (i < n):
        data_instant = np.array([Zt1[i,3],Zt1[i,4],Zt2[i,1]])
        data_matrix = np.row_stack([data_matrix, data_instant])
        i = i+3
    # First channel as a column vector, plus summary statistics.
    Fvec_a = np.matrix(data_matrix[1:,0]).T
    max_a = np.max(abs(Fvec_a))
    min_a = np.min(abs(Fvec_a))
    mean_a = np.mean(Fvec_a)
    std_a = np.std(Fvec_a)
    # Alternative normalizations kept for experimentation:
    #Fvec_a = (Fvec_a)/max_a
    #Fvec_a = (Fvec_a-mean_a)
    #Fvec_a = (Fvec_a-mean_a)/max_a
    #Fvec_a = (Fvec_a-mean_a)/std_a
    Fvec_b = np.matrix(data_matrix[1:,1]).T
    max_b = np.max(abs(Fvec_b))
    min_b = np.min(abs(Fvec_b))
    mean_b = np.mean(Fvec_b)
    std_b = np.std(Fvec_b)
    #Fvec_b = (Fvec_b)/max_b
    #Fvec_b = (Fvec_b-mean_b)
    #Fvec_b = (Fvec_b-mean_b)/max_b
    #Fvec_b = (Fvec_b-mean_b)/std_b
    Fvec_c = np.matrix(data_matrix[1:,2]).T
    max_c = np.max(abs(Fvec_c))
    min_c = np.min(abs(Fvec_c))
    mean_c = np.mean(Fvec_c)
    std_c = np.std(Fvec_c)
    #Fvec_c = (Fvec_c)/max_c
    #Fvec_c = (Fvec_c-mean_c)
    #Fvec_c = (Fvec_c-mean_c)/max_c
    #Fvec_c = (Fvec_c-mean_c)/std_c
    # Rescale the third channel to the magnitude of the larger of the first two.
    Fvec_c = Fvec_c*np.max((max_a,max_b))/max_c
    # Stack all three channels into a single long column vector.
    Fvec = np.row_stack([Fvec_a,Fvec_b,Fvec_c])
    n_Fvec, m_Fvec = np.shape(Fvec)
    print 'Feature_Vector_Shape:',n_Fvec, m_Fvec
    return Fvec
if __name__ == '__main__':

    # Time-manipulation for Video
    index = 0
    while (index < 140):
        print 'Getting data:'
        time.sleep(0.1)
        index = index+1

    # Pad the original 123x140 feature matrix with two extra columns for the
    # two new test objects.
    Fmat = np.matrix(np.zeros((123,142)))
    Fmat[:,0:140] = Fmat_original

    # New Objects (Two Objects)
    # First_Object
    ta_no_fo_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_data_first_object_trial_3.pkl')
    fa_no_fo_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_tracking_data_first_object_trial_3.pkl')
    # Second_Object
    ta_no_so_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_data_second_object_trial_3.pkl')
    fa_no_so_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_tracking_data_second_object_trial_3.pkl')

    # Creating Feature Vector
    Fmat[:,140] = feature_vector_diff(ta_no_fo_t1,fa_no_fo_t1,300)
    Fmat[:,141] = feature_vector_diff(ta_no_so_t1,fa_no_so_t1,300)

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot

    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    W_mov_fixed = eigvec_total[:,0:12]
    W_soft_rigid = eigvec_total[:,0:8]

    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z

    #Projected Data:
    Y_mov_fixed = (W_mov_fixed.T)*B
    Y_soft_rigid = (W_soft_rigid.T)*B

    #Using PYMVPA
    # Columns 0..139 are the training trials; 140..141 are the two new objects.
    Y_train_mov_fixed = Y_mov_fixed[:,:140]
    Y_test_mov_fixed = Y_mov_fixed[:,140:142]
    Y_train_soft_rigid = Y_soft_rigid[:,:140]
    Y_test_soft_rigid = Y_soft_rigid[:,140:142]
    PCA_training_data_mov_fixed = np.array(Y_train_mov_fixed.T)
    PCA_test_data_mov_fixed = np.array(Y_test_mov_fixed.T)
    PCA_training_data_soft_rigid = np.array(Y_train_soft_rigid.T)
    PCA_test_data_soft_rigid = np.array(Y_test_soft_rigid.T)
    PCA_training_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
    PCA_training_label_2 = ['Rigid']*70 + ['Soft']*70
    # All eight possible label pairs for the two test objects; the pair whose
    # transfer error is minimal is reported below.
    PCA_test_1_label = ['Fixed']*1 + ['Fixed']*1
    PCA_test_2_label = ['Fixed']*1 + ['Movable']*1
    PCA_test_3_label = ['Movable']*1 + ['Fixed']*1
    PCA_test_4_label = ['Movable']*1 + ['Movable']*1
    PCA_test_5_label = ['Rigid']*1 + ['Rigid']*1
    PCA_test_6_label = ['Soft']*1 + ['Soft']*1
    PCA_test_7_label = ['Soft']*1 + ['Rigid']*1
    PCA_test_8_label = ['Rigid']*1 + ['Soft']*1
    PCA_training_chunk = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
    PCA_test_1_chunk = ['Fixed']*1 + ['Fixed']*1
    PCA_test_2_chunk = ['Fixed']*1 + ['Movable']*1
    PCA_test_3_chunk = ['Movable']*1 + ['Fixed']*1
    PCA_test_4_chunk = ['Movable']*1 + ['Movable']*1
    PCA_test_5_chunk = ['Rigid']*1 + ['Rigid']*1
    PCA_test_6_chunk = ['Soft']*1 + ['Soft']*1
    PCA_test_7_chunk = ['Soft']*1 + ['Rigid']*1
    PCA_test_8_chunk = ['Rigid']*1 + ['Soft']*1
    clf_mov_fixed = kNN(k=3)
    clf_soft_rigid = kNN(k=4)
    terr_mov_fixed = TransferError(clf_mov_fixed)
    terr_soft_rigid = TransferError(clf_soft_rigid)
    ds_training_1 = Dataset(samples=PCA_training_data_mov_fixed,labels=PCA_training_label_1,chunks=PCA_training_chunk)
    ds_training_2 = Dataset(samples=PCA_training_data_soft_rigid,labels=PCA_training_label_2,chunks=PCA_training_chunk)
    ds_test_1 = Dataset(samples=PCA_test_data_mov_fixed,labels=PCA_test_1_label,chunks=PCA_test_1_chunk)
    ds_test_2 = Dataset(samples=PCA_test_data_mov_fixed,labels=PCA_test_2_label,chunks=PCA_test_2_chunk)
    ds_test_3 = Dataset(samples=PCA_test_data_mov_fixed,labels=PCA_test_3_label,chunks=PCA_test_3_chunk)
    ds_test_4 = Dataset(samples=PCA_test_data_mov_fixed,labels=PCA_test_4_label,chunks=PCA_test_4_chunk)
    ds_test_5 = Dataset(samples=PCA_test_data_soft_rigid,labels=PCA_test_5_label,chunks=PCA_test_5_chunk)
    ds_test_6 = Dataset(samples=PCA_test_data_soft_rigid,labels=PCA_test_6_label,chunks=PCA_test_6_chunk)
    ds_test_7 = Dataset(samples=PCA_test_data_soft_rigid,labels=PCA_test_7_label,chunks=PCA_test_7_chunk)
    ds_test_8 = Dataset(samples=PCA_test_data_soft_rigid,labels=PCA_test_8_label,chunks=PCA_test_8_chunk)
    error_1 = terr_mov_fixed(ds_test_1,ds_training_1)
    error_2 = terr_mov_fixed(ds_test_2,ds_training_1)
    error_3 = terr_mov_fixed(ds_test_3,ds_training_1)
    error_4 = terr_mov_fixed(ds_test_4,ds_training_1)
    error_5 = terr_soft_rigid(ds_test_5,ds_training_2)
    error_6 = terr_soft_rigid(ds_test_6,ds_training_2)
    error_7 = terr_soft_rigid(ds_test_7,ds_training_2)
    error_8 = terr_soft_rigid(ds_test_8,ds_training_2)
    # The label assignment with minimal transfer error wins.
    error_fixed_movable = min(error_1,error_2,error_3,error_4)
    error_soft_rigid = min(error_5,error_6,error_7,error_8)
    if error_fixed_movable == error_1:
        print "Both Objects are Fixed"
    elif error_fixed_movable == error_2 or error_fixed_movable == error_3:
        print "One object is Fixed and the other is Movable"
    elif error_fixed_movable == error_4:
        print "Both Objects are Movable"
    if error_soft_rigid == error_5:
        print "Both Objects are Rigid"
    elif error_soft_rigid == error_7 or error_soft_rigid == error_8:
        print "One object is Soft and the other is Rigid"
    elif error_soft_rigid == error_6:
        print "Both Objects are Soft"
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Multiple_Contact_Classification/New_classify_2_objects_2_categories_1200ms_scaled.py
|
Python
|
mit
| 10,144
|
[
"Mayavi"
] |
6403fd46f8f88cdfe7ae2780296bbc6c7a157448bb3d22b052870486a0b97fc0
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants and types shared by tf.Transform Beam package."""
import collections
import enum
import os
import uuid
import apache_beam as beam
from apache_beam.typehints import Union
from tensorflow_transform import nodes
from tfx_bsl.telemetry import util
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
NUMERIC_TYPE = Union[float, int]
PRIMITIVE_TYPE = Union[NUMERIC_TYPE, str, bytes]
METRICS_NAMESPACE = util.MakeTfxNamespace(['Transform'])
# Depending on the environment, (TF 1.x vs 2.x for e.g.,) we may want to
# register different implementations of beam nodes for the TFT beam nodes. These
# tags are used to identify the implementation to use under the current
# environment.
class EnvironmentTags(enum.Enum):
TF_COMPAT_V1 = 'tf_compat_v1'
TF_V2_ONLY = 'tf_v2_only'
_ALLOWED_PTRANSFORM_TAGS = [tag.value for tag in EnvironmentTags]
def get_unique_temp_path(base_temp_dir):
  """Return a path to a unique temp dir from given base temp dir.

  Note this doesn't create the path that it returns.

  Args:
    base_temp_dir: A base directory

  Returns:
    The path name of a subdirectory of base_temp_dir, where the subdirectory is
    unique.
  """
  unique_name = uuid.uuid4().hex
  return os.path.join(base_temp_dir, unique_name)
class _PtransformWrapper:
  """A wrapper around registered implementations of beam nodes."""
  # Sentinel key for the environment-independent (general) registration.
  _GENERAL_ENVIRONMENT_TAG = object()

  def __init__(self):
    # Maps environment tag (or the general sentinel) -> ptransform class.
    self._ptransform_by_tag = {}

  def add_ptransform(self, ptransform_class, tags):
    """Add `ptransform_class` for all `tags`.

    Args:
      ptransform_class: The implementation to register.
      tags: A set of environment tags, or None to register `ptransform_class`
        as the general (environment-independent) implementation.

    Raises:
      AssertionError: If an implementation is already registered for any of
        the given tags.
    """
    # Many tags can refer to the same ptransform_class, but each
    # ptransform_class should be registered only once.
    tags = {self._GENERAL_ENVIRONMENT_TAG} if tags is None else tags
    # Fixed: the original asserted a bare generator expression, which is
    # always truthy, so the duplicate-registration check was never enforced.
    assert all(tag not in self._ptransform_by_tag for tag in tags)
    for tag in tags:
      self._ptransform_by_tag[tag] = ptransform_class

  def get_ptransform(self, tag):
    """Retrieves ptransform for `tag`.

    Args:
      tag: A string key (or None) to retrieve corresponding ptransform.

    Returns:
      A tuple of a registered beam.PTransform implementation and the tag it was
      registered with.

    Raises:
      KeyError: If no registered PTransform implementation could be found.
    """
    # Fall back to the general registration when no tag-specific one exists.
    if tag is None or tag not in self._ptransform_by_tag:
      return self._ptransform_by_tag[self._GENERAL_ENVIRONMENT_TAG], None
    return self._ptransform_by_tag[tag], tag.value
_PTRANSFORM_BY_OPERATION_DEF_SUBCLASS = (
collections.defaultdict(_PtransformWrapper))
def register_ptransform(operation_def_subclass, tags=None):
  """Decorator to register a PTransform as the implementation for an analyzer.

  This function is used to define implementations of the analyzers defined in
  tensorflow_transform/analyzer_nodes.py and also the internal operations
  defined in tensorflow_transform/beam/beam_nodes.py. The registered PTransform
  will be invoked as follows:

  outputs = inputs | operation.label >> MyPTransform(operation, extra_args)

  where operation is a the instance of the subclass that was registered,
  extra_args are global arguments available to each PTransform (see
  ConstructBeamPipelineVisitor.extra_args) and `inputs` is a tuple of
  PCollections correpsonding to the inputs of the OperationNode being
  implemented. The return value `outputs` should be a a tuple of PCollections
  corresponding to the outputs of the OperationNode. If the OperationNode has
  a single output then the return value can also be a PCollection instead of a
  tuple.

  In some cases the implementation cannot be a PTransform and so instead the
  value being registered may also be a function. The registered function will
  be invoked as follows:

  outputs = my_function(inputs, operation, extra_args)

  where inputs, operation, extra_args and outputs are the same as for the
  PTransform case.

  Args:
    operation_def_subclass: The class of attributes that is being registered.
      Should be a subclass of `tensorflow_transform.nodes.OperationDef`.
    tags: A set of `EnvironmentTags` members. If provided, the PTransform will
      be registered against all of them.

  Returns:
    A class decorator that registers a PTransform or function as an
    implementation of the OperationDef subclass.
  """
  def register(ptransform_class):
    assert isinstance(ptransform_class, type)
    assert issubclass(ptransform_class, beam.PTransform)
    # Fixed: the original asserted a bare generator expression, which is
    # always truthy, so the tag validation was never enforced. Tags are used
    # via `tag.value` downstream (see _PtransformWrapper.get_ptransform), so
    # they must be EnvironmentTags members.
    assert tags is None or all(
        isinstance(tag, EnvironmentTags) for tag in tags)
    _PTRANSFORM_BY_OPERATION_DEF_SUBCLASS[
        operation_def_subclass].add_ptransform(ptransform_class, tags)
    return ptransform_class
  return register
class ConstructBeamPipelineVisitor(nodes.Visitor):
  """Visitor that constructs the beam pipeline from the node graph."""

  ExtraArgs = tfx_namedtuple.namedtuple(  # pylint: disable=invalid-name
      'ExtraArgs', [
          'base_temp_dir',
          'pipeline',
          'flat_pcollection',
          'pcollection_dict',
          'tf_config',
          'graph',
          'input_signature',
          'input_specs',
          'input_tensor_adapter_config',
          'use_tf_compat_v1',
          'cache_pcoll_dict',
          'preprocessing_fn',
          'analyzers_fingerprint',
      ])

  def __init__(self, extra_args):
    # Global arguments made available to every registered PTransform.
    self._extra_args = extra_args

  def visit(self, operation, inputs):
    """Applies the PTransform registered for `operation` to `inputs`.

    Looks up the implementation registered for the operation's class and the
    current environment (TF compat-v1 vs. TF2-only), then applies it to the
    input PCollections. Raises ValueError when no implementation is found.
    """
    try:
      ptransform_wrapper = (
          _PTRANSFORM_BY_OPERATION_DEF_SUBCLASS[operation.__class__])
      environment_tag = (
          EnvironmentTags.TF_COMPAT_V1
          if self._extra_args.use_tf_compat_v1 else EnvironmentTags.TF_V2_ONLY)
      ptransform, tag = ptransform_wrapper.get_ptransform(environment_tag)
    except KeyError:
      raise ValueError('No implementation for {} was registered'.format(
          operation))
    # TODO(zoyahav): Consider extracting a single PCollection before passing to
    # ptransform if len(inputs) == 1.
    # Tag the beam label so environment-specific implementations get distinct
    # step names in the pipeline graph.
    if tag is None:
      tagged_label = operation.label
    else:
      tagged_label = '{label}[{tag}]'.format(label=operation.label, tag=tag)
    outputs = ((inputs or beam.pvalue.PBegin(self._extra_args.pipeline))
               | tagged_label >> ptransform(operation, self._extra_args))
    # Normalize a single PCollection result to a one-element tuple.
    if isinstance(outputs, beam.pvalue.PCollection):
      return (outputs,)
    else:
      return outputs

  def validate_value(self, value):
    # Every value flowing through the graph must be a PCollection.
    if not isinstance(value, beam.pvalue.PCollection):
      raise TypeError('Expected a PCollection, got {} of type {}'.format(
          value, type(value)))
class IncrementCounter(beam.PTransform):
  """A PTransform that increments a counter once per PCollection.

  The output PCollection is the same as the input PCollection.
  """

  def __init__(self, counter_name):
    # Name under which the metric is reported in METRICS_NAMESPACE.
    self._counter_name = counter_name

  def _bump_counter(self, ignored_element):
    del ignored_element
    counter = beam.metrics.Metrics.counter(METRICS_NAMESPACE,
                                           self._counter_name)
    counter.inc()
    return None

  def expand(self, pcoll):
    # Side-effect only: a single-element PCollection drives one counter
    # increment, while the input flows through unchanged.
    _ = (
        pcoll.pipeline
        | 'CreateSole' >> beam.Create([None])
        | 'Count' >> beam.Map(self._bump_counter))
    return pcoll
|
tensorflow/transform
|
tensorflow_transform/beam/common.py
|
Python
|
apache-2.0
| 7,881
|
[
"VisIt"
] |
2c7302afb59b1f1d5eadf8f833386e30c20827e59e3cdcdff550b81abe5d23b7
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
# imports
# PySol imports
from settings import TITLE, PACKAGE_URL, TOOLKIT, VERSION
from pysoltk import make_help_toplevel
from pysoltk import MfxMessageDialog
from pysoltk import PysolAboutDialog
from pysoltk import HTMLViewer
# ************************************************************************
# *
# ************************************************************************
def help_about(app, timeout=0, sound=True):
    # Show the "About" dialog. Returns the dialog status; if the user pressed
    # the second button ("Credits..."), the credits dialog is opened too.
    if sound:
        app.audio.playSample("about")
    t = _("A Python Solitaire Game Collection\n")
    # Easter egg: 80% of the time show the alternative subtitle.
    if app.miscrandom.random() < 0.8:
        t = _("A World Domination Project\n")
    strings=(_("&Nice"), _("&Credits..."))
    if timeout:
        strings=(_("&Enjoy"),)
    version = _("Version %s") % VERSION
    d = PysolAboutDialog(app, app.top, title=_("About ") + TITLE,
                         timeout=timeout,
                         text=_('''PySol Fan Club edition
%s%s
Copyright (C) 1998 - 2003 Markus F.X.J. Oberhumer.
Copyright (C) 2003 Mt. Hood Playing Card Co.
Copyright (C) 2005 - 2009 Skomoroh.
All Rights Reserved.
PySol is free software distributed under the terms
of the GNU General Public License.
For more information about this application visit''') % (t, version),
                         url=PACKAGE_URL,
                         image=app.gimages.logos[2],
                         strings=strings, default=0,
                         separator=True)
    # status == 0 and button == 1 -> the "Credits..." button was pressed.
    if d.status == 0 and d.button == 1:
        help_credits(app, sound=sound)
    return d.status
def help_credits(app, timeout=0, sound=True):
    # Show the "Credits" dialog. Returns the dialog status.
    if sound:
        app.audio.playSample("credits")
    # Name the GUI toolkit in use so it can be credited in the text below.
    t = ""
    if TOOLKIT == "tk" : t = "Tcl/Tk"
    elif TOOLKIT == "gtk": t = "PyGTK"
    elif TOOLKIT == "kde": t = "pyKDE"
    elif TOOLKIT == "wx" : t = "wxPython"
    d = MfxMessageDialog(app.top, title=_("Credits"), timeout=timeout,
                         text=TITLE+_(''' credits go to:
Volker Weidner for getting me into Solitaire
Guido van Rossum for the initial example program
T. Kirk for lots of contributed games and cardsets
Carl Larsson for the background music
The Gnome AisleRiot team for parts of the documentation
Natascha
The Python, %s, SDL & Linux crews
for making this program possible''') % t,
                         image=app.gimages.logos[3], image_side="right",
                         separator=True)
    return d.status
# ************************************************************************
# *
# ************************************************************************
# Module-level cache: the single shared HTML viewer window and the path of
# the help index page, reused across help_html() calls.
help_html_viewer = None
help_html_index = None
def help_html(app, document, dir_, top=None):
    # Display an HTML help document in the shared viewer window, creating the
    # window (and resolving the help index) on first use. Returns the viewer,
    # or None if the document could not be found.
    global help_html_viewer, help_html_index
    if not document:
        return None
    if top is None:
        top = app.top
    try:
        doc = app.dataloader.findFile(document, dir_)
        if help_html_index is None:
            document, dir_ = "index.html", "html"
            help_html_index = app.dataloader.findFile(document, dir_)
    except EnvironmentError:
        d = MfxMessageDialog(app.top, title=TITLE + _(" HTML Problem"),
                             text=_("Cannot find help document\n") + document,
                             bitmap="warning")
        return None
    ##print doc, help_html_index
    try:
        # Reuse the cached viewer window if it is still alive.
        viewer = help_html_viewer
        #if viewer.parent.winfo_parent() != top._w:
        #    viewer.destroy()
        #    viewer = None
        viewer.updateHistoryXYView()
        viewer.display(doc, relpath=0)
    except:
        # NOTE(review): deliberate bare except -- falls back to creating a
        # fresh viewer when the cached one is missing or was destroyed.
        ##traceback.print_exc()
        top = make_help_toplevel(app, title=TITLE+_(" Help"))
        if top.winfo_screenwidth() < 800 or top.winfo_screenheight() < 600:
            #maximized = 1
            top.wm_minsize(300, 150)
        else:
            #maximized = 0
            top.wm_minsize(400, 200)
        viewer = HTMLViewer(top, app, help_html_index)
        viewer.display(doc)
    #wm_map(top, maximized=maximized)
    # Bring the viewer window to the front and remember it for next time.
    viewer.parent.wm_deiconify()
    viewer.parent.tkraise()
    help_html_viewer = viewer
    return viewer
def destroy_help_html():
    """Tear down the shared HTML help viewer window, if one exists.

    Best-effort: any failure (viewer never created, already destroyed,
    widget errors) is deliberately ignored. Narrowed from a bare ``except:``
    so that SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    try:
        help_html_viewer.destroy()
    except Exception:
        pass
|
TrevorLowing/PyGames
|
pysollib/help.py
|
Python
|
gpl-2.0
| 5,147
|
[
"VisIt"
] |
268a30983212bcc45966f97dccac9048d4e9c6460b442a82c0adfb347d44b0ae
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Option class representing an enumerated list of possible values.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import Option
#-------------------------------------------------------------------------
#
# EnumeratedListOption class
#
#-------------------------------------------------------------------------
class EnumeratedListOption(Option):
    """
    This class describes an option that provides a finite number of values.
    Each possible value is assigned a value and a description.
    """

    # Emitted whenever the set of possible values changes.
    __signals__ = { 'options-changed' : None }

    def __init__(self, label, value):
        """
        @param label: A friendly label to be applied to this option.
            Example: "Paper Size"
        @type label: string
        @param value: An initial value for this option.
            Example: 5
        @type value: int
        @return: nothing
        """
        Option.__init__(self, label, value)
        # List of (value, description) tuples of allowed values.
        self.__items = []

    def add_item(self, value, description):
        """
        Add an item to the list of possible values.

        @param value: The value that corresponds to this item.
            Example: 5
        @type value: int
        @param description: A description of this value.
            Example: "8.5 x 11"
        @type description: string
        @return: nothing
        """
        self.__items.append((value, description))
        self.emit('options-changed')

    def set_items(self, items):
        """
        Add a list of items to the list of possible values.

        @param items: A list of tuples containing value, description pairs.
            Example: [ (5,"8.5 x 11"), (6,"11 x 17")]
        @type items: array
        @return: nothing
        """
        self.__items = items
        self.emit('options-changed')

    def get_items(self):
        """
        Get all the possible values for this option.

        @return: an array of tuples containing (value,description) pairs.
        """
        return self.__items

    def clear(self):
        """
        Clear all possible values from this option.

        @return: nothing.
        """
        self.__items = []
        self.emit('options-changed')

    def set_value(self, value):
        """
        Set the value of this option.

        @param value: A value for this option.
            Example: True
        @type value: The type will depend on the type of option.
        @return: nothing
        """
        # Only accept values present in the item list; otherwise warn on
        # stdout and leave the current value unchanged.
        if value in (v for v, d in self.__items):
            Option.set_value(self, value)
        else:
            print "Value '%s' not found for option '%s'" % (str(value),
                                                            self.get_label())
|
arunkgupta/gramps
|
gramps/gen/plug/menu/_enumeratedlist.py
|
Python
|
gpl-2.0
| 3,749
|
[
"Brian"
] |
5797dd6bbee1a011a6a4aa3722418f804ee4aa587e52207bd4e66630e789b185
|
"""
Displays Agg images in the browser, with interactivity
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# The WebAgg backend is divided into two modules:
#
# - `backend_webagg_core.py` contains code necessary to embed a WebAgg
# plot inside of a web application, and communicate in an abstract
# way over a web socket.
#
# - `backend_webagg.py` contains a concrete implementation of a basic
# application, implemented with tornado.
import six
import datetime
import errno
import json
import os
import random
import sys
import socket
import threading
try:
import tornado
except ImportError:
raise RuntimeError("The WebAgg backend requires Tornado.")
import tornado.web
import tornado.ioloop
import tornado.websocket
import matplotlib
from matplotlib import rcParams
from matplotlib import backend_bases
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
from . import backend_webagg_core as core
from .backend_webagg_core import TimerTornado
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    webagg_canvas = FigureCanvasWebAgg(figure)
    return core.FigureManagerWebAgg(webagg_canvas, num)
def draw_if_interactive():
    """
    Is called after every pylab drawing command
    """
    if not matplotlib.is_interactive():
        return
    manager = Gcf.get_active()
    if manager is not None:
        manager.canvas.draw_idle()
class Show(backend_bases.ShowBase):
    # Blocking "show": start the tornado web application and point the user
    # (or their browser, depending on rcParams) at its URL.

    def mainloop(self):
        WebAggApplication.initialize()
        url = "http://127.0.0.1:{port}{prefix}".format(
            port=WebAggApplication.port,
            prefix=WebAggApplication.url_prefix)
        if rcParams['webagg.open_in_browser']:
            import webbrowser
            webbrowser.open(url)
        else:
            print("To view figure, visit {0}".format(url))
        WebAggApplication.start()

# Module-level entry point expected by the matplotlib backend machinery.
show = Show().mainloop
class ServerThread(threading.Thread):
    # Runs the tornado IOLoop in a background thread.
    def run(self):
        tornado.ioloop.IOLoop.instance().start()

# Shared server thread instance (started when the backend launches).
webagg_server_thread = ServerThread()
class FigureCanvasWebAgg(core.FigureCanvasWebAggCore):
    def show(self):
        # show the figure window
        show()

    def new_timer(self, *args, **kwargs):
        # Timers are backed by the tornado IOLoop.
        return TimerTornado(*args, **kwargs)

    def start_event_loop(self, timeout):
        backend_bases.FigureCanvasBase.start_event_loop_default(
            self, timeout)
    # Reuse the base-class docstring for the delegated implementation.
    start_event_loop.__doc__ = \
        backend_bases.FigureCanvasBase.start_event_loop_default.__doc__

    def stop_event_loop(self):
        backend_bases.FigureCanvasBase.stop_event_loop_default(self)
    # Reuse the base-class docstring for the delegated implementation.
    stop_event_loop.__doc__ = \
        backend_bases.FigureCanvasBase.stop_event_loop_default.__doc__
class WebAggApplication(tornado.web.Application):
    """Tornado application serving matplotlib figures over HTTP/WebSockets.

    The application is a process-wide singleton: ``initialize()`` creates it
    and binds a port at most once, and ``start()`` runs the IO loop at most
    once (both guarded by the class-level flags below).
    """
    # Process-wide guards for initialize()/start().
    initialized = False
    started = False
    class FavIcon(tornado.web.RequestHandler):
        # Serves the matplotlib logo as the site favicon.
        def get(self):
            image_path = os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                'mpl-data', 'images')
            self.set_header('Content-Type', 'image/png')
            with open(os.path.join(image_path,
                      'matplotlib.png'), 'rb') as fd:
                self.write(fd.read())
    class SingleFigurePage(tornado.web.RequestHandler):
        # Renders the HTML page for one figure, identified by its number.
        def __init__(self, application, request, **kwargs):
            # url_prefix is injected by the handler registration below.
            self.url_prefix = kwargs.pop('url_prefix', '')
            return tornado.web.RequestHandler.__init__(self, application,
                                                       request, **kwargs)
        def get(self, fignum):
            fignum = int(fignum)
            manager = Gcf.get_fig_manager(fignum)
            # WebSocket endpoint the page's JS connects back to.
            ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
                                                       prefix=self.url_prefix)
            self.render(
                "single_figure.html",
                prefix=self.url_prefix,
                ws_uri=ws_uri,
                fig_id=fignum,
                toolitems=core.NavigationToolbar2WebAgg.toolitems,
                canvas=manager.canvas)
    class AllFiguresPage(tornado.web.RequestHandler):
        # Renders an overview page containing every open figure.
        def __init__(self, application, request, **kwargs):
            self.url_prefix = kwargs.pop('url_prefix', '')
            return tornado.web.RequestHandler.__init__(self, application,
                                                       request, **kwargs)
        def get(self):
            ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
                                                       prefix=self.url_prefix)
            self.render(
                "all_figures.html",
                prefix=self.url_prefix,
                ws_uri=ws_uri,
                # Sort figures by figure number for a stable display order.
                figures=sorted(
                    list(Gcf.figs.items()), key=lambda item: item[0]),
                toolitems=core.NavigationToolbar2WebAgg.toolitems)
    class MplJs(tornado.web.RequestHandler):
        # Serves the generated mpl.js client-side support code.
        def get(self):
            self.set_header('Content-Type', 'application/javascript')
            js_content = core.FigureManagerWebAgg.get_javascript()
            self.write(js_content)
    class Download(tornado.web.RequestHandler):
        # Renders a figure to the requested format and returns it for saving.
        def get(self, fignum, fmt):
            fignum = int(fignum)
            manager = Gcf.get_fig_manager(fignum)
            # TODO: Move this to a central location
            mimetypes = {
                'ps': 'application/postscript',
                'eps': 'application/postscript',
                'pdf': 'application/pdf',
                'svg': 'image/svg+xml',
                'png': 'image/png',
                'jpeg': 'image/jpeg',
                'tif': 'image/tiff',
                'emf': 'application/emf'
            }
            self.set_header('Content-Type', mimetypes.get(fmt, 'binary'))
            buff = six.BytesIO()
            manager.canvas.print_figure(buff, format=fmt)
            self.write(buff.getvalue())
    class WebSocket(tornado.websocket.WebSocketHandler):
        # Bidirectional channel: pushes images/events to the browser and
        # receives UI events (mouse, resize, toolbar) from it.
        supports_binary = True
        def open(self, fignum):
            self.fignum = int(fignum)
            self.manager = Gcf.get_fig_manager(self.fignum)
            self.manager.add_web_socket(self)
            if hasattr(self, 'set_nodelay'):
                self.set_nodelay(True)
        def on_close(self):
            self.manager.remove_web_socket(self)
        def on_message(self, message):
            message = json.loads(message)
            # The 'supports_binary' message is on a client-by-client
            # basis.  The others affect the (shared) canvas as a
            # whole.
            if message['type'] == 'supports_binary':
                self.supports_binary = message['value']
            else:
                manager = Gcf.get_fig_manager(self.fignum)
                # It is possible for a figure to be closed,
                # but a stale figure UI is still sending messages
                # from the browser.
                if manager is not None:
                    manager.handle_json(message)
        def send_json(self, content):
            self.write_message(json.dumps(content))
        def send_binary(self, blob):
            if self.supports_binary:
                self.write_message(blob, binary=True)
            else:
                # Fallback for clients without binary WebSocket support.
                # NOTE(review): str.encode('base64') is Python 2 only; on
                # Python 3 this branch would fail — verify against the
                # supported Python versions for this backend.
                data_uri = "data:image/png;base64,{0}".format(
                    blob.encode('base64').replace('\n', ''))
                self.write_message(data_uri)
    def __init__(self, url_prefix=''):
        if url_prefix:
            assert url_prefix[0] == '/' and url_prefix[-1] != '/', \
                'url_prefix must start with a "/" and not end with one.'
        super(WebAggApplication, self).__init__(
            [
                # Static files for the CSS and JS
                (url_prefix + r'/_static/(.*)',
                 tornado.web.StaticFileHandler,
                 {'path': core.FigureManagerWebAgg.get_static_file_path()}),
                # An MPL favicon
                (url_prefix + r'/favicon.ico', self.FavIcon),
                # The page that contains all of the pieces
                (url_prefix + r'/([0-9]+)', self.SingleFigurePage,
                 {'url_prefix': url_prefix}),
                # The page that contains all of the figures
                (url_prefix + r'/?', self.AllFiguresPage,
                 {'url_prefix': url_prefix}),
                (url_prefix + r'/mpl.js', self.MplJs),
                # Sends images and events to the browser, and receives
                # events from the browser
                (url_prefix + r'/([0-9]+)/ws', self.WebSocket),
                # Handles the downloading (i.e., saving) of static images
                (url_prefix + r'/([0-9]+)/download.([a-z0-9.]+)',
                 self.Download),
            ],
            template_path=core.FigureManagerWebAgg.get_static_file_path())
    @classmethod
    def initialize(cls, url_prefix='', port=None):
        # Create the application and bind a listening port (idempotent).
        if cls.initialized:
            return
        # Create the class instance
        app = cls(url_prefix=url_prefix)
        cls.url_prefix = url_prefix
        # This port selection algorithm is borrowed, more or less
        # verbatim, from IPython.
        def random_ports(port, n):
            """
            Generate a list of n random ports near the given port.
            The first 5 ports will be sequential, and the remaining n-5 will be
            randomly selected in the range [port-2*n, port+2*n].
            """
            for i in range(min(5, n)):
                yield port + i
            for i in range(n - 5):
                yield port + random.randint(-2 * n, 2 * n)
        success = None
        cls.port = rcParams['webagg.port']
        for port in random_ports(cls.port, rcParams['webagg.port_retries']):
            try:
                app.listen(port)
            except socket.error as e:
                # Try the next candidate only if this port is already taken.
                if e.errno != errno.EADDRINUSE:
                    raise
            else:
                cls.port = port
                success = True
                break
        if not success:
            raise SystemExit(
                "The webagg server could not be started because an available "
                "port could not be found")
        cls.initialized = True
    @classmethod
    def start(cls):
        # Run the tornado IO loop until interrupted (idempotent).
        if cls.started:
            return
        # Set the flag to True *before* blocking on IOLoop.instance().start()
        cls.started = True
        """
        IOLoop.running() was removed as of Tornado 2.4; see for example
        https://groups.google.com/forum/#!topic/python-tornado/QLMzkpQBGOY
        Thus there is no correct way to check if the loop has already been
        launched. We may end up with two concurrently running loops in that
        unlucky case with all the expected consequences.
        """
        print("Press Ctrl+C to stop WebAgg server")
        sys.stdout.flush()
        try:
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            print("Server is stopped")
            sys.stdout.flush()
        finally:
            cls.started = False
def ipython_inline_display(figure):
    """Return HTML for displaying *figure* inline in an IPython frontend.

    Lazily initializes the WebAgg application and starts its background
    server thread on first use, then renders the inline-figure template to a
    unicode string.
    """
    import tornado.template
    WebAggApplication.initialize()
    if not webagg_server_thread.is_alive():
        webagg_server_thread.start()
    with open(os.path.join(
            core.FigureManagerWebAgg.get_static_file_path(),
            'ipython_inline_figure.html')) as fd:
        tpl = fd.read()
    fignum = figure.number
    t = tornado.template.Template(tpl)
    return t.generate(
        prefix=WebAggApplication.url_prefix,
        fig_id=fignum,
        toolitems=core.NavigationToolbar2WebAgg.toolitems,
        canvas=figure.canvas,
        port=WebAggApplication.port).decode('utf-8')
# Standard alias consumed by matplotlib's backend-loading machinery.
FigureCanvas = FigureCanvasWebAgg
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/matplotlib/backends/backend_webagg.py
|
Python
|
bsd-2-clause
| 12,190
|
[
"VisIt"
] |
7f775a3cde595ee69e35d67bec6074d4724a2b2b569c82790aaf054ec79c6183
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Initial South schema migration for the 'bugle' app: creates the Blast
    # and ImageUpload tables plus the two M2M join tables hanging off Blast.
    # NOTE: migration files are frozen history — do not edit the schema here.
    def forwards(self, orm):
        # Adding model 'Blast'
        db.create_table('bugle_blast', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='blasts', to=orm['auth.User'])),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('extended', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('attachment', self.gf('django.db.models.fields.files.FileField')(max_length=100, blank=True)),
            ('short', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('is_todo', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_broadcast', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('done', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('bugle', ['Blast'])
        # Adding M2M table for field mentioned_users on 'Blast'
        db.create_table('bugle_blast_mentioned_users', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('blast', models.ForeignKey(orm['bugle.blast'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('bugle_blast_mentioned_users', ['blast_id', 'user_id'])
        # Adding M2M table for field favourited_by on 'Blast'
        db.create_table('bugle_blast_favourited_by', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('blast', models.ForeignKey(orm['bugle.blast'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('bugle_blast_favourited_by', ['blast_id', 'user_id'])
        # Adding model 'ImageUpload'
        db.create_table('bugle_imageupload', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='image_uploads', to=orm['auth.User'])),
            ('attachment', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
        ))
        db.send_create_signal('bugle', ['ImageUpload'])
    def backwards(self, orm):
        # Mirror of forwards(): drop the M2M join tables and both models.
        # Deleting model 'Blast'
        db.delete_table('bugle_blast')
        # Removing M2M table for field mentioned_users on 'Blast'
        db.delete_table('bugle_blast_mentioned_users')
        # Removing M2M table for field favourited_by on 'Blast'
        db.delete_table('bugle_blast_favourited_by')
        # Deleting model 'ImageUpload'
        db.delete_table('bugle_imageupload')
    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history (includes the django.contrib auth/contenttypes models).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'bugle.blast': {
            'Meta': {'ordering': "('-created',)", 'object_name': 'Blast'},
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'extended': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'favourited_by': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'favourites'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_todo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mentioned_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'mentions'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'short': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blasts'", 'to': "orm['auth.User']"})
        },
        'bugle.imageupload': {
            'Meta': {'object_name': 'ImageUpload'},
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_uploads'", 'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['bugle']
|
devfort/bugle
|
bugle_project/bugle/migrations/0001_initial.py
|
Python
|
bsd-2-clause
| 8,132
|
[
"BLAST"
] |
1c031d387ab40195b6e96ab67afa16c5d8040cc0238bda627e0317401f04c55c
|
#!/usr/bin/env python3
########################################################################
# Solves problem 24 from projectEuler.net.
# Finds the 1000000th lexicographic permutation of the 10 digits.
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
from CommonFunctions import factorial
def combinations(n, r):
    """Return the number of r-permutations of n items: n! / (n - r)!.

    Note: despite its name this computes the *permutation* count P(n, r),
    not the binomial coefficient C(n, r) (the factorial(r) divisor is
    absent).  Problem 24 only ever calls it as combinations(i, i) == i!,
    so the name is kept for backward compatibility.
    """
    # Use the stdlib factorial rather than the project-local helper so the
    # function is self-contained; both compute the same exact integer.
    from math import factorial
    return factorial(n) // (factorial(n - r))
if __name__ == '__main__':
    # Zero-based index of the target permutation (the 1,000,000th).
    remaining = 999999
    digits = list(range(10))
    chosen = []
    # Factorial number system: for each position, pick the digit whose
    # "block" of slot! permutations contains the remaining index.
    for slot in range(9, -1, -1):
        block_size = combinations(slot, slot)  # == slot!
        idx, remaining = divmod(remaining, block_size)
        chosen.append(str(digits.pop(idx)))
    print("The result is:", ''.join(chosen))
|
sanSS/programming-contests
|
project-euler/problem024.py
|
Python
|
gpl-3.0
| 1,503
|
[
"VisIt"
] |
9cc2881ca3f0c0c27eeca3614d5ccdf6e687d3ff869e9d0e8b35b471c90dbeb4
|
from . import moose
|
python-security/pyt
|
examples/import_test_project/package_with_folder_and_alias/nested_folder_with_init/__init__.py
|
Python
|
gpl-2.0
| 20
|
[
"MOOSE"
] |
401e787b78170e9dfb42f0a71cc82e92760d6c370075a46ea8b4f13bba2a828c
|
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" unit testing code for molecular descriptor calculators
"""
import os.path
import pickle
import unittest
from io import BytesIO, StringIO
import numpy
from rdkit import Chem, RDConfig
from rdkit.ML.Descriptors import Descriptors, MoleculeDescriptors
from rdkit.TestRunner import redirect_stdout
class TestCase(unittest.TestCase):
  """Tests for MolecularDescriptorCalculator: descriptor names, computed
  values, and restoring calculator state from a pickled .dsc file."""

  def setUp(self):
    # Two descriptors with stable reference values.
    self.descs = ['MolLogP', 'Chi1v']
    self.vers = ('1.1.0', '1.0.0')
    self.calc = MoleculeDescriptors.MolecularDescriptorCalculator(self.descs)
    # (SMILES, (MolLogP, Chi1v)) reference tuples.
    self.testD = [('CCOC', (0.6527, 1.40403)),
                  ('CC=O', (0.2052, 0.81305)),
                  ('CCC(=O)O', (0.481, 1.48839))]

  def testGetNames(self):
    self.assertEqual(self.calc.GetDescriptorNames(), tuple(self.descs))

  def _testVals(self, calc, testD):
    # Compare each computed descriptor vector to its reference within 1e-4.
    for smi, vals in testD:
      mol = Chem.MolFromSmiles(smi)
      ans = numpy.array(vals)
      res = numpy.array(calc.CalcDescriptors(mol))
      self.assertTrue(
        max(abs(res - ans)) < 1e-4, 'bad descriptor values for SMILES %s (%s)' % (smi, str(res)))

  def testCalcVals(self):
    self._testVals(self.calc, self.testD)

  def testSaveState(self):
    """Load a calculator from a pickled .dsc file and sanity-check it."""
    fName = os.path.join(RDConfig.RDCodeDir, 'ML/Descriptors/test_data', 'molcalc.dsc')
    # Normalize line endings so the pickle loads regardless of how the data
    # file was checked out.  (Fix: removed a redundant inTF.close() — the
    # `with` block already closes the file.)
    with open(fName, 'r') as inTF:
      buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
    inF = BytesIO(buf)
    calc = pickle.load(inF)
    self.assertEqual(calc.GetDescriptorNames(), tuple(self.descs))
    self.assertEqual(calc.GetDescriptorVersions(), tuple(self.vers))
    self._testVals(calc, self.testD)
    # ShowDescriptors() prints to stdout; capture and inspect its output.
    f = StringIO()
    with redirect_stdout(f):
      calc.ShowDescriptors()
    s = f.getvalue()
    for name in calc.GetDescriptorNames():
      self.assertIn(name, s)
    self.assertIn('Wildman-Crippen LogP value', calc.GetDescriptorSummaries())
    self.assertIn('N/A', calc.GetDescriptorSummaries())
    funcs = calc.GetDescriptorFuncs()
    self.assertEqual(len(funcs), len(self.descs))
    for f in funcs:
      self.assertTrue(callable(f))
class TestDescriptors(unittest.TestCase):
  # Tests for the abstract DescriptorCalculator base class plus a pickling
  # regression test.
  def test_DescriptorCalculator(self):
    # The base class must raise until subclasses implement the API.
    calc = Descriptors.DescriptorCalculator()
    self.assertRaises(NotImplementedError, calc.ShowDescriptors)
    self.assertRaises(NotImplementedError, calc.GetDescriptorNames)
    self.assertRaises(NotImplementedError, calc.CalcDescriptors, None)
    calc.simpleList = ['simple1', 'simple2']
    calc.compoundList = ['cmpd1', 'cmpd2']
    # ShowDescriptors prints both lists; capture stdout to verify.
    f = StringIO()
    with redirect_stdout(f):
      calc.ShowDescriptors()
    s = f.getvalue()
    for name in calc.simpleList:
      self.assertIn(name, s)
    for name in calc.compoundList:
      self.assertIn(name, s)
  def test_github3511(self):
    # Regression: computing descriptors must not leave unpicklable state
    # behind on the molecule (GitHub issue #3511).
    mol = Chem.MolFromSmiles('C')
    descriptors = [name for name, _ in Chem.Descriptors.descList]
    calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptors)
    calculator.CalcDescriptors(mol)
    # This should not raise a pickling exception
    pickle.dumps(mol)
# Allow running this test module directly.
if __name__ == '__main__':  # pragma: nocover
  unittest.main()
|
bp-kelley/rdkit
|
rdkit/ML/Descriptors/UnitTestMolDescriptors.py
|
Python
|
bsd-3-clause
| 3,435
|
[
"RDKit"
] |
dd8bfb0eff553b729e9afc08fa6034218bf2cf5e4a0de6eb0aa0e65e57873e1e
|
#########################################################################################
# Condor.py
# 10.11.2014
# Author: A.T.
#########################################################################################
""" Condor.py is a DIRAC independent class representing Condor batch system.
Condor objects are used as backend batch system representation for
LocalComputingElement and SSHComputingElement classes
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import re
import tempfile
# TODO: This should be modernised to use subprocess(32)
try:
import commands
except ImportError:
# Python 3's subprocess module contains a compatibility layer
import subprocess as commands
import os
__RCSID__ = "$Id$"
def parseCondorStatus(lines, jobID):
  """Look up the DIRAC status of one job in condor_q/condor_history output.

  :param lines: output lines from the condor commands; each relevant line
                is a "jobID statusID" pair
  :type lines: python:list
  :param str jobID: condor job identifier, e.g. "123.53"
  :returns: status string as known by DIRAC ('Unknown' if not found or the
            condor status code is unrecognised)
  """
  condorToDirac = {1: 'Waiting',
                   2: 'Running',
                   3: 'Aborted',
                   4: 'Done',
                   5: 'HELD'}
  jobID = str(jobID)
  for line in lines:
    fields = line.strip().split()
    # Skip lines that do not carry an integer status in the second column.
    try:
      statusID = int(fields[1])
    except (ValueError, IndexError):
      continue
    if fields[0] == jobID:
      return condorToDirac.get(statusID, 'Unknown')
  return 'Unknown'
def treatCondorHistory(condorHistCall, qList):
  """Append condor_history results to *qList* in condor_q format.

  condor_history versions before 8.5.3 cannot autoformat ClusterId.ProcId
  as a single field, so the two IDs are joined with a dot here.

  :param str condorHistCall: condor_history command to run
  :param qList: list of "jobID status" strings from condor_q output,
                extended in place
  :type qList: python:list
  :returns: None
  """
  histStatus, histOutput = commands.getstatusoutput(condorHistCall)
  if histStatus != 0:
    # Command failed: leave qList untouched.
    return
  for histLine in histOutput.split('\n'):
    parts = histLine.strip().split()
    # Expect exactly "ClusterId ProcId JobStatus"; ignore anything else.
    if len(parts) == 3:
      qList.append("%s.%s %s" % tuple(parts))
class Condor(object):
  """DIRAC-independent wrapper around the Condor batch system commands.

  Each method takes keyword arguments, validates the mandatory ones, shells
  out to the condor_* tools and returns a result dictionary with at least a
  'Status' key (0 on success) and a 'Message' key on failure.
  """
  def submitJob(self, **kwargs):
    """ Submit nJobs to the Condor batch system
    """
    resultDict = {}
    MANDATORY_PARAMETERS = ['Executable', 'OutputDir', 'SubmitOptions']
    for argument in MANDATORY_PARAMETERS:
      if argument not in kwargs:
        resultDict['Status'] = -1
        resultDict['Message'] = 'No %s' % argument
        return resultDict
    nJobs = kwargs.get('NJobs')
    if not nJobs:
      nJobs = 1
    numberOfProcessors = kwargs.get('NumberOfProcessors')
    wholeNode = kwargs.get('WholeNode')
    outputDir = kwargs['OutputDir']
    executable = kwargs['Executable']
    submitOptions = kwargs['SubmitOptions']
    preamble = kwargs.get('Preamble')
    # Whole-node jobs need the extra machine-level requirement flags.
    if wholeNode:
      requirements = '+RequiresWholeMachine=True\n Requirements = ( CAN_RUN_WHOLE_MACHINE ) && ( OpSys == "LINUX" )'
    else:
      requirements = 'Requirements = OpSys == "LINUX"'
    # Write the JDL to a temporary file in the output directory.
    # NOTE(review): NamedTemporaryFile defaults to binary mode ('w+b');
    # writing a str here would raise TypeError on Python 3 — verify mode
    # handling for the targeted Python versions.
    jdlFile = tempfile.NamedTemporaryFile(dir=outputDir, suffix=".jdl")
    jdlFile.write("""
    Executable = %s
    Universe = vanilla
    %s
    Initialdir = %s
    Output = $(Cluster).$(Process).out
    Error = $(Cluster).$(Process).err
    Log = test.log
    Environment = CONDOR_JOBID=$(Cluster).$(Process)
    Getenv = False
    request_cpus = %s
    Queue %s
    """ % (executable, requirements, outputDir, numberOfProcessors, nJobs)
    )
    jdlFile.flush()
    cmd = '%s; ' % preamble if preamble else ''
    cmd += 'condor_submit %s %s' % (submitOptions, jdlFile.name)
    status, output = commands.getstatusoutput(cmd)
    # Closing the NamedTemporaryFile also deletes the JDL on disk.
    jdlFile.close()
    if status != 0:
      resultDict['Status'] = status
      resultDict['Message'] = output
      return resultDict
    submittedJobs = 0
    cluster = ''
    # Parse "N job(s) submitted to cluster M." from condor_submit output.
    if status == 0:
      lines = output.split('\n')
      for line in lines:
        if 'cluster' in line:
          result = re.match(r'(\d+) job.*cluster (\d+)\.', line)
          if result:
            submittedJobs, cluster = result.groups()
            try:
              submittedJobs = int(submittedJobs)
            except Exception:
              submittedJobs = 0
    if submittedJobs > 0 and cluster:
      resultDict['Status'] = 0
      resultDict['Jobs'] = []
      # Job IDs are "<cluster>.<process>" with process counting from 0.
      for i in range(submittedJobs):
        resultDict['Jobs'].append('.'.join([cluster, str(i)]))
    else:
      resultDict['Status'] = status
      resultDict['Message'] = output
    return resultDict
  def killJob(self, **kwargs):
    """ Kill jobs in the given list
    """
    resultDict = {}
    MANDATORY_PARAMETERS = ['JobIDList']
    for argument in MANDATORY_PARAMETERS:
      if argument not in kwargs:
        resultDict['Status'] = -1
        resultDict['Message'] = 'No %s' % argument
        return resultDict
    jobIDList = kwargs['JobIDList']
    if not jobIDList:
      resultDict['Status'] = -1
      resultDict['Message'] = 'Empty job list'
      return resultDict
    successful = []
    failed = []
    for job in jobIDList:
      status, output = commands.getstatusoutput('condor_rm %s' % job)
      if status != 0:
        failed.append(job)
      else:
        successful.append(job)
    resultDict['Status'] = 0
    if failed:
      # Status 1 signals a partial failure; Message carries the last error.
      resultDict['Status'] = 1
      resultDict['Message'] = output
    resultDict['Successful'] = successful
    resultDict['Failed'] = failed
    return resultDict
  def getJobStatus(self, **kwargs):
    """ Get status of the jobs in the given list
    """
    resultDict = {}
    MANDATORY_PARAMETERS = ['JobIDList']
    for argument in MANDATORY_PARAMETERS:
      if argument not in kwargs:
        resultDict['Status'] = -1
        resultDict['Message'] = 'No %s' % argument
        return resultDict
    jobIDList = kwargs['JobIDList']
    if not jobIDList:
      resultDict['Status'] = -1
      resultDict['Message'] = 'Empty job list'
      return resultDict
    user = kwargs.get('User')
    if not user:
      user = os.environ.get('USER')
    if not user:
      resultDict['Status'] = -1
      resultDict['Message'] = 'No user name'
      return resultDict
    status, stdout_q = commands.getstatusoutput('condor_q -submitter %s -af:j JobStatus ' % user)
    if status != 0:
      resultDict['Status'] = status
      resultDict['Message'] = stdout_q
      return resultDict
    qList = stdout_q.strip().split('\n')
    # FIXME: condor_history only supports the 'j' autoformat option from
    # version 8.5.3 on; older versions emit the fields space-separated
    # (e.g. "1245 75 3"), so the first two must be concatenated with a dot.
    condorHistCall = 'condor_history -af ClusterId ProcId JobStatus -submitter %s' % user
    treatCondorHistory(condorHistCall, qList)
    statusDict = {}
    if len(qList):
      for job in jobIDList:
        job = str(job)
        statusDict[job] = parseCondorStatus(qList, job)
        # Held jobs are reported to DIRAC as 'Unknown'.
        if statusDict[job] == 'HELD':
          statusDict[job] = 'Unknown'
    # Final output
    status = 0
    resultDict['Status'] = 0
    resultDict['Jobs'] = statusDict
    return resultDict
  def getCEStatus(self, **kwargs):
    """ Get the overall status of the CE
    """
    resultDict = {}
    user = kwargs.get('User')
    if not user:
      user = os.environ.get('USER')
    if not user:
      resultDict['Status'] = -1
      resultDict['Message'] = 'No user name'
      return resultDict
    waitingJobs = 0
    runningJobs = 0
    status, output = commands.getstatusoutput('condor_q -submitter %s' % user)
    if status != 0:
      # "no record" with a non-zero exit code still means an empty queue.
      if "no record" in output:
        resultDict['Status'] = 0
        resultDict["Waiting"] = waitingJobs
        resultDict["Running"] = runningJobs
        return resultDict
      resultDict['Status'] = status
      resultDict['Message'] = output
      return resultDict
    if "no record" in output:
      resultDict['Status'] = 0
      resultDict["Waiting"] = waitingJobs
      resultDict["Running"] = runningJobs
      return resultDict
    # Count jobs by the state column of the condor_q table:
    # " I " = idle (waiting), " R " = running.
    if output:
      lines = output.split('\n')
      for line in lines:
        if not line.strip():
          continue
        if " I " in line:
          waitingJobs += 1
        elif " R " in line:
          runningJobs += 1
    # Final output
    resultDict['Status'] = 0
    resultDict["Waiting"] = waitingJobs
    resultDict["Running"] = runningJobs
    return resultDict
|
yujikato/DIRAC
|
src/DIRAC/Resources/Computing/BatchSystems/Condor.py
|
Python
|
gpl-3.0
| 8,507
|
[
"DIRAC"
] |
058934ee7b5285b9f7fcd0164411de4dbd14d9f8183807af6d2c4dc6d42530d0
|
# -*- coding: UTF-8 -*-
# 引入必要的库
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import squares
# Build the command-line argument parser and parse the arguments.
# Usage: python test_grader.py --image images/test_01.png
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to the input image")
args = vars(ap.parse_args())
# Answer key: maps question number to the index of the correct answer.
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
# Load the image, convert it to grayscale, blur it slightly, then run edge
# detection.
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)
# Find contours in the edge map, then initialize the contour that
# corresponds to the answer sheet.
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
docCnt = None
# Make sure at least one contour was found.
if len(cnts) > 0:
    # Sort the contours by area in descending order.
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    # Loop over the sorted contours.
    for c in cnts:
        # Approximate the contour.
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # If the approximated contour has four vertices, assume we have
        # found the answer sheet.
        if len(approx) == 4:
            docCnt = approx
            break
# Apply a four-point perspective transform to both the original image and
# the grayscale image.
# NOTE(review): docCnt stays None when no 4-vertex contour is found, which
# would make the transforms below fail — consider guarding.
paper = four_point_transform(image, docCnt.reshape(4, 2))
warped = four_point_transform(gray, docCnt.reshape(4, 2))
# # Apply Otsu's thresholding to binarize the grayscale image
# thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
#
# # Find contours in the thresholded image, then initialize the list of
# # contours that correspond to questions
# cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# questionCnts = []
#
# # Loop over every contour
# for q, c in enumerate(cnts):
#
#     # Compute the bounding box of the contour, then use it to derive the
#     # aspect ratio
#     (x, y, w, h) = cv2.boundingRect(c)
#     ar = w / float(h)
#     # print 'w' + str(w), 'h' + str(h)
#
#     # To qualify as an answer box the bounding box must not be too small
#     # (at least 40 px per side) and its aspect ratio must be close to 1.
#     # To exclude the outer frame the contour must not be too long (< 300 px).
#     # The contour must have at least 4 vertices.
#     if ar >= 0.9 and 30 < w < 300:
#         peri = cv2.arcLength(c, True)
#         approx = cv2.approxPolyDP(c, 0.02 * peri, True)
#         if len(approx) >= 4:
#             questionCnts.append(c)
#             # colorpoint = 255
#             # color = ((q % 3 & 01) * colorpoint, ((q + 1) % 3 & 01) * colorpoint, ((q + 2) % 3 & 01) * colorpoint)
#             # cv2.drawContours(paper, [c], -1, color, 1)
#
# # Sort the bubble contours top-to-bottom, then initialize the counter of
# # correct answers.
# questionCnts = contours.sort_contours(questionCnts,
#                                       method="top-to-bottom")[0]
#
# for (q, i) in enumerate(questionCnts):
#     colorpoint = 255
#     color = ((q % 3 & 01) * colorpoint, ((q + 1) % 3 & 01) * colorpoint, ((q + 2) % 3 & 01) * colorpoint)
#     cv2.drawContours(paper, [questionCnts[q]], -1, color, 2)
#     # cv2.imshow("exam" + str(q), paper)
# cv2.imshow("Original", image)
# cv2.imshow("exam", thresh)
squares.find_squares(paper)
squarespic = squares.find_squares(warped)
cv2.drawContours(paper, squarespic, -1, (0, 0, 255), 2)
cv2.imshow('squares', paper)
# cv2.imshow("exam", gaussian)
cv2.waitKey(0)
|
EdgarNg1024/PaperHelper
|
main.py
|
Python
|
apache-2.0
| 3,834
|
[
"Gaussian"
] |
9fbfd5b160359df8135b3bf9e59cbf255d77a6ab18aa90693326999f8f547654
|
import os
from os.path import join as path_join
import sys
import unittest
import shutil
import tempfile
import numpy.testing
from numpy.testing import assert_array_equal
import numpy as np
import galore
import galore.formats
from galore.cli.galore import simple_dos_from_files
import galore.plot
from contextlib import contextmanager
import io
# Directory containing this test module; used to locate test data files.
test_dir = os.path.abspath(os.path.dirname(__file__))
# pymatgen is an optional dependency: record whether it is importable so
# dependent tests can be skipped cleanly.
try:
    import pymatgen
    has_pymatgen = True
except ImportError:
    has_pymatgen = False
@contextmanager
def stdout_redirect():
    """Temporarily replace ``sys.stdout`` with an in-memory buffer.

    Yields the buffer so tests can inspect captured output; call
    ``getvalue()`` *inside* the ``with`` block, because the buffer is
    closed on exit.  Uses ``StringIO`` on Python 3 and ``BytesIO`` on
    Python 2 to match each version's stdout type.

    Bug fix: the previous implementation never restored ``sys.stdout``,
    leaving it bound to a closed buffer after the context exited; the
    original stream is now always restored, even if the body raises.
    """
    if sys.version_info > (3,):
        output = io.StringIO()
    else:
        output = io.BytesIO()
    original_stdout = sys.stdout
    sys.stdout = output
    try:
        yield output
    finally:
        sys.stdout = original_stdout
        output.close()
class test_dos_functions(unittest.TestCase):
    # End-to-end tests of the simple_dos_from_files helper: each test builds
    # a plot from CSV data and inspects the returned matplotlib figure
    # (labels, axis limits, and resampled line data).
    def test_simple_dos_spikes(self):
        """Test total DOS / spectrum plotter from CSV data, spike sampling"""
        ylabel = 'some label'
        xmin = -3
        xmax = 220
        sampling = 1e-1
        plt = simple_dos_from_files(input=path_join(test_dir,
                                                    'test_xy_data.csv'),
                                    return_plt=True, xmax=xmax, xmin=xmin,
                                    sampling=sampling,
                                    spikes=True,
                                    lorentzian=2.3, gaussian=3.2,
                                    csv=False, txt=False, plot=None,
                                    units='cm-1', ymax=None, ymin=None,
                                    ylabel=ylabel,
                                    flipx=False)
        fig = plt.gcf()
        ax = fig.axes[0]
        self.assertEqual(ax.get_ylabel(), ylabel)
        self.assertEqual(ax.get_xlabel(), r'cm$^{-1}$')
        # x-limits should honour xmin exactly; xmax may shrink slightly
        # due to resampling, but must stay within 1% of the request.
        self.assertAlmostEqual(ax.get_xlim()[0], xmin, places=2)
        self.assertLess(ax.get_xlim()[1], xmax)
        self.assertGreater(ax.get_xlim()[1], (xmax * 0.99))
        self.assertEqual(len(ax.lines), 1)
        xvals, yvals = ax.lines[0].get_xydata().T
        # Spot-check the resampled grid and broadened values.
        self.assertAlmostEqual(xvals[5], (xmin + 5 * sampling))
        self.assertAlmostEqual(yvals[5], 0.0, places=3)
        self.assertAlmostEqual(yvals[2000], 0.65245445, places=4)
    def test_simple_dos_linear(self):
        """Test total DOS / spectrum plotter from CSV data, linear sampling"""
        ylabel = 'some label'
        xmin = -3
        xmax = 220
        sampling = 1e-1
        plt = simple_dos_from_files(input=path_join(test_dir,
                                                    'test_xy_data.csv'),
                                    return_plt=True, xmax=xmax, xmin=xmin,
                                    sampling=sampling,
                                    lorentzian=2.3, gaussian=3.2,
                                    csv=False, txt=False, plot=None,
                                    units='cm-1', ymax=None, ymin=None,
                                    ylabel=ylabel,
                                    flipx=False)
        fig = plt.gcf()
        ax = fig.axes[0]
        self.assertEqual(ax.get_ylabel(), ylabel)
        self.assertEqual(ax.get_xlabel(), r'cm$^{-1}$')
        self.assertAlmostEqual(ax.get_xlim()[0], xmin, places=2)
        self.assertLess(ax.get_xlim()[1], xmax)
        self.assertGreater(ax.get_xlim()[1], (xmax * 0.99))
        self.assertEqual(len(ax.lines), 1)
        xvals, yvals = ax.lines[0].get_xydata().T
        self.assertAlmostEqual(xvals[5], (xmin + 5 * sampling))
        self.assertAlmostEqual(yvals[5], 0.0, places=3)
        # Linear interpolation yields a much larger value here than the
        # spike-sampled case above.
        self.assertAlmostEqual(yvals[2000], 98.64411, places=4)
class test_array_functions(unittest.TestCase):
    # Direct unit tests of galore's numerical helpers: delta, xy_to_1d
    # resampling (spike and linear modes) and the gaussian broadening kernel.
    def test_delta(self):
        self.assertEqual(galore.delta(1, 1.5, w=1), 1)
    def test_xy_to_1d_spikes(self):
        """Check resampling of distinct values as spikes"""
        assert_array_equal(
            galore.xy_to_1d(
                np.array([[2.1, 0.6], [4.3, 0.2], [5.1, 0.3]]), range(6),
                spikes=True),
            np.array([0., 0., 0.6, 0., 0.2, 0.3]))
    def test_xy_to_1d_linear(self):
        """Check resampling with linear interpolation"""
        assert_array_equal(
            galore.xy_to_1d(
                np.array([[1., 0.5], [3., 1.5]]), range(6),
                spikes=False),
            np.array([0., 0.5, 1.0, 1.5, 0., 0.0]))
    def test_gaussian(self):
        # At x - f0 = 2, a FWHM of 2*2.35482 corresponds to sigma = 2, so the
        # expected value is exp(-(2^2)/(2*2^2)) ~= 0.8007374.
        self.assertAlmostEqual(galore.gaussian(3., f0=1, fwhm=(3 * 2.35482)),
                               0.8007374029168)
class test_io_functions(unittest.TestCase):
    """Format detection, writing and parsing tests for galore.formats."""
    def setUp(self):
        # Fresh scratch directory for each test's output files
        self.tempdir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.tempdir)
    def test_identify_raman(self):
        # A DOSCAR must not be identified as vasp_raman.py output, and vice versa
        doscar_path = path_join(test_dir, 'DOSCAR.1')
        raman_path = path_join(test_dir, 'CaF2', 'raman_lda_500.dat')
        self.assertFalse(galore.formats.is_vasp_raman(doscar_path))
        self.assertTrue(galore.formats.is_vasp_raman(raman_path))
    def test_identify_doscar(self):
        doscar_path = path_join(test_dir, 'DOSCAR.1')
        raman_path = path_join(test_dir, 'CaF2', 'raman_lda_500.dat')
        self.assertTrue(galore.formats.is_doscar(doscar_path))
        self.assertFalse(galore.formats.is_doscar(raman_path))
    def test_write_txt(self):
        # Written file should match the module-level reference txt_test_string
        x_values = range(5)
        y_values = [x**2 / 200 for x in range(5)]
        filename = path_join(self.tempdir, 'write_txt_test.txt')
        galore.formats.write_txt(
            x_values, y_values, filename=filename, header="# Frequency Value")
        with open(filename, 'r') as f:
            self.assertEqual(f.read(), txt_test_string)
    def test_write_txt_stdout(self):
        # With filename=None the same text goes to stdout instead of a file
        with stdout_redirect() as stdout:
            x_values = range(5)
            y_values = [x**2 / 200 for x in range(5)]
            filename = path_join(self.tempdir, 'write_txt_test.txt')
            galore.formats.write_txt(
                x_values, y_values, filename=None, header="# Frequency Value")
            self.assertEqual(stdout.getvalue(), txt_test_string)
    def test_write_csv(self):
        x_values = range(5)
        y_values = [x**2 / 200 for x in range(5)]
        filename = path_join(self.tempdir, 'write_csv_test.csv')
        galore.formats.write_csv(
            x_values,
            y_values,
            filename=filename,
            header=["Frequency", "Value"])
        with open(filename, 'r') as f:
            self.assertEqual(f.read(), csv_test_string)
    def test_write_csv_stdout(self):
        with stdout_redirect() as stdout:
            x_values = range(5)
            y_values = [x**2 / 200 for x in range(5)]
            galore.formats.write_csv(
                x_values,
                y_values,
                filename=None,
                header=["Frequency", "Value"])
            self.assertEqual(stdout.getvalue(), csv_test_string)
    def test_read_spinpol_doscar(self):
        # Spot-check values parsed from a spin-polarised DOSCAR fixture
        doscar_path = path_join(test_dir, 'DOSCAR.1')
        data = galore.formats.read_doscar(doscar_path)
        self.assertEqual(data[20, 0], -31.795)
        self.assertEqual(data[14, 1], 0.329)
    def test_read_raman(self):
        raman_path = path_join(test_dir, 'CaF2', 'raman_lda_500.dat')
        # Expected (frequency, intensity) rows from the CaF2 fixture file
        raman_data = np.array([[3.45589820e+02, 9.89999400e-01],
                               [3.45589690e+02, 9.89999400e-01],
                               [3.45580570e+02, 9.89999400e-01],
                               [2.78757900e+02, 0.00000000e+00],
                               [2.78757810e+02, 0.00000000e+00],
                               [2.78757760e+02, 1.00000000e-07],
                               [6.11230000e-01, 0.00000000e+00],
                               [6.11260000e-01, 0.00000000e+00],
                               [6.11920000e-01, 3.80000000e-06]])
        assert_array_equal(galore.formats.read_vasp_raman(raman_path),
                           raman_data)
    def test_read_txt_pdos_spin(self):
        # Spin channels (e.g. 'fup', 'f(down)') should be merged into one field
        sample_txt = path_join(test_dir, 'spin_samples', 'mixed.dat')
        data = galore.formats.read_pdos_txt(sample_txt)
        self.assertAlmostEqual(data['s'][0], 0.1)
        self.assertAlmostEqual(data['s'][6], 1.1)
        self.assertAlmostEqual(data['p'][0], 0.5)
        self.assertAlmostEqual(data['p'][6], 2.5)
        self.assertAlmostEqual(data['d'][1], 0.4)
        self.assertIn('f', data.dtype.names)
        self.assertNotIn('fup', data.dtype.names)
        self.assertNotIn('f(down)', data.dtype.names)
        self.assertAlmostEqual(data['f'][1], 1.1)
    @unittest.skipUnless(has_pymatgen, "requires pymatgen")
    def test_read_vasprun_totaldos(self):
        vr_path = path_join(test_dir, 'MgO', 'vasprun.xml.gz')
        data = galore.formats.read_vasprun_totaldos(vr_path)
        self.assertEqual(data[150, 0], -17.2715)
        self.assertEqual(data[195, 1], 16.8066)
    @unittest.skipUnless(has_pymatgen, "requires pymatgen")
    def test_read_vasprun_pdos(self):
        vr_path = path_join(test_dir, 'MgO', 'vasprun.xml.gz')
        pdos = galore.formats.read_vasprun_pdos(vr_path)
        self.assertEqual(pdos['Mg']['s'][150], 0.053)
        self.assertEqual(pdos['O']['p'][189], 0.004)
    @unittest.skipUnless(has_pymatgen, "requires pymatgen")
    def test_identify_complete_dos(self):
        from monty.serialization import loadfn
        dos = loadfn(path_join(test_dir, 'MgO', 'CompleteDos.yaml.gz'))
        self.assertTrue(galore.formats.is_complete_dos(dos))
        raman_path = path_join(test_dir, 'CaF2', 'raman_lda_500.dat')
        self.assertFalse(galore.formats.is_complete_dos(raman_path))
    @unittest.skipUnless(has_pymatgen, "requires pymatgen")
    def test_read_complete_dos(self):
        from monty.serialization import loadfn
        dos = loadfn(path_join(test_dir, 'MgO', 'CompleteDos.yaml.gz'))
        # NOTE(review): read_vasprun_pdos is given a CompleteDos object here
        # rather than a path -- presumably it dispatches on input type;
        # confirm against galore.formats.
        pdos = galore.formats.read_vasprun_pdos(dos)
        self.assertEqual(pdos['Mg']['s'][150], 0.053)
        self.assertEqual(pdos['O']['p'][189], 0.004)
# Reference output of write_txt for x=range(5), y=x**2/200: header line
# followed by two exponential-format columns.
txt_test_string = """# Frequency Value
0.000000e+00 0.000000e+00
1.000000e+00 5.000000e-03
2.000000e+00 2.000000e-02
3.000000e+00 4.500000e-02
4.000000e+00 8.000000e-02
"""

# Reference output of write_csv: same data as CSV rows joined with os.linesep
# (trailing empty string yields a final line terminator).
csv_test_string = os.linesep.join(
    ("Frequency,Value", "0,0.0", "1,0.005", "2,0.02", "3,0.045", "4,0.08", ""))

if __name__ == '__main__':
    unittest.main()
|
SMTG-UCL/galore
|
test/test.py
|
Python
|
gpl-3.0
| 10,385
|
[
"Gaussian",
"pymatgen"
] |
1dd4bcbcc478aed60d2f7ea399620c98e0251f536bef6bf8577658d789e6beeb
|
#
#
# This example shows the diffraction by a Si 111 crystal calculated in a variety of modes (see main):
#
# - make_plots( calculate_standard_interface() )
# using the standard interface via definition of a photon grid (DiffractionSetupSweeps) and
# the DiffractionResult object
#
# - calculate_with_complex_amplitude_photon(method=0 or 1)
# Calculates diffraction of many photons (0) or a photon bunch (1) using ComplexAmplitudePhoton,
# so a photon with electric field amplitude.
#
# - calculate_with_polarized_photon(method=0 or 1)
# Calculates Stokes parameters after diffraction of many photons (0) or a photon bunch (1) using
# PolarizedPhoton, so photons with info on the Stokes parameters.
#
#
import numpy
# for plots
from srxraylib.plot.gol import plot
from crystalpy.diffraction.GeometryType import BraggDiffraction
from crystalpy.diffraction.DiffractionSetup import DiffractionSetup
from crystalpy.diffraction.DiffractionSetupSweeps import DiffractionSetupSweeps
from crystalpy.diffraction.Diffraction import Diffraction
from crystalpy.polarization.MuellerDiffraction import MuellerDiffraction
from crystalpy.util.StokesVector import StokesVector
from crystalpy.util.Vector import Vector
from crystalpy.util.Photon import Photon
from crystalpy.util.ComplexAmplitudePhoton import ComplexAmplitidePhoton
from crystalpy.util.PolarizedPhoton import PolarizedPhoton
from crystalpy.util.ComplexAmplitudePhotonBunch import ComplexAmplitudePhotonBunch
from crystalpy.util.PolarizedPhotonBunch import PolarizedPhotonBunch
def calculate_standard_interface():
    """Run the standard interface: define a photon grid with
    DiffractionSetupSweeps, diffract, and return the Mueller/Stokes result."""
    # Create a diffraction setup: Si 111, Bragg geometry, 8 keV,
    # 500 angular points over +/- 100 urad.
    print("\nCreating a diffraction setup...")
    diffraction_setup = DiffractionSetupSweeps(
        geometry_type=BraggDiffraction(),   # GeometryType object
        crystal_name="Si",                  # string
        thickness=1e-2,                     # meters
        miller_h=1,                         # int
        miller_k=1,                         # int
        miller_l=1,                         # int
        asymmetry_angle=0,                  # radians
        azimuthal_angle=0.0,                # radians
        energy_min=8000.0,                  # eV
        energy_max=8000.0,                  # eV
        energy_points=1,                    # int
        angle_deviation_min=-100e-6,        # radians
        angle_deviation_max=100e-6,         # radians
        angle_deviation_points=500)         # int

    diffraction = Diffraction()

    # Run the diffraction calculation over the whole sweep.
    print("\nCalculating the diffraction results...")
    diffraction_result = diffraction.calculateDiffraction(diffraction_setup)

    # Mueller/Stokes post-processing of the diffraction results,
    # starting from a 45-degree linearly polarized beam.
    mueller_diffraction = MuellerDiffraction(diffraction_result,
                                             StokesVector([1, 0, 1, 0]),
                                             inclination_angle=0.0)

    print("\nCalculating the Stokes vector...")
    return mueller_diffraction.calculate_stokes()
def make_plots(mueller_result):
    """Plot intensities, phases and Stokes parameters from a Mueller result.

    Args:
        mueller_result: object holding Stokes components (_s0.._s3) and a
            diffraction_result with per-angle intensities and phases.
    """
    #
    # plots
    #
    diffraction_result = mueller_result.diffraction_result
    photon_energies = diffraction_result.energies()
    deviation_angles = diffraction_result.angleDeviations()
    print("Number of energy points: %d"%photon_energies.size)
    print("Number of angular points: %d"%deviation_angles.size)
    print("_intensity shape: ",diffraction_result._intensities.shape)
    print("_phases shape: ",diffraction_result._phases.shape)
    from srxraylib.plot.gol import plot, four_plots
    # Reflectivity vs deviation angle; last index selects s-pol, p-pol, ratio
    plot( 1e6*deviation_angles,diffraction_result._intensities[0,:,0],
          1e6*deviation_angles,diffraction_result._intensities[0,:,1],
          1e6*deviation_angles,diffraction_result._intensities[0,:,2],
          title="Intensity for photon energy = %4.3f "%photon_energies[0],
          xtitle="Deviation angle urad",ytitle="Reflectivity",
          legend=['s-pol','p-pol','p/s ratio',],show=False)
    # Phases for the same angular scan
    plot( 1e6*deviation_angles,diffraction_result._phases[0,:,0],
          1e6*deviation_angles,diffraction_result._phases[0,:,1],
          1e6*deviation_angles,diffraction_result._phases[0,:,2],
          title="Phase for photon energy = %4.3f "%photon_energies[0],
          xtitle="Deviation angle urad",ytitle="Reflectivity",
          legend=['s-pol','p-pol','p minus s pol'],show=False)
    # Stokes
    four_plots(1e6*deviation_angles,mueller_result._s0[0],
               1e6*deviation_angles,mueller_result._s1[0],
               1e6*deviation_angles,mueller_result._s2[0],
               1e6*deviation_angles,mueller_result._s3[0],
               title=["S0","S1","S2","S3"],xtitle="Deviation angle [urad]",
               yrange=[-1,1],show=False)
    # Plot the degree of circular polarization.
    plot(1e6*deviation_angles,mueller_result._s3[0]/mueller_result._s0[0],yrange=[-1,1],
         title="Circular Polarization S3/S0",xtitle="Deviation angle [urad]",ytitle="S3/S0",show=True)
#
#
#
def calculate_with_complex_amplitude_photon(method=0):
    """Compute Si 111 Bragg reflectivity using ComplexAmplitudePhoton
    (photons carrying complex field amplitudes).

    Args:
        method (int): 0 diffracts photons one at a time; 1 builds a photon
            bunch first and diffracts it in a single call. Both plot the
            same sigma/pi reflectivity curves.
    """
    # Create a diffraction setup.
    print("\nCreating a diffraction setup...")
    diffraction_setup = DiffractionSetup(geometry_type          = BraggDiffraction(),  # GeometryType object
                                               crystal_name           = "Si",                             # string
                                               thickness              = 1e-2,                             # meters
                                               miller_h               = 1,                                # int
                                               miller_k               = 1,                                # int
                                               miller_l               = 1,                                # int
                                               asymmetry_angle        = 0,#10.0*numpy.pi/180.,            # radians
                                               azimuthal_angle        = 0.0)                              # radians                            # int
    energy                 = 8000.0                           # eV
    angle_deviation_min    = -100e-6                          # radians
    angle_deviation_max    = 100e-6                           # radians
    angle_deviation_points = 500
    angle_step = (angle_deviation_max-angle_deviation_min)/angle_deviation_points
    bragg_angle = diffraction_setup.angleBragg(energy)
    print("Bragg angle for E=%f eV is %f deg"%(energy,bragg_angle*180.0/numpy.pi))
    # Create a Diffraction object.
    diffraction = Diffraction()
    #
    # get wavevector with incident direction matching Bragg angle
    #
    K0 = diffraction_setup.getK0(energy)
    K0unitary = K0.getNormalizedVector()
    print("K0",K0.components())
    # method = 0  # diffraction for individual photons
    # method = 1  # diffraction for bunch
    ZZ = numpy.zeros(angle_deviation_points)
    if method == 0:
        # diffract one photon per angular point and collect the output
        # deviations = numpy.zeros(angle_deviation_points)
        intensityS = numpy.zeros(angle_deviation_points)
        intensityP = numpy.zeros(angle_deviation_points)
        bunch_out = ComplexAmplitudePhotonBunch()
        for ia in range(angle_deviation_points):
            deviation = angle_deviation_min + ia * angle_step
            # angle = deviation + bragg_angle
            # yy = numpy.cos(angle)
            # zz = - numpy.abs(numpy.sin(angle))
            # photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz))
            # minus sign in angle is to perform cw rotation when deviation increases
            Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
            # NOTE: "ComplexAmplitidePhoton" (sic) is the class name as
            # spelled in the crystalpy import above.
            photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vin)
            photon_out = diffraction.calculateDiffractedComplexAmplitudePhoton(diffraction_setup,photon)
            bunch_out.addPhoton(photon_out)
            ZZ[ia] = deviation
    elif method == 1: # diffraction for bunch
        bunch_in = ComplexAmplitudePhotonBunch()
        for ia in range(angle_deviation_points):
            deviation = angle_deviation_min + ia * angle_step
            # angle = deviation + bragg_angle
            # yy = numpy.cos(angle)
            # zz = - numpy.abs(numpy.sin(angle))
            # photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz))
            # minus sign in angle is to perform cw rotation when deviation increases
            Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
            photon = ComplexAmplitidePhoton(energy_in_ev=energy,direction_vector=Vin)
            bunch_in.addPhoton( photon )
            ZZ[ia] = angle_deviation_min + ia * angle_step
        bunch_out = diffraction.calculateDiffractedComplexAmplitudePhotonBunch(diffraction_setup,bunch_in)
    # Extract sigma/pi intensities from the bunch and plot vs deviation angle
    bunch_out_dict = bunch_out.toDictionary()
    print(bunch_out_dict.keys())
    plot(1e6*ZZ,bunch_out_dict["intensityS"],1e6*ZZ,bunch_out_dict["intensityP"],
         xtitle="theta - thetaB [urad]",title="Reflectivity calculation using ComplexAmplitudePhoton method:%d"%method,
         legend=["Sigma","Pi"])
#
#
#
def calculate_with_polarized_photon(method=0):
    """Compute Stokes parameters after Si 111 Bragg diffraction using
    PolarizedPhoton (photons carrying Stokes-vector polarization info).

    Args:
        method (int): 0 diffracts photons one at a time; 1 diffracts a whole
            photon bunch in a single call. Both plot S0 and S1 vs angle.
    """
    # Create a diffraction setup.
    print("\nCreating a diffraction setup...")
    diffraction_setup = DiffractionSetup(geometry_type          = BraggDiffraction(),  # GeometryType object
                                               crystal_name           = "Si",                             # string
                                               thickness              = 1e-2,                             # meters
                                               miller_h               = 1,                                # int
                                               miller_k               = 1,                                # int
                                               miller_l               = 1,                                # int
                                               asymmetry_angle        = 0,#10.0*numpy.pi/180.,            # radians
                                               azimuthal_angle        = 0.0)                              # radians                            # int
    energy                 = 8000.0                           # eV
    angle_deviation_min    = -100e-6                          # radians
    angle_deviation_max    = 100e-6                           # radians
    angle_deviation_points = 500
    angle_step = (angle_deviation_max-angle_deviation_min)/angle_deviation_points
    bunch_in = PolarizedPhotonBunch()
    bragg_angle = diffraction_setup.angleBragg(energy)
    print("Bragg angle for E=%f eV is %f deg"%(energy,bragg_angle*180.0/numpy.pi))
    # Create a Diffraction object.
    diffraction = Diffraction()
    #
    # get wavevector with incident direction matching Bragg angle
    #
    K0 = diffraction_setup.getK0(energy)
    K0unitary = K0.getNormalizedVector()
    print("K0",K0.components())
    # method = 0  # diffraction for individual photons
    # method = 1  # diffraction for bunch
    ZZ = numpy.zeros(angle_deviation_points)
    if method == 0:
        bunch_out = PolarizedPhotonBunch()
        for ia in range(angle_deviation_points):
            deviation = angle_deviation_min + ia * angle_step
            # angle = deviation + bragg_angle
            # yy = numpy.cos(angle)
            # zz = - numpy.abs(numpy.sin(angle))
            # photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz),
            #                          stokes_vector=StokesVector([1,0,1,0]))
            # minus sign in angle is to perform cw rotation when deviation increases
            Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
            # Incident photon: 45-degree linear polarization [1,0,1,0]
            photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vin,
                                     stokes_vector=StokesVector([1,0,1,0]))
            photon_out = diffraction.calculateDiffractedPolarizedPhoton(diffraction_setup,
                                                                        incoming_polarized_photon=photon,
                                                                        inclination_angle=0.0)
            bunch_out.addPhoton( photon_out )
            ZZ[ia] = angle_deviation_min + ia * angle_step
    elif method == 1: # diffraction for bunch
        for ia in range(angle_deviation_points):
            deviation = angle_deviation_min + ia * angle_step
            # angle = deviation + bragg_angle
            # yy = numpy.cos(angle)
            # zz = - numpy.abs(numpy.sin(angle))
            # photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vector(0.0,yy,zz),
            #                          stokes_vector=StokesVector([1,0,1,0]))
            # minus sign in angle is to perform cw rotation when deviation increases
            Vin = K0unitary.rotateAroundAxis(Vector(1,0,0),-deviation)
            photon = PolarizedPhoton(energy_in_ev=energy,direction_vector=Vin,
                                     stokes_vector=StokesVector([1,0,1,0]))
            bunch_in.addPhoton( photon )
            ZZ[ia] = angle_deviation_min + ia * angle_step
        bunch_out = diffraction.calculateDiffractedPolarizedPhotonBunch(diffraction_setup,bunch_in,0.0)
    # Plot S0 and S1 of the diffracted bunch vs deviation angle
    bunch_out_dict = bunch_out.toDictionary()
    plot(1e6*ZZ,bunch_out_dict["s0"],1e6*ZZ,bunch_out_dict["s1"],legend=["S0","S1"],
         xtitle="theta - thetaB [urad]",title="Polarized reflectivity calculation using method %d"%method)
#
# main
#
if __name__ == "__main__":
    # Run the same Si 111 Bragg calculation through each available interface.
    make_plots( calculate_standard_interface() )
    calculate_with_complex_amplitude_photon(method=0)  # photon by photon
    calculate_with_complex_amplitude_photon(method=1)  # whole bunch
    calculate_with_polarized_photon(method=0)          # photon by photon
    calculate_with_polarized_photon(method=1)          # whole bunch
|
edocappelli/crystalpy
|
crystalpy/examples/Si111.py
|
Python
|
mit
| 15,040
|
[
"CRYSTAL"
] |
7035b4446a58978832c948f9e07c698a0dd86d403fa932c5a034364e86fde9f8
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# sut
from DIRAC.DataManagementSystem.Service.StorageElementHandler import getDiskSpace, getFreeDiskSpace, getTotalDiskSpace
def test_getDiskSpace():
    """Smoke-test the three disk-space helpers: each must return S_OK."""
    for result in (getDiskSpace("/"), getTotalDiskSpace(), getFreeDiskSpace()):
        assert result["OK"]
|
ic-hep/DIRAC
|
src/DIRAC/DataManagementSystem/Service/test/Test_Service.py
|
Python
|
gpl-3.0
| 414
|
[
"DIRAC"
] |
58dfcae599032c065a691a8af7aab12c47b8a5fbddcf1e0d73aa51c7bde8eb12
|
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import sys
import re
import argparse
import subprocess
import multiprocessing
import collections
import logging
import mooseutils
# Check for the necessary packages, this does a load so they should all get loaded.
if mooseutils.check_configuration(['yaml', 'jinja2', 'markdown', 'pybtex',
                                   'pandas', 'livereload', 'bs4', 'lxml', 'pylatexenc', 'anytree']):
    sys.exit(1)
import yaml #pylint: disable=wrong-import-position
# MOOSE checkout location: $MOOSE_DIR if set, else ../moose, else ~/projects/moose
MOOSE_DIR = os.getenv('MOOSE_DIR', os.path.join(os.getcwd(), '..', 'moose'))
if not os.path.exists(MOOSE_DIR):
    MOOSE_DIR = os.path.join(os.getenv('HOME'), 'projects', 'moose')
# Top of the enclosing git repository.
# NOTE(review): check_output returns bytes on Python 3, where .strip('\n')
# would raise TypeError -- this module appears to target Python 2 (see the
# iteritems/itervalues usage below); confirm before porting.
ROOT_DIR = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'],
                                   cwd=os.getcwd(),
                                   stderr=subprocess.STDOUT).strip('\n')
# Per-user scratch area used by MooseDocs
TEMP_DIR = os.path.abspath(os.path.join(os.getenv('HOME'), '.local', 'share', 'moose'))
# Map deprecated MooseDocs markdown commands to their replacements
DEPRECATED_MARKDOWN = [(re.compile(r'(?P<command>^!input|!text|!clang)\s'), '!listing'),
                       (re.compile(r'(?P<command>^!figure|!image|!video)\s'), '!media'),
                       (re.compile(r'(?P<command>^!description)\s'), '!syntax description'),
                       (re.compile(r'(?P<command>^!parameters)\s'), '!syntax parameters'),
                       (re.compile(r'(?P<command>^!inputfiles)\s'), '!syntax inputs'),
                       (re.compile(r'(?P<command>^!childobjects)\s'), '!syntax children'),
                       (re.compile(r'(?P<command>^!systems)\s'), '!syntax complete'),
                       (re.compile(r'(?P<command>^!subsystems)\s'), '!syntax subsystems')]
def html_id(string):
    """
    Return *string* converted into a valid html id tag: non-word characters
    become hyphens, runs of hyphens collapse, and the result is lowercased
    with leading/trailing hyphens removed.
    """
    hyphenated = re.sub(r'[^\w]', '-', string).lower()
    collapsed = re.sub(r'(-+)', '-', hyphenated)
    return collapsed.strip('-')
class Loader(yaml.Loader):
    """
    A custom loader that handles nested includes. The nested includes should use absolute paths
    from the origin yaml file.
    """
    def include(self, node):
        """
        Allow for the embedding of yaml files.
        http://stackoverflow.com/questions/528281/how-can-i-include-an-yaml-file-inside-another

        Raises:
            IOError: if the included file does not exist.
        """
        # Included paths are resolved relative to the repository root
        filename = os.path.join(ROOT_DIR, self.construct_scalar(node))
        if os.path.exists(filename):
            with open(filename, 'r') as f:
                # Recurse with this same Loader so nested !include keeps working
                return yaml.load(f, Loader)
        else:
            raise IOError("Unknown included file: {}".format(filename))
    def importer(self, node, function):
        """
        Method for importing top-level entry from another file

        The node scalar has the form "<filename> <key>"; *function* loads the
        file and the named top-level key of the resulting dict is returned.
        This is registered via lambdas in yaml_load() that supply *function*.

        Raises:
            IOError: if the file is missing, does not contain a top-level
                dict, or lacks the requested key.
        """
        filename, key = self.construct_scalar(node).split(' ')
        # Allow $MOOSE_DIR in the path; otherwise resolve against the repo root
        filename = os.path.join(ROOT_DIR, filename.replace('$MOOSE_DIR', MOOSE_DIR))
        if not os.path.exists(filename):
            raise IOError("Unknown import file: {}".format(filename))
        data = function(filename)
        if not isinstance(data, dict):
            raise IOError("The imported YAML data must contain a dict() at the top level.")
        if key not in data:
            raise IOError("The imported YAML data does not contain the desired key.")
        return data[key]
def yaml_load(filename):
    """
    Load a YAML file capable of including other YAML files.

    Args:
        filename[str]: Name of the file to load, relative to the git root
                       directory.

    Raises:
        IOError: if the configuration file does not exist.
    """
    # Register the custom tags on our Loader subclass; the !import variants
    # differ only in which loader function resolves the referenced file.
    Loader.add_constructor('!include', Loader.include)
    Loader.add_constructor('!import', lambda x, y: Loader.importer(x, y, yaml_load))
    Loader.add_constructor('!import-config', lambda x, y: Loader.importer(x, y, load_config))

    if not os.path.exists(filename):
        raise IOError("The supplied configuration file was not found: {}".format(filename))

    with open(filename, 'r') as fid:
        return yaml.load(fid.read(), Loader)
def load_config(config_file, **kwargs):
    """
    Read the MooseDocs configure file (e.g., website.yml).

    Each top-level list item is either a bare string (mapped to an empty
    options dict) or a one-entry mapping of name -> options. Any keyword
    arguments override/update matching keys in each item's options.

    Args:
        config_file[str]: Path of the YAML configuration file to load.
        kwargs: Option overrides applied to every item that has the key.

    Returns:
        collections.OrderedDict: item name -> options dict, in file order.
    """
    out = collections.OrderedDict()
    config = yaml_load(config_file)
    for item in config:
        if isinstance(item, str):
            out[item] = dict()
        else:
            # Single-entry mapping: {name: options}.
            # list(...) keeps this working on both Python 2 and 3; the
            # original used Python-2-only dict.keys()[0] indexing.
            name = list(item.keys())[0]
            out[name] = list(item.values())[0]
    for value in out.values():
        for k, v in kwargs.items():
            if k in value:
                if hasattr(value[k], 'update'):
                    # Merge dict-like overrides instead of replacing them
                    value[k].update(v)
                else:
                    value[k] = v
    return out
|
Chuban/moose
|
python/MooseDocs/__init__.py
|
Python
|
lgpl-2.1
| 5,924
|
[
"MOOSE"
] |
6a82458f0fc196b643a8714c393e613e65515118003cd9ac5f8fd3b0221c7d76
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a simple algorithm for extracting nearest neighbor
exchange parameters by mapping low energy magnetic orderings to a Heisenberg
model.
"""
import copy
import logging
import sys
from ast import literal_eval
import numpy as np
import pandas as pd
from monty.json import MSONable, jsanitize
from monty.serialization import dumpfn
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "ncfrey@lbl.gov"
__status__ = "Development"
__date__ = "June 2019"
class HeisenbergMapper:
"""
Class to compute exchange parameters from low energy magnetic orderings.
"""
def __init__(self, ordered_structures, energies, cutoff=0.0, tol=0.02):
    """
    Exchange parameters are computed by mapping to a classical Heisenberg
    model. Strategy is the scheme for generating neighbors. Currently only
    MinimumDistanceNN is implemented.
    n+1 unique orderings are required to compute n exchange
    parameters.

    First run a MagneticOrderingsWF to obtain low energy collinear magnetic
    orderings and find the magnetic ground state. Then enumerate magnetic
    states with the ground state as the input structure, find the subset
    of supercells that map to the ground state, and do static calculations
    for these orderings.

    Args:
        ordered_structures (list): Structure objects with magmoms.
        energies (list): Total energies of each relaxed magnetic structure.
        cutoff (float): Cutoff in Angstrom for nearest neighbor search.
            Defaults to 0 (only NN, no NNN, etc.)
        tol (float): Tolerance (in Angstrom) on nearest neighbor distances
            being equal.

    Parameters:
        strategy (object): Class from pymatgen.analysis.local_env for
            constructing graphs.
        sgraphs (list): StructureGraph objects.
        unique_site_ids (dict): Maps each site to its unique numerical
            identifier.
        wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
            position.
        nn_interactions (dict): {i: j} pairs of NN interactions
            between unique sites.
        dists (dict): NN, NNN, and NNNN interaction distances
        ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
            graph.
        ex_params (dict): Exchange parameter values (meV/atom)
    """
    # Save original copies of inputs (trailing underscore = pre-screening)
    self.ordered_structures_ = ordered_structures
    self.energies_ = energies
    # Sanitize inputs and optionally order them by energy / magnetic moments
    hs = HeisenbergScreener(ordered_structures, energies, screen=False)
    ordered_structures = hs.screened_structures
    energies = hs.screened_energies
    self.ordered_structures = ordered_structures
    self.energies = energies
    self.cutoff = cutoff
    self.tol = tol
    # Get graph representations
    self.sgraphs = self._get_graphs(cutoff, ordered_structures)
    # Get unique site ids and wyckoff symbols
    self.unique_site_ids, self.wyckoff_ids = self._get_unique_sites(ordered_structures[0])
    # These attributes are set by internal methods
    self.nn_interactions = None
    self.dists = None
    self.ex_mat = None
    self.ex_params = None
    # Check how many commensurate graphs we found
    if len(self.sgraphs) < 2:
        # NOTE(review): exiting the interpreter from library code is harsh;
        # raising ValueError would be friendlier, but callers may depend on
        # SystemExit, so behavior is left unchanged.
        print("We need at least 2 unique orderings.")
        sys.exit(1)
    else:  # Set attributes
        self._get_nn_dict()
        self._get_exchange_df()
@staticmethod
def _get_graphs(cutoff, ordered_structures):
    """
    Generate graph representations of magnetic structures with nearest
    neighbor bonds. Right now this only works for MinimumDistanceNN.

    Args:
        cutoff (float): Cutoff in Angstrom for nearest neighbor search.
        ordered_structures (list): Structure objects.

    Returns:
        sgraphs (list): StructureGraph objects.
    """
    # Choose the neighbor-finding strategy: a finite cutoff collects all
    # sites within range, otherwise only nearest neighbors are used.
    if cutoff:
        nn_strategy = MinimumDistanceNN(cutoff=cutoff, get_all_sites=True)
    else:
        nn_strategy = MinimumDistanceNN()  # only NN

    # One structure graph per magnetic ordering.
    return [
        StructureGraph.with_local_env_strategy(struct, strategy=nn_strategy)
        for struct in ordered_structures
    ]
@staticmethod
def _get_unique_sites(structure):
    """
    Get dict that maps site indices to unique identifiers.

    Args:
        structure (Structure): ground state Structure object.

    Returns:
        unique_site_ids (dict): maps tuples of equivalent site indices to a
            unique int identifier
        wyckoff_ids (dict): maps unique int identifiers to their wyckoff
            symbols
    """
    # Get a nonmagnetic representation of the supercell geometry
    s0 = CollinearMagneticStructureAnalyzer(
        structure, make_primitive=False, threshold=0.0
    ).get_nonmagnetic_structure(make_primitive=False)

    # Get unique sites and wyckoff positions; drop any stale "wyckoff"
    # site property so the symmetry analysis starts clean.
    if "wyckoff" in s0.site_properties:
        s0.remove_site_property("wyckoff")
    symm_s0 = SpacegroupAnalyzer(s0).get_symmetrized_structure()

    # Map each group of symmetry-equivalent sites to a numeric id and record
    # that id's wyckoff symbol. (The original also filled a per-site wyckoff
    # list that was never used; that dead code has been removed.)
    unique_site_ids = {}
    wyckoff_ids = {}
    for i, (indices, symbol) in enumerate(zip(symm_s0.equivalent_indices, symm_s0.wyckoff_symbols)):
        unique_site_ids[tuple(indices)] = i
        wyckoff_ids[i] = symbol
    return unique_site_ids, wyckoff_ids
def _get_nn_dict(self):
    """Get dict of unique nearest neighbor interactions.

    Returns:
        None: (sets self.nn_interactions and self.dists instance variables)
    """
    tol = self.tol  # tolerance on NN distances
    sgraph = self.sgraphs[0]
    unique_site_ids = self.unique_site_ids
    nn_dict = {}
    nnn_dict = {}
    nnnn_dict = {}
    all_dists = []
    # Loop over unique sites and get neighbor distances up to NNNN
    for k in unique_site_ids:  # pylint: disable=C0206
        i = k[0]
        i_key = unique_site_ids[k]
        connected_sites = sgraph.get_connected_sites(i)
        dists = [round(cs[-1], 2) for cs in connected_sites]  # i<->j distances
        dists = sorted(list(set(dists)))  # NN, NNN, NNNN, etc.
        dists = dists[:3]  # keep up to NNNN
        all_dists += dists
    # Keep only up to NNNN and call dists equal if they are within tol
    all_dists = sorted(list(set(all_dists)))
    rm_list = []
    for idx, d in enumerate(all_dists[:-1]):
        if abs(d - all_dists[idx + 1]) < tol:
            rm_list.append(idx + 1)
    all_dists = [d for idx, d in enumerate(all_dists) if idx not in rm_list]
    if len(all_dists) < 3:  # pad with zeros
        all_dists += [0.0] * (3 - len(all_dists))
    all_dists = all_dists[:3]
    labels = ["nn", "nnn", "nnnn"]
    dists = dict(zip(labels, all_dists))
    # Get dictionary keys for interactions
    for k in unique_site_ids:  # pylint: disable=C0206
        i = k[0]
        i_key = unique_site_ids[k]
        connected_sites = sgraph.get_connected_sites(i)
        # Loop over sites and determine unique NN, NNN, etc. interactions
        for cs in connected_sites:
            dist = round(cs[-1], 2)  # i_j distance
            j = cs[2]  # j index
            # Map the neighbor's site index back to its unique-site id.
            # NOTE(review): if j matched no key, j_key would be stale from a
            # previous iteration -- presumably every site index appears in
            # unique_site_ids; confirm.
            for key, value in unique_site_ids.items():
                if j in key:
                    j_key = value
            # Classify the bond by distance shell within the tolerance
            if abs(dist - dists["nn"]) <= tol:
                nn_dict[i_key] = j_key
            elif abs(dist - dists["nnn"]) <= tol:
                nnn_dict[i_key] = j_key
            elif abs(dist - dists["nnnn"]) <= tol:
                nnnn_dict[i_key] = j_key
    nn_interactions = {"nn": nn_dict, "nnn": nnn_dict, "nnnn": nnnn_dict}
    self.dists = dists
    self.nn_interactions = nn_interactions
def _get_exchange_df(self):
    """
    Loop over all sites in a graph and count the number and types of
    nearest neighbor interactions, computing +-|S_i . S_j| to construct
    a Heisenberg Hamiltonian for each graph.

    Returns:
        None: (sets self.ex_mat instance variable)

    TODO:
        * Deal with large variance in |S| across configs
    """
    sgraphs = self.sgraphs
    tol = self.tol
    unique_site_ids = self.unique_site_ids
    nn_interactions = self.nn_interactions
    dists = self.dists

    # Get |site magmoms| from FM ordering so that S_i and S_j are consistent?
    # Large S variations is throwing a loop
    # fm_struct = self.get_low_energy_orderings()[0]

    # Total energy and nonmagnetic energy contribution
    columns = ["E", "E0"]

    # Get labels of unique NN interactions ("i-j-order"); treat i-j and j-i
    # as the same interaction.
    for k0, v0 in nn_interactions.items():
        for i, j in v0.items():  # i and j indices
            c = str(i) + "-" + str(j) + "-" + str(k0)
            c_rev = str(j) + "-" + str(i) + "-" + str(k0)
            if c not in columns and c_rev not in columns:
                columns.append(c)

    num_sgraphs = len(sgraphs)

    # Keep n interactions (not counting 'E') for n+1 structure graphs
    columns = columns[: num_sgraphs + 1]

    num_nn_j = len(columns) - 1  # ignore total energy
    j_columns = [name for name in columns if name not in ["E", "E0"]]
    ex_mat_empty = pd.DataFrame(columns=columns)
    ex_mat = ex_mat_empty.copy()

    if len(j_columns) < 2:
        self.ex_mat = ex_mat  # Only <J> can be calculated here
    else:
        sgraph_index = 0

        # Loop over all sites in each graph and compute |S_i . S_j|
        # for n+1 unique graphs to compute n exchange params
        for sgraph in copy.deepcopy(sgraphs):
            ex_row = pd.DataFrame(np.zeros((1, num_nn_j + 1)), index=[sgraph_index], columns=columns)

            for i in range(len(sgraph.graph.nodes)):
                s_i = sgraph.structure.site_properties["magmom"][i]

                # Unique-site id of site i
                for k, v in unique_site_ids.items():
                    if i in k:
                        i_index = v

                # Get all connections for ith site and compute |S_i . S_j|
                connections = sgraph.get_connected_sites(i)

                for connection in connections:
                    j_site = connection[2]
                    dist = round(connection[-1], 2)  # i_j distance

                    s_j = sgraph.structure.site_properties["magmom"][j_site]

                    # Unique-site id of site j
                    for k, v in unique_site_ids.items():
                        if j_site in k:
                            j_index = v

                    # Determine order of connection (distance shell)
                    if abs(dist - dists["nn"]) <= tol:
                        order = "-nn"
                    elif abs(dist - dists["nnn"]) <= tol:
                        order = "-nnn"
                    elif abs(dist - dists["nnnn"]) <= tol:
                        order = "-nnnn"

                    j_ij = str(i_index) + "-" + str(j_index) + order
                    j_ji = str(j_index) + "-" + str(i_index) + order

                    if j_ij in ex_mat.columns:
                        ex_row.at[sgraph_index, j_ij] -= s_i * s_j
                    elif j_ji in ex_mat.columns:
                        ex_row.at[sgraph_index, j_ji] -= s_i * s_j

            # Ignore the row if it is a duplicate to avoid singular matrix.
            # NOTE: DataFrame.append() was removed in pandas 2.0 -- replaced
            # with pd.concat (and computed once rather than twice).
            candidate = pd.concat([ex_mat, ex_row])
            if candidate[j_columns].equals(candidate[j_columns].drop_duplicates(keep="first")):
                e_index = self.ordered_structures.index(sgraph.structure)
                ex_row.at[sgraph_index, "E"] = self.energies[e_index]
                sgraph_index += 1
                # Re-concatenate so the stored row carries the energy value
                ex_mat = pd.concat([ex_mat, ex_row])

        ex_mat[j_columns] = ex_mat[j_columns].div(2.0)  # 1/2 factor in Heisenberg Hamiltonian
        ex_mat[["E0"]] = 1  # Nonmagnetic contribution

        # Check for singularities and delete columns with all zeros
        zeros = list((ex_mat == 0).all(axis=0))
        if True in zeros:
            c = ex_mat.columns[zeros.index(True)]
            ex_mat = ex_mat.drop(columns=[c], axis=1)

        # Force ex_mat to be square
        ex_mat = ex_mat[: ex_mat.shape[1] - 1]

        self.ex_mat = ex_mat
def get_exchange(self):
"""
Take Heisenberg Hamiltonian and corresponding energy for each row and
solve for the exchange parameters.
Returns:
ex_params (dict): Exchange parameter values (meV/atom).
"""
ex_mat = self.ex_mat
# Solve the matrix equation for J_ij values
E = ex_mat[["E"]]
j_names = [j for j in ex_mat.columns if j not in ["E"]]
# Only 1 NN interaction
if len(j_names) < 3:
# Estimate exchange by J ~ E_AFM - E_FM
j_avg = self.estimate_exchange()
ex_params = {"<J>": j_avg}
self.ex_params = ex_params
return ex_params
# Solve eigenvalue problem for more than 1 NN interaction
H = np.array(ex_mat.loc[:, ex_mat.columns != "E"].values).astype("float64")
H_inv = np.linalg.inv(H)
j_ij = np.dot(H_inv, E)
# Convert J_ij to meV
j_ij[1:] *= 1000 # J_ij in meV
j_ij = j_ij.tolist()
ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij)}
self.ex_params = ex_params
return ex_params
def get_low_energy_orderings(self):
    """
    Find lowest energy FM and AFM orderings to compute E_AFM - E_FM.

    Returns:
        fm_struct (Structure): fm structure with 'magmom' site property
        afm_struct (Structure): afm structure with 'magmom' site property
        fm_e (float): fm energy
        afm_e (float): afm energy
    """
    fm_struct, afm_struct = None, None
    mag_min = np.inf  # smallest |total moment| seen so far (AFM-like)
    mag_max = 0.001   # largest |total moment| seen so far (FM-like)
    fm_e_min = 0
    afm_e_min = 0

    # epas = [e / len(s) for (e, s) in zip(self.energies, self.ordered_structures)]

    for s, e in zip(self.ordered_structures, self.energies):
        # Classify each ordering; zero threshold keeps small moments.
        ordering = CollinearMagneticStructureAnalyzer(s, threshold=0.0, make_primitive=False).ordering
        magmoms = s.site_properties["magmom"]

        # Try to find matching orderings first: keep the lowest-energy
        # structure explicitly labeled FM and the lowest labeled AFM.
        if ordering == Ordering.FM and e < fm_e_min:
            fm_struct = s
            mag_max = abs(sum(magmoms))
            fm_e = e
            fm_e_min = e

        if ordering == Ordering.AFM and e < afm_e_min:
            afm_struct = s
            afm_e = e
            mag_min = abs(sum(magmoms))
            afm_e_min = e

    # Brute force search for closest thing to FM and AFM: fall back to
    # total-moment magnitude (max -> FM-like, min -> AFM-like).
    if not fm_struct or not afm_struct:
        for s, e in zip(self.ordered_structures, self.energies):
            magmoms = s.site_properties["magmom"]

            if abs(sum(magmoms)) > mag_max:  # FM ground state
                fm_struct = s
                fm_e = e
                mag_max = abs(sum(magmoms))

            # AFM ground state
            if abs(sum(magmoms)) < mag_min:
                afm_struct = s
                afm_e = e
                mag_min = abs(sum(magmoms))
                afm_e_min = e
            elif abs(sum(magmoms)) == 0 and mag_min == 0:
                # Several exactly-compensated candidates: keep lowest energy.
                if e < afm_e_min:
                    afm_struct = s
                    afm_e = e
                    afm_e_min = e

    # NOTE(review): if neither loop assigns fm_struct/afm_struct, the calls
    # below receive None and fm_e/afm_e are unbound — assumes inputs always
    # contain at least one FM-like and one AFM-like ordering; verify upstream.

    # Convert to magnetic structures with 'magmom' site property
    fm_struct = CollinearMagneticStructureAnalyzer(
        fm_struct, make_primitive=False, threshold=0.0
    ).get_structure_with_only_magnetic_atoms(make_primitive=False)

    afm_struct = CollinearMagneticStructureAnalyzer(
        afm_struct, make_primitive=False, threshold=0.0
    ).get_structure_with_only_magnetic_atoms(make_primitive=False)

    return fm_struct, afm_struct, fm_e, afm_e
def estimate_exchange(self, fm_struct=None, afm_struct=None, fm_e=None, afm_e=None):
    """
    Estimate <J> for a structure based on low energy FM and AFM orderings.

    Args:
        fm_struct (Structure): fm structure with 'magmom' site property
        afm_struct (Structure): afm structure with 'magmom' site property
        fm_e (float): fm energy/atom
        afm_e (float): afm energy/atom

    Returns:
        j_avg (float): Average exchange parameter (meV/atom)
    """
    # Get low energy orderings if not supplied
    if any(arg is None for arg in [fm_struct, afm_struct, fm_e, afm_e]):
        fm_struct, afm_struct, fm_e, afm_e = self.get_low_energy_orderings()

    magmoms = fm_struct.site_properties["magmom"]

    # Normalize energies by number of magnetic ions
    # fm_e = fm_e / len(magmoms)
    # afm_e = afm_e / len(afm_magmoms)

    # Mean |m|; sqrt(m^2) is used as an abs() that works for signed moments.
    m_avg = np.mean([np.sqrt(m**2) for m in magmoms])

    # If m_avg for FM config is < 1 we won't get sensible results.
    if m_avg < 1:
        iamthedanger = """
            Local magnetic moments are small (< 1 muB / atom). The
            exchange parameters may be wrong, but <J> and the mean
            field critical temperature estimate may be OK.
            """
        logging.warning(iamthedanger)

    delta_e = afm_e - fm_e  # J > 0 -> FM
    j_avg = delta_e / (m_avg**2)  # eV / magnetic ion
    j_avg *= 1000  # meV / ion

    return j_avg
def get_mft_temperature(self, j_avg):
    """
    Crude mean field estimate of critical temperature based on <J> for
    one sublattice, or solving the coupled equations for a multisublattice
    material.

    Args:
        j_avg (float): Average exchange parameter (meV/atom)

    Returns:
        mft_t (float): Critical temperature (K)
    """
    num_sublattices = len(self.unique_site_ids)
    k_boltzmann = 0.0861733  # meV/K

    # Only 1 magnetic sublattice: mean-field Tc = 2|<J>| / (3 k_B).
    if num_sublattices == 1:
        mft_t = 2 * abs(j_avg) / 3 / k_boltzmann
    else:  # multiple magnetic sublattices
        # Build the symmetric inter-sublattice coupling matrix from the
        # fitted exchange parameters, then take its largest eigenvalue.
        omega = np.zeros((num_sublattices, num_sublattices))
        ex_params = self.ex_params
        ex_params = {k: v for (k, v) in ex_params.items() if k != "E0"}  # ignore E0
        for k in ex_params:
            # split into i, j unique site identifiers (keys look like "i-j-nn")
            sites = k.split("-")
            sites = [int(num) for num in sites[:2]]  # cut 'nn' identifier
            i, j = sites[0], sites[1]
            omega[i, j] += ex_params[k]
            omega[j, i] += ex_params[k]

        omega = omega * 2 / 3 / k_boltzmann
        eigenvals, eigenvecs = np.linalg.eig(omega)
        mft_t = max(eigenvals)

    if mft_t > 1500:  # Not sensible!
        stayoutofmyterritory = """
            This mean field estimate is too high! Probably
            the true low energy orderings were not given as inputs.
            """
        logging.warning(stayoutofmyterritory)

    return mft_t
def get_interaction_graph(self, filename=None):
    """
    Get a StructureGraph with edges and weights that correspond to exchange
    interactions and J_ij values, respectively.

    Args:
        filename (str): if not None, save interaction graph to filename.

    Returns:
        igraph (StructureGraph): Exchange interaction graph.
    """
    # The graph is built on the first (lowest-energy after sorting)
    # ordered structure and its bonding graph.
    structure = self.ordered_structures[0]
    sgraph = self.sgraphs[0]

    igraph = StructureGraph.with_empty_graph(
        structure, edge_weight_name="exchange_constant", edge_weight_units="meV"
    )

    if "<J>" in self.ex_params:  # Only <J> is available
        warning_msg = """
            Only <J> is available. The interaction graph will not tell
            you much.
            """
        logging.warning(warning_msg)

    # J_ij exchange interaction matrix: add one weighted edge per
    # connected-site pair in the bonding graph.
    for i, node in enumerate(sgraph.graph.nodes):
        connections = sgraph.get_connected_sites(i)
        for c in connections:
            jimage = c[1]  # relative integer coordinates of atom j
            j = c[2]  # index of neighbor
            dist = c[-1]  # i <-> j distance
            j_exc = self._get_j_exc(i, j, dist)
            igraph.add_edge(i, j, to_jimage=jimage, weight=j_exc, warn_duplicates=False)

    # Save to a json file if desired
    if filename:
        if filename.endswith(".json"):
            dumpfn(igraph, filename)
        else:
            filename += ".json"
            dumpfn(igraph, filename)

    return igraph
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites
(10E-2 precision)
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k, v in self.unique_site_ids.items():
if i in k:
i_index = v
if j in k:
j_index = v
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
def get_heisenberg_model(self):
    """Save results of mapping to a HeisenbergModel object.

    Returns:
        hmodel (HeisenbergModel): MSONable object.
    """
    # Original formula unit with nonmagnetic ions
    hm_formula = str(self.ordered_structures_[0].composition.reduced_formula)

    # Gather the mapper state that the model stores verbatim.
    hm_structures = self.ordered_structures
    hm_energies = self.energies
    hm_cutoff = self.cutoff
    hm_tol = self.tol
    hm_sgraphs = self.sgraphs
    hm_usi = self.unique_site_ids
    hm_wids = self.wyckoff_ids
    hm_nni = self.nn_interactions
    hm_d = self.dists

    # Exchange matrix DataFrame in json format
    hm_em = self.ex_mat.to_json()
    # Derived quantities (these recompute the fit / graph as side effects).
    hm_ep = self.get_exchange()
    hm_javg = self.estimate_exchange()
    hm_igraph = self.get_interaction_graph()

    hmodel = HeisenbergModel(
        hm_formula,
        hm_structures,
        hm_energies,
        hm_cutoff,
        hm_tol,
        hm_sgraphs,
        hm_usi,
        hm_wids,
        hm_nni,
        hm_d,
        hm_em,
        hm_ep,
        hm_javg,
        hm_igraph,
    )

    return hmodel
class HeisenbergScreener:
    """
    Class to clean and screen magnetic orderings.
    """

    def __init__(self, structures, energies, screen=False):
        """
        This class pre-processes magnetic orderings and energies for
        HeisenbergMapper. It prioritizes low-energy orderings with large and
        localized magnetic moments.

        Args:
            structures (list): Structure objects with magnetic moments.
            energies (list): Energies/atom of magnetic orderings.
            screen (bool): Try to screen out high energy and low-spin configurations.

        Attributes:
            screened_structures (list): Sorted structures.
            screened_energies (list): Sorted energies.
        """
        # Cleanup
        structures, energies = self._do_cleanup(structures, energies)

        n_structures = len(structures)

        # If there are more than 2 structures, we want to perform a
        # screening to prioritize well-behaved orderings
        if screen and n_structures > 2:
            structures, energies = self._do_screen(structures, energies)

        self.screened_structures = structures
        self.screened_energies = energies

    @staticmethod
    def _do_cleanup(structures, energies):
        """Sanitize input structures and energies.

        Takes magnetic structures and performs the following operations
        - Erases nonmagnetic ions and gives all ions ['magmom'] site prop
        - Converts total energies -> energy / magnetic ion
        - Checks for duplicate/degenerate orderings
        - Sorts by energy

        Args:
            structures (list): Structure objects with magmoms.
            energies (list): Corresponding energies.

        Returns:
            ordered_structures (list): Sanitized structures.
            ordered_energies (list): Sorted energies.
        """
        # Get only magnetic ions & give all structures site_properties['magmom'].
        # Zero threshold so that magnetic ions with small moments are preserved.
        ordered_structures = [
            CollinearMagneticStructureAnalyzer(
                s, make_primitive=False, threshold=0.0
            ).get_structure_with_only_magnetic_atoms(make_primitive=False)
            for s in structures
        ]

        # Convert to energies / magnetic ion
        energies = [e / len(s) for (e, s) in zip(energies, ordered_structures)]

        # Check for duplicate / degenerate states (sometimes different initial
        # configs relax to the same state)
        remove_list = []
        e_tol = 6  # 10^-6 eV/atom tolerance on energies (loop invariant)
        for i, e in enumerate(energies):
            e = round(e, e_tol)
            if i not in remove_list:
                for i_check, e_check in enumerate(energies):
                    e_check = round(e_check, e_tol)
                    if i != i_check and i_check not in remove_list and e == e_check:
                        remove_list.append(i_check)

        # TODO: possibly also discard (or bury) structures with small
        # |magmoms| < 0.1 uB.

        # Remove duplicates
        if len(remove_list):
            ordered_structures = [s for i, s in enumerate(ordered_structures) if i not in remove_list]
            energies = [e for i, e in enumerate(energies) if i not in remove_list]

        # Sort by energy if not already sorted. Key on the energy alone so
        # that exact energy ties never fall through to comparing Structure
        # objects (which is undefined / may raise).
        ordered_structures = [
            s for _, s in sorted(zip(energies, ordered_structures), key=lambda pair: pair[0])
        ]
        ordered_energies = sorted(energies)

        return ordered_structures, ordered_energies

    @staticmethod
    def _do_screen(structures, energies):
        """Screen and sort magnetic orderings based on some criteria.

        Prioritize low energy orderings and large, localized magmoms.
        do_clean should be run first to sanitize inputs.

        Args:
            structures (list): At least three structure objects.
            energies (list): Energies.

        Returns:
            screened_structures (list): Sorted structures.
            screened_energies (list): Sorted energies.
        """
        magmoms = [s.site_properties["magmom"] for s in structures]
        n_below_1ub = [len([m for m in ms if abs(m) < 1]) for ms in magmoms]

        df = pd.DataFrame(
            {
                "structure": structures,
                "energy": energies,
                "magmoms": magmoms,
                "n_below_1ub": n_below_1ub,
            }
        )

        # Keep the ground and first excited state fixed to capture the
        # low-energy spectrum; only the remaining rows are re-prioritized.
        df_high_energy = df.iloc[2:]

        # Prioritize structures with fewer magmoms < 1 uB
        df_high_energy = df_high_energy.sort_values(by="n_below_1ub")

        index = [0, 1] + list(df_high_energy.index)

        # sort
        df = df.reindex(index)
        screened_structures = list(df["structure"].values)
        screened_energies = list(df["energy"].values)

        return screened_structures, screened_energies
class HeisenbergModel(MSONable):
    """
    Store a Heisenberg model fit to low-energy magnetic orderings.
    Intended to be generated by HeisenbergMapper.get_heisenberg_model().
    """

    def __init__(
        self,
        formula=None,
        structures=None,
        energies=None,
        cutoff=None,
        tol=None,
        sgraphs=None,
        unique_site_ids=None,
        wyckoff_ids=None,
        nn_interactions=None,
        dists=None,
        ex_mat=None,
        ex_params=None,
        javg=None,
        igraph=None,
    ):
        """
        Args:
            formula (str): Reduced formula of compound.
            structures (list): Structure objects with magmoms.
            energies (list): Energies of each relaxed magnetic structure.
            cutoff (float): Cutoff in Angstrom for nearest neighbor search.
            tol (float): Tolerance (in Angstrom) on nearest neighbor distances being equal.
            sgraphs (list): StructureGraph objects.
            unique_site_ids (dict): Maps each site to its unique numerical
                identifier.
            wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
                position.
            nn_interactions (dict): {i: j} pairs of NN interactions
                between unique sites.
            dists (dict): NN, NNN, and NNNN interaction distances
            ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
                graph.
            ex_params (dict): Exchange parameter values (meV/atom).
            javg (float): <J> exchange param (meV/atom).
            igraph (StructureGraph): Exchange interaction graph.
        """
        self.formula = formula
        self.structures = structures
        self.energies = energies
        self.cutoff = cutoff
        self.tol = tol
        self.sgraphs = sgraphs
        self.unique_site_ids = unique_site_ids
        self.wyckoff_ids = wyckoff_ids
        self.nn_interactions = nn_interactions
        self.dists = dists
        self.ex_mat = ex_mat
        self.ex_params = ex_params
        self.javg = javg
        self.igraph = igraph

    def as_dict(self):
        """
        Because some dicts have tuple keys, some sanitization is required for json compatibility.
        """
        d = {}
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["@version"] = __version__
        d["formula"] = self.formula
        d["structures"] = [s.as_dict() for s in self.structures]
        d["energies"] = self.energies
        d["cutoff"] = self.cutoff
        d["tol"] = self.tol
        d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs]
        d["dists"] = self.dists
        d["ex_params"] = self.ex_params
        d["javg"] = self.javg
        d["igraph"] = self.igraph.as_dict()

        # Sanitize tuple & int keys
        d["ex_mat"] = jsanitize(self.ex_mat)
        d["nn_interactions"] = jsanitize(self.nn_interactions)
        d["unique_site_ids"] = jsanitize(self.unique_site_ids)
        d["wyckoff_ids"] = jsanitize(self.wyckoff_ids)

        return d

    @classmethod
    def from_dict(cls, d):
        """Create a HeisenbergModel from a dict."""
        # Reconstitute the site ids: jsanitize stringified the tuple/int
        # keys, so parse them back with literal_eval.
        usids = {}
        wids = {}
        nnis = {}

        for k, v in d["nn_interactions"].items():
            nn_dict = {}
            for k1, v1 in v.items():
                key = literal_eval(k1)
                nn_dict[key] = v1
            nnis[k] = nn_dict

        for k, v in d["unique_site_ids"].items():
            key = literal_eval(k)
            if isinstance(key, int):
                # Single-site keys round-trip as bare ints; re-wrap as tuples.
                usids[tuple([key])] = v
            elif isinstance(key, tuple):
                usids[key] = v

        for k, v in d["wyckoff_ids"].items():
            key = literal_eval(k)
            wids[key] = v

        # Reconstitute the structure and graph objects
        structures = []
        sgraphs = []
        for v in d["structures"]:
            structures.append(Structure.from_dict(v))
        for v in d["sgraphs"]:
            sgraphs.append(StructureGraph.from_dict(v))

        # Interaction graph
        igraph = StructureGraph.from_dict(d["igraph"])

        # Reconstitute the exchange matrix DataFrame
        # NOTE(review): eval() on serialized data executes arbitrary code if
        # the dict comes from an untrusted source — consider json.loads here.
        try:
            ex_mat = eval(d["ex_mat"])
            ex_mat = pd.DataFrame.from_dict(ex_mat)
        except SyntaxError:  # if ex_mat is empty
            ex_mat = pd.DataFrame(columns=["E", "E0"])

        hmodel = HeisenbergModel(
            formula=d["formula"],
            structures=structures,
            energies=d["energies"],
            cutoff=d["cutoff"],
            tol=d["tol"],
            sgraphs=sgraphs,
            unique_site_ids=usids,
            wyckoff_ids=wids,
            nn_interactions=nnis,
            dists=d["dists"],
            ex_mat=ex_mat,
            ex_params=d["ex_params"],
            javg=d["javg"],
            igraph=igraph,
        )

        return hmodel

    def _get_j_exc(self, i, j, dist):
        """
        Convenience method for looking up exchange parameter between two sites.

        Args:
            i (int): index of ith site
            j (int): index of jth site
            dist (float): distance (Angstrom) between sites +- tol

        Returns:
            j_exc (float): Exchange parameter in meV
        """
        # Get unique site identifiers
        for k in self.unique_site_ids.keys():
            if i in k:
                i_index = self.unique_site_ids[k]
            if j in k:
                j_index = self.unique_site_ids[k]

        order = ""

        # Determine order of interaction by matching the neighbor-shell distance
        if abs(dist - self.dists["nn"]) <= self.tol:
            order = "-nn"
        elif abs(dist - self.dists["nnn"]) <= self.tol:
            order = "-nnn"
        elif abs(dist - self.dists["nnnn"]) <= self.tol:
            order = "-nnnn"

        # Parameters may be stored under either i-j or j-i key ordering.
        j_ij = str(i_index) + "-" + str(j_index) + order
        j_ji = str(j_index) + "-" + str(i_index) + order

        if j_ij in self.ex_params:
            j_exc = self.ex_params[j_ij]
        elif j_ji in self.ex_params:
            j_exc = self.ex_params[j_ji]
        else:
            j_exc = 0

        # Check if only averaged NN <J> values are available
        if "<J>" in self.ex_params and order == "-nn":
            j_exc = self.ex_params["<J>"]

        return j_exc
|
materialsproject/pymatgen
|
pymatgen/analysis/magnetism/heisenberg.py
|
Python
|
mit
| 37,336
|
[
"pymatgen"
] |
29d25b612a777ae5f716725d9c90b4e3eb184da9c6d21e38b3c3545862fa3d12
|
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ALADIN, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.modules import get_software_root
from easybuild.tools.ordereddict import OrderedDict
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_ALADIN(EasyBlock):
"""Support for building/installing ALADIN."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for ALADIN."""
super(EB_ALADIN, self).__init__(*args, **kwargs)
self.conf_file = None
self.conf_filepath = None
self.rootpack_dir = None
self.orig_library_path = None
@staticmethod
def extra_options():
"""Custom easyconfig parameters for ALADIN."""
extra_vars = {
'optional_extra_param': ['default value', "short description", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Custom configuration procedure for ALADIN."""
# unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking
if 'LIBRARY_PATH' in os.environ:
self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH'])
self.orig_library_path = os.environ.pop('LIBRARY_PATH')
# build auxiliary libraries
auxlibs_dir = None
my_gnu = None
if self.toolchain.comp_family() == toolchain.GCC:
my_gnu = 'y' # gfortran
for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']:
flags = os.getenv(var)
env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags)
self.log.info("Updated %s to '%s'" % (var, os.getenv(var)))
elif self.toolchain.comp_family() == toolchain.INTELCOMP:
my_gnu = 'i' # icc/ifort
else:
self.log.error("Don't know how to set 'my_gnu' variable in auxlibs build script.")
self.log.info("my_gnu set to '%s'" % my_gnu)
tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_')
try:
cwd = os.getcwd()
os.chdir(self.builddir)
builddirs = os.listdir(self.builddir)
auxlibs_dir = [x for x in builddirs if x.startswith('auxlibs_installer')][0]
os.chdir(auxlibs_dir)
auto_driver = 'driver_automatic'
for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line)
line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line) # always 64-bit real precision
line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line) # make libs read-only after build
line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line)
sys.stdout.write(line)
run_cmd("./%s" % auto_driver)
os.chdir(cwd)
except OSError, err:
self.log.error("Failed to build ALADIN: %s" % err)
# build gmkpack, update PATH and set GMKROOT
# we build gmkpack here because a config file is generated in the gmkpack isntall path
try:
gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0]
os.chdir(os.path.join(self.builddir, gmkpack_dir))
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n',
}
run_cmd_qa("./build_gmkpack", qa)
os.chdir(cwd)
paths = os.getenv('PATH').split(':')
paths.append(os.path.join(self.builddir, gmkpack_dir, 'util'))
env.setvar('PATH', ':'.join(paths))
env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir))
except OSError, err:
self.log.error("Failed to build gmkpack: %s" % err)
# generate gmkpack configuration file
self.conf_file = 'ALADIN_%s' % self.version
self.conf_filepath = os.path.join(self.builddir, 'arch', '%s.x' % self.conf_file)
try:
if os.path.exists(self.conf_filepath):
os.remove(self.conf_filepath)
self.log.info("Removed existing gmpack config file %s" % self.conf_filepath)
archdir = os.path.join(self.builddir, 'arch')
if not os.path.exists(archdir):
os.makedirs(archdir)
except OSError, err:
self.log.error("Failed to remove existing file %s: %s" % (self.conf_filepath, err))
mpich = 'n'
known_mpi_libs = [toolchain.MPICH, toolchain.MPICH2, toolchain.INTELMPI]
if self.toolchain.options.get('usempi', None) and self.toolchain.mpi_family() in known_mpi_libs:
mpich = 'y'
qpref = 'Please type the ABSOLUTE name of '
qsuff = ', or ignore (environment variables allowed) :'
qsuff2 = ', or ignore : (environment variables allowed) :'
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.GCC:
gribdir = 'GNU'
elif comp_fam == toolchain.INTELCOMP:
gribdir = 'INTEL'
else:
self.log.error("Don't know which grib lib dir to use for compiler %s" % comp_fam)
aux_lib_gribex = os.path.join(tmp_installroot, gribdir, 'lib', 'libgribex.a')
aux_lib_ibm = os.path.join(tmp_installroot, gribdir, 'lib', 'libibmdummy.a')
grib_api_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api.a')
grib_api_f90_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api_f90.a')
grib_api_inc = os.path.join(get_software_root('grib_api'), 'include')
jasperlib = os.path.join(get_software_root('JasPer'), 'lib', 'libjasper.a')
netcdflib = os.path.join(get_software_root('netCDF'), 'lib', 'libnetcdff.a')
netcdfinc = os.path.join(get_software_root('netCDF'), 'include')
mpilib = os.path.join(os.getenv('MPI_LIB_DIR'), os.getenv('MPI_LIB_SHARED'))
ldpaths = [ldflag[2:] for ldflag in os.getenv('LDFLAGS').split(' ')] # LDFLAGS have form '-L/path/to'
lapacklibs = []
for lib in os.getenv('LAPACK_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
lapacklibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
lapacklib = ' '.join(lapacklibs)
blaslibs = []
for lib in os.getenv('BLAS_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
blaslibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
blaslib = ' '.join(blaslibs)
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'y',
'Do you want to setup your configuration file for MPICH (y/n) [n] ?': mpich,
'Please type the directory name where to find a dummy file mpif.h or ignore :': os.getenv('MPI_INC_DIR'),
'%sthe library gribex or emos%s' % (qpref, qsuff2): aux_lib_gribex,
'%sthe library ibm%s' % (qpref, qsuff): aux_lib_ibm,
'%sthe library grib_api%s' % (qpref, qsuff): grib_api_lib,
'%sthe library grib_api_f90%s' % (qpref, qsuff): grib_api_f90_lib,
'%sthe JPEG auxilary library if enabled by Grib_api%s' % (qpref, qsuff2): jasperlib,
'%sthe library netcdf%s' % (qpref, qsuff): netcdflib,
'%sthe library lapack%s' % (qpref, qsuff): lapacklib,
'%sthe library blas%s' % (qpref, qsuff): blaslib,
'%sthe library mpi%s' % (qpref, qsuff): mpilib,
'%sa MPI dummy library for serial executions, or ignore :' % qpref: '',
'Please type the directory name where to find grib_api headers, or ignore :': grib_api_inc,
'Please type the directory name where to find fortint.h or ignore :': '',
'Please type the directory name where to find netcdf headers, or ignore :': netcdfinc,
'Do you want to define CANARI (y/n) [y] ?': 'y',
'Please type the name of the script file used to generate a preprocessed blacklist file, or ignore :': '',
'Please type the name of the script file used to recover local libraries (gget), or ignore :': '',
'Please type the options to tune the gnu compilers, or ignore :': os.getenv('F90FLAGS'),
}
f90_seq = os.getenv('F90_SEQ')
if not f90_seq:
# F90_SEQ is only defined when usempi is enabled
f90_seq = os.getenv('F90')
stdqa = OrderedDict([
(r'Confirm library .* is .*', 'y'), # this one needs to be tried first!
(r'.*fortran 90 compiler name .*\s*:\n\(suggestions\s*: .*\)', os.getenv('F90')),
(r'.*fortran 90 compiler interfaced with .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
(r'Please type the ABSOLUTE name of .*library.*, or ignore\s*[:]*\s*[\n]*.*', ''),
(r'Please .* to save this draft configuration file :\n.*', '%s.x' % self.conf_file),
])
no_qa = [
".*ignored.",
]
env.setvar('GMKTMP', self.builddir)
env.setvar('GMKFILE', self.conf_file)
run_cmd_qa("gmkfilemaker", qa, std_qa=stdqa, no_qa=no_qa)
# set environment variables for installation dirs
env.setvar('ROOTPACK', os.path.join(self.installdir, 'rootpack'))
env.setvar('ROOTBIN', os.path.join(self.installdir, 'rootpack'))
env.setvar('HOMEPACK', os.path.join(self.installdir, 'pack'))
env.setvar('HOMEBIN', os.path.join(self.installdir, 'pack'))
def build_step(self):
"""No separate build procedure for ALADIN (see install_step)."""
pass
def test_step(self):
"""Custom built-in test procedure for ALADIN."""
if self.cfg['runtest']:
cmd = "test-command"
run_cmd(cmd, simple=True, log_all=True, log_output=True)
def install_step(self):
"""Custom install procedure for ALADIN."""
try:
os.mkdir(os.getenv('ROOTPACK'))
os.mkdir(os.getenv('HOMEPACK'))
except OSError, err:
self.log.error("Failed to create rootpack dir in %s: %s" % err)
# create rootpack
[v1, v2] = self.version.split('_')
(out, _) = run_cmd("source $GMKROOT/util/berootpack && gmkpack -p master -a -r %s -b %s" % (v1, v2), simple=False)
packdir_regexp = re.compile("Creating main pack (.*) \.\.\.")
res = packdir_regexp.search(out)
if res:
self.rootpack_dir = os.path.join('rootpack', res.group(1))
else:
self.log.error("Failed to determine rootpack dir.")
# copy ALADIN sources to right directory
try:
src_dirs = [d for d in os.listdir(self.builddir) if not (d.startswith('auxlib') or d.startswith('gmk'))]
target = os.path.join(self.installdir, self.rootpack_dir, 'src', 'local')
self.log.info("Copying sources from %s to %s" % (self.builddir, target))
for srcdir in src_dirs:
shutil.copytree(os.path.join(self.builddir, srcdir), os.path.join(target, srcdir))
self.log.info("Copied %s" % srcdir)
except OSError, err:
self.log.error("Failed to copy ALADIN sources: %s" % err)
if self.cfg['parallel']:
env.setvar('GMK_THREADS', str(self.cfg['parallel']))
# build rootpack
run_cmd(os.path.join(self.installdir, self.rootpack_dir, 'ics_master'))
# restore original $LIBRARY_PATH
if self.orig_library_path is not None:
os.environ['LIBRARY_PATH'] = self.orig_library_path
def sanity_check_step(self):
"""Custom sanity check for ALADIN."""
bindir = os.path.join(self.rootpack_dir, 'bin')
libdir = os.path.join(self.rootpack_dir, 'lib')
custom_paths = {
'files': [os.path.join(bindir, x) for x in ['MASTER']] +
[os.path.join(libdir, 'lib%s.local.a' % x) for x in ['aeo', 'ald', 'arp', 'bip',
'bla', 'mpa', 'mse', 'obt',
'odb', 'sat', 'scr', 'sct',
'sur', 'surfex', 'tal', 'tfl',
'uti', 'xla', 'xrd']],
'dirs': [],
}
super(EB_ALADIN, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for environment variables (PATH, ...) for ALADIN."""
guesses = super(EB_ALADIN, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.join(self.rootpack_dir, 'bin')],
})
return guesses
|
omula/easybuild-easyblocks
|
easybuild/easyblocks/a/aladin.py
|
Python
|
gpl-2.0
| 14,519
|
[
"NetCDF"
] |
2701e7d0e4247aec96c00f5c4ba094b2ad8565ac2dec1b4691f4116134a46489
|
#!/bin/python
def run_viterbi_test():
    """A simple tester for Viterbi algorithm.

    This function generates a bunch of random emission and transition scores,
    and computes the best sequence by performing a brute force search over all
    possible sequences and scoring them. It then runs Viterbi code to see what
    is the score and sequence returned by it.
    Compares both the best sequence and its score to make sure Viterbi is correct.
    """
    from viterbi import run_viterbi
    from numpy import random
    import numpy as np
    from itertools import product

    maxN = 7  # maximum length of a sentence (min is 1)
    maxL = 4  # maximum number of labels (min is 2)
    num_tests = 1000  # number of sentences to generate
    random.seed(0)  # fixed seed, so failures are reproducible

    tolerance = 1e-5  # how close do the scores have to be?

    emission_var = 1.0  # variance of the gaussian generating emission scores
    trans_var = 1.0  # variance of the gaussian generating transition scores

    passed_y = 0  # how many times the correct sequence was predicted
    passed_s = 0  # how many times the correct score was returned

    for t in range(num_tests):
        # random sentence length and label-set size for this trial
        N = random.randint(1, maxN + 1)
        L = random.randint(2, maxL + 1)

        # Generate the scores
        emission_scores = random.normal(0.0, emission_var, (N, L))
        trans_scores = random.normal(0.0, trans_var, (L, L))
        start_scores = random.normal(0.0, trans_var, L)
        end_scores = random.normal(0.0, trans_var, L)

        # run viterbi
        (viterbi_s, viterbi_y) = run_viterbi(emission_scores, trans_scores, start_scores, end_scores)
        # print "Viterbi", viterbi_s, viterbi_y

        # compute the best sequence and score by exhaustively scoring all
        # L**N label sequences (feasible because maxN and maxL are small)
        best_y = []
        best_s = -np.inf
        for y in product(range(L), repeat=N):  # all possible ys
            # compute its score: start + transitions + emissions + end
            score = 0.0
            score += start_scores[y[0]]
            for i in range(N - 1):
                score += trans_scores[y[i], y[i + 1]]
                score += emission_scores[i, y[i]]
            score += emission_scores[N - 1, y[N - 1]]
            score += end_scores[y[N - 1]]
            # update the best
            if score > best_s:
                best_s = score
                best_y = list(y)
        # print "Brute", best_s, best_y

        # mismatch if any label prediction doesn't match
        match_y = True
        for i in range(len(best_y)):
            if viterbi_y[i] != best_y[i]:
                match_y = False
        if match_y: passed_y += 1

        # the scores should also be very close
        if abs(viterbi_s - best_s) < tolerance:
            passed_s += 1

    print("Passed(y)", passed_y * 100.0 / num_tests)
    print("Passed(s)", passed_s * 100.0 / num_tests)
    assert passed_y == num_tests
    assert passed_s == num_tests
if __name__ == "__main__":
    # Run the tester when this file is invoked directly as a script.
    run_viterbi_test()
|
sameersingh/uci-statnlp
|
hw3/viterbi_test.py
|
Python
|
apache-2.0
| 2,941
|
[
"Gaussian"
] |
9d81906df274a0dff946f751f5b8fe035ab635b2812d61b96247c5022aa30247
|
#!/usr/bin/env python
from __future__ import print_function
import math
import logging
import argparse
import numpy as np
from PIL import Image
import geoip2.database
from PIL import ImageColor
from itertools import imap
from colorsys import hsv_to_rgb
from collections import defaultdict
__version__ = '0.0.3'
class LinearKernel:
    '''Uses a linear falloff, essentially turning a point into a cone.'''

    def __init__(self, radius):
        # radius in pixels; keep a float copy once so heat() does not
        # repeat the conversion on every call.
        self.radius = radius
        self.radius_float = float(radius)

    def heat(self, distance):
        '''Linear ramp: 1.0 at the center, 0.0 at and beyond the radius.'''
        if distance < self.radius:
            return 1.0 - (distance / self.radius_float)
        return 0.0
class GaussianKernel:
    def __init__(self, radius):
        '''radius is the distance beyond which you should not bother.'''
        self.radius = radius
        # Pick the decay constant so the heat drops to 1/256 of the
        # peak value at a distance of `radius` from the center.
        self.scale = math.log(256) / radius

    def heat(self, distance):
        '''Exponential falloff: 1.0 at the center, 1/256 of peak at radius.'''
        return math.exp(-distance * self.scale)
class Coordinate(object):
    '''A generic (x, y) pair; hashable, comparable, and subtractable.'''

    def __init__(self, x, y):
        self.x = x
        self.y = y

    # Subclasses override first/second to control ordering conventions.
    first = property(lambda self: self.x)
    second = property(lambda self: self.y)

    def copy(self):
        '''Return a new instance of the same class with the same values.'''
        return self.__class__(self.first, self.second)

    def __str__(self):
        return '(%s, %s)' % (str(self.x), str(self.y))

    def __eq__(self, o):
        return self.x == o.x and self.y == o.y

    def __hash__(self):
        # Hash consistently with __eq__ so instances work as dict keys.
        return hash((self.x, self.y))

    def __sub__(self, o):
        return self.__class__(self.first - o.first, self.second - o.second)
class LatLon(Coordinate):
    # A Coordinate addressed as latitude/longitude: lat is stored in y and
    # lon in x, so projections can treat a LatLon like any (x, y) pair.
    # first/second are swapped relative to Coordinate so extents and
    # __str__ come out in the conventional (lat, lon) order.
    def __init__(self, lat, lon):
        # Intentionally skips Coordinate.__init__; the property setters
        # below populate self.y and self.x.
        self.lat = lat
        self.lon = lon
    def get_lat(self):
        return self.y
    def set_lat(self, lat):
        self.y = lat
    def get_lon(self):
        return self.x
    def set_lon(self, lon):
        self.x = lon
    lat = property(get_lat, set_lat)
    lon = property(get_lon, set_lon)
    first = property(get_lat)
    second = property(get_lon)
class Extent():
    '''An axis-aligned bounding box held as two corner coordinates,
    self.min and self.max.'''

    def __init__(self, coords=None, shapes=None):
        '''Build from an iterable of coordinates, or of shapes (anything
        with an .extent attribute).  Exactly one of the two is required.'''
        if coords:
            coords = tuple(coords)  # if it's a generator, slurp them all
            corner_class = coords[0].__class__
            self.min = corner_class(min(c.first for c in coords),
                                    min(c.second for c in coords))
            self.max = corner_class(max(c.first for c in coords),
                                    max(c.second for c in coords))
        elif shapes:
            self.from_shapes(shapes)
        else:
            raise ValueError('Extent must be initialized')

    def __str__(self):
        # Rendered as "min_y,min_x,max_y,max_x" (lat-before-lon order).
        return '%s,%s,%s,%s' % (self.min.y, self.min.x, self.max.y, self.max.x)

    def update(self, other):
        '''grow this bounding box so that it includes the other'''
        self.min.x = min(self.min.x, other.min.x)
        self.min.y = min(self.min.y, other.min.y)
        self.max.x = max(self.max.x, other.max.x)
        self.max.y = max(self.max.y, other.max.y)

    def from_bounding_box(self, other):
        '''Copy the corners of another extent into this one.'''
        self.min = other.min.copy()
        self.max = other.max.copy()

    def from_shapes(self, shapes):
        '''Initialize as the union of the extents of all shapes.'''
        shapes = iter(shapes)
        self.from_bounding_box(next(shapes).extent)
        for shape in shapes:
            self.update(shape.extent)

    def corners(self):
        return (self.min, self.max)

    def size(self):
        '''Return the (width, height) of the box as a coordinate.'''
        return self.max.__class__(self.max.x - self.min.x,
                                  self.max.y - self.min.y)

    def grow(self, pad):
        '''Push every edge outward by pad units.'''
        self.min.x -= pad
        self.min.y -= pad
        self.max.x += pad
        self.max.y += pad

    def resize(self, width=None, height=None):
        '''Change the box to the given width/height, keeping its center.'''
        if width:
            self.max.x += float(width - self.size().x) / 2
            self.min.x = self.max.x - width
        if height:
            self.max.y += float(height - self.size().y) / 2
            self.min.y = self.max.y - height

    def is_inside(self, coord):
        return (self.min.x <= coord.x <= self.max.x and
                self.min.y <= coord.y <= self.max.y)

    def map(self, func):
        '''Returns a new Extent whose corners are a function of the
        corners of this one. The expected use is to project a Extent
        onto a map. For example: bbox_xy = bbox_ll.map(projector.project)'''
        return Extent(coords=(func(self.min), func(self.max)))
class Projection(object):
    '''Base class for map projections.

    A projection turns LatLon coordinates into pixel Coordinates.  The
    scale is expressed in pixels per degree of longitude and must be set
    (directly or via auto_set_scale) before project() is usable.
    '''
    # For guessing scale, we pretend the earth is a sphere with this
    # radius in meters, as in Web Mercator (the projection all the
    # online maps use).
    EARTH_RADIUS = 6378137 # in meters
    def get_pixels_per_degree(self):
        try:
            return self._pixels_per_degree
        except AttributeError:
            # Fail loudly rather than projecting with an unset scale.
            raise AttributeError('projection scale was never set')
    def set_pixels_per_degree(self, val):
        self._pixels_per_degree = val
        logging.info('scale: %f meters/pixel (%f pixels/degree)'
                     % (self.meters_per_pixel, val))
    def get_meters_per_pixel(self):
        # Equatorial circumference spread over 360 degrees, divided by the
        # pixel density, gives ground meters per pixel.
        return 2 * math.pi * self.EARTH_RADIUS / 360 / self.pixels_per_degree
    def set_meters_per_pixel(self, val):
        # Same relationship inverted; setting either property keeps both
        # views of the scale consistent.
        self.pixels_per_degree = 2 * math.pi * self.EARTH_RADIUS / 360 / val
        return val
    pixels_per_degree = property(get_pixels_per_degree, set_pixels_per_degree)
    meters_per_pixel = property(get_meters_per_pixel, set_meters_per_pixel)
    def is_scaled(self):
        # True once a scale has been chosen (by hand or by auto_set_scale).
        return hasattr(self, '_pixels_per_degree')
    def project(self, coords):
        raise NotImplementedError
    def inverse_project(self, coords):
        # Not all projections can support this.
        raise NotImplementedError
    def auto_set_scale(self, extent_in, padding, width=None, height=None):
        '''Choose pixels_per_degree so extent_in fits within width/height.

        padding is the per-edge margin in pixels.  Raises
        ZeroDivisionError when the projected extent is degenerate (fewer
        than two distinct data points).
        '''
        # We need to choose a scale at which the data's bounding box,
        # once projected onto the map, will fit in the specified height
        # and/or width. The catch is that we can't project until we
        # have a scale, so what we'll do is set a provisional scale,
        # project the bounding box onto the map, then adjust the scale
        # appropriately. This way we don't need to know anything about
        # the projection.
        #
        # Projection subclasses are free to override this method with
        # something simpler that just solves for scale given the lat/lon
        # and x/y bounds.
        # We'll work large to minimize roundoff error.
        SCALE_FACTOR = 1000000.0
        self.pixels_per_degree = SCALE_FACTOR
        extent_out = extent_in.map(self.project)
        padding *= 2 # padding-per-edge -> padding-in-each-dimension
        try:
            if height:
                self.pixels_per_degree = pixels_per_lat = (
                    float(height - padding) /
                    extent_out.size().y * SCALE_FACTOR)
            if width:
                self.pixels_per_degree = (
                    float(width - padding) /
                    extent_out.size().x * SCALE_FACTOR)
                # When both are given, use whichever scale is tighter.
                if height:
                    self.pixels_per_degree = min(self.pixels_per_degree,
                                                 pixels_per_lat)
        except ZeroDivisionError:
            raise ZeroDivisionError(
                'You need at least two data points for auto scaling. '
                'Try specifying the scale explicitly (or extent + '
                'height or width).')
        assert(self.pixels_per_degree > 0)
class EquirectangularProjection(Projection):
    # http://en.wikipedia.org/wiki/Equirectangular_projection
    '''Plate carree projection: degrees map linearly onto pixels.'''

    def project(self, coord):
        '''Map a LatLon to pixel space; y is negated because image y
        grows downward while latitude grows upward.'''
        return Coordinate(coord.lon * self.pixels_per_degree,
                          -coord.lat * self.pixels_per_degree)

    def inverse_project(self, coord):
        '''Map a pixel Coordinate back to a LatLon.'''
        scale = self.pixels_per_degree
        return LatLon(-coord.y / scale, coord.x / scale)
class MercatorProjection(Projection):
    # Mercator projection: x is linear in longitude while y follows the
    # log-tan of latitude, so high latitudes stretch vertically.
    def set_pixels_per_degree(self, val):
        super(MercatorProjection, self).set_pixels_per_degree(val)
        # Cache the radian-based scale used by the log-tan formula below.
        self._pixels_per_radian = val * (180 / math.pi)
    pixels_per_degree = property(Projection.get_pixels_per_degree,
                                 set_pixels_per_degree)
    def project(self, coord):
        x = coord.lon * self.pixels_per_degree
        # Standard Mercator y = R * ln(tan(pi/4 + lat/2)); negated because
        # image y grows downward.
        y = -self._pixels_per_radian * math.log(
            math.tan((math.pi/4 + math.pi/360 * coord.lat)))
        return Coordinate(x, y)
    def inverse_project(self, coord):
        # Inverse of the log-tan form above.
        lat = (360 / math.pi
               * math.atan(math.exp(-coord.y / self._pixels_per_radian)) - 90)
        lon = coord.x / self.pixels_per_degree
        return LatLon(lat, lon)
class Configuration(object):
    '''
    This object holds the settings for creating a heatmap as well as
    an iterator for the input data.
    Most of the command line processing is about settings and data, so
    the command line options are also processed with this object.
    This happens in two phases.
    First the settings are parsed and turned into more useful objects
    in set_from_options(). Command line flags go in, and the
    Configuration object is populated with the specified values and
    defaults.
    In the second phase, various other parameters are computed. These
    are things we set automatically based on the other settings or on
    the data. You can skip this if you set everything manually, but
    The idea is that someone could import this module, populate a
    Configuration instance manually, and run the process themselves.
    Where possible, this object contains instances, rather than option
    strings (e.g. for projection, kernel, colormap, etc).
    Every parameter is explained in the glossary dictionary, and only
    documented parameters are allowed. Parameters default to None.
    '''
    # Registries mapping user-facing names to implementation classes.
    _kernels = {'linear': LinearKernel,
                'gaussian': GaussianKernel, }
    _projections = {'equirectangular': EquirectangularProjection,
                    'mercator': MercatorProjection, }
    # Default value for each recognized setting; __init__ copies these
    # onto the instance.  NOTE(review): these defaults are shared
    # class-level objects -- extent_in in particular is one mutable
    # Extent reused by every instance; verify nothing mutates it in place.
    glossary = {
        'width': 0,
        'height': 0,
        'margin': 0,
        'radius': 2,
        'shapes': None,
        'projection': None,
        'colormap': None,
        'decay': 0.3,
        'kernel': None,
        'extent_in': Extent(coords=(LatLon(-80., -180.), LatLon(80., 180.))),
        'extent_out': None,
        'background': None,
        'background_image': None,
        'background_brightness': None,
        'gradient': None,
        'gpx': None
    }
    def __init__(self, pts=None, bg=None,
                 projection='equirectangular', kernel='linear', hsva_min=None,
                 hsva_max=None, height=0, width=0):
        # Seed every documented setting with its glossary default.
        for k, v in zip(self.glossary.keys(), self.glossary.values()):
            setattr(self, k, v)
        # A background image, when given, dictates the output dimensions.
        if bg is not None:
            self.background_image = bg
            (self.width, self.height) = self.background_image.size
        else:
            self.width, self.height = width, height
        # Resolve names to live instances via the registries above.
        self.projection = self._projections[projection]()
        self.kernel = self._kernels[kernel](self.radius)
        self.colormap = ColorMap(hsva_min=ColorMap.str_to_hsva(hsva_min),
                                 hsva_max=ColorMap.str_to_hsva(hsva_max))
        # Leave room for the kernel radius so edge points are not clipped.
        padding = self.margin + self.radius
        self.projection.auto_set_scale(self.extent_in, padding,
                                       self.width, self.height)
        # Output extent is the projected input extent plus the padding.
        self.extent_out = self.extent_in.map(self.projection.project)
        self.extent_out.grow(padding)
        self.shapes = pts
class Point:
    '''A weighted data point, in either lat/lon or pixel space.'''

    def __init__(self, coord, weight=1.0):
        self.coord = coord
        self.weight = weight

    def __str__(self):
        return 'P(%s)' % str(self.coord)

    @staticmethod
    def general_distance(x, y):
        # assumes square units, which causes distortion in some projections
        return (x ** 2 + y ** 2) ** 0.5

    @property
    def extent(self):
        '''Lazily build (and memoize) the single-point bounding box.'''
        try:
            return self._extent
        except AttributeError:
            self._extent = Extent(coords=(self.coord,))
            return self._extent

    # From a modularity standpoint, it would be reasonable to cache
    # distances, not heat values, and let the kernel cache the
    # distance to heat map, but this is substantially faster.
    heat_cache = {}

    @classmethod
    def _initialize_heat_cache(cls, kernel):
        '''Precompute kernel.heat() for every integer (dx, dy) offset
        within the kernel radius (absolute offsets only, by symmetry).'''
        cls.heat_cache[kernel] = dict(
            ((dx, dy), kernel.heat(cls.general_distance(dx, dy)))
            for dx in range(kernel.radius + 1)
            for dy in range(kernel.radius + 1))

    def add_heat_to_matrix(self, matrix, kernel):
        '''Splat this point's weighted heat onto the matrix cells around
        its (truncated-to-int) coordinate.'''
        if kernel not in Point.heat_cache:
            Point._initialize_heat_cache(kernel)
        cache = Point.heat_cache[kernel]
        cx = int(self.coord.x)
        cy = int(self.coord.y)
        for dx in range(-kernel.radius, kernel.radius + 1):
            for dy in range(-kernel.radius, kernel.radius + 1):
                matrix.add(Coordinate(cx + dx, cy + dy),
                           self.weight * cache[(abs(dx), abs(dy))])

    def map(self, func):
        '''Return a new Point with the coordinate transformed by func.'''
        return Point(func(self.coord), self.weight)
class ImageMaker():
    '''Renders a finalized heat matrix into a PIL image, using the
    colormap, background and extent settings from a Configuration.'''
    def __init__(self, config):
        '''Remember the config and pre-parse the flat background color,
        if one is set.  A background *image*, when present, takes
        precedence over the flat color.'''
        self.config = config
        if config.background and not config.background_image:
            self.background = ImageColor.getrgb(config.background)
        else:
            self.background = None
    @staticmethod
    def _blend_pixels(a, b):
        # a is RGBA, b is RGB; we could write this more generically,
        # but why complicate things?
        alpha = a[3] / 255.0
        return tuple(
            map(lambda aa, bb: int(aa * alpha + bb * (1 - alpha)), a[:3], b))
    def make_image(self, matrix):
        '''Paint the matrix values, normalized by the maximum value, onto
        a newly created image and return it.'''
        extent = self.config.extent_out
        if not extent:
            # No precomputed extent: derive one from the data itself.
            extent = matrix.extent()
            extent.resize((self.config.width or 1) - 1,
                          (self.config.height or 1) - 1)
        size = extent.size()
        size.x = int(size.x) + 1
        size.y = int(size.y) + 1
        logging.info('saving image (%d x %d)' % (size.x, size.y))
        if self.background:
            img = Image.new('RGB', (size.x, size.y), self.background)
        else:
            img = Image.new('RGBA', (size.x, size.y))
        # NOTE(review): raises ValueError on an empty matrix -- confirm
        # callers always supply at least one data point.
        maxval = max(matrix.values())
        pixels = img.load()
        for (coord, val) in matrix.items():
            x = int(coord.x - extent.min.x)
            y = int(coord.y - extent.min.y)
            if extent.is_inside(coord):
                color = self.config.colormap.get(val / maxval)
                if self.background:
                    pixels[x, y] = ImageMaker._blend_pixels(color,
                                                            self.background)
                else:
                    pixels[x, y] = color
        if self.config.background_image:
            # Composite the heat layer over the supplied background, using
            # the heat layer's alpha channel as the mask.
            img = Image.composite(img, self.config.background_image,
                                  img.split()[3])
        return img
class ColorMap:
    '''Maps a normalized value in [0, 1] to an RGBA color, via either an
    HSVA gradient or the pixel column of a supplied gradient image.'''
    DEFAULT_HSVA_MIN_STR = '000ffff00' # '02acfff00'
    DEFAULT_HSVA_MAX_STR = '02affffff' # '02a00ffff'
    @staticmethod
    def _str_to_float(string, base=16, maxval=256):
        # Parse a hex field and normalize it by maxval ('ff' -> ~0.996).
        return float(int(string, base)) / maxval
    @staticmethod
    def str_to_hsva(string):
        '''
        Parse a hex color specification AAABBCCDD into a 4-tuple of
        floats (each hex field divided by 256): hue from the first 3
        digits, then saturation, value and alpha from 2 digits each.
        Because hue has 3 digits, its float can exceed 1.0; it is
        wrapped modulo 1 when the gradient is built in __init__.
        '''
        if string.startswith('#'):
            # Leading "#" was once required, is now optional.
            string = string[1:]
        return tuple(ColorMap._str_to_float(s) for s in (string[0:3],
                                                         string[3:5],
                                                         string[5:7],
                                                         string[7:9]))
    def __init__(self, hsva_min=None, hsva_max=None, image=None, steps=256):
        '''
        Create a color map based on a progression in the specified
        range, or using pixels in a provided image.
        If supplied, hsva_min and hsva_max must each be a 4-tuple of
        (hue, saturation, value, alpha), where each is a float from
        0.0 to 1.0. The gradient will be a linear progression from
        hsva_min to hsva_max, including both ends of the range.
        The optional steps argument specifies how many discrete steps
        there should be in the color gradient when using hsva_min
        and hsva_max.
        '''
        # TODO: do the interpolation in Lab space instead of HSV
        self.values = []
        if image:
            assert image.mode == 'RGBA', (
                'Gradient image must be RGBA. Yours is %s.' % image.mode)
            num_rows = image.size[1]
            self.values = [image.getpixel((0, row)) for row in range(num_rows)]
            # Bottom row of the image maps to the lowest data value.
            self.values.reverse()
        else:
            if not hsva_min:
                hsva_min = ColorMap.str_to_hsva(self.DEFAULT_HSVA_MIN_STR)
            if not hsva_max:
                hsva_max = ColorMap.str_to_hsva(self.DEFAULT_HSVA_MAX_STR)
            # Turn (h1,s1,v1,a1), (h2,s2,v2,a2) into (h2-h1,s2-s1,v2-v1,a2-a1)
            hsva_range = list(map(lambda min, max: max - min,
                                  hsva_min, hsva_max))
            for value in range(0, steps):
                hsva = list(map(
                    lambda range, min: value / float(steps - 1) * range + min,
                    hsva_range, hsva_min))
                hsva[0] = hsva[0] % 1 # in case hue is out of range
                rgba = tuple(
                    [int(x * 255) for x in
                     hsv_to_rgb(*hsva[0:3]) + (hsva[3],)])
                self.values.append(rgba)
    def get(self, floatval):
        # floatval is assumed to lie in [0, 1]; 1.0 maps to the last entry.
        return self.values[int(floatval * (len(self.values) - 1))]
class Matrix(defaultdict):
    '''An abstract sparse matrix, with data stored as {coord: value}.'''

    @staticmethod
    def matrix_factory(decay):
        '''Pick the cheapest matrix type that can honor the decay value.'''
        # If decay is 0 or 1, we can accumulate as we go and save lots of
        # memory.
        if decay == 1.0:
            logging.info('creating a summing matrix')
            return SummingMatrix()
        if decay == 0.0:
            logging.info('creating a maxing matrix')
            return MaxingMatrix()
        logging.info('creating an appending matrix')
        return AppendingMatrix(decay)

    def __init__(self, default_factory=float):
        self.default_factory = default_factory

    def add(self, coord, val):
        '''Fold val into the cell at coord; subclasses define the rule.'''
        raise NotImplementedError

    def extent(self):
        '''Return the bounding box of all populated cells.'''
        return Extent(coords=self.keys())

    def finalized(self):
        '''Post-processing hook; the identity by default.'''
        return self
class SummingMatrix(Matrix):
    '''Matrix whose add() keeps a running total per cell (decay == 1).'''
    def add(self, coord, val):
        self[coord] = self[coord] + val
class MaxingMatrix(Matrix):
    '''Matrix whose add() keeps only the largest value per cell
    (decay == 0).'''
    def add(self, coord, val):
        current = self.get(coord, val)
        self[coord] = max(val, current)
class AppendingMatrix(Matrix):
    '''Matrix that records every value per cell and combines them with a
    decay-weighted sum when finalized().'''

    def __init__(self, decay):
        self.default_factory = list
        self.decay = decay

    def add(self, coord, val):
        self[coord].append(val)

    def finalized(self):
        logging.info('combining coincident points')
        combined = Matrix()
        for (coord, values) in self.items():
            combined[coord] = self.reduce(self.decay, values)
        return combined

    @staticmethod
    def reduce(decay, values):
        '''
        Returns a weighted sum of the values, where weight N is
        pow(decay,N). This means the largest value counts fully, but
        additional values have diminishing contributions. decay=0 makes
        the reduction equivalent to max(), which makes each data point
        visible, but says nothing about their relative magnitude.
        decay=1 makes this like sum(), which makes the relative
        magnitude of the points more visible, but could make smaller
        values hard to see. Experiment with values between 0 and 1.
        Values outside that range will give weird results.
        '''
        # It would be nice to do this on the fly, while accumulating data, but
        # it needs to be insensitive to data order.
        values.sort(reverse=True)  # sorts the caller's list in place
        weight = 1.0
        total = 0.0
        for value in values:
            total += value * weight
            weight *= decay
        return total
def get_coord(addr):
    '''Geolocate an IP address via the module-level GeoIP ``reader``.

    Returns a weight-1 Point at the resolved lat/lon, or a weight-0
    Point at (0, 0) when the database has no location for the address.
    '''
    location = reader.city(addr).location
    if location.latitude and location.longitude:
        return Point(LatLon(location.latitude, location.longitude), 1)
    return Point(LatLon(0.0, 0.0), 0)
def process_shapes(config, hook=None):
    '''Project every shape in config.shapes and splat its heat into a
    matrix.  ``hook``, when given, is called with the matrix after each
    shape is added (useful for progress reporting).'''
    matrix = Matrix.matrix_factory(config.decay)
    logging.info('processing data')
    for shape in config.shapes:
        projected = shape.map(config.projection.project)
        projected.add_heat_to_matrix(matrix, config.kernel)
        if hook:
            hook(matrix)
    return matrix
def get_heatmap(config):
    '''Run the full pipeline: accumulate heat, then render it to an
    image and return the image.'''
    heat = process_shapes(config).finalized()
    return ImageMaker(config).make_image(heat)
def parse_cmdln():
    '''Build and run the command-line parser.

    Returns the parsed argparse namespace with attributes: ip, db, bg,
    days, hsva_min, hsva_max, height and width.
    '''
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # ArgumentParser(version=...) is deprecated since Python 2.7 and was
    # removed in Python 3; the supported spelling is an explicit
    # --version argument using the 'version' action.
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-i', '--in', dest='ip',
                        help='FAH IP Database', required=True)
    parser.add_argument('-db', '--database', dest='db',
                        help='MaxMind GeoDB', required=True)
    parser.add_argument('-bg', '--background-image',
                        dest='bg', help='Image of the world.', default=None)
    parser.add_argument('-d', '--days', dest='days',
                        help='Number of days to access in database.',
                        default=30, type=int)
    parser.add_argument('-m', '--min', dest='hsva_min',
                        help='Color of the minimum of the gradient.',
                        default=ColorMap.DEFAULT_HSVA_MIN_STR, type=str)
    parser.add_argument('-M', '--max', dest='hsva_max',
                        help='Color of the maximum of the gradient.',
                        default=ColorMap.DEFAULT_HSVA_MAX_STR, type=str)
    parser.add_argument('-ht', '--height', dest='height',
                        help='Height of the image in pixels.',
                        default=890, type=int)
    parser.add_argument('-wd', '--width', dest='width',
                        help='Width of the image in pixels.',
                        default=2000, type=int)
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    options = parse_cmdln()
    # GeoIP database handle; module-level on purpose, since get_coord()
    # reads the global ``reader``.
    reader = geoip2.database.Reader(options.db)
    world = None
    if options.bg is not None:
        world = Image.open(options.bg)
    # One IP address per line; imap keeps the geolocation lookups lazy so
    # they happen as the heatmap pipeline consumes the points.
    addr = np.loadtxt(options.ip, dtype=str)
    pts = imap(get_coord, addr)
    config = Configuration(pts=pts, hsva_min=options.hsva_min,
                           hsva_max=options.hsva_max, bg=world,
                           height=options.height, width=options.width)
    img = get_heatmap(config)
    # NOTE(review): output directory './static/png' must already exist.
    img.save('./static/png/past' + str(options.days) + '.png')
|
cxhernandez/fah-map
|
scripts/fahmap.py
|
Python
|
mit
| 23,681
|
[
"Gaussian"
] |
059383add32e2101570683c59fdb0a29359a54213093ccccbf99eeedac358348
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds an 'is_custom' boolean flag (default False) to three lookup
    # models so user-created entries can be told apart from built-ins.
    dependencies = [
        ('visit', '0058_remove_visit_involvement_other'),
    ]
    operations = [
        migrations.AddField(
            model_name='involvementtype',
            name='is_custom',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='issueprek',
            name='is_custom',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='issueprimary',
            name='is_custom',
            field=models.BooleanField(default=False),
        ),
    ]
|
koebbe/homeworks
|
visit/migrations/0059_auto_20150815_0354.py
|
Python
|
mit
| 748
|
[
"VisIt"
] |
dd10c372e1e68b4daab9f6b86c536f1f7d7e4b99fdb54c2de3e2b115248d9aec
|
# -*- coding:utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""bibindex_engine_tokenizer_tests - unit tests for tokenizers
There should always be at least one test class for each class in b_e_t.
"""
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
load_tokenizers = lazy_import('invenio.legacy.bibindex.engine_utils:load_tokenizers')
_TOKENIZERS = None
class TestAuthorTokenizerScanning(InvenioTestCase):
    """Test BibIndex name tokenization"""
    def setUp(self):
        # Populate the module-level registry once and reuse it.  The
        # original bound a setUp-local _TOKENIZERS, so the module-scope
        # cache stayed None and the registry was rebuilt for every test.
        global _TOKENIZERS
        if _TOKENIZERS is None:
            _TOKENIZERS = load_tokenizers()
        self.tokenizer = _TOKENIZERS["BibIndexAuthorTokenizer"]()
        self.scan = self.tokenizer.scan_string_for_phrases
    def test_bifnt_scan_single(self):
        """BibIndexAuthorTokenizer - scanning single names like 'Dido'"""
        teststr = "Dido"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Dido'], 'nonlastnames': [], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_simple_western_forward(self):
        """BibIndexAuthorTokenizer - scanning simple Western-style: first last"""
        teststr = "Ringo Starr"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_simple_western_reverse(self):
        """BibIndexAuthorTokenizer - scanning simple Western-style: last, first"""
        teststr = "Starr, Ringo"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_multiname_forward(self):
        """BibIndexAuthorTokenizer - scanning multiword: first middle last"""
        teststr = "Michael Edward Peskin"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_multiname_dotcrammed(self):
        """BibIndexAuthorTokenizer - scanning multiword: f.m. last"""
        teststr = "M.E. Peskin"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_multiname_dotcrammed_reversed(self):
        """BibIndexAuthorTokenizer - scanning multiword: last, f.m."""
        teststr = "Peskin, M.E."
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_multiname_dashcrammed(self):
        """BibIndexAuthorTokenizer - scanning multiword: first-middle last"""
        teststr = "Jean-Luc Picard"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_multiname_dashcrammed_reversed(self):
        """BibIndexAuthorTokenizer - scanning multiword: last, first-middle"""
        teststr = "Picard, Jean-Luc"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_compound_lastname_dashes(self):
        """BibIndexAuthorTokenizer - scanning multiword: first middle last-last"""
        teststr = "Cantina Octavia Jones-Smith"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_compound_lastname_dashes_reverse(self):
        """BibIndexAuthorTokenizer - scanning multiword: last-last, first middle"""
        teststr = "Jones-Smith, Cantina Octavia"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_compound_lastname_reverse(self):
        """BibIndexAuthorTokenizer - scanning compound last: last last, first"""
        teststr = "Alvarez Gaume, Joachim"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': [], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_titled(self):
        """BibIndexAuthorTokenizer - scanning title-bearing: last, first, title"""
        teststr = "Epstein, Brian, The Fifth Beatle"
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle'], 'raw' : teststr}
        self.assertEqual(output, anticipated)
    def test_bifnt_scan_wildly_interesting(self):
        """BibIndexAuthorTokenizer - scanning last last last, first first, title, title"""
        teststr = "Ibanez y Gracia, Maria Luisa, II., ed."
        output = self.scan(teststr)
        anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II.', 'ed.'], 'raw' : teststr}
        self.assertEqual(output, anticipated)
class TestAuthorTokenizerTokens(InvenioTestCase):
    """Test BibIndex name variant token generation from scanned and tagged sets"""
    def setUp(self):
        # Reuse the module-level tokenizer registry instead of reloading
        # it per test; the original assigned to a setUp-local name, so the
        # module cache was never filled.
        global _TOKENIZERS
        if _TOKENIZERS is None:
            _TOKENIZERS = load_tokenizers()
        self.tokenizer = _TOKENIZERS["BibIndexAuthorTokenizer"]()
        self.get_index_tokens = self.tokenizer.parse_scanned_for_phrases
    def test_bifnt_tokenize_single(self):
        """BibIndexAuthorTokenizer - tokens for single-word name
        Ronaldo
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Ronaldo'], 'nonlastnames': [], 'titles': [], 'raw' : 'Ronaldo'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['Ronaldo']
        self.assertEqual(output, anticipated)
    def test_bifnt_tokenize_simple_forward(self):
        """BibIndexAuthorTokenizer - tokens for first last
        Ringo Starr
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : 'Ringo Starr'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
        self.assertEqual(output, anticipated)
    def test_bifnt_tokenize_simple_reverse(self):
        """BibIndexAuthorTokenizer - tokens for last, first
        Starr, Ringo
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': [], 'raw' : 'Starr, Ringo'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
        self.assertEqual(output, anticipated)
    def test_bifnt_tokenize_twoname_forward(self):
        """BibIndexAuthorTokenizer - tokens for first middle last
        Michael Edward Peskin
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': [], 'raw' : 'Michael Edward Peskin'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['E Peskin', 'Edward Peskin', 'M E Peskin', 'M Edward Peskin', 'M Peskin',
                       'Michael E Peskin', 'Michael Edward Peskin', 'Michael Peskin',
                       'Peskin, E', 'Peskin, Edward', 'Peskin, M',
                       'Peskin, M E', 'Peskin, M Edward', 'Peskin, Michael',
                       'Peskin, Michael E', 'Peskin, Michael Edward']
        self.assertEqual(output, anticipated)
    def test_bifnt_tokenize_compound_last(self):
        """BibIndexAuthorTokenizer - tokens for last last, first
        Alvarez Gaume, Joachim
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': [], 'raw' : 'Alvarez Gaume, Joachim'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['Alvarez Gaume, J', 'Alvarez Gaume, Joachim', 'Alvarez, J', 'Alvarez, Joachim', 'Gaume, J',
                       'Gaume, Joachim', 'J Alvarez', 'J Alvarez Gaume', 'J Gaume', 'Joachim Alvarez',
                       'Joachim Alvarez Gaume', 'Joachim Gaume']
        self.assertEqual(output, anticipated)
    def test_bifnt_tokenize_titled(self):
        """BibIndexAuthorTokenizer - tokens for last, first, title
        Epstein, Brian, The Fifth Beatle
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle'], 'raw' : 'Epstein, Brian, The Fifth Beatle'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['B Epstein', 'B Epstein, The Fifth Beatle', 'Brian Epstein',
                       'Brian Epstein, The Fifth Beatle', 'Epstein, B', 'Epstein, B, The Fifth Beatle',
                       'Epstein, Brian', 'Epstein, Brian, The Fifth Beatle']
        self.assertEqual(output, anticipated)
    def test_bifnt_tokenize_wildly_interesting(self):
        """BibIndexAuthorTokenizer - tokens for last last last, first first, title, title
        Ibanez y Gracia, Maria Luisa, II, (ed.)
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II', '(ed.)'], 'raw' : 'Ibanez y Gracia, Maria Luisa, II, (ed.)'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['Gracia, L', 'Gracia, Luisa', 'Gracia, M', 'Gracia, M L', 'Gracia, M Luisa',
                       'Gracia, Maria', 'Gracia, Maria L', 'Gracia, Maria Luisa',
                       'Ibanez y Gracia, L', 'Ibanez y Gracia, L, II',
                       'Ibanez y Gracia, Luisa', 'Ibanez y Gracia, Luisa, II',
                       'Ibanez y Gracia, M', 'Ibanez y Gracia, M L', 'Ibanez y Gracia, M L, II',
                       'Ibanez y Gracia, M Luisa', 'Ibanez y Gracia, M Luisa, II',
                       'Ibanez y Gracia, M, II',
                       'Ibanez y Gracia, Maria',
                       'Ibanez y Gracia, Maria L', 'Ibanez y Gracia, Maria L, II',
                       'Ibanez y Gracia, Maria Luisa', 'Ibanez y Gracia, Maria Luisa, II',
                       'Ibanez y Gracia, Maria, II',
                       'Ibanez, L', 'Ibanez, Luisa',
                       'Ibanez, M', 'Ibanez, M L', 'Ibanez, M Luisa', 'Ibanez, Maria',
                       'Ibanez, Maria L', 'Ibanez, Maria Luisa', 'L Gracia', 'L Ibanez',
                       'L Ibanez y Gracia', 'L Ibanez y Gracia, II', 'Luisa Gracia', 'Luisa Ibanez',
                       'Luisa Ibanez y Gracia', 'Luisa Ibanez y Gracia, II', 'M Gracia',
                       'M Ibanez', 'M Ibanez y Gracia', 'M Ibanez y Gracia, II', 'M L Gracia',
                       'M L Ibanez', 'M L Ibanez y Gracia', 'M L Ibanez y Gracia, II',
                       'M Luisa Gracia', 'M Luisa Ibanez', 'M Luisa Ibanez y Gracia', 'M Luisa Ibanez y Gracia, II',
                       'Maria Gracia',
                       'Maria Ibanez', 'Maria Ibanez y Gracia', 'Maria Ibanez y Gracia, II',
                       'Maria L Gracia', 'Maria L Ibanez', 'Maria L Ibanez y Gracia', 'Maria L Ibanez y Gracia, II',
                       'Maria Luisa Gracia', 'Maria Luisa Ibanez', 'Maria Luisa Ibanez y Gracia',
                       'Maria Luisa Ibanez y Gracia, II']
        self.assertEqual(output, anticipated)
    def test_bifnt_tokenize_multimiddle_forward(self):
        """BibIndexAuthorTokenizer - tokens for first middle middle last
        W K H Panofsky
        """
        tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles', 'raw'],
                       'lastnames': ['Panofsky'], 'nonlastnames': ['W', 'K', 'H'], 'titles': [], 'raw' : 'W K H Panofsky'}
        output = self.get_index_tokens(tagged_data)
        anticipated = ['H Panofsky', 'K H Panofsky', 'K Panofsky', 'Panofsky, H', 'Panofsky, K',
                       'Panofsky, K H', 'Panofsky, W', 'Panofsky, W H', 'Panofsky, W K',
                       'Panofsky, W K H', 'W H Panofsky',
                       'W K H Panofsky', 'W K Panofsky', 'W Panofsky']
        self.assertEqual(output, anticipated)
    def test_tokenize(self):
        """BibIndexAuthorTokenizer - check tokenize_for_phrases()
        Ringo Starr
        """
        teststr = "Ringo Starr"
        output = self.tokenizer.tokenize_for_phrases(teststr)
        anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']
        self.assertEqual(output, anticipated)
class TestExactAuthorTokenizer(InvenioTestCase):
    """Test exact author name tokenizer."""
    def setUp(self):
        """setup"""
        # Fill the module-level tokenizer cache lazily; the original bound
        # a setUp-local _TOKENIZERS, so the registry was reloaded per test.
        global _TOKENIZERS
        if _TOKENIZERS is None:
            _TOKENIZERS = load_tokenizers()
        self.tokenizer = _TOKENIZERS["BibIndexExactAuthorTokenizer"]()
        self.tokenize = self.tokenizer.tokenize_for_phrases
    def test_exact_author_name_tokenizer_bare(self):
        """BibIndexExactNameTokenizer - bare name"""
        self.assertEqual(self.tokenize('John Doe'),
                         ['John Doe'])
    def test_exact_author_name_tokenizer_dots(self):
        """BibIndexExactNameTokenizer - name with dots"""
        self.assertEqual(self.tokenize('J. Doe'),
                         ['J Doe'])
        self.assertEqual(self.tokenize('J.R. Doe'),
                         ['J R Doe'])
        self.assertEqual(self.tokenize('J. R. Doe'),
                         ['J R Doe'])
    def test_exact_author_name_tokenizer_trailing_dots(self):
        """BibIndexExactNameTokenizer - name with trailing dots"""
        self.assertEqual(self.tokenize('Doe, J'),
                         ['Doe, J'])
        self.assertEqual(self.tokenize('Doe, J.'),
                         ['Doe, J'])
    def test_exact_author_name_tokenizer_hyphens(self):
        """BibIndexExactNameTokenizer - name with hyphens"""
        self.assertEqual(self.tokenize('Doe, Jean-Pierre'),
                         ['Doe, Jean Pierre'])
class TestCJKTokenizer(InvenioTestCase):
    """Tests for CJK Tokenizer which splits CJK words into characters and treats
    every single character as a word"""

    @classmethod
    def setUp(self):
        self.tokenizer = load_tokenizers()["BibIndexCJKTokenizer"]()

    def _assert_words(self, phrase, expected):
        # Order of tokens is irrelevant: compare the sorted token lists.
        self.assertEqual(sorted(expected),
                         sorted(self.tokenizer.tokenize_for_words(phrase)))

    def test_tokenize_for_words_phrase_galaxy(self):
        """tokenizing phrase: galaxy s4据信"""
        self._assert_words("galaxy s4据信", ['galaxy', 's4', '据', '信'])

    def test_tokenize_for_words_phrase_with_special_punctuation(self):
        """tokenizing phrase: 马英九:台湾民"""
        self._assert_words(u"马英九:台湾民", ['马', '英', '九', '台', '湾', '民'])

    def test_tokenize_for_words_phrase_with_special_punctuation_two(self):
        """tokenizing phrase: 色的“刀子嘴”"""
        self._assert_words(u"色的“刀子嘴”", ['色', '的', '刀', '子', '嘴'])

    def test_tokenize_for_words_simple_phrase(self):
        """tokenizing phrase: 春眠暁覚"""
        self._assert_words(u'春眠暁覚', ['春', '眠', '暁', '覚'])

    def test_tokenize_for_words_mixed_phrase(self):
        """tokenizing phrase: 春眠暁ABC覚"""
        # Latin runs are lower-cased and kept whole; CJK split per character.
        self._assert_words(u'春眠暁ABC覚', ['春', '眠', '暁', 'abc', '覚'])

    def test_tokenize_for_words_phrase_with_comma(self):
        """tokenizing phrase: 春眠暁, 暁"""
        # Duplicates collapse: 暁 appears once in the output.
        self._assert_words(u"春眠暁, 暁", ['春', '眠', '暁'])
# Aggregate all tokenizer test classes into one suite so the module can be
# collected by the Invenio test runner or executed directly.
TEST_SUITE = make_test_suite(TestAuthorTokenizerScanning,
                             TestAuthorTokenizerTokens,
                             TestExactAuthorTokenizer,
                             TestCJKTokenizer)
if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
|
MSusik/invenio
|
invenio/modules/indexer/testsuite/test_indexer_engine_tokenizer.py
|
Python
|
gpl-2.0
| 18,602
|
[
"Brian",
"Galaxy"
] |
40ad3e3259af338da14801791fba4317c219f3236e3b1d3b38797040af79fcfd
|
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing OpenFOAM, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Xavier Besseron (University of Luxembourg)
@author: Ward Poelmans (Ghent University)
@author: Balazs Hajgato (Free University Brussels (VUB))
"""
import glob
import os
import re
import shutil
import stat
import tempfile
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakemake import setup_cmake_env
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
from easybuild.tools.systemtools import get_shared_lib_ext, get_cpu_architecture, AARCH64, POWER
class EB_OpenFOAM(EasyBlock):
"""Support for building and installing OpenFOAM."""
    def __init__(self, *args, **kwargs):
        """Specify that OpenFOAM should be built in install dir.

        Also derives a comparable version (self.looseversion), the name of
        the unpacked source directory (self.openfoamdir), the build type and
        the WM_COMPILER/WM_MPLIB values used by the OpenFOAM build scripts.
        """
        super(EB_OpenFOAM, self).__init__(*args, **kwargs)
        # OpenFOAM's build system compiles in place, so build in the installdir
        self.build_in_installdir = True

        self.openfoamdir = None
        self.thrdpartydir = None

        # version may start with 'v' for some variants of OpenFOAM
        # we need to strip this off to avoid problems when comparing LooseVersion instances in Python 3
        clean_version = self.version.strip('v+')

        # take into account versions like '4.x',
        # assume it's equivalent to a very recent minor version (.99)
        if '.x' in clean_version:
            clean_version = clean_version.replace('.x', '.99')

        self.looseversion = LooseVersion(clean_version)

        # the unpacked directory name differs per variant and version
        if 'extend' in self.name.lower():
            if self.looseversion >= LooseVersion('3.0'):
                self.openfoamdir = 'foam-extend-%s' % self.version
            else:
                self.openfoamdir = 'OpenFOAM-%s-ext' % self.version
        else:
            # e.g. 'OpenFOAM-<first two dash-separated version parts>'
            self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])])
        self.log.debug("openfoamdir: %s" % self.openfoamdir)

        # Set build type to requested value
        if self.toolchain.options['debug']:
            self.build_type = 'Debug'
        else:
            self.build_type = 'Opt'

        # determine values for wm_compiler and wm_mplib
        comp_fam = self.toolchain.comp_family()
        if comp_fam == toolchain.GCC:  # @UndefinedVariable
            self.wm_compiler = 'Gcc'
        elif comp_fam == toolchain.INTELCOMP:  # @UndefinedVariable
            self.wm_compiler = 'Icc'
        else:
            raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER")

        # set to an MPI unknown by OpenFOAM, since we're handling the MPI settings ourselves (via mpicc, etc.)
        # Note: this name must contain 'MPI' so the MPI version of the
        # Pstream library is built (cf src/Pstream/Allwmake)
        self.wm_mplib = "EASYBUILDMPI"
    def extract_step(self):
        """Extract sources as expected by the OpenFOAM(-Extend) build scripts.

        Ensures the sources end up in <installdir>/<openfoamdir>, whatever the
        tarball's top-level layout was.
        """
        super(EB_OpenFOAM, self).extract_step()
        # make sure that the expected subdir is really there after extracting
        # if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail
        openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)
        if not os.path.exists(openfoam_installdir):
            self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir)
            try:
                contents_installdir = os.listdir(self.installdir)
                source = os.path.join(self.installdir, contents_installdir[0])
                # it's one directory but has a wrong name
                if len(contents_installdir) == 1 and os.path.isdir(source):
                    # single mis-named top-level directory: simply rename it
                    target = os.path.join(self.installdir, self.openfoamdir)
                    self.log.debug("Renaming %s to %s", source, target)
                    os.rename(source, target)
                else:
                    # multiple entries: create the expected dir and move
                    # everything (except the target itself) into it
                    mkdir(openfoam_installdir)
                    for fil in contents_installdir:
                        if fil != self.openfoamdir:
                            source = os.path.join(self.installdir, fil)
                            target = os.path.join(openfoam_installdir, fil)
                            self.log.debug("Moving %s to %s", source, target)
                            shutil.move(source, target)
                    os.chdir(openfoam_installdir)
            except OSError as err:
                raise EasyBuildError("Failed to move all files to %s: %s", openfoam_installdir, err)
def patch_step(self, beginpath=None):
"""Adjust start directory and start path for patching to correct directory."""
self.cfg['start_dir'] = os.path.join(self.installdir, self.openfoamdir)
super(EB_OpenFOAM, self).patch_step(beginpath=self.cfg['start_dir'])
def configure_step(self):
"""Configure OpenFOAM build by setting appropriate environment variables."""
# compiler & compiler flags
comp_fam = self.toolchain.comp_family()
extra_flags = ''
if comp_fam == toolchain.GCC: # @UndefinedVariable
if get_software_version('GCC') >= LooseVersion('4.8'):
# make sure non-gold version of ld is used, since OpenFOAM requires it
# see http://www.openfoam.org/mantisbt/view.php?id=685
extra_flags = '-fuse-ld=bfd'
# older versions of OpenFOAM-Extend require -fpermissive
if 'extend' in self.name.lower() and self.looseversion < LooseVersion('2.0'):
extra_flags += ' -fpermissive'
if self.looseversion < LooseVersion('3.0'):
extra_flags += ' -fno-delete-null-pointer-checks'
elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
# make sure -no-prec-div is used with Intel compilers
extra_flags = '-no-prec-div'
for env_var in ['CFLAGS', 'CXXFLAGS']:
env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags))
# patch out hardcoding of WM_* environment variables
# for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'
for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]:
self.log.debug("Patching out hardcoded $WM_* env vars in %s", script)
# disable any third party stuff, we use EB controlled builds
regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")]
# this does not work for OpenFOAM Extend lower than 2.0
if 'extend' not in self.name.lower() or self.looseversion >= LooseVersion('2.0'):
key = "WM_PROJECT_VERSION"
regex_subs += [(r"^(setenv|export) %s=.*$" % key, r"export %s=%s #\g<0>" % (key, self.version))]
WM_env_var = ['WM_COMPILER', 'WM_COMPILE_OPTION', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR']
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
WM_env_var.append('WM_LABEL_SIZE')
for env_var in WM_env_var:
regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var,
r": ${\g<var>:=\g<val>}; export \g<var>"))
apply_regex_substitutions(script, regex_subs)
# inject compiler variables into wmake/rules files
ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*'))
if self.looseversion >= LooseVersion('1906'):
ldirs += glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'General', '*'))
langs = ['c', 'c++']
# NOTE: we do not want to change the Debug rules files becuse
# that would change the cOPT/c++OPT values from their empty setting.
suffixes = ['', 'Opt']
wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes]
wmake_rules_files += [os.path.join(ldir, "general") for ldir in ldirs]
mpicc = os.environ['MPICC']
mpicxx = os.environ['MPICXX']
cc_seq = os.environ.get('CC_SEQ', os.environ['CC'])
cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX'])
if self.toolchain.mpi_family() == toolchain.OPENMPI:
# no -cc/-cxx flags supported in OpenMPI compiler wrappers
c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc)
cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx)
else:
# -cc/-cxx should work for all MPICH-based MPIs (including Intel MPI)
c_comp_cmd = '%s -cc="%s"' % (mpicc, cc_seq)
cxx_comp_cmd = '%s -cxx="%s"' % (mpicxx, cxx_seq)
comp_vars = {
# specify MPI compiler wrappers and compiler commands + sequential compiler that should be used by them
'cc': c_comp_cmd,
'CC': cxx_comp_cmd,
'cOPT': os.environ['CFLAGS'],
'c++OPT': os.environ['CXXFLAGS'],
}
for wmake_rules_file in wmake_rules_files:
# the cOpt and c++Opt files don't exist in the General directories (which are included for recent versions)
if not os.path.isfile(wmake_rules_file):
continue
fullpath = os.path.join(self.builddir, self.openfoamdir, wmake_rules_file)
self.log.debug("Patching compiler variables in %s", fullpath)
regex_subs = []
for comp_var, newval in comp_vars.items():
regex_subs.append((r"^(%s\s*=\s*).*$" % re.escape(comp_var), r"\1%s" % newval))
# replace /lib/cpp by cpp, but keep the arguments
regex_subs.append((r"^(CPP\s*=\s*)/lib/cpp(.*)$", r"\1cpp\2"))
apply_regex_substitutions(fullpath, regex_subs)
# enable verbose build for debug purposes
# starting with openfoam-extend 3.2, PS1 also needs to be set
env.setvar("FOAM_VERBOSE", '1')
# installation directory
env.setvar("FOAM_INST_DIR", self.installdir)
# third party directory
self.thrdpartydir = "ThirdParty-%s" % self.version
# only if third party stuff is actually installed
if os.path.exists(self.thrdpartydir):
os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))
env.setvar("WM_COMPILER", self.wm_compiler)
env.setvar("WM_MPLIB", self.wm_mplib)
# Set Compile options according to build type
env.setvar("WM_COMPILE_OPTION", self.build_type)
# parallel build spec
env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env.setvar("WM_LABEL_SIZE", '64')
else:
env.setvar("WM_LABEL_SIZE", '32')
# make sure lib/include dirs for dependencies are found
openfoam_extend_v3 = 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0')
if self.looseversion < LooseVersion("2") or openfoam_extend_v3:
self.log.debug("List of deps: %s" % self.cfg.dependencies())
for dep in self.cfg.dependencies():
dep_name = dep['name'].upper(),
dep_root = get_software_root(dep['name'])
env.setvar("%s_SYSTEM" % dep_name, "1")
dep_vars = {
"%s_DIR": "%s",
"%s_BIN_DIR": "%s/bin",
"%s_LIB_DIR": "%s/lib",
"%s_INCLUDE_DIR": "%s/include",
}
for var, val in dep_vars.items():
env.setvar(var % dep_name, val % dep_root)
else:
for depend in ['SCOTCH', 'METIS', 'CGAL', 'Paraview']:
dependloc = get_software_root(depend)
if dependloc:
if depend == 'CGAL' and get_software_root('Boost'):
env.setvar("CGAL_ROOT", dependloc)
env.setvar("BOOST_ROOT", get_software_root('Boost'))
else:
env.setvar("%s_ROOT" % depend.upper(), dependloc)
    def build_step(self):
        """Build OpenFOAM using make after sourcing script to set environment.

        Runs the variant-appropriate clean command followed by the Allwmake
        build script, all in a single shell so the sourced etc/bashrc
        environment is in effect.
        """
        # Some parts of OpenFOAM uses CMake to build
        # make sure the basic environment is correct
        setup_cmake_env(self.toolchain)

        precmd = "source %s" % os.path.join(self.builddir, self.openfoamdir, "etc", "bashrc")
        # pick the clean command understood by this variant/version
        if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('4.0'):
            if self.looseversion >= LooseVersion('2006'):
                cleancmd = "cd $WM_PROJECT_DIR && wclean -platform -all && cd -"
            else:
                cleancmd = "cd $WM_PROJECT_DIR && wcleanPlatform -all && cd -"
        else:
            cleancmd = "wcleanAll"

        # make directly in install directory
        # '%s' placeholder is filled in below with the actual build script name
        cmd_tmpl = "%(precmd)s && %(cleancmd)s && %(prebuildopts)s %(makecmd)s" % {
            'precmd': precmd,
            'cleancmd': cleancmd,
            'prebuildopts': self.cfg['prebuildopts'],
            'makecmd': os.path.join(self.builddir, self.openfoamdir, '%s'),
        }
        if 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
            # OpenFOAM-Extend's interactive install script: answer its
            # questions automatically via run_cmd_qa
            qa = {
                "Proceed without compiling ParaView [Y/n]": 'Y',
                "Proceed without compiling cudaSolvers? [Y/n]": 'Y',
            }
            # output patterns that are not questions
            noqa = [
                ".* -o .*",
                "checking .*",
                "warning.*",
                "configure: creating.*",
                "%s .*" % os.environ['CC'],
                "wmake .*",
                "Making dependency list for source file.*",
                r"\s*\^\s*",  # warning indicator
                "Cleaning .*",
            ]
            run_cmd_qa(cmd_tmpl % 'Allwmake.firstInstall', qa, no_qa=noqa, log_all=True, simple=True, maxhits=500)
        else:
            cmd = 'Allwmake'
            if self.looseversion > LooseVersion('1606'):
                # use Allwmake -log option if possible since this can be useful during builds, but also afterwards
                cmd += ' -log'
            run_cmd(cmd_tmpl % cmd, log_all=True, simple=True, log_output=True)
def det_psubdir(self):
"""Determine the platform-specific installation directory for OpenFOAM."""
# OpenFOAM >= 3.0.0 can use 64 bit integers
# same goes for OpenFOAM-Extend >= 4.1
if 'extend' in self.name.lower():
set_int_size = self.looseversion >= LooseVersion('4.1')
else:
set_int_size = self.looseversion >= LooseVersion('3.0')
if set_int_size:
if self.toolchain.options['i8']:
int_size = 'Int64'
else:
int_size = 'Int32'
else:
int_size = ''
archpart = '64'
arch = get_cpu_architecture()
if arch == AARCH64:
# Variants have different abbreviations for ARM64...
if self.looseversion < LooseVersion("100"):
archpart = 'Arm64'
else:
archpart = 'ARM64'
elif arch == POWER:
archpart = 'PPC64le'
psubdir = "linux%s%sDP%s%s" % (archpart, self.wm_compiler, int_size, self.build_type)
return psubdir
    def install_step(self):
        """Building was performed in install dir, so just fix permissions.

        Also creates symlinks in the library directory for all MPI-flavoured
        libraries so they take precedence over their 'dummy' counterparts.
        """
        # fix permissions of OpenFOAM dir: world-readable, dirs traversable
        fullpath = os.path.join(self.installdir, self.openfoamdir)
        adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
        adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)

        # fix permissions of ThirdParty dir and subdirs (also for 2.x)
        # if the thirdparty tarball is installed
        fullpath = os.path.join(self.installdir, self.thrdpartydir)
        if os.path.exists(fullpath):
            adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
            adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)

        # create symlinks in the lib directory to all libraries in the mpi subdirectory
        # to make sure they take precedence over the libraries in the dummy subdirectory
        shlib_ext = get_shared_lib_ext()
        psubdir = self.det_psubdir()
        # library directory layout differs between (old) variants
        openfoam_extend_v3 = 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0')
        if openfoam_extend_v3 or self.looseversion < LooseVersion("2"):
            libdir = os.path.join(self.installdir, self.openfoamdir, "lib", psubdir)
        else:
            libdir = os.path.join(self.installdir, self.openfoamdir, "platforms", psubdir, "lib")

        # OpenFOAM v2012 puts mpi into eb-mpi
        if self.looseversion >= LooseVersion("2012"):
            mpilibssubdir = "eb-mpi"
        else:
            mpilibssubdir = "mpi"
        mpilibsdir = os.path.join(libdir, mpilibssubdir)

        if os.path.exists(mpilibsdir):
            for lib in glob.glob(os.path.join(mpilibsdir, "*.%s" % shlib_ext)):
                libname = os.path.basename(lib)
                dst = os.path.join(libdir, libname)
                # relative symlink: <libdir>/lib<X>.so -> <mpilibssubdir>/lib<X>.so
                os.symlink(os.path.join(mpilibssubdir, libname), dst)
    def sanity_check_step(self):
        """Custom sanity check for OpenFOAM: check files/dirs, environment setup,
        and (for recent openfoam.org versions) run the motorBike tutorial."""
        shlib_ext = get_shared_lib_ext()
        psubdir = self.det_psubdir()

        # binaries/libraries live in different locations, depending on variant/version
        openfoam_extend_v3 = 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0')
        if openfoam_extend_v3 or self.looseversion < LooseVersion("2"):
            toolsdir = os.path.join(self.openfoamdir, "applications", "bin", psubdir)
            libsdir = os.path.join(self.openfoamdir, "lib", psubdir)
            dirs = [toolsdir, libsdir]
        else:
            toolsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "bin")
            libsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "lib")
            dirs = [toolsdir, libsdir]

        # some randomly selected binaries
        # if one of these is missing, it's very likely something went wrong
        bins = [os.path.join(self.openfoamdir, "bin", x) for x in ["paraFoam"]] + \
               [os.path.join(toolsdir, "buoyantSimpleFoam")] + \
               [os.path.join(toolsdir, "%sFoam" % x) for x in ["boundary", "engine"]] + \
               [os.path.join(toolsdir, "surface%s" % x) for x in ["Add", "Find", "Smooth"]] + \
               [os.path.join(toolsdir, x) for x in ['blockMesh', 'checkMesh', 'deformedGeom', 'engineSwirl',
                                                    'modifyMesh', 'refineMesh']]

        # test setting up the OpenFOAM environment in bash shell
        load_openfoam_env = "source $FOAM_BASH"
        custom_commands = [load_openfoam_env]

        # only include Boussinesq and sonic since for OpenFOAM < 7, since those solvers have been deprecated
        if self.looseversion < LooseVersion('7'):
            bins.extend([
                os.path.join(toolsdir, "buoyantBoussinesqSimpleFoam"),
                os.path.join(toolsdir, "sonicFoam")
            ])

        # check for the Pstream and scotchDecomp libraries, there must be a dummy one and an mpi one
        if 'extend' in self.name.lower():
            libs = [os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext),
                    os.path.join(libsdir, "libmetisDecomp.%s" % shlib_ext)]
            if self.looseversion < LooseVersion('3.2'):
                # Pstream should have both a dummy and a mpi one
                libs.extend([os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", "mpi"]])
                libs.extend([os.path.join(libsdir, "mpi", "libparMetisDecomp.%s" % shlib_ext)])
            else:
                libs.extend([os.path.join(libsdir, "libparMetisDecomp.%s" % shlib_ext)])
        else:
            # OpenFOAM v2012 puts mpi into eb-mpi
            if self.looseversion >= LooseVersion("2012"):
                mpilibssubdir = "eb-mpi"
            else:
                mpilibssubdir = "mpi"
            # there must be a dummy one and an mpi one for both
            libs = [os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", mpilibssubdir]] + \
                   [os.path.join(libsdir, x, "libptscotchDecomp.%s" % shlib_ext) for x in ["dummy", mpilibssubdir]] +\
                   [os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext)] + \
                   [os.path.join(libsdir, "dummy", "libscotchDecomp.%s" % shlib_ext)]

        if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion("2.3.0"):
            # surfaceSmooth is replaced by surfaceLambdaMuSmooth is OpenFOAM v2.3.0
            bins.remove(os.path.join(toolsdir, "surfaceSmooth"))
            bins.append(os.path.join(toolsdir, "surfaceLambdaMuSmooth"))

        if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion("2.4.0"):
            # also check for foamMonitor for OpenFOAM versions other than OpenFOAM-Extend
            bins.append(os.path.join(self.openfoamdir, 'bin', 'foamMonitor'))

            # test foamMonitor; wrap `foamMonitor -h` to generate exit code 1 if any dependency is missing
            # the command `foamMonitor -h` does not return correct exit codes on its own in all versions
            test_foammonitor = "! foamMonitor -h 2>&1 | grep 'not installed'"
            custom_commands.append(' && '.join([load_openfoam_env, test_foammonitor]))

        custom_paths = {
            'files': [os.path.join(self.openfoamdir, 'etc', x) for x in ["bashrc", "cshrc"]] + bins + libs,
            'dirs': dirs,
        }

        # run motorBike tutorial case to ensure the installation is functional (if it's available);
        # only for recent (>= v6.0) versions of openfoam.org variant
        if self.looseversion >= LooseVersion('6') and self.looseversion < LooseVersion('100'):
            openfoamdir_path = os.path.join(self.installdir, self.openfoamdir)
            motorbike_path = os.path.join(openfoamdir_path, 'tutorials', 'incompressible', 'simpleFoam', 'motorBike')
            if os.path.exists(motorbike_path):
                test_dir = tempfile.mkdtemp()
                # geometry resource directory was renamed in v9 — TODO confirm exact version boundary
                if self.looseversion >= LooseVersion('9'):
                    geom_target_dir = 'geometry'
                else:
                    geom_target_dir = 'triSurface'
                cmds = [
                    "cp -a %s %s" % (motorbike_path, test_dir),
                    "cd %s" % os.path.join(test_dir, os.path.basename(motorbike_path)),
                    "source $FOAM_BASH",
                    ". $WM_PROJECT_DIR/bin/tools/RunFunctions",
                    "cp $FOAM_TUTORIALS/resources/geometry/motorBike.obj.gz constant/%s/" % geom_target_dir,
                    "runApplication surfaceFeatures",
                    "runApplication blockMesh",
                    "runApplication decomposePar -copyZero",
                    "runParallel snappyHexMesh -overwrite",
                    "runParallel patchSummary",
                    "runParallel potentialFoam",
                    "runParallel simpleFoam",
                    "runApplication reconstructParMesh -constant",
                    "runApplication reconstructPar -latestTime",
                    "cd %s" % self.builddir,
                    "rm -r %s" % test_dir,
                ]
                # all commands need to be run in a single shell command,
                # because sourcing $FOAM_BASH sets up environment
                custom_commands.append(' && '.join(cmds))

        super(EB_OpenFOAM, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
def make_module_extra(self, altroot=None, altversion=None):
"""Define extra environment variables required by OpenFOAM"""
txt = super(EB_OpenFOAM, self).make_module_extra()
env_vars = [
# Set WM_COMPILE_OPTION in the module file
# $FOAM_BASH will then pick it up correctly.
('WM_COMPILE_OPTION', self.build_type),
('WM_PROJECT_VERSION', self.version),
('FOAM_INST_DIR', self.installdir),
('WM_COMPILER', self.wm_compiler),
('WM_MPLIB', self.wm_mplib),
('FOAM_BASH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'bashrc')),
('FOAM_CSH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'cshrc')),
]
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env_vars += [('WM_LABEL_SIZE', '64')]
else:
env_vars += [('WM_LABEL_SIZE', '32')]
for (env_var, val) in env_vars:
# check whether value is defined for compatibility with --module-only
if val:
txt += self.module_generator.set_environment(env_var, val)
return txt
|
hpcuantwerpen/easybuild-easyblocks
|
easybuild/easyblocks/o/openfoam.py
|
Python
|
gpl-2.0
| 27,029
|
[
"ParaView"
] |
5f367d01c990be7be910c6a9f0a4826a42f38f1b813e090fb441cc2f910d8a34
|
#!/usr/bin/env python
# Copyright (C) 2014 Swift Navigation Inc.
# Contact: Colin Beighley <colin@swift-nav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
from urllib2 import URLError
from json import load as jsonload
from time import sleep
from intelhex import IntelHex, HexRecordError, HexReaderError
from pkg_resources import parse_version
from sbp.bootload import SBP_MSG_BOOTLOADER_JUMP_TO_APP
from sbp.piksi import SBP_MSG_RESET
from threading import Thread
from traits.api import HasTraits, Event, String, Button, Instance, Int, Bool, \
on_trait_change
from traitsui.api import View, Handler, Action, Item, TextEditor, VGroup, \
UItem, InstanceEditor, VSplit, HSplit, HGroup, \
BooleanEditor
from pyface.api import GUI, FileDialog, OK, ProgressDialog
from piksi_tools.version import VERSION as CONSOLE_VERSION
from piksi_tools import bootload
from piksi_tools import flash
import callback_prompt as prompt
from update_downloader import UpdateDownloader
from output_stream import OutputStream
import sys, os
from pyface.image_resource import ImageResource
# Resolve the base directory for bundled resources: PyInstaller unpacks
# data files into sys._MEIPASS when running from a frozen executable.
if getattr(sys, 'frozen', False):
    # we are running in a |PyInstaller| bundle
    basedir = sys._MEIPASS
    os.chdir(basedir)
else:
    # we are running in a normal Python environment
    basedir = os.path.dirname(__file__)
# application icon, looked up relative to both CWD and basedir
icon = ImageResource('icon',
                     search_path=['images', os.path.join(basedir, 'images')])
# index of the latest firmware/console versions published by Swift Navigation
INDEX_URL = 'http://downloads.swiftnav.com/index.json'
class IntelHexFileDialog(HasTraits):
    """File chooser for IntelHex firmware images, with a status line and a
    'Choose Firmware File' button for embedding in a traitsui window."""

    file_wildcard = String("Intel HEX File (*.hex)|*.hex|All files|*")
    status = String('Please choose a file')
    choose_fw = Button(label='Choose Firmware File')

    view = View(
        UItem('status'),
        UItem('choose_fw')
    )

    def __init__(self, flash_type):
        """
        Pop-up file dialog to choose an IntelHex file, with status and button to
        display in traitsui window.

        Parameters
        ----------
        flash_type : string
            Which Piksi flash to interact with ("M25" or "STM").
        """
        if flash_type not in ('M25', 'STM'):
            raise ValueError("flash_type must be 'M25' or 'STM'")
        self._flash_type = flash_type
        # loaded IntelHex image, or None while no valid file is loaded
        self.ihx = None

    def clear(self, status):
        """
        Set text of status box and clear IntelHex file.

        Parameters
        ----------
        status : string
            Error text to replace status box text with.
        """
        self.ihx = None
        self.status = status

    def load_ihx(self, filepath):
        """
        Load IntelHex file and set status to indicate if file was
        successfully loaded.

        Parameters
        ----------
        filepath : string
            Path to IntelHex file.
        """
        try:
            self.ihx = IntelHex(filepath)
            self.status = os.path.split(filepath)[1]
        except HexReaderError:
            # Bug fix: bail out here — the original fell through with
            # self.ihx == None, crashing in flash.ihx_ranges() below.
            # HexReaderError also covers HexRecordError (its subclass).
            self.clear('Error: File is not a valid Intel HEX File')
            return

        # Check that address ranges are valid for self._flash_type.
        ihx_addrs = flash.ihx_ranges(self.ihx)
        if self._flash_type == "M25":
            try:
                flash.sectors_used(ihx_addrs, flash.m25_addr_sector_map)
            except IndexError:
                self.clear('Error: HEX File contains restricted address ' + \
                           '(STM Firmware File Chosen?)')
        elif self._flash_type == "STM":
            try:
                flash.sectors_used(ihx_addrs, flash.stm_addr_sector_map)
            # narrowed from a bare 'except:' to match the M25 branch above
            except IndexError:
                self.clear('Error: HEX File contains restricted address ' + \
                           '(NAP Firmware File Chosen?)')

    def _choose_fw_fired(self):
        """ Activate file dialog window to choose IntelHex firmware file. """
        dialog = FileDialog(label='Choose Firmware File',
                            action='open', wildcard=self.file_wildcard)
        dialog.open()
        if dialog.return_code == OK:
            filepath = os.path.join(dialog.directory, dialog.filename)
            self.load_ihx(filepath)
        else:
            # NOTE(review): a cancelled dialog is also reported as an error
            # here — presumably intentional, but worth confirming.
            self.clear('Error while selecting file')
class PulsableProgressDialog(ProgressDialog):
    """Progress dialog that can start out 'pulsing' (indeterminate) before
    switching to a discrete 0-100 progress bar."""
    def __init__(self, max, pulsed=False):
        """
        Pop-up window for showing a process's progress.

        Parameters
        ----------
        max : int
            Maximum value of the progress bar.
        pulsed : bool
            Show non-partial progress initially.
        """
        # NOTE: 'max' shadows the builtin; kept for interface compatibility.
        super(PulsableProgressDialog, self).__init__()
        self.min = 0
        # max == 0 presumably makes the underlying pyface dialog pulse until
        # self.max is raised to 100 in progress() — TODO confirm
        self.max = 0
        self.pulsed = pulsed
        # remember the caller's maximum; progress() scales counts against it
        self.passed_max = max

    def progress(self, count):
        """
        Update progress of progress bar. If pulsing initially, wait until count
        is at least 12 before changing to discrete progress bar.

        Parameters
        ----------
        count : int
            Current value of progress.
        """
        # Provide user feedback initially via pulse for slow sector erases.
        if self.pulsed:
            if count > 12:
                # switch from pulsing to a discrete percentage display
                self.max = 100
                GUI.invoke_later(self.update, int(100*float(count)/self.passed_max))
        else:
            self.max = 100
            GUI.invoke_later(self.update, int(100*float(count)/self.passed_max))

    def close(self):
        """ Close progress bar window. """
        # close on the GUI thread; sleep briefly so the queued close runs
        # before callers proceed — NOTE(review): timing-based, fragile
        GUI.invoke_after(0.1, super(PulsableProgressDialog, self).close)
        sleep(0.2)
class UpdateView(HasTraits):
    """Traits tab with UI for updating Piksi firmware and the console."""
    # version strings shown in the UI; replaced once the Piksi sends its
    # settings / once the download index has been fetched
    piksi_stm_vers = String('Waiting for Piksi to send settings...')
    newest_stm_vers = String('Downloading Newest Firmware info...')
    piksi_nap_vers = String('Waiting for Piksi to send settings...')
    newest_nap_vers = String('Downloading Newest Firmware info...')
    local_console_vers = String(CONSOLE_VERSION)
    newest_console_vers = String('Downloading Newest Console info...')

    # checkbox state + enable flag for erasing the entire STM flash
    erase_stm = Bool(True)
    erase_en = Bool(True)

    # firmware-update button and its state flags
    update_firmware = Button(label='Update Piksi Firmware')
    updating = Bool(False)
    update_en = Bool(False)

    # firmware-download button and its state flags
    download_firmware = Button(label='Download Newest Firmware Files')
    downloading = Bool(False)
    download_fw_en = Bool(True)

    # file choosers for the STM and NAP (M25) firmware images
    stm_fw = Instance(IntelHexFileDialog)
    nap_fw = Instance(IntelHexFileDialog)

    # text console showing update progress/status messages
    stream = Instance(OutputStream)

    view = View(
        VGroup(
            HGroup(
                VGroup(
                    Item('piksi_stm_vers', label='Piksi STM Firmware Version'),
                    Item('newest_stm_vers', label='Newest STM Firmware Version'),
                    Item('piksi_nap_vers', label='Piksi NAP Firmware Version'),
                    Item('newest_nap_vers', label='Newest NAP Firmware Version'),
                    Item('local_console_vers', label='Local Piksi Console Version'),
                    Item('newest_console_vers', label='Newest Piksi Console Version'),
                ),
                VGroup(
                    Item('stm_fw', style='custom', label='STM Firmware File', \
                         enabled_when='download_fw_en'),
                    Item('nap_fw', style='custom', label='NAP Firmware File', \
                         enabled_when='download_fw_en'),
                    Item('erase_stm', label='Erase Entire STM flash', \
                         enabled_when='erase_en'),
                ),
            ),
            UItem('download_firmware', enabled_when='download_fw_en'),
            UItem('update_firmware', enabled_when='update_en'),
            Item(
                'stream',
                style='custom',
                editor=InstanceEditor(),
                label='Update Status',
            ),
        )
    )
    def __init__(self, link, prompt=True):
        """
        Traits tab with UI for updating Piksi firmware.

        Parameters
        ----------
        link : sbp.client.handler.Handler
            Link for SBP transfer to/from Piksi.
        prompt : bool
            Prompt user to update console/firmware if out of date.
        """
        self.link = link
        # presumably filled in when Piksi sends its settings — not visible here
        self.settings = {}
        self.prompt = prompt
        # expose this object in the embedded Python console as 'update'
        self.python_console_cmds = {
          'update': self
        }
        # set once the download index has been fetched (see _download_firmware)
        self.update_dl = None
        # file choosers for both flash targets; re-evaluate widget enables
        # whenever a new file is (un)loaded
        self.stm_fw = IntelHexFileDialog('STM')
        self.stm_fw.on_trait_change(self._manage_enables, 'status')
        self.nap_fw = IntelHexFileDialog('M25')
        self.nap_fw.on_trait_change(self._manage_enables, 'status')
        self.stream = OutputStream()
        self.get_latest_version_info()
def _manage_enables(self):
    """Manages whether traits widgets are enabled in the UI or not.

    While a download or an update is running, both action buttons are
    disabled. Otherwise downloading is allowed, and updating additionally
    requires both firmware images to be loaded. The erase checkbox is
    locked only while flashing.
    """
    if self.updating or self.downloading:
        # An operation is in progress: lock out both buttons.
        self.update_en = False
        self.download_fw_en = False
    else:
        self.download_fw_en = True
        # Updating requires both STM and NAP images to be loaded.
        if self.stm_fw.ihx is not None and self.nap_fw.ihx is not None:
            self.update_en = True
        else:
            self.update_en = False
    # The erase option may only be toggled while not updating.
    self.erase_en = not self.updating
def _updating_changed(self):
    """ Handles self.updating trait being changed. """
    # Traits static notification handler: refresh widget enables whenever
    # an update starts or finishes.
    self._manage_enables()
def _downloading_changed(self):
    """ Handles self.downloading trait being changed. """
    # Traits static notification handler: refresh widget enables whenever
    # a download starts or finishes.
    self._manage_enables()
def _write(self, text):
    """
    Stream style write function. Allows flashing debugging messages to be
    routed to embedded text console.

    Parameters
    ----------
    text : string
        Text to be written to screen.
    """
    # Emit the message and a trailing newline as two separate writes (the
    # same call pattern the rest of the tab relies on), then flush so the
    # text shows up in the UI immediately.
    for chunk in (text, '\n'):
        self.stream.write(chunk)
    self.stream.flush()
def _update_firmware_fired(self):
    """
    Handle update_firmware button. Starts thread so as not to block the GUI
    thread.
    """
    # Ignore the click if a previous update is still running.
    existing = getattr(self, '_firmware_update_thread', None)
    if existing is not None and existing.is_alive():
        return
    self._firmware_update_thread = Thread(target=self.manage_firmware_updates)
    self._firmware_update_thread.start()
def _download_firmware(self):
    """ Download latest firmware from swiftnav.com.

    Downloads the NAP image first and then the STM image, saving each to
    disk and loading it into the corresponding file dialog. Errors are
    reported per image; one image failing does not stop the other.
    """
    self._write('')
    # Check that we received the index file from the website.
    if self.update_dl is None:
        self._write("Error: Can't download firmware files")
        return
    self.downloading = True
    status = 'Downloading Newest Firmware...'
    self.nap_fw.clear(status)
    self.stm_fw.clear(status)
    self._write(status)
    # Get firmware files from Swift Nav's website, save to disk, and load.
    # NAP and STM follow an identical download/save/load sequence, so the
    # logic is shared. The downloader method is resolved with getattr
    # INSIDE the try so a missing attribute is still reported as
    # "index file not downloaded yet", as before.
    for fw_dialog, method_name, name in (
            (self.nap_fw, 'download_nap_firmware', 'NAP'),
            (self.stm_fw, 'download_stm_firmware', 'STM')):
        try:
            self._write('Downloading Newest %s firmware' % name)
            filepath = getattr(self.update_dl, method_name)()
            self._write('Saved file to %s' % filepath)
            fw_dialog.load_ihx(filepath)
        except AttributeError:
            fw_dialog.clear("Error downloading firmware")
            self._write("Error downloading firmware: index file not downloaded yet")
        except KeyError:
            fw_dialog.clear("Error downloading firmware")
            self._write("Error downloading firmware: URL not present in index")
        except URLError:
            fw_dialog.clear("Error downloading firmware")
            self._write("Error: Failed to download latest %s firmware from Swift Navigation's website" % name)
    self.downloading = False
def _download_firmware_fired(self):
    """
    Handle download_firmware button. Starts thread so as not to block the GUI
    thread.
    """
    # Ignore the click if a previous download is still running.
    existing = getattr(self, '_download_firmware_thread', None)
    if existing is not None and existing.is_alive():
        return
    self._download_firmware_thread = Thread(target=self._download_firmware)
    self._download_firmware_thread.start()
def compare_versions(self):
    """
    To be called after latest Piksi firmware info has been received from
    device, to decide if current firmware on Piksi is out of date. Starts a
    thread so as not to block GUI thread.
    """
    # Skip if a comparison is already in flight.
    existing = getattr(self, '_compare_versions_thread', None)
    if existing is not None and existing.is_alive():
        return
    self._compare_versions_thread = Thread(target=self._compare_versions)
    self._compare_versions_thread.start()
def _compare_versions(self):
    """
    Compares version info between received firmware version / current console
    and firmware / console info from website to decide if current firmware or
    console is out of date. Prompt user to update if so.
    """
    # Check that settings received from Piksi contain FW versions.
    try:
        self.piksi_stm_vers = \
            self.settings['system_info']['firmware_version'].value
        self.piksi_nap_vers = \
            self.settings['system_info']['nap_version'].value
    except KeyError:
        self._write("\nError: Settings received from Piksi don't contain firmware version keys. Please contact Swift Navigation.\n")
        return
    # Check that we received the index file from the website.
    if self.update_dl == None:
        self._write("Error: No website index to use to compare versions with local firmware")
        return
    # Check if console is out of date and notify user if so.
    if self.prompt:
        local_console_version = parse_version(CONSOLE_VERSION)
        remote_console_version = parse_version(self.newest_console_vers)
        self.console_outdated = remote_console_version > local_console_version
        if self.console_outdated:
            console_outdated_prompt = \
                prompt.CallbackPrompt(
                    title="Piksi Console Outdated",
                    actions=[prompt.close_button],
                )
            console_outdated_prompt.text = \
                "Your Piksi Console is out of date and may be incompatible\n" + \
                "with current firmware. We highly recommend upgrading to\n" + \
                "ensure proper behavior.\n\n" + \
                "Please visit http://downloads.swiftnav.com to\n" + \
                "download the newest version.\n\n" + \
                "Local Console Version :\n\t" + \
                CONSOLE_VERSION + \
                "\nNewest Console Version :\n\t" + \
                self.update_dl.index['piksi_v2.3.1']['console']['version'] + "\n"
            console_outdated_prompt.run()
    # For timing aesthetics between windows popping up.
    sleep(0.5)
    # Check if firmware is out of date and notify user if so.
    if self.prompt:
        local_stm_version = parse_version(
            self.settings['system_info']['firmware_version'].value)
        remote_stm_version = parse_version(self.newest_stm_vers)
        local_nap_version = parse_version(
            self.settings['system_info']['nap_version'].value)
        remote_nap_version = parse_version(self.newest_nap_vers)
        # Either image being behind the published version flags an update.
        self.fw_outdated = remote_nap_version > local_nap_version or \
            remote_stm_version > local_stm_version
        if self.fw_outdated:
            fw_update_prompt = \
                prompt.CallbackPrompt(
                    title='Firmware Update',
                    actions=[prompt.close_button]
                )
            fw_update_prompt.text = \
                "New Piksi firmware available.\n\n" + \
                "Please use the Firmware Update tab to update.\n\n" + \
                "Newest STM Version :\n\t%s\n\n" % \
                self.update_dl.index['piksi_v2.3.1']['stm_fw']['version'] + \
                "Newest SwiftNAP Version :\n\t%s\n\n" % \
                self.update_dl.index['piksi_v2.3.1']['nap_fw']['version']
            fw_update_prompt.run()
def get_latest_version_info(self):
    """
    Get latest firmware / console version from website. Starts thread so as not
    to block the GUI thread.
    """
    # Skip if a fetch is already in flight.
    existing = getattr(self, '_get_latest_version_info_thread', None)
    if existing is not None and existing.is_alive():
        return
    self._get_latest_version_info_thread = Thread(target=self._get_latest_version_info)
    self._get_latest_version_info_thread.start()
def _get_latest_version_info(self):
    """ Get latest firmware / console version from website. """
    try:
        self.update_dl = UpdateDownloader()
    except URLError:
        self._write("\nError: Failed to download latest file index from Swift Navigation's website. Please visit our website to check that you're running the latest Piksi firmware and Piksi console.\n")
        return
    # Make sure index contains all keys we are interested in.
    try:
        device_index = self.update_dl.index['piksi_v2.3.1']
        self.newest_stm_vers = device_index['stm_fw']['version']
        self.newest_nap_vers = device_index['nap_fw']['version']
        self.newest_console_vers = device_index['console']['version']
    except KeyError:
        self._write("\nError: Index downloaded from Swift Navigation's website (%s) doesn't contain all keys. Please contact Swift Navigation.\n" % INDEX_URL)
        return
# Executed in GUI thread, called from Handler.
# NOTE(review): this is normally invoked as the target of
# _firmware_update_thread (see _update_firmware_fired), i.e. NOT on the GUI
# thread — confirm before relying on GUI-thread affinity.
def manage_firmware_updates(self):
    """
    Update Piksi firmware. Erase entire STM flash (other than bootloader)
    if so directed. Flash NAP only if new firmware is available.
    """
    self.updating = True
    self._write('')
    # Erase all of STM's flash (other than bootloader) if box is checked.
    if self.erase_stm:
        text = "Erasing STM"
        self._write(text)
        self.create_flash("STM")
        # Bootloader sectors are restricted; erase everything else.
        sectors_to_erase = set(range(self.pk_flash.n_sectors)).difference(set(self.pk_flash.restricted_sectors))
        progress_dialog = PulsableProgressDialog(len(sectors_to_erase), False)
        progress_dialog.title = text
        GUI.invoke_later(progress_dialog.open)
        erase_count = 0
        for s in sorted(sectors_to_erase):
            progress_dialog.progress(erase_count)
            self._write('Erasing %s sector %d' % (self.pk_flash.flash_type, s))
            self.pk_flash.erase_sector(s)
            erase_count += 1
        self.stop_flash()
        self._write("")
        progress_dialog.close()
    # Flash STM.
    text = "Updating STM"
    self._write(text)
    self.create_flash("STM")
    stm_n_ops = self.pk_flash.ihx_n_ops(self.stm_fw.ihx, \
        erase = not self.erase_stm)
    progress_dialog = PulsableProgressDialog(stm_n_ops, True)
    progress_dialog.title = text
    GUI.invoke_later(progress_dialog.open)
    # Don't erase sectors if we've already done so above.
    self.pk_flash.write_ihx(self.stm_fw.ihx, self.stream, mod_print=0x40, \
        elapsed_ops_cb = progress_dialog.progress, \
        erase = not self.erase_stm)
    self.stop_flash()
    self._write("")
    progress_dialog.close()
    # Flash NAP if out of date.
    try:
        local_nap_version = parse_version(
            self.settings['system_info']['nap_version'].value)
        remote_nap_version = parse_version(self.newest_nap_vers)
        nap_out_of_date = local_nap_version != remote_nap_version
    except KeyError:
        # If the local version can't be determined, flash to be safe.
        nap_out_of_date = True
    if nap_out_of_date:
        text = "Updating NAP"
        self._write(text)
        self.create_flash("M25")
        nap_n_ops = self.pk_flash.ihx_n_ops(self.nap_fw.ihx)
        progress_dialog = PulsableProgressDialog(nap_n_ops, True)
        progress_dialog.title = text
        GUI.invoke_later(progress_dialog.open)
        self.pk_flash.write_ihx(self.nap_fw.ihx, self.stream, mod_print=0x40, \
            elapsed_ops_cb = progress_dialog.progress)
        self.stop_flash()
        self._write("")
        progress_dialog.close()
    # Must tell Piksi to jump to application after updating firmware.
    self.link.send(SBP_MSG_BOOTLOADER_JUMP_TO_APP, '\x00')
    self._write("Firmware updates finished.")
    self._write("")
    self.updating = False
def create_flash(self, flash_type):
    """
    Create flash.Flash instance and set Piksi into bootloader mode, prompting
    user to reset if necessary.

    Parameter
    ---------
    flash_type : string
        Either "STM" or "M25".
    """
    # Reset device if the application is running to put into bootloader mode.
    self.link.send(SBP_MSG_RESET, '')
    self.pk_boot = bootload.Bootloader(self.link)
    self._write("Waiting for bootloader handshake message from Piksi ...")
    reset_prompt = None
    # First handshake attempt with a 1 second timeout.
    handshake_received = self.pk_boot.wait_for_handshake(1)
    # Prompt user to reset Piksi if we don't receive the handshake message
    # within a reasonable amount of time (firmware might be corrupted).
    while not handshake_received:
        reset_prompt = \
            prompt.CallbackPrompt(
                title="Please Reset Piksi",
                actions=[prompt.close_button],
            )
        reset_prompt.text = \
            "You must press the reset button on your Piksi in order\n" + \
            "to update your firmware.\n\n" + \
            "Please press it now.\n\n"
        reset_prompt.run(block=False)
        # Keep polling until the handshake arrives or the user dismisses
        # the dialog (which re-enters the outer loop and re-prompts).
        while not reset_prompt.closed and not handshake_received:
            handshake_received = self.pk_boot.wait_for_handshake(1)
        reset_prompt.kill()
        reset_prompt.wait()
    self.pk_boot.reply_handshake()
    self._write("received bootloader handshake message.")
    self._write("Piksi Onboard Bootloader Version: " + self.pk_boot.version)
    self.pk_flash = flash.Flash(self.link, flash_type, self.pk_boot.sbp_version)
def stop_flash(self):
    """
    Stop Flash and Bootloader instances (removes callback from SerialLink).
    """
    # Tear down in the order they were layered: the flash object first,
    # then the bootloader it was built on top of.
    for component in (self.pk_flash, self.pk_boot):
        component.stop()
|
denniszollo/piksi_tools
|
piksi_tools/console/update_view.py
|
Python
|
lgpl-3.0
| 22,317
|
[
"VisIt"
] |
d7027d87daac293e3ccda7f706e75bad62ada9356989a8a9fe8224d81cb74cfc
|
#### PATTERN | NL | INFLECT ##############################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
##########################################################################
# Regular expressions-based rules for Dutch word inflection:
# - pluralization and singularization of nouns,
# - conjugation of verbs,
# - predicative and attributive of adjectives.
# Accuracy (measured on CELEX Dutch morphology word forms):
# 79% for pluralize()
# 91% for singularize()
# 90% for Verbs.find_lemma()
# 88% for Verbs.find_lexeme()
# 99% for predicative()
# 99% for attributive()
import os
import sys
import re
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
PROGRESSIVE,
PARTICIPLE
)
sys.path.pop(0)
# Penn-style part-of-speech tags used throughout this module.
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"

VOWELS = ("a", "e", "i", "o", "u")
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)


def is_vowel(ch):
    """Return True if the single character ch is a plain vowel (y excluded)."""
    return ch in VOWELS

#### PLURALIZE ###########################################################

# Nouns taking -en / -ën / -eren / -deren despite their endings.
plural_irregular_en = set(("dag", "dak", "dal", "pad", "vat", "weg"))
plural_irregular_een = set(("fee", "genie", "idee", "orgie", "ree"))
plural_irregular_eren = set(
    ("blad", "ei", "gelid", "gemoed", "kalf", "kind", "lied", "rad", "rund"))
plural_irregular_deren = set(("hoen", "been"))

# Fully irregular singular => plural mappings.
plural_irregular = {
    "centrum": "centra",
    "escargot": "escargots",
    "gedrag": "gedragingen",
    "gelid": "gelederen",
    "kaars": "kaarsen",
    "kleed": "kleren",
    "koe": "koeien",
    "lam": "lammeren",
    "museum": "museums",
    "stad": "steden",
    "stoel": "stoelen",
    "vlo": "vlooien"
}


def pluralize(word, pos=NOUN, custom=None):
    """Returns the plural of a given word.
    For example: stad => steden.
    The custom dictionary is for user-defined replacements
    (None means no overrides; a mutable default argument is avoided).
    Rules are applied in order; the first match wins.
    """
    if custom and word in custom:
        return custom[word]
    w = word.lower()
    if pos == NOUN:
        if w in plural_irregular_en:      # dag => dagen
            return w + "en"
        if w in plural_irregular_een:     # fee => feeën
            return w + u"ën"
        if w in plural_irregular_eren:    # blad => bladeren
            return w + "eren"
        if w in plural_irregular_deren:   # been => beenderen
            return w + "deren"
        if w in plural_irregular:
            return plural_irregular[w]
        # Words ending in -icus get -ici: academicus => academici
        if w.endswith("icus"):
            return w[:-2] + "i"
        # Words ending in -s usually get -sen: les => lessen.
        if w.endswith(("es", "as", "nis", "ris", "vis")):
            return w + "sen"
        # Words ending in -s usually get -zen: huis => huizen.
        if w.endswith("s") and not w.endswith(("us", "ts", "mens")):
            return w[:-1] + "zen"
        # Words ending in -f usually get -ven: brief => brieven.
        if w.endswith("f"):
            return w[:-1] + "ven"
        # Words ending in -um get -ums: museum => museums.
        if w.endswith("um"):
            return w + "s"
        # Words ending in unstressed -ee or -ie get -ën: bacterie => bacteriën
        if w.endswith("ie"):
            return w + "s"
        # NOTE(review): the "ie" case below is shadowed by the rule above
        # and is unreachable; kept for behavioral parity with the original.
        if w.endswith(("ee", "ie")):
            return w[:-1] + u"ën"
        # Words ending in -heid get -heden: mogelijkheid => mogelijkheden
        if w.endswith("heid"):
            return w[:-4] + "heden"
        # Words ending in -e -el -em -en -er -ie get -s: broer => broers.
        if w.endswith((u"é", "e", "el", "em", "en", "er", "eu", "ie", "ue", "ui", "eau", "ah")):
            return w + "s"
        # Words ending in a vowel get 's: auto => auto's.
        if w.endswith(VOWELS) or w.endswith("y") and not w.endswith("e"):
            return w + "'s"
        # Words ending in -or always get -en: motor => motoren.
        if w.endswith("or"):
            return w + "en"
        # Words ending in -ij get -en: boerderij => boerderijen.
        if w.endswith("ij"):
            return w + "en"
        # Words ending in two consonants get -en: hand => handen.
        if len(w) > 1 and not is_vowel(w[-1]) and not is_vowel(w[-2]):
            return w + "en"
        # Words ending in one consonant with a short sound: fles => flessen.
        if len(w) > 2 and not is_vowel(w[-1]) and not is_vowel(w[-3]):
            return w + w[-1] + "en"
        # Words ending in one consonant with a long sound: raam => ramen.
        if len(w) > 2 and not is_vowel(w[-1]) and w[-2] == w[-3]:
            return w[:-2] + w[-1] + "en"
        return w + "en"
    return w
#### SINGULARIZE #########################################################

# Inverse of the irregular plural table: plural => singular.
singular_irregular = dict((v, k) for k, v in plural_irregular.items())


def singularize(word, pos=NOUN, custom={}):
    """Returns the singular of a given word, e.g. steden => stad.
    The custom dictionary is for user-defined replacements.
    Rules are applied in order; the first match wins.
    """
    if word in custom.keys():
        return custom[word]
    w = word.lower()
    if pos == NOUN and w in singular_irregular:
        return singular_irregular[w]
    # Only words with a plural-looking ending are processed further.
    if pos == NOUN and w.endswith((u"ën", "en", "s", "i")):
        # auto's => auto
        if w.endswith("'s"):
            return w[:-2]
        # broers => broer
        if w.endswith("s"):
            return w[:-1]
        # academici => academicus
        if w.endswith("ici"):
            return w[:-1] + "us"
        # feeën => fee
        if w.endswith(u"ën") and w[:-2] in plural_irregular_een:
            return w[:-2]
        # bacteriën => bacterie
        if w.endswith(u"ën"):
            return w[:-2] + "e"
        # mogelijkheden => mogelijkheid
        if w.endswith("heden"):
            return w[:-5] + "heid"
        # artikelen => artikel
        if w.endswith("elen") and not w.endswith("delen"):
            return w[:-2]
        # chinezen => chinees
        if w.endswith("ezen"):
            return w[:-4] + "ees"
        # neven => neef
        if w.endswith("even") and len(w) > 4 and not is_vowel(w[-5]):
            return w[:-4] + "eef"
        if w.endswith("en"):
            # Strip -en, then restore the stem's spelling below.
            w = w[:-2]
            # ogen => oog
            if w in ("og", "om", "ur"):
                return w[:-1] + w[-2] + w[-1]
            # hoenderen => hoen
            if w.endswith("der") and w[:-3] in plural_irregular_deren:
                return w[:-3]
            # eieren => ei
            if w.endswith("er") and w[:-2] in plural_irregular_eren:
                return w[:-2]
            # dagen => dag (not daag)
            if w in plural_irregular_en:
                return w
            # huizen => huis
            if w.endswith("z"):
                return w[:-1] + "s"
            # brieven => brief
            if w.endswith("v"):
                return w[:-1] + "f"
            # motoren => motor
            if w.endswith("or"):
                return w
            # flessen => fles
            if len(w) > 1 and not is_vowel(w[-1]) and w[-1] == w[-2]:
                return w[:-1]
            # baarden => baard
            if len(w) > 1 and not is_vowel(w[-1]) and not is_vowel(w[-2]):
                return w
            # boerderijen => boerderij
            if w.endswith("ij"):
                return w
            # idealen => ideaal
            if w.endswith(("eal", "ean", "eol", "ial", "ian", "iat", "iol")):
                return w[:-1] + w[-2] + w[-1]
            # ramen => raam
            if len(w) > 2 and not is_vowel(w[-1]) and is_vowel(w[-2]) and not is_vowel(w[-3]):
                return w[:-1] + w[-2] + w[-1]
        return w
    return w
#### VERB CONJUGATION ####################################################


class Verbs(_Verbs):
    """Dutch verb inflection, backed by the nl-verbs.txt lexicon with
    rule-based fallbacks (find_lemma / find_lexeme) for unknown verbs."""

    def __init__(self):
        # format maps columns in nl-verbs.txt onto pattern.text's
        # tense/person slots; default fills a missing slot from a related
        # one (e.g. present plural from the infinitive).
        _Verbs.__init__(self, os.path.join(MODULE, "nl-verbs.txt"),
            language="nl",
            format=[0, 1, 2, 3, 7, 8, 17, 18, 19, 23, 25,
                    24, 16, 9, 10, 11, 15, 33, 26, 27, 28, 32],
            default={
                1: 0, 2: 0, 3: 0, 7: 0,          # present singular
                4: 7, 5: 7, 6: 7,                # present plural
                17: 25, 18: 25, 19: 25, 23: 25,  # past singular
                20: 23, 21: 23, 22: 23,          # past plural
                9: 16, 10: 16, 11: 16, 15: 16,   # present singular negated
                12: 15, 13: 15, 14: 15,          # present plural negated
                26: 33, 27: 33, 28: 33,          # past singular negated
                29: 32, 30: 32, 31: 32, 32: 33   # past plural negated
            })

    def load(self):
        _Verbs.load(self)
        # Override ambiguous inverse mappings from the lexicon.
        self._inverse["was"] = "zijn"  # Instead of "wassen".
        self._inverse["waren"] = "zijn"
        self._inverse["zagen"] = "zien"
        self._inverse["wist"] = "weten"
        self._inverse["zou"] = "zullen"

    def find_lemma(self, verb):
        """ Returns the base form of the given inflected verb, using a rule-based approach.
        This is problematic if a verb ending in -e is given in the past tense or gerund.
        """
        v = verb.lower()
        # Common prefixes: op-bouwen and ver-bouwen inflect like bouwen.
        for prefix in ("aan", "be", "her", "in", "mee", "ont", "op", "over", "uit", "ver"):
            if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                return prefix + self.inflections[v[len(prefix):]]
        # First strip the inflectional suffix to recover a stem b.
        # Present participle -end: hengelend, knippend.
        if v.endswith("end"):
            b = v[:-3]
        # Past singular -de or -te: hengelde, knipte.
        elif v.endswith(("de", "det", "te", "tet")):
            b = v[:-2]
        # Past plural -den or -ten: hengelden, knipten.
        elif v.endswith(("chten"),):
            b = v[:-2]
        elif v.endswith(("den", "ten")) and len(v) > 3 and is_vowel(v[-4]):
            b = v[:-2]
        elif v.endswith(("den", "ten")):
            b = v[:-3]
        # Past participle ge- and -d or -t: gehengeld, geknipt.
        elif v.endswith(("d", "t")) and v.startswith("ge"):
            b = v[2:-1]
        # Present 2nd or 3rd singular: wordt, denkt, snakt, wacht.
        elif v.endswith(("cht"),):
            b = v
        elif v.endswith(("dt", "bt", "gt", "kt", "mt", "pt", "wt", "xt", "aait", "ooit")):
            b = v[:-1]
        elif v.endswith("t") and len(v) > 2 and not is_vowel(v[-2]):
            b = v[:-1]
        elif v.endswith("en") and len(v) > 3:
            # Already looks like an infinitive.
            return v
        else:
            b = v
        # Then restore the stem's spelling before re-attaching -en.
        # hengel => hengelen (and not hengellen)
        if len(b) > 2 and b.endswith(("el", "nder", "om", "tter")) and not is_vowel(b[-3]):
            pass
        # Long vowel followed by -f or -s: geef => geven.
        elif len(b) > 2 and not is_vowel(b[-1]) and is_vowel(b[-2]) and is_vowel(b[-3])\
                or b.endswith(("ijf", "erf"),):
            if b.endswith("f"):
                b = b[:-1] + "v"
            if b.endswith("s"):
                b = b[:-1] + "z"
            if b[-2] == b[-3]:
                b = b[:-2] + b[-1]
        # Short vowel followed by consonant: snak => snakken.
        elif len(b) > 1 and not is_vowel(b[-1]) and is_vowel(b[-2]) and not b.endswith(("er", "ig")):
            b = b + b[-1]
        b = b + "en"
        b = b.replace("vven", "ven")  # omgevven => omgeven
        b = b.replace("zzen", "zen")  # genezzen => genezen
        b = b.replace("aen", "aan")   # doorgaen => doorgaan
        return b

    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.
        """
        v = verb.lower()
        # Stem = infinitive minus -en.
        b = b0 = re.sub("en$", "", v)
        # zweven => zweef, graven => graaf
        if b.endswith("v"):
            b = b[:-1] + "f"
        if b.endswith("z"):
            b = b[:-1] + "s"
        # Vowels with a long sound are doubled, we need to guess how it sounds:
        if len(b) > 2 and not is_vowel(b[-1]) and is_vowel(b[-2]) and not is_vowel(b[-3]):
            if not v.endswith(("elen", "deren", "keren", "nderen", "tteren")):
                b = b[:-1] + b[-2] + b[-1]
        # pakk => pak
        if len(b) > 1 and not is_vowel(b[-1]) and b[-1] == b[-2]:
            b = b[:-1]
        # Present tense gets -t:
        sg = not b.endswith("t") and b + "t" or b
        # Past tense ending in a consonant in "xtc-koffieshop" gets -t,
        # otherwise -d:
        dt = b0 and b0[-1] in "xtckfshp" and "t" or (not b.endswith("d") and "d" or "")
        # Past tense -e and handle common irregular inflections:
        p = b + dt + "e"
        for suffix, irregular in (("erfde", "ierf"), ("ijfde", "eef"), ("ingde", "ong"), ("inkte", "onk")):
            if p.endswith(suffix):
                p = p[:-len(suffix)] + irregular
                break
        # Past participle: ge-:
        pp = re.sub("tt$", "t", "ge" + b + dt)
        pp = pp.startswith(("geop", "gein", "geaf")) and pp[
            2:4] + "ge" + pp[4:] or pp  # geopstart => opgestart
        pp = pp.startswith(("gever", "gebe", "gege")) and pp[2:] or pp
        # Slots: infinitive, stem, 2sg, 3sg, plural, gerund, past forms,
        # past plural, past participle.
        return [v, b, sg, sg, v, b0 + "end", p, p, p, b + dt + "en", p, pp]
# Shared inflection engine: a single Verbs instance backs the module-level
# conjugate / lemma / lexeme / tenses convenience functions.
verbs = Verbs()

conjugate, lemma, lexeme, tenses = \
    verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#### ATTRIBUTIVE & PREDICATIVE ###########################################
# Exceptions to the attributive rules below: predicative => attributive.
adjective_attributive = {
    "civiel": "civiele",
    "complex": "complexe",
    "enkel": "enkele",
    "grof": "grove",
    "half": "halve",
    "luttel": "luttele",
    "mobiel": "mobiele",
    "parijs": "parijse",
    "ruw": "ruwe",
    "simpel": "simpele",
    "stabiel": "stabiele",
    "steriel": "steriele",
    "subtiel": "subtiele",
    "teer": "tere"
}
def attributive(adjective):
    """For a predicative adjective, returns the attributive form (lowercase).
    In Dutch, the attributive is formed with -e: "fel" => "felle kritiek".
    Rules are applied in order; the first match wins.
    """
    w = adjective.lower()
    if w in adjective_attributive:
        return adjective_attributive[w]
    # Already ends in -e: unchanged.
    if w.endswith("e"):
        return w
    # Comparatives/superlatives simply take -e.
    if w.endswith(("er", "st")) and len(w) > 4:
        return w + "e"
    if w.endswith("ees"):
        return w[:-2] + w[-1] + "e"
    if w.endswith("el") and len(w) > 2 and not is_vowel(w[-3]):
        return w + "e"
    if w.endswith("ig"):
        return w + "e"
    # Long vowel before a final consonant (or -ij-): f/s voice to v/z and
    # the doubled vowel is reduced before adding -e (grof => grove).
    if len(w) > 2 and (not is_vowel(w[-1]) and is_vowel(w[-2]) and is_vowel(w[-3]) or w[:-1].endswith("ij")):
        if w.endswith("f"):
            w = w[:-1] + "v"
        if w.endswith("s"):
            w = w[:-1] + "z"
        if w[-2] == w[-3]:
            w = w[:-2] + w[-1]
    # Short vowel before a single consonant: the consonant doubles.
    elif len(w) > 1 and is_vowel(w[-2]) and w.endswith(tuple("bdfgklmnprst")):
        w = w + w[-1]
    return w + "e"
# Inverse of the attributive exceptions, plus adjectives whose attributive
# and predicative forms coincide.
adjective_predicative = dict((v, k) for k, v in adjective_attributive.items())
adjective_predicative.update({
    "moe": "moe",
    "taboe": "taboe",
    "voldoende": "voldoende"
})
def predicative(adjective):
    """Returns the predicative adjective (lowercase).
    In Dutch, the attributive form preceding a noun is common:
    "rake opmerking" => "raak", "straffe uitspraak" => "straf", "dwaze blik" => "dwaas".
    Rules are applied in order; the first match wins.
    """
    w = adjective.lower()
    if w in adjective_predicative:
        return adjective_predicative[w]
    # Superlatives/comparatives: drop the final -e.
    if w.endswith("ste"):
        return w[:-1]
    if w.endswith("ere"):
        return w[:-1]
    if w.endswith("bele"):
        return w[:-1]
    # -le/-ve/-ze after a long vowel: restore the doubled vowel and
    # devoice v/z back to f/s (grove => grof, dwaze => dwaas).
    if w.endswith("le") and len(w) > 2 and is_vowel(w[-3]) and not w.endswith(("eule", "oele")):
        return w[:-2] + w[-3] + "l"
    if w.endswith("ve") and len(w) > 2 and is_vowel(w[-3]) and not w.endswith(("euve", "oeve", "ieve")):
        return w[:-2] + w[-3] + "f"
    if w.endswith("ze") and len(w) > 2 and is_vowel(w[-3]) and not w.endswith(("euze", "oeze", "ieze")):
        return w[:-2] + w[-3] + "s"
    if w.endswith("ve"):
        return w[:-2] + "f"
    if w.endswith("ze"):
        return w[:-2] + "s"
    if w.endswith("e") and len(w) > 2:
        # felle => fel (undo consonant doubling).
        if not is_vowel(w[-2]) and w[-2] == w[-3]:
            return w[:-2]
        # tere => teer (restore the doubled long vowel).
        if len(w) > 3 and not is_vowel(w[-2]) and is_vowel(w[-3]) and w[-3] != "i" and not is_vowel(w[-4]):
            return w[:-2] + w[-3] + w[-2]
        return w[:-1]
    return w
|
shubhangiKishore/pattern
|
pattern/text/nl/inflect.py
|
Python
|
bsd-3-clause
| 16,372
|
[
"MOE"
] |
3c32a395d7bb370fa0d24269ea1d53a29ca532e60853eb2e96f9bfc61827f4d9
|
# -*- coding: utf-8 -*-
"""The status view."""
from __future__ import unicode_literals
import ctypes
import sys
import time
try:
import win32api
import win32console
except ImportError:
win32console = None
from dfvfs.lib import definitions as dfvfs_definitions
import plaso
from plaso.cli import tools
from plaso.cli import views
class StatusView(object):
    """Processing status view."""

    # Display modes: append status lines (linear) or redraw a full-screen
    # table in place (window).
    MODE_LINEAR = 'linear'
    MODE_WINDOW = 'window'

    # Human readable descriptions of the dfVFS source types.
    _SOURCE_TYPES = {
        dfvfs_definitions.SOURCE_TYPE_DIRECTORY: 'directory',
        dfvfs_definitions.SOURCE_TYPE_FILE: 'single file',
        dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE: (
            'storage media device'),
        dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE: (
            'storage media image')}

    # IEC binary prefixes for powers of 1024; the list index equals the
    # magnitude. Fix: the original sequence skipped 'PiB', which mislabeled
    # every size of one pebibyte and above (2^50 rendered as "EiB", etc.).
    _UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']

    # win32 console constants used to enable ANSI escape processing.
    _WINAPI_STD_OUTPUT_HANDLE = -11

    _WINAPI_ENABLE_PROCESSED_INPUT = 1
    _WINAPI_ENABLE_LINE_INPUT = 2
    _WINAPI_ENABLE_ECHO_INPUT = 4

    _WINAPI_ANSI_CONSOLE_MODE = (
        _WINAPI_ENABLE_PROCESSED_INPUT | _WINAPI_ENABLE_LINE_INPUT |
        _WINAPI_ENABLE_ECHO_INPUT)
def __init__(self, output_writer, tool_name):
    """Initializes a status view.

    Args:
      output_writer (OutputWriter): output writer.
      tool_name (str): name of the tool.
    """
    super(StatusView, self).__init__()
    self._artifact_filters = None
    self._filter_file = None
    # Assume ANSI support everywhere except the Windows console, where it
    # has to be probed below.
    self._have_ansi_support = not win32console
    self._mode = self.MODE_WINDOW
    self._output_writer = output_writer
    self._source_path = None
    self._source_type = None
    # Only a stdout writer supports in-place screen updates.
    self._stdout_output_writer = isinstance(
        output_writer, tools.StdoutOutputWriter)
    self._storage_file_path = None
    self._tool_name = tool_name

    if win32console:
        # Try to enable ANSI escape sequence processing on the Windows
        # console; SetConsoleMode returning 0 means the mode was rejected.
        kernel32 = ctypes.windll.kernel32
        stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE)
        result = kernel32.SetConsoleMode(
            stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE)
        self._have_ansi_support = result != 0
def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view):
    """Adds an analysis process status table row.

    Args:
      process_status (ProcessStatus): processing status.
      table_view (CLITabularTableView): table view.
    """
    def _FormatCountAndDelta(count, delta):
        # Render "total (delta)", or an empty cell when either is unknown.
        if count is None or delta is None:
            return ''
        return '{0:d} ({1:d})'.format(count, delta)

    used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)

    events = _FormatCountAndDelta(
        process_status.number_of_consumed_events,
        process_status.number_of_consumed_events_delta)
    event_tags = _FormatCountAndDelta(
        process_status.number_of_produced_event_tags,
        process_status.number_of_produced_event_tags_delta)
    reports = _FormatCountAndDelta(
        process_status.number_of_produced_reports,
        process_status.number_of_produced_reports_delta)

    table_view.AddRow([
        process_status.identifier, process_status.pid, process_status.status,
        used_memory, events, event_tags, reports])
def _AddExtractionProcessStatusTableRow(self, process_status, table_view):
    """Adds an extraction process status table row.

    Args:
      process_status (ProcessStatus): processing status.
      table_view (CLITabularTableView): table view.
    """
    def _FormatCountAndDelta(count, delta):
        # Render "total (delta)", or an empty cell when either is unknown.
        if count is None or delta is None:
            return ''
        return '{0:d} ({1:d})'.format(count, delta)

    used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)

    sources = _FormatCountAndDelta(
        process_status.number_of_produced_sources,
        process_status.number_of_produced_sources_delta)
    events = _FormatCountAndDelta(
        process_status.number_of_produced_events,
        process_status.number_of_produced_events_delta)

    # TODO: shorten display name to fit in 80 chars and show the filename.
    table_view.AddRow([
        process_status.identifier, process_status.pid, process_status.status,
        used_memory, sources, events, process_status.display_name])
def _ClearScreen(self):
    """Clears the terminal/console screen."""
    if self._have_ansi_support:
        # ANSI escape sequence to clear screen.
        self._output_writer.Write('\033[2J')
        # ANSI escape sequence to move cursor to top left.
        self._output_writer.Write('\033[H')

    elif win32console:
        # This version of Windows cmd.exe does not support ANSI escape codes, thus
        # instead we fill the console screen buffer with spaces. The downside of
        # this approach is an annoying flicker.
        top_left_coordinate = win32console.PyCOORDType(0, 0)
        screen_buffer = win32console.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
        screen_buffer_information = screen_buffer.GetConsoleScreenBufferInfo()

        screen_buffer_attributes = screen_buffer_information['Attributes']
        screen_buffer_size = screen_buffer_information['Size']
        console_size = screen_buffer_size.X * screen_buffer_size.Y

        # Overwrite both the characters and their attributes, then home
        # the cursor so the next update starts at the top left.
        screen_buffer.FillConsoleOutputCharacter(
            ' ', console_size, top_left_coordinate)
        screen_buffer.FillConsoleOutputAttribute(
            screen_buffer_attributes, console_size, top_left_coordinate)
        screen_buffer.SetConsoleCursorPosition(top_left_coordinate)

    # TODO: remove update flicker. For win32console we could set the cursor
    # top left, write the table, clean the remainder of the screen buffer
    # and set the cursor at the end of the table.
def _FormatSizeInUnitsOf1024(self, size):
    """Represents a number of bytes in units of 1024.

    Args:
      size (int): size in bytes.

    Returns:
      str: human readable string of the size.
    """
    magnitude_1024 = 0
    used_memory_1024 = float(size)
    while used_memory_1024 >= 1024:
        used_memory_1024 /= 1024
        magnitude_1024 += 1

    # Bound by the label table length instead of a hard-coded constant so
    # the method stays correct if units are added to _UNITS_1024.
    # A magnitude of 0 (or beyond the table) falls through to an exact
    # byte count.
    if 0 < magnitude_1024 < len(self._UNITS_1024):
        return '{0:.1f} {1:s}'.format(
            used_memory_1024, self._UNITS_1024[magnitude_1024])

    return '{0:d} B'.format(size)
def _FormatProcessingTime(self, processing_status):
    """Formats the processing time.

    Args:
      processing_status (ProcessingStatus): processing status.

    Returns:
      str: processing time formatted as: "5 days, 12:34:56".
    """
    elapsed = 0
    if processing_status:
        elapsed = time.time() - processing_status.start_time

    # Successively split off seconds, minutes and hours.
    remainder, seconds = divmod(int(elapsed), 60)
    remainder, minutes = divmod(remainder, 60)
    days, hours = divmod(remainder, 24)

    if days == 0:
        days_string = ''
    elif days == 1:
        days_string = '1 day, '
    else:
        days_string = '{0:d} days, '.format(days)

    return '{0:s}{1:02d}:{2:02d}:{3:02d}'.format(
        days_string, hours, minutes, seconds)
def _PrintAnalysisStatusHeader(self, processing_status):
    """Prints the analysis status header.

    Args:
      processing_status (ProcessingStatus): processing status.
    """
    storage_line = 'Storage file\t\t: {0:s}\n'.format(self._storage_file_path)
    self._output_writer.Write(storage_line)

    processing_time = self._FormatProcessingTime(processing_status)
    time_line = 'Processing time\t\t: {0:s}\n'.format(processing_time)
    self._output_writer.Write(time_line)

    # Event totals are only shown once an events status has been reported.
    if processing_status and processing_status.events_status:
        self._PrintEventsStatus(processing_status.events_status)

    self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
"""Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_consumed_events)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_consumed_events)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
  def _PrintAnalysisStatusUpdateWindow(self, processing_status):
    """Prints an analysis status update in window mode.

    Clears the screen and redraws the header and the full process status
    table on every update.

    Args:
      processing_status (ProcessingStatus): processing status.
    """
    if self._stdout_output_writer:
      self._ClearScreen()

    output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
        self._tool_name, plaso.__version__)
    self._output_writer.Write(output_text)

    self._PrintAnalysisStatusHeader(processing_status)

    # The foreman row is added first so it appears above the worker rows.
    table_view = views.CLITabularTableView(column_names=[
        'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags',
        'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])

    self._AddsAnalysisProcessStatusTableRow(
        processing_status.foreman_status, table_view)

    for worker_status in processing_status.workers_status:
      self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)

    table_view.Write(self._output_writer)
    self._output_writer.Write('\n')

    if processing_status.aborted:
      self._output_writer.Write(
          'Processing aborted - waiting for clean up.\n\n')

    if self._stdout_output_writer:
      # We need to explicitly flush stdout to prevent partial status updates.
      sys.stdout.flush()
def _PrintExtractionStatusUpdateLinear(self, processing_status):
"""Prints an extraction status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_produced_events,
processing_status.foreman_status.display_name)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_produced_events,
worker_status.display_name)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
  def _PrintExtractionStatusUpdateWindow(self, processing_status):
    """Prints an extraction status update in window mode.

    Clears the screen and redraws the header and the full process status
    table on every update.

    Args:
      processing_status (ProcessingStatus): processing status.
    """
    if self._stdout_output_writer:
      self._ClearScreen()

    output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
        self._tool_name, plaso.__version__)
    self._output_writer.Write(output_text)

    self.PrintExtractionStatusHeader(processing_status)

    # The foreman row is added first so it appears above the worker rows.
    table_view = views.CLITabularTableView(column_names=[
        'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events',
        'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])

    self._AddExtractionProcessStatusTableRow(
        processing_status.foreman_status, table_view)

    for worker_status in processing_status.workers_status:
      self._AddExtractionProcessStatusTableRow(worker_status, table_view)

    table_view.Write(self._output_writer)
    self._output_writer.Write('\n')

    if processing_status.aborted:
      self._output_writer.Write(
          'Processing aborted - waiting for clean up.\n\n')

    # TODO: remove update flicker. For win32console we could set the cursor
    # top left, write the table, clean the remainder of the screen buffer
    # and set the cursor at the end of the table.
    if self._stdout_output_writer:
      # We need to explicitly flush stdout to prevent partial status updates.
      sys.stdout.flush()
def _PrintEventsStatus(self, events_status):
"""Prints the status of the events.
Args:
events_status (EventsStatus): events status.
"""
if events_status:
table_view = views.CLITabularTableView(
column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates',
'MACB grouped', 'Total'],
column_sizes=[15, 15, 15, 15, 15, 0])
table_view.AddRow([
'', events_status.number_of_filtered_events,
events_status.number_of_events_from_time_slice,
events_status.number_of_duplicate_events,
events_status.number_of_macb_grouped_events,
events_status.total_number_of_events])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def _PrintTasksStatus(self, processing_status):
"""Prints the status of the tasks.
Args:
processing_status (ProcessingStatus): processing status.
"""
if processing_status and processing_status.tasks_status:
tasks_status = processing_status.tasks_status
table_view = views.CLITabularTableView(
column_names=['Tasks:', 'Queued', 'Processing', 'Merging',
'Abandoned', 'Total'],
column_sizes=[15, 7, 15, 15, 15, 0])
table_view.AddRow([
'', tasks_status.number_of_queued_tasks,
tasks_status.number_of_tasks_processing,
tasks_status.number_of_tasks_pending_merge,
tasks_status.number_of_abandoned_tasks,
tasks_status.total_number_of_tasks])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def GetAnalysisStatusUpdateCallback(self):
"""Retrieves the analysis status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintAnalysisStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintAnalysisStatusUpdateWindow
return None
def GetExtractionStatusUpdateCallback(self):
"""Retrieves the extraction status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintExtractionStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintExtractionStatusUpdateWindow
return None
# TODO: refactor to protected method.
def PrintExtractionStatusHeader(self, processing_status):
"""Prints the extraction status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Source path\t\t: {0:s}\n'.format(self._source_path))
self._output_writer.Write(
'Source type\t\t: {0:s}\n'.format(self._source_type))
if self._artifact_filters:
artifacts_string = ', '.join(self._artifact_filters)
self._output_writer.Write('Artifact filters\t: {0:s}\n'.format(
artifacts_string))
if self._filter_file:
self._output_writer.Write('Filter file\t\t: {0:s}\n'.format(
self._filter_file))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
self._PrintTasksStatus(processing_status)
self._output_writer.Write('\n')
def PrintExtractionSummary(self, processing_status):
"""Prints a summary of the extraction.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
self._output_writer.Write(
'WARNING: missing processing status information.\n')
elif not processing_status.aborted:
if processing_status.error_path_specs:
self._output_writer.Write('Processing completed with errors.\n')
else:
self._output_writer.Write('Processing completed.\n')
number_of_warnings = (
processing_status.foreman_status.number_of_produced_warnings)
if number_of_warnings:
output_text = '\n'.join([
'',
('Number of warnings generated while extracting events: '
'{0:d}.').format(number_of_warnings),
'',
'Use pinfo to inspect warnings in more detail.',
''])
self._output_writer.Write(output_text)
if processing_status.error_path_specs:
output_text = '\n'.join([
'',
'Path specifications that could not be processed:',
''])
self._output_writer.Write(output_text)
for path_spec in processing_status.error_path_specs:
self._output_writer.Write(path_spec.comparable)
self._output_writer.Write('\n')
self._output_writer.Write('\n')
  def SetMode(self, mode):
    """Sets the status view mode.

    Args:
      mode (str): status view mode; compared against MODE_LINEAR and
          MODE_WINDOW when selecting the status update callback.
    """
    self._mode = mode
def SetSourceInformation(
self, source_path, source_type, artifact_filters=None, filter_file=None):
"""Sets the source information.
Args:
source_path (str): path of the source.
source_type (str): source type.
artifact_filters (Optional[list[str]]): names of artifact definitions to
use as filters.
filter_file (Optional[str]): filter file.
"""
self._artifact_filters = artifact_filters
self._filter_file = filter_file
self._source_path = source_path
self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')
  def SetStorageFileInformation(self, storage_file_path):
    """Sets the storage file information.

    Args:
      storage_file_path (str): path to the storage file; printed by
          _PrintAnalysisStatusHeader.
    """
    self._storage_file_path = storage_file_path
|
rgayon/plaso
|
plaso/cli/status_view.py
|
Python
|
apache-2.0
| 18,822
|
[
"NAMD"
] |
ff8283f53978afaee1818f937c75c8babc0a640268f3f3bba9ce927a0d39242c
|
from math import exp, pi, sin, sqrt, cos, acos
import numpy as np
from ase.data import atomic_numbers
# Table (1) of
# D. WAASMAIER AND A. KIRFEL, Acta Cryst. (1995). A51, 416-431
waasmaier = {
# a1 b1 a2 b2 a3 b3 a4 b4 a5 b5 c
'C' : [2.657506, 14.780758, 1.078079, 0.776775, 1.490909, 42.086843, -4.241070, -0.000294, 0.713791, 0.239535, 4.297983],
'S' : [6.372157, 1.514347, 5.154568, 22.092528, 1.473732, 0.061373, 1.635073, 55.445176, 1.209372, 0.646925, 0.154722],
'Pd': [6.121511, 0.062549, 4.784063, 0.784031, 16.631683, 8.751391, 4.318258, 34.489983, 13.246773, 0.784031, 0.883099],
'Ag': [6.073874, 0.055333, 17.155437, 7.896512, 4.173344, 28.443739, 0.852238, 110.376108, 17.988685, 0.716809, 0.756603],
'Au': [16.777389, 0.122737, 19.317156, 8.621570, 32.979682, 1.256902, 5.595453, 38.008821, 10.576854, 0.000601, -6.279078],
'P' : [1.950541, 0.908139, 4.146930, 27.044953, 1.494560, 0.071280, 1.522042, 67.520190, 5.729711, 1.981173, 0.155233],
'Cl': [1.446071, 0.052357, 6.870609, 1.193165, 6.151801, 18.343416, 1.750347, 46.398394, 0.634168, 0.401005, 0.146773],
}
class XrDebye:
    """Powder x-ray diffraction (XRD) spectra via the Debye formula."""

    def __init__(self, wavelength, alpha=1.01, damping=0.04, warn=True,
                 method='Iwasa'):
        """
        Obtain powder x-ray spectra.

        wavelength in Angstrom
        damping in Angstrom**2
        """
        self.wavelength = wavelength
        self.damping = damping
        self.alpha = alpha
        self.warn = warn
        self.method = method

    def set_damping(self, damping):
        # Update the thermal damping factor (Angstrom**2).
        self.damping = damping

    def get(self, atoms, s):
        """Get the powder x-ray (XRD) pattern using the Debye-Formula.

        After: T. Iwasa and K. Nobusada, J. Phys. Chem. C 111 (2007) 45

        s is assumed to be in 1/Angstrom
        """
        sinth = self.wavelength * s / 2.
        costh = sqrt(1. - sinth**2)
        cos2th = cos(2. * acos(costh))

        # Thermal damping prefactor; Iwasa's method adds a polarization term.
        pre = exp(- self.damping * s**2 / 2)
        if self.method == 'Iwasa':
            pre *= costh / (1. + self.alpha * cos2th**2)

        f = {}

        def atomic(symbol):
            # Cache the atomic scattering factor per element symbol.
            # BUG FIX: dict.has_key() was removed in Python 3; use 'in'.
            if symbol not in f:
                if self.method == 'Iwasa':
                    f[symbol] = self.get_waasmaier(symbol, s)
                else:
                    f[symbol] = atomic_numbers[symbol]
            return f[symbol]

        def sinc(x):
            # Series expansion near zero avoids division by zero.
            if x < 1.e-6:
                x2 = x * x
                return 1 - x2 / 6. + x2 * x2 / 120.
            else:
                return sin(x) / x

        I = 0.
        for a in atoms:
            fa = atomic(a.symbol)
            for b in atoms:
                fb = atomic(b.symbol)
                if a == b:
                    # BUG FIX: the original assigned 'twopis' here, leaving
                    # 'twopisr' undefined (or stale from a previous pair) for
                    # the diagonal a == b terms.
                    twopisr = 0.
                else:
                    vrij = a.position - b.position
                    rij = np.sqrt(np.dot(vrij, vrij))
                    twopisr = 2 * pi * s * rij
                I += fa * fb * sinc(twopisr)
        return pre * I

    def get_waasmaier(self, symbol, s):
        """Scattering factor for free atoms.

        Returns 0 for hydrogen (not implemented) and for elements missing
        from the Waasmaier-Kirfel table.
        """
        if symbol == 'H':
            # XXXX implement analytical H
            return 0
        # BUG FIX: dict.has_key() was removed in Python 3; use 'in'.
        elif symbol in waasmaier:
            abc = waasmaier[symbol]
            f = abc[10]
            s2 = s*s
            # Five Gaussian terms plus the constant c (indices per table).
            for i in range(5):
                f += abc[2 * i] * exp(-abc[2 * i + 1] * s2)
            return f
        if self.warn:
            # BUG FIX: print statement converted to a function call so the
            # module imports under Python 3 (single-argument form keeps the
            # Python 2 output identical).
            print('<xrdebye::get_atomic> Element %s not available' % symbol)
        return 0
|
grhawk/ASE
|
tools/ase/xrdebye.py
|
Python
|
gpl-2.0
| 3,657
|
[
"ASE"
] |
4e671a1f15d34c7337d933ce77073856d37d50fbae5e138fc718a9c276426a84
|
# -*- coding: utf-8 -*-
'''
This module provides an access to HITRAN data.
Data is downloaded and cached.
This module serves as a simple database manager frontend.
API is aimed to be RESTful, which means that interaction
between local API and remote data-server will be held
via sending RESTful queries (API->remote) and
receiving data preferably in text format (remote->API)
Objects are supposed to be implemented as structures/dicts
as they present in almost any programming language
Trying to retain functional style for this API.
'''
import httplib
import urllib2
import json
import os, os.path
import re
from os import listdir
from numpy import zeros,array,zeros,where,setdiff1d,ndarray,arange
from numpy import complex128,complex64,int64,int32,float64,float32
from numpy import sqrt,abs,exp,pi,log,sin,cos
from numpy import convolve
#from numpy import linspace
from numpy import any,minimum,maximum
from numpy import modf
from numpy import sort as npsort
from bisect import bisect
#from collections import OrderedDict
from warnings import warn
from urllib2 import HTTPError,URLError
import pydoc
HAPI_VERSION = '1.0'
# version header
print('HAPI VERSION: %s' % HAPI_VERSION)
# define precision
__ComplexType__ = complex128
__IntegerType__ = int64
__FloatType__ = float64
# define zero
cZero = __FloatType__(0.)
# physical constants
cBolts = 1.380648813E-16 # erg/K, CGS
cc = 2.99792458e10 # cm/s, CGS
hh = 6.626196e-27 # erg*s, CGS
# computational constants
cSqrtLn2divSqrtPi = 0.469718639319144059835
cLn2 = 0.6931471805599
cSqrtLn2 = 0.8325546111577
cSqrt2Ln2 = 1.1774100225
# define float range
def frange(x, y, step):
    """Generate floats from x up to (but excluding) y with the given step."""
    value = x
    while value < y:
        yield value
        value += step
# declare global variables
GLOBAL_DEBUG = False
GLOBAL_CURRENT_DIR ='.'
GLOBAL_HITRAN_APIKEY = 'e20e4bd3-e12c-4931-99e0-4c06e88536bd'
GLOBAL_USER = 'user'
GLOBAL_REQUISITES = []
GLOBAL_CONNECTION = []
GLOBAL_DATABASE = 'hitran'
LOCAL_HOST = 'localhost'
# DEBUG switch
if GLOBAL_DEBUG:
GLOBAL_HOST = LOCAL_HOST+':8000' # localhost
else:
GLOBAL_HOST = 'http://hitran.org'
# this is a backup url in the case GLOBAL_HOST does not work
GLOBAL_HOST_BACKUP = 'http://hitranazure.cloudapp.net/'
# interface for checking a variable's existence
def empty(Instance):
    # Truthiness test; note it returns True for NON-empty (truthy) values.
    return bool(Instance)
# general interface for getattr
def getAttribute(Object, Attribute):
    # Generic attribute getter: thin wrapper around built-in getattr.
    value = getattr(Object, Attribute)
    return value
# general interface for setattr
def setAttribute(Object, Attribute, Value):
    # Generic attribute setter: thin wrapper around built-in setattr.
    setattr(Object, Attribute, Value)
# UNPARSED QUERY OBJECT
# uses formal language (SQL, noSQL, custom...)
GlobalQueryString = ''
# PARSED QUERY OBJECT
# = prototype for a Query instance
# there should be a getAttrbute/setSettribute functions defined
# For Django: Query=QuerySet (as an example)
Query = {}
# prototype for cache storage
# there must be function for record/retrieve
# caching is performed by the value of Query
# cache parameters: (query+table_name)
# if there is already table with such query, copy it
# if there is already tble with such query AND table_name,
# return it as is => IT MAY DEPEND ON CERTAIN QUERY TYPE!!
TABLES = {} # hash/dictionary
# ---------- CONNECTION MANAGEMENT-------------
# interface for establishing HTTP connection
# can return object/structure/handle
def setupConnection(Host=GLOBAL_HOST):
    # Establish an HTTP connection to Host and return the connection handle.
    # NOTE(review): `empty` returns True for a truthy value, so with a
    # successfully created connection this takes the `else` branch and
    # raises -- the condition looks inverted; confirm intended behavior.
    # NOTE(review): 'can''t' is adjacent-literal concatenation and renders
    # as "cant setup connection" at runtime.
    Connection = httplib.HTTPConnection(Host)
    if not empty(Connection):
        return Connection
    else:
        raise Exception('can''t setup connection')
# interface for HTTP-get method
# Connection must be established before use
def httpGet(URL,Connection=GLOBAL_CONNECTION):
    # Issue an HTTP GET for URL on an already-established connection.
    # NOTE(review): httplib's HTTPConnection.request() returns None; the
    # response object would come from Connection.getresponse() -- so
    # ServerResponse is presumably always None here; confirm.
    Method = 'get'
    ServerResponse = Connection.request(Method,URL)
    return ServerResponse
# parse local data language to remote frontend
# !!!!!!!!!
def parseToFrontend(Query,Host=GLOBAL_HOST):
    # Convert a Query object into the server frontend's query language.
    # Unimplemented stub: currently always returns None.
    pass
def prepareURL(Query,Connection=GLOBAL_CONNECTION):
    # Build the full request URL from the connection's host and the parsed
    # query, considering the server frontend's query language.
    # NOTE(review): parseToFrontend is an unimplemented stub returning None,
    # so the concatenation below raises TypeError until it is implemented.
    Host = getAttribute(Connection,'host')
    HostQuery = parseToFrontend(Query)
    URL = Host+HostQuery
    return URL
# stream raw data from the server
# the data is assumed to be very large that
# ordinary get is inefficient
def streamRawDataRemote(Query,Connection=GLOBAL_CONNECTION):
    # Stream raw data from the server (for payloads too large for a plain
    # GET). Unimplemented stub: currently returns None.
    pass
# collect raw data in whatever format server gives it
def getRawDataRemote(Query,Connection=GLOBAL_CONNECTION):
    # Fetch raw data in whatever format the server provides.
    # NOTE(review): httpGet currently returns the result of
    # Connection.request(), which is None in httplib -- confirm.
    URL = prepareURL(Query,Connection)
    ServerResponse=httpGet(URL,Connection)
    return ServerResponse
## parse raw data
#def parseRawData(RawData)
# pass
# ---------- CONNECTION MANAGEMEND END --------
# Two types of interaction between API and DB:
# 1) via API library
# 2) via REST http protocol (torrent-like)
# ---------- NODE MANAGEMENT ------------------
# An interface for a node manager will follow soon.
# This is an implementation in Python
# Different implementations are language-specific.
# default node with simple DB engine
# Prototype for a global nodelist for a given host
# each node has it's unique ID, host name and
# node name within it's host
NODE_NAME = 'local'
GLOBAL_NODENAMES = {
0 : 'hitran-main',
1 : 'local'
}
GLOBAL_NODELIST = {
0 : { # main HITRAN node
'host' : GLOBAL_HOST,
'ACCESS_KEY' : '9b6a7975-2a84-43d8-920e-f4dea9db6805' # guest
},
1 : { # local node prototype
'host' : LOCAL_HOST,
'ACCESS_KEY' : '6cfd7040-24a6-4197-81f9-6e25e50005b2', # admin
}
}
def createNode(NodeID,NodeList=GLOBAL_NODELIST):
    # Create an empty node entry for NodeID; raise if it already exists.
    node = NodeList.get(NodeID)
    if node:
        # BUG FIX: the original formatted the undefined name 'NodeName'
        # here, turning the intended error into a NameError.
        raise Exception('node %s already exists' % NodeID)
    NodeList[NodeID] = {}
def getNodeIDs(NodeList=GLOBAL_NODELIST):
    # return list of all available nodes
    # Returns the node identifiers (the registry's dict keys).
    return NodeList.keys()
def getNodeProperty(NodeID,PropName,NodeList=GLOBAL_NODELIST):
    # Get a property of node NodeID; raise if the node or property is
    # missing.
    # BUG FIX: the original looked the node up with the undefined name
    # 'NodeName', and used the undefined names 'ModeName', 'Propname' and
    # 'Nodename' in the error messages -- all NameErrors. The parameter is
    # NodeID.
    node = NodeList.get(NodeID)
    if node:
        prop = node.get(PropName)
        if prop:
            return prop
        else:
            raise Exception('node %s doesn''t have property %s' % (NodeID,PropName) )
    else:
        raise Exception('no such node %s' % NodeID)
def setNodeProperty(NodeID,PropName,PropValue,NodeList=GLOBAL_NODELIST):
    # Set a property on node NodeID; raise if the node does not exist.
    # The property is created if it does not yet exist.
    node = NodeList.get(NodeID)
    if not node:
        # BUG FIX: the original formatted the undefined name 'NodeName'.
        raise Exception('no such node %s ' % NodeID)
    # BUG FIX: the original wrote the property onto the NodeList registry
    # itself (NodeList[PropName] = PropValue) instead of the node's dict.
    node[PropName] = PropValue
    return
def resolveNodeID(NodeName,NodeNames=GLOBAL_NODENAMES):
    # Map a node name to its identifier; returns None when not found.
    for node_id, name in NodeNames.items():
        if name == NodeName:
            return node_id
def checkAccess(DBName,TableName,NodeName,UserName,Requisites,NodeList=GLOBAL_NODELIST,NodeNames=GLOBAL_NODENAMES):
    # simple node-level authentication (bridge to AUTH system)
    # NOTE(review): assumes Requisites has a 'key' attribute and that node
    # entries carry a 'keys_allowed' collection; neither is defined in this
    # module (node dicts here only define 'host' and 'ACCESS_KEY') --
    # confirm against the intended node schema.
    NodeID = resolveNodeID(NodeName,NodeNames)
    Node = NodeList[NodeID]
    if Requisites.key in Node['keys_allowed']:
        return True
    else:
        return False
# ---------- NODE MANAGEMENT END --------------
# ---------- NODE AUTH SYSTEM -----------------
# AUTH SYSTEM is tightly connected to Node manager.
# Prototype for authentication system.
# AUTH is responsible for giving an access privileges to all users.
# Each users has a key ACCESS_KEY which is stored in
# a special database HOST:ACCESS_KEYS on a host.
# Every node has a separate privileges list connected with
# each key. Auth system
# The current auth system is based on secret keys of access
# Default key is 'admin', it's created seamlessly for a local admin.
# Prototype for key storage
# RECONSIDER THIS LATER !!!
GLOBAL_PRIVILEGES = {
'admin' : {
'ACCESS_KEY' : '6cfd7040-24a6-4197-81f9-6e25e50005b2',
'LEVEL' : 'ADMIN'
},
'guest' : {
'ACCESS_KEY' : '9b6a7975-2a84-43d8-920e-f4dea9db6805',
'LEVEL' : 'USER'
}
}
def addUser():
    # Unimplemented stub: create a user entry in the privileges registry.
    pass
def deleteUser():
    # Unimplemented stub: remove a user entry from the privileges registry.
    pass
def authenticate(UserName,Requisites,Privileges=GLOBAL_PRIVILEGES):
    # Authentication: check Requisites.AccessKey against the access keys of
    # all registered users.
    # BUG FIX: the original iterated over 'Privileges.keys' -- the bound
    # method object itself, a TypeError -- instead of the dict's keys.
    key_list = [Privileges[User]['ACCESS_KEY'] for User in Privileges]
    return True if Requisites.AccessKey in key_list else False
def checkPrivileges(Path,UserName=GLOBAL_USER,Requisites=GLOBAL_REQUISITES,
                    Privileges=GLOBAL_PRIVILEGES,NodeList=GLOBAL_NODELIST,Nodenames=GLOBAL_NODENAMES):
    # Privileges are checked before executing every query (needs optimization)
    # Path example: SOME_DB::SOME_TABLE::SOME_NODE
    if not authenticate(UserName,Requisites,Privileges): return False
    (DBName,TableName,NodeName)=Path.split('::')
    # loop on all nodes , use NODE_MANAGER's functions instead of
    # working with GLOBAL_NODELIST directly
    # BUG FIX: the original passed the undefined name 'NodeNames'; the
    # parameter is spelled 'Nodenames' (kept to preserve the interface).
    if not checkAccess(DBName,TableName,NodeName,UserName,Requisites,NodeList,Nodenames):
        return False
    return True
# ---------- NODE AUTH SYSTEM END -------------
# ---------- DATABASE FRONTEND ----------------
# Structure:
# DB::TABLE::NODE
# DB - distributed database
# TABLE - table within the current database
# NODE - instance of this API with fixed DB backend
# !! parameter HOST is deprecated
# HOST - computer at which the NODE/ENGINE is deployed
# NODE or ENGINE ?
# TABLE should be considered as schema-free collection
# (e.g. MongoDB-type)
###? Two databases (DB) - GLOBAL (one) and LOCAL (many)
# Every DB has an ACCESS_KEY providing an access to it
# User can create a database and it will contain
# a list of ACCESS_KEY's for authentication.
###? GLOBAL AND LOCAL are distributed databases.
###? A user can create his GLOBAL database and open an access to it.
###? GLOBAL access implementation:
###? GLOBAL is a dustributed database
###? LOCAL is not a distributed database
# The DB frontend contains interfaces to
# the standard procedures of data creation and
# retrieval of an "average" DBMS.
# ("collection" = table)
#
# Levels of access: (DB permissions implementation)
# 0:USER read-only operations ("select")
# 1:MANAGER manage single DB (create/delete docs)
# 2:ADMIN manage multiple DB's (create/delete DB)
#
# Every ACCESS_KEY has it's own access level.
#
# Commands to implement:
#
# ) create DATABASE
# ) create ACCESS_KEY
# (seamlessly for the local user)
# ) select from LOCAL/GLOBAL doc (cached!)
# ) access database
# (seamlessly for the local user)
# ) create/delete doc
# ) copy/clone LOCAL doc
# ) "create collection as select * from HOST:ENGINE:DB:COLLECTION"
# (other types of table creations are forbidden)
# ATTENTION:
# DB frontend is adapted to denormalized
# schema-fixed tables or schema-independent documents.
# DB frontend is connected to multiple backends
# which are largely language-specific.
###? ATTENTION: since the system is distributed,
###? the table/document caching is supposed to
###? be in the frontend.
###? Current higher-level implementation
###? implies the query-based caching, i.e.
###? cache lookup is performed by the value
###? of Query structure/object.
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# LOCAL DATABASE MANAGEMENT SYSTEM
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# DATABASE BACKEND: simple text files, parsed into a python lists
# Use a directory as a database. Each table is stored in a
# separate text file. Parameters in text are position-fixed.
#BACKEND_DATABASE_NAME_DEFAULT = 'data'
BACKEND_DATABASE_NAME_DEFAULT = '.'
VARIABLES = {}
VARIABLES['BACKEND_DATABASE_NAME'] = BACKEND_DATABASE_NAME_DEFAULT
# For this node local DB is schema-dependent!
LOCAL_TABLE_CACHE = {
'sampletab' : { # table
'header' : { # header
'order' : ('column1','column2','column3'),
'format' : {
'column1' : '%10d',
'column2' : '%20f',
'column3' : '%30s'
},
'default' : {
'column1' : 0,
'column2' : 0.0,
'column3' : ''
},
'number_of_rows' : 3,
'size_in_bytes' : None,
'table_name' : 'sampletab',
'table_type' : 'strict'
}, # /header
'data' : {
'column1' : [1,2,3],
'column2' : [10.5,11.5,12.5],
'column3' : ['one','two','three']
}, # /data
} # /table
} # hash-map of tables
# FORMAT CONVERSION LAYER
# converts between TRANSPORT_FORMAT and OBJECT_FORMAT
HITRAN_FORMAT_160 = {
'M' : {'pos' : 1, 'len' : 2, 'format' : '%2d' },
'I' : {'pos' : 3, 'len' : 1, 'format' : '%1d' },
'nu' : {'pos' : 4, 'len' : 12, 'format' : '%12f'},
'S' : {'pos' : 16, 'len' : 10, 'format' : '%10f'},
'R' : {'pos' : 26, 'len' : 0, 'format' : '%0f' },
'A' : {'pos' : 26, 'len' : 10, 'format' : '%10f'},
'gamma_air' : {'pos' : 36, 'len' : 5, 'format' : '%5f' },
'gamma_self' : {'pos' : 41, 'len' : 5, 'format' : '%5f' },
'E_' : {'pos' : 46, 'len' : 10, 'format' : '%10f'},
'n_air' : {'pos' : 56, 'len' : 4, 'format' : '%4f' },
'delta_air' : {'pos' : 60, 'len' : 8, 'format' : '%8f' },
'V' : {'pos' : 68, 'len' : 15, 'format' : '%15s'},
'V_' : {'pos' : 83, 'len' : 15, 'format' : '%15s'},
'Q' : {'pos' : 98, 'len' : 15, 'format' : '%15s'},
'Q_' : {'pos' : 113, 'len' : 15, 'format' : '%15s'},
'Ierr' : {'pos' : 128, 'len' : 6, 'format' : '%6s' },
'Iref' : {'pos' : 134, 'len' : 12, 'format' : '%12s'},
'flag' : {'pos' : 146, 'len' : 1, 'format' : '%1s' },
'g' : {'pos' : 147, 'len' : 7, 'format' : '%7f' },
'g_' : {'pos' : 154, 'len' : 7, 'format' : '%7f' }
}
# This should be generating from the server's response
HITRAN_DEFAULT_HEADER = {
"table_type": "column-fixed",
"size_in_bytes": -1,
"table_name": "###",
"number_of_rows": -1,
"order": [
"molec_id",
"local_iso_id",
"nu",
"sw",
"a",
"gamma_air",
"gamma_self",
"elower",
"n_air",
"delta_air",
"global_upper_quanta",
"global_lower_quanta",
"local_upper_quanta",
"local_lower_quanta",
"ierr",
"iref",
"line_mixing_flag",
"gp",
"gpp"
],
"format": {
"a": "%10.3E",
"gamma_air": "%5.4f",
"gp": "%7.1f",
"local_iso_id": "%1d",
"molec_id": "%2d",
"sw": "%10.3E",
"local_lower_quanta": "%15s",
"local_upper_quanta": "%15s",
"gpp": "%7.1f",
"elower": "%10.4f",
"n_air": "%4.2f",
"delta_air": "%8.6f",
"global_upper_quanta": "%15s",
"iref": "%12s",
"line_mixing_flag": "%1s",
"ierr": "%6s",
"nu": "%12.6f",
"gamma_self": "%5.3f",
"global_lower_quanta": "%15s"
},
"default": {
"a": 0.0,
"gamma_air": 0.0,
"gp": "FFF",
"local_iso_id": 0,
"molec_id": 0,
"sw": 0.0,
"local_lower_quanta": "000",
"local_upper_quanta": "000",
"gpp": "FFF",
"elower": 0.0,
"n_air": 0.0,
"delta_air": 0.0,
"global_upper_quanta": "000",
"iref": "EEE",
"line_mixing_flag": "EEE",
"ierr": "EEE",
"nu": 0.0,
"gamma_self": 0.0,
"global_lower_quanta": "000"
},
"description": {
"a": "Einstein A-coefficient in s-1",
"gamma_air": "Air-broadened Lorentzian half-width at half-maximum at p = 1 atm and T = 296 K",
"gp": "Upper state degeneracy",
"local_iso_id": "Integer ID of a particular Isotopologue, unique only to a given molecule, in order or abundance (1 = most abundant)",
"molec_id": "The HITRAN integer ID for this molecule in all its isotopologue forms",
"sw": "Line intensity, multiplied by isotopologue abundance, at T = 296 K",
"local_lower_quanta": "Rotational, hyperfine and other quantum numbers and labels for the lower state of a transition",
"local_upper_quanta": "Rotational, hyperfine and other quantum numbers and labels for the upper state of a transition",
"gpp": "Lower state degeneracy",
"elower": "Lower-state energy",
"n_air": "Temperature exponent for the air-broadened HWHM",
"delta_air": "Pressure shift induced by air, referred to p=1 atm",
"global_upper_quanta": "Electronic and vibrational quantum numbers and labels for the upper state of a transition",
"iref": "Ordered list of reference identifiers for transition parameters",
"line_mixing_flag": "A flag indicating the presence of additional data and code relating to line-mixing",
"ierr": "Ordered list of indices corresponding to uncertainty estimates of transition parameters",
"nu": "Transition wavenumber",
"gamma_self": "Self-broadened HWHM at 1 atm pressure and 296 K",
"global_lower_quanta": "Electronic and vibrational quantum numbers and labels for the lower state of a transition"
},
}
# This is a BACKUP
HITRAN_DEFAULT_HEADER_BACKUP = {
"table_type": "column-fixed",
"size_in_bytes": -1,
"table_name": "###",
"number_of_rows": -1,
"order": [
"M",
"I",
"nu",
"S",
"A",
"gamma_air",
"gamma_self",
"E_",
"n_air",
"delta_air",
"V",
"V_",
"Q",
"Q_",
"Ierr",
"Iref",
"flag",
"g",
"g_"
],
"format": {
"A": "%10.3E",
"gamma_air": "%5.4f",
"g": "%7.1f",
"I": "%1d",
"M": "%2d",
"S": "%10.3E",
"Q_": "%15s",
"Q": "%15s",
"g_": "%7.1f",
"E_": "%10.4f",
"n_air": "%4.2f",
"delta_air": "%8.6f",
"V": "%15s",
"Iref": "%12s",
"flag": "%1s",
"Ierr": "%6s",
"nu": "%12.6f",
"gamma_self": "%5.3f",
"V_": "%15s"
},
"default": {
"A": 0.0,
"gamma_air": 0.0,
"g": "FFF",
"I": 0,
"M": 0,
"S": 0.0,
"Q_": "000",
"Q": "000",
"g_": "FFF",
"E_": 0.0,
"n_air": 0.0,
"delta_air": 0.0,
"V": "000",
"Iref": "EEE",
"flag": "EEE",
"Ierr": "EEE",
"nu": 0.0,
"gamma_self": 0.0,
"V_": "000"
}
}
def transport2object(TransportData):
    # Unimplemented stub: convert TRANSPORT_FORMAT data to OBJECT_FORMAT.
    pass
def object2transport(ObjectData):
    # Unimplemented stub: convert OBJECT_FORMAT data to TRANSPORT_FORMAT.
    pass
def getFullTableAndHeaderName(TableName):
    # Resolve the on-disk paths of a table: the data file (.data, falling
    # back to .par) and the matching .header file. Raises when neither data
    # file exists ("lonely header").
    base = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName
    fullpath_data = base + '.data'
    if not os.path.isfile(fullpath_data):
        fullpath_data = base + '.par'
        if not os.path.isfile(fullpath_data):
            raise Exception('Lonely header \"%s\"' % fullpath_data)
    fullpath_header = base + '.header'
    return fullpath_data,fullpath_header
def getParameterFormat(ParameterName,TableName):
    # Look up the print format for a table parameter.
    # NOTE(review): ParameterName is ignored -- this returns the table's
    # whole 'format' dict rather than the single entry; callers appear to
    # index the result themselves. Confirm before changing.
    return LOCAL_TABLE_CACHE[TableName]['header']['format']
def getTableHeader(TableName):
    # Return the header dict (order/format/default/...) of a cached table.
    return LOCAL_TABLE_CACHE[TableName]['header']
# RowObject = list of tuples like (name,value,format)
def addRowObject(RowObject,TableName):
    # add RowObject to TableObject in CACHE
    # check consistency first
    # NOTE(review): this definition is dead code -- it is shadowed by a
    # second addRowObject defined later in this module. Also
    # '+= par_value' extends the column with the value's items (or
    # concatenates numbers incorrectly) instead of appending the value
    # ('+= [par_value]' in the later definition) -- likely a bug here.
    if [p[0] for p in RowObject] != LOCAL_TABLE_CACHE[TableName]['header']['order']:
        raise Exception('The row is not consistent with the table')
    for par_name,par_value,par_format in RowObject:
        LOCAL_TABLE_CACHE[TableName]['data'][par_name] += par_value
    pass
def getRowObject(RowID,TableName):
    # Assemble row RowID of a cached table as a RowObject, i.e. a list of
    # (name, value, format) tuples in the table's column order.
    header = LOCAL_TABLE_CACHE[TableName]['header']
    data = LOCAL_TABLE_CACHE[TableName]['data']
    return [(name, data[name][RowID], header['format'][name])
            for name in header['order']]
# INCREASE ROW COUNT
def addRowObject(RowObject,TableName):
    # Append the values of RowObject to the table's column lists in the
    # cache (one value per column).
    columns = LOCAL_TABLE_CACHE[TableName]['data']
    for par_name, par_value, _par_format in RowObject:
        columns[par_name] += [par_value]
def setRowObject(RowID,RowObject,TableName):
    # Overwrite row RowID in place; when RowID is out of range the row is
    # appended instead and the row count bumped.
    number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    if 0 <= RowID < number_of_rows:
        data = LOCAL_TABLE_CACHE[TableName]['data']
        for par_name, par_value, _par_format in RowObject:
            data[par_name][RowID] = par_value
    else:
        # !!! XXX ATTENTION: THIS IS A TEMPORARY INSERTION XXX !!!
        LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] += 1
        addRowObject(RowObject,TableName)
def getDefaultRowObject(TableName):
    # Build a RowObject populated with the table's default values, in the
    # table's column order.
    header = LOCAL_TABLE_CACHE[TableName]['header']
    return [(name, header['default'][name], header['format'][name])
            for name in header['order']]
def subsetOfRowObject(ParameterNames,RowObject):
    # Return the entries of RowObject whose names appear in ParameterNames,
    # in the order given by ParameterNames (KeyError for unknown names).
    by_name = {entry[0]: entry for entry in RowObject}
    return [by_name[name] for name in ParameterNames]
#FORMAT_PYTHON_REGEX = '^\%([0-9]*)\.?([0-9]*)([dfs])$'
# Parses a printf-style spec '%M.NP' into (M, '.N', N, P).
# Raw string: '\%' and '\d' in a plain literal are invalid escape sequences
# (SyntaxWarning on modern Python); the runtime value is unchanged.
FORMAT_PYTHON_REGEX = r'^\%(\d*)(\.(\d*))?([edfsEDFS])$'
# Fortran string formatting
# based on a pythonic format string
def formatString(par_format,par_value,lang='FORTRAN'):
    """Format par_value with a printf-style spec, Fortran-flavored.

    Fortran format rules for %M.NP:
      M - total field length, minus sign included (optional)
      . - decimal separator (optional)
      N - number of digits after '.' (optional)
      P - [dfs] int/float/string (e/E accepted as float too)
    Python rule: if N is absent the default value is 6.

    For floats whose total width is exactly one more than the fractional
    width (M == N+1) the leading zero is dropped to mimic Fortran output
    (' 0.123' -> '  .123', '-0.123' -> ' -.123').
    """
    regex = FORMAT_PYTHON_REGEX
    (lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
    result = par_format % par_value
    if ty.lower() in set(['f','e']):
        lng = int(lng) if lng else 0
        lngpnt = int(lngpnt) if lngpnt else 0
        # (the original recomputed `result = par_format % par_value` here;
        #  that duplicate work was removed — the value is identical)
        res = result.strip()
        if lng==lngpnt+1:
            if res[0:1]=='0':
                result = '%%%ds' % lng % res[1:]
        if par_value<0:
            if res[1:2]=='0':
                result = '%%%ds' % lng % (res[0:1]+res[2:])
    return result
def formatGetLength(fmt,lang='FORTRAN'):
    # STUB: unfinished. Only binds the regex and implicitly returns None;
    # presumably intended to return the total field width M of '%M.NP'.
    regex = FORMAT_PYTHON_REGEX
def putRowObjectToString(RowObject):
    """Serialize a RowObject into one fixed-column text line.

    Each (name, value, format) triple is rendered via formatString
    (Fortran-style formatting) and the pieces are concatenated.
    """
    pieces = []
    for par_name, par_value, par_format in RowObject:
        pieces.append(formatString(par_format, par_value))
    return ''.join(pieces)
# Parameter nicknames are hardcoded.
# Maps full HITRAN parameter names to the short captions used by
# putTableHeaderToString when rendering column headers.
PARAMETER_NICKNAMES = {
    "a": "A",
    "gamma_air": "gair",
    "gp": "g",
    "local_iso_id": "I",
    "molec_id": "M",
    "sw": "S",
    "local_lower_quanta": "Q_",
    "local_upper_quanta": "Q",
    "gpp": "g_",
    "elower": "E_",
    "n_air": "nair",
    "delta_air": "dair",
    "global_upper_quanta": "V",
    "iref": "Iref",
    "line_mixing_flag": "f",
    "ierr": "ierr",
    "nu": "nu",
    "gamma_self": "gsel",
    "global_lower_quanta": "V_"
}
def putTableHeaderToString(TableName):
    """Build the one-line column-caption header for a cached table.

    Each column caption is the parameter's nickname (falling back to the
    full name), right-justified into the column width taken from its format.
    """
    output_string = ''
    regex = FORMAT_PYTHON_REGEX
    for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
        par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
        (lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
        fmt = '%%%ss' % lng
        # dict.get replaces the original bare try/except around the lookup
        par_name_short = PARAMETER_NICKNAMES.get(par_name, par_name)
        # truncate to the column width so long names cannot misalign columns
        output_string += (fmt % par_name_short)[:int(lng)]
    return output_string
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def getRowObjectFromString(input_string,TableName):
    """Parse one fixed-column text line into a RowObject.

    Column widths and types come from the table's header formats.
    Raises ValueError on unparseable numeric fields and Exception on an
    unknown format type; storage2cache relies on this to skip bad lines.
    """
    pos = 0
    RowObject = []
    for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
        par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
        # (a dead assignment of an older, narrower regex was removed here)
        regex = FORMAT_PYTHON_REGEX
        (lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
        lng = int(lng)
        par_value = input_string[pos:(pos+lng)]
        if ty=='d': # integer value
            par_value = int(par_value)
        elif ty.lower() in set(['e','f']): # float value
            par_value = float(par_value)
        elif ty=='s': # string value
            pass # don't strip string value: widths are column-fixed
        else:
            raise Exception('Format \"%s\" is unknown' % par_format)
        RowObject.append((par_name,par_value,par_format))
        pos += lng
    return RowObject
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Conversion between OBJECT_FORMAT and STORAGE_FORMAT
# This will substitute putTableToStorage and getTableFromStorage
def cache2storage(TableName):
    """Serialize a cached table to its .data and .header files on disk.

    Fixes over the original: output files are now closed (they previously
    leaked), and the bare except around mkdir is narrowed to OSError
    (directory already exists).
    """
    try:
        os.mkdir(VARIABLES['BACKEND_DATABASE_NAME'])
    except OSError:
        pass  # directory already exists
    fullpath_data,fullpath_header = getFullTableAndHeaderName(TableName)
    OutfileData = open(fullpath_data,'w')
    OutfileHeader = open(fullpath_header,'w')
    try:
        # write table data, one fixed-column line per row
        for RowID in range(0,LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']):
            RowObject = getRowObject(RowID,TableName)
            raw_string = putRowObjectToString(RowObject)
            OutfileData.write(raw_string+'\n')
        # write table header as JSON
        TableHeader = getTableHeader(TableName)
        OutfileHeader.write(json.dumps(TableHeader,indent=2))
    finally:
        OutfileData.close()
        OutfileHeader.close()
def storage2cache(TableName):
    """Load a table from its .data/.header files into LOCAL_TABLE_CACHE.

    Malformed data lines are silently skipped (best-effort, as before);
    'number_of_rows' is set to the count of successfully parsed lines.
    The Python-2 print statement was converted to a print() call, matching
    the parenthesized print calls used elsewhere in this module.
    """
    fullpath_data,fullpath_header = getFullTableAndHeaderName(TableName)
    InfileData = open(fullpath_data,'r')
    InfileHeader = open(fullpath_header,'r')
    header_text = InfileHeader.read()
    try:
        Header = json.loads(header_text)
    except ValueError:  # json decode errors subclass ValueError
        print('HEADER:')
        print(header_text)
        raise Exception('Invalid header')
    LOCAL_TABLE_CACHE[TableName] = {}
    LOCAL_TABLE_CACHE[TableName]['header'] = Header
    LOCAL_TABLE_CACHE[TableName]['data'] = {}
    # initialize empty data to avoid problems
    for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
        LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
    line_count = 0
    for line in InfileData:
        try:
            RowObject = getRowObjectFromString(line,TableName)
            line_count += 1
        except:  # best-effort: skip lines that fail to parse (original behavior)
            continue
        addRowObject(RowObject,TableName)
    LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = line_count
    InfileData.close()
    InfileHeader.close()
    print(' Lines parsed: %d' % line_count)
# / FORMAT CONVERSION LAYER
def getTableNamesFromStorage(StorageName):
    """Return table names found in StorageName: basenames of '*.header' files."""
    found = []
    for entry in listdir(StorageName):
        # a table exists wherever a header file does
        m = re.search('(.+)\.header$', entry)
        if m is not None:
            found.append(m.group(1))
    return found
# FIX POSSIBLE BUG: SIMILAR NAMES OF .PAR AND .DATA FILES
# BUG FIXED BY INTRODUCING A PRIORITY:
# *.data files have more priority than *.par files
# See getFullTableAndHeaderName function for explanation
def scanForNewParfiles(StorageName):
    """Return basenames of '.par' files in StorageName lacking a '.header' companion.

    Priority note (see getFullTableAndHeaderName): *.data files take
    precedence over *.par files with the same basename.

    Improvements over the original: the directory listing is parsed once
    (the regex loop was duplicated) and the bare excepts are replaced by an
    explicit None check on the match.
    """
    parsed = []  # (basename, extension) for every well-formed file name
    for file_name in listdir(StorageName):
        m = re.search(r'(.+)\.(\w+)', file_name)
        if m is None:
            continue  # no extension -> ignore
        parsed.append(m.groups())
    headers = set(fname for fname, fext in parsed if fext == 'header')
    return [fname for fname, fext in parsed
            if fext == 'par' and fname not in headers]
def createHeader(TableName):
    """Create a default .header file for TableName in the backend directory.

    Bug fixes: the original tested os.path.isfile(TableName) — the wrong
    path — and only AFTER open(...,'w') had already created/truncated the
    file, so the existence check could never fire usefully. The check now
    uses the real header path and runs before the file is opened.
    """
    fname = TableName + '.header'
    fullpath = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + fname
    if os.path.isfile(fullpath):
        raise Exception('File \"%s\" already exists!' % fname)
    fp = open(fullpath,'w')
    try:
        fp.write(json.dumps(HITRAN_DEFAULT_HEADER,indent=2))
    finally:
        fp.close()
def loadCache():
    """Populate LOCAL_TABLE_CACHE from the backend database directory.

    Creates default headers for orphan .par files first, then loads every
    table. The original's `LOCAL_TABLE_CACHE = {}` line only created an
    unused local name (no `global` declaration) and was removed as a no-op.
    Python-2 print statements converted to print() calls.
    """
    print('Using '+VARIABLES['BACKEND_DATABASE_NAME']+'\n')
    table_names = getTableNamesFromStorage(VARIABLES['BACKEND_DATABASE_NAME'])
    parfiles_without_header = scanForNewParfiles(VARIABLES['BACKEND_DATABASE_NAME'])
    # create headers for new parfiles
    for tab_name in parfiles_without_header:
        # tab_name is already the basename without the 'par' extension
        createHeader(tab_name)
        table_names.append(tab_name)
    for TableName in table_names:
        print(TableName)
        storage2cache(TableName)
def saveCache():
    """Write every table in LOCAL_TABLE_CACHE back to storage.

    The transient query buffer is dropped first (bare except narrowed to
    KeyError — the only expected failure is the buffer being absent).
    """
    try:
        del LOCAL_TABLE_CACHE[QUERY_BUFFER]
    except KeyError:
        pass
    for TableName in LOCAL_TABLE_CACHE:
        print(TableName)
        cache2storage(TableName)
# DB backend level, start transaction
def databaseBegin(db=None):
if db:
VARIABLES['BACKEND_DATABASE_NAME'] = db
else:
VARIABLES['BACKEND_DATABASE_NAME'] = BACKEND_DATABASE_NAME_DEFAULT
#print 'databaseBegin:'
#print(os.path.isdir("/home/el"))
#print(os.path.exists("/home/el/myfile.txt"))
if not os.path.exists(VARIABLES['BACKEND_DATABASE_NAME']):
os.mkdir(VARIABLES['BACKEND_DATABASE_NAME'])
loadCache()
# DB backend level, end transaction
def databaseCommit():
#print 'databaseCommit:'
saveCache()
#def saveCache():
# for TableName in LOCAL_TABLE_CACHE.keys():
# putTableToStorage(TableName)
# ----------------------------------------------------
# ----------------------------------------------------
# CONDITIONS
# ----------------------------------------------------
# ----------------------------------------------------
# ----------------------------------------------------
# hierarchic query.condition language:
# Conditions: CONS = ('and', ('=','p1','p2'), ('<','p1',13))
# String literals are distinguished from variable names
# by using the operation ('STRING','some_string')
# ----------------------------------------------------
# necessary conditions for hitranonline:
# example of a prefix-notation condition tree as consumed by evaluateExpression
SAMPLE_CONDITIONS = ('AND',('SET','internal_iso_id',[1,2,3,4,5,6]),('>=','nu',0),('<=','nu',100))
# sample hitranonline protocol
# http://hitran.cloudapp.net/lbl/5?output_format_id=1&iso_ids_list=5&numin=0&numax=100&access=api&key=e20e4bd3-e12c-4931-99e0-4c06e88536bd
# canonical operator tokens of the condition language (aliases exist in the evaluator)
CONDITION_OPERATIONS = set(['AND','OR','NOT','RANGE','IN','<','>','<=','>=','==','!=','LIKE','STR','+','-','*','/','MATCH','SEARCH','FINDALL'])
# Operations used in Condition verification
# Basic scheme: operationXXX(args),
# where args - list/array of arguments (>=1)
def operationAND(args):
    """Logical AND over any number of arguments (True for an empty list)."""
    return all(args)
def operationOR(args):
    """Logical OR over any number of arguments (False for an empty list)."""
    return any(args)
def operationNOT(arg):
    """Logical negation of a single argument (truthiness-based)."""
    return not arg
def operationRANGE(x,x_min,x_max):
    """Inclusive range test: x_min <= x <= x_max."""
    return x_min <= x and x <= x_max
def operationSUBSET(arg1,arg2):
    """Membership test: True if element arg1 occurs in collection arg2."""
    is_member = arg1 in arg2
    return is_member
def operationLESS(args):
    """True iff the arguments form a strictly increasing sequence."""
    return all(a < b for a, b in zip(args, args[1:]))
def operationMORE(args):
    """True iff the arguments form a strictly decreasing sequence."""
    return all(a > b for a, b in zip(args, args[1:]))
def operationLESSOREQUAL(args):
    """True iff the arguments form a non-decreasing sequence."""
    return all(a <= b for a, b in zip(args, args[1:]))
def operationMOREOREQUAL(args):
    """True iff the arguments form a non-increasing sequence."""
    return all(a >= b for a, b in zip(args, args[1:]))
def operationEQUAL(args):
    """True iff all adjacent argument pairs compare equal."""
    return all(a == b for a, b in zip(args, args[1:]))
def operationNOTEQUAL(arg1,arg2):
    """Inequality test for exactly two arguments."""
    differ = arg1 != arg2
    return differ
def operationSUM(args):
    """Chained '+' over the arguments: numbers add, strings concatenate.

    The neutral start value (0 or '') is chosen from the first argument's
    type; anything else raises.
    """
    first_type = type(args[0])
    if first_type in set([int,float]):
        total = 0
    elif first_type in set([str,unicode]):  # `unicode` exists on Python 2 only
        total = ''
    else:
        raise Exception('SUM error: unknown arg type')
    for item in args:
        total += item
    return total
def operationDIFF(arg1,arg2):
    """Binary subtraction: arg1 - arg2."""
    difference = arg1 - arg2
    return difference
def operationMUL(args):
    """Chained '*' over numeric arguments; non-numeric first arg raises."""
    if type(args[0]) in set([int,float]):
        product = 1
    else:
        raise Exception('MUL error: unknown arg type')
    for factor in args:
        product *= factor
    return product
def operationDIV(arg1,arg2):
    """Binary division: arg1 / arg2 (semantics follow the '/' operator
    of the running Python version)."""
    quotient = arg1 / arg2
    return quotient
def operationSTR(arg):
    """Pass-through for string literals; rejects anything that is not str."""
    if type(arg) is not str:
        raise Exception('Type mismatch: STR')
    return arg
def operationSET(arg):
    """Normalize a list/tuple/set literal to a plain list; rejects other types."""
    if type(arg) not in (list, tuple, set):
        raise Exception('Type mismatch: SET')
    return list(arg)
def operationMATCH(arg1,arg2):
    """True if regex arg1 is found anywhere in string arg2.

    Uses re.search (unanchored) deliberately — re.match anchors at the
    start, which was found to behave wrongly for this use case.
    """
    found = re.search(arg1, arg2)
    return found is not None
def operationSEARCH(arg1,arg2):
    """Search regex arg1 in string arg2 and return its groups wrapped as
    ('STR', value) literals of the condition language.

    Raises AttributeError if the regex does not match (original behavior:
    .groups() is called on the raw search result).
    """
    group = re.search(arg1,arg2).groups()
    return [('STR', item) for item in group]
def operationFINDALL(arg1,arg2):
    """Find all matches of regex arg1 in arg2, each wrapped as a ('STR', item)
    literal.

    XXX (inherited caveat): if the pattern has more than one group,
    re.findall yields tuples, which are wrapped as-is.
    """
    list_of_groups = re.findall(arg1,arg2)
    return [('STR', item) for item in list_of_groups]
def operationLIST(args):
    """Return the already-evaluated arguments as a shallow-copied list."""
    return list(args)
# /operations
#def parse(Conditions):
# pass
def BACKUP__evaluateExpression__BACKUP(root,VarDictionary):
    # DEPRECATED backup copy of the expression evaluator, kept for reference.
    # It lacks GroupIndexKey support. NOTE(review): its recursive calls go to
    # the newer evaluateExpression, not to itself — verify this is intended.
    # input = local tree root
    # XXX: this could be very slow due to passing
    #      VarDictionary as a parameter on every call
    # Two special cases: 1) root=varname
    #                    2) root=list/tuple
    # These cases must be processed in a separate way
    if type(root) in set([list,tuple]):
        # root is not a leaf: (operator, arg, ...) node
        head = root[0].upper()
        # string constants are treated specially (args not evaluated)
        if head in set(['STR','STRING']): # one arg
            return operationSTR(root[1])
        elif head in set(['SET','LIST']):
            return operationSET(root[1])
        tail = root[1:]
        args = []
        # evaluate arguments recursively
        for element in tail: # resolve tree by recursion
            args.append(evaluateExpression(element,VarDictionary))
        # call functions with evaluated arguments
        if head in set(['&','&&','AND']): # many args
            return operationAND(args)
        elif head in set(['|','||','OR']): # many args
            return operationOR(args)
        elif head in set(['!','NOT']): # one arg
            return operationNOT(args[0])
        elif head in set(['RANGE','BETWEEN']): # three args
            return operationRANGE(args[0],args[1],args[2])
        elif head in set(['IN','SUBSET']): # two args
            return operationSUBSET(args[0],args[1])
        elif head in set(['<','LESS','LT']): # many args
            return operationLESS(args)
        elif head in set(['>','MORE','MT']): # many args
            return operationMORE(args)
        elif head in set(['<=','LESSOREQUAL','LTE']): # many args
            return operationLESSOREQUAL(args)
        elif head in set(['>=','MOREOREQUAL','MTE']): # many args
            return operationMOREOREQUAL(args)
        elif head in set(['=','==','EQ','EQUAL','EQUALS']): # many args
            return operationEQUAL(args)
        elif head in set(['!=','<>','~=','NE','NOTEQUAL']): # two args
            return operationNOTEQUAL(args[0],args[1])
        elif head in set(['+','SUM']): # many args
            return operationSUM(args)
        elif head in set(['-','DIFF']): # two args
            return operationDIFF(args[0],args[1])
        elif head in set(['*','MUL']): # many args
            return operationMUL(args)
        elif head in set(['/','DIV']): # two args
            return operationDIV(args[0],args[1])
        elif head in set(['MATCH','LIKE']): # two args
            return operationMATCH(args[0],args[1])
        elif head in set(['SEARCH']): # two args
            return operationSEARCH(args[0],args[1])
        elif head in set(['FINDALL']): # two args
            return operationFINDALL(args[0],args[1])
        else:
            raise Exception('Unknown operator: %s' % root[0])
    elif type(root)==str:
        # root is a par_name: look its value up in the row's dictionary
        return VarDictionary[root]
    else:
        # root is a non-string constant: return as-is
        return root
# GROUPING ----------------------------------------------
# Shared mutable state for streamed (incremental) GROUP BY aggregates.
GROUP_INDEX = {}
# GROUP_INDEX has the following structure:
#  GROUP_INDEX[KEY] = VALUE
#    KEY = table line values
#    VALUE = {'FUNCTIONS':DICT,'FLAG':LOGICAL,'ROWID':INTEGER}
#      FUNCTIONS = {'FUNC_NAME':DICT}
#            FUNC_NAME = {'FLAG':LOGICAL,'NAME':STRING}
# aggregate name -> neutral/initial value of its running accumulator
GROUP_FUNCTION_NAMES = { 'COUNT' :  0,
                         'SUM'   :  0,
                         'MUL'   :  1,
                         'AVG'   :  0,
                         'MIN'   : +1e100,
                         'MAX'   : -1e100,
                         'SSQ'   : 0,
                       }
def clearGroupIndex():
    """Empty the module-level GROUP_INDEX in place.

    The original deleted keys while iterating GROUP_INDEX.keys(), which
    raises RuntimeError on Python 3 (keys() is a live view there).
    dict.clear() empties the same object in place, so all existing
    references to GROUP_INDEX remain valid.
    """
    GROUP_INDEX.clear()
def getValueFromGroupIndex(GroupIndexKey,FunctionName):
    # Return the running value of aggregate FunctionName for group
    # GroupIndexKey, creating the per-group slot on first access.
    if FunctionName not in GROUP_FUNCTION_NAMES:
        raise Exception('No such function \"%s\"' % FunctionName)
    # In the case if NewRowObjectDefault is requested (falsy key):
    # just return the aggregate's neutral initial value
    if not GroupIndexKey:
        return GROUP_FUNCTION_NAMES[FunctionName]
    # first access for this (group, function): create slot with armed FLAG
    # and the aggregate's neutral initial value
    if FunctionName not in GROUP_INDEX[GroupIndexKey]['FUNCTIONS']:
        GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName] = {}
        GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = True
        GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE'] = \
           GROUP_FUNCTION_NAMES[FunctionName]
    return GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE']
def setValueToGroupIndex(GroupIndexKey,FunctionName,Value):
    """Store the running aggregate value for (group, function) in GROUP_INDEX."""
    slot = GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]
    slot['VALUE'] = Value
def initializeGroup(GroupIndexKey):
    """Create the per-group slot if missing and re-arm every aggregate's
    update flag (so each streamed aggregate updates at most once per row).

    Two debug print statements were removed: one referenced GROUP_DESC, a
    name not defined in this module, and would raise NameError when a new
    group was first seen; the other dumped the whole GROUP_INDEX per call.
    """
    if GroupIndexKey not in GROUP_INDEX:
        GROUP_INDEX[GroupIndexKey] = {}
        GROUP_INDEX[GroupIndexKey]['FUNCTIONS'] = {}
        GROUP_INDEX[GroupIndexKey]['ROWID'] = len(GROUP_INDEX) - 1
    for FunctionName in GROUP_FUNCTION_NAMES:
        # initialize function flags (UpdateFlag)
        if FunctionName in GROUP_INDEX[GroupIndexKey]['FUNCTIONS']:
            GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = True
def groupCOUNT(GroupIndexKey):
    # Streamed COUNT aggregate: increments the group's counter at most once
    # per row — the FLAG is disarmed here and re-armed by initializeGroup.
    FunctionName = 'COUNT'
    Value = getValueFromGroupIndex(GroupIndexKey,FunctionName)
    if GroupIndexKey:
        if GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG']:
            GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = False
            Value = Value + 1
            setValueToGroupIndex(GroupIndexKey,FunctionName,Value)
    return Value
def groupSUM():
    # TODO: streamed SUM aggregate — unimplemented placeholder.
    pass
def grouoMUL():
    # TODO: streamed MUL aggregate — unimplemented placeholder.
    # NOTE(review): the name is a typo of 'groupMUL'; kept as-is because
    # renaming would change the module's public interface.
    pass
def groupAVG(): # TODO REMAKE
    # TODO: streamed AVG aggregate — unimplemented placeholder.
    pass
def groupMIN():
    # TODO: streamed MIN aggregate — unimplemented placeholder.
    pass
def groupMAX():
    # TODO: streamed MAX aggregate — unimplemented placeholder.
    pass
def groupSSQ(): # TODO REMAKE
    # TODO: streamed sum-of-squares aggregate — unimplemented placeholder.
    pass
# new evaluateExpression function,
# accounting for groups
def evaluateExpression(root,VarDictionary,GroupIndexKey=None):
    # Recursively evaluate a prefix-notation condition tree.
    # root is either:
    #   1) a list/tuple (operator, arg, ...)  -> dispatch to an operation*
    #   2) a str                              -> parameter name, looked up
    #      in VarDictionary
    #   3) anything else                      -> constant, returned as-is
    # GroupIndexKey enables streamed grouping aggregates (COUNT).
    # XXX: this could be very slow due to passing
    #      VarDictionary as a parameter on every call
    if type(root) in set([list,tuple]):
        # root is not a leaf
        head = root[0].upper()
        # string constants are treated specially (args not evaluated)
        if head in set(['STR','STRING']): # one arg
            return operationSTR(root[1])
        elif head in set(['SET']):
            return operationSET(root[1])
        tail = root[1:]
        args = []
        # evaluate arguments recursively
        for element in tail: # resolve tree by recursion
            args.append(evaluateExpression(element,VarDictionary,GroupIndexKey))
        # call functions with evaluated arguments
        if head in set(['LIST']): # list arg
            return operationLIST(args)
        elif head in set(['&','&&','AND']): # many args
            return operationAND(args)
        elif head in set(['|','||','OR']): # many args
            return operationOR(args)
        elif head in set(['!','NOT']): # one arg
            return operationNOT(args[0])
        elif head in set(['RANGE','BETWEEN']): # three args
            return operationRANGE(args[0],args[1],args[2])
        elif head in set(['IN','SUBSET']): # two args
            return operationSUBSET(args[0],args[1])
        elif head in set(['<','LESS','LT']): # many args
            return operationLESS(args)
        elif head in set(['>','MORE','MT']): # many args
            return operationMORE(args)
        elif head in set(['<=','LESSOREQUAL','LTE']): # many args
            return operationLESSOREQUAL(args)
        elif head in set(['>=','MOREOREQUAL','MTE']): # many args
            return operationMOREOREQUAL(args)
        elif head in set(['=','==','EQ','EQUAL','EQUALS']): # many args
            return operationEQUAL(args)
        elif head in set(['!=','<>','~=','NE','NOTEQUAL']): # two args
            return operationNOTEQUAL(args[0],args[1])
        elif head in set(['+','SUM']): # many args
            return operationSUM(args)
        elif head in set(['-','DIFF']): # two args
            return operationDIFF(args[0],args[1])
        elif head in set(['*','MUL']): # many args
            return operationMUL(args)
        elif head in set(['/','DIV']): # two args
            return operationDIV(args[0],args[1])
        elif head in set(['MATCH','LIKE']): # two args
            return operationMATCH(args[0],args[1])
        elif head in set(['SEARCH']): # two args
            return operationSEARCH(args[0],args[1])
        elif head in set(['FINDALL']): # two args
            return operationFINDALL(args[0],args[1])
        # --- GROUPING OPERATIONS ---
        elif head in set(['COUNT']):
            return groupCOUNT(GroupIndexKey)
        else:
            raise Exception('Unknown operator: %s' % root[0])
    elif type(root)==str:
        # root is a par_name: look its value up in the row's dictionary
        return VarDictionary[root]
    else:
        # root is a non-string constant: return as-is
        return root
def getVarDictionary(RowObject):
    """Build the {par_name: par_value} mapping used by condition evaluation
    from a RowObject of (name, value, format) triples."""
    return dict((par_name, par_value)
                for par_name, par_value, par_format in RowObject)
def checkRowObject(RowObject,Conditions,VarDictionary):
    """Evaluate Conditions against the row's VarDictionary.

    An empty/None Conditions tree accepts every row.
    """
    if not Conditions:
        return True
    return evaluateExpression(Conditions,VarDictionary)
# ----------------------------------------------------
# /CONDITIONS
# ----------------------------------------------------
# ----------------------------------------------------
# PARAMETER NAMES (including creation of new ones)
# ----------------------------------------------------
# Bind an expression to a new parameter
# in a form: ('BIND','new_par',('some_exp',...))
def operationBIND(parname,Expression,VarDictionary): # DISCARD?
    # TODO: unimplemented placeholder for ('BIND','new_par',expr) support;
    # newRowObject handles BIND heads directly instead.
    pass
# This section is for more detail processing of
# parlists.
# Table creation must include not only subsets of
# existing parameters, but also new parameters
# derived from functions on a special prefix language
# For this reason subsetOfRowObject(..) must be substituted
# by newRowObject(ParameterNames,RowObject)
# For parsing use the function evaluateExpression
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Get names from expression.
# Must merge this one with evaluateExpression.
# This is VERY LIMITED version of what will be
# when i'll make a language parser.
# For more ideas and info see LANGUAGE_REFERENCE
# more advanced version of expression evaluator
def evaluateExpressionPAR(ParameterNames,VarDictionary=None): # XXX DISCARD
    # STUB: unimplemented. Intended behavior (from the author's notes):
    # RETURN: 1) Upper-level Expression names
    #         2) Upper-level Expression values
    # Is it reasonable to pass a Context to every parse function?
    # For now the function does the following:
    #   1) iterates through all UPPER-LEVEL list elements
    #   2) if element is a parname: return parname
    #      if element is a BIND expression: return bind name
    #      (see operationBIND)
    #   3) if element is an anonymous expression: return #N (=1,2,3...)
    # N.B. Binds can be only on the 0-th level of Expression
    pass
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# GET FORMATS FROM SUB-EXPRESSION
# Could be very unstable error prone because the
# format is COLUMN-FIXED!!!
# Should think about it some more.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Important function of the STORAGE LEVEL (column-fixed tables)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def getContextFormat(RowObject):
    """Build the {par_name: par_format} mapping ("context format") from a
    RowObject of (name, value, format) triples."""
    return dict((par_name, par_format)
                for par_name, par_value, par_format in RowObject)
def getDefaultFormat(Type):
    """Return the default printf-style format for a supported Python type."""
    default_formats = {
        int:   '%10d',
        float: '%25.15E',
        str:   '%20s',
        bool:  '%2d',
    }
    try:
        return default_formats[Type]
    except KeyError:
        raise Exception('Unknown type')
def getDefaultValue(Type):
    """Return the zero/empty default value for a supported Python type."""
    default_values = {
        int:   0,
        float: 0.0,
        str:   '',
        bool:  False,
    }
    try:
        return default_values[Type]
    except KeyError:
        raise Exception('Unknown type')
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# VarDictionary = Context (this name is more suitable)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# GroupINdexKey is a key to special structure/dictionary GROUP_INDEX.
# GROUP_INDEX contains information needed to calculate streamed group functions
# such as COUNT, AVG, MIN, MAX etc...
# TODO: remove RowObject from parameters
def newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat,GroupIndexKey=None):
    """Build a new RowObject from ParameterNames.

    Each element of ParameterNames is either:
      - a plain parameter name (value from VarDictionary, format from
        ContextFormat), or
      - an expression tree, optionally of the form
        ('BIND'|'LET', name, expr[, format]); anonymous expressions are
        named '#0', '#1', ... An omitted format falls back to
        getDefaultFormat(type(value)).

    The bare except around the optional format lookup was narrowed to
    IndexError (the only expected failure: expr has no 4th element).
    TODO (inherited): remove the unused RowObject parameter.
    """
    anoncount = 0
    RowObjectNew = []
    for expr in ParameterNames:
        if type(expr) in set([list,tuple]): # bind / expression
            head = expr[0]
            if head in set(['let','bind','LET','BIND']):
                par_name = expr[1]
                par_expr = expr[2]
            else:
                par_name = "#%d" % anoncount
                anoncount += 1
                par_expr = expr
            par_value = evaluateExpression(par_expr,VarDictionary,GroupIndexKey)
            try:
                par_format = expr[3]
            except IndexError:  # no explicit format given with the expression
                par_format = getDefaultFormat(type(par_value))
        else: # parname
            par_name = expr
            par_value = VarDictionary[par_name]
            par_format = ContextFormat[par_name]
        RowObjectNew.append((par_name,par_value,par_format))
    return RowObjectNew
# ----------------------------------------------------
# /PARAMETER NAMES
# ----------------------------------------------------

# ----------------------------------------------------
# OPERATIONS ON TABLES
# ----------------------------------------------------
# Name of the transient table holding query results; deleted by saveCache.
QUERY_BUFFER = '__BUFFER__'
def getTableList():
    """Return the names of all cached tables as a list.

    Wrapping in list() preserves the Python-2 return type; on Python 3 a
    bare .keys() would return a live view that breaks callers expecting a
    list (indexing, concatenation, mutation while iterating).
    """
    return list(LOCAL_TABLE_CACHE.keys())
def describeTable(TableName):
    """
    INPUT PARAMETERS:
        TableName: name of the table to describe
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Print information about table, including
        parameter names, formats and wavenumber range.
    ---
    EXAMPLE OF USAGE:
        describeTable('sampletab')
    ---
    """
    # Python-2 print statements converted to print() calls; the bare except
    # around the optional 'comment' entry narrowed to KeyError.
    print('-----------------------------------------')
    print(TableName+' summary:')
    try:
        print('-----------------------------------------')
        print('Comment: \n'+LOCAL_TABLE_CACHE[TableName]['header']['comment'])
    except KeyError:  # 'comment' is optional in the header
        pass
    print('Number of rows: '+str(LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']))
    print('Table type: '+str(LOCAL_TABLE_CACHE[TableName]['header']['table_type']))
    print('-----------------------------------------')
    print('             PAR_NAME           PAR_FORMAT')
    print('')
    for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
        par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
        print('%20s %20s' % (par_name,par_format))
    print('-----------------------------------------')
# Write a table to File or STDOUT
def outputTable(TableName,Conditions=None,File=None,Header=True):
# Display or record table with condition checking
if File:
Header = False
OutputFile = open(File,'w')
if Header:
headstr = putTableHeaderToString(TableName)
if File:
OutputFile.write(headstr)
else:
print headstr
for RowID in range(0,LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']):
RowObject = getRowObject(RowID,TableName)
VarDictionary = getVarDictionary(RowObject)
VarDictionary['LineNumber'] = RowID
if not checkRowObject(RowObject,Conditions,VarDictionary):
continue
raw_string = putRowObjectToString(RowObject)
if File:
OutputFile.write(raw_string+'\n')
else:
print raw_string
# Create table "prototype-based" way
def createTable(TableName,RowObjectDefault):
# create a Table based on a RowObjectDefault
LOCAL_TABLE_CACHE[TableName] = {}
header_order = []
header_format = {}
header_default = {}
data = {}
for par_name,par_value,par_format in RowObjectDefault:
header_order.append(par_name)
header_format[par_name] = par_format
header_default[par_name] = par_value
data[par_name] = []
#header_order = tuple(header_order) # XXX ?
LOCAL_TABLE_CACHE[TableName]['header']={}
LOCAL_TABLE_CACHE[TableName]['header']['order'] = header_order
LOCAL_TABLE_CACHE[TableName]['header']['format'] = header_format
LOCAL_TABLE_CACHE[TableName]['header']['default'] = header_default
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = 0
LOCAL_TABLE_CACHE[TableName]['header']['size_in_bytes'] = 0
LOCAL_TABLE_CACHE[TableName]['header']['table_name'] = TableName
LOCAL_TABLE_CACHE[TableName]['header']['table_type'] = 'column-fixed'
LOCAL_TABLE_CACHE[TableName]['data'] = data
# simple "drop table" capability
def dropTable(TableName):
    """
    INPUT PARAMETERS:
        TableName: name of the table to delete
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Deletes a table from local database.
    ---
    EXAMPLE OF USAGE:
        dropTable('some_dummy_table')
    ---
    """
    # delete Table from both Cache and Storage
    # NOTE(review): the cache entry is emptied rather than removed — the
    # table name still appears in LOCAL_TABLE_CACHE (and getTableList) with
    # no columns. Storage deletion is not implemented yet (see TODO below).
    try:
        LOCAL_TABLE_CACHE[TableName] = {}
    except:
        pass
    # delete from storage
    pass # TODO
# Returns a column corresponding to parameter name
def getColumn(TableName,ParameterName):
"""
INPUT PARAMETERS:
TableName: source table name (required)
ParameterName: name of column to get (required)
OUTPUT PARAMETERS:
ColumnData: list of values from specified column
---
DESCRIPTION:
Returns a column with a name ParameterName from
table TableName. Column is returned as a list of values.
---
EXAMPLE OF USAGE:
p1 = getColumn('sampletab','p1')
---
"""
return LOCAL_TABLE_CACHE[TableName]['data'][ParameterName]
# Returns a list of columns corresponding to parameter names
def getColumns(TableName,ParameterNames):
"""
INPUT PARAMETERS:
TableName: source table name (required)
ParameterNames: list of column names to get (required)
OUTPUT PARAMETERS:
ListColumnData: tuple of lists of values from specified column
---
DESCRIPTION:
Returns columns with a names in ParameterNames from
table TableName. Columns are returned as a tuple of lists.
---
EXAMPLE OF USAGE:
p1,p2,p3 = getColumns('sampletab',('p1','p2','p3'))
---
"""
Columns = []
for par_name in ParameterNames:
Columns.append(LOCAL_TABLE_CACHE[TableName]['data'][par_name])
return Columns
def addColumn(TableName,ParameterName,Before=None,Expression=None,Type=None,Default=None,Format=None):
    """Add a new column to a cached table.

    The column is filled either with Default (no Expression) or with the
    per-row result of evaluating Expression; it is appended to the column
    order, or inserted before the column named by Before.

    Fix: optional arguments are now tested with `is None` instead of
    truthiness, so explicitly passed falsy values (Default=0, Format='',
    ...) are honored rather than silently replaced.
    """
    if ParameterName in LOCAL_TABLE_CACHE[TableName]['header']['format']:
        raise Exception('Column \"%s\" already exists' % ParameterName)
    if Type is None: Type = float
    if Default is None: Default = getDefaultValue(Type)
    if Format is None: Format = getDefaultFormat(Type)
    number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    # fill the data column
    if Expression is None:
        LOCAL_TABLE_CACHE[TableName]['data'][ParameterName] = [Default for i in range(0,number_of_rows)]
    else:
        data = []
        for RowID in range(0,number_of_rows):
            RowObject = getRowObject(RowID,TableName)
            VarDictionary = getVarDictionary(RowObject)
            VarDictionary['LineNumber'] = RowID  # expose row index to the expression
            par_value = evaluateExpression(Expression,VarDictionary)
            data.append(par_value)
        LOCAL_TABLE_CACHE[TableName]['data'][ParameterName] = data
    # update the header: column order, format and default
    header_order = LOCAL_TABLE_CACHE[TableName]['header']['order']
    if Before is None:
        header_order.append(ParameterName)
    else:
        i = header_order.index(Before)  # raises ValueError if Before is absent
        header_order = header_order[:i] + [ParameterName,] + header_order[i:]
    LOCAL_TABLE_CACHE[TableName]['header']['order'] = header_order
    LOCAL_TABLE_CACHE[TableName]['header']['format'][ParameterName] = Format
    LOCAL_TABLE_CACHE[TableName]['header']['default'][ParameterName] = Default
def deleteColumn(TableName,ParameterName):
    # Remove column ParameterName from a cached table (header and data).
    # Raises if the column does not exist. If the last column is removed,
    # the row counter is reset to zero.
    if ParameterName not in LOCAL_TABLE_CACHE[TableName]['header']['format']:
        raise Exception('No such column \"%s\"' % ParameterName)
    # update the header: drop the column from order/format/default
    i = LOCAL_TABLE_CACHE[TableName]['header']['order'].index(ParameterName)
    del LOCAL_TABLE_CACHE[TableName]['header']['order'][i]
    del LOCAL_TABLE_CACHE[TableName]['header']['format'][ParameterName]
    del LOCAL_TABLE_CACHE[TableName]['header']['default'][ParameterName]
    if not LOCAL_TABLE_CACHE[TableName]['header']['order']:
        LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = 0
    # drop the data column itself
    del LOCAL_TABLE_CACHE[TableName]['data'][ParameterName]
def deleteColumns(TableName,ParameterNames):
    """Delete one column (given as a bare name) or several columns (given
    as a list/tuple/set of names) from a cached table."""
    if type(ParameterNames) not in set([list,tuple,set]):
        ParameterNames = [ParameterNames]  # single bare name given
    for name in ParameterNames:
        deleteColumn(TableName, name)
def renameColumn(TableName,OldParameterName,NewParameterName):
    # TODO: unimplemented placeholder for renaming a table column.
    pass
def insertRow():
    # TODO: unimplemented placeholder (see setRowObject's temporary insertion).
    pass
def deleteRows(TableName,ParameterNames,Conditions):
    # TODO: unimplemented placeholder for condition-based row deletion.
    pass
# select from table to another table
#def selectInto(DestinationTableName,TableName,ParameterNames,Conditions):
# # TableName must refer to an existing table in cache!!
# # Conditions = Restrictables in specific format
# # Sample conditions: cond = {'par1':{'range',[b_lo,b_hi]},'par2':b}
# # return structure similar to TableObject and put it to QUERY_BUFFER
# # if ParameterNames is '*' then all parameters are used
# #table_columns = LOCAL_TABLE_CACHE[TableName]['data'].keys()
# #table_length = len(TableObject['header']['number_of_rows'])
# #if ParameterNames=='*':
# # ParameterNames = table_columns
# # check if Conditions contain elements which are not in the TableObject
# #condition_variables = getConditionVariables(Conditions)
# #strange_pars = set(condition_variables)-set(table_variables)
# #if strange_pars:
# # raise Exception('The following parameters are not in the table \"%s\"' % (TableName,list(strange_pars)))
# # do full scan each time
# if DestinationTableName == TableName:
# raise Exception('Selecting into source table is forbidden')
# table_length = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# row_count = 0
# for RowID in range(0,table_length):
# RowObject = getRowObject(RowID,TableName)
# RowObjectNew = subsetOfRowObject(ParameterNames,RowObject)
# VarDictionary = getVarDictionary(RowObject)
# if checkRowObject(RowObject,Conditions,VarDictionary):
# addRowObject(RowObjectNew,DestinationTableName)
# row_count += 1
# LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] += row_count
# select from table to another table
def selectInto(DestinationTableName,TableName,ParameterNames,Conditions):
    """Copy the rows of TableName that satisfy Conditions into an
    already-existing destination table, evaluating ParameterNames
    (names or expressions) for each copied row.

    Sample conditions: cond = {'par1':{'range',[b_lo,b_hi]},'par2':b}
    The destination table must differ from the source table.
    """
    if DestinationTableName == TableName:
        raise Exception('Selecting into source table is forbidden')
    source_length = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    copied = 0
    # Full scan of the source table on every call.
    for row_id in range(0,source_length):
        row = getRowObject(row_id,TableName)
        variables = getVarDictionary(row)
        # Expose the current row number to the expression evaluator.
        variables['LineNumber'] = row_id
        fmt = getContextFormat(row)
        new_row = newRowObject(ParameterNames,row,variables,fmt)
        if checkRowObject(row,Conditions,variables):
            addRowObject(new_row,DestinationTableName)
            copied += 1
    LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] += copied
def length(TableName):
    """Return the number of rows stored in a cached table."""
    return LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# select from table to QUERY_BUFFER
#def select(TableName,DestinationTableName=QUERY_BUFFER,ParameterNames=None,Conditions=None,Output=True,File=None):
# if not ParameterNames: ParameterNames=LOCAL_TABLE_CACHE[TableName]['header']['order']
# LOCAL_TABLE_CACHE[DestinationTableName] = {} # clear QUERY_BUFFER for the new result
# RowObjectDefault = getDefaultRowObject(TableName)
# RowObjectDefaultNew = subsetOfRowObject(ParameterNames,RowObjectDefault)
# dropTable(DestinationTableName) # redundant
# createTable(DestinationTableName,RowObjectDefaultNew)
# selectInto(DestinationTableName,TableName,ParameterNames,Conditions)
# if Output and DestinationTableName==QUERY_BUFFER:
# outputTable(DestinationTableName,File=File)
# Select parameters from a table with certain conditions.
# Parameters can be the names or expressions.
# Conditions contain a list of expressions in a special language.
# Set Output to False to suppress output
# Set File=FileName to redirect output to a file.
def select(TableName,DestinationTableName=QUERY_BUFFER,ParameterNames=None,Conditions=None,Output=True,File=None):
    """
    INPUT PARAMETERS:
        TableName:            name of source table (required)
        DestinationTableName: name of resulting table (optional)
        ParameterNames:       list of parameters or expressions (optional)
        Conditions:           list of logical expressions (optional)
        Output:   enable (True) or suppress (False) text output (optional)
        File:     name of a file to redirect the output to (optional)
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Select or filter the data in some table
        either to standard output or to file (if specified)
    ---
    EXAMPLE OF USAGE:
        select('sampletab',DestinationTableName='outtab',ParameterNames=(p1,p2),
               Conditions=(('and',('>=','p1',1),('<',('*','p1','p2'),20))))
        Conditions means (p1>=1 and p1*p2<20)
    ---
    """
    # TODO: Variables defined in ParameterNames ('LET') MUST BE VISIBLE IN Conditions !!
    # Refuse to work with a table which is not present in the cache.
    if TableName not in LOCAL_TABLE_CACHE.keys():
        raise Exception('%s: no such table. Check tableList() for more info.' % TableName)
    # Default: select every column of the source table.
    if not ParameterNames:
        ParameterNames = LOCAL_TABLE_CACHE[TableName]['header']['order']
    LOCAL_TABLE_CACHE[DestinationTableName] = {} # clear QUERY_BUFFER for the new result
    default_row = getDefaultRowObject(TableName)
    variables = getVarDictionary(default_row)
    fmt = getContextFormat(default_row)
    default_row_new = newRowObject(ParameterNames,default_row,variables,fmt)
    dropTable(DestinationTableName) # redundant
    createTable(DestinationTableName,default_row_new)
    selectInto(DestinationTableName,TableName,ParameterNames,Conditions)
    if Output and DestinationTableName==QUERY_BUFFER:
        outputTable(DestinationTableName,File=File)
# SORTING ===========================================================
def arrangeTable(TableName,DestinationTableName=None,RowIDList=None):
    """Build a table containing the rows of TableName whose indices are
    listed (in order) in RowIDList.

    If DestinationTableName is omitted, the source table is rearranged
    in place; otherwise the destination table is (re)created first.
    NOTE(review): when a separate destination is used, its 'header' is the
    SAME dict object as the source's header — confirm this aliasing is
    intentional before changing it.
    """
    if not DestinationTableName:
        # BUG FIX: was 'DestinationTablename = TableName' (misspelled),
        # leaving DestinationTableName as None for the in-place case.
        DestinationTableName = TableName
    if DestinationTableName != TableName:
        dropTable(DestinationTableName)
        LOCAL_TABLE_CACHE[DestinationTableName]['header']=LOCAL_TABLE_CACHE[TableName]['header']
        LOCAL_TABLE_CACHE[DestinationTableName]['data']={}
    LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] = len(RowIDList)
    # Copy each column, re-ordered (and possibly subsetted) by RowIDList.
    for par_name in LOCAL_TABLE_CACHE[DestinationTableName]['header']['order']:
        par_data = LOCAL_TABLE_CACHE[TableName]['data'][par_name]
        LOCAL_TABLE_CACHE[DestinationTableName]['data'][par_name] = [par_data[i] for i in RowIDList]
def compareLESS(RowObject1,RowObject2,ParameterNames):
    """Return True if RowObject1 precedes RowObject2 when the two rows
    are compared lexicographically on the values of ParameterNames.
    """
    vars1 = getVarDictionary(RowObject1)
    vars2 = getVarDictionary(RowObject2)
    key1 = [vars1[par_name] for par_name in ParameterNames]
    key2 = [vars2[par_name] for par_name in ParameterNames]
    # Python list comparison is lexicographic, element by element.
    return key1 < key2
def quickSort(index,TableName,ParameterNames,Accending=True):
    """Recursively quick-sort the row indices in `index` by the values of
    ParameterNames in the given table; return the sorted index list.

    Accending=False reverses the order. (Parameter name kept as-is for
    backward compatibility.)
    """
    if index == []:
        return []
    # First element is the pivot; partition the rest around it.
    pivot_id = index[0]
    pivot_row = getRowObject(pivot_id,TableName)
    below = []
    above = []
    for row_id in index[1:]:
        row = getRowObject(row_id,TableName)
        if compareLESS(row,pivot_row,ParameterNames):
            below.append(row_id)
        else:
            above.append(row_id)
    below = quickSort(below,TableName,ParameterNames,Accending)
    above = quickSort(above,TableName,ParameterNames,Accending)
    if Accending:
        return below + [pivot_id] + above
    return above + [pivot_id] + below
# Sorting must work well on the table itself!
def sort(TableName,DestinationTableName=None,ParameterNames=None,Accending=True,Output=False,File=None):
    """
    INPUT PARAMETERS:
        TableName:            name of source table (required)
        DestinationTableName: name of resulting table (optional)
        ParameterNames:       list of parameters or expressions to sort by (optional)
        Accending: sort in ascending (True) or descending (False) order (optional)
        Output:   enable (True) or suppress (False) text output (optional)
        File:     name of a file to redirect the output to (optional)
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Sort a table by a list of its parameters or expressions.
        The sorted table is saved in DestinationTableName (if specified),
        otherwise the source table is sorted in place.
    ---
    EXAMPLE OF USAGE:
        sort('sampletab',ParameterNames=(p1,('+',p1,p2)))
    ---
    """
    number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    row_ids = range(0,number_of_rows)
    if not DestinationTableName:
        DestinationTableName = TableName
    if not ParameterNames:
        # No explicit sort keys: sort on every column of the table.
        ParameterNames = LOCAL_TABLE_CACHE[TableName]['header']['order']
    elif type(ParameterNames) not in set([list,tuple]):
        # Guard against a bare parameter name: ('p1',) != ('p1')
        ParameterNames = [ParameterNames]
    sorted_ids = quickSort(row_ids,TableName,ParameterNames,Accending)
    arrangeTable(TableName,DestinationTableName,sorted_ids)
    if Output:
        outputTable(DestinationTableName,File=File)
# /SORTING ==========================================================
# GROUPING ==========================================================
# GROUP_INDEX global auxiliary structure is a Dictionary,
# which has the following properties:
# 1) Each key is a composite variable:
# [array of values of ParameterNames variable
# STREAM_UPDATE_FLAG]
# 2) Each value is an index in LOCAL_TABLE_CACHE[TableName]['data'][...],
# corresponding to this key
# STREAM_UPDATE_FLAG = TRUE if value in GROUP_INDEX needs updating
# = FALSE otherwise
# If no grouping variables are specified (GroupParameterNames==None)
# then the following key is used: "__GLOBAL__"
#def select(TableName,DestinationTableName=QUERY_BUFFER,ParameterNames=None,Conditions=None,Output=True,File=None):
# # TODO: Variables defined in ParameterNames ('LET') MUST BE VISIBLE IN Conditions !!
# if not ParameterNames: ParameterNames=LOCAL_TABLE_CACHE[TableName]['header']['order']
# LOCAL_TABLE_CACHE[DestinationTableName] = {} # clear QUERY_BUFFER for the new result
# RowObjectDefault = getDefaultRowObject(TableName)
# VarDictionary = getVarDictionary(RowObjectDefault)
# ContextFormat = getContextFormat(RowObjectDefault)
# RowObjectDefaultNew = newRowObject(ParameterNames,RowObjectDefault,VarDictionary,ContextFormat)
# dropTable(DestinationTableName) # redundant
# createTable(DestinationTableName,RowObjectDefaultNew)
# selectInto(DestinationTableName,TableName,ParameterNames,Conditions)
# if Output and DestinationTableName==QUERY_BUFFER:
# outputTable(DestinationTableName,File=File)
#def selectInto(DestinationTableName,TableName,ParameterNames,Conditions):
# # do full scan each time
# if DestinationTableName == TableName:
# raise Exception('Selecting into source table is forbidden')
# table_length = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# row_count = 0
# for RowID in range(0,table_length):
# RowObject = getRowObject(RowID,TableName)
# VarDictionary = getVarDictionary(RowObject)
# VarDictionary['_ID_'] = RowID
# ContextFormat = getContextFormat(RowObject)
# RowObjectNew = newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat)
# if checkRowObject(RowObject,Conditions,VarDictionary):
# addRowObject(RowObjectNew,DestinationTableName)
# row_count += 1
# LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] += row_count
#def newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat):
# anoncount = 0
# RowObjectNew = []
# for expr in ParameterNames:
# if type(expr) in {list,tuple}: # bind
# head = expr[0]
# if head in {'BIND','LET'}:
# par_name = expr[1]
# par_expr = expr[2]
# else:
# par_name = "#%d" % anoncount
# anoncount += 1
# par_expr = expr
# par_value = evaluateExpression(par_expr,VarDictionary)
# try:
# par_format = expr[3]
# except:
# par_format = getDefaultFormat(type(par_value))
# else: # parname
# par_name = expr
# par_value = VarDictionary[par_name]
# par_format = ContextFormat[par_name]
# RowObjectNew.append((par_name,par_value,par_format))
# return RowObjectNew
def group(TableName,DestinationTableName=QUERY_BUFFER,ParameterNames=None,GroupParameterNames=None,Output=True):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
DestinationTableName: name of resulting table (optional)
ParameterNames: list of parameters or expressions to take (optional)
GroupParameterNames: list of parameters or expressions to group by (optional)
Accending: sort in ascending (True) or descending (False) order (optional)
Output: enable (True) or suppress (False) text output (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
none
---
EXAMPLE OF USAGE:
group('sampletab',ParameterNames=('p1',('sum','p2')),GroupParameterNames=('p1'))
... makes grouping by p1,p2. For each group it calculates sum of p2 values.
---
"""
# Implements such functions as:
# count,sum,avg,min,max,ssq etc...
# 1) ParameterNames can contain group functions
# 2) GroupParameterNames can't contain group functions
# 3) If ParameterNames contains parameters defined by LET directive,
# it IS visible in the sub-context of GroupParameterNames
# 4) Parameters defined in GroupParameterNames are NOT visible in ParameterNames
# 5) ParameterNames variable represents the structure of the resulting table/collection
# 6) GroupParameterNames can contain either par_names or expressions with par_names
# Clear old GROUP_INDEX value
clearGroupIndex()
# Consistency check
if TableName == DestinationTableName:
raise Exception('TableName and DestinationTableName must be different')
#if not ParameterNames: ParameterNames=LOCAL_TABLE_CACHE[TableName]['header']['order']
# Prepare the new DestinationTable
RowObjectDefault = getDefaultRowObject(TableName)
VarDictionary = getVarDictionary(RowObjectDefault)
ContextFormat = getContextFormat(RowObjectDefault)
RowObjectDefaultNew = newRowObject(ParameterNames,RowObjectDefault,VarDictionary,ContextFormat)
dropTable(DestinationTableName) # redundant
createTable(DestinationTableName,RowObjectDefaultNew)
# Loop through rows of source Table
# On each iteration group functions update GROUP_INDEX (see description above)
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# STAGE 1: CREATE GROUPS
print 'LOOP:'
for RowID in range(0,number_of_rows):
print '--------------------------------'
print 'RowID='+str(RowID)
RowObject = getRowObject(RowID,TableName) # RowObject from source table
VarDictionary = getVarDictionary(RowObject)
print 'VarDictionary='+str(VarDictionary)
# This is a trick which makes evaluateExpression function
# not consider first expression as an operation
GroupParameterNames_ = ['LIST'] + list(GroupParameterNames)
GroupIndexKey = evaluateExpression(GroupParameterNames_,VarDictionary)
# List is an unhashable type in Python!
GroupIndexKey = tuple(GroupIndexKey)
initializeGroup(GroupIndexKey)
print 'GROUP_INDEX='+str(GROUP_INDEX)
ContextFormat = getContextFormat(RowObject)
RowObjectNew = newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat,GroupIndexKey)
RowIDGroup = GROUP_INDEX[GroupIndexKey]['ROWID']
setRowObject(RowIDGroup,RowObjectNew,DestinationTableName)
# Output result if required
if Output and DestinationTableName==QUERY_BUFFER:
outputTable(DestinationTableName,File=File)
# /GROUPING =========================================================
# EXTRACTING ========================================================
# Regular expressions used by extractColumns() to recognise a single value
# in a whitespace-separated source column ...
REGEX_INTEGER = '[+-]?\d+'
REGEX_STRING = '[^\s]+'
REGEX_FLOAT_F = '[+-]?\d*\.?\d+'
REGEX_FLOAT_E = '[+-]?\d*\.?\d+[eEfF]?[+-]?\d+'
# ... and their fixed-width counterparts: match exactly n characters
# drawn from the character class allowed for that type.
REGEX_INTEGER_FIXCOL = lambda n: '\d{%d}' % n
REGEX_STRING_FIXCOL = lambda n: '[^\s]{%d}' % n
REGEX_FLOAT_F_FIXCOL = lambda n: '[\+\-\.\d]{%d}' % n
REGEX_FLOAT_E_FIXCOL = lambda n: '[\+\-\.\deEfF]{%d}' % n
# Extract sub-columns from string column
def extractColumns(TableName,SourceParameterName,ParameterFormats,ParameterNames=None,FixCol=False):
    """
    INPUT PARAMETERS:
        TableName:           name of source table (required)
        SourceParameterName: name of source column to process (required)
        ParameterFormats:    c formats of unpacked parameters (required)
        ParameterNames:      list of resulting parameter names (optional)
        FixCol:      column-fixed (True) format of source column (optional)
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Note, that this function is aimed to do some extra job on
        interpreting string parameters which is normally supposed
        to be done by the user.
    ---
    EXAMPLE OF USAGE:
        extractColumns('sampletab',SourceParameterName='p5',
                        ParameterFormats=('%d','%d','%d'),
                        ParameterNames=('p5_1','p5_2','p5_3'))
        This example extracts three integer parameters from
        a source column 'p5' and puts results in ('p5_1','p5_2','p5_3').
    ---
    """
    # ParameterNames = just the names without expressions
    # ParFormats contains python formats for par extraction
    # Example: ParameterNames=('v1','v2','v3')
    # ParameterFormats=('%1s','%1s','%1s')
    # By default the format of parameters is column-fixed
    # The source column must contain strings (Python 2: str or unicode).
    if type(LOCAL_TABLE_CACHE[TableName]['header']['default'][SourceParameterName]) not in set([str,unicode]):
        raise Exception('Source parameter must be a string')
    i=-1
    # bug when (a,) != (a)
    if ParameterNames and type(ParameterNames) not in set([list,tuple]):
        ParameterNames = [ParameterNames]
    if ParameterFormats and type(ParameterFormats) not in set([list,tuple]):
        ParameterFormats = [ParameterFormats]
    # if ParameterNames is empty, fill it with #1-2-3-...
    if not ParameterNames:
        ParameterNames = []
        # using naming convention #i, i=0,1,2,3...
        # (skip any auto-generated name that already exists in the table)
        for par_format in ParameterFormats:
            while True:
                i+=1
                par_name = '#%d' % i
                fmt = LOCAL_TABLE_CACHE[TableName]['header']['format'].get(par_name,None)
                if not fmt: break
            ParameterNames.append(par_name)
    # check if ParameterNames are valid
    Intersection = set(ParameterNames).intersection(LOCAL_TABLE_CACHE[TableName]['header']['order'])
    if Intersection:
        raise Exception('Parameters %s already exist' % str(list(Intersection)))
    # loop over ParameterNames to prepare LOCAL_TABLE_CACHE
    i=0
    for par_name in ParameterNames:
        par_format = ParameterFormats[i]
        LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]=par_format
        LOCAL_TABLE_CACHE[TableName]['data'][par_name]=[]
        i+=1
    # append new parameters in order list
    LOCAL_TABLE_CACHE[TableName]['header']['order'] += ParameterNames
    # cope with default values
    # Build one capturing regex group (and one Python type) per new column.
    i=0
    format_regex = []
    format_types = []
    #print 'ParameterNames='+str(ParameterNames)
    for par_format in ParameterFormats:
        par_name = ParameterNames[i]
        # FORMAT_PYTHON_REGEX (defined elsewhere in this module) decomposes a
        # C-style format such as '%10.3f' into (length, trail, point-length, type).
        regex = FORMAT_PYTHON_REGEX
        #print 'par_name: '+par_name
        #print 'par_format: '+par_format
        (lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
        ty = ty.lower()
        if ty == 'd':
            par_type = int
            if FixCol:
                format_regex_part = REGEX_INTEGER_FIXCOL(lng)
            else:
                format_regex_part = REGEX_INTEGER
        elif ty == 's':
            par_type = str
            if FixCol:
                format_regex_part = REGEX_STRING_FIXCOL(lng)
            else:
                format_regex_part = REGEX_STRING
        elif ty == 'f':
            par_type = float
            if FixCol:
                format_regex_part = REGEX_FLOAT_F_FIXCOL(lng)
            else:
                format_regex_part = REGEX_FLOAT_F
        elif ty == 'e':
            par_type = float
            if FixCol:
                format_regex_part = REGEX_FLOAT_E_FIXCOL(lng)
            else:
                format_regex_part = REGEX_FLOAT_E
        else:
            raise Exception('Unknown data type')
        format_regex.append('('+format_regex_part+')')
        format_types.append(par_type)
        def_val = getDefaultValue(par_type)
        LOCAL_TABLE_CACHE[TableName]['header']['default'][par_name]=def_val
        i+=1
    # Sub-patterns are separated by arbitrary whitespace in the source string.
    format_regex = '\s*'.join(format_regex)
    #print 'format_regex='+str(format_regex)
    #return format_regex
    # loop through values of SourceParameter
    for SourceParameterString in LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName]:
        try:
            # re.search returns None on no match; .groups() then raises,
            # which is converted into an informative Exception below.
            ExtractedValues = list(re.search(format_regex,SourceParameterString).groups())
        except:
            raise Exception('Error with line \"%s\"' % SourceParameterString)
        i=0
        # loop through all parameters which are supposed to be extracted
        for par_name in ParameterNames:
            #print 'ExtractedValues[i]='+ExtractedValues[i]
            #print 'par_name='+par_name
            par_value = format_types[i](ExtractedValues[i])
            LOCAL_TABLE_CACHE[TableName]['data'][par_name].append(par_value)
            i+=1
    # explicitly check that number of rows are equal
    number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    number_of_rows2 = len(LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName])
    number_of_rows3 = len(LOCAL_TABLE_CACHE[TableName]['data'][ParameterNames[0]])
    if not (number_of_rows == number_of_rows2 == number_of_rows3):
        raise Exception('Error while extracting parameters: check your regexp')
# Split string columns into sub-columns with given names
# TODO: not implemented yet.
def splitColumn(TableName,SourceParameterName,ParameterNames,Splitter):
    """Split a string column into sub-columns using Splitter (stub, not implemented)."""
    pass
# /EXTRACTING =======================================================
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# /LOCAL DATABASE MANAGEMENT SYSTEM
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# GLOBAL API FUNCTIONS
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def queryHITRAN(TableName,iso_id_list,numin,numax):
#import httplib
#conn = httplib.HTTPConnection('hitranazure.cloudapp.com')
#conn.Request('')
#r = conn.getresponse()
#print r.status, r.reason
#data1 = data1.read
TableHeader = HITRAN_DEFAULT_HEADER
TableHeader['table_name'] = TableName
DataFileName = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.data'
HeaderFileName = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.header'
#if TableName in LOCAL_TABLE_CACHE.keys():
# raise Exception('Table \"%s\" exists' % TableName)
#if os.path.isfile(DataFileName):
# raise Exception('File \"%s\" exists' % DataFileName)
#if os.path.isfile(HeaderFileName):
# raise Exception('!!File \"%s\" exists' % HeaderFileName)
# create URL
iso_id_list_str = [str(iso_id) for iso_id in iso_id_list]
iso_id_list_str = ','.join(iso_id_list_str)
#url = 'http://hitran.cloudapp.net' + '/lbl/5?' + \
#url = 'http://hitranazure.cloudapp.net' + '/lbl/5?' + \
#'iso_ids_list=' + iso_id_list_str + '&' + \
#'numin=' + str(numin) + '&' + \
#'numax=' + str(numax) + '&' + \
#'access=api' + '&' + \
#'key=' + GLOBAL_HITRAN_APIKEY
url = GLOBAL_HOST + '/lbl/api?' + \
'iso_ids_list=' + iso_id_list_str + '&' + \
'numin=' + str(numin) + '&' + \
'numax=' + str(numax)
#print('url=',url) # DEBUG
# More efficient way: download by chunks
try:
req = urllib2.urlopen(url)
except HTTPError:
raise Exception('Failed to retrieve data for given parameters.')
except URLError:
raise Exception('Cannot connect to %s. Try again or edit GLOBAL_HOST variable.' % GLOBAL_HOST)
#CHUNK = 16 * 1024 # default value
CHUNK = 64 * 1024
print 'BEGIN DOWNLOAD: '+TableName
with open(DataFileName,'w') as fp:
while True:
chunk = req.read(CHUNK)
if not chunk: break
fp.write(chunk)
print ' %d bytes written to %s' % (CHUNK,DataFileName)
with open(HeaderFileName,'w') as fp:
fp.write(json.dumps(TableHeader,indent=2))
print 'Header written to %s' % HeaderFileName
print 'END DOWNLOAD'
# Set comment
# Get this table to LOCAL_TABLE_CACHE
storage2cache(TableName)
print 'PROCESSED'
# NODE CODE
# Module-level flag: set to True by nodeInit() once the node can serve requests.
NODE_READY = False
# Node initialization
def nodeInit():
    """Initialize the node: start a backend DB transaction and mark the
    module-level NODE_READY flag as ready.
    """
    # BUG FIX: without the 'global' declaration the assignment below
    # created a function-local variable and the module-level flag
    # checked by globalSelectInto() was never updated.
    global NODE_READY
    # very unoptimal, since it loads all tables in memory!!
    #loadCache()
    databaseBegin() # DB backend level, start transaction
    NODE_READY = True
# returns a table instance created from Query object
def globalSelectInto(NewTablePath,SourceTablePath,ParameterNames,Conditions):
    """Create a table from parsed data and store it in the database (stub).

    Both path arguments use the form 'dbname::tablename::nodename'.
    Requires nodeInit() to have been called first.
    """
    dbname,tablename,nodename = NewTablePath.split('::')
    dbname1,tablename1,nodename1 = SourceTablePath.split('::')
    if not NODE_READY:
        raise Exception('Node \"%s\" is not ready. Call nodeInit()' % NODE_NAME)
    # should get rid of selectLocal as planning to use network interface
    # ...... selectLocal OR selectRemote
    pass
# ---------------------------------------------------------------
###?def cacheTableLookup(Query,Cache=GlobalCache):
###? # try to find table in Cache by it's Query
###? # if fails, return empty instance
###? # reasons of failure:
###? # - Query is not registered in cache
###? # - Query is registered, but Table is too old
###? return []
###?def cacheTableUpdate()
###? pass
###?def cacheTable(Query,Cache=GlobalCache,Connection=GlobalConnection):
###? # returns a table from the Cache by table's Query
###? # if cashed table is not found fetch it remotely
###? OldTable = cacheTableLookup(Query,Cache)
###? if not empty(OldTable)
###? return OldTable
###? else:
###? RawData = getRawDataRemote(Query,Connection)
###? ParsedData = parseRawData(RawData)
###? NewTable = createTable(ParsedData)
###? updateCache(Cache,NewTable)
###? return NewTable
# query_string - query written in the
# formal language of local database frontend
# query_string - query written in the
# formal language of local database frontend
def makeQuery(query_string,Connection=GLOBAL_CONNECTION):
    """Make a query to a remote server using the given connection instance
    (stub, not implemented)."""
    # makes a query to remote server
    # using connection instance
    pass
# ---------- DATABASE FRONTEND END -------------
# ---------- DATABASE BACKEND 1 ----------------
# This is a simple database backend for Python
# which uses standard
# ---------- DATABASE BACKEND 1 END ------------
# simple implementation of getting a line list from a remote server
def getLinelist(local_name,query,api_key):
    """Get a line list from a remote server (simple implementation).

    NOTE(review): `query` and `api_key` are currently ignored; only
    `local_name` is forwarded to makeQuery — confirm intended.
    """
    return makeQuery(local_name)
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# / GLOBABL API FUNCTIONS
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# ---------------- FILTER ---------------------------------------------
def filter(TableName,Conditions):
    """Filter a table by Conditions without printing the result
    (thin wrapper around select()).

    NOTE: this deliberately keeps the historical name and therefore
    shadows the builtin filter() when the module is star-imported.
    """
    select(TableName=TableName,Conditions=Conditions,Output=False)
# ---------------------- ISO.PY ---------------------------------------
# Field positions inside each ISO_ID record (see ISO_ID below).
ISO_ID_INDEX = {
'M':0,
'I':1,
'iso_name':2,
'abundance':3,
'mass':4,
'mol_name':5
}
# id M I iso_name abundance mass mol_name
# Isotopologue table keyed by HITRAN "global" isotopologue ID.
# Each record: [molecule number M, local isotopologue number I,
#               isotopologue name, natural abundance, molar mass, molecule name]
# NOTE: entries with M >= 1001 (H, He, Ar) are auxiliary species whose
# abundance and mass are left as None.
ISO_ID = {
1 : [ 1, 1, 'H2(16O)', 0.997317, 18.010565, 'H2O' ],
2 : [ 1, 2, 'H2(18O)', 0.00199983, 20.014811, 'H2O' ],
3 : [ 1, 3, 'H2(17O)', 0.000372, 19.01478, 'H2O' ],
4 : [ 1, 4, 'HD(16O)', 0.00031069, 19.01674, 'H2O' ],
5 : [ 1, 5, 'HD(18O)', 0.000000623, 21.020985, 'H2O' ],
6 : [ 1, 6, 'HD(17O)', 0.000000116, 20.020956, 'H2O' ],
7 : [ 2, 1, '(12C)(16O)2', 0.9842, 43.98983, 'CO2' ],
8 : [ 2, 2, '(13C)(16O)2', 0.01106, 44.993185, 'CO2' ],
9 : [ 2, 3, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2' ],
10 : [ 2, 4, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2' ],
11 : [ 2, 5, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2' ],
12 : [ 2, 6, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2' ],
13 : [ 2, 7, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2' ],
14 : [ 2, 8, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2' ],
15 : [ 2, 0, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2' ],
120 : [ 2, 11, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2' ],
121 : [ 2, 9, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2' ],
16 : [ 3, 1, '(16O)3', 0.992901, 47.984745, 'O3' ],
17 : [ 3, 2, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3' ],
18 : [ 3, 3, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3' ],
19 : [ 3, 4, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3' ],
20 : [ 3, 5, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3' ],
21 : [ 4, 1, '(14N)2(16O)', 0.990333, 44.001062, 'N2O' ],
22 : [ 4, 2, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O' ],
23 : [ 4, 3, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O' ],
24 : [ 4, 4, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O' ],
25 : [ 4, 5, '(14N)2(17O)', 0.000369, 45.005278, 'N2O' ],
26 : [ 5, 1, '(12C)(16O)', 0.98654, 27.994915, 'CO' ],
27 : [ 5, 2, '(13C)(16O)', 0.01108, 28.99827, 'CO' ],
28 : [ 5, 3, '(12C)(18O)', 0.0019782, 29.999161, 'CO' ],
29 : [ 5, 4, '(12C)(17O)', 0.000368, 28.99913, 'CO' ],
30 : [ 5, 5, '(13C)(18O)', 0.00002222, 31.002516, 'CO' ],
31 : [ 5, 6, '(13C)(17O)', 0.00000413, 30.002485, 'CO' ],
32 : [ 6, 1, '(12C)H4', 0.98827, 16.0313, 'CH4' ],
33 : [ 6, 2, '(13C)H4', 0.0111, 17.034655, 'CH4' ],
34 : [ 6, 3, '(12C)H3D', 0.00061575, 17.037475, 'CH4' ],
35 : [ 6, 4, '(13C)H3D', 0.0000049203, 18.04083, 'CH4' ],
36 : [ 7, 1, '(16O)2', 0.995262, 31.98983, 'O2' ],
37 : [ 7, 2, '(16O)(18O)', 0.00399141, 33.994076, 'O2' ],
38 : [ 7, 3, '(16O)(17O)', 0.000742, 32.994045, 'O2' ],
39 : [ 8, 1, '(14N)(16O)', 0.993974, 29.997989, 'NO' ],
40 : [ 8, 2, '(15N)(16O)', 0.0036543, 30.995023, 'NO' ],
41 : [ 8, 3, '(14N)(18O)', 0.00199312, 32.002234, 'NO' ],
42 : [ 9, 1, '(32S)(16O)2', 0.94568, 63.961901, 'SO2' ],
43 : [ 9, 2, '(34S)(16O)2', 0.04195, 65.957695, 'SO2' ],
44 : [ 10, 1, '(14N)(16O)2', 0.991616, 45.992904, 'NO2' ],
45 : [ 11, 1, '(14N)H3', 0.9958715, 17.026549, 'NH3' ],
46 : [ 11, 2, '(15N)H3', 0.0036613, 18.023583, 'NH3' ],
47 : [ 12, 1, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3' ],
117 : [ 12, 2, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3' ],
48 : [ 13, 1, '(16O)H', 0.997473, 17.00274, 'OH' ],
49 : [ 13, 2, '(18O)H', 0.00200014, 19.006986, 'OH' ],
50 : [ 13, 3, '(16O)D', 0.00015537, 18.008915, 'OH' ],
51 : [ 14, 1, 'H(19F)', 0.99984425, 20.006229, 'HF' ],
110 : [ 14, 2, 'D(19F)', 0.000115, 21.0125049978, 'HF' ],
52 : [ 15, 1, 'H(35Cl)', 0.757587, 35.976678, 'HCl' ],
53 : [ 15, 2, 'H(37Cl)', 0.242257, 37.973729, 'HCl' ],
107 : [ 15, 3, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl' ],
108 : [ 15, 4, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl' ],
54 : [ 16, 1, 'H(79Br)', 0.50678, 79.92616, 'HBr' ],
55 : [ 16, 2, 'H(81Br)', 0.49306, 81.924115, 'HBr' ],
111 : [ 16, 3, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr' ],
112 : [ 16, 4, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr' ],
56 : [ 17, 1, 'H(127I)', 0.99984425, 127.912297, 'HI' ],
113 : [ 17, 2, 'D(127I)', 0.000115, 128.918574778, 'HI' ],
57 : [ 18, 1, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO' ],
58 : [ 18, 2, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO' ],
59 : [ 19, 1, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS' ],
60 : [ 19, 2, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS' ],
61 : [ 19, 3, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS' ],
62 : [ 19, 4, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS' ],
63 : [ 19, 5, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS' ],
64 : [ 20, 1, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO' ],
65 : [ 20, 2, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO' ],
66 : [ 20, 3, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO' ],
67 : [ 21, 1, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl' ],
68 : [ 21, 2, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl' ],
69 : [ 22, 1, '(14N)2', 0.9926874, 28.006147, 'N2' ],
118 : [ 22, 2, '(14N)(15N)', 0.0072535, 29.997989, 'N2' ],
70 : [ 23, 1, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN' ],
71 : [ 23, 2, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN' ],
72 : [ 23, 3, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN' ],
73 : [ 24, 1, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl' ],
74 : [ 24, 2, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl' ],
75 : [ 25, 1, 'H2(16O)2', 0.994952, 34.00548, 'H2O2' ],
76 : [ 26, 1, '(12C)2H2', 0.9776, 26.01565, 'C2H2' ],
77 : [ 26, 2, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2' ],
105 : [ 26, 3, '(12C)2HD', 0.00030455, 27.021825, 'C2H2' ],
78 : [ 27, 1, '(12C)2H6', 0.97699, 30.04695, 'C2H6' ],
106 : [ 27, 2, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6' ],
79 : [ 28, 1, '(31P)H3', 0.99953283, 33.997238, 'PH3' ],
80 : [ 29, 1, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2' ],
119 : [ 29, 2, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2' ],
81 : [ 31, 1, 'H2(32S)', 0.94988, 33.987721, 'H2S' ],
82 : [ 31, 2, 'H2(34S)', 0.04214, 35.983515, 'H2S' ],
83 : [ 31, 3, 'H2(33S)', 0.007498, 34.987105, 'H2S' ],
84 : [ 32, 1, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH' ],
85 : [ 33, 1, 'H(16O)2', 0.995107, 32.997655, 'HO2' ],
86 : [ 34, 1, '(16O)', 0.997628, 15.994915, 'O' ],
87 : [ 36, 1, '(14N)(16O)+', 0.993974, 29.997989, 'NOp' ],
88 : [ 37, 1, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr' ],
89 : [ 37, 2, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr' ],
90 : [ 38, 1, '(12C)2H4', 0.9773, 28.0313, 'C2H4' ],
91 : [ 38, 2, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4' ],
92 : [ 39, 1, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH' ],
93 : [ 40, 1, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br' ],
94 : [ 40, 2, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br' ],
95 : [ 41, 1, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN' ],
96 : [ 42, 1, '(12C)(19F)4', 0.9893, 87.993616, 'CF4' ],
116 : [ 43, 1, '(12C)4H2', 0.955998, 50.01565, 'C4H2' ],
109 : [ 44, 1, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N' ],
103 : [ 45, 1, 'H2', 0.999688, 2.01565, 'H2' ],
115 : [ 45, 2, 'HD', 0.00022997, 3.021825, 'H2' ],
97 : [ 46, 1, '(12C)(32S)', 0.939624, 43.971036, 'CS' ],
98 : [ 46, 2, '(12C)(34S)', 0.0416817, 45.966787, 'CS' ],
99 : [ 46, 3, '(13C)(32S)', 0.0105565, 44.974368, 'CS' ],
100 : [ 46, 4, '(12C)(33S)', 0.00741668, 44.970399, 'CS' ],
114 : [ 47, 1, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3' ],
101 : [ 1001, 1, 'H', None, None, 'H' ],
102 : [ 1002, 1, 'He', None, None, 'He' ],
104 : [ 1018, 1, 'Ar', None, None, 'Ar' ],
}
#ISO_ID = OrderedDict([
#
# ( 1 , [ 1, 1, 'H2(16O)', 0.997317, 18.010565, 'H2O' ]),
# ( 2 , [ 1, 2, 'H2(18O)', 0.00199983, 20.014811, 'H2O' ]),
# ( 3 , [ 1, 3, 'H2(17O)', 0.000372, 19.01478, 'H2O' ]),
# ( 4 , [ 1, 4, 'HD(16O)', 0.00031069, 19.01674, 'H2O' ]),
# ( 5 , [ 1, 5, 'HD(18O)', 0.000000623, 21.020985, 'H2O' ]),
# ( 6 , [ 1, 6, 'HD(17O)', 0.000000116, 20.020956, 'H2O' ]),
# ( 7 , [ 2, 1, '(12C)(16O)2', 0.9842, 43.98983, 'CO2' ]),
# ( 8 , [ 2, 2, '(13C)(16O)2', 0.01106, 44.993185, 'CO2' ]),
# ( 9 , [ 2, 3, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2' ]),
# ( 10 , [ 2, 4, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2' ]),
# ( 11 , [ 2, 5, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2' ]),
# ( 12 , [ 2, 6, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2' ]),
# ( 13 , [ 2, 7, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2' ]),
# ( 14 , [ 2, 8, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2' ]),
# ( 15 , [ 2, 0, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2' ]),
# ( 120 , [ 2, 11, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2' ]),
# ( 121 , [ 2, 9, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2' ]),
# ( 16 , [ 3, 1, '(16O)3', 0.992901, 47.984745, 'O3' ]),
# ( 17 , [ 3, 2, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3' ]),
# ( 18 , [ 3, 3, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3' ]),
# ( 19 , [ 3, 4, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3' ]),
# ( 20 , [ 3, 5, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3' ]),
# ( 21 , [ 4, 1, '(14N)2(16O)', 0.990333, 44.001062, 'N2O' ]),
# ( 22 , [ 4, 2, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O' ]),
# ( 23 , [ 4, 3, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O' ]),
# ( 24 , [ 4, 4, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O' ]),
# ( 25 , [ 4, 5, '(14N)2(17O)', 0.000369, 45.005278, 'N2O' ]),
# ( 26 , [ 5, 1, '(12C)(16O)', 0.98654, 27.994915, 'CO' ]),
# ( 27 , [ 5, 2, '(13C)(16O)', 0.01108, 28.99827, 'CO' ]),
# ( 28 , [ 5, 3, '(12C)(18O)', 0.0019782, 29.999161, 'CO' ]),
# ( 29 , [ 5, 4, '(12C)(17O)', 0.000368, 28.99913, 'CO' ]),
# ( 30 , [ 5, 5, '(13C)(18O)', 0.00002222, 31.002516, 'CO' ]),
# ( 31 , [ 5, 6, '(13C)(17O)', 0.00000413, 30.002485, 'CO' ]),
# ( 32 , [ 6, 1, '(12C)H4', 0.98827, 16.0313, 'CH4' ]),
# ( 33 , [ 6, 2, '(13C)H4', 0.0111, 17.034655, 'CH4' ]),
# ( 34 , [ 6, 3, '(12C)H3D', 0.00061575, 17.037475, 'CH4' ]),
# ( 35 , [ 6, 4, '(13C)H3D', 0.0000049203, 18.04083, 'CH4' ]),
# ( 36 , [ 7, 1, '(16O)2', 0.995262, 31.98983, 'O2' ]),
# ( 37 , [ 7, 2, '(16O)(18O)', 0.00399141, 33.994076, 'O2' ]),
# ( 38 , [ 7, 3, '(16O)(17O)', 0.000742, 32.994045, 'O2' ]),
# ( 39 , [ 8, 1, '(14N)(16O)', 0.993974, 29.997989, 'NO' ]),
# ( 40 , [ 8, 2, '(15N)(16O)', 0.0036543, 30.995023, 'NO' ]),
# ( 41 , [ 8, 3, '(14N)(18O)', 0.00199312, 32.002234, 'NO' ]),
# ( 42 , [ 9, 1, '(32S)(16O)2', 0.94568, 63.961901, 'SO2' ]),
# ( 43 , [ 9, 2, '(34S)(16O)2', 0.04195, 65.957695, 'SO2' ]),
# ( 44 , [ 10, 1, '(14N)(16O)2', 0.991616, 45.992904, 'NO2' ]),
# ( 45 , [ 11, 1, '(14N)H3', 0.9958715, 17.026549, 'NH3' ]),
# ( 46 , [ 11, 2, '(15N)H3', 0.0036613, 18.023583, 'NH3' ]),
# ( 47 , [ 12, 1, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3' ]),
# ( 117 , [ 12, 2, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3' ]),
# ( 48 , [ 13, 1, '(16O)H', 0.997473, 17.00274, 'OH' ]),
# ( 49 , [ 13, 2, '(18O)H', 0.00200014, 19.006986, 'OH' ]),
# ( 50 , [ 13, 3, '(16O)D', 0.00015537, 18.008915, 'OH' ]),
# ( 51 , [ 14, 1, 'H(19F)', 0.99984425, 20.006229, 'HF' ]),
# ( 110 , [ 14, 2, 'D(19F)', 0.000115, 21.0125049978, 'HF' ]),
# ( 52 , [ 15, 1, 'H(35Cl)', 0.757587, 35.976678, 'HCl' ]),
# ( 53 , [ 15, 2, 'H(37Cl)', 0.242257, 37.973729, 'HCl' ]),
# ( 107 , [ 15, 3, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl' ]),
# ( 108 , [ 15, 4, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl' ]),
# ( 54 , [ 16, 1, 'H(79Br)', 0.50678, 79.92616, 'HBr' ]),
# ( 55 , [ 16, 2, 'H(81Br)', 0.49306, 81.924115, 'HBr' ]),
# ( 111 , [ 16, 3, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr' ]),
# ( 112 , [ 16, 4, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr' ]),
# ( 56 , [ 17, 1, 'H(127I)', 0.99984425, 127.912297, 'HI' ]),
# ( 113 , [ 17, 2, 'D(127I)', 0.000115, 128.918574778, 'HI' ]),
# ( 57 , [ 18, 1, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO' ]),
# ( 58 , [ 18, 2, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO' ]),
# ( 59 , [ 19, 1, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS' ]),
# ( 60 , [ 19, 2, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS' ]),
# ( 61 , [ 19, 3, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS' ]),
# ( 62 , [ 19, 4, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS' ]),
# ( 63 , [ 19, 5, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS' ]),
# ( 64 , [ 20, 1, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO' ]),
# ( 65 , [ 20, 2, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO' ]),
# ( 66 , [ 20, 3, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO' ]),
# ( 67 , [ 21, 1, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl' ]),
# ( 68 , [ 21, 2, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl' ]),
# ( 69 , [ 22, 1, '(14N)2', 0.9926874, 28.006147, 'N2' ]),
# ( 118 , [ 22, 2, '(14N)(15N)', 0.0072535, 29.997989, 'N2' ]),
# ( 70 , [ 23, 1, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN' ]),
# ( 71 , [ 23, 2, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN' ]),
# ( 72 , [ 23, 3, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN' ]),
# ( 73 , [ 24, 1, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl' ]),
# ( 74 , [ 24, 2, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl' ]),
# ( 75 , [ 25, 1, 'H2(16O)2', 0.994952, 34.00548, 'H2O2' ]),
# ( 76 , [ 26, 1, '(12C)2H2', 0.9776, 26.01565, 'C2H2' ]),
# ( 77 , [ 26, 2, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2' ]),
# ( 105 , [ 26, 3, '(12C)2HD', 0.00030455, 27.021825, 'C2H2' ]),
# ( 78 , [ 27, 1, '(12C)2H6', 0.97699, 30.04695, 'C2H6' ]),
# ( 106 , [ 27, 2, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6' ]),
# ( 79 , [ 28, 1, '(31P)H3', 0.99953283, 33.997238, 'PH3' ]),
# ( 80 , [ 29, 1, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2' ]),
# ( 119 , [ 29, 2, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2' ]),
# ( 81 , [ 31, 1, 'H2(32S)', 0.94988, 33.987721, 'H2S' ]),
# ( 82 , [ 31, 2, 'H2(34S)', 0.04214, 35.983515, 'H2S' ]),
# ( 83 , [ 31, 3, 'H2(33S)', 0.007498, 34.987105, 'H2S' ]),
# ( 84 , [ 32, 1, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH' ]),
# ( 85 , [ 33, 1, 'H(16O)2', 0.995107, 32.997655, 'HO2' ]),
# ( 86 , [ 34, 1, '(16O)', 0.997628, 15.994915, 'O' ]),
# ( 87 , [ 36, 1, '(14N)(16O)+', 0.993974, 29.997989, 'NOp' ]),
# ( 88 , [ 37, 1, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr' ]),
# ( 89 , [ 37, 2, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr' ]),
# ( 90 , [ 38, 1, '(12C)2H4', 0.9773, 28.0313, 'C2H4' ]),
# ( 91 , [ 38, 2, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4' ]),
# ( 92 , [ 39, 1, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH' ]),
# ( 93 , [ 40, 1, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br' ]),
# ( 94 , [ 40, 2, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br' ]),
# ( 95 , [ 41, 1, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN' ]),
# ( 96 , [ 42, 1, '(12C)(19F)4', 0.9893, 87.993616, 'CF4' ]),
# ( 116 , [ 43, 1, '(12C)4H2', 0.955998, 50.01565, 'C4H2' ]),
# ( 109 , [ 44, 1, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N' ]),
# ( 103 , [ 45, 1, 'H2', 0.999688, 2.01565, 'H2' ]),
# ( 115 , [ 45, 2, 'HD', 0.00022997, 3.021825, 'H2' ]),
# ( 97 , [ 46, 1, '(12C)(32S)', 0.939624, 43.971036, 'CS' ]),
# ( 98 , [ 46, 2, '(12C)(34S)', 0.0416817, 45.966787, 'CS' ]),
# ( 99 , [ 46, 3, '(13C)(32S)', 0.0105565, 44.974368, 'CS' ]),
# ( 100 , [ 46, 4, '(12C)(33S)', 0.00741668, 44.970399, 'CS' ]),
# ( 114 , [ 47, 1, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3' ]),
# ( 101 , [ 1001, 1, 'H', None, None, 'H' ]),
# ( 102 , [ 1002, 1, 'He', None, None, 'He' ]),
# ( 104 , [ 1018, 1, 'Ar', None, None, 'Ar' ]),
#
#])
# Positions of the fields inside each record list of the ISO dictionary
# (and, column-for-column, of the trailing part of the ISO_ID records).
ISO_INDEX = dict(
    id=0,         # HITRAN "global" isotopologue id
    iso_name=1,   # isotopologue name, e.g. 'H2(16O)'
    abundance=2,  # natural terrestrial abundance (None for auxiliary species)
    mass=3,       # molar mass in g/mol (None for auxiliary species)
    mol_name=4,   # molecule name, e.g. 'H2O'
)
# M I id iso_name abundance mass mol_name
# HITRAN isotopologue metadata, keyed by (M, I) where M is the HITRAN
# molecule number and I is the local isotopologue number within molecule M.
# Each value is a list indexed via ISO_INDEX:
#   [global id, isotopologue name, natural abundance, molar mass (g/mol),
#    molecule name].
# Abundance and mass are None for the auxiliary species (H, He, Ar,
# M >= 1001) that are not HITRAN line-by-line molecules.
ISO = {
( 1, 1 ): [ 1, 'H2(16O)', 0.997317, 18.010565, 'H2O' ],
( 1, 2 ): [ 2, 'H2(18O)', 0.00199983, 20.014811, 'H2O' ],
( 1, 3 ): [ 3, 'H2(17O)', 0.000372, 19.01478, 'H2O' ],
( 1, 4 ): [ 4, 'HD(16O)', 0.00031069, 19.01674, 'H2O' ],
( 1, 5 ): [ 5, 'HD(18O)', 0.000000623, 21.020985, 'H2O' ],
( 1, 6 ): [ 6, 'HD(17O)', 0.000000116, 20.020956, 'H2O' ],
( 2, 1 ): [ 7, '(12C)(16O)2', 0.9842, 43.98983, 'CO2' ],
( 2, 2 ): [ 8, '(13C)(16O)2', 0.01106, 44.993185, 'CO2' ],
( 2, 3 ): [ 9, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2' ],
( 2, 4 ): [ 10, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2' ],
( 2, 5 ): [ 11, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2' ],
( 2, 6 ): [ 12, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2' ],
( 2, 7 ): [ 13, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2' ],
( 2, 8 ): [ 14, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2' ],
# NOTE(review): I=0 below presumably encodes the 10th CO2 isotopologue
# (HITRAN writes it as the single character '0') — confirm against the
# HITRAN isotopologue table before changing.
( 2, 0 ): [ 15, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2' ],
( 2, 11 ): [ 120, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2' ],
( 2, 9 ): [ 121, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2' ],
( 3, 1 ): [ 16, '(16O)3', 0.992901, 47.984745, 'O3' ],
( 3, 2 ): [ 17, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3' ],
( 3, 3 ): [ 18, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3' ],
( 3, 4 ): [ 19, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3' ],
( 3, 5 ): [ 20, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3' ],
( 4, 1 ): [ 21, '(14N)2(16O)', 0.990333, 44.001062, 'N2O' ],
( 4, 2 ): [ 22, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O' ],
( 4, 3 ): [ 23, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O' ],
( 4, 4 ): [ 24, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O' ],
( 4, 5 ): [ 25, '(14N)2(17O)', 0.000369, 45.005278, 'N2O' ],
( 5, 1 ): [ 26, '(12C)(16O)', 0.98654, 27.994915, 'CO' ],
( 5, 2 ): [ 27, '(13C)(16O)', 0.01108, 28.99827, 'CO' ],
( 5, 3 ): [ 28, '(12C)(18O)', 0.0019782, 29.999161, 'CO' ],
( 5, 4 ): [ 29, '(12C)(17O)', 0.000368, 28.99913, 'CO' ],
( 5, 5 ): [ 30, '(13C)(18O)', 0.00002222, 31.002516, 'CO' ],
( 5, 6 ): [ 31, '(13C)(17O)', 0.00000413, 30.002485, 'CO' ],
( 6, 1 ): [ 32, '(12C)H4', 0.98827, 16.0313, 'CH4' ],
( 6, 2 ): [ 33, '(13C)H4', 0.0111, 17.034655, 'CH4' ],
( 6, 3 ): [ 34, '(12C)H3D', 0.00061575, 17.037475, 'CH4' ],
( 6, 4 ): [ 35, '(13C)H3D', 0.0000049203, 18.04083, 'CH4' ],
( 7, 1 ): [ 36, '(16O)2', 0.995262, 31.98983, 'O2' ],
( 7, 2 ): [ 37, '(16O)(18O)', 0.00399141, 33.994076, 'O2' ],
( 7, 3 ): [ 38, '(16O)(17O)', 0.000742, 32.994045, 'O2' ],
( 8, 1 ): [ 39, '(14N)(16O)', 0.993974, 29.997989, 'NO' ],
( 8, 2 ): [ 40, '(15N)(16O)', 0.0036543, 30.995023, 'NO' ],
( 8, 3 ): [ 41, '(14N)(18O)', 0.00199312, 32.002234, 'NO' ],
( 9, 1 ): [ 42, '(32S)(16O)2', 0.94568, 63.961901, 'SO2' ],
( 9, 2 ): [ 43, '(34S)(16O)2', 0.04195, 65.957695, 'SO2' ],
( 10, 1 ): [ 44, '(14N)(16O)2', 0.991616, 45.992904, 'NO2' ],
( 11, 1 ): [ 45, '(14N)H3', 0.9958715, 17.026549, 'NH3' ],
( 11, 2 ): [ 46, '(15N)H3', 0.0036613, 18.023583, 'NH3' ],
( 12, 1 ): [ 47, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3' ],
( 12, 2 ): [ 117, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3' ],
( 13, 1 ): [ 48, '(16O)H', 0.997473, 17.00274, 'OH' ],
( 13, 2 ): [ 49, '(18O)H', 0.00200014, 19.006986, 'OH' ],
( 13, 3 ): [ 50, '(16O)D', 0.00015537, 18.008915, 'OH' ],
( 14, 1 ): [ 51, 'H(19F)', 0.99984425, 20.006229, 'HF' ],
( 14, 2 ): [ 110, 'D(19F)', 0.000115, 21.0125049978, 'HF' ],
( 15, 1 ): [ 52, 'H(35Cl)', 0.757587, 35.976678, 'HCl' ],
( 15, 2 ): [ 53, 'H(37Cl)', 0.242257, 37.973729, 'HCl' ],
( 15, 3 ): [ 107, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl' ],
( 15, 4 ): [ 108, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl' ],
( 16, 1 ): [ 54, 'H(79Br)', 0.50678, 79.92616, 'HBr' ],
( 16, 2 ): [ 55, 'H(81Br)', 0.49306, 81.924115, 'HBr' ],
( 16, 3 ): [ 111, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr' ],
( 16, 4 ): [ 112, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr' ],
( 17, 1 ): [ 56, 'H(127I)', 0.99984425, 127.912297, 'HI' ],
( 17, 2 ): [ 113, 'D(127I)', 0.000115, 128.918574778, 'HI' ],
( 18, 1 ): [ 57, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO' ],
( 18, 2 ): [ 58, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO' ],
( 19, 1 ): [ 59, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS' ],
( 19, 2 ): [ 60, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS' ],
( 19, 3 ): [ 61, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS' ],
( 19, 4 ): [ 62, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS' ],
( 19, 5 ): [ 63, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS' ],
( 20, 1 ): [ 64, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO' ],
( 20, 2 ): [ 65, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO' ],
( 20, 3 ): [ 66, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO' ],
( 21, 1 ): [ 67, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl' ],
( 21, 2 ): [ 68, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl' ],
( 22, 1 ): [ 69, '(14N)2', 0.9926874, 28.006147, 'N2' ],
( 22, 2 ): [ 118, '(14N)(15N)', 0.0072535, 29.997989, 'N2' ],
( 23, 1 ): [ 70, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN' ],
( 23, 2 ): [ 71, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN' ],
( 23, 3 ): [ 72, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN' ],
( 24, 1 ): [ 73, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl' ],
( 24, 2 ): [ 74, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl' ],
( 25, 1 ): [ 75, 'H2(16O)2', 0.994952, 34.00548, 'H2O2' ],
( 26, 1 ): [ 76, '(12C)2H2', 0.9776, 26.01565, 'C2H2' ],
( 26, 2 ): [ 77, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2' ],
( 26, 3 ): [ 105, '(12C)2HD', 0.00030455, 27.021825, 'C2H2' ],
( 27, 1 ): [ 78, '(12C)2H6', 0.97699, 30.04695, 'C2H6' ],
( 27, 2 ): [ 106, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6' ],
( 28, 1 ): [ 79, '(31P)H3', 0.99953283, 33.997238, 'PH3' ],
( 29, 1 ): [ 80, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2' ],
( 29, 2 ): [ 119, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2' ],
( 31, 1 ): [ 81, 'H2(32S)', 0.94988, 33.987721, 'H2S' ],
( 31, 2 ): [ 82, 'H2(34S)', 0.04214, 35.983515, 'H2S' ],
( 31, 3 ): [ 83, 'H2(33S)', 0.007498, 34.987105, 'H2S' ],
( 32, 1 ): [ 84, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH' ],
( 33, 1 ): [ 85, 'H(16O)2', 0.995107, 32.997655, 'HO2' ],
( 34, 1 ): [ 86, '(16O)', 0.997628, 15.994915, 'O' ],
( 36, 1 ): [ 87, '(14N)(16O)+', 0.993974, 29.997989, 'NOp' ],
( 37, 1 ): [ 88, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr' ],
( 37, 2 ): [ 89, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr' ],
( 38, 1 ): [ 90, '(12C)2H4', 0.9773, 28.0313, 'C2H4' ],
( 38, 2 ): [ 91, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4' ],
( 39, 1 ): [ 92, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH' ],
( 40, 1 ): [ 93, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br' ],
( 40, 2 ): [ 94, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br' ],
( 41, 1 ): [ 95, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN' ],
( 42, 1 ): [ 96, '(12C)(19F)4', 0.9893, 87.993616, 'CF4' ],
( 43, 1 ): [ 116, '(12C)4H2', 0.955998, 50.01565, 'C4H2' ],
( 44, 1 ): [ 109, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N' ],
( 45, 1 ): [ 103, 'H2', 0.999688, 2.01565, 'H2' ],
( 45, 2 ): [ 115, 'HD', 0.00022997, 3.021825, 'H2' ],
( 46, 1 ): [ 97, '(12C)(32S)', 0.939624, 43.971036, 'CS' ],
( 46, 2 ): [ 98, '(12C)(34S)', 0.0416817, 45.966787, 'CS' ],
( 46, 3 ): [ 99, '(13C)(32S)', 0.0105565, 44.974368, 'CS' ],
( 46, 4 ): [ 100, '(12C)(33S)', 0.00741668, 44.970399, 'CS' ],
( 47, 1 ): [ 114, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3' ],
# Auxiliary broadening species (not HITRAN line-by-line molecules):
( 1001, 1 ): [ 101, 'H', None, None, 'H' ],
( 1002, 1 ): [ 102, 'He', None, None, 'He' ],
( 1018, 1 ): [ 104, 'Ar', None, None, 'Ar' ],
}
#ISO = OrderedDict([
#
#(( 1, 1 ), [ 1, 'H2(16O)', 0.997317, 18.010565, 'H2O' ]),
#(( 1, 2 ), [ 2, 'H2(18O)', 0.00199983, 20.014811, 'H2O' ]),
#(( 1, 3 ), [ 3, 'H2(17O)', 0.000372, 19.01478, 'H2O' ]),
#(( 1, 4 ), [ 4, 'HD(16O)', 0.00031069, 19.01674, 'H2O' ]),
#(( 1, 5 ), [ 5, 'HD(18O)', 0.000000623, 21.020985, 'H2O' ]),
#(( 1, 6 ), [ 6, 'HD(17O)', 0.000000116, 20.020956, 'H2O' ]),
#(( 2, 1 ), [ 7, '(12C)(16O)2', 0.9842, 43.98983, 'CO2' ]),
#(( 2, 2 ), [ 8, '(13C)(16O)2', 0.01106, 44.993185, 'CO2' ]),
#(( 2, 3 ), [ 9, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2' ]),
#(( 2, 4 ), [ 10, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2' ]),
#(( 2, 5 ), [ 11, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2' ]),
#(( 2, 6 ), [ 12, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2' ]),
#(( 2, 7 ), [ 13, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2' ]),
#(( 2, 8 ), [ 14, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2' ]),
#(( 2, 0 ), [ 15, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2' ]),
#(( 2, 11 ), [ 120, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2' ]),
#(( 2, 9 ), [ 121, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2' ]),
#(( 3, 1 ), [ 16, '(16O)3', 0.992901, 47.984745, 'O3' ]),
#(( 3, 2 ), [ 17, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3' ]),
#(( 3, 3 ), [ 18, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3' ]),
#(( 3, 4 ), [ 19, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3' ]),
#(( 3, 5 ), [ 20, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3' ]),
#(( 4, 1 ), [ 21, '(14N)2(16O)', 0.990333, 44.001062, 'N2O' ]),
#(( 4, 2 ), [ 22, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O' ]),
#(( 4, 3 ), [ 23, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O' ]),
#(( 4, 4 ), [ 24, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O' ]),
#(( 4, 5 ), [ 25, '(14N)2(17O)', 0.000369, 45.005278, 'N2O' ]),
#(( 5, 1 ), [ 26, '(12C)(16O)', 0.98654, 27.994915, 'CO' ]),
#(( 5, 2 ), [ 27, '(13C)(16O)', 0.01108, 28.99827, 'CO' ]),
#(( 5, 3 ), [ 28, '(12C)(18O)', 0.0019782, 29.999161, 'CO' ]),
#(( 5, 4 ), [ 29, '(12C)(17O)', 0.000368, 28.99913, 'CO' ]),
#(( 5, 5 ), [ 30, '(13C)(18O)', 0.00002222, 31.002516, 'CO' ]),
#(( 5, 6 ), [ 31, '(13C)(17O)', 0.00000413, 30.002485, 'CO' ]),
#(( 6, 1 ), [ 32, '(12C)H4', 0.98827, 16.0313, 'CH4' ]),
#(( 6, 2 ), [ 33, '(13C)H4', 0.0111, 17.034655, 'CH4' ]),
#(( 6, 3 ), [ 34, '(12C)H3D', 0.00061575, 17.037475, 'CH4' ]),
#(( 6, 4 ), [ 35, '(13C)H3D', 0.0000049203, 18.04083, 'CH4' ]),
#(( 7, 1 ), [ 36, '(16O)2', 0.995262, 31.98983, 'O2' ]),
#(( 7, 2 ), [ 37, '(16O)(18O)', 0.00399141, 33.994076, 'O2' ]),
#(( 7, 3 ), [ 38, '(16O)(17O)', 0.000742, 32.994045, 'O2' ]),
#(( 8, 1 ), [ 39, '(14N)(16O)', 0.993974, 29.997989, 'NO' ]),
#(( 8, 2 ), [ 40, '(15N)(16O)', 0.0036543, 30.995023, 'NO' ]),
#(( 8, 3 ), [ 41, '(14N)(18O)', 0.00199312, 32.002234, 'NO' ]),
#(( 9, 1 ), [ 42, '(32S)(16O)2', 0.94568, 63.961901, 'SO2' ]),
#(( 9, 2 ), [ 43, '(34S)(16O)2', 0.04195, 65.957695, 'SO2' ]),
#(( 10, 1 ), [ 44, '(14N)(16O)2', 0.991616, 45.992904, 'NO2' ]),
#(( 11, 1 ), [ 45, '(14N)H3', 0.9958715, 17.026549, 'NH3' ]),
#(( 11, 2 ), [ 46, '(15N)H3', 0.0036613, 18.023583, 'NH3' ]),
#(( 12, 1 ), [ 47, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3' ]),
#(( 12, 2 ), [ 117, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3' ]),
#(( 13, 1 ), [ 48, '(16O)H', 0.997473, 17.00274, 'OH' ]),
#(( 13, 2 ), [ 49, '(18O)H', 0.00200014, 19.006986, 'OH' ]),
#(( 13, 3 ), [ 50, '(16O)D', 0.00015537, 18.008915, 'OH' ]),
#(( 14, 1 ), [ 51, 'H(19F)', 0.99984425, 20.006229, 'HF' ]),
#(( 14, 2 ), [ 110, 'D(19F)', 0.000115, 21.0125049978, 'HF' ]),
#(( 15, 1 ), [ 52, 'H(35Cl)', 0.757587, 35.976678, 'HCl' ]),
#(( 15, 2 ), [ 53, 'H(37Cl)', 0.242257, 37.973729, 'HCl' ]),
#(( 15, 3 ), [ 107, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl' ]),
#(( 15, 4 ), [ 108, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl' ]),
#(( 16, 1 ), [ 54, 'H(79Br)', 0.50678, 79.92616, 'HBr' ]),
#(( 16, 2 ), [ 55, 'H(81Br)', 0.49306, 81.924115, 'HBr' ]),
#(( 16, 3 ), [ 111, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr' ]),
#(( 16, 4 ), [ 112, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr' ]),
#(( 17, 1 ), [ 56, 'H(127I)', 0.99984425, 127.912297, 'HI' ]),
#(( 17, 2 ), [ 113, 'D(127I)', 0.000115, 128.918574778, 'HI' ]),
#(( 18, 1 ), [ 57, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO' ]),
#(( 18, 2 ), [ 58, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO' ]),
#(( 19, 1 ), [ 59, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS' ]),
#(( 19, 2 ), [ 60, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS' ]),
#(( 19, 3 ), [ 61, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS' ]),
#(( 19, 4 ), [ 62, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS' ]),
#(( 19, 5 ), [ 63, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS' ]),
#(( 20, 1 ), [ 64, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO' ]),
#(( 20, 2 ), [ 65, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO' ]),
#(( 20, 3 ), [ 66, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO' ]),
#(( 21, 1 ), [ 67, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl' ]),
#(( 21, 2 ), [ 68, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl' ]),
#(( 22, 1 ), [ 69, '(14N)2', 0.9926874, 28.006147, 'N2' ]),
#(( 22, 2 ), [ 118, '(14N)(15N)', 0.0072535, 29.997989, 'N2' ]),
#(( 23, 1 ), [ 70, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN' ]),
#(( 23, 2 ), [ 71, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN' ]),
#(( 23, 3 ), [ 72, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN' ]),
#(( 24, 1 ), [ 73, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl' ]),
#(( 24, 2 ), [ 74, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl' ]),
#(( 25, 1 ), [ 75, 'H2(16O)2', 0.994952, 34.00548, 'H2O2' ]),
#(( 26, 1 ), [ 76, '(12C)2H2', 0.9776, 26.01565, 'C2H2' ]),
#(( 26, 2 ), [ 77, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2' ]),
#(( 26, 3 ), [ 105, '(12C)2HD', 0.00030455, 27.021825, 'C2H2' ]),
#(( 27, 1 ), [ 78, '(12C)2H6', 0.97699, 30.04695, 'C2H6' ]),
#(( 27, 2 ), [ 106, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6' ]),
#(( 28, 1 ), [ 79, '(31P)H3', 0.99953283, 33.997238, 'PH3' ]),
#(( 29, 1 ), [ 80, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2' ]),
#(( 29, 2 ), [ 119, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2' ]),
#(( 31, 1 ), [ 81, 'H2(32S)', 0.94988, 33.987721, 'H2S' ]),
#(( 31, 2 ), [ 82, 'H2(34S)', 0.04214, 35.983515, 'H2S' ]),
#(( 31, 3 ), [ 83, 'H2(33S)', 0.007498, 34.987105, 'H2S' ]),
#(( 32, 1 ), [ 84, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH' ]),
#(( 33, 1 ), [ 85, 'H(16O)2', 0.995107, 32.997655, 'HO2' ]),
#(( 34, 1 ), [ 86, '(16O)', 0.997628, 15.994915, 'O' ]),
#(( 36, 1 ), [ 87, '(14N)(16O)+', 0.993974, 29.997989, 'NOp' ]),
#(( 37, 1 ), [ 88, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr' ]),
#(( 37, 2 ), [ 89, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr' ]),
#(( 38, 1 ), [ 90, '(12C)2H4', 0.9773, 28.0313, 'C2H4' ]),
#(( 38, 2 ), [ 91, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4' ]),
#(( 39, 1 ), [ 92, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH' ]),
#(( 40, 1 ), [ 93, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br' ]),
#(( 40, 2 ), [ 94, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br' ]),
#(( 41, 1 ), [ 95, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN' ]),
#(( 42, 1 ), [ 96, '(12C)(19F)4', 0.9893, 87.993616, 'CF4' ]),
#(( 43, 1 ), [ 116, '(12C)4H2', 0.955998, 50.01565, 'C4H2' ]),
#(( 44, 1 ), [ 109, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N' ]),
#(( 45, 1 ), [ 103, 'H2', 0.999688, 2.01565, 'H2' ]),
#(( 45, 2 ), [ 115, 'HD', 0.00022997, 3.021825, 'H2' ]),
#(( 46, 1 ), [ 97, '(12C)(32S)', 0.939624, 43.971036, 'CS' ]),
#(( 46, 2 ), [ 98, '(12C)(34S)', 0.0416817, 45.966787, 'CS' ]),
#(( 46, 3 ), [ 99, '(13C)(32S)', 0.0105565, 44.974368, 'CS' ]),
#(( 46, 4 ), [ 100, '(12C)(33S)', 0.00741668, 44.970399, 'CS' ]),
#(( 47, 1 ), [ 114, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3' ]),
#(( 1001, 1 ), [ 101, 'H', None, None, 'H' ]),
#(( 1002, 1 ), [ 102, 'He', None, None, 'He' ]),
#(( 1018, 1 ), [ 104, 'Ar', None, None, 'Ar' ]),
#
#])
def print_iso():
    """Print a formatted table of the ISO dictionary, one row per (M, I) key."""
    print('The dictionary "ISO" contains information on isotopologues in HITRAN\n')
    print('  M I id iso_name abundance mass mol_name')
    row_fmt = '%4i %4i : %5i %25s %10f %10f %15s'
    for (mol_num, iso_num), record in ISO.items():
        abundance = record[ISO_INDEX['abundance']]
        mass = record[ISO_INDEX['mass']]
        # Auxiliary species carry None for abundance/mass; print -1 so the
        # %f conversions do not fail.
        print(row_fmt % (mol_num, iso_num,
                         record[ISO_INDEX['id']],
                         record[ISO_INDEX['iso_name']],
                         abundance if abundance else -1,
                         mass if mass else -1,
                         record[ISO_INDEX['mol_name']]))
def print_iso_id():
    """Print a formatted table of ISO_ID, keyed by the HITRAN global isotopologue id."""
    print('The dictionary "ISO_ID" contains information on "global" IDs of isotopologues in HITRAN\n')
    print('  id M I iso_name abundance mass mol_name')
    row_fmt = '%5i : %4i %4i %25s %15.10f %10f %15s'
    for global_id, record in ISO_ID.items():
        abundance = record[ISO_ID_INDEX['abundance']]
        mass = record[ISO_ID_INDEX['mass']]
        # None entries (auxiliary species) are rendered as -1 so the %f
        # conversions do not fail.
        print(row_fmt % (global_id,
                         record[ISO_ID_INDEX['M']],
                         record[ISO_ID_INDEX['I']],
                         record[ISO_ID_INDEX['iso_name']],
                         abundance if abundance else -1,
                         mass if mass else -1,
                         record[ISO_ID_INDEX['mol_name']]))
# Help-topic token: pass to getHelp() to list the available line-profile functions.
profiles = 'profiles'
def print_profiles():
    """List the line-profile functions available in this module."""
    entries = (
        '   HT      : PROFILE_HT',
        '   Voigt   : PROFILE_VOIGT',
        '   Lorentz : PROFILE_LORENTZ',
        '   Doppler : PROFILE_DOPPLER',
    )
    print('Profiles available:')
    for entry in entries:
        print(entry)
# Help-topic token: pass to getHelp() to list the available instrumental (slit) functions.
slit_functions = 'slit_functions'
def print_slit_functions():
    """List the instrumental (slit) functions available for spectral convolution."""
    entries = (
        '  RECTANGULAR : SLIT_RECTANGULAR',
        '  TRIANGULAR : SLIT_TRIANGULAR',
        '  GAUSSIAN : SLIT_GAUSSIAN',
        '  DIFFRACTION : SLIT_DIFFRACTION',
        '  MICHELSON : SLIT_MICHELSON',
        '  DISPERSION/LORENTZ : SLIT_DISPERSION',
    )
    for entry in entries:
        print(entry)
def getHelp__BAK(arg=None):
    """Show help: with no argument, list the available topics; otherwise
    dispatch to the matching topic printer or fall back to builtin help()."""
    if not arg:
        print('getHelp( ... )')
        print('---------------------')
        topics = (
            'db_begin', 'db_commit', 'tableList', 'describe', 'select',
            'sort', 'group', 'extractColumn', 'getColumn', 'getColumns',
            'dropTable', 'absorptionCoefficient_HT',
            'absorptionCoefficient_Voigt', 'absorptionCoefficient_Lorentz',
            'absorptionCoefficient_Doppler', 'transmittanceSpectrum',
            'absorptionSpectrum', 'radianceSpectrum', 'partitionSum',
            'profiles', 'slit_functions', 'convolveSpectrum', 'ISO_ID',
            'read_hotw', 'getStickXY', 'abundance', 'molecularMass',
            'moleculeName', 'isotopologueName',
        )
        for topic in topics:
            print(topic)
        return
    # Dispatch on identity of the known topic objects (order preserved
    # from the original if/elif chain).
    if arg == ISO:
        print_iso()
    elif arg == ISO_ID:
        print_iso_id()
    elif arg == profiles:
        print_profiles()
    elif arg == slit_functions:
        print_slit_functions()
    else:
        help(arg)
# Help-topic tokens recognized by getHelp() for the embedded tutorial pages.
tutorial, units, index, data, spectra, plotting, python = (
    'tutorial', 'units', 'index', 'data', 'spectra', 'plotting', 'python')
python_tutorial_text = \
"""
THIS TUTORIAL IS TAKEN FROM http://www.stavros.io/tutorials/python/
AUTHOR: Stavros Korokithakis
----- LEARN PYTHON IN 10 MINUTES -----
PRELIMINARY STUFF
So, you want to learn the Python programming language but can't find a concise
and yet full-featured tutorial. This tutorial will attempt to teach you Python in 10 minutes.
It's probably not so much a tutorial as it is a cross between a tutorial and a cheatsheet,
so it will just show you some basic concepts to start you off. Obviously, if you want to
really learn a language you need to program in it for a while. I will assume that you are
already familiar with programming and will, therefore, skip most of the non-language-specific stuff.
The important keywords will be highlighted so you can easily spot them. Also, pay attention because,
due to the terseness of this tutorial, some things will be introduced directly in code and only
briefly commented on.
PROPERTIES
Python is strongly typed (i.e. types are enforced), dynamically, implicitly typed (i.e. you don't
have to declare variables), case sensitive (i.e. var and VAR are two different variables) and
object-oriented (i.e. everything is an object).
GETTING HELP
Help in Python is always available right in the interpreter. If you want to know how an object works,
all you have to do is call help(<object>)! Also useful are dir(), which shows you all the object's methods,
and <object>.__doc__, which shows you its documentation string:
>>> help(5)
Help on int object:
(etc etc)
>>> dir(5)
['__abs__', '__add__', ...]
>>> abs.__doc__
'abs(number) -> number
Return the absolute value of the argument.'
SYNTAX
Python has no mandatory statement termination characters and blocks are specified by indentation.
Indent to begin a block, dedent to end one. Statements that expect an indentation level end in a colon (:).
Comments start with the pound (#) sign and are single-line, multi-line strings are used for multi-line comments.
Values are assigned (in fact, objects are bound to names) with the _equals_ sign ("="), and equality testing is
done using two _equals_ signs ("=="). You can increment/decrement values using the += and -= operators respectively
by the right-hand amount. This works on many datatypes, strings included. You can also use multiple variables on one
line. For example:
>>> myvar = 3
>>> myvar += 2
>>> myvar
5
>>> myvar -= 1
>>> myvar
4
\"\"\"This is a multiline comment.
The following lines concatenate the two strings.\"\"\"
>>> mystring = "Hello"
>>> mystring += " world."
>>> print mystring
Hello world.
# This swaps the variables in one line(!).
# It doesn't violate strong typing because values aren't
# actually being assigned, but new objects are bound to
# the old names.
>>> myvar, mystring = mystring, myvar
DATA TYPES
The data structures available in python are lists, tuples and dictionaries.
Sets are available in the sets library (but are built-in in Python 2.5 and later).
Lists are like one-dimensional arrays (but you can also have lists of other lists),
dictionaries are associative arrays (a.k.a. hash tables) and tuples are immutable
one-dimensional arrays (Python "arrays" can be of any type, so you can mix e.g. integers,
strings, etc in lists/dictionaries/tuples). The index of the first item in all array types is 0.
Negative numbers count from the end towards the beginning, -1 is the last item. Variables
can point to functions. The usage is as follows:
>>> sample = [1, ["another", "list"], ("a", "tuple")]
>>> mylist = ["List item 1", 2, 3.14]
>>> mylist[0] = "List item 1 again" # We're changing the item.
>>> mylist[-1] = 3.21 # Here, we refer to the last item.
>>> mydict = {"Key 1": "Value 1", 2: 3, "pi": 3.14}
>>> mydict["pi"] = 3.15 # This is how you change dictionary values.
>>> mytuple = (1, 2, 3)
>>> myfunction = len
>>> print myfunction(mylist)
3
You can access array ranges using a colon (:). Leaving the start index empty assumes the first item,
leaving the end index assumes the last item. Negative indexes count from the last item backwards
(thus -1 is the last item) like so:
>>> mylist = ["List item 1", 2, 3.14]
>>> print mylist[:]
['List item 1', 2, 3.1400000000000001]
>>> print mylist[0:2]
['List item 1', 2]
>>> print mylist[-3:-1]
['List item 1', 2]
>>> print mylist[1:]
[2, 3.14]
# Adding a third parameter, "step" will have Python step in
# N item increments, rather than 1.
# E.g., this will return the first item, then go to the third and
# return that (so, items 0 and 2 in 0-indexing).
>>> print mylist[::2]
['List item 1', 3.14]
STRINGS
Its strings can use either single or double quotation marks, and you can have quotation
marks of one kind inside a string that uses the other kind (i.e. "He said 'hello'." is valid).
Multiline strings are enclosed in _triple double (or single) quotes_ (\"\"\").
Python supports Unicode out of the box, using the syntax u"This is a unicode string".
To fill a string with values, you use the % (modulo) operator and a tuple.
Each %s gets replaced with an item from the tuple, left to right, and you can also use
dictionary substitutions, like so:
>>>print "Name: %s\
Number: %s\
String: %s" % (myclass.name, 3, 3 * "-")
Name: Poromenos
Number: 3
String: ---
strString = \"\"\"This is
a multiline
string.\"\"\"
# WARNING: Watch out for the trailing s in "%(key)s".
>>> print "This %(verb)s a %(noun)s." % {"noun": "test", "verb": "is"}
This is a test.
FLOW CONTROL STATEMENTS
Flow control statements are if, for, and while. There is no select; instead, use if.
Use for to enumerate through members of a list. To obtain a list of numbers,
use range(<number>). These statements' syntax is thus:
rangelist = range(10)
>>> print rangelist
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> for number in rangelist:
# Check if number is one of
# the numbers in the tuple.
if number in (3, 4, 7, 9):
# "Break" terminates a for without
# executing the "else" clause.
break
else:
# "Continue" starts the next iteration
# of the loop. It's rather useless here,
# as it's the last statement of the loop.
continue
else:
# The "else" clause is optional and is
# executed only if the loop didn't "break".
pass # Do nothing
>>> if rangelist[1] == 2:
print "The second item (lists are 0-based) is 2"
elif rangelist[1] == 3:
print "The second item (lists are 0-based) is 3"
else:
print "Dunno"
>>> while rangelist[1] == 1:
pass
FUNCTIONS
Functions are declared with the "def" keyword. Optional arguments are set in
the function declaration after the mandatory arguments by being assigned a default
value. For named arguments, the name of the argument is assigned a value.
Functions can return a tuple (and using tuple unpacking you can effectively return
multiple values). Lambda functions are ad hoc functions that are comprised of
a single statement. Parameters are passed by reference, but immutable types (tuples,
ints, strings, etc) *cannot be changed*. This is because only the memory location of
the item is passed, and binding another object to a variable discards the old one,
so immutable types are replaced. For example:
# Same as def funcvar(x): return x + 1
>>> funcvar = lambda x: x + 1
>>> print funcvar(1)
2
# an_int and a_string are optional, they have default values
# if one is not passed (2 and "A default string", respectively).
>>> def passing_example(a_list, an_int=2, a_string="A default string"):
a_list.append("A new item")
an_int = 4
return a_list, an_int, a_string
>>> my_list = [1, 2, 3]
>>> my_int = 10
>>> print passing_example(my_list, my_int)
([1, 2, 3, 'A new item'], 4, "A default string")
>>> my_list
[1, 2, 3, 'A new item']
>>> my_int
10
CLASSES
Python supports a limited form of multiple inheritance in classes.
Private variables and methods can be declared (by convention, this is not enforced
by the language) by adding at least two leading underscores and at most one trailing
one (e.g. "__spam"). We can also bind arbitrary names to class instances.
An example follows:
>>> class MyClass(object):
common = 10
def __init__(self):
self.myvariable = 3
def myfunction(self, arg1, arg2):
return self.myvariable
# This is the class instantiation
>>> classinstance = MyClass()
>>> classinstance.myfunction(1, 2)
3
# This variable is shared by all classes.
>>> classinstance2 = MyClass()
>>> classinstance.common
10
>>> classinstance2.common
10
# Note how we use the class name
# instead of the instance.
>>> MyClass.common = 30
>>> classinstance.common
30
>>> classinstance2.common
30
# This will not update the variable on the class,
# instead it will bind a new object to the old
# variable name.
>>> classinstance.common = 10
>>> classinstance.common
10
>>> classinstance2.common
30
>>> MyClass.common = 50
# This has not changed, because "common" is
# now an instance variable.
>>> classinstance.common
10
>>> classinstance2.common
50
# This class inherits from MyClass. The example
# class above inherits from "object", which makes
# it what's called a "new-style class".
# Multiple inheritance is declared as:
# class OtherClass(MyClass1, MyClass2, MyClassN)
>>> class OtherClass(MyClass):
# The "self" argument is passed automatically
# and refers to the class instance, so you can set
# instance variables as above, but from inside the class.
def __init__(self, arg1):
self.myvariable = 3
print arg1
>>> classinstance = OtherClass("hello")
hello
>>> classinstance.myfunction(1, 2)
3
# This class doesn't have a .test member, but
# we can add one to the instance anyway. Note
# that this will only be a member of classinstance.
>>> classinstance.test = 10
>>> classinstance.test
10
EXCEPTIONS
Exceptions in Python are handled with try-except [exceptionname] blocks:
>>> def some_function():
try:
# Division by zero raises an exception
10 / 0
except ZeroDivisionError:
print "Oops, invalid."
else:
# Exception didn't occur, we're good.
pass
finally:
# This is executed after the code block is run
# and all exceptions have been handled, even
# if a new exception is raised while handling.
print "We're done with that."
>>> some_function()
Oops, invalid.
We're done with that.
IMPORTING:
External libraries are used with the import [libname] keyword.
You can also use from [libname] import [funcname] for individual functions.
Here is an example:
>>> import random
>>> from time import clock
>>> randomint = random.randint(1, 100)
>>> print randomint
64
FILE I/O
Python has a wide array of libraries built in. As an example, here is how serializing
(converting data structures to strings using the pickle library) with file I/O is used:
>>> import pickle
>>> mylist = ["This", "is", 4, 13327]
# Open the file C:\\binary.dat for writing. The letter r before the
# filename string is used to prevent backslash escaping.
>>> myfile = open(r"C:\\binary.dat", "w")
>>> pickle.dump(mylist, myfile)
>>> myfile.close()
>>> myfile = open(r"C:\\text.txt", "w")
>>> myfile.write("This is a sample string")
>>> myfile.close()
>>> myfile = open(r"C:\\text.txt")
>>> print myfile.read()
'This is a sample string'
>>> myfile.close()
# Open the file for reading.
>>> myfile = open(r"C:\\binary.dat")
>>> loadedlist = pickle.load(myfile)
>>> myfile.close()
>>> print loadedlist
['This', 'is', 4, 13327]
MISCELLANEOUS
-> Conditions can be chained. 1 < a < 3 checks
that a is both less than 3 and greater than 1.
-> You can use del to delete variables or items in arrays.
-> List comprehensions provide a powerful way to create
and manipulate lists. They consist of an expression
followed by a for clause followed by zero or more
if or for clauses, like so:
>>> lst1 = [1, 2, 3]
>>> lst2 = [3, 4, 5]
>>> print [x * y for x in lst1 for y in lst2]
[3, 4, 5, 6, 8, 10, 9, 12, 15]
>>> print [x for x in lst1 if 4 > x > 1]
[2, 3]
# Check if a condition is true for any items.
# "any" returns true if any item in the list is true.
>>> any([i % 3 for i in [3, 3, 4, 4, 3]])
True
# This is because 4 % 3 = 1, and 1 is true, so any()
# returns True.
# Check for how many items a condition is true.
>>> sum(1 for i in [3, 3, 4, 4, 3] if i == 4)
2
>>> del lst1[0]
>>> print lst1
[2, 3]
>>> del lst1
-> Global variables are declared outside of functions
and can be read without any special declarations,
but if you want to write to them you must declare them
at the beginning of the function with the "global" keyword,
otherwise Python will bind that object to a new local
variable (be careful of that, it's a small catch that can
get you if you don't know it). For example:
>>> number = 5
>>> def myfunc():
# This will print 5.
print number
>>> def anotherfunc():
    # This raises an exception because the variable has not
    # been bound before printing. Python knows that an
    # object will be bound to it later and creates a new, local
    # object instead of accessing the global one.
print number
number = 3
>>> def yetanotherfunc():
global number
# This will correctly change the global.
number = 3
EPILOGUE
This tutorial is not meant to be an exhaustive list of all (or even a subset) of Python.
Python has a vast array of libraries and much much more functionality which you will
have to discover through other means, such as the excellent book Dive into Python.
I hope I have made your transition in Python easier. Please leave comments if you believe
there is something that could be improved or added or if there is anything else
you would like to see (classes, error handling, anything).
"""
def print_python_tutorial():
    """Display the embedded Python language tutorial in a pager.

    Pipes the module-level ``python_tutorial_text`` string through
    ``pydoc.pager`` so the user can scroll it interactively.
    """
    pydoc.pager(python_tutorial_text)
data_tutorial_text = \
"""
ACCESS YOUR DATA!
Welcome to tutorial on retrieving and processing the data from HITRANonline.
///////////////
/// PREFACE ///
///////////////
HITRANonline API is a set of routines in Python which is aimed to
provide a remote access to functionality and data given by a new project
HITRANonline (http://hitranazure.cloudapp.net).
At the present moment the API can download, filter and process data on
molecular and atomic line-by-line spectra which is provided by HITRANonline portal.
One of the major purposes of introducing API is extending a functionality
of the main site, particularly providing a possibility to calculate several
types of high- and low-resolution spectra based on a flexible HT lineshape.
Each feature of API is represented by a Python function with a set of parameters
providing a flexible approach to the task.
///////////////////////
/// FEATURE SUMMARY ///
///////////////////////
1) Downloading line-by-line data from the HITRANonline site to local database.
2) Filtering and processing the data in SQL-like fashion.
3) Conventional Python structures (lists, tuples, dictionaries) for representing
a spectroscopic data.
4) Possibility to use a large set of third-party Python libraries to work with a data
5) Python implementation of an HT (Hartmann-Tran [1]) lineshape which is used in spectra.
simulations. This lineshape can also be reduced to a number of conventional
line profiles such as Gaussian (Doppler), Lorentzian, Voigt, Rautian,
Speed-dependent Voigt and Rautian.
6) Python implementation of total internal partition sums (TIPS-2011 [2])
which is used in spectra simulations.
7) High-resolution spectra simulation accounting pressure,
temperature and optical path length. The following spectral functions
can be calculated:
a) absorption coefficient
b) absorption spectrum
c) transmittance spectrum
d) radiance spectrum
8) Low-resolution spectra simulation using a number of apparatus functions.
9) Possibility to extend with the user's functionality by adding custom lineshapes,
partitions sums and apparatus functions.
References:
[1] N.H. Ngo, D. Lisak, H. Tran, J.-M. Hartmann.
An isolated line-shape model to go beyond the Voigt profile in
spectroscopic databases and radiative transfer codes.
JQSRT, Volume 129, November 2013, Pages 89–100
http://dx.doi.org/10.1016/j.jqsrt.2013.05.034
[2] A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman.
Total internal partition sums to support planetary remote sensing.
Icarus, Volume 215, Issue 1, September 2011, Pages 391–400
http://dx.doi.org/10.1016/j.icarus.2011.06.004
_______________________________________________________________________
This tutorial will give you an insight of how to use HAPI for Python.
First, let's choose a folder for our local database. Every time you start
your Python project, you have to specify explicitly the name of the
database folder.
>>> db_begin('data')
So, let's download some data from the server and do some processing on it.
Suppose that we want to get line by line data on the main isotopologue of H2O.
For retrieving the data to the local database, the user has to specify the following parameters:
1) Name of the local table which will store the downloaded data.
2) Either a pair of molecule and isotopologue HITRAN numbers (M and I),
or a "global" isotopologue ID (iso_id).
3) Wavenumber range (nu_min and nu_max)
N.B. If you specify the name which already exists in the database,
the existing table with that name will be overridden.
To get additional information on function fetch,
call getHelp:
>>> getHelp(fetch)
...
To download the data, simply call the function "fetch".
This will establish a connection with the main server and get the data using
the parameters listed above:
>>> fetch('H2O',1,1,3400,4100)
BEGIN DOWNLOAD: H2O
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
...
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
Header written to data/H2O.header
END DOWNLOAD
Lines parsed: 7524
PROCESSED
The output is shown right after the console line ">>>".
To check the file that you've just downloaded you can open the database
folder. The new plain text file should have a name "H2O.data" and
it should contain line-by-line data in HITRAN format.
N.B. If we want several isotopologues in one table, we should
use fetch_by_ids instead of just fetch. Fetch_by_ids takes a "global"
isotopologue ID numbers as an input instead of HITRAN's "local" identification.
See getHelp(fetch_by_ids) to get more information on this.
To get a list of tables which are already in the database,
use tableList() function (it takes no arguments):
>>> tableList()
To learn about the table we just downloaded, let's use a function "describeTable".
>>> describeTable('H2O')
-----------------------------------------
H2O summary:
-----------------------------------------
Comment:
Contains lines for H2(16O)
in 3400.000-4100.000 wavenumber range
Number of rows: 7524
Table type: column-fixed
-----------------------------------------
PAR_NAME PAR_FORMAT
molec_id %2d
local_iso_id %1d
nu %12.6f
sw %10.3E
a %10.3E
gamma_air %5.4f
gamma_self %5.3f
elower %10.4f
n_air %4.2f
delta_air %8.6f
global_upper_quanta %15s
global_lower_quanta %15s
local_upper_quanta %15s
local_lower_quanta %15s
ierr %6s
iref %12s
line_mixing_flag %1s
gp %7.1f
gpp %7.1f
-----------------------------------------
This output tells how many rows are currently in the table H2O, which
wavenumber range was used by fetch(). Also this gives a basic information
about parameters stored in the table.
So, having the table downloaded, one can perform different operations on it
using API.
Here is a list of operations currently available with API:
1) FILTERING
2) OUTPUTTING
3) SORTING
4) GROUPING
////////////////////////////////
/// FILTERING AND OUTPUTTING ///
////////////////////////////////
The table data can be filtered with the help of select() function.
Use simple select() call to output the table content:
>>> select('H2O')
MI nu S A gair gsel E_nair dair ...
11 1000.288940 1.957E-24 2.335E-02.07100.350 1813.22270.680.008260 ...
11 1000.532321 2.190E-28 1.305E-05.04630.281 2144.04590.39-.011030 ...
...
This will display the list of line parameters containing in the table "H2O".
That's the simplest way of using the function select(). Full information
on control parameters can be obtained via getHelp(select) statement.
Suppose that we need lines from a table within some wavenumber range.
That's what filtering is for. Let's apply a simple range filter on a table.
>>> select('H2O',Conditions=('between','nu',4000,4100))
MI nu S A gair gsel E_nair dair
11 4000.188800 1.513E-25 1.105E-02.03340.298 1581.33570.51-.013910 ...
11 4000.204070 3.482E-24 8.479E-03.08600.454 586.47920.61-.007000 ...
11 4000.469910 3.268E-23 1.627E+00.05410.375 1255.91150.56-.013050 ...
......
As a result of this operation, we see a list of lines of H2O table,
whose wavenumbers lie between 4000 cm-1 and 4100 cm-1.
The condition is taken as an input parameter to API function "select".
To specify a subset of columns to display, use another control parameter -
ParameterNames:
>>> select('H2O',ParameterNames=('nu','sw'),Conditions=('between','nu',4000,4100))
The usage of ParameterNames is outlined below in the section "Specifying a list
of parameters". So far it worth mentioning that this parameter is a part
of a powerful tool for displaying and processing tables from database.
In the next section we will show how to create queries
with more complex conditions.
////////////////////////////
/// FILTERING CONDITIONS ///
////////////////////////////
Let's analyze the last example of filtering. Condition input variable is
as follows:
('between','nu',4000,4100)
Thus, this is a python list (or tuple), containing logical expressions
defined under column names of the table. For example, 'nu' is a name of
the column in 'H2O' table, and this column contains a transition wavenumber.
The structure of a simple condition is as follows:
(OPERATION,ARG1,ARG2,...)
Where OPERATION must be in a set of predefined operations (see below),
and ARG1,ARG2 etc. are the arguments for this operation.
Conditions can be nested, i.e. ARG can itself be a condition (see examples).
The following operations are available in select (case insensitive):
DESCRIPTION LITERAL EXAMPLE
---------------------------------------------------------------------------------
Range: 'RANGE','BETWEEN': ('BETWEEN','nu',0,1000)
Subset: 'IN','SUBSET': ('IN','local_iso_id',[1,2,3,4])
And: '&','&&','AND': ('AND',('<','nu',1000),('>','nu',10))
Or: '|','||','OR': ('OR',('>','nu',1000),('<','nu',10))
Not: '!','NOT': ('NOT',('IN','local_iso_id',[1,2,3]))
Less than: '<','LESS','LT': ('<','nu',1000)
More than: '>','MORE','MT': ('>','sw',1.0e-20)
Less or equal than: '<=','LESSOREQUAL','LTE': ('<=','local_iso_id',10)
More or equal than '>=','MOREOREQUAL','MTE': ('>=','sw',1e-20)
Equal: '=','==','EQ','EQUAL','EQUALS': ('<=','local_iso_id',10)
Not equal: '!=','<>','~=','NE','NOTEQUAL': ('!=','local_iso_id',1)
Summation: '+','SUM': ('+','v1','v2','v3')
Difference: '-','DIFF': ('-','nu','elow')
Multiplication: '*','MUL': ('*','sw',0.98)
Division: '/','DIV': ('/','A',2)
Cast to string: 'STR','STRING': ('STR','some_string')
Cast to Python list 'LIST': ('LIST',[1,2,3,4,5])
Match regexp 'MATCH','LIKE': ('MATCH','\w+','some string')
Search single match: 'SEARCH': ('SEARCH','\d \d \d','1 2 3 4')
Search all matches: 'FINDALL': ('FINDALL','\d','1 2 3 4 5')
Count within group: 'COUNT' : ('COUNT','local_iso_id')
---------------------------------------------------------------------------------
Let's create a query with more complex condition. Suppose that we are
interested in all lines between 3500 and 4000 with 1e-19 intensity cutoff.
The query will look like this:
>>> Cond = ('AND',('BETWEEN','nu',3500,4000),('>=','Sw',1e-19))
>>> select('H2O',Conditions=Cond,DestinationTableName='tmp')
Here, apart from other parameters, we have used a new parameter
DestinationTableName. This parameter contains a name of the table
where we want to put a result of the query. Thus we have chosen
a name 'tmp' for a new table.
////////////////////////////////////
/// ACCESSING COLUMNS IN A TABLE ///
////////////////////////////////////
To get an access to particular table column (or columns) all we need
is to get a column from a table and put it to Python variable.
For this purpose, there exist two functions:
getColumn(...)
getColumns(...)
The first one returns just one column at a time. The second one returns
a list of columns.
So, here are some examples of how to use both:
>>> nu1 = getColumn('H2O','nu')
>>> nu2,sw2 = getColumns('H2O',['nu','sw'])
N.B. If you don't remember exact names of columns in a particular table,
use describeTable to get an info on it's structure!
///////////////////////////////////////
/// SPECIFYING A LIST OF PARAMETERS ///
///////////////////////////////////////
Suppose that we want not only select a set of parameters/columns
from a table, but do a certain transformations with them (for example,
multiply column on a coefficient, or add one column to another etc...).
We can make it in two ways. First, we can extract a column from table
using one of the functions (getColumn or getColumns) and do the rest
in Python. The second way is to do it on the level of select.
The select function has a control parameter "ParameterNames", which
makes it possible to specify parameters we want to be selected,
and evaluate some simple arithmetic expressions with them.
Assume that we need only wavenumber and intensity from H2O table.
Also we need to scale an intensity to the unitary abundance. To do so,
we must divide an 'sw' parameter by it's natural abundance (0.99731) for
principal isotopologue of water).
Thus, we have to select two columns:
wavenumber (nu) and scaled intensity (sw/0.99731)
>>> select('H2O',)
////////////////////////////
/// SAVING QUERY TO DISK ///
////////////////////////////
To quickly save a result of a query to disk, the user can take an
advantage of an additional parameter "File".
If this parameter is presented in function call, then the query is
saved to file with the name which was specified in "File".
For example, select all lines from H2O and save the result in file 'H2O.txt':
>>> select('H2O',File='H2O.txt')
////////////////////////////////////////////
/// GETTING INFORMATION ON ISOTOPOLOGUES ///
////////////////////////////////////////////
API provides the following auxiliary information about isotopologues
present in HITRAN. Corresponding functions use the standard HITRAN
molecule-isotopologue notation:
1) Natural abundances
>>> abundance(mol_id,iso_id)
2) Molecular masses
>>> molecularMass(mol_id,iso_id)
3) Molecule names
>>> moleculeName(mol_id,iso_id)
4) Isotopologue names
>>> isotopologueName(mol_id,iso_id)
5) ISO_ID
>>> getHelp(ISO_ID)
The latter is a dictionary, which contain all information about
isotopologues concentrated in one place.
"""
def print_data_tutorial():
    """Display the embedded data-access tutorial in a pager.

    Pipes the module-level ``data_tutorial_text`` string through
    ``pydoc.pager`` so the user can scroll it interactively.
    """
    pydoc.pager(data_tutorial_text)
spectra_tutorial_text = \
"""
CALCULATE YOUR SPECTRA!
Welcome to tutorial on calculating a spectra from line-by-line data.
///////////////
/// PREFACE ///
///////////////
This tutorial will demonstrate how to use different lineshapes and partition
functions, and how to calculate synthetic spectra with respect to different
instruments. It will be shown how to combine different parameters of spectral
calculation to achieve better precision and performance for cross sections.
API provides a powerful tool to calculate cross-sections based on line-by-line
data containing in HITRAN. This features:
*) Python implementation of an HT (Hartmann-Tran [1]) lineshape which is used in
spectra simulations. This lineshape can also be reduced to a number of
conventional line profiles such as Gaussian (Doppler), Lorentzian, Voigt,
Rautian, Speed-dependent Voigt and Rautian.
*) Python implementation of total internal partition sums (TIPS-2011 [2])
which is used in spectra simulations.
*) High-resolution spectra simulation accounting pressure,
temperature and optical path length. The following spectral functions
can be calculated:
a) absorption coefficient
b) absorption spectrum
c) transmittance spectrum
d) radiance spectrum
*) Low-resolution spectra simulation using a number of apparatus functions.
*) Possibility to extend with the user's functionality by adding custom lineshapes,
partitions sums and apparatus functions.
*) An approach to function code is aimed to be flexible enough yet hopefully
intuitive.
References:
[1] N.H. Ngo, D. Lisak, H. Tran, J.-M. Hartmann.
An isolated line-shape model to go beyond the Voigt profile in
spectroscopic databases and radiative transfer codes.
JQSRT, Volume 129, November 2013, Pages 89–100
http://dx.doi.org/10.1016/j.jqsrt.2013.05.034
[2] A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman.
Total internal partition sums to support planetary remote sensing.
Icarus, Volume 215, Issue 1, September 2011, Pages 391–400
http://dx.doi.org/10.1016/j.icarus.2011.06.004
///////////////////////////
/// USING LINE PROFILES ///
///////////////////////////
Several lineshape (line profile) families are currently available:
1) Gaussian (Doppler) profile
2) Lorentzian profile
3) Voigt profile
4) HT profile (Hartmann-Tran)
Each profile has its own unique set of parameters. Normally one should
use profile parameters only in conjunction with their "native" profiles.
So, let's start exploring the available profiles using getHelp:
>>> getHelp(profiles)
Profiles available:
HTP : PROFILE_HT
Voigt : PROFILE_VOIGT
Lorentz : PROFILE_LORENTZ
Doppler : PROFILE_DOPPLER
Output gives all available profiles. We can get additional info on each
of them just by calling getHelp(ProfileName):
>>> getHelp(PROFILE_HT)
Line profiles, adapted for using with HAPI, are written in Python and
heavily using the numerical library "Numpy". This means that the user
can calculate multiple values of particular profile at once having just
pasted a numpy array as a wavenumber grid (array). Let's give a short
example of how to calculate HT profile on a numpy array.
>>> from numpy import arange
w0 = 1000.
GammaD = 0.005
Gamma0 = 0.2
Gamma2 = 0.01 * Gamma0
Delta0 = 0.002
Delta2 = 0.001 * Delta0
nuVC = 0.2
eta = 0.5
Dw = 1.
ww = arange(w0-Dw, w0+Dw, 0.01) # GRID WITH THE STEP 0.01
l1 = PROFILE_HT(w0,GammaD,Gamma0,Gamma2,Delta0,Delta2,nuVC,eta,ww)[0]
# now l1 contains values of HT profile calculates on the grid ww
On additional information about parameters see getHelp(PROFILE_HT).
It worth noting that PROFILE_HT returns 2 entities: real and imaginary part
of lineshape (as it described in the article given in preface). Apart from
HT, all other profiles return just one entity (the real part).
////////////////////////////
/// USING PARTITION SUMS ///
////////////////////////////
As it was mentioned in the preface to this tutorial, the partition sums
are taken from the TIPS-2011 (the link is given above). Partition sums
are taken for those isotopologues, which are present in HITRAN and in
TIPS-2011 simultaneously.
N.B. Partition sums are omitted for the following isotopologues which
are in HITRAN at the moment:
ID M I ISO MOL
--------------------------------------------------
117 12 2 H(15N)(16O)3 HNO3
110 14 2 D(19F) HF
107 15 3 D(35Cl) HCl
108 15 4 D(37Cl) HCl
111 16 3 D(79Br) HBr
112 16 4 D(81Br) HBr
113 17 2 D(127I) HI
118 22 2 (14N)(15N) N2
119 29 2 (13C)(16O)(19F)2 COF2
86 34 1 (16O) O
92 39 1 (12C)H3(16O)H CH3OH
114 47 1 (32S)(16O)3 SO3
--------------------------------------------------
The data on these isotopologues is not present in TIPS-2011 but is
present in HITRAN. We're planning to add these molecules after TIPS-2013
is released.
To calculate a partition sum for most of the isotopologues in HITRAN,
we will use a function partitionSum (use getHelp for detailed info).
Let's just mention that
The syntax is as follows: partitionSum(M,I,T), where M,I - standard
HITRAN molecule-isotopologue notation, T - definition of temperature
range.
Usecase 1: temperature is defined by a list:
>>> Q = partitionSum(1,1,[70,80,90])
Usecase 2: temperature is defined by bounds and the step:
>>> T,Q = partitionSum(1,1,[70,3000],step=1.0)
In the latter example we calculate a partition sum on a range of
temperatures from 70K to 3000K using a step 1.0 K, and having arrays
of temperature (T) and partition sum (Q) at the output.
///////////////////////////////////////////
/// CALCULATING ABSORPTION COEFFICIENTS ///
///////////////////////////////////////////
Currently API can calculate the following spectral function at arbitrary
thermodynamic parameters:
1) Absorption coefficient
2) Absorption spectrum
3) Transmittance spectrum
4) Radiance spectrum
All these functions can be calculated with or without accounting of
an instrument properties (apparatus function, resolution, path length etc...)
As it well known, the spectral functions such as absorption,
transmittance, and radiance spectra, are calculated on the basis
of the absorption coefficient. For that reason, the absorption coefficient
is the most important part of simulating a cross section. This part of
tutorial is devoted to demonstration how to calculate absorption
coefficient from the HITRAN line-by-line data. Here we give a brief
insight on basic parameters of calculation procedure, talk about some
useful practices and precautions.
To calculate an absorption coefficient, we can use one of the following
functions:
-> absorptionCoefficient_HT
-> absorptionCoefficient_Voigt
-> absorptionCoefficient_Lorentz
-> absorptionCoefficient_Doppler
Each of these function calculates cross sections using different
lineshapes (the names are quite self-explanatory).
You can get detailed information on using each of these functions
by calling getHelp(function_name).
Let's look more closely to the cross sections based on the Lorentz profile.
For doing that, let's have a table downloaded from HITRANonline.
# get data on CO2 main isotopologue in the range 2000-2100 cm-1
>>> fetch('CO2',2,1,2000,2100)
OK, now we're ready to run a fast example of how to calculate an
absorption coefficient cross section:
>>> nu,coef = absorptionCoefficient_Lorentz(SourceTables='CO2')
This example calculates a Lorentz cross section using the whole set of
lines in the "co2" table. This is the simplest possible way to use these
functions, because major part of parameters bound to their default values.
If we have matplotlib installed, then we can visualize it using a plotter:
>>> from pylab import plot
>>> plot(nu,coef)
API provides a flexible control over a calculation procedure. This control
can be achieved by using a number of input parameters. So, let's dig
into the depth of the settings.
The input parameters of absorptionCoefficient_Lorentz are as follows:
Name Default value
-------------------------------------------------------------------
SourceTables '__BUFFER__'
Components All isotopologues in SourceTables
partitionFunction PYTIPS
Environment {'T':296.,'p':1.}
OmegaRange depends on Components
OmegaStep 0.01 cm-1
OmegaWing 10 cm-1
OmegaWingHW 50 HWHMs
IntensityThreshold 0 cm/molec
GammaL 'gamma_air'
HITRAN_units True
File None
Format '%e %e'
-------------------------------------------------------------------
Next we'll give a brief explanation for each parameter. After each description
we'll make some notes about the usage of the correspondent parameter.
SourceTables: (required parameter)
List of source tables to take line-by-line data from.
NOTE: User must provide at least one table in the list.
Components: (optional parameter)
List of tuples (M,I,D) to consider in cross section calculation.
M here is a molecule number, I is an isotopologue number,
D is an abundance of the component.
NOTE: If this input contains more than one tuple, then the output
is an absorption coefficient for mixture of corresponding gases.
NOTE2: If omitted, then all data from the source tables is involved.
partitionFunction: (optional parameter)
Instance of partition function of the following format:
   Func(M,I,T), where Func - name of function, (M,I) - HITRAN numbers
for molecule and isotopologue, T - temperature.
Function must return only one output - value of partition sum.
   NOTE: Default value is PYTIPS - python version of TIPS-2011
Environment: (optional parameter)
Python dictionary containing value of pressure and temperature.
The format is as follows: Environment = {'p':pval,'T':tval},
where "pval" and "tval" are corresponding values in atm and K
respectively.
NOTE: Default value is {'p':1.0,'T':296.0}
OmegaRange: (optional parameter)
List containing minimum and maximum value of wavenumber to consider
   in cross-section calculation. All lines that are out of these bounds
   will be skipped. The format is as follows: OmegaRange=[wn_low,wn_high]
   NOTE: If this parameter is skipped, then min and max are taken
from the data from SourceTables.
OmegaStep: (optional parameter)
Value for the wavenumber step.
NOTE: Default value is 0.01 cm-1.
NOTE2: Normally user would want to take the step under 0.001 when
calculating absorption coefficient with Doppler profile
because of very narrow spectral lines.
OmegaWing: (optional parameter)
Absolute value of the line wing in cm-1, i.e. distance from the center
of each line to the most far point where the profile is considered
to be non zero.
NOTE: if omitted, then only OmegaWingHW is taken into account.
OmegaWingHW: (optional parameter)
Relative value of the line wing in halfwidths.
NOTE: The resulting wing is a maximum value from both OmegaWing and
OmegaWingHW.
IntensityThreshold: (optional parameter)
Absolute value of minimum intensity in cm/molec to consider.
NOTE: default value is 0.
GammaL: (optional parameter)
This is the name of broadening parameter to consider a "Lorentzian"
part in the Voigt profile. In the current 160-char format there is
   a choice between "gamma_air" and "gamma_self".
NOTE: If the table has custom columns with a broadening coefficients,
the user can specify the name of this column in GammaL. This
would let the function calculate an absorption with custom
broadening parameter.
HITRAN_units: (optional parameter)
   Logical flag for units, in which the absorption coefficient should be
   calculated. Currently, the choices are: cm^2/molec (if True) and
cm-1 (if False).
   NOTE: to calculate other spectral functions like transmittance,
radiance and absorption spectra, user should set HITRAN_units to False.
File: (optional parameter)
The name of the file to save the calculated absorption coefficient.
The file is saved only if this parameter is specified.
Format: (optional parameter)
C-style format for the text data to be saved. Default value is "%e %e".
NOTE: C-style output format specification (which are mostly valid for Python)
can be found, for instance, by the link:
http://www.gnu.org/software/libc/manual/html_node/Formatted-Output.html
N.B. Other functions such as absorptionCoefficient_Voigt(_HT,_Doppler) have
identical parameter sets so the description is the same for each function.
///////////////////////////////////////////////////////////////////
/// CALCULATING ABSORPTION, TRANSMITTANCE, AND RADIANCE SPECTRA ///
///////////////////////////////////////////////////////////////////
Let's calculate an absorption, transmittance, and radiance
spectra on the basis of absorption coefficient. In order to be consistent
with internal API's units, we need to have an absorption coefficient cm-1:
>>> nu,coef = absorptionCoefficient_Lorentz(SourceTables='CO2',HITRAN_units=False)
To calculate absorption spectrum, use the function absorptionSpectrum():
>>> nu,absorp = absorptionSpectrum(nu,coef)
To calculate transmittance spectrum, use function transmittanceSpectrum():
>>> nu,trans = transmittanceSpectrum(nu,coef)
To calculate radiance spectrum, use function radianceSpectrum():
>>> nu,radi = radianceSpectrum(nu,coef)
The last three commands used a default path length (1 m).
To see complete info on all three functions, look for section
"calculating spectra" in getHelp()
Generally, all these three functions use similar set of parameters:
Omegas: (required parameter)
Wavenumber grid to for spectrum.
AbsorptionCoefficient (optional parameter)
Absorption coefficient as input.
Environment={'T': 296.0, 'l': 100.0} (optional parameter)
Environmental parameters for calculating spectrum.
This parameter is a bit specific for each of functions:
For absorptionSpectrum() and transmittanceSpectrum() the default
value is as follows: Environment={'l': 100.0}
For transmittanceSpectrum() the default value, besides path length,
contains a temperature: Environment={'T': 296.0, 'l': 100.0}
NOTE: temperature must be equal to that which was used in
absorptionCoefficient_ routine!
File (optional parameter)
Filename of output file for calculated spectrum.
If omitted, then the file is not created.
Format (optional parameter)
C-style format for spectra output file.
NOTE: Default value is as follows: Format='%e %e'
///////////////////////////////////////
/// APPLYING INSTRUMENTAL FUNCTIONS ///
///////////////////////////////////////
For comparison of the theoretical spectra with the real-world
instruments output it's necessary to take into account instrumental resolution.
For this purpose HAPI has a function convolveSpectrum() which can emulate
spectra with lower resolution using custom instrumental functions.
The following instrumental functions are available:
1) Rectangular
2) Triangular
3) Gaussian
4) Diffraction
5) Michelson
6) Dispersion
7) Lorentz
To get a description of each instrumental function we can use getHelp():
>>> getHelp(slit_functions)
RECTANGULAR : SLIT_RECTANGULAR
TRIANGULAR : SLIT_TRIANGULAR
GAUSSIAN : SLIT_GAUSSIAN
DIFFRACTION : SLIT_DIFFRACTION
MICHELSON : SLIT_MICHELSON
DISPERSION/LORENTZ : SLIT_DISPERSION
For instance,
>>> getHelp(SLIT_MICHELSON)
... will give a detailed info about Michelson's instrumental function.
The function convolveSpectrum() convolutes a high-resolution spectrum
with one of supplied instrumental (slit) functions. The following
parameters of this function are provided:
Omega (required parameter)
Array of wavenumbers in high-resolution input spectrum.
CrossSection (required parameter)
Values of high-resolution input spectrum.
Resolution (optional parameter)
This parameter is passed to the slit function. It represents
the resolution of corresponding instrument.
NOTE: default value is 0.1 cm-1
AF_wing (optional parameter)
Width of an instrument function where it is considered non-zero.
NOTE: default value is 10.0 cm-1
SlitFunction (optional parameter)
Custom instrumental function to convolve with spectrum.
Format of the instrumental function must be as follows:
Func(x,g), where Func - function name, x - wavenumber,
g - resolution.
NOTE: if omitted, then the default value is SLIT_RECTANGULAR
Before using the convolution procedure it is worth giving some practical
advice and remarks:
1) Quality of a convolution depends on many things: quality of calculated
spectra, width of AF_wing and OmegaRange, Resolution, OmegaStep etc ...
Most of these factors are taken from previous stages of spectral calculation.
Right choice of all these factors is crucial for the correct computation.
2) Dispersion, Diffraction and Michelson AF's don't work well in narrow
wavenumber range because of their broad wings.
3) Generally one must consider OmegaRange and AF_wing as wide as possible.
4) After applying a convolution, the resulting spectral range for
the lower-resolution spectra is reduced by the doubled value of AF_wing.
For this reason, try to make an initial spectral range for high-resolution
spectrum (absorption, transmittance, radiance) sufficiently broad.
The following command will calculate a lower-resolution spectra from
the CO2 transmittance, which was calculated in a previous section.
The Spectral resolution is 1 cm-1,
>>> nu_,trans_,i1,i2,slit = convolveSpectrum(nu,trans)
The outputs are:
nu_, trans_ - wavenumbers and transmittance for the resulting
low-resolution spectrum.
i1,i2 - indexes for initial nu,trans spectrum denoting the part of
wavenumber range which was taken for lower resolution spectrum.
=> Low-res spectrum is calculated on nu[i1:i2]
Note that to achieve more flexibility, one has to specify most of
the optional parameters. For instance, a more complete call is as follows:
>>> nu_,trans_,i1,i2,slit = convolveSpectrum(nu,trans,SlitFunction=SLIT_MICHELSON,Resolution=1.0,AF_wing=20.0)
"""
def print_spectra_tutorial():
    """Display the spectral-functions tutorial text in an interactive pager."""
    # Delegate paging to pydoc so long text scrolls like a man page.
    pager = pydoc.pager
    pager(spectra_tutorial_text)
# Tutorial shown by getHelp('plotting'); typos in the user-visible text fixed.
plotting_tutorial_text = \
"""
PLOTTING THE SPECTRA WITH MATPLOTLIB
This tutorial briefly explains how to make plots using
the Matplotlib - Python library for plotting.
Prerequisites:
To run through this tutorial, user must have the following
Python libraries installed:
1) Matplotlib
Matplotlib can be obtained by the link http://matplotlib.org/
2) Numpy (required by HAPI itself)
Numpy can be obtained via pip:
sudo pip install numpy (under Linux and Mac)
pip install numpy (under Windows)
Or by the link http://www.numpy.org/
As an option, user can download one of the many scientific Python
distributions, such as Anaconda, Canopy etc...
So, let's calculate and plot the basic entities which are provided by HAPI.
To do so, we will do all necessary steps to download, filter and
calculate cross sections "from scratch". To demonstrate the different
possibilities of matplotlib, we will mostly use Pylab - a part of
Matplotlib with the interface similar to Matlab. Please note, that it's
not the only way to use Matplotlib. More information can be found on its site.
The next part is a step-by-step guide, demonstrating basic possibilities
of HITRANonline API in conjunction with Matplotlib.
First, do some preliminary imports:
>>> from hapi import *
>>> from pylab import show,plot,subplot,xlim,ylim,title,legend,xlabel,ylabel,hold
Start the database 'data':
>>> db_begin('data')
Download lines for main isotopologue of ozone in [3900,4050] range:
>>> fetch('O3',3,1,3900,4050)
Plot a stick spectrum using the function getStickXY()
>>> x,y = getStickXY('O3')
>>> plot(x,y); show()
Zoom in spectral region [4020,4035] cm-1:
>>> plot(x,y); xlim([4020,4035]); show()
Calculate and plot difference between Voigt and Lorentzian lineshape:
>>> wn = arange(3002,3008,0.01) # get wavenumber range of interest
>>> voi = PROFILE_VOIGT(3005,0.1,0.3,wn)[0] # calc Voigt
>>> lor = PROFILE_LORENTZ(3005,0.3,wn) # calc Lorentz
>>> diff = voi-lor # calc difference
>>> subplot(2,1,1) # upper panel
>>> plot(wn,voi,'red',wn,lor,'blue') # plot both profiles
>>> legend(['Voigt','Lorentz']) # show legend
>>> title('Voigt and Lorentz profiles') # show title
>>> subplot(2,1,2) # lower panel
>>> plot(wn,diff) # plot difference
>>> title('Voigt-Lorentz residual') # show title
>>> show() # show all figures
Calculate and plot absorption coefficients for ozone using Voigt
profile. Spectra are calculated for 4 cases of thermodynamic parameters:
(1 atm, 296 K), (5 atm, 296 K), (1 atm, 500 K), and (5 atm, 500 K)
>>> nu1,coef1 = absorptionCoefficient_Voigt(((3,1),),'O3',
OmegaStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':1,'T':296.})
>>> nu2,coef2 = absorptionCoefficient_Voigt(((3,1),),'O3',
OmegaStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':5,'T':296.})
>>> nu3,coef3 = absorptionCoefficient_Voigt(((3,1),),'O3',
OmegaStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':1,'T':500.})
>>> nu4,coef4 = absorptionCoefficient_Voigt(((3,1),),'O3',
OmegaStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':5,'T':500.})
>>> subplot(2,2,1); plot(nu1,coef1); title('O3 k(w): p=1 atm, T=296K')
>>> subplot(2,2,2); plot(nu2,coef2); title('O3 k(w): p=5 atm, T=296K')
>>> subplot(2,2,3); plot(nu3,coef3); title('O3 k(w): p=1 atm, T=500K')
>>> subplot(2,2,4); plot(nu4,coef4); title('O3 k(w): p=5 atm, T=500K')
>>> show()
Calculate and plot absorption, transmittance and radiance spectra for 1 atm
and 296K. Path length is set to 10 m.
>>> nu,absorp = absorptionSpectrum(nu1,coef1,Environment={'l':1000.})
>>> nu,transm = transmittanceSpectrum(nu1,coef1,Environment={'l':1000.})
>>> nu,radian = radianceSpectrum(nu1,coef1,Environment={'l':1000.,'T':296.})
>>> subplot(2,2,1); plot(nu1,coef1,'r'); title('O3 k(w): p=1 atm, T=296K')
>>> subplot(2,2,2); plot(nu,absorp,'g'); title('O3 absorption: p=1 atm, T=296K')
>>> subplot(2,2,3); plot(nu,transm,'b'); title('O3 transmittance: p=1 atm, T=296K')
>>> subplot(2,2,4); plot(nu,radian,'y'); title('O3 radiance: p=1 atm, T=296K')
>>> show()
Calculate and compare high resolution spectrum for O3 with lower resolution
spectrum convoluted with an instrumental function of ideal Michelson interferometer.
>>> nu_,trans_,i1,i2,slit = convolveSpectrum(nu,transm,SlitFunction=SLIT_MICHELSON,Resolution=1.0,AF_wing=20.0)
>>> plot(nu,transm,'red',nu_,trans_,'blue'); legend(['HI-RES','Michelson']); show()
"""
def print_plotting_tutorial():
    """Display the matplotlib plotting tutorial text in an interactive pager."""
    # Same paging mechanism as the other tutorial printers.
    pager = pydoc.pager
    pager(plotting_tutorial_text)
def getHelp(arg=None):
    """
    This function provides interactive manuals and tutorials.

    arg selects the topic: None (overview), 'tutorial', 'python',
    'data', 'spectra', 'plotting', 'index', one of the module-level
    objects ISO / ISO_ID / profiles / slit_functions, or any other
    object (falls back to the built-in help()).
    """
    # PEP 8: singletons such as None are compared with 'is', not '=='
    # (also avoids surprises from objects overriding __eq__).
    if arg is None:
        print('--------------------------------------------------------------')
        print('Hello, this is an interactive help system of HITRANonline API.')
        print('--------------------------------------------------------------')
        print('Run getHelp(.) with one of the following arguments:')
        print(' tutorial - interactive tutorials on HAPI')
        print(' units - units used in calculations')
        print(' index - index of available HAPI functions')
    elif arg=='tutorial':
        print('-----------------------------------')
        print('This is a tutorial section of help.')
        print('-----------------------------------')
        print('Please choose the subject of tutorial:')
        print(' data - downloading the data and working with it')
        print(' spectra - calculating spectral functions')
        print(' plotting - visualizing data with matplotlib')
        print(' python - Python quick start guide')
    elif arg=='python':
        print_python_tutorial()
    elif arg=='data':
        print_data_tutorial()
    elif arg=='spectra':
        print_spectra_tutorial()
    elif arg=='plotting':
        print_plotting_tutorial()
    elif arg=='index':
        print('------------------------------')
        print('FETCHING DATA:')
        print('------------------------------')
        print(' fetch')
        print(' fetch_by_ids')
        print('')
        print('------------------------------')
        print('WORKING WITH DATA:')
        print('------------------------------')
        print(' db_begin')
        print(' db_commit')
        print(' tableList')
        print(' describe')
        print(' select')
        print(' sort')
        print(' group')
        print(' extractColumns')
        print(' getColumn')
        print(' getColumns')
        print(' dropTable')
        print('')
        print('------------------------------')
        print('CALCULATING SPECTRA:')
        print('------------------------------')
        print(' profiles')
        print(' partitionSum')
        print(' absorptionCoefficient_HT')
        print(' absorptionCoefficient_Voigt')
        print(' absorptionCoefficient_Lorentz')
        print(' absorptionCoefficient_Doppler')
        print(' transmittanceSpectrum')
        print(' absorptionSpectrum')
        print(' radianceSpectrum')
        print('')
        print('------------------------------')
        print('CONVOLVING SPECTRA:')
        print('------------------------------')
        print(' convolveSpectrum')
        print(' slit_functions')
        print('')
        print('------------------------------')
        print('INFO ON ISOTOPOLOGUES:')
        print('------------------------------')
        print(' ISO_ID')
        print(' abundance')
        print(' molecularMass')
        print(' moleculeName')
        print(' isotopologueName')
        print('')
        print('------------------------------')
        print('MISCELLANEOUS:')
        print('------------------------------')
        print(' getStickXY')
        print(' read_hotw')
    elif arg == ISO:
        print_iso()
    elif arg == ISO_ID:
        print_iso_id()
    elif arg == profiles:
        print_profiles()
    elif arg == slit_functions:
        print_slit_functions()
    else:
        # Anything unrecognized: defer to Python's built-in help.
        help(arg)
# Natural (terrestrial) abundance lookup for a single isotopologue,
# keyed by HITRAN molecule number M and isotopologue number I.
def abundance(M,I):
    """
    INPUT PARAMETERS:
        M: HITRAN molecule number
        I: HITRAN isotopologue number
    OUTPUT PARAMETERS:
        Abundance: natural abundance
    ---
    DESCRIPTION:
        Return the natural (Earth) abundance of the given
        HITRAN isotopologue, taken from the ISO table.
    ---
    EXAMPLE OF USAGE:
        ab = abundance(1,1) # H2O
    ---
    """
    iso_record = ISO[(M, I)]
    return iso_record[ISO_INDEX['abundance']]
# Molecular mass lookup for a single isotopologue,
# keyed by HITRAN molecule number M and isotopologue number I.
def molecularMass(M,I):
    """
    INPUT PARAMETERS:
        M: HITRAN molecule number
        I: HITRAN isotopologue number
    OUTPUT PARAMETERS:
        MolMass: molecular mass
    ---
    DESCRIPTION:
        Return the molecular mass of the given HITRAN
        isotopologue, taken from the ISO table.
    ---
    EXAMPLE OF USAGE:
        mass = molecularMass(1,1) # H2O
    ---
    """
    iso_record = ISO[(M, I)]
    return iso_record[ISO_INDEX['mass']]
# Molecule name lookup; only the molecule number M is needed
# (the name is taken from the record of the first isotopologue).
def moleculeName(M):
    """
    INPUT PARAMETERS:
        M: HITRAN molecule number
    OUTPUT PARAMETERS:
        MolName: molecular name
    ---
    DESCRIPTION:
        Return the name of the HITRAN molecule M, read from the
        ISO table entry of its first isotopologue (M, 1).
    ---
    EXAMPLE OF USAGE:
        molname = moleculeName(1) # H2O
    ---
    """
    first_iso = ISO[(M, 1)]
    return first_iso[ISO_INDEX['mol_name']]
# Isotopologue name lookup,
# keyed by HITRAN molecule number M and isotopologue number I.
def isotopologueName(M,I):
    """
    INPUT PARAMETERS:
        M: HITRAN molecule number
        I: HITRAN isotopologue number
    OUTPUT PARAMETERS:
        IsoName: isotopologue name
    ---
    DESCRIPTION:
        Return the name of the given HITRAN isotopologue,
        taken from the ISO table.
    ---
    EXAMPLE OF USAGE:
        isoname = isotopologueName(1,1) # H2O
    ---
    """
    iso_record = ISO[(M, I)]
    return iso_record[ISO_INDEX['iso_name']]
# ----------------------- table list ----------------------------------
def tableList():
    """
    INPUT PARAMETERS:
        none
    OUTPUT PARAMETERS:
        TableList: a list of available tables
    ---
    DESCRIPTION:
        Return the names of all tables currently present
        in the opened database.
    ---
    EXAMPLE OF USAGE:
        lst = tableList()
    ---
    """
    # Thin public wrapper over the internal database layer.
    return getTableList()
# ----------------------- describe ----------------------------------
def describe(TableName):
    """
    INPUT PARAMETERS:
        TableName: name of the table to describe
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Print a summary of the table: parameter names,
        formats and wavenumber range.
    ---
    EXAMPLE OF USAGE:
        describe('sampletab')
    ---
    """
    # Thin public wrapper over the internal database layer.
    describeTable(TableName)
# ---------------------- /ISO.PY ---------------------------------------
def db_begin(db=None):
    """
    INPUT PARAMETERS:
        db: database name (optional)
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Open a database connection. The database lives in the
        folder named by db; when omitted, the folder 'data'
        is used.
    ---
    EXAMPLE OF USAGE:
        db_begin('bar')
    ---
    """
    # Thin public wrapper over the internal database layer.
    databaseBegin(db)
def db_commit():
    """
    INPUT PARAMETERS:
        none
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Commit all changes made to the opened database,
        saving every table into its corresponding file.
    ---
    EXAMPLE OF USAGE:
        db_commit()
    ---
    """
    # Thin public wrapper over the internal database layer.
    databaseCommit()
# ------------------ QUERY HITRAN ---------------------------------------
def comment(TableName,Comment):
    """Attach a comment string to the header of a cached local table."""
    header = LOCAL_TABLE_CACHE[TableName]['header']
    header['comment'] = Comment
def fetch_by_ids(TableName,iso_id_list,numin,numax):
    """
    INPUT PARAMETERS:
        TableName:   local table name to fetch in (required)
        iso_id_list: list of isotopologue id's    (required)
        numin:       lower wavenumber bound       (required)
        numax:       upper wavenumber bound       (required)
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Download line-by-line data from HITRANonline server
        and save it to local table. The input parameter iso_id_list
        contains list of "global" isotopologue Ids (see help on ISO_ID).
        Note: this function is required if user wants to download
        multiple species into single table.
    ---
    EXAMPLE OF USAGE:
        fetch_by_ids('water',[1,2,3,4],4000,4100)
    ---
    """
    # Accept a bare id as well as a list/tuple of ids.
    # isinstance is the idiomatic and subclass-safe check
    # (replaces the fragile type(...)-in-set comparison).
    if not isinstance(iso_id_list, (list, tuple)):
        iso_id_list = [iso_id_list]
    queryHITRAN(TableName,iso_id_list,numin,numax)
    iso_names = [ISO_ID[i][ISO_ID_INDEX['iso_name']] for i in iso_id_list]
    Comment = 'Contains lines for '+','.join(iso_names)
    Comment += ('\n in %.3f-%.3f wavenumber range' % (numin,numax))
    comment(TableName,Comment)
#def queryHITRAN(TableName,iso_id_list,numin,numax):
def fetch(TableName,M,I,numin,numax):
    """
    INPUT PARAMETERS:
        TableName: local table name to fetch in (required)
        M:         HITRAN molecule number       (required)
        I:         HITRAN isotopologue number   (required)
        numin:     lower wavenumber bound       (required)
        numax:     upper wavenumber bound       (required)
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Download line-by-line data from HITRANonline server
        and save it to local table. The input parameters M and I
        are the HITRAN molecule and isotopologue numbers.
        This function results in a table containing single
        isotopologue specie. To have multiple species in a
        single table use fetch_by_ids instead.
    ---
    EXAMPLE OF USAGE:
        fetch('HOH',1,1,4000,4100)
    ---
    """
    # Translate the (M, I) pair into the "global" isotopologue id
    # expected by queryHITRAN, then download the lines.
    iso_record = ISO[(M, I)]
    global_id = iso_record[ISO_INDEX['id']]
    queryHITRAN(TableName, [global_id], numin, numax)
    # Record what was fetched in the table's header comment.
    table_comment = 'Contains lines for ' + iso_record[ISO_INDEX['iso_name']]
    table_comment += ('\n in %.3f-%.3f wavenumber range' % (numin, numax))
    comment(TableName, table_comment)
# ------------------ partition sum --------------------------------------
# ------------------- LAGRANGE INTERPOLATION ----------------------
#def AtoB(aa,bb,A,B,npt)
def AtoB(aa,A,B,npt):
    """Lagrange 3- and 4-point interpolation (ported from Fortran TIPS).

    Arrays A and B hold the npt tabulated data points (abscissae and
    ordinates). Given aa, a value of the A variable, find and return
    the interpolated B value. A is expected to be sorted ascending;
    a 3-point (quadratic) formula is used near the array edges and a
    4-point (cubic) formula elsewhere.

    NOTE(review): if aa exceeds every A value, no bracketing node is
    found and 'bb' is never assigned — callers are expected to keep
    aa within the tabulated range (as the TIPS tables do).
    """
    for I in range(2,npt+1):
        if A[I-1] >= aa:
            if I < 3 or I == npt:
                # 3-point (quadratic) branch used at the array edges.
                J = I
                if I < 3: J = 3
                if I == npt: J = npt
                J = J-1 # zero index correction
                # Guard each divisor against exact coincidence of
                # abscissae (mirrors the original Fortran trick).
                A0D1=A[J-2]-A[J-1]
                if A0D1 == 0.0: A0D1=0.0001
                A0D2=A[J-2]-A[J]
                # BUGFIX: this guard previously assigned 0.0000, which
                # left a zero divisor in place and raised
                # ZeroDivisionError for duplicate abscissae. Use the
                # same 0.0001 fallback as every other guard.
                if A0D2 == 0.0: A0D2=0.0001
                A1D1=A[J-1]-A[J-2]
                if A1D1 == 0.0: A1D1=0.0001
                A1D2=A[J-1]-A[J]
                if A1D2 == 0.0: A1D2=0.0001
                A2D1=A[J]-A[J-2]
                if A2D1 == 0.0: A2D1=0.0001
                A2D2=A[J]-A[J-1]
                if A2D2 == 0.0: A2D2=0.0001
                # Quadratic Lagrange basis weights at aa.
                A0=(aa-A[J-1])*(aa-A[J])/(A0D1*A0D2)
                A1=(aa-A[J-2])*(aa-A[J])/(A1D1*A1D2)
                A2=(aa-A[J-2])*(aa-A[J-1])/(A2D1*A2D2)
                bb = A0*B[J-2] + A1*B[J-1] + A2*B[J]
            else:
                # 4-point (cubic) branch used in the array interior.
                J = I
                J = J-1 # zero index correction
                A0D1=A[J-2]-A[J-1]
                if A0D1 == 0.0: A0D1=0.0001
                A0D2=A[J-2]-A[J]
                if A0D2 == 0.0: A0D2=0.0001
                A0D3 = (A[J-2]-A[J+1])
                if A0D3 == 0.0: A0D3=0.0001
                A1D1=A[J-1]-A[J-2]
                if A1D1 == 0.0: A1D1=0.0001
                A1D2=A[J-1]-A[J]
                if A1D2 == 0.0: A1D2=0.0001
                A1D3 = A[J-1]-A[J+1]
                if A1D3 == 0.0: A1D3=0.0001
                A2D1=A[J]-A[J-2]
                if A2D1 == 0.0: A2D1=0.0001
                A2D2=A[J]-A[J-1]
                if A2D2 == 0.0: A2D2=0.0001
                A2D3 = A[J]-A[J+1]
                if A2D3 == 0.0: A2D3=0.0001
                A3D1 = A[J+1]-A[J-2]
                if A3D1 == 0.0: A3D1=0.0001
                A3D2 = A[J+1]-A[J-1]
                if A3D2 == 0.0: A3D2=0.0001
                A3D3 = A[J+1]-A[J]
                if A3D3 == 0.0: A3D3=0.0001
                # Cubic Lagrange basis weights at aa.
                A0=(aa-A[J-1])*(aa-A[J])*(aa-A[J+1])
                A0=A0/(A0D1*A0D2*A0D3)
                A1=(aa-A[J-2])*(aa-A[J])*(aa-A[J+1])
                A1=A1/(A1D1*A1D2*A1D3)
                A2=(aa-A[J-2])*(aa-A[J-1])*(aa-A[J+1])
                A2=A2/(A2D1*A2D2*A2D3)
                A3=(aa-A[J-2])*(aa-A[J-1])*(aa-A[J])
                A3=A3/(A3D1*A3D2*A3D3)
                bb = A0*B[J-2] + A1*B[J-1] + A2*B[J] + A3*B[J+1]
            break
    return bb
# --------------- ISOTOPOLOGUE HASH ----------------------
TIPS_ISO_HASH = {}
# --------------- STATISTICAL WEIGHT HASH ----------------------
TIPS_GSI_HASH = {}
# --------------- INTERPOLATION NODES ----------------------
Tdat = __FloatType__( [60., 85., 110., 135., 160., 185., 210., 235.,
260., 285., 310., 335., 360., 385., 410., 435., 460., 485.,
510., 535., 560., 585., 610., 635., 660., 685., 710., 735.,
760., 785., 810., 835., 860., 885., 910., 935., 960., 985.,
1010.,1035.,1060.,1085.,1110.,1135.,1160.,1185.,1210.,1235.,
1260.,1285.,1310.,1335.,1360.,1385.,1410.,1435.,1460.,1485.,
1510.,1535.,1560.,1585.,1610.,1635.,1660.,1685.,1710.,1735.,
1760.,1785.,1810.,1835.,1860.,1885.,1910.,1935.,1960.,1985.,
2010.,2035.,2060.,2085.,2110.,2135.,2160.,2185.,2210.,2235.,
2260.,2285.,2310.,2335.,2360.,2385.,2410.,2435.,2460.,2485.,
2510.,2535.,2560.,2585.,2610.,2635.,2660.,2685.,2710.,2735.,
2760.,2785.,2810.,2835.,2860.,2885.,2910.,2935.,2960.,2985.,
3010.] )
TIPS_NPT = len(Tdat)
# REMARK
# float32 gives exactly the same results as fortran TIPS, because
# all constants in the fortran code given as xx.xxE+-XX, i.e.
# in single precision. By this fact all unsignificant figures
# over single precision are filled with digital garbage
# --------------- H2O 161: M = 1, I = 1 ---------------------
M = 1
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.16824E+02, 0.27771E+02, 0.40408E+02,
0.54549E+02, 0.70054E+02, 0.86817E+02, 0.10475E+03, 0.12380E+03,
0.14391E+03, 0.16503E+03, 0.18714E+03, 0.21021E+03, 0.23425E+03,
0.25924E+03, 0.28518E+03, 0.31209E+03, 0.33997E+03, 0.36883E+03,
0.39870E+03, 0.42959E+03, 0.46152E+03, 0.49452E+03, 0.52860E+03,
0.56380E+03, 0.60015E+03, 0.63766E+03, 0.67637E+03, 0.71631E+03,
0.75750E+03, 0.79999E+03, 0.84380E+03, 0.88897E+03, 0.93553E+03,
0.98353E+03, 0.10330E+04, 0.10840E+04, 0.11365E+04, 0.11906E+04,
0.12463E+04, 0.13037E+04, 0.13628E+04, 0.14237E+04, 0.14863E+04,
0.15509E+04, 0.16173E+04, 0.16856E+04, 0.17559E+04, 0.18283E+04,
0.19028E+04, 0.19793E+04, 0.20581E+04, 0.21391E+04, 0.22224E+04,
0.23080E+04, 0.24067E+04, 0.24975E+04, 0.25908E+04, 0.26867E+04,
0.27853E+04, 0.28865E+04, 0.29904E+04, 0.30972E+04, 0.32068E+04,
0.33194E+04, 0.34349E+04, 0.35535E+04, 0.36752E+04, 0.38001E+04,
0.39282E+04, 0.40597E+04, 0.41945E+04, 0.43327E+04, 0.44745E+04,
0.46199E+04, 0.47688E+04, 0.49215E+04, 0.50780E+04, 0.52384E+04,
0.54027E+04, 0.55710E+04, 0.57434E+04, 0.59200E+04, 0.61008E+04,
0.62859E+04, 0.64754E+04, 0.66693E+04, 0.68679E+04, 0.70710E+04,
0.72788E+04, 0.74915E+04, 0.77090E+04, 0.79315E+04, 0.81590E+04,
0.83917E+04, 0.86296E+04, 0.88728E+04, 0.91214E+04, 0.93755E+04,
0.96351E+04, 0.99005E+04, 0.10171E+05, 0.10448E+05, 0.10731E+05,
0.11020E+05, 0.11315E+05, 0.11617E+05, 0.11924E+05, 0.12238E+05,
0.12559E+05, 0.12886E+05, 0.13220E+05, 0.13561E+05, 0.13909E+05,
0.14263E+05, 0.14625E+05, 0.14995E+05, 0.15371E+05, 0.15755E+05,
0.16147E+05])
# --------------- H2O 181: M = 1, I = 2 ---------------------
M = 1
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.15960E+02, 0.26999E+02, 0.39743E+02,
0.54003E+02, 0.69639E+02, 0.86543E+02, 0.10463E+03, 0.12384E+03,
0.14412E+03, 0.16542E+03, 0.18773E+03, 0.21103E+03, 0.23531E+03,
0.26057E+03, 0.28681E+03, 0.31406E+03, 0.34226E+03, 0.37130E+03,
0.40135E+03, 0.43243E+03, 0.46456E+03, 0.49777E+03, 0.53206E+03,
0.56748E+03, 0.60405E+03, 0.64179E+03, 0.68074E+03, 0.72093E+03,
0.76238E+03, 0.80513E+03, 0.84922E+03, 0.89467E+03, 0.94152E+03,
0.98982E+03, 0.10396E+04, 0.10909E+04, 0.11437E+04, 0.11982E+04,
0.12543E+04, 0.13120E+04, 0.13715E+04, 0.14328E+04, 0.14959E+04,
0.15608E+04, 0.16276E+04, 0.16964E+04, 0.17672E+04, 0.18401E+04,
0.19151E+04, 0.19922E+04, 0.20715E+04, 0.21531E+04, 0.22370E+04,
0.23232E+04, 0.24118E+04, 0.25030E+04, 0.25967E+04, 0.26929E+04,
0.27918E+04, 0.28934E+04, 0.29978E+04, 0.31050E+04, 0.32151E+04,
0.33281E+04, 0.34441E+04, 0.35632E+04, 0.36854E+04, 0.38108E+04,
0.39395E+04, 0.40715E+04, 0.42070E+04, 0.43459E+04, 0.44883E+04,
0.46343E+04, 0.47840E+04, 0.49374E+04, 0.50946E+04, 0.52558E+04,
0.54209E+04, 0.55900E+04, 0.57632E+04, 0.59407E+04, 0.61224E+04,
0.63084E+04, 0.64988E+04, 0.66938E+04, 0.68933E+04, 0.70975E+04,
0.73064E+04, 0.75202E+04, 0.77389E+04, 0.79625E+04, 0.81913E+04,
0.84252E+04, 0.86644E+04, 0.89089E+04, 0.91588E+04, 0.94143E+04,
0.96754E+04, 0.99422E+04, 0.10215E+05, 0.10493E+05, 0.10778E+05,
0.11068E+05, 0.11365E+05, 0.11668E+05, 0.11977E+05, 0.12293E+05,
0.12616E+05, 0.12945E+05, 0.13281E+05, 0.13624E+05, 0.13973E+05,
0.14330E+05, 0.14694E+05, 0.15066E+05, 0.15445E+05, 0.15831E+05,
0.16225E+05])
# --------------- H2O 171: M = 1, I = 3 ---------------------
M = 1
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.95371E+02, 0.16134E+03, 0.23750E+03,
0.32273E+03, 0.41617E+03, 0.51722E+03, 0.62540E+03, 0.74036E+03,
0.86185E+03, 0.98970E+03, 0.11238E+04, 0.12642E+04, 0.14097E+04,
0.15599E+04, 0.17159E+04, 0.18777E+04, 0.20453E+04, 0.22188E+04,
0.23983E+04, 0.25840E+04, 0.27760E+04, 0.29743E+04, 0.31792E+04,
0.33907E+04, 0.36091E+04, 0.38346E+04, 0.40672E+04, 0.43072E+04,
0.45547E+04, 0.48100E+04, 0.50732E+04, 0.53446E+04, 0.56244E+04,
0.59128E+04, 0.62100E+04, 0.65162E+04, 0.68317E+04, 0.71567E+04,
0.74915E+04, 0.78363E+04, 0.81914E+04, 0.85571E+04, 0.89335E+04,
0.93211E+04, 0.97200E+04, 0.10131E+05, 0.10553E+05, 0.10988E+05,
0.11435E+05, 0.11895E+05, 0.12368E+05, 0.12855E+05, 0.13356E+05,
0.13870E+05, 0.14399E+05, 0.14943E+05, 0.15502E+05, 0.16076E+05,
0.16666E+05, 0.17272E+05, 0.17895E+05, 0.18534E+05, 0.19191E+05,
0.19865E+05, 0.20557E+05, 0.21267E+05, 0.21996E+05, 0.22744E+05,
0.23512E+05, 0.24299E+05, 0.25106E+05, 0.25935E+05, 0.26784E+05,
0.27655E+05, 0.28547E+05, 0.29462E+05, 0.30400E+05, 0.31361E+05,
0.32345E+05, 0.33353E+05, 0.34386E+05, 0.35444E+05, 0.36527E+05,
0.37637E+05, 0.38772E+05, 0.39934E+05, 0.41124E+05, 0.42341E+05,
0.43587E+05, 0.44861E+05, 0.46165E+05, 0.47498E+05, 0.48862E+05,
0.50256E+05, 0.51682E+05, 0.53139E+05, 0.54629E+05, 0.56152E+05,
0.57708E+05, 0.59299E+05, 0.60923E+05, 0.62583E+05, 0.64279E+05,
0.66011E+05, 0.67779E+05, 0.69585E+05, 0.71429E+05, 0.73312E+05,
0.75234E+05, 0.77195E+05, 0.79197E+05, 0.81240E+05, 0.83325E+05,
0.85452E+05, 0.87622E+05, 0.89835E+05, 0.92093E+05, 0.94395E+05,
0.96743E+05])
# --------------- H2O 162: M = 1, I = 4 ---------------------
M = 1
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.75792E+02, 0.12986E+03, 0.19244E+03,
0.26253E+03, 0.33942E+03, 0.42259E+03, 0.51161E+03, 0.60619E+03,
0.70609E+03, 0.81117E+03, 0.92132E+03, 0.10365E+04, 0.11567E+04,
0.12820E+04, 0.14124E+04, 0.15481E+04, 0.16891E+04, 0.18355E+04,
0.19876E+04, 0.21455E+04, 0.23092E+04, 0.24791E+04, 0.26551E+04,
0.28376E+04, 0.30268E+04, 0.32258E+04, 0.34288E+04, 0.36392E+04,
0.38571E+04, 0.40828E+04, 0.43165E+04, 0.45584E+04, 0.48089E+04,
0.50681E+04, 0.53363E+04, 0.56139E+04, 0.59009E+04, 0.61979E+04,
0.65049E+04, 0.68224E+04, 0.71506E+04, 0.74898E+04, 0.78403E+04,
0.82024E+04, 0.85765E+04, 0.89628E+04, 0.93618E+04, 0.97736E+04,
0.10199E+05, 0.10637E+05, 0.11090E+05, 0.11557E+05, 0.12039E+05,
0.12535E+05, 0.13047E+05, 0.13575E+05, 0.14119E+05, 0.14679E+05,
0.15257E+05, 0.15851E+05, 0.16464E+05, 0.17094E+05, 0.17743E+05,
0.18411E+05, 0.19098E+05, 0.19805E+05, 0.20532E+05, 0.21280E+05,
0.22049E+05, 0.22840E+05, 0.23652E+05, 0.24487E+05, 0.25345E+05,
0.26227E+05, 0.27132E+05, 0.28062E+05, 0.29016E+05, 0.29997E+05,
0.31002E+05, 0.32035E+05, 0.33094E+05, 0.34180E+05, 0.35295E+05,
0.36438E+05, 0.37610E+05, 0.38812E+05, 0.40044E+05, 0.41306E+05,
0.42600E+05, 0.43926E+05, 0.45284E+05, 0.46675E+05, 0.48100E+05,
0.49559E+05, 0.51053E+05, 0.52583E+05, 0.54148E+05, 0.55750E+05,
0.57390E+05, 0.59067E+05, 0.60783E+05, 0.62539E+05, 0.64334E+05,
0.66170E+05, 0.68047E+05, 0.69967E+05, 0.71929E+05, 0.73934E+05,
0.75983E+05, 0.78078E+05, 0.80217E+05, 0.82403E+05, 0.84636E+05,
0.86917E+05, 0.89246E+05, 0.91625E+05, 0.94053E+05, 0.96533E+05,
0.99064E+05])
# --------------- H2O 182: M = 1, I = 5 ---------------------
M = 1
I = 5
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.82770E+02, 0.13749E+03, 0.20083E+03,
0.27176E+03, 0.34955E+03, 0.43370E+03, 0.52376E+03, 0.61944E+03,
0.72050E+03, 0.82679E+03, 0.93821E+03, 0.10547E+04, 0.11763E+04,
0.13031E+04, 0.14350E+04, 0.15723E+04, 0.17150E+04, 0.18633E+04,
0.20172E+04, 0.21770E+04, 0.23429E+04, 0.25149E+04, 0.26934E+04,
0.28784E+04, 0.30702E+04, 0.32690E+04, 0.34750E+04, 0.36885E+04,
0.39096E+04, 0.41386E+04, 0.43758E+04, 0.46213E+04, 0.48755E+04,
0.51386E+04, 0.54109E+04, 0.56927E+04, 0.59841E+04, 0.62856E+04,
0.65973E+04, 0.69197E+04, 0.72529E+04, 0.75973E+04, 0.79533E+04,
0.83210E+04, 0.87009E+04, 0.90933E+04, 0.94985E+04, 0.99168E+04,
0.10348E+05, 0.10794E+05, 0.11254E+05, 0.11728E+05, 0.12217E+05,
0.12722E+05, 0.13242E+05, 0.13778E+05, 0.14331E+05, 0.14900E+05,
0.15486E+05, 0.16091E+05, 0.16713E+05, 0.17353E+05, 0.18012E+05,
0.18691E+05, 0.19389E+05, 0.20108E+05, 0.20847E+05, 0.21607E+05,
0.22388E+05, 0.23191E+05, 0.24017E+05, 0.24866E+05, 0.25738E+05,
0.26633E+05, 0.27553E+05, 0.28498E+05, 0.29468E+05, 0.30464E+05,
0.31486E+05, 0.32536E+05, 0.33612E+05, 0.34716E+05, 0.35849E+05,
0.37011E+05, 0.38202E+05, 0.39424E+05, 0.40676E+05, 0.41959E+05,
0.43274E+05, 0.44622E+05, 0.46002E+05, 0.47416E+05, 0.48864E+05,
0.50348E+05, 0.51866E+05, 0.53421E+05, 0.55012E+05, 0.56640E+05,
0.58307E+05, 0.60012E+05, 0.61757E+05, 0.63541E+05, 0.65366E+05,
0.67233E+05, 0.69141E+05, 0.71092E+05, 0.73087E+05, 0.75125E+05,
0.77209E+05, 0.79338E+05, 0.81513E+05, 0.83736E+05, 0.86006E+05,
0.88324E+05, 0.90693E+05, 0.93111E+05, 0.95580E+05, 0.98100E+05,
0.10067E+06])
# --------------- H2O 172: M = 1, I = 6 ---------------------
M = 1
I = 6
TIPS_GSI_HASH[(M,I)] = __FloatType__(36.)
TIPS_ISO_HASH[(M,I)] = float32([0.49379E+03, 0.82021E+03, 0.11980E+04,
0.16211E+04, 0.20851E+04, 0.25870E+04, 0.31242E+04, 0.36949E+04,
0.42977E+04, 0.49317E+04, 0.55963E+04, 0.62911E+04, 0.70164E+04,
0.77722E+04, 0.85591E+04, 0.93777E+04, 0.10228E+05, 0.11112E+05,
0.12030E+05, 0.12983E+05, 0.13971E+05, 0.14997E+05, 0.16061E+05,
0.17163E+05, 0.18306E+05, 0.19491E+05, 0.20719E+05, 0.21991E+05,
0.23309E+05, 0.24673E+05, 0.26086E+05, 0.27549E+05, 0.29064E+05,
0.30631E+05, 0.32254E+05, 0.33932E+05, 0.35669E+05, 0.37464E+05,
0.39321E+05, 0.41242E+05, 0.43227E+05, 0.45279E+05, 0.47399E+05,
0.49589E+05, 0.51852E+05, 0.54189E+05, 0.56602E+05, 0.59094E+05,
0.61666E+05, 0.64320E+05, 0.67058E+05, 0.69883E+05, 0.72796E+05,
0.75801E+05, 0.78899E+05, 0.82092E+05, 0.85382E+05, 0.88773E+05,
0.92266E+05, 0.95863E+05, 0.99568E+05, 0.10338E+06, 0.10731E+06,
0.11135E+06, 0.11551E+06, 0.11979E+06, 0.12419E+06, 0.12871E+06,
0.13337E+06, 0.13815E+06, 0.14307E+06, 0.14812E+06, 0.15331E+06,
0.15865E+06, 0.16412E+06, 0.16975E+06, 0.17553E+06, 0.18146E+06,
0.18754E+06, 0.19379E+06, 0.20020E+06, 0.20678E+06, 0.21352E+06,
0.22044E+06, 0.22753E+06, 0.23480E+06, 0.24226E+06, 0.24990E+06,
0.25773E+06, 0.26575E+06, 0.27397E+06, 0.28239E+06, 0.29102E+06,
0.29985E+06, 0.30889E+06, 0.31814E+06, 0.32762E+06, 0.33731E+06,
0.34724E+06, 0.35739E+06, 0.36777E+06, 0.37840E+06, 0.38926E+06,
0.40038E+06, 0.41174E+06, 0.42335E+06, 0.43523E+06, 0.44737E+06,
0.45977E+06, 0.47245E+06, 0.48540E+06, 0.49863E+06, 0.51214E+06,
0.52595E+06, 0.54005E+06, 0.55444E+06, 0.56914E+06, 0.58415E+06,
0.59947E+06])
# --------------- CO2 626: M = 2, I = 1 ---------------------
# TIPS partition-sum table for this (molecule M, isotopologue I) pair:
# the GSI entry is presumably the state-independent nuclear-spin statistical
# weight (TODO confirm against HITRAN docs); the float32 array tabulates the
# total internal partition sum Q(T) on a fixed temperature grid defined
# elsewhere in the file (not visible here — verify grid spacing there).
M = 2
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.53642E+02, 0.75947E+02, 0.98292E+02,
0.12078E+03, 0.14364E+03, 0.16714E+03, 0.19160E+03, 0.21731E+03,
0.24454E+03, 0.27355E+03, 0.30456E+03, 0.33778E+03, 0.37343E+03,
0.41170E+03, 0.45280E+03, 0.49692E+03, 0.54427E+03, 0.59505E+03,
0.64948E+03, 0.70779E+03, 0.77019E+03, 0.83693E+03, 0.90825E+03,
0.98440E+03, 0.10656E+04, 0.11522E+04, 0.12445E+04, 0.13427E+04,
0.14471E+04, 0.15580E+04, 0.16759E+04, 0.18009E+04, 0.19334E+04,
0.20739E+04, 0.22225E+04, 0.23798E+04, 0.25462E+04, 0.27219E+04,
0.29074E+04, 0.31032E+04, 0.33097E+04, 0.35272E+04, 0.37564E+04,
0.39976E+04, 0.42514E+04, 0.45181E+04, 0.47985E+04, 0.50929E+04,
0.54019E+04, 0.57260E+04, 0.60659E+04, 0.64221E+04, 0.67952E+04,
0.71859E+04, 0.75946E+04, 0.80222E+04, 0.84691E+04, 0.89362E+04,
0.94241E+04, 0.99335E+04, 0.10465E+05, 0.11020E+05, 0.11598E+05,
0.12201E+05, 0.12828E+05, 0.13482E+05, 0.14163E+05, 0.14872E+05,
0.15609E+05, 0.16376E+05, 0.17173E+05, 0.18001E+05, 0.18861E+05,
0.19754E+05, 0.20682E+05, 0.21644E+05, 0.22643E+05, 0.23678E+05,
0.24752E+05, 0.25865E+05, 0.27018E+05, 0.28212E+05, 0.29449E+05,
0.30730E+05, 0.32055E+05, 0.33426E+05, 0.34845E+05, 0.36312E+05,
0.37828E+05, 0.39395E+05, 0.41015E+05, 0.42688E+05, 0.44416E+05,
0.46199E+05, 0.48041E+05, 0.49942E+05, 0.51902E+05, 0.53925E+05,
0.56011E+05, 0.58162E+05, 0.60379E+05, 0.62664E+05, 0.65019E+05,
0.67444E+05, 0.69942E+05, 0.72515E+05, 0.75163E+05, 0.77890E+05,
0.80695E+05, 0.83582E+05, 0.86551E+05, 0.89605E+05, 0.92746E+05,
0.95975E+05, 0.99294E+05, 0.10271E+06, 0.10621E+06, 0.10981E+06,
0.11351E+06])
# --------------- CO2 636: M = 2, I = 2 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.10728E+03, 0.15189E+03, 0.19659E+03,
0.24164E+03, 0.28753E+03, 0.33486E+03, 0.38429E+03, 0.43643E+03,
0.49184E+03, 0.55104E+03, 0.61449E+03, 0.68263E+03, 0.75589E+03,
0.83468E+03, 0.91943E+03, 0.10106E+04, 0.11085E+04, 0.12137E+04,
0.13266E+04, 0.14477E+04, 0.15774E+04, 0.17163E+04, 0.18649E+04,
0.20237E+04, 0.21933E+04, 0.23743E+04, 0.25673E+04, 0.27729E+04,
0.29917E+04, 0.32245E+04, 0.34718E+04, 0.37345E+04, 0.40132E+04,
0.43087E+04, 0.46218E+04, 0.49533E+04, 0.53041E+04, 0.56749E+04,
0.60668E+04, 0.64805E+04, 0.69171E+04, 0.73774E+04, 0.78626E+04,
0.83736E+04, 0.89114E+04, 0.94772E+04, 0.10072E+05, 0.10697E+05,
0.11353E+05, 0.12042E+05, 0.12765E+05, 0.13523E+05, 0.14317E+05,
0.15148E+05, 0.16019E+05, 0.16930E+05, 0.17883E+05, 0.18879E+05,
0.19920E+05, 0.21008E+05, 0.22143E+05, 0.23328E+05, 0.24563E+05,
0.25852E+05, 0.27195E+05, 0.28594E+05, 0.30051E+05, 0.31568E+05,
0.33146E+05, 0.34788E+05, 0.36496E+05, 0.38271E+05, 0.40115E+05,
0.42031E+05, 0.44021E+05, 0.46086E+05, 0.48230E+05, 0.50453E+05,
0.52759E+05, 0.55150E+05, 0.57628E+05, 0.60195E+05, 0.62854E+05,
0.65608E+05, 0.68459E+05, 0.71409E+05, 0.74461E+05, 0.77618E+05,
0.80883E+05, 0.84258E+05, 0.87746E+05, 0.91350E+05, 0.95073E+05,
0.98918E+05, 0.10289E+06, 0.10698E+06, 0.11121E+06, 0.11558E+06,
0.12008E+06, 0.12472E+06, 0.12950E+06, 0.13443E+06, 0.13952E+06,
0.14475E+06, 0.15015E+06, 0.15571E+06, 0.16143E+06, 0.16732E+06,
0.17338E+06, 0.17962E+06, 0.18604E+06, 0.19264E+06, 0.19943E+06,
0.20642E+06, 0.21360E+06, 0.22098E+06, 0.22856E+06, 0.23636E+06,
0.24436E+06])
# --------------- CO2 628: M = 2, I = 3 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.11368E+03, 0.16096E+03, 0.20833E+03,
0.25603E+03, 0.30452E+03, 0.35442E+03, 0.40640E+03, 0.46110E+03,
0.51910E+03, 0.58093E+03, 0.64709E+03, 0.71804E+03, 0.79422E+03,
0.87607E+03, 0.96402E+03, 0.10585E+04, 0.11600E+04, 0.12689E+04,
0.13857E+04, 0.15108E+04, 0.16449E+04, 0.17883E+04, 0.19416E+04,
0.21054E+04, 0.22803E+04, 0.24668E+04, 0.26655E+04, 0.28770E+04,
0.31021E+04, 0.33414E+04, 0.35956E+04, 0.38654E+04, 0.41516E+04,
0.44549E+04, 0.47761E+04, 0.51160E+04, 0.54755E+04, 0.58555E+04,
0.62568E+04, 0.66804E+04, 0.71273E+04, 0.75982E+04, 0.80944E+04,
0.86169E+04, 0.91666E+04, 0.97446E+04, 0.10352E+05, 0.10990E+05,
0.11660E+05, 0.12363E+05, 0.13101E+05, 0.13874E+05, 0.14683E+05,
0.15531E+05, 0.16418E+05, 0.17347E+05, 0.18317E+05, 0.19332E+05,
0.20392E+05, 0.21499E+05, 0.22654E+05, 0.23859E+05, 0.25116E+05,
0.26426E+05, 0.27792E+05, 0.29214E+05, 0.30695E+05, 0.32236E+05,
0.33840E+05, 0.35508E+05, 0.37242E+05, 0.39045E+05, 0.40917E+05,
0.42862E+05, 0.44881E+05, 0.46977E+05, 0.49152E+05, 0.51407E+05,
0.53746E+05, 0.56171E+05, 0.58683E+05, 0.61286E+05, 0.63981E+05,
0.66772E+05, 0.69661E+05, 0.72650E+05, 0.75742E+05, 0.78940E+05,
0.82246E+05, 0.85664E+05, 0.89196E+05, 0.92845E+05, 0.96613E+05,
0.10050E+06, 0.10452E+06, 0.10867E+06, 0.11295E+06, 0.11736E+06,
0.12191E+06, 0.12661E+06, 0.13145E+06, 0.13643E+06, 0.14157E+06,
0.14687E+06, 0.15232E+06, 0.15794E+06, 0.16372E+06, 0.16968E+06,
0.17580E+06, 0.18211E+06, 0.18859E+06, 0.19526E+06, 0.20213E+06,
0.20918E+06, 0.21643E+06, 0.22388E+06, 0.23154E+06, 0.23941E+06,
0.24750E+06])
# --------------- CO2 627: M = 2, I = 4 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.66338E+03, 0.93923E+03, 0.12156E+04,
0.14938E+04, 0.17766E+04, 0.20676E+04, 0.23705E+04, 0.26891E+04,
0.30267E+04, 0.33866E+04, 0.37714E+04, 0.41839E+04, 0.46267E+04,
0.51023E+04, 0.56132E+04, 0.61618E+04, 0.67508E+04, 0.73827E+04,
0.80603E+04, 0.87863E+04, 0.95636E+04, 0.10395E+05, 0.11284E+05,
0.12233E+05, 0.13246E+05, 0.14326E+05, 0.15477E+05, 0.16702E+05,
0.18005E+05, 0.19390E+05, 0.20861E+05, 0.22422E+05, 0.24077E+05,
0.25832E+05, 0.27689E+05, 0.29655E+05, 0.31734E+05, 0.33931E+05,
0.36250E+05, 0.38698E+05, 0.41280E+05, 0.44002E+05, 0.46869E+05,
0.49886E+05, 0.53062E+05, 0.56400E+05, 0.59909E+05, 0.63594E+05,
0.67462E+05, 0.71521E+05, 0.75777E+05, 0.80238E+05, 0.84911E+05,
0.89804E+05, 0.94925E+05, 0.10028E+06, 0.10588E+06, 0.11173E+06,
0.11785E+06, 0.12423E+06, 0.13090E+06, 0.13785E+06, 0.14510E+06,
0.15265E+06, 0.16053E+06, 0.16873E+06, 0.17727E+06, 0.18615E+06,
0.19540E+06, 0.20501E+06, 0.21501E+06, 0.22540E+06, 0.23619E+06,
0.24740E+06, 0.25904E+06, 0.27112E+06, 0.28365E+06, 0.29664E+06,
0.31012E+06, 0.32409E+06, 0.33856E+06, 0.35356E+06, 0.36908E+06,
0.38516E+06, 0.40180E+06, 0.41902E+06, 0.43683E+06, 0.45525E+06,
0.47429E+06, 0.49397E+06, 0.51431E+06, 0.53532E+06, 0.55702E+06,
0.57943E+06, 0.60256E+06, 0.62644E+06, 0.65107E+06, 0.67648E+06,
0.70269E+06, 0.72972E+06, 0.75758E+06, 0.78629E+06, 0.81588E+06,
0.84636E+06, 0.87775E+06, 0.91008E+06, 0.94337E+06, 0.97763E+06,
0.10129E+07, 0.10492E+07, 0.10865E+07, 0.11249E+07, 0.11644E+07,
0.12050E+07, 0.12467E+07, 0.12896E+07, 0.13337E+07, 0.13789E+07,
0.14255E+07])
# --------------- CO2 638: M = 2, I = 5 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 5
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.22737E+03, 0.32194E+03, 0.41671E+03,
0.51226E+03, 0.60963E+03, 0.71017E+03, 0.81528E+03, 0.92628E+03,
0.10444E+04, 0.11707E+04, 0.13061E+04, 0.14518E+04, 0.16085E+04,
0.17772E+04, 0.19588E+04, 0.21542E+04, 0.23644E+04, 0.25903E+04,
0.28330E+04, 0.30934E+04, 0.33726E+04, 0.36717E+04, 0.39918E+04,
0.43342E+04, 0.47001E+04, 0.50907E+04, 0.55074E+04, 0.59515E+04,
0.64244E+04, 0.69276E+04, 0.74626E+04, 0.80310E+04, 0.86344E+04,
0.92744E+04, 0.99528E+04, 0.10671E+05, 0.11432E+05, 0.12236E+05,
0.13086E+05, 0.13984E+05, 0.14932E+05, 0.15932E+05, 0.16985E+05,
0.18096E+05, 0.19265E+05, 0.20495E+05, 0.21788E+05, 0.23148E+05,
0.24576E+05, 0.26075E+05, 0.27648E+05, 0.29298E+05, 0.31027E+05,
0.32839E+05, 0.34736E+05, 0.36721E+05, 0.38798E+05, 0.40970E+05,
0.43240E+05, 0.45611E+05, 0.48087E+05, 0.50671E+05, 0.53368E+05,
0.56180E+05, 0.59111E+05, 0.62165E+05, 0.65347E+05, 0.68659E+05,
0.72107E+05, 0.75694E+05, 0.79425E+05, 0.83303E+05, 0.87334E+05,
0.91522E+05, 0.95872E+05, 0.10039E+06, 0.10507E+06, 0.10994E+06,
0.11498E+06, 0.12021E+06, 0.12563E+06, 0.13125E+06, 0.13707E+06,
0.14309E+06, 0.14933E+06, 0.15579E+06, 0.16247E+06, 0.16938E+06,
0.17653E+06, 0.18392E+06, 0.19156E+06, 0.19946E+06, 0.20761E+06,
0.21604E+06, 0.22473E+06, 0.23371E+06, 0.24298E+06, 0.25254E+06,
0.26240E+06, 0.27258E+06, 0.28307E+06, 0.29388E+06, 0.30502E+06,
0.31651E+06, 0.32834E+06, 0.34052E+06, 0.35307E+06, 0.36599E+06,
0.37929E+06, 0.39298E+06, 0.40706E+06, 0.42155E+06, 0.43645E+06,
0.45178E+06, 0.46753E+06, 0.48373E+06, 0.50038E+06, 0.51748E+06,
0.53506E+06])
# --------------- CO2 637: M = 2, I = 6 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 6
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.13267E+04, 0.18785E+04, 0.24314E+04,
0.29888E+04, 0.35566E+04, 0.41426E+04, 0.47550E+04, 0.54013E+04,
0.60886E+04, 0.68232E+04, 0.76109E+04, 0.84574E+04, 0.93678E+04,
0.10348E+05, 0.11402E+05, 0.12536E+05, 0.13755E+05, 0.15065E+05,
0.16471E+05, 0.17980E+05, 0.19598E+05, 0.21330E+05, 0.23184E+05,
0.25166E+05, 0.27283E+05, 0.29543E+05, 0.31953E+05, 0.34521E+05,
0.37256E+05, 0.40164E+05, 0.43256E+05, 0.46541E+05, 0.50026E+05,
0.53723E+05, 0.57641E+05, 0.61790E+05, 0.66180E+05, 0.70823E+05,
0.75729E+05, 0.80910E+05, 0.86378E+05, 0.92145E+05, 0.98224E+05,
0.10463E+06, 0.11137E+06, 0.11846E+06, 0.12592E+06, 0.13375E+06,
0.14198E+06, 0.15062E+06, 0.15969E+06, 0.16920E+06, 0.17916E+06,
0.18959E+06, 0.20052E+06, 0.21196E+06, 0.22392E+06, 0.23642E+06,
0.24949E+06, 0.26314E+06, 0.27740E+06, 0.29227E+06, 0.30779E+06,
0.32398E+06, 0.34085E+06, 0.35842E+06, 0.37673E+06, 0.39579E+06,
0.41563E+06, 0.43626E+06, 0.45772E+06, 0.48003E+06, 0.50322E+06,
0.52730E+06, 0.55232E+06, 0.57829E+06, 0.60524E+06, 0.63320E+06,
0.66219E+06, 0.69226E+06, 0.72342E+06, 0.75571E+06, 0.78916E+06,
0.82380E+06, 0.85966E+06, 0.89678E+06, 0.93518E+06, 0.97490E+06,
0.10160E+07, 0.10585E+07, 0.11023E+07, 0.11477E+07, 0.11946E+07,
0.12430E+07, 0.12929E+07, 0.13445E+07, 0.13977E+07, 0.14526E+07,
0.15093E+07, 0.15677E+07, 0.16280E+07, 0.16901E+07, 0.17541E+07,
0.18200E+07, 0.18880E+07, 0.19579E+07, 0.20300E+07, 0.21042E+07,
0.21805E+07, 0.22591E+07, 0.23400E+07, 0.24232E+07, 0.25087E+07,
0.25967E+07, 0.26871E+07, 0.27801E+07, 0.28757E+07, 0.29739E+07,
0.30747E+07])
# --------------- CO2 828: M = 2, I = 7 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 7
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.60334E+02, 0.85430E+02, 0.11058E+03,
0.13590E+03, 0.16167E+03, 0.18821E+03, 0.21588E+03, 0.24502E+03,
0.27595E+03, 0.30896E+03, 0.34431E+03, 0.38225E+03, 0.42301E+03,
0.46684E+03, 0.51397E+03, 0.56464E+03, 0.61907E+03, 0.67753E+03,
0.74027E+03, 0.80753E+03, 0.87961E+03, 0.95676E+03, 0.10393E+04,
0.11275E+04, 0.12217E+04, 0.13222E+04, 0.14293E+04, 0.15434E+04,
0.16648E+04, 0.17940E+04, 0.19312E+04, 0.20769E+04, 0.22315E+04,
0.23954E+04, 0.25691E+04, 0.27529E+04, 0.29474E+04, 0.31530E+04,
0.33702E+04, 0.35995E+04, 0.38414E+04, 0.40965E+04, 0.43654E+04,
0.46484E+04, 0.49464E+04, 0.52598E+04, 0.55892E+04, 0.59353E+04,
0.62988E+04, 0.66803E+04, 0.70804E+04, 0.74998E+04, 0.79394E+04,
0.83998E+04, 0.88817E+04, 0.93859E+04, 0.99132E+04, 0.10464E+05,
0.11040E+05, 0.11642E+05, 0.12270E+05, 0.12925E+05, 0.13609E+05,
0.14321E+05, 0.15064E+05, 0.15838E+05, 0.16643E+05, 0.17482E+05,
0.18355E+05, 0.19263E+05, 0.20207E+05, 0.21188E+05, 0.22208E+05,
0.23267E+05, 0.24366E+05, 0.25508E+05, 0.26692E+05, 0.27921E+05,
0.29195E+05, 0.30516E+05, 0.31886E+05, 0.33304E+05, 0.34773E+05,
0.36294E+05, 0.37869E+05, 0.39499E+05, 0.41185E+05, 0.42929E+05,
0.44732E+05, 0.46596E+05, 0.48522E+05, 0.50513E+05, 0.52569E+05,
0.54692E+05, 0.56884E+05, 0.59146E+05, 0.61481E+05, 0.63890E+05,
0.66375E+05, 0.68937E+05, 0.71578E+05, 0.74301E+05, 0.77107E+05,
0.79998E+05, 0.82976E+05, 0.86043E+05, 0.89201E+05, 0.92452E+05,
0.95799E+05, 0.99242E+05, 0.10278E+06, 0.10643E+06, 0.11018E+06,
0.11403E+06, 0.11799E+06, 0.12206E+06, 0.12625E+06, 0.13055E+06,
0.13497E+06])
# --------------- CO2 728: M = 2, I = 8 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 8
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.70354E+03, 0.99615E+03, 0.12893E+04,
0.15846E+04, 0.18848E+04, 0.21940E+04, 0.25162E+04, 0.28554E+04,
0.32152E+04, 0.35991E+04, 0.40099E+04, 0.44507E+04, 0.49242E+04,
0.54332E+04, 0.59802E+04, 0.65681E+04, 0.71996E+04, 0.78776E+04,
0.86050E+04, 0.93847E+04, 0.10220E+05, 0.11114E+05, 0.12070E+05,
0.13091E+05, 0.14182E+05, 0.15345E+05, 0.16585E+05, 0.17906E+05,
0.19311E+05, 0.20805E+05, 0.22393E+05, 0.24078E+05, 0.25865E+05,
0.27760E+05, 0.29768E+05, 0.31893E+05, 0.34140E+05, 0.36516E+05,
0.39025E+05, 0.41674E+05, 0.44469E+05, 0.47416E+05, 0.50520E+05,
0.53789E+05, 0.57229E+05, 0.60847E+05, 0.64650E+05, 0.68645E+05,
0.72840E+05, 0.77242E+05, 0.81859E+05, 0.86699E+05, 0.91770E+05,
0.97081E+05, 0.10264E+06, 0.10846E+06, 0.11454E+06, 0.12090E+06,
0.12754E+06, 0.13447E+06, 0.14171E+06, 0.14927E+06, 0.15715E+06,
0.16536E+06, 0.17392E+06, 0.18284E+06, 0.19213E+06, 0.20179E+06,
0.21185E+06, 0.22231E+06, 0.23319E+06, 0.24450E+06, 0.25625E+06,
0.26845E+06, 0.28112E+06, 0.29427E+06, 0.30791E+06, 0.32206E+06,
0.33674E+06, 0.35196E+06, 0.36772E+06, 0.38406E+06, 0.40098E+06,
0.41850E+06, 0.43663E+06, 0.45539E+06, 0.47480E+06, 0.49488E+06,
0.51564E+06, 0.53710E+06, 0.55928E+06, 0.58219E+06, 0.60586E+06,
0.63029E+06, 0.65553E+06, 0.68157E+06, 0.70844E+06, 0.73616E+06,
0.76476E+06, 0.79424E+06, 0.82464E+06, 0.85597E+06, 0.88826E+06,
0.92153E+06, 0.95580E+06, 0.99108E+06, 0.10274E+07, 0.10648E+07,
0.11033E+07, 0.11429E+07, 0.11837E+07, 0.12256E+07, 0.12687E+07,
0.13131E+07, 0.13586E+07, 0.14055E+07, 0.14536E+07, 0.15031E+07,
0.15539E+07])
# --------------- CO2 727: M = 2, I = 9 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 9
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.20518E+04, 0.29051E+04, 0.37601E+04,
0.46209E+04, 0.54961E+04, 0.63969E+04, 0.73353E+04, 0.83227E+04,
0.93698E+04, 0.10486E+05, 0.11681E+05, 0.12962E+05, 0.14337E+05,
0.15815E+05, 0.17403E+05, 0.19110E+05, 0.20942E+05, 0.22909E+05,
0.25018E+05, 0.27278E+05, 0.29699E+05, 0.32290E+05, 0.35060E+05,
0.38019E+05, 0.41177E+05, 0.44545E+05, 0.48135E+05, 0.51957E+05,
0.56023E+05, 0.60346E+05, 0.64938E+05, 0.69812E+05, 0.74981E+05,
0.80461E+05, 0.86264E+05, 0.92406E+05, 0.98902E+05, 0.10577E+06,
0.11302E+06, 0.12067E+06, 0.12875E+06, 0.13726E+06, 0.14622E+06,
0.15566E+06, 0.16559E+06, 0.17604E+06, 0.18702E+06, 0.19855E+06,
0.21066E+06, 0.22336E+06, 0.23669E+06, 0.25065E+06, 0.26528E+06,
0.28061E+06, 0.29664E+06, 0.31342E+06, 0.33096E+06, 0.34930E+06,
0.36845E+06, 0.38845E+06, 0.40933E+06, 0.43111E+06, 0.45383E+06,
0.47751E+06, 0.50219E+06, 0.52790E+06, 0.55466E+06, 0.58252E+06,
0.61151E+06, 0.64166E+06, 0.67300E+06, 0.70558E+06, 0.73943E+06,
0.77458E+06, 0.81108E+06, 0.84896E+06, 0.88827E+06, 0.92904E+06,
0.97131E+06, 0.10151E+07, 0.10605E+07, 0.11076E+07, 0.11563E+07,
0.12068E+07, 0.12590E+07, 0.13130E+07, 0.13689E+07, 0.14267E+07,
0.14865E+07, 0.15483E+07, 0.16121E+07, 0.16781E+07, 0.17462E+07,
0.18165E+07, 0.18892E+07, 0.19641E+07, 0.20415E+07, 0.21213E+07,
0.22036E+07, 0.22884E+07, 0.23759E+07, 0.24661E+07, 0.25590E+07,
0.26547E+07, 0.27533E+07, 0.28549E+07, 0.29594E+07, 0.30670E+07,
0.31778E+07, 0.32918E+07, 0.34090E+07, 0.35296E+07, 0.36536E+07,
0.37812E+07, 0.39123E+07, 0.40470E+07, 0.41855E+07, 0.43278E+07,
0.44739E+07])
# --------------- CO2 838: M = 2, I = 10 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 10
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.12066E+03, 0.17085E+03, 0.22116E+03,
0.27190E+03, 0.32364E+03, 0.37711E+03, 0.43305E+03, 0.49219E+03,
0.55516E+03, 0.62256E+03, 0.69492E+03, 0.77276E+03, 0.85657E+03,
0.94685E+03, 0.10441E+04, 0.11488E+04, 0.12614E+04, 0.13826E+04,
0.15127E+04, 0.16525E+04, 0.18024E+04, 0.19630E+04, 0.21351E+04,
0.23191E+04, 0.25158E+04, 0.27260E+04, 0.29502E+04, 0.31892E+04,
0.34438E+04, 0.37148E+04, 0.40031E+04, 0.43094E+04, 0.46346E+04,
0.49797E+04, 0.53455E+04, 0.57331E+04, 0.61434E+04, 0.65775E+04,
0.70364E+04, 0.75212E+04, 0.80330E+04, 0.85730E+04, 0.91424E+04,
0.97423E+04, 0.10374E+05, 0.11039E+05, 0.11738E+05, 0.12474E+05,
0.13246E+05, 0.14057E+05, 0.14908E+05, 0.15801E+05, 0.16737E+05,
0.17717E+05, 0.18744E+05, 0.19819E+05, 0.20944E+05, 0.22120E+05,
0.23349E+05, 0.24634E+05, 0.25975E+05, 0.27376E+05, 0.28837E+05,
0.30361E+05, 0.31950E+05, 0.33605E+05, 0.35330E+05, 0.37126E+05,
0.38996E+05, 0.40942E+05, 0.42965E+05, 0.45069E+05, 0.47256E+05,
0.49528E+05, 0.51888E+05, 0.54338E+05, 0.56882E+05, 0.59521E+05,
0.62259E+05, 0.65097E+05, 0.68040E+05, 0.71090E+05, 0.74249E+05,
0.77522E+05, 0.80910E+05, 0.84417E+05, 0.88046E+05, 0.91801E+05,
0.95684E+05, 0.99699E+05, 0.10385E+06, 0.10814E+06, 0.11257E+06,
0.11715E+06, 0.12187E+06, 0.12675E+06, 0.13179E+06, 0.13699E+06,
0.14235E+06, 0.14788E+06, 0.15358E+06, 0.15946E+06, 0.16552E+06,
0.17176E+06, 0.17819E+06, 0.18482E+06, 0.19164E+06, 0.19867E+06,
0.20590E+06, 0.21335E+06, 0.22101E+06, 0.22889E+06, 0.23699E+06,
0.24533E+06, 0.25390E+06, 0.26271E+06, 0.27177E+06, 0.28108E+06,
0.29064E+06])
# --------------- CO2 838: M = 2, I = 0 ALIAS-----------------
# NOTE: I is still 10 here, so isotopologue index 0 is registered as an
# alias of the 838 table just defined above (same array object, not a copy;
# mutating one mutates the other).
TIPS_GSI_HASH[(M,0)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,0)] = TIPS_ISO_HASH[(M,I)]
# --------------- CO2 837: M = 2, I = 11 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 2
I = 11
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.14071E+04, 0.19923E+04, 0.25789E+04,
0.31704E+04, 0.37733E+04, 0.43962E+04, 0.50477E+04, 0.57360E+04,
0.64687E+04, 0.72525E+04, 0.80938E+04, 0.89984E+04, 0.99723E+04,
0.11021E+05, 0.12150E+05, 0.13366E+05, 0.14673E+05, 0.16079E+05,
0.17589E+05, 0.19211E+05, 0.20949E+05, 0.22812E+05, 0.24807E+05,
0.26940E+05, 0.29221E+05, 0.31656E+05, 0.34254E+05, 0.37023E+05,
0.39972E+05, 0.43111E+05, 0.46449E+05, 0.49996E+05, 0.53762E+05,
0.57756E+05, 0.61991E+05, 0.66477E+05, 0.71226E+05, 0.76249E+05,
0.81558E+05, 0.87167E+05, 0.93088E+05, 0.99334E+05, 0.10592E+06,
0.11286E+06, 0.12016E+06, 0.12785E+06, 0.13594E+06, 0.14444E+06,
0.15337E+06, 0.16274E+06, 0.17258E+06, 0.18290E+06, 0.19371E+06,
0.20504E+06, 0.21691E+06, 0.22933E+06, 0.24233E+06, 0.25592E+06,
0.27012E+06, 0.28496E+06, 0.30046E+06, 0.31663E+06, 0.33351E+06,
0.35111E+06, 0.36946E+06, 0.38858E+06, 0.40850E+06, 0.42924E+06,
0.45083E+06, 0.47329E+06, 0.49666E+06, 0.52095E+06, 0.54620E+06,
0.57243E+06, 0.59967E+06, 0.62796E+06, 0.65732E+06, 0.68778E+06,
0.71938E+06, 0.75214E+06, 0.78611E+06, 0.82131E+06, 0.85777E+06,
0.89553E+06, 0.93463E+06, 0.97511E+06, 0.10170E+07, 0.10603E+07,
0.11051E+07, 0.11514E+07, 0.11993E+07, 0.12488E+07, 0.12999E+07,
0.13527E+07, 0.14073E+07, 0.14636E+07, 0.15217E+07, 0.15816E+07,
0.16435E+07, 0.17072E+07, 0.17730E+07, 0.18408E+07, 0.19107E+07,
0.19827E+07, 0.20569E+07, 0.21334E+07, 0.22121E+07, 0.22931E+07,
0.23765E+07, 0.24624E+07, 0.25507E+07, 0.26416E+07, 0.27351E+07,
0.28312E+07, 0.29301E+07, 0.30317E+07, 0.31361E+07, 0.32434E+07,
0.33537E+07])
# --------------- O3 666: M = 3, I = 1 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.30333E+03, 0.51126E+03, 0.75274E+03,
0.10241E+04, 0.13236E+04, 0.16508E+04, 0.20068E+04, 0.23935E+04,
0.28136E+04, 0.32703E+04, 0.37672E+04, 0.43082E+04, 0.48975E+04,
0.55395E+04, 0.62386E+04, 0.69996E+04, 0.78272E+04, 0.87264E+04,
0.97026E+04, 0.10761E+05, 0.11907E+05, 0.13146E+05, 0.14485E+05,
0.15929E+05, 0.17484E+05, 0.19158E+05, 0.20957E+05, 0.22887E+05,
0.24956E+05, 0.27172E+05, 0.29541E+05, 0.32072E+05, 0.34773E+05,
0.37652E+05, 0.40718E+05, 0.43979E+05, 0.47444E+05, 0.51123E+05,
0.55026E+05, 0.59161E+05, 0.63540E+05, 0.68172E+05, 0.73069E+05,
0.78240E+05, 0.83698E+05, 0.89453E+05, 0.95517E+05, 0.10190E+06,
0.10862E+06, 0.11569E+06, 0.12311E+06, 0.13091E+06, 0.13909E+06,
0.14767E+06, 0.15666E+06, 0.16608E+06, 0.17594E+06, 0.18626E+06,
0.19706E+06, 0.20834E+06, 0.22012E+06, 0.23242E+06, 0.24526E+06,
0.25866E+06, 0.27262E+06, 0.28717E+06, 0.30233E+06, 0.31811E+06,
0.33453E+06, 0.35161E+06, 0.36937E+06, 0.38784E+06, 0.40702E+06,
0.42694E+06, 0.44762E+06, 0.46909E+06, 0.49135E+06, 0.51444E+06,
0.53838E+06, 0.56318E+06, 0.58887E+06, 0.61548E+06, 0.64303E+06,
0.67153E+06, 0.70102E+06, 0.73153E+06, 0.76306E+06, 0.79566E+06,
0.82934E+06, 0.86413E+06, 0.90006E+06, 0.93716E+06, 0.97545E+06,
0.10150E+07, 0.10557E+07, 0.10977E+07, 0.11411E+07, 0.11858E+07,
0.12318E+07, 0.12792E+07, 0.13281E+07, 0.13784E+07, 0.14302E+07,
0.14835E+07, 0.15384E+07, 0.15948E+07, 0.16529E+07, 0.17126E+07,
0.17740E+07, 0.18371E+07, 0.19020E+07, 0.19686E+07, 0.20371E+07,
0.21074E+07, 0.21797E+07, 0.22538E+07, 0.23300E+07, 0.24081E+07,
0.24883E+07])
# --------------- O3 668: M = 3, I = 2 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.64763E+03, 0.10916E+04, 0.16073E+04,
0.21870E+04, 0.28271E+04, 0.35272E+04, 0.42900E+04, 0.51197E+04,
0.60225E+04, 0.70057E+04, 0.80771E+04, 0.92455E+04, 0.10520E+05,
0.11911E+05, 0.13427E+05, 0.15079E+05, 0.16878E+05, 0.18834E+05,
0.20960E+05, 0.23267E+05, 0.25767E+05, 0.28472E+05, 0.31397E+05,
0.34553E+05, 0.37957E+05, 0.41620E+05, 0.45559E+05, 0.49790E+05,
0.54327E+05, 0.59187E+05, 0.64387E+05, 0.69944E+05, 0.75877E+05,
0.82203E+05, 0.88943E+05, 0.96114E+05, 0.10374E+06, 0.11184E+06,
0.12043E+06, 0.12954E+06, 0.13918E+06, 0.14939E+06, 0.16018E+06,
0.17159E+06, 0.18362E+06, 0.19632E+06, 0.20970E+06, 0.22380E+06,
0.23863E+06, 0.25423E+06, 0.27063E+06, 0.28786E+06, 0.30594E+06,
0.32490E+06, 0.34478E+06, 0.36561E+06, 0.38743E+06, 0.41026E+06,
0.43413E+06, 0.45909E+06, 0.48517E+06, 0.51241E+06, 0.54084E+06,
0.57049E+06, 0.60141E+06, 0.63365E+06, 0.66722E+06, 0.70219E+06,
0.73858E+06, 0.77644E+06, 0.81581E+06, 0.85674E+06, 0.89927E+06,
0.94345E+06, 0.98932E+06, 0.10369E+07, 0.10863E+07, 0.11375E+07,
0.11906E+07, 0.12457E+07, 0.13027E+07, 0.13618E+07, 0.14229E+07,
0.14862E+07, 0.15517E+07, 0.16194E+07, 0.16894E+07, 0.17618E+07,
0.18366E+07, 0.19139E+07, 0.19937E+07, 0.20761E+07, 0.21612E+07,
0.22490E+07, 0.23395E+07, 0.24330E+07, 0.25293E+07, 0.26286E+07,
0.27309E+07, 0.28363E+07, 0.29449E+07, 0.30568E+07, 0.31720E+07,
0.32905E+07, 0.34125E+07, 0.35381E+07, 0.36672E+07, 0.38000E+07,
0.39366E+07, 0.40770E+07, 0.42213E+07, 0.43696E+07, 0.45220E+07,
0.46785E+07, 0.48392E+07, 0.50043E+07, 0.51737E+07, 0.53476E+07,
0.55261E+07])
# --------------- O3 686: M = 3, I = 3 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.31656E+03, 0.53355E+03, 0.78557E+03,
0.10688E+04, 0.13815E+04, 0.17235E+04, 0.20960E+04, 0.25011E+04,
0.29420E+04, 0.34223E+04, 0.39459E+04, 0.45172E+04, 0.51408E+04,
0.58213E+04, 0.65639E+04, 0.73735E+04, 0.82555E+04, 0.92152E+04,
0.10259E+05, 0.11391E+05, 0.12619E+05, 0.13949E+05, 0.15387E+05,
0.16940E+05, 0.18614E+05, 0.20417E+05, 0.22357E+05, 0.24440E+05,
0.26675E+05, 0.29070E+05, 0.31633E+05, 0.34374E+05, 0.37299E+05,
0.40420E+05, 0.43746E+05, 0.47285E+05, 0.51049E+05, 0.55047E+05,
0.59289E+05, 0.63788E+05, 0.68554E+05, 0.73598E+05, 0.78932E+05,
0.84568E+05, 0.90519E+05, 0.96796E+05, 0.10341E+06, 0.11039E+06,
0.11772E+06, 0.12544E+06, 0.13356E+06, 0.14208E+06, 0.15103E+06,
0.16041E+06, 0.17026E+06, 0.18057E+06, 0.19137E+06, 0.20268E+06,
0.21450E+06, 0.22687E+06, 0.23979E+06, 0.25328E+06, 0.26736E+06,
0.28206E+06, 0.29738E+06, 0.31336E+06, 0.33000E+06, 0.34733E+06,
0.36537E+06, 0.38414E+06, 0.40366E+06, 0.42396E+06, 0.44505E+06,
0.46696E+06, 0.48971E+06, 0.51332E+06, 0.53782E+06, 0.56323E+06,
0.58958E+06, 0.61689E+06, 0.64518E+06, 0.67448E+06, 0.70482E+06,
0.73623E+06, 0.76872E+06, 0.80234E+06, 0.83710E+06, 0.87303E+06,
0.91017E+06, 0.94853E+06, 0.98816E+06, 0.10291E+07, 0.10713E+07,
0.11149E+07, 0.11599E+07, 0.12063E+07, 0.12541E+07, 0.13034E+07,
0.13542E+07, 0.14066E+07, 0.14606E+07, 0.15161E+07, 0.15733E+07,
0.16322E+07, 0.16928E+07, 0.17552E+07, 0.18194E+07, 0.18854E+07,
0.19532E+07, 0.20230E+07, 0.20947E+07, 0.21684E+07, 0.22441E+07,
0.23219E+07, 0.24018E+07, 0.24838E+07, 0.25680E+07, 0.26545E+07,
0.27432E+07])
# --------------- O3 667: M = 3, I = 4 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.37657E+04, 0.63472E+04, 0.93454E+04,
0.12715E+05, 0.16435E+05, 0.20502E+05, 0.24929E+05, 0.29742E+05,
0.34975E+05, 0.40668E+05, 0.46868E+05, 0.53624E+05, 0.60990E+05,
0.69018E+05, 0.77768E+05, 0.87296E+05, 0.97666E+05, 0.10894E+06,
0.12118E+06, 0.13446E+06, 0.14885E+06, 0.16441E+06, 0.18123E+06,
0.19938E+06, 0.21894E+06, 0.23998E+06, 0.26261E+06, 0.28690E+06,
0.31295E+06, 0.34084E+06, 0.37068E+06, 0.40256E+06, 0.43659E+06,
0.47287E+06, 0.51151E+06, 0.55262E+06, 0.59632E+06, 0.64272E+06,
0.69194E+06, 0.74412E+06, 0.79937E+06, 0.85783E+06, 0.91963E+06,
0.98492E+06, 0.10538E+07, 0.11265E+07, 0.12031E+07, 0.12837E+07,
0.13686E+07, 0.14579E+07, 0.15517E+07, 0.16502E+07, 0.17536E+07,
0.18621E+07, 0.19758E+07, 0.20949E+07, 0.22196E+07, 0.23501E+07,
0.24866E+07, 0.26292E+07, 0.27783E+07, 0.29339E+07, 0.30963E+07,
0.32658E+07, 0.34425E+07, 0.36266E+07, 0.38184E+07, 0.40181E+07,
0.42260E+07, 0.44422E+07, 0.46671E+07, 0.49008E+07, 0.51437E+07,
0.53959E+07, 0.56578E+07, 0.59296E+07, 0.62116E+07, 0.65040E+07,
0.68071E+07, 0.71213E+07, 0.74468E+07, 0.77838E+07, 0.81328E+07,
0.84939E+07, 0.88676E+07, 0.92541E+07, 0.96536E+07, 0.10067E+08,
0.10493E+08, 0.10934E+08, 0.11390E+08, 0.11860E+08, 0.12345E+08,
0.12846E+08, 0.13363E+08, 0.13895E+08, 0.14445E+08, 0.15011E+08,
0.15595E+08, 0.16196E+08, 0.16815E+08, 0.17453E+08, 0.18110E+08,
0.18786E+08, 0.19482E+08, 0.20198E+08, 0.20934E+08, 0.21691E+08,
0.22470E+08, 0.23270E+08, 0.24093E+08, 0.24939E+08, 0.25807E+08,
0.26699E+08, 0.27616E+08, 0.28556E+08, 0.29522E+08, 0.30514E+08,
0.31531E+08])
# --------------- O3 676: M = 3, I = 5 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 5
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.18608E+04, 0.31363E+04, 0.46177E+04,
0.62826E+04, 0.81202E+04, 0.10129E+05, 0.12316E+05, 0.14693E+05,
0.17277E+05, 0.20089E+05, 0.23153E+05, 0.26492E+05, 0.30133E+05,
0.34103E+05, 0.38430E+05, 0.43145E+05, 0.48277E+05, 0.53858E+05,
0.59920E+05, 0.66497E+05, 0.73624E+05, 0.81336E+05, 0.89671E+05,
0.98668E+05, 0.10836E+06, 0.11880E+06, 0.13002E+06, 0.14207E+06,
0.15500E+06, 0.16884E+06, 0.18365E+06, 0.19947E+06, 0.21636E+06,
0.23438E+06, 0.25356E+06, 0.27398E+06, 0.29568E+06, 0.31873E+06,
0.34318E+06, 0.36911E+06, 0.39656E+06, 0.42561E+06, 0.45632E+06,
0.48877E+06, 0.52302E+06, 0.55914E+06, 0.59722E+06, 0.63732E+06,
0.67952E+06, 0.72390E+06, 0.77055E+06, 0.81954E+06, 0.87097E+06,
0.92491E+06, 0.98146E+06, 0.10407E+07, 0.11027E+07, 0.11677E+07,
0.12356E+07, 0.13066E+07, 0.13807E+07, 0.14582E+07, 0.15390E+07,
0.16233E+07, 0.17113E+07, 0.18029E+07, 0.18984E+07, 0.19978E+07,
0.21012E+07, 0.22089E+07, 0.23208E+07, 0.24372E+07, 0.25581E+07,
0.26837E+07, 0.28141E+07, 0.29494E+07, 0.30898E+07, 0.32354E+07,
0.33864E+07, 0.35428E+07, 0.37049E+07, 0.38728E+07, 0.40466E+07,
0.42264E+07, 0.44125E+07, 0.46050E+07, 0.48040E+07, 0.50098E+07,
0.52224E+07, 0.54420E+07, 0.56689E+07, 0.59031E+07, 0.61449E+07,
0.63943E+07, 0.66517E+07, 0.69172E+07, 0.71909E+07, 0.74731E+07,
0.77639E+07, 0.80635E+07, 0.83721E+07, 0.86900E+07, 0.90172E+07,
0.93541E+07, 0.97008E+07, 0.10058E+08, 0.10424E+08, 0.10802E+08,
0.11190E+08, 0.11589E+08, 0.11999E+08, 0.12420E+08, 0.12853E+08,
0.13298E+08, 0.13755E+08, 0.14223E+08, 0.14705E+08, 0.15199E+08,
0.15706E+08])
# --------------- O3 886: M = 3, I = 6 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 6
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.67639E+03, 0.11401E+04, 0.16787E+04,
0.22843E+04, 0.29532E+04, 0.36856E+04, 0.44842E+04, 0.53545E+04,
0.63030E+04, 0.73381E+04, 0.84686E+04, 0.97040E+04, 0.11054E+05,
0.12530E+05, 0.14143E+05, 0.15903E+05, 0.17823E+05, 0.19915E+05,
0.22190E+05, 0.24663E+05, 0.27346E+05, 0.30254E+05, 0.33400E+05,
0.36800E+05, 0.40469E+05, 0.44423E+05, 0.48678E+05, 0.53251E+05,
0.58160E+05, 0.63423E+05, 0.69058E+05, 0.75085E+05, 0.81524E+05,
0.88395E+05, 0.95719E+05, 0.10352E+06, 0.11181E+06, 0.12063E+06,
0.12999E+06, 0.13991E+06, 0.15043E+06, 0.16157E+06, 0.17335E+06,
0.18580E+06, 0.19895E+06, 0.21283E+06, 0.22746E+06, 0.24288E+06,
0.25911E+06, 0.27619E+06, 0.29415E+06, 0.31301E+06, 0.33283E+06,
0.35362E+06, 0.37542E+06, 0.39827E+06, 0.42221E+06, 0.44726E+06,
0.47348E+06, 0.50089E+06, 0.52954E+06, 0.55947E+06, 0.59072E+06,
0.62332E+06, 0.65733E+06, 0.69279E+06, 0.72973E+06, 0.76821E+06,
0.80827E+06, 0.84996E+06, 0.89332E+06, 0.93840E+06, 0.98526E+06,
0.10339E+07, 0.10845E+07, 0.11370E+07, 0.11914E+07, 0.12479E+07,
0.13065E+07, 0.13672E+07, 0.14302E+07, 0.14953E+07, 0.15628E+07,
0.16327E+07, 0.17050E+07, 0.17798E+07, 0.18571E+07, 0.19371E+07,
0.20197E+07, 0.21051E+07, 0.21933E+07, 0.22844E+07, 0.23785E+07,
0.24755E+07, 0.25757E+07, 0.26790E+07, 0.27855E+07, 0.28954E+07,
0.30086E+07, 0.31253E+07, 0.32455E+07, 0.33693E+07, 0.34967E+07,
0.36280E+07, 0.37631E+07, 0.39021E+07, 0.40451E+07, 0.41922E+07,
0.43435E+07, 0.44990E+07, 0.46589E+07, 0.48232E+07, 0.49920E+07,
0.51654E+07, 0.53436E+07, 0.55265E+07, 0.57143E+07, 0.59071E+07,
0.61050E+07])
# --------------- O3 868: M = 3, I = 7 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 7
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.34615E+03, 0.58348E+03, 0.85915E+03,
0.11692E+04, 0.15117E+04, 0.18868E+04, 0.22960E+04, 0.27419E+04,
0.32278E+04, 0.37579E+04, 0.43366E+04, 0.49686E+04, 0.56591E+04,
0.64134E+04, 0.72369E+04, 0.81354E+04, 0.91148E+04, 0.10181E+05,
0.11341E+05, 0.12600E+05, 0.13966E+05, 0.15446E+05, 0.17046E+05,
0.18775E+05, 0.20640E+05, 0.22649E+05, 0.24810E+05, 0.27132E+05,
0.29624E+05, 0.32295E+05, 0.35154E+05, 0.38211E+05, 0.41475E+05,
0.44958E+05, 0.48670E+05, 0.52621E+05, 0.56823E+05, 0.61288E+05,
0.66026E+05, 0.71052E+05, 0.76376E+05, 0.82011E+05, 0.87972E+05,
0.94271E+05, 0.10092E+06, 0.10794E+06, 0.11534E+06, 0.12313E+06,
0.13134E+06, 0.13997E+06, 0.14905E+06, 0.15858E+06, 0.16859E+06,
0.17909E+06, 0.19010E+06, 0.20164E+06, 0.21373E+06, 0.22638E+06,
0.23962E+06, 0.25346E+06, 0.26792E+06, 0.28302E+06, 0.29879E+06,
0.31524E+06, 0.33240E+06, 0.35029E+06, 0.36892E+06, 0.38833E+06,
0.40853E+06, 0.42956E+06, 0.45142E+06, 0.47416E+06, 0.49778E+06,
0.52233E+06, 0.54781E+06, 0.57427E+06, 0.60172E+06, 0.63019E+06,
0.65971E+06, 0.69031E+06, 0.72201E+06, 0.75485E+06, 0.78886E+06,
0.82405E+06, 0.86048E+06, 0.89815E+06, 0.93711E+06, 0.97739E+06,
0.10190E+07, 0.10620E+07, 0.11065E+07, 0.11523E+07, 0.11997E+07,
0.12485E+07, 0.12990E+07, 0.13510E+07, 0.14046E+07, 0.14599E+07,
0.15169E+07, 0.15756E+07, 0.16361E+07, 0.16984E+07, 0.17626E+07,
0.18287E+07, 0.18966E+07, 0.19666E+07, 0.20386E+07, 0.21126E+07,
0.21887E+07, 0.22669E+07, 0.23474E+07, 0.24300E+07, 0.25150E+07,
0.26022E+07, 0.26919E+07, 0.27839E+07, 0.28784E+07, 0.29753E+07,
0.30749E+07])
# --------------- O3 678: M = 3, I = 8 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 8
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.39745E+04, 0.66993E+04, 0.98642E+04,
0.13422E+05, 0.17352E+05, 0.21652E+05, 0.26339E+05, 0.31442E+05,
0.37000E+05, 0.43058E+05, 0.49669E+05, 0.56885E+05, 0.64766E+05,
0.73372E+05, 0.82765E+05, 0.93011E+05, 0.10418E+06, 0.11633E+06,
0.12955E+06, 0.14390E+06, 0.15946E+06, 0.17632E+06, 0.19455E+06,
0.21424E+06, 0.23547E+06, 0.25835E+06, 0.28296E+06, 0.30939E+06,
0.33776E+06, 0.36816E+06, 0.40070E+06, 0.43549E+06, 0.47264E+06,
0.51228E+06, 0.55451E+06, 0.59947E+06, 0.64728E+06, 0.69807E+06,
0.75198E+06, 0.80915E+06, 0.86971E+06, 0.93381E+06, 0.10016E+07,
0.10733E+07, 0.11489E+07, 0.12287E+07, 0.13128E+07, 0.14015E+07,
0.14948E+07, 0.15930E+07, 0.16961E+07, 0.18045E+07, 0.19183E+07,
0.20378E+07, 0.21629E+07, 0.22942E+07, 0.24316E+07, 0.25754E+07,
0.27258E+07, 0.28831E+07, 0.30475E+07, 0.32192E+07, 0.33984E+07,
0.35855E+07, 0.37805E+07, 0.39838E+07, 0.41956E+07, 0.44162E+07,
0.46458E+07, 0.48847E+07, 0.51332E+07, 0.53916E+07, 0.56601E+07,
0.59390E+07, 0.62286E+07, 0.65292E+07, 0.68412E+07, 0.71647E+07,
0.75002E+07, 0.78479E+07, 0.82081E+07, 0.85813E+07, 0.89676E+07,
0.93676E+07, 0.97814E+07, 0.10209E+08, 0.10652E+08, 0.11110E+08,
0.11583E+08, 0.12071E+08, 0.12576E+08, 0.13097E+08, 0.13635E+08,
0.14190E+08, 0.14763E+08, 0.15354E+08, 0.15963E+08, 0.16592E+08,
0.17239E+08, 0.17906E+08, 0.18593E+08, 0.19301E+08, 0.20030E+08,
0.20780E+08, 0.21553E+08, 0.22347E+08, 0.23165E+08, 0.24006E+08,
0.24870E+08, 0.25759E+08, 0.26673E+08, 0.27612E+08, 0.28577E+08,
0.29568E+08, 0.30585E+08, 0.31631E+08, 0.32704E+08, 0.33805E+08,
0.34936E+08])
# --------------- O3 768: M = 3, I = 9 ---------------------
# TIPS Q(T) table for this (M,I); GSI is presumably the nuclear-spin
# statistical weight (TODO confirm); array values lie on a fixed temperature
# grid defined elsewhere in the file.
M = 3
I = 9
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.40228E+04, 0.67808E+04, 0.99842E+04,
0.13586E+05, 0.17564E+05, 0.21919E+05, 0.26665E+05, 0.31833E+05,
0.37461E+05, 0.43596E+05, 0.50286E+05, 0.57589E+05, 0.65562E+05,
0.74264E+05, 0.83761E+05, 0.94115E+05, 0.10540E+06, 0.11767E+06,
0.13102E+06, 0.14550E+06, 0.16121E+06, 0.17822E+06, 0.19661E+06,
0.21646E+06, 0.23788E+06, 0.26094E+06, 0.28574E+06, 0.31239E+06,
0.34097E+06, 0.37160E+06, 0.40437E+06, 0.43941E+06, 0.47683E+06,
0.51673E+06, 0.55925E+06, 0.60451E+06, 0.65262E+06, 0.70374E+06,
0.75799E+06, 0.81550E+06, 0.87643E+06, 0.94092E+06, 0.10091E+07,
0.10812E+07, 0.11572E+07, 0.12375E+07, 0.13221E+07, 0.14112E+07,
0.15050E+07, 0.16037E+07, 0.17074E+07, 0.18164E+07, 0.19307E+07,
0.20507E+07, 0.21765E+07, 0.23084E+07, 0.24464E+07, 0.25909E+07,
0.27421E+07, 0.29001E+07, 0.30652E+07, 0.32377E+07, 0.34177E+07,
0.36055E+07, 0.38014E+07, 0.40055E+07, 0.42182E+07, 0.44397E+07,
0.46703E+07, 0.49102E+07, 0.51597E+07, 0.54191E+07, 0.56886E+07,
0.59686E+07, 0.62593E+07, 0.65611E+07, 0.68742E+07, 0.71989E+07,
0.75356E+07, 0.78846E+07, 0.82461E+07, 0.86206E+07, 0.90083E+07,
0.94097E+07, 0.98249E+07, 0.10254E+08, 0.10699E+08, 0.11158E+08,
0.11632E+08, 0.12123E+08, 0.12629E+08, 0.13152E+08, 0.13691E+08,
0.14248E+08, 0.14823E+08, 0.15416E+08, 0.16027E+08, 0.16657E+08,
0.17307E+08, 0.17976E+08, 0.18665E+08, 0.19375E+08, 0.20106E+08,
0.20858E+08, 0.21633E+08, 0.22430E+08, 0.23250E+08, 0.24093E+08,
0.24960E+08, 0.25851E+08, 0.26767E+08, 0.27709E+08, 0.28676E+08,
0.29670E+08, 0.30691E+08, 0.31739E+08, 0.32815E+08, 0.33919E+08,
0.35053E+08])
# --------------- O3 786: M = 3, I = 10 ---------------------
M = 3
I = 10
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.39315E+04, 0.66267E+04, 0.97569E+04,
0.13276E+05, 0.17162E+05, 0.21414E+05, 0.26048E+05, 0.31094E+05,
0.36590E+05, 0.42581E+05, 0.49120E+05, 0.56260E+05, 0.64061E+05,
0.72580E+05, 0.81882E+05, 0.92031E+05, 0.10309E+06, 0.11514E+06,
0.12824E+06, 0.14247E+06, 0.15791E+06, 0.17463E+06, 0.19272E+06,
0.21226E+06, 0.23333E+06, 0.25604E+06, 0.28047E+06, 0.30673E+06,
0.33490E+06, 0.36510E+06, 0.39743E+06, 0.43200E+06, 0.46892E+06,
0.50831E+06, 0.55029E+06, 0.59498E+06, 0.64251E+06, 0.69301E+06,
0.74662E+06, 0.80347E+06, 0.86370E+06, 0.92747E+06, 0.99491E+06,
0.10662E+07, 0.11414E+07, 0.12208E+07, 0.13046E+07, 0.13928E+07,
0.14856E+07, 0.15833E+07, 0.16860E+07, 0.17939E+07, 0.19072E+07,
0.20261E+07, 0.21508E+07, 0.22814E+07, 0.24182E+07, 0.25614E+07,
0.27112E+07, 0.28679E+07, 0.30316E+07, 0.32026E+07, 0.33811E+07,
0.35674E+07, 0.37617E+07, 0.39642E+07, 0.41752E+07, 0.43950E+07,
0.46237E+07, 0.48618E+07, 0.51094E+07, 0.53668E+07, 0.56343E+07,
0.59123E+07, 0.62009E+07, 0.65005E+07, 0.68113E+07, 0.71338E+07,
0.74681E+07, 0.78147E+07, 0.81737E+07, 0.85457E+07, 0.89308E+07,
0.93295E+07, 0.97420E+07, 0.10169E+08, 0.10610E+08, 0.11066E+08,
0.11538E+08, 0.12025E+08, 0.12528E+08, 0.13048E+08, 0.13584E+08,
0.14138E+08, 0.14709E+08, 0.15298E+08, 0.15906E+08, 0.16532E+08,
0.17178E+08, 0.17843E+08, 0.18528E+08, 0.19234E+08, 0.19961E+08,
0.20710E+08, 0.21480E+08, 0.22272E+08, 0.23088E+08, 0.23926E+08,
0.24789E+08, 0.25675E+08, 0.26587E+08, 0.27523E+08, 0.28485E+08,
0.29474E+08, 0.30489E+08, 0.31532E+08, 0.32603E+08, 0.33701E+08,
0.34829E+08])
# --------------- O3 776: M = 3, I = 11 ---------------------
M = 3
I = 11
TIPS_GSI_HASH[(M,I)] = __FloatType__(36.)
TIPS_ISO_HASH[(M,I)] = float32([0.23106E+05, 0.38945E+05, 0.57342E+05,
0.78021E+05, 0.10085E+06, 0.12582E+06, 0.15302E+06, 0.18262E+06,
0.21482E+06, 0.24989E+06, 0.28812E+06, 0.32983E+06, 0.37535E+06,
0.42501E+06, 0.47919E+06, 0.53825E+06, 0.60258E+06, 0.67256E+06,
0.74862E+06, 0.83118E+06, 0.92069E+06, 0.10176E+07, 0.11223E+07,
0.12354E+07, 0.13574E+07, 0.14887E+07, 0.16299E+07, 0.17816E+07,
0.19443E+07, 0.21187E+07, 0.23052E+07, 0.25047E+07, 0.27176E+07,
0.29447E+07, 0.31866E+07, 0.34441E+07, 0.37179E+07, 0.40087E+07,
0.43173E+07, 0.46444E+07, 0.49910E+07, 0.53578E+07, 0.57456E+07,
0.61554E+07, 0.65880E+07, 0.70444E+07, 0.75255E+07, 0.80322E+07,
0.85656E+07, 0.91266E+07, 0.97163E+07, 0.10336E+08, 0.10986E+08,
0.11668E+08, 0.12383E+08, 0.13133E+08, 0.13918E+08, 0.14739E+08,
0.15598E+08, 0.16496E+08, 0.17435E+08, 0.18415E+08, 0.19438E+08,
0.20505E+08, 0.21619E+08, 0.22779E+08, 0.23987E+08, 0.25246E+08,
0.26556E+08, 0.27920E+08, 0.29337E+08, 0.30811E+08, 0.32343E+08,
0.33934E+08, 0.35585E+08, 0.37300E+08, 0.39079E+08, 0.40924E+08,
0.42837E+08, 0.44819E+08, 0.46873E+08, 0.49001E+08, 0.51203E+08,
0.53483E+08, 0.55842E+08, 0.58282E+08, 0.60805E+08, 0.63414E+08,
0.66109E+08, 0.68894E+08, 0.71770E+08, 0.74740E+08, 0.77806E+08,
0.80970E+08, 0.84234E+08, 0.87600E+08, 0.91072E+08, 0.94651E+08,
0.98339E+08, 0.10214E+09, 0.10605E+09, 0.11009E+09, 0.11424E+09,
0.11851E+09, 0.12291E+09, 0.12744E+09, 0.13209E+09, 0.13688E+09,
0.14180E+09, 0.14687E+09, 0.15207E+09, 0.15742E+09, 0.16291E+09,
0.16855E+09, 0.17435E+09, 0.18030E+09, 0.18641E+09, 0.19268E+09,
0.19912E+09])
# --------------- O3 767: M = 3, I = 12 ---------------------
M = 3
I = 12
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.11692E+05, 0.19707E+05, 0.29017E+05,
0.39482E+05, 0.51038E+05, 0.63680E+05, 0.77450E+05, 0.92432E+05,
0.10873E+06, 0.12649E+06, 0.14584E+06, 0.16694E+06, 0.18996E+06,
0.21507E+06, 0.24245E+06, 0.27229E+06, 0.30478E+06, 0.34013E+06,
0.37853E+06, 0.42020E+06, 0.46536E+06, 0.51424E+06, 0.56708E+06,
0.62411E+06, 0.68559E+06, 0.75178E+06, 0.82296E+06, 0.89939E+06,
0.98137E+06, 0.10692E+07, 0.11631E+07, 0.12636E+07, 0.13708E+07,
0.14851E+07, 0.16069E+07, 0.17365E+07, 0.18742E+07, 0.20206E+07,
0.21758E+07, 0.23404E+07, 0.25148E+07, 0.26992E+07, 0.28943E+07,
0.31004E+07, 0.33179E+07, 0.35474E+07, 0.37892E+07, 0.40440E+07,
0.43121E+07, 0.45940E+07, 0.48904E+07, 0.52017E+07, 0.55285E+07,
0.58713E+07, 0.62306E+07, 0.66071E+07, 0.70014E+07, 0.74140E+07,
0.78456E+07, 0.82967E+07, 0.87681E+07, 0.92604E+07, 0.97742E+07,
0.10310E+08, 0.10869E+08, 0.11452E+08, 0.12059E+08, 0.12691E+08,
0.13348E+08, 0.14033E+08, 0.14745E+08, 0.15484E+08, 0.16253E+08,
0.17052E+08, 0.17881E+08, 0.18741E+08, 0.19634E+08, 0.20560E+08,
0.21520E+08, 0.22515E+08, 0.23546E+08, 0.24613E+08, 0.25718E+08,
0.26862E+08, 0.28046E+08, 0.29270E+08, 0.30536E+08, 0.31845E+08,
0.33197E+08, 0.34594E+08, 0.36037E+08, 0.37527E+08, 0.39065E+08,
0.40652E+08, 0.42289E+08, 0.43977E+08, 0.45719E+08, 0.47514E+08,
0.49363E+08, 0.51270E+08, 0.53233E+08, 0.55255E+08, 0.57337E+08,
0.59480E+08, 0.61686E+08, 0.63956E+08, 0.66290E+08, 0.68691E+08,
0.71160E+08, 0.73699E+08, 0.76307E+08, 0.78988E+08, 0.81743E+08,
0.84572E+08, 0.87478E+08, 0.90462E+08, 0.93525E+08, 0.96669E+08,
0.99896E+08])
# --------------- O3 888: M = 3, I = 13 ---------------------
M = 3
I = 13
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.36175E+03, 0.60978E+03, 0.89790E+03,
0.12219E+04, 0.15802E+04, 0.19728E+04, 0.24016E+04, 0.28696E+04,
0.33807E+04, 0.39394E+04, 0.45506E+04, 0.52196E+04, 0.59521E+04,
0.67538E+04, 0.76308E+04, 0.85894E+04, 0.96361E+04, 0.10777E+05,
0.12021E+05, 0.13373E+05, 0.14841E+05, 0.16434E+05, 0.18158E+05,
0.20023E+05, 0.22037E+05, 0.24208E+05, 0.26547E+05, 0.29061E+05,
0.31762E+05, 0.34659E+05, 0.37762E+05, 0.41083E+05, 0.44632E+05,
0.48421E+05, 0.52462E+05, 0.56766E+05, 0.61346E+05, 0.66215E+05,
0.71386E+05, 0.76873E+05, 0.82688E+05, 0.88848E+05, 0.95365E+05,
0.10226E+06, 0.10954E+06, 0.11722E+06, 0.12532E+06, 0.13387E+06,
0.14286E+06, 0.15233E+06, 0.16229E+06, 0.17275E+06, 0.18374E+06,
0.19528E+06, 0.20737E+06, 0.22006E+06, 0.23335E+06, 0.24726E+06,
0.26182E+06, 0.27705E+06, 0.29297E+06, 0.30960E+06, 0.32696E+06,
0.34509E+06, 0.36399E+06, 0.38371E+06, 0.40425E+06, 0.42566E+06,
0.44794E+06, 0.47114E+06, 0.49527E+06, 0.52036E+06, 0.54644E+06,
0.57354E+06, 0.60169E+06, 0.63091E+06, 0.66124E+06, 0.69270E+06,
0.72533E+06, 0.75916E+06, 0.79421E+06, 0.83053E+06, 0.86814E+06,
0.90708E+06, 0.94737E+06, 0.98907E+06, 0.10322E+07, 0.10768E+07,
0.11229E+07, 0.11705E+07, 0.12197E+07, 0.12705E+07, 0.13230E+07,
0.13771E+07, 0.14330E+07, 0.14906E+07, 0.15501E+07, 0.16114E+07,
0.16745E+07, 0.17397E+07, 0.18067E+07, 0.18759E+07, 0.19470E+07,
0.20203E+07, 0.20957E+07, 0.21733E+07, 0.22532E+07, 0.23353E+07,
0.24198E+07, 0.25067E+07, 0.25960E+07, 0.26878E+07, 0.27821E+07,
0.28790E+07, 0.29785E+07, 0.30807E+07, 0.31857E+07, 0.32934E+07,
0.34040E+07])
# --------------- O3 887: M = 3, I = 14 ---------------------
M = 3
I = 14
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.42000E+04, 0.70796E+04, 0.10424E+05,
0.14186E+05, 0.18342E+05, 0.22896E+05, 0.27866E+05, 0.33285E+05,
0.39199E+05, 0.45659E+05, 0.52720E+05, 0.60444E+05, 0.68895E+05,
0.78139E+05, 0.88246E+05, 0.99288E+05, 0.11134E+06, 0.12447E+06,
0.13877E+06, 0.15431E+06, 0.17119E+06, 0.18949E+06, 0.20930E+06,
0.23071E+06, 0.25383E+06, 0.27875E+06, 0.30558E+06, 0.33442E+06,
0.36539E+06, 0.39861E+06, 0.43418E+06, 0.47224E+06, 0.51291E+06,
0.55632E+06, 0.60260E+06, 0.65189E+06, 0.70434E+06, 0.76008E+06,
0.81927E+06, 0.88206E+06, 0.94862E+06, 0.10191E+07, 0.10937E+07,
0.11725E+07, 0.12558E+07, 0.13436E+07, 0.14363E+07, 0.15340E+07,
0.16368E+07, 0.17450E+07, 0.18588E+07, 0.19784E+07, 0.21040E+07,
0.22358E+07, 0.23741E+07, 0.25190E+07, 0.26708E+07, 0.28297E+07,
0.29961E+07, 0.31700E+07, 0.33518E+07, 0.35417E+07, 0.37400E+07,
0.39469E+07, 0.41628E+07, 0.43878E+07, 0.46224E+07, 0.48667E+07,
0.51210E+07, 0.53858E+07, 0.56611E+07, 0.59475E+07, 0.62451E+07,
0.65544E+07, 0.68755E+07, 0.72089E+07, 0.75550E+07, 0.79139E+07,
0.82861E+07, 0.86720E+07, 0.90719E+07, 0.94861E+07, 0.99151E+07,
0.10359E+08, 0.10819E+08, 0.11294E+08, 0.11786E+08, 0.12294E+08,
0.12820E+08, 0.13363E+08, 0.13924E+08, 0.14503E+08, 0.15101E+08,
0.15719E+08, 0.16356E+08, 0.17013E+08, 0.17690E+08, 0.18389E+08,
0.19109E+08, 0.19851E+08, 0.20616E+08, 0.21404E+08, 0.22215E+08,
0.23050E+08, 0.23910E+08, 0.24794E+08, 0.25704E+08, 0.26640E+08,
0.27603E+08, 0.28593E+08, 0.29610E+08, 0.30656E+08, 0.31731E+08,
0.32835E+08, 0.33969E+08, 0.35133E+08, 0.36329E+08, 0.37556E+08,
0.38816E+08])
# --------------- O3 878: M = 3, I = 15 ---------------------
M = 3
I = 15
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.21250E+04, 0.35820E+04, 0.52744E+04,
0.71778E+04, 0.92814E+04, 0.11586E+05, 0.14102E+05, 0.16845E+05,
0.19839E+05, 0.23108E+05, 0.26680E+05, 0.30588E+05, 0.34861E+05,
0.39534E+05, 0.44642E+05, 0.50219E+05, 0.56305E+05, 0.62937E+05,
0.70155E+05, 0.78001E+05, 0.86516E+05, 0.95747E+05, 0.10574E+06,
0.11653E+06, 0.12819E+06, 0.14075E+06, 0.15427E+06, 0.16881E+06,
0.18441E+06, 0.20114E+06, 0.21906E+06, 0.23823E+06, 0.25871E+06,
0.28056E+06, 0.30386E+06, 0.32867E+06, 0.35507E+06, 0.38312E+06,
0.41291E+06, 0.44450E+06, 0.47799E+06, 0.51344E+06, 0.55095E+06,
0.59060E+06, 0.63248E+06, 0.67667E+06, 0.72327E+06, 0.77238E+06,
0.82409E+06, 0.87850E+06, 0.93571E+06, 0.99583E+06, 0.10590E+07,
0.11252E+07, 0.11947E+07, 0.12675E+07, 0.13438E+07, 0.14237E+07,
0.15072E+07, 0.15946E+07, 0.16859E+07, 0.17814E+07, 0.18810E+07,
0.19849E+07, 0.20934E+07, 0.22064E+07, 0.23242E+07, 0.24469E+07,
0.25747E+07, 0.27076E+07, 0.28459E+07, 0.29897E+07, 0.31391E+07,
0.32944E+07, 0.34557E+07, 0.36231E+07, 0.37968E+07, 0.39770E+07,
0.41639E+07, 0.43576E+07, 0.45583E+07, 0.47663E+07, 0.49816E+07,
0.52045E+07, 0.54352E+07, 0.56739E+07, 0.59207E+07, 0.61759E+07,
0.64396E+07, 0.67121E+07, 0.69936E+07, 0.72844E+07, 0.75845E+07,
0.78943E+07, 0.82139E+07, 0.85436E+07, 0.88837E+07, 0.92342E+07,
0.95956E+07, 0.99680E+07, 0.10352E+08, 0.10747E+08, 0.11154E+08,
0.11573E+08, 0.12004E+08, 0.12448E+08, 0.12904E+08, 0.13374E+08,
0.13857E+08, 0.14353E+08, 0.14864E+08, 0.15388E+08, 0.15927E+08,
0.16481E+08, 0.17050E+08, 0.17634E+08, 0.18234E+08, 0.18849E+08,
0.19481E+08])
# --------------- O3 778: M = 3, I = 16 ---------------------
M = 3
I = 16
TIPS_GSI_HASH[(M,I)] = __FloatType__(36.)
TIPS_ISO_HASH[(M,I)] = float32([0.24692E+05, 0.41621E+05, 0.61284E+05,
0.83394E+05, 0.10782E+06, 0.13457E+06, 0.16375E+06, 0.19554E+06,
0.23020E+06, 0.26801E+06, 0.30930E+06, 0.35443E+06, 0.40375E+06,
0.45763E+06, 0.51650E+06, 0.58075E+06, 0.65080E+06, 0.72711E+06,
0.81012E+06, 0.90030E+06, 0.99815E+06, 0.11042E+07, 0.12189E+07,
0.13428E+07, 0.14765E+07, 0.16206E+07, 0.17757E+07, 0.19423E+07,
0.21212E+07, 0.23129E+07, 0.25181E+07, 0.27377E+07, 0.29721E+07,
0.32223E+07, 0.34890E+07, 0.37729E+07, 0.40750E+07, 0.43959E+07,
0.47365E+07, 0.50978E+07, 0.54807E+07, 0.58860E+07, 0.63147E+07,
0.67678E+07, 0.72463E+07, 0.77512E+07, 0.82836E+07, 0.88445E+07,
0.94351E+07, 0.10056E+08, 0.10710E+08, 0.11396E+08, 0.12117E+08,
0.12873E+08, 0.13666E+08, 0.14497E+08, 0.15367E+08, 0.16279E+08,
0.17232E+08, 0.18229E+08, 0.19271E+08, 0.20359E+08, 0.21495E+08,
0.22681E+08, 0.23917E+08, 0.25206E+08, 0.26549E+08, 0.27948E+08,
0.29404E+08, 0.30920E+08, 0.32496E+08, 0.34135E+08, 0.35838E+08,
0.37608E+08, 0.39445E+08, 0.41353E+08, 0.43332E+08, 0.45385E+08,
0.47514E+08, 0.49721E+08, 0.52007E+08, 0.54376E+08, 0.56829E+08,
0.59367E+08, 0.61995E+08, 0.64712E+08, 0.67523E+08, 0.70429E+08,
0.73432E+08, 0.76535E+08, 0.79740E+08, 0.83050E+08, 0.86467E+08,
0.89993E+08, 0.93632E+08, 0.97385E+08, 0.10126E+09, 0.10525E+09,
0.10936E+09, 0.11360E+09, 0.11796E+09, 0.12246E+09, 0.12709E+09,
0.13186E+09, 0.13677E+09, 0.14182E+09, 0.14701E+09, 0.15236E+09,
0.15785E+09, 0.16350E+09, 0.16931E+09, 0.17528E+09, 0.18141E+09,
0.18771E+09, 0.19418E+09, 0.20082E+09, 0.20764E+09, 0.21465E+09,
0.22183E+09])
# --------------- O3 787: M = 3, I = 17 ---------------------
M = 3
I = 17
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.12211E+05, 0.20582E+05, 0.30305E+05,
0.41237E+05, 0.53314E+05, 0.66536E+05, 0.80957E+05, 0.96672E+05,
0.11380E+06, 0.13250E+06, 0.15292E+06, 0.17524E+06, 0.19965E+06,
0.22632E+06, 0.25546E+06, 0.28728E+06, 0.32199E+06, 0.35980E+06,
0.40094E+06, 0.44565E+06, 0.49417E+06, 0.54676E+06, 0.60366E+06,
0.66516E+06, 0.73152E+06, 0.80305E+06, 0.88002E+06, 0.96276E+06,
0.10516E+07, 0.11468E+07, 0.12488E+07, 0.13578E+07, 0.14743E+07,
0.15987E+07, 0.17312E+07, 0.18723E+07, 0.20225E+07, 0.21820E+07,
0.23514E+07, 0.25310E+07, 0.27214E+07, 0.29230E+07, 0.31362E+07,
0.33616E+07, 0.35997E+07, 0.38509E+07, 0.41158E+07, 0.43949E+07,
0.46887E+07, 0.49980E+07, 0.53231E+07, 0.56647E+07, 0.60234E+07,
0.63998E+07, 0.67946E+07, 0.72084E+07, 0.76418E+07, 0.80955E+07,
0.85702E+07, 0.90666E+07, 0.95854E+07, 0.10127E+08, 0.10693E+08,
0.11284E+08, 0.11900E+08, 0.12542E+08, 0.13211E+08, 0.13907E+08,
0.14633E+08, 0.15388E+08, 0.16173E+08, 0.16990E+08, 0.17838E+08,
0.18720E+08, 0.19636E+08, 0.20586E+08, 0.21573E+08, 0.22596E+08,
0.23657E+08, 0.24757E+08, 0.25896E+08, 0.27077E+08, 0.28299E+08,
0.29565E+08, 0.30874E+08, 0.32229E+08, 0.33630E+08, 0.35079E+08,
0.36576E+08, 0.38123E+08, 0.39721E+08, 0.41371E+08, 0.43075E+08,
0.44833E+08, 0.46647E+08, 0.48518E+08, 0.50448E+08, 0.52438E+08,
0.54489E+08, 0.56603E+08, 0.58780E+08, 0.61023E+08, 0.63332E+08,
0.65710E+08, 0.68157E+08, 0.70676E+08, 0.73266E+08, 0.75931E+08,
0.78672E+08, 0.81490E+08, 0.84386E+08, 0.87363E+08, 0.90422E+08,
0.93564E+08, 0.96791E+08, 0.10011E+09, 0.10351E+09, 0.10700E+09,
0.11059E+09])
# --------------- O3 777: M = 3, I = 18 ---------------------
M = 3
I = 18
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.71750E+05, 0.12094E+06, 0.17807E+06,
0.24230E+06, 0.31324E+06, 0.39088E+06, 0.47550E+06, 0.56764E+06,
0.66800E+06, 0.77740E+06, 0.89677E+06, 0.10271E+07, 0.11694E+07,
0.13249E+07, 0.14945E+07, 0.16796E+07, 0.18813E+07, 0.21009E+07,
0.23396E+07, 0.25989E+07, 0.28801E+07, 0.31847E+07, 0.35140E+07,
0.38698E+07, 0.42535E+07, 0.46669E+07, 0.51115E+07, 0.55893E+07,
0.61019E+07, 0.66513E+07, 0.72393E+07, 0.78680E+07, 0.85395E+07,
0.92558E+07, 0.10019E+08, 0.10832E+08, 0.11696E+08, 0.12614E+08,
0.13588E+08, 0.14621E+08, 0.15716E+08, 0.16875E+08, 0.18100E+08,
0.19395E+08, 0.20762E+08, 0.22205E+08, 0.23726E+08, 0.25328E+08,
0.27015E+08, 0.28789E+08, 0.30654E+08, 0.32614E+08, 0.34671E+08,
0.36830E+08, 0.39093E+08, 0.41465E+08, 0.43949E+08, 0.46549E+08,
0.49269E+08, 0.52112E+08, 0.55084E+08, 0.58188E+08, 0.61428E+08,
0.64809E+08, 0.68335E+08, 0.72010E+08, 0.75840E+08, 0.79828E+08,
0.83979E+08, 0.88299E+08, 0.92792E+08, 0.97463E+08, 0.10232E+09,
0.10736E+09, 0.11260E+09, 0.11803E+09, 0.12367E+09, 0.12952E+09,
0.13559E+09, 0.14187E+09, 0.14839E+09, 0.15513E+09, 0.16212E+09,
0.16935E+09, 0.17683E+09, 0.18457E+09, 0.19257E+09, 0.20085E+09,
0.20940E+09, 0.21824E+09, 0.22736E+09, 0.23678E+09, 0.24651E+09,
0.25655E+09, 0.26691E+09, 0.27759E+09, 0.28861E+09, 0.29997E+09,
0.31167E+09, 0.32374E+09, 0.33616E+09, 0.34896E+09, 0.36214E+09,
0.37571E+09, 0.38967E+09, 0.40404E+09, 0.41882E+09, 0.43403E+09,
0.44966E+09, 0.46573E+09, 0.48226E+09, 0.49923E+09, 0.51668E+09,
0.53460E+09, 0.55301E+09, 0.57191E+09, 0.59131E+09, 0.61123E+09,
0.63167E+09])
# --------------- N2O 446: M = 4, I = 1 ---------------------
M = 4
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(9.)
TIPS_ISO_HASH[(M,I)] = float32([0.89943E+03, 0.12734E+04, 0.16489E+04,
0.20293E+04, 0.24205E+04, 0.28289E+04, 0.32609E+04, 0.37222E+04,
0.42180E+04, 0.47529E+04, 0.53312E+04, 0.59572E+04, 0.66348E+04,
0.73683E+04, 0.81616E+04, 0.90190E+04, 0.99450E+04, 0.10944E+05,
0.12021E+05, 0.13180E+05, 0.14426E+05, 0.15766E+05, 0.17203E+05,
0.18745E+05, 0.20396E+05, 0.22162E+05, 0.24051E+05, 0.26069E+05,
0.28222E+05, 0.30517E+05, 0.32962E+05, 0.35564E+05, 0.38331E+05,
0.41271E+05, 0.44393E+05, 0.47704E+05, 0.51214E+05, 0.54932E+05,
0.58868E+05, 0.63030E+05, 0.67429E+05, 0.72075E+05, 0.76979E+05,
0.82151E+05, 0.87604E+05, 0.93348E+05, 0.99395E+05, 0.10576E+06,
0.11245E+06, 0.11948E+06, 0.12686E+06, 0.13461E+06, 0.14275E+06,
0.15128E+06, 0.16021E+06, 0.16958E+06, 0.17938E+06, 0.18964E+06,
0.20037E+06, 0.21159E+06, 0.22331E+06, 0.23556E+06, 0.24834E+06,
0.26169E+06, 0.27561E+06, 0.29012E+06, 0.30525E+06, 0.32101E+06,
0.33743E+06, 0.35452E+06, 0.37230E+06, 0.39080E+06, 0.41004E+06,
0.43004E+06, 0.45082E+06, 0.47241E+06, 0.49483E+06, 0.51810E+06,
0.54225E+06, 0.56730E+06, 0.59329E+06, 0.62022E+06, 0.64814E+06,
0.67707E+06, 0.70703E+06, 0.73806E+06, 0.77018E+06, 0.80342E+06,
0.83781E+06, 0.87338E+06, 0.91016E+06, 0.94818E+06, 0.98748E+06,
0.10281E+07, 0.10700E+07, 0.11133E+07, 0.11581E+07, 0.12042E+07,
0.12519E+07, 0.13010E+07, 0.13517E+07, 0.14040E+07, 0.14579E+07,
0.15134E+07, 0.15707E+07, 0.16297E+07, 0.16905E+07, 0.17530E+07,
0.18175E+07, 0.18838E+07, 0.19521E+07, 0.20224E+07, 0.20947E+07,
0.21690E+07, 0.22455E+07, 0.23242E+07, 0.24050E+07, 0.24881E+07,
0.25735E+07])
# --------------- N2O 456: M = 4, I = 2 ---------------------
M = 4
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.59966E+03, 0.84903E+03, 0.10995E+04,
0.13538E+04, 0.16158E+04, 0.18903E+04, 0.21815E+04, 0.24934E+04,
0.28295E+04, 0.31927E+04, 0.35862E+04, 0.40128E+04, 0.44752E+04,
0.49763E+04, 0.55189E+04, 0.61059E+04, 0.67404E+04, 0.74256E+04,
0.81646E+04, 0.89609E+04, 0.98180E+04, 0.10740E+05, 0.11729E+05,
0.12791E+05, 0.13930E+05, 0.15149E+05, 0.16453E+05, 0.17847E+05,
0.19335E+05, 0.20922E+05, 0.22614E+05, 0.24416E+05, 0.26333E+05,
0.28371E+05, 0.30535E+05, 0.32833E+05, 0.35269E+05, 0.37851E+05,
0.40585E+05, 0.43478E+05, 0.46537E+05, 0.49769E+05, 0.53182E+05,
0.56783E+05, 0.60580E+05, 0.64582E+05, 0.68796E+05, 0.73232E+05,
0.77898E+05, 0.82803E+05, 0.87957E+05, 0.93369E+05, 0.99048E+05,
0.10501E+06, 0.11125E+06, 0.11780E+06, 0.12465E+06, 0.13182E+06,
0.13933E+06, 0.14718E+06, 0.15539E+06, 0.16396E+06, 0.17291E+06,
0.18226E+06, 0.19201E+06, 0.20218E+06, 0.21278E+06, 0.22383E+06,
0.23534E+06, 0.24733E+06, 0.25980E+06, 0.27278E+06, 0.28628E+06,
0.30032E+06, 0.31491E+06, 0.33007E+06, 0.34581E+06, 0.36216E+06,
0.37912E+06, 0.39673E+06, 0.41499E+06, 0.43392E+06, 0.45355E+06,
0.47389E+06, 0.49496E+06, 0.51678E+06, 0.53937E+06, 0.56276E+06,
0.58695E+06, 0.61199E+06, 0.63788E+06, 0.66464E+06, 0.69231E+06,
0.72090E+06, 0.75044E+06, 0.78094E+06, 0.81244E+06, 0.84496E+06,
0.87853E+06, 0.91316E+06, 0.94889E+06, 0.98573E+06, 0.10237E+07,
0.10629E+07, 0.11033E+07, 0.11449E+07, 0.11877E+07, 0.12319E+07,
0.12773E+07, 0.13241E+07, 0.13723E+07, 0.14219E+07, 0.14729E+07,
0.15254E+07, 0.15793E+07, 0.16349E+07, 0.16919E+07, 0.17506E+07,
0.18109E+07])
# --------------- N2O 546: M = 4, I = 3 ---------------------
M = 4
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.62051E+03, 0.87856E+03, 0.11377E+04,
0.14003E+04, 0.16705E+04, 0.19529E+04, 0.22518E+04, 0.25713E+04,
0.29149E+04, 0.32859E+04, 0.36873E+04, 0.41220E+04, 0.45929E+04,
0.51028E+04, 0.56547E+04, 0.62515E+04, 0.68963E+04, 0.75923E+04,
0.83428E+04, 0.91511E+04, 0.10021E+05, 0.10956E+05, 0.11960E+05,
0.13036E+05, 0.14190E+05, 0.15425E+05, 0.16746E+05, 0.18158E+05,
0.19664E+05, 0.21271E+05, 0.22984E+05, 0.24806E+05, 0.26745E+05,
0.28806E+05, 0.30995E+05, 0.33317E+05, 0.35780E+05, 0.38389E+05,
0.41151E+05, 0.44073E+05, 0.47162E+05, 0.50425E+05, 0.53871E+05,
0.57505E+05, 0.61338E+05, 0.65375E+05, 0.69628E+05, 0.74102E+05,
0.78808E+05, 0.83755E+05, 0.88951E+05, 0.94407E+05, 0.10013E+06,
0.10614E+06, 0.11243E+06, 0.11902E+06, 0.12593E+06, 0.13316E+06,
0.14072E+06, 0.14862E+06, 0.15689E+06, 0.16552E+06, 0.17453E+06,
0.18394E+06, 0.19376E+06, 0.20399E+06, 0.21466E+06, 0.22578E+06,
0.23737E+06, 0.24942E+06, 0.26198E+06, 0.27503E+06, 0.28861E+06,
0.30273E+06, 0.31741E+06, 0.33265E+06, 0.34848E+06, 0.36492E+06,
0.38197E+06, 0.39967E+06, 0.41803E+06, 0.43706E+06, 0.45679E+06,
0.47723E+06, 0.49840E+06, 0.52033E+06, 0.54303E+06, 0.56653E+06,
0.59084E+06, 0.61599E+06, 0.64200E+06, 0.66888E+06, 0.69667E+06,
0.72539E+06, 0.75506E+06, 0.78569E+06, 0.81733E+06, 0.84998E+06,
0.88369E+06, 0.91846E+06, 0.95433E+06, 0.99132E+06, 0.10295E+07,
0.10688E+07, 0.11093E+07, 0.11511E+07, 0.11941E+07, 0.12384E+07,
0.12840E+07, 0.13310E+07, 0.13793E+07, 0.14291E+07, 0.14803E+07,
0.15329E+07, 0.15871E+07, 0.16428E+07, 0.17000E+07, 0.17589E+07,
0.18194E+07])
# --------------- N2O 448: M = 4, I = 4 ---------------------
M = 4
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(9.)
TIPS_ISO_HASH[(M,I)] = float32([0.95253E+03, 0.13487E+04, 0.17465E+04,
0.21498E+04, 0.25648E+04, 0.29986E+04, 0.34580E+04, 0.39493E+04,
0.44779E+04, 0.50488E+04, 0.56669E+04, 0.63366E+04, 0.70625E+04,
0.78488E+04, 0.87003E+04, 0.96216E+04, 0.10617E+05, 0.11692E+05,
0.12852E+05, 0.14102E+05, 0.15447E+05, 0.16893E+05, 0.18446E+05,
0.20112E+05, 0.21898E+05, 0.23811E+05, 0.25856E+05, 0.28042E+05,
0.30377E+05, 0.32866E+05, 0.35520E+05, 0.38345E+05, 0.41351E+05,
0.44545E+05, 0.47939E+05, 0.51540E+05, 0.55359E+05, 0.59405E+05,
0.63689E+05, 0.68222E+05, 0.73015E+05, 0.78078E+05, 0.83424E+05,
0.89064E+05, 0.95012E+05, 0.10128E+06, 0.10788E+06, 0.11482E+06,
0.12213E+06, 0.12981E+06, 0.13788E+06, 0.14635E+06, 0.15524E+06,
0.16456E+06, 0.17433E+06, 0.18457E+06, 0.19530E+06, 0.20652E+06,
0.21827E+06, 0.23055E+06, 0.24338E+06, 0.25679E+06, 0.27079E+06,
0.28541E+06, 0.30066E+06, 0.31656E+06, 0.33314E+06, 0.35042E+06,
0.36841E+06, 0.38715E+06, 0.40666E+06, 0.42695E+06, 0.44805E+06,
0.46999E+06, 0.49279E+06, 0.51649E+06, 0.54109E+06, 0.56664E+06,
0.59315E+06, 0.62066E+06, 0.64919E+06, 0.67877E+06, 0.70943E+06,
0.74121E+06, 0.77413E+06, 0.80822E+06, 0.84351E+06, 0.88004E+06,
0.91783E+06, 0.95693E+06, 0.99737E+06, 0.10392E+07, 0.10824E+07,
0.11270E+07, 0.11732E+07, 0.12208E+07, 0.12700E+07, 0.13208E+07,
0.13732E+07, 0.14272E+07, 0.14830E+07, 0.15405E+07, 0.15999E+07,
0.16610E+07, 0.17240E+07, 0.17890E+07, 0.18559E+07, 0.19248E+07,
0.19957E+07, 0.20687E+07, 0.21439E+07, 0.22213E+07, 0.23009E+07,
0.23828E+07, 0.24671E+07, 0.25537E+07, 0.26428E+07, 0.27343E+07,
0.28284E+07])
# --------------- N2O 447: M = 4, I = 5 ---------------------
M = 4
I = 5
TIPS_GSI_HASH[(M,I)] = __FloatType__(54.)
TIPS_ISO_HASH[(M,I)] = float32([0.55598E+04, 0.78718E+04, 0.10193E+05,
0.12546E+05, 0.14966E+05, 0.17495E+05, 0.20171E+05, 0.23031E+05,
0.26106E+05, 0.29426E+05, 0.33018E+05, 0.36908E+05, 0.41121E+05,
0.45684E+05, 0.50622E+05, 0.55962E+05, 0.61731E+05, 0.67958E+05,
0.74671E+05, 0.81902E+05, 0.89681E+05, 0.98043E+05, 0.10702E+06,
0.11665E+06, 0.12697E+06, 0.13801E+06, 0.14983E+06, 0.16244E+06,
0.17591E+06, 0.19028E+06, 0.20558E+06, 0.22188E+06, 0.23920E+06,
0.25762E+06, 0.27718E+06, 0.29793E+06, 0.31993E+06, 0.34323E+06,
0.36791E+06, 0.39401E+06, 0.42160E+06, 0.45074E+06, 0.48151E+06,
0.51397E+06, 0.54819E+06, 0.58424E+06, 0.62221E+06, 0.66215E+06,
0.70416E+06, 0.74832E+06, 0.79470E+06, 0.84340E+06, 0.89450E+06,
0.94808E+06, 0.10042E+07, 0.10631E+07, 0.11247E+07, 0.11892E+07,
0.12567E+07, 0.13272E+07, 0.14009E+07, 0.14779E+07, 0.15583E+07,
0.16422E+07, 0.17298E+07, 0.18211E+07, 0.19163E+07, 0.20154E+07,
0.21187E+07, 0.22263E+07, 0.23382E+07, 0.24546E+07, 0.25757E+07,
0.27016E+07, 0.28324E+07, 0.29683E+07, 0.31095E+07, 0.32560E+07,
0.34081E+07, 0.35659E+07, 0.37295E+07, 0.38991E+07, 0.40750E+07,
0.42572E+07, 0.44459E+07, 0.46414E+07, 0.48437E+07, 0.50531E+07,
0.52698E+07, 0.54939E+07, 0.57257E+07, 0.59653E+07, 0.62129E+07,
0.64688E+07, 0.67331E+07, 0.70061E+07, 0.72880E+07, 0.75790E+07,
0.78792E+07, 0.81891E+07, 0.85086E+07, 0.88382E+07, 0.91780E+07,
0.95283E+07, 0.98893E+07, 0.10261E+08, 0.10644E+08, 0.11039E+08,
0.11445E+08, 0.11864E+08, 0.12294E+08, 0.12738E+08, 0.13194E+08,
0.13663E+08, 0.14145E+08, 0.14641E+08, 0.15151E+08, 0.15675E+08,
0.16214E+08])
# --------------- CO 26: M = 5, I = 1 ---------------------
M = 5
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.21948E+02, 0.30961E+02, 0.39980E+02,
0.49004E+02, 0.58035E+02, 0.67071E+02, 0.76112E+02, 0.85160E+02,
0.94213E+02, 0.10327E+03, 0.11234E+03, 0.12142E+03, 0.13050E+03,
0.13960E+03, 0.14872E+03, 0.15787E+03, 0.16704E+03, 0.17624E+03,
0.18548E+03, 0.19477E+03, 0.20411E+03, 0.21350E+03, 0.22295E+03,
0.23248E+03, 0.24207E+03, 0.25175E+03, 0.26151E+03, 0.27136E+03,
0.28130E+03, 0.29134E+03, 0.30148E+03, 0.31172E+03, 0.32207E+03,
0.33253E+03, 0.34312E+03, 0.35381E+03, 0.36463E+03, 0.37557E+03,
0.38663E+03, 0.39782E+03, 0.40914E+03, 0.42060E+03, 0.43218E+03,
0.44389E+03, 0.45575E+03, 0.46774E+03, 0.47987E+03, 0.49213E+03,
0.50454E+03, 0.51708E+03, 0.52978E+03, 0.54261E+03, 0.55559E+03,
0.56871E+03, 0.58198E+03, 0.59540E+03, 0.60896E+03, 0.62267E+03,
0.63653E+03, 0.65055E+03, 0.66470E+03, 0.67901E+03, 0.69347E+03,
0.70808E+03, 0.72284E+03, 0.73776E+03, 0.75283E+03, 0.76805E+03,
0.78342E+03, 0.79895E+03, 0.81463E+03, 0.83047E+03, 0.84646E+03,
0.86260E+03, 0.87891E+03, 0.89536E+03, 0.91197E+03, 0.92874E+03,
0.94566E+03, 0.96275E+03, 0.97998E+03, 0.99738E+03, 0.10149E+04,
0.10326E+04, 0.10505E+04, 0.10685E+04, 0.10867E+04, 0.11051E+04,
0.11236E+04, 0.11422E+04, 0.11611E+04, 0.11800E+04, 0.11992E+04,
0.12185E+04, 0.12380E+04, 0.12576E+04, 0.12774E+04, 0.12973E+04,
0.13174E+04, 0.13377E+04, 0.13581E+04, 0.13787E+04, 0.13994E+04,
0.14203E+04, 0.14414E+04, 0.14627E+04, 0.14841E+04, 0.15056E+04,
0.15273E+04, 0.15492E+04, 0.15713E+04, 0.15935E+04, 0.16159E+04,
0.16384E+04, 0.16611E+04, 0.16840E+04, 0.17070E+04, 0.17302E+04,
0.17536E+04])
# --------------- CO 36: M = 5, I = 2 ---------------------
M = 5
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.45888E+02, 0.64745E+02, 0.83615E+02,
0.10250E+03, 0.12139E+03, 0.14030E+03, 0.15921E+03, 0.17814E+03,
0.19708E+03, 0.21604E+03, 0.23501E+03, 0.25400E+03, 0.27302E+03,
0.29207E+03, 0.31117E+03, 0.33031E+03, 0.34952E+03, 0.36880E+03,
0.38817E+03, 0.40764E+03, 0.42723E+03, 0.44694E+03, 0.46679E+03,
0.48679E+03, 0.50696E+03, 0.52730E+03, 0.54783E+03, 0.56855E+03,
0.58948E+03, 0.61061E+03, 0.63198E+03, 0.65357E+03, 0.67539E+03,
0.69747E+03, 0.71979E+03, 0.74237E+03, 0.76521E+03, 0.78832E+03,
0.81169E+03, 0.83534E+03, 0.85927E+03, 0.88348E+03, 0.90798E+03,
0.93277E+03, 0.95784E+03, 0.98322E+03, 0.10089E+04, 0.10349E+04,
0.10611E+04, 0.10877E+04, 0.11146E+04, 0.11418E+04, 0.11693E+04,
0.11971E+04, 0.12253E+04, 0.12537E+04, 0.12825E+04, 0.13115E+04,
0.13409E+04, 0.13707E+04, 0.14007E+04, 0.14311E+04, 0.14617E+04,
0.14928E+04, 0.15241E+04, 0.15558E+04, 0.15877E+04, 0.16200E+04,
0.16527E+04, 0.16857E+04, 0.17190E+04, 0.17526E+04, 0.17866E+04,
0.18209E+04, 0.18555E+04, 0.18905E+04, 0.19258E+04, 0.19614E+04,
0.19974E+04, 0.20337E+04, 0.20703E+04, 0.21073E+04, 0.21446E+04,
0.21823E+04, 0.22203E+04, 0.22586E+04, 0.22973E+04, 0.23363E+04,
0.23756E+04, 0.24153E+04, 0.24553E+04, 0.24957E+04, 0.25364E+04,
0.25775E+04, 0.26189E+04, 0.26606E+04, 0.27027E+04, 0.27451E+04,
0.27879E+04, 0.28310E+04, 0.28745E+04, 0.29183E+04, 0.29625E+04,
0.30070E+04, 0.30518E+04, 0.30970E+04, 0.31425E+04, 0.31885E+04,
0.32347E+04, 0.32813E+04, 0.33282E+04, 0.33755E+04, 0.34231E+04,
0.34711E+04, 0.35194E+04, 0.35681E+04, 0.36172E+04, 0.36666E+04,
0.37163E+04])
# --------------- CO 28: M = 5, I = 3 ---------------------
M = 5
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.23030E+02, 0.32495E+02, 0.41966E+02,
0.51443E+02, 0.60926E+02, 0.70415E+02, 0.79910E+02, 0.89410E+02,
0.98918E+02, 0.10843E+03, 0.11795E+03, 0.12749E+03, 0.13703E+03,
0.14659E+03, 0.15618E+03, 0.16579E+03, 0.17543E+03, 0.18511E+03,
0.19483E+03, 0.20461E+03, 0.21444E+03, 0.22434E+03, 0.23430E+03,
0.24435E+03, 0.25447E+03, 0.26468E+03, 0.27499E+03, 0.28540E+03,
0.29591E+03, 0.30652E+03, 0.31725E+03, 0.32810E+03, 0.33906E+03,
0.35014E+03, 0.36136E+03, 0.37270E+03, 0.38417E+03, 0.39577E+03,
0.40752E+03, 0.41940E+03, 0.43142E+03, 0.44358E+03, 0.45589E+03,
0.46834E+03, 0.48094E+03, 0.49369E+03, 0.50659E+03, 0.51964E+03,
0.53284E+03, 0.54619E+03, 0.55971E+03, 0.57337E+03, 0.58719E+03,
0.60117E+03, 0.61530E+03, 0.62959E+03, 0.64405E+03, 0.65866E+03,
0.67343E+03, 0.68837E+03, 0.70346E+03, 0.71872E+03, 0.73414E+03,
0.74972E+03, 0.76547E+03, 0.78138E+03, 0.79745E+03, 0.81369E+03,
0.83010E+03, 0.84667E+03, 0.86341E+03, 0.88031E+03, 0.89738E+03,
0.91462E+03, 0.93202E+03, 0.94960E+03, 0.96734E+03, 0.98524E+03,
0.10033E+04, 0.10216E+04, 0.10400E+04, 0.10586E+04, 0.10773E+04,
0.10962E+04, 0.11153E+04, 0.11346E+04, 0.11540E+04, 0.11737E+04,
0.11934E+04, 0.12134E+04, 0.12335E+04, 0.12538E+04, 0.12743E+04,
0.12949E+04, 0.13157E+04, 0.13367E+04, 0.13578E+04, 0.13792E+04,
0.14007E+04, 0.14223E+04, 0.14442E+04, 0.14662E+04, 0.14884E+04,
0.15108E+04, 0.15333E+04, 0.15560E+04, 0.15789E+04, 0.16020E+04,
0.16252E+04, 0.16486E+04, 0.16722E+04, 0.16960E+04, 0.17199E+04,
0.17441E+04, 0.17684E+04, 0.17928E+04, 0.18175E+04, 0.18423E+04,
0.18673E+04])
# --------------- CO 27: M = 5, I = 4 ---------------------
M = 5
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.13505E+03, 0.19054E+03, 0.24606E+03,
0.30161E+03, 0.35720E+03, 0.41283E+03, 0.46848E+03, 0.52418E+03,
0.57991E+03, 0.63568E+03, 0.69149E+03, 0.74737E+03, 0.80332E+03,
0.85937E+03, 0.91553E+03, 0.97183E+03, 0.10283E+04, 0.10850E+04,
0.11420E+04, 0.11992E+04, 0.12568E+04, 0.13147E+04, 0.13730E+04,
0.14318E+04, 0.14910E+04, 0.15507E+04, 0.16110E+04, 0.16718E+04,
0.17332E+04, 0.17952E+04, 0.18579E+04, 0.19212E+04, 0.19852E+04,
0.20499E+04, 0.21153E+04, 0.21815E+04, 0.22484E+04, 0.23161E+04,
0.23846E+04, 0.24539E+04, 0.25240E+04, 0.25949E+04, 0.26666E+04,
0.27392E+04, 0.28127E+04, 0.28869E+04, 0.29621E+04, 0.30381E+04,
0.31150E+04, 0.31928E+04, 0.32715E+04, 0.33511E+04, 0.34316E+04,
0.35129E+04, 0.35952E+04, 0.36785E+04, 0.37626E+04, 0.38477E+04,
0.39336E+04, 0.40206E+04, 0.41084E+04, 0.41972E+04, 0.42869E+04,
0.43776E+04, 0.44692E+04, 0.45618E+04, 0.46553E+04, 0.47498E+04,
0.48452E+04, 0.49416E+04, 0.50390E+04, 0.51373E+04, 0.52366E+04,
0.53368E+04, 0.54381E+04, 0.55403E+04, 0.56435E+04, 0.57476E+04,
0.58527E+04, 0.59588E+04, 0.60659E+04, 0.61739E+04, 0.62829E+04,
0.63930E+04, 0.65040E+04, 0.66160E+04, 0.67290E+04, 0.68429E+04,
0.69579E+04, 0.70739E+04, 0.71908E+04, 0.73088E+04, 0.74277E+04,
0.75477E+04, 0.76686E+04, 0.77905E+04, 0.79135E+04, 0.80374E+04,
0.81624E+04, 0.82883E+04, 0.84153E+04, 0.85432E+04, 0.86722E+04,
0.88022E+04, 0.89331E+04, 0.90651E+04, 0.91982E+04, 0.93322E+04,
0.94672E+04, 0.96033E+04, 0.97404E+04, 0.98785E+04, 0.10018E+05,
0.10158E+05, 0.10299E+05, 0.10441E+05, 0.10584E+05, 0.10728E+05,
0.10874E+05])
# --------------- CO 38: M = 5, I = 5 ---------------------
M = 5
I = 5
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.48264E+02, 0.68112E+02, 0.87974E+02,
0.10785E+03, 0.12773E+03, 0.14763E+03, 0.16754E+03, 0.18747E+03,
0.20741E+03, 0.22736E+03, 0.24733E+03, 0.26732E+03, 0.28735E+03,
0.30741E+03, 0.32752E+03, 0.34770E+03, 0.36794E+03, 0.38828E+03,
0.40871E+03, 0.42926E+03, 0.44994E+03, 0.47077E+03, 0.49175E+03,
0.51290E+03, 0.53424E+03, 0.55578E+03, 0.57752E+03, 0.59948E+03,
0.62166E+03, 0.64409E+03, 0.66676E+03, 0.68969E+03, 0.71287E+03,
0.73633E+03, 0.76006E+03, 0.78407E+03, 0.80836E+03, 0.83295E+03,
0.85784E+03, 0.88302E+03, 0.90851E+03, 0.93431E+03, 0.96042E+03,
0.98686E+03, 0.10136E+04, 0.10407E+04, 0.10681E+04, 0.10958E+04,
0.11238E+04, 0.11522E+04, 0.11809E+04, 0.12100E+04, 0.12393E+04,
0.12691E+04, 0.12991E+04, 0.13295E+04, 0.13603E+04, 0.13914E+04,
0.14228E+04, 0.14546E+04, 0.14867E+04, 0.15192E+04, 0.15520E+04,
0.15852E+04, 0.16187E+04, 0.16526E+04, 0.16869E+04, 0.17215E+04,
0.17564E+04, 0.17917E+04, 0.18274E+04, 0.18634E+04, 0.18998E+04,
0.19365E+04, 0.19736E+04, 0.20111E+04, 0.20489E+04, 0.20871E+04,
0.21256E+04, 0.21645E+04, 0.22038E+04, 0.22434E+04, 0.22834E+04,
0.23238E+04, 0.23645E+04, 0.24056E+04, 0.24471E+04, 0.24889E+04,
0.25311E+04, 0.25736E+04, 0.26166E+04, 0.26599E+04, 0.27035E+04,
0.27476E+04, 0.27920E+04, 0.28368E+04, 0.28819E+04, 0.29275E+04,
0.29733E+04, 0.30196E+04, 0.30662E+04, 0.31133E+04, 0.31606E+04,
0.32084E+04, 0.32565E+04, 0.33050E+04, 0.33539E+04, 0.34032E+04,
0.34528E+04, 0.35028E+04, 0.35532E+04, 0.36040E+04, 0.36551E+04,
0.37067E+04, 0.37586E+04, 0.38108E+04, 0.38635E+04, 0.39165E+04,
0.39699E+04])
# --------------- CO 37: M = 5, I = 6 ---------------------
M = 5
I = 6
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.28271E+03, 0.39894E+03, 0.51524E+03,
0.63162E+03, 0.74807E+03, 0.86459E+03, 0.98119E+03, 0.10979E+04,
0.12146E+04, 0.13314E+04, 0.14484E+04, 0.15654E+04, 0.16826E+04,
0.18000E+04, 0.19176E+04, 0.20355E+04, 0.21538E+04, 0.22725E+04,
0.23916E+04, 0.25114E+04, 0.26318E+04, 0.27529E+04, 0.28749E+04,
0.29977E+04, 0.31215E+04, 0.32463E+04, 0.33721E+04, 0.34991E+04,
0.36274E+04, 0.37568E+04, 0.38876E+04, 0.40197E+04, 0.41533E+04,
0.42882E+04, 0.44247E+04, 0.45626E+04, 0.47022E+04, 0.48433E+04,
0.49860E+04, 0.51304E+04, 0.52763E+04, 0.54240E+04, 0.55735E+04,
0.57246E+04, 0.58775E+04, 0.60321E+04, 0.61886E+04, 0.63468E+04,
0.65068E+04, 0.66687E+04, 0.68324E+04, 0.69980E+04, 0.71654E+04,
0.73347E+04, 0.75058E+04, 0.76789E+04, 0.78539E+04, 0.80307E+04,
0.82096E+04, 0.83903E+04, 0.85729E+04, 0.87576E+04, 0.89441E+04,
0.91326E+04, 0.93230E+04, 0.95154E+04, 0.97098E+04, 0.99061E+04,
0.10104E+05, 0.10305E+05, 0.10507E+05, 0.10711E+05, 0.10918E+05,
0.11126E+05, 0.11336E+05, 0.11549E+05, 0.11763E+05, 0.11979E+05,
0.12198E+05, 0.12418E+05, 0.12640E+05, 0.12865E+05, 0.13091E+05,
0.13320E+05, 0.13550E+05, 0.13783E+05, 0.14018E+05, 0.14254E+05,
0.14493E+05, 0.14734E+05, 0.14977E+05, 0.15221E+05, 0.15468E+05,
0.15718E+05, 0.15969E+05, 0.16222E+05, 0.16477E+05, 0.16734E+05,
0.16994E+05, 0.17255E+05, 0.17519E+05, 0.17784E+05, 0.18052E+05,
0.18322E+05, 0.18594E+05, 0.18868E+05, 0.19144E+05, 0.19422E+05,
0.19703E+05, 0.19985E+05, 0.20270E+05, 0.20556E+05, 0.20845E+05,
0.21136E+05, 0.21429E+05, 0.21724E+05, 0.22021E+05, 0.22320E+05,
0.22622E+05])
# --------------- CH4 211: M = 6, I = 1 ---------------------
M = 6
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.54800E+02, 0.91500E+02, 0.13410E+03,
0.18180E+03, 0.23410E+03, 0.29070E+03, 0.35140E+03, 0.41600E+03,
0.48450E+03, 0.55720E+03, 0.63420E+03, 0.71600E+03, 0.80310E+03,
0.89590E+03, 0.99520E+03, 0.11017E+04, 0.12161E+04, 0.13393E+04,
0.14721E+04, 0.16155E+04, 0.17706E+04, 0.19384E+04, 0.21202E+04,
0.23172E+04, 0.25307E+04, 0.27624E+04, 0.30137E+04, 0.32864E+04,
0.35823E+04, 0.39034E+04, 0.42519E+04, 0.46300E+04, 0.50402E+04,
0.54853E+04, 0.59679E+04, 0.64913E+04, 0.70588E+04, 0.76739E+04,
0.83404E+04, 0.90625E+04, 0.98446E+04, 0.10691E+05, 0.11608E+05,
0.12600E+05, 0.13674E+05, 0.14835E+05, 0.16090E+05, 0.17447E+05,
0.18914E+05, 0.20500E+05, 0.22212E+05, 0.24063E+05, 0.26061E+05,
0.28218E+05, 0.30548E+05, 0.33063E+05, 0.35778E+05, 0.38708E+05,
0.41871E+05, 0.45284E+05, 0.48970E+05, 0.52940E+05, 0.57230E+05,
0.61860E+05, 0.66860E+05, 0.72250E+05, 0.78070E+05, 0.84350E+05,
0.91130E+05, 0.98450E+05, 0.10635E+06, 0.11488E+06, 0.12408E+06,
0.13403E+06, 0.14480E+06, 0.15640E+06, 0.16890E+06, 0.18240E+06,
0.19700E+06, 0.21280E+06, 0.22980E+06, 0.24830E+06, 0.26820E+06,
0.28970E+06, 0.31290E+06, 0.33800E+06, 0.36520E+06, 0.39450E+06,
0.42600E+06, 0.46000E+06, 0.49700E+06, 0.53700E+06, 0.58100E+06,
0.62700E+06, 0.67800E+06, 0.73300E+06, 0.79200E+06, 0.85600E+06,
0.92500E+06, 0.10000E+07, 0.10800E+07, 0.11670E+07, 0.12610E+07,
0.13620E+07, 0.14720E+07, 0.15910E+07, 0.17190E+07, 0.18600E+07,
0.20100E+07, 0.21700E+07, 0.23400E+07, 0.25300E+07, 0.27300E+07,
0.29500E+07, 0.31800E+07, 0.34300E+07, 0.37000E+07, 0.39900E+07,
0.42856E+07])
# --------------- CH4 311: M = 6, I = 2 ---------------------
M = 6
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.10958E+03, 0.18304E+03, 0.26818E+03,
0.36356E+03, 0.46820E+03, 0.58141E+03, 0.70270E+03, 0.83186E+03,
0.96893E+03, 0.11142E+04, 0.12682E+04, 0.14316E+04, 0.16055E+04,
0.17909E+04, 0.19891E+04, 0.22016E+04, 0.24297E+04, 0.26752E+04,
0.29399E+04, 0.32255E+04, 0.35342E+04, 0.38680E+04, 0.42294E+04,
0.46208E+04, 0.50449E+04, 0.55046E+04, 0.60030E+04, 0.65434E+04,
0.71293E+04, 0.77646E+04, 0.84535E+04, 0.92004E+04, 0.10010E+05,
0.10888E+05, 0.11838E+05, 0.12869E+05, 0.13984E+05, 0.15193E+05,
0.16501E+05, 0.17916E+05, 0.19448E+05, 0.21104E+05, 0.22895E+05,
0.24830E+05, 0.26921E+05, 0.29180E+05, 0.31618E+05, 0.34250E+05,
0.37090E+05, 0.40152E+05, 0.43454E+05, 0.47012E+05, 0.50845E+05,
0.54973E+05, 0.59416E+05, 0.64197E+05, 0.69340E+05, 0.74870E+05,
0.80813E+05, 0.87198E+05, 0.94055E+05, 0.10142E+06, 0.10932E+06,
0.11779E+06, 0.12688E+06, 0.13662E+06, 0.14706E+06, 0.15824E+06,
0.17021E+06, 0.18302E+06, 0.19673E+06, 0.21139E+06, 0.22706E+06,
0.24381E+06, 0.26171E+06, 0.28082E+06, 0.30122E+06, 0.32299E+06,
0.34621E+06, 0.37097E+06, 0.39737E+06, 0.42551E+06, 0.45548E+06,
0.48739E+06, 0.52136E+06, 0.55752E+06, 0.59598E+06, 0.63688E+06,
0.68036E+06, 0.72657E+06, 0.77566E+06, 0.82780E+06, 0.88316E+06,
0.94191E+06, 0.10043E+07, 0.10704E+07, 0.11405E+07, 0.12148E+07,
0.12936E+07, 0.13770E+07, 0.14654E+07, 0.15589E+07, 0.16579E+07,
0.17627E+07, 0.18736E+07, 0.19908E+07, 0.21147E+07, 0.22456E+07,
0.23840E+07, 0.25301E+07, 0.26844E+07, 0.28474E+07, 0.30193E+07,
0.32007E+07, 0.33921E+07, 0.35939E+07, 0.38067E+07, 0.40310E+07,
0.42673E+07])
# --------------- CH4 212: M = 6, I = 3 ---------------------
M = 6
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.44079E+03, 0.73786E+03, 0.10822E+04,
0.14679E+04, 0.18913E+04, 0.23497E+04, 0.28415E+04, 0.33665E+04,
0.39257E+04, 0.45211E+04, 0.51562E+04, 0.58349E+04, 0.65624E+04,
0.73445E+04, 0.81872E+04, 0.90978E+04, 0.10084E+05, 0.11153E+05,
0.12315E+05, 0.13579E+05, 0.14955E+05, 0.16455E+05, 0.18089E+05,
0.19871E+05, 0.21816E+05, 0.23937E+05, 0.26251E+05, 0.28776E+05,
0.31531E+05, 0.34535E+05, 0.37811E+05, 0.41384E+05, 0.45278E+05,
0.49521E+05, 0.54144E+05, 0.59178E+05, 0.64657E+05, 0.70621E+05,
0.77108E+05, 0.84161E+05, 0.91828E+05, 0.10016E+06, 0.10921E+06,
0.11903E+06, 0.12968E+06, 0.14124E+06, 0.15378E+06, 0.16736E+06,
0.18207E+06, 0.19800E+06, 0.21524E+06, 0.23389E+06, 0.25405E+06,
0.27585E+06, 0.29939E+06, 0.32482E+06, 0.35226E+06, 0.38186E+06,
0.41379E+06, 0.44821E+06, 0.48529E+06, 0.52522E+06, 0.56821E+06,
0.61447E+06, 0.66422E+06, 0.71771E+06, 0.77519E+06, 0.83693E+06,
0.90323E+06, 0.97438E+06, 0.10507E+07, 0.11326E+07, 0.12203E+07,
0.13143E+07, 0.14150E+07, 0.15228E+07, 0.16382E+07, 0.17616E+07,
0.18935E+07, 0.20346E+07, 0.21853E+07, 0.23463E+07, 0.25181E+07,
0.27016E+07, 0.28973E+07, 0.31060E+07, 0.33284E+07, 0.35655E+07,
0.38181E+07, 0.40870E+07, 0.43733E+07, 0.46780E+07, 0.50020E+07,
0.53467E+07, 0.57130E+07, 0.61023E+07, 0.65158E+07, 0.69549E+07,
0.74211E+07, 0.79158E+07, 0.84407E+07, 0.89973E+07, 0.95874E+07,
0.10213E+08, 0.10875E+08, 0.11577E+08, 0.12320E+08, 0.13107E+08,
0.13940E+08, 0.14820E+08, 0.15752E+08, 0.16736E+08, 0.17777E+08,
0.18877E+08, 0.20038E+08, 0.21265E+08, 0.22560E+08, 0.23927E+08,
0.25369E+08])
# --------------- CH4 312: M = 6, I = 4 ---------------------
M = 6
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.88231E+03, 0.14770E+04, 0.21661E+04,
0.29384E+04, 0.37859E+04, 0.47034E+04, 0.56879E+04, 0.67388E+04,
0.78581E+04, 0.90501E+04, 0.10321E+05, 0.11680E+05, 0.13136E+05,
0.14702E+05, 0.16389E+05, 0.18212E+05, 0.20186E+05, 0.22328E+05,
0.24654E+05, 0.27185E+05, 0.29941E+05, 0.32943E+05, 0.36216E+05,
0.39786E+05, 0.43681E+05, 0.47930E+05, 0.52567E+05, 0.57625E+05,
0.63144E+05, 0.69164E+05, 0.75730E+05, 0.82890E+05, 0.90693E+05,
0.99198E+05, 0.10846E+06, 0.11855E+06, 0.12954E+06, 0.14149E+06,
0.15450E+06, 0.16864E+06, 0.18402E+06, 0.20072E+06, 0.21886E+06,
0.23856E+06, 0.25993E+06, 0.28312E+06, 0.30825E+06, 0.33550E+06,
0.36501E+06, 0.39696E+06, 0.43155E+06, 0.46896E+06, 0.50942E+06,
0.55315E+06, 0.60039E+06, 0.65141E+06, 0.70648E+06, 0.76589E+06,
0.82997E+06, 0.89904E+06, 0.97346E+06, 0.10536E+07, 0.11399E+07,
0.12327E+07, 0.13326E+07, 0.14400E+07, 0.15554E+07, 0.16793E+07,
0.18124E+07, 0.19553E+07, 0.21085E+07, 0.22729E+07, 0.24490E+07,
0.26378E+07, 0.28400E+07, 0.30565E+07, 0.32881E+07, 0.35360E+07,
0.38010E+07, 0.40843E+07, 0.43870E+07, 0.47103E+07, 0.50555E+07,
0.54239E+07, 0.58169E+07, 0.62361E+07, 0.66830E+07, 0.71592E+07,
0.76666E+07, 0.82069E+07, 0.87820E+07, 0.93940E+07, 0.10045E+08,
0.10737E+08, 0.11473E+08, 0.12256E+08, 0.13086E+08, 0.13969E+08,
0.14905E+08, 0.15899E+08, 0.16954E+08, 0.18072E+08, 0.19258E+08,
0.20515E+08, 0.21847E+08, 0.23257E+08, 0.24750E+08, 0.26331E+08,
0.28004E+08, 0.29774E+08, 0.31646E+08, 0.33625E+08, 0.35716E+08,
0.37926E+08, 0.40261E+08, 0.42726E+08, 0.45329E+08, 0.48077E+08,
0.50975E+08])
# --------------- O2 66: M = 7, I = 1 ---------------------
M = 7
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.44334E+02, 0.62460E+02, 0.80596E+02,
0.98738E+02, 0.11688E+03, 0.13503E+03, 0.15319E+03, 0.17136E+03,
0.18954E+03, 0.20775E+03, 0.22600E+03, 0.24431E+03, 0.26270E+03,
0.28119E+03, 0.29981E+03, 0.31857E+03, 0.33750E+03, 0.35662E+03,
0.37594E+03, 0.39550E+03, 0.41529E+03, 0.43535E+03, 0.45568E+03,
0.47630E+03, 0.49722E+03, 0.51844E+03, 0.53998E+03, 0.56185E+03,
0.58406E+03, 0.60660E+03, 0.62949E+03, 0.65274E+03, 0.67635E+03,
0.70031E+03, 0.72465E+03, 0.74936E+03, 0.77444E+03, 0.79990E+03,
0.82574E+03, 0.85197E+03, 0.87858E+03, 0.90558E+03, 0.93297E+03,
0.96076E+03, 0.98895E+03, 0.10175E+04, 0.10465E+04, 0.10759E+04,
0.11057E+04, 0.11359E+04, 0.11665E+04, 0.11976E+04, 0.12290E+04,
0.12609E+04, 0.12931E+04, 0.13258E+04, 0.13590E+04, 0.13925E+04,
0.14265E+04, 0.14609E+04, 0.14958E+04, 0.15311E+04, 0.15669E+04,
0.16031E+04, 0.16397E+04, 0.16768E+04, 0.17144E+04, 0.17524E+04,
0.17909E+04, 0.18298E+04, 0.18692E+04, 0.19091E+04, 0.19495E+04,
0.19904E+04, 0.20318E+04, 0.20736E+04, 0.21160E+04, 0.21588E+04,
0.22022E+04, 0.22461E+04, 0.22905E+04, 0.23354E+04, 0.23809E+04,
0.24268E+04, 0.24734E+04, 0.25204E+04, 0.25680E+04, 0.26162E+04,
0.26649E+04, 0.27142E+04, 0.27641E+04, 0.28145E+04, 0.28655E+04,
0.29171E+04, 0.29693E+04, 0.30221E+04, 0.30755E+04, 0.31295E+04,
0.31841E+04, 0.32393E+04, 0.32951E+04, 0.33516E+04, 0.34087E+04,
0.34665E+04, 0.35249E+04, 0.35839E+04, 0.36436E+04, 0.37040E+04,
0.37650E+04, 0.38267E+04, 0.38891E+04, 0.39522E+04, 0.40159E+04,
0.40804E+04, 0.41455E+04, 0.42114E+04, 0.42780E+04, 0.43452E+04,
0.44132E+04])
# --------------- O2 68: M = 7, I = 2 ---------------------
M = 7
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.89206E+02, 0.12759E+03, 0.16600E+03,
0.20442E+03, 0.24285E+03, 0.28128E+03, 0.31973E+03, 0.35821E+03,
0.39672E+03, 0.43530E+03, 0.47398E+03, 0.51281E+03, 0.55183E+03,
0.59108E+03, 0.63062E+03, 0.67051E+03, 0.71078E+03, 0.75148E+03,
0.79265E+03, 0.83435E+03, 0.87659E+03, 0.91941E+03, 0.96285E+03,
0.10069E+04, 0.10517E+04, 0.10971E+04, 0.11432E+04, 0.11901E+04,
0.12377E+04, 0.12861E+04, 0.13352E+04, 0.13851E+04, 0.14358E+04,
0.14872E+04, 0.15395E+04, 0.15926E+04, 0.16466E+04, 0.17013E+04,
0.17569E+04, 0.18134E+04, 0.18706E+04, 0.19288E+04, 0.19877E+04,
0.20476E+04, 0.21083E+04, 0.21698E+04, 0.22323E+04, 0.22956E+04,
0.23598E+04, 0.24248E+04, 0.24908E+04, 0.25576E+04, 0.26253E+04,
0.26940E+04, 0.27635E+04, 0.28339E+04, 0.29052E+04, 0.29775E+04,
0.30506E+04, 0.31247E+04, 0.31997E+04, 0.32756E+04, 0.33524E+04,
0.34302E+04, 0.35089E+04, 0.35885E+04, 0.36691E+04, 0.37506E+04,
0.38331E+04, 0.39166E+04, 0.40010E+04, 0.40864E+04, 0.41727E+04,
0.42601E+04, 0.43484E+04, 0.44377E+04, 0.45280E+04, 0.46193E+04,
0.47116E+04, 0.48049E+04, 0.48992E+04, 0.49946E+04, 0.50909E+04,
0.51883E+04, 0.52868E+04, 0.53863E+04, 0.54868E+04, 0.55884E+04,
0.56911E+04, 0.57949E+04, 0.58997E+04, 0.60056E+04, 0.61126E+04,
0.62207E+04, 0.63298E+04, 0.64401E+04, 0.65516E+04, 0.66641E+04,
0.67778E+04, 0.68926E+04, 0.70085E+04, 0.71256E+04, 0.72439E+04,
0.73633E+04, 0.74839E+04, 0.76056E+04, 0.77286E+04, 0.78527E+04,
0.79781E+04, 0.81046E+04, 0.82324E+04, 0.83613E+04, 0.84915E+04,
0.86229E+04, 0.87556E+04, 0.88895E+04, 0.90247E+04, 0.91611E+04,
0.92988E+04])
# --------------- O2 67: M = 7, I = 3 ---------------------
M = 7
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.52071E+03, 0.74484E+03, 0.96908E+03,
0.11934E+04, 0.14177E+04, 0.16422E+04, 0.18667E+04, 0.20913E+04,
0.23161E+04, 0.25413E+04, 0.27671E+04, 0.29936E+04, 0.32212E+04,
0.34501E+04, 0.36806E+04, 0.39130E+04, 0.41476E+04, 0.43846E+04,
0.46242E+04, 0.48668E+04, 0.51125E+04, 0.53615E+04, 0.56140E+04,
0.58701E+04, 0.61300E+04, 0.63938E+04, 0.66617E+04, 0.69337E+04,
0.72099E+04, 0.74904E+04, 0.77754E+04, 0.80647E+04, 0.83586E+04,
0.86571E+04, 0.89602E+04, 0.92680E+04, 0.95805E+04, 0.98977E+04,
0.10220E+05, 0.10547E+05, 0.10878E+05, 0.11215E+05, 0.11556E+05,
0.11903E+05, 0.12254E+05, 0.12611E+05, 0.12972E+05, 0.13338E+05,
0.13710E+05, 0.14086E+05, 0.14468E+05, 0.14855E+05, 0.15247E+05,
0.15644E+05, 0.16046E+05, 0.16453E+05, 0.16866E+05, 0.17283E+05,
0.17706E+05, 0.18135E+05, 0.18568E+05, 0.19007E+05, 0.19452E+05,
0.19901E+05, 0.20356E+05, 0.20817E+05, 0.21283E+05, 0.21754E+05,
0.22231E+05, 0.22713E+05, 0.23201E+05, 0.23695E+05, 0.24194E+05,
0.24699E+05, 0.25209E+05, 0.25725E+05, 0.26247E+05, 0.26775E+05,
0.27308E+05, 0.27847E+05, 0.28393E+05, 0.28944E+05, 0.29500E+05,
0.30063E+05, 0.30632E+05, 0.31207E+05, 0.31788E+05, 0.32375E+05,
0.32968E+05, 0.33568E+05, 0.34173E+05, 0.34785E+05, 0.35403E+05,
0.36028E+05, 0.36659E+05, 0.37296E+05, 0.37939E+05, 0.38590E+05,
0.39246E+05, 0.39909E+05, 0.40579E+05, 0.41256E+05, 0.41939E+05,
0.42629E+05, 0.43325E+05, 0.44029E+05, 0.44739E+05, 0.45456E+05,
0.46180E+05, 0.46911E+05, 0.47649E+05, 0.48394E+05, 0.49146E+05,
0.49905E+05, 0.50671E+05, 0.51445E+05, 0.52226E+05, 0.53014E+05,
0.53809E+05])
# --------------- NO 46: M = 8, I = 1 ---------------------
M = 8
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.15840E+03, 0.23971E+03, 0.33080E+03,
0.42907E+03, 0.53251E+03, 0.63972E+03, 0.74975E+03, 0.86195E+03,
0.97582E+03, 0.10911E+04, 0.12074E+04, 0.13248E+04, 0.14430E+04,
0.15621E+04, 0.16820E+04, 0.18027E+04, 0.19243E+04, 0.20468E+04,
0.21703E+04, 0.22948E+04, 0.24204E+04, 0.25472E+04, 0.26753E+04,
0.28046E+04, 0.29354E+04, 0.30676E+04, 0.32013E+04, 0.33365E+04,
0.34734E+04, 0.36120E+04, 0.37522E+04, 0.38942E+04, 0.40379E+04,
0.41835E+04, 0.43310E+04, 0.44803E+04, 0.46316E+04, 0.47849E+04,
0.49400E+04, 0.50972E+04, 0.52564E+04, 0.54176E+04, 0.55809E+04,
0.57462E+04, 0.59137E+04, 0.60832E+04, 0.62548E+04, 0.64286E+04,
0.66045E+04, 0.67825E+04, 0.69628E+04, 0.71451E+04, 0.73297E+04,
0.75164E+04, 0.77053E+04, 0.78964E+04, 0.80897E+04, 0.82853E+04,
0.84830E+04, 0.86830E+04, 0.88852E+04, 0.90896E+04, 0.92963E+04,
0.95052E+04, 0.97164E+04, 0.99297E+04, 0.10145E+05, 0.10363E+05,
0.10583E+05, 0.10806E+05, 0.11031E+05, 0.11258E+05, 0.11487E+05,
0.11718E+05, 0.11952E+05, 0.12188E+05, 0.12426E+05, 0.12667E+05,
0.12910E+05, 0.13155E+05, 0.13403E+05, 0.13652E+05, 0.13905E+05,
0.14159E+05, 0.14416E+05, 0.14675E+05, 0.14936E+05, 0.15199E+05,
0.15465E+05, 0.15733E+05, 0.16004E+05, 0.16277E+05, 0.16552E+05,
0.16829E+05, 0.17109E+05, 0.17391E+05, 0.17675E+05, 0.17962E+05,
0.18251E+05, 0.18542E+05, 0.18836E+05, 0.19131E+05, 0.19430E+05,
0.19730E+05, 0.20033E+05, 0.20338E+05, 0.20646E+05, 0.20955E+05,
0.21268E+05, 0.21582E+05, 0.21899E+05, 0.22218E+05, 0.22539E+05,
0.22863E+05, 0.23189E+05, 0.23518E+05, 0.23848E+05, 0.24181E+05,
0.24517E+05])
# --------------- NO 56: M = 8, I = 2 ---------------------
M = 8
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.10942E+03, 0.16560E+03, 0.22856E+03,
0.29647E+03, 0.36795E+03, 0.44204E+03, 0.51808E+03, 0.59561E+03,
0.67432E+03, 0.75396E+03, 0.83439E+03, 0.91551E+03, 0.99725E+03,
0.10796E+04, 0.11625E+04, 0.12460E+04, 0.13302E+04, 0.14150E+04,
0.15005E+04, 0.15868E+04, 0.16739E+04, 0.17618E+04, 0.18506E+04,
0.19404E+04, 0.20311E+04, 0.21229E+04, 0.22158E+04, 0.23098E+04,
0.24050E+04, 0.25013E+04, 0.25989E+04, 0.26976E+04, 0.27977E+04,
0.28991E+04, 0.30018E+04, 0.31058E+04, 0.32112E+04, 0.33180E+04,
0.34262E+04, 0.35358E+04, 0.36468E+04, 0.37593E+04, 0.38732E+04,
0.39885E+04, 0.41054E+04, 0.42237E+04, 0.43436E+04, 0.44649E+04,
0.45877E+04, 0.47121E+04, 0.48379E+04, 0.49654E+04, 0.50943E+04,
0.52248E+04, 0.53568E+04, 0.54904E+04, 0.56255E+04, 0.57622E+04,
0.59004E+04, 0.60403E+04, 0.61816E+04, 0.63246E+04, 0.64692E+04,
0.66152E+04, 0.67630E+04, 0.69123E+04, 0.70631E+04, 0.72156E+04,
0.73696E+04, 0.75253E+04, 0.76825E+04, 0.78414E+04, 0.80018E+04,
0.81638E+04, 0.83275E+04, 0.84927E+04, 0.86596E+04, 0.88280E+04,
0.89981E+04, 0.91698E+04, 0.93430E+04, 0.95180E+04, 0.96945E+04,
0.98726E+04, 0.10052E+05, 0.10234E+05, 0.10417E+05, 0.10601E+05,
0.10788E+05, 0.10975E+05, 0.11165E+05, 0.11356E+05, 0.11549E+05,
0.11743E+05, 0.11939E+05, 0.12137E+05, 0.12336E+05, 0.12537E+05,
0.12739E+05, 0.12943E+05, 0.13149E+05, 0.13356E+05, 0.13565E+05,
0.13776E+05, 0.13988E+05, 0.14202E+05, 0.14418E+05, 0.14635E+05,
0.14853E+05, 0.15074E+05, 0.15296E+05, 0.15520E+05, 0.15745E+05,
0.15972E+05, 0.16200E+05, 0.16431E+05, 0.16663E+05, 0.16896E+05,
0.17131E+05])
# --------------- NO 48: M = 8, I = 3 ---------------------
M = 8
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.16695E+03, 0.25269E+03, 0.34876E+03,
0.45239E+03, 0.56148E+03, 0.67455E+03, 0.79059E+03, 0.90891E+03,
0.10290E+04, 0.11506E+04, 0.12733E+04, 0.13971E+04, 0.15219E+04,
0.16476E+04, 0.17742E+04, 0.19017E+04, 0.20302E+04, 0.21598E+04,
0.22904E+04, 0.24223E+04, 0.25553E+04, 0.26897E+04, 0.28255E+04,
0.29628E+04, 0.31016E+04, 0.32420E+04, 0.33842E+04, 0.35280E+04,
0.36736E+04, 0.38211E+04, 0.39704E+04, 0.41217E+04, 0.42750E+04,
0.44302E+04, 0.45876E+04, 0.47469E+04, 0.49084E+04, 0.50720E+04,
0.52378E+04, 0.54058E+04, 0.55759E+04, 0.57483E+04, 0.59230E+04,
0.60999E+04, 0.62791E+04, 0.64605E+04, 0.66443E+04, 0.68304E+04,
0.70187E+04, 0.72095E+04, 0.74026E+04, 0.75980E+04, 0.77958E+04,
0.79960E+04, 0.81986E+04, 0.84036E+04, 0.86109E+04, 0.88207E+04,
0.90328E+04, 0.92474E+04, 0.94644E+04, 0.96839E+04, 0.99057E+04,
0.10130E+05, 0.10357E+05, 0.10586E+05, 0.10817E+05, 0.11052E+05,
0.11288E+05, 0.11527E+05, 0.11768E+05, 0.12012E+05, 0.12259E+05,
0.12507E+05, 0.12759E+05, 0.13012E+05, 0.13269E+05, 0.13527E+05,
0.13788E+05, 0.14052E+05, 0.14318E+05, 0.14587E+05, 0.14858E+05,
0.15131E+05, 0.15408E+05, 0.15686E+05, 0.15967E+05, 0.16251E+05,
0.16537E+05, 0.16825E+05, 0.17116E+05, 0.17410E+05, 0.17706E+05,
0.18004E+05, 0.18305E+05, 0.18609E+05, 0.18915E+05, 0.19224E+05,
0.19535E+05, 0.19848E+05, 0.20164E+05, 0.20483E+05, 0.20804E+05,
0.21127E+05, 0.21453E+05, 0.21782E+05, 0.22113E+05, 0.22447E+05,
0.22783E+05, 0.23122E+05, 0.23463E+05, 0.23807E+05, 0.24153E+05,
0.24502E+05, 0.24853E+05, 0.25207E+05, 0.25563E+05, 0.25922E+05,
0.26283E+05])
# --------------- SO2 626: M = 9, I = 1 ---------------------
M = 9
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.52899E+03, 0.89171E+03, 0.13139E+04,
0.17915E+04, 0.23246E+04, 0.29155E+04, 0.35675E+04, 0.42848E+04,
0.50723E+04, 0.59352E+04, 0.68794E+04, 0.79109E+04, 0.90366E+04,
0.10264E+05, 0.11599E+05, 0.13052E+05, 0.14629E+05, 0.16340E+05,
0.18193E+05, 0.20199E+05, 0.22366E+05, 0.24704E+05, 0.27225E+05,
0.29938E+05, 0.32855E+05, 0.35987E+05, 0.39346E+05, 0.42944E+05,
0.46794E+05, 0.50909E+05, 0.55302E+05, 0.59986E+05, 0.64977E+05,
0.70288E+05, 0.75934E+05, 0.81931E+05, 0.88294E+05, 0.95040E+05,
0.10219E+06, 0.10975E+06, 0.11774E+06, 0.12619E+06, 0.13511E+06,
0.14452E+06, 0.15443E+06, 0.16487E+06, 0.17586E+06, 0.18742E+06,
0.19957E+06, 0.21234E+06, 0.22573E+06, 0.23978E+06, 0.25451E+06,
0.26995E+06, 0.28611E+06, 0.30302E+06, 0.32071E+06, 0.33920E+06,
0.35852E+06, 0.37869E+06, 0.39974E+06, 0.42171E+06, 0.44461E+06,
0.46848E+06, 0.49334E+06, 0.51922E+06, 0.54617E+06, 0.57419E+06,
0.60334E+06, 0.63363E+06, 0.66511E+06, 0.69780E+06, 0.73174E+06,
0.76696E+06, 0.80349E+06, 0.84138E+06, 0.88066E+06, 0.92136E+06,
0.96352E+06, 0.10072E+07, 0.10524E+07, 0.10992E+07, 0.11475E+07,
0.11976E+07, 0.12493E+07, 0.13028E+07, 0.13580E+07, 0.14151E+07,
0.14741E+07, 0.15349E+07, 0.15977E+07, 0.16625E+07, 0.17293E+07,
0.17982E+07, 0.18693E+07, 0.19425E+07, 0.20180E+07, 0.20958E+07,
0.21758E+07, 0.22583E+07, 0.23432E+07, 0.24305E+07, 0.25204E+07,
0.26129E+07, 0.27080E+07, 0.28058E+07, 0.29064E+07, 0.30097E+07,
0.31159E+07, 0.32250E+07, 0.33371E+07, 0.34522E+07, 0.35705E+07,
0.36918E+07, 0.38164E+07, 0.39442E+07, 0.40754E+07, 0.42099E+07,
0.43479E+07])
# --------------- SO2 646: M = 9, I = 2 ---------------------
M = 9
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.53140E+03, 0.89578E+03, 0.13199E+04,
0.17997E+04, 0.23353E+04, 0.29288E+04, 0.35837E+04, 0.43043E+04,
0.50953E+04, 0.59621E+04, 0.69104E+04, 0.79465E+04, 0.90772E+04,
0.10310E+05, 0.11651E+05, 0.13110E+05, 0.14694E+05, 0.16413E+05,
0.18274E+05, 0.20289E+05, 0.22465E+05, 0.24814E+05, 0.27345E+05,
0.30070E+05, 0.33000E+05, 0.36145E+05, 0.39519E+05, 0.43133E+05,
0.46999E+05, 0.51132E+05, 0.55544E+05, 0.60248E+05, 0.65260E+05,
0.70594E+05, 0.76264E+05, 0.82287E+05, 0.88678E+05, 0.95453E+05,
0.10263E+06, 0.11022E+06, 0.11825E+06, 0.12674E+06, 0.13569E+06,
0.14514E+06, 0.15510E+06, 0.16558E+06, 0.17662E+06, 0.18823E+06,
0.20043E+06, 0.21325E+06, 0.22670E+06, 0.24081E+06, 0.25561E+06,
0.27111E+06, 0.28733E+06, 0.30432E+06, 0.32208E+06, 0.34065E+06,
0.36005E+06, 0.38031E+06, 0.40145E+06, 0.42351E+06, 0.44651E+06,
0.47047E+06, 0.49544E+06, 0.52144E+06, 0.54849E+06, 0.57664E+06,
0.60591E+06, 0.63633E+06, 0.66794E+06, 0.70077E+06, 0.73485E+06,
0.77022E+06, 0.80691E+06, 0.84496E+06, 0.88440E+06, 0.92527E+06,
0.96761E+06, 0.10115E+07, 0.10568E+07, 0.11038E+07, 0.11524E+07,
0.12027E+07, 0.12546E+07, 0.13083E+07, 0.13638E+07, 0.14211E+07,
0.14803E+07, 0.15414E+07, 0.16045E+07, 0.16695E+07, 0.17366E+07,
0.18059E+07, 0.18772E+07, 0.19507E+07, 0.20265E+07, 0.21046E+07,
0.21850E+07, 0.22678E+07, 0.23531E+07, 0.24408E+07, 0.25310E+07,
0.26239E+07, 0.27194E+07, 0.28176E+07, 0.29186E+07, 0.30224E+07,
0.31290E+07, 0.32386E+07, 0.33512E+07, 0.34668E+07, 0.35855E+07,
0.37074E+07, 0.38324E+07, 0.39608E+07, 0.40925E+07, 0.42276E+07,
0.43662E+07])
# --------------- NO2 646: M = 10, I = 1 ---------------------
M = 10
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.12046E+04, 0.20297E+04, 0.29875E+04,
0.40626E+04, 0.52463E+04, 0.65350E+04, 0.79286E+04, 0.94298E+04,
0.11043E+05, 0.12776E+05, 0.14634E+05, 0.16627E+05, 0.18765E+05,
0.21056E+05, 0.23511E+05, 0.26143E+05, 0.28961E+05, 0.31979E+05,
0.35209E+05, 0.38663E+05, 0.42355E+05, 0.46300E+05, 0.50510E+05,
0.55001E+05, 0.59787E+05, 0.64884E+05, 0.70308E+05, 0.76075E+05,
0.82201E+05, 0.88704E+05, 0.95602E+05, 0.10291E+06, 0.11065E+06,
0.11884E+06, 0.12750E+06, 0.13665E+06, 0.14631E+06, 0.15650E+06,
0.16724E+06, 0.17856E+06, 0.19047E+06, 0.20301E+06, 0.21618E+06,
0.23002E+06, 0.24456E+06, 0.25981E+06, 0.27580E+06, 0.29256E+06,
0.31012E+06, 0.32850E+06, 0.34773E+06, 0.36784E+06, 0.38886E+06,
0.41082E+06, 0.43374E+06, 0.45766E+06, 0.48262E+06, 0.50863E+06,
0.53574E+06, 0.56398E+06, 0.59339E+06, 0.62398E+06, 0.65581E+06,
0.68891E+06, 0.72331E+06, 0.75905E+06, 0.79617E+06, 0.83470E+06,
0.87469E+06, 0.91617E+06, 0.95919E+06, 0.10038E+07, 0.10500E+07,
0.10979E+07, 0.11474E+07, 0.11988E+07, 0.12519E+07, 0.13068E+07,
0.13636E+07, 0.14224E+07, 0.14831E+07, 0.15459E+07, 0.16107E+07,
0.16776E+07, 0.17467E+07, 0.18180E+07, 0.18916E+07, 0.19675E+07,
0.20458E+07, 0.21265E+07, 0.22097E+07, 0.22954E+07, 0.23837E+07,
0.24747E+07, 0.25684E+07, 0.26648E+07, 0.27641E+07, 0.28662E+07,
0.29713E+07, 0.30794E+07, 0.31905E+07, 0.33048E+07, 0.34223E+07,
0.35430E+07, 0.36670E+07, 0.37944E+07, 0.39253E+07, 0.40597E+07,
0.41976E+07, 0.43393E+07, 0.44846E+07, 0.46337E+07, 0.47867E+07,
0.49437E+07, 0.51046E+07, 0.52696E+07, 0.54388E+07, 0.56122E+07,
0.57900E+07])
# --------------- NH3 4111: M = 11, I = 1 ---------------------
M = 11
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.16013E+03, 0.26692E+03, 0.39067E+03,
0.52933E+03, 0.68153E+03, 0.84641E+03, 0.10234E+04, 0.12125E+04,
0.14136E+04, 0.16272E+04, 0.18537E+04, 0.20937E+04, 0.23481E+04,
0.26177E+04, 0.29035E+04, 0.32065E+04, 0.35279E+04, 0.38688E+04,
0.42304E+04, 0.46141E+04, 0.50212E+04, 0.54531E+04, 0.59114E+04,
0.63976E+04, 0.69133E+04, 0.74602E+04, 0.80401E+04, 0.86549E+04,
0.93066E+04, 0.99971E+04, 0.10729E+05, 0.11504E+05, 0.12324E+05,
0.13193E+05, 0.14112E+05, 0.15085E+05, 0.16114E+05, 0.17201E+05,
0.18352E+05, 0.19567E+05, 0.20851E+05, 0.22208E+05, 0.23640E+05,
0.25152E+05, 0.26747E+05, 0.28430E+05, 0.30205E+05, 0.32077E+05,
0.34050E+05, 0.36128E+05, 0.38317E+05, 0.40623E+05, 0.43050E+05,
0.45605E+05, 0.48292E+05, 0.51119E+05, 0.54091E+05, 0.57215E+05,
0.60498E+05, 0.63947E+05, 0.67569E+05, 0.71372E+05, 0.75364E+05,
0.79552E+05, 0.83946E+05, 0.88553E+05, 0.93384E+05, 0.98447E+05,
0.10375E+06, 0.10931E+06, 0.11513E+06, 0.12122E+06, 0.12760E+06,
0.13427E+06, 0.14125E+06, 0.14855E+06, 0.15619E+06, 0.16417E+06,
0.17250E+06, 0.18121E+06, 0.19031E+06, 0.19981E+06, 0.20973E+06,
0.22008E+06, 0.23088E+06, 0.24215E+06, 0.25390E+06, 0.26615E+06,
0.27892E+06, 0.29223E+06, 0.30610E+06, 0.32055E+06, 0.33559E+06,
0.35125E+06, 0.36756E+06, 0.38453E+06, 0.40219E+06, 0.42056E+06,
0.43967E+06, 0.45953E+06, 0.48019E+06, 0.50165E+06, 0.52396E+06,
0.54714E+06, 0.57122E+06, 0.59622E+06, 0.62218E+06, 0.64913E+06,
0.67710E+06, 0.70613E+06, 0.73624E+06, 0.76748E+06, 0.79988E+06,
0.83347E+06, 0.86829E+06, 0.90439E+06, 0.94180E+06, 0.98056E+06,
0.10207E+07])
# --------------- NH3 5111: M = 11, I = 2 ---------------------
M = 11
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.10697E+03, 0.17832E+03, 0.26100E+03,
0.35364E+03, 0.45533E+03, 0.56549E+03, 0.68377E+03, 0.81007E+03,
0.94447E+03, 0.10872E+04, 0.12385E+04, 0.13988E+04, 0.15688E+04,
0.17490E+04, 0.19399E+04, 0.21424E+04, 0.23571E+04, 0.25848E+04,
0.28264E+04, 0.30828E+04, 0.33548E+04, 0.36434E+04, 0.39496E+04,
0.42745E+04, 0.46190E+04, 0.49845E+04, 0.53720E+04, 0.57828E+04,
0.62182E+04, 0.66796E+04, 0.71684E+04, 0.76862E+04, 0.82344E+04,
0.88149E+04, 0.94292E+04, 0.10079E+05, 0.10767E+05, 0.11494E+05,
0.12262E+05, 0.13074E+05, 0.13932E+05, 0.14839E+05, 0.15796E+05,
0.16806E+05, 0.17872E+05, 0.18997E+05, 0.20183E+05, 0.21434E+05,
0.22752E+05, 0.24141E+05, 0.25604E+05, 0.27145E+05, 0.28767E+05,
0.30475E+05, 0.32271E+05, 0.34160E+05, 0.36146E+05, 0.38234E+05,
0.40428E+05, 0.42733E+05, 0.45154E+05, 0.47696E+05, 0.50364E+05,
0.53163E+05, 0.56100E+05, 0.59180E+05, 0.62408E+05, 0.65792E+05,
0.69339E+05, 0.73053E+05, 0.76943E+05, 0.81016E+05, 0.85279E+05,
0.89740E+05, 0.94406E+05, 0.99287E+05, 0.10439E+06, 0.10972E+06,
0.11530E+06, 0.12112E+06, 0.12720E+06, 0.13355E+06, 0.14018E+06,
0.14711E+06, 0.15433E+06, 0.16186E+06, 0.16971E+06, 0.17791E+06,
0.18645E+06, 0.19534E+06, 0.20462E+06, 0.21428E+06, 0.22434E+06,
0.23481E+06, 0.24572E+06, 0.25706E+06, 0.26887E+06, 0.28116E+06,
0.29393E+06, 0.30722E+06, 0.32103E+06, 0.33539E+06, 0.35031E+06,
0.36581E+06, 0.38191E+06, 0.39864E+06, 0.41600E+06, 0.43403E+06,
0.45274E+06, 0.47215E+06, 0.49230E+06, 0.51319E+06, 0.53487E+06,
0.55734E+06, 0.58064E+06, 0.60478E+06, 0.62981E+06, 0.65574E+06,
0.68260E+06])
# --------------- HNO3 146: M = 12, I = 1 ---------------------
M = 12
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.15010E+05, 0.25316E+05, 0.37374E+05,
0.51216E+05, 0.67105E+05, 0.85473E+05, 0.10688E+06, 0.13201E+06,
0.16165E+06, 0.19671E+06, 0.23825E+06, 0.28749E+06, 0.34583E+06,
0.41490E+06, 0.49657E+06, 0.59302E+06, 0.70673E+06, 0.84054E+06,
0.99775E+06, 0.11821E+07, 0.13978E+07, 0.16498E+07, 0.19436E+07,
0.22855E+07, 0.26825E+07, 0.31428E+07, 0.36753E+07, 0.42903E+07,
0.49993E+07, 0.58151E+07, 0.67523E+07, 0.78269E+07, 0.90572E+07,
0.10463E+08, 0.12067E+08, 0.13895E+08, 0.15973E+08, 0.18333E+08,
0.21009E+08, 0.24039E+08, 0.27464E+08, 0.31331E+08, 0.35690E+08,
0.40597E+08, 0.46115E+08, 0.52310E+08, 0.59257E+08, 0.67037E+08,
0.75739E+08, 0.85461E+08, 0.96310E+08, 0.10840E+09, 0.12186E+09,
0.13683E+09, 0.15346E+09, 0.17191E+09, 0.19236E+09, 0.21501E+09,
0.24006E+09, 0.26774E+09, 0.29830E+09, 0.33200E+09, 0.36914E+09,
0.41002E+09, 0.45498E+09, 0.50438E+09, 0.55862E+09, 0.61812E+09,
0.68332E+09, 0.75473E+09, 0.83286E+09, 0.91828E+09, 0.10116E+10,
0.11134E+10, 0.12245E+10, 0.13456E+10, 0.14775E+10, 0.16210E+10,
0.17771E+10, 0.19467E+10, 0.21309E+10, 0.23309E+10, 0.25477E+10,
0.27827E+10, 0.30372E+10, 0.33127E+10, 0.36107E+10, 0.39329E+10,
0.42809E+10, 0.46567E+10, 0.50623E+10, 0.54997E+10, 0.59711E+10,
0.64789E+10, 0.70257E+10, 0.76140E+10, 0.82468E+10, 0.89269E+10,
0.96575E+10, 0.10442E+11, 0.11284E+11, 0.12187E+11, 0.13155E+11,
0.14193E+11, 0.15304E+11, 0.16494E+11, 0.17767E+11, 0.19129E+11,
0.20585E+11, 0.22140E+11, 0.23802E+11, 0.25576E+11, 0.27469E+11,
0.29489E+11, 0.31642E+11, 0.33937E+11, 0.36382E+11, 0.38985E+11,
0.41757E+11])
# --------------- HNO3 156: M = 12, I = 2 --------------------- NOT IN TIPS-2011
M = 12
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M,I)] = float32([0.])
# --------------- OH 61: M = 13, I = 1 ---------------------
M = 13
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.20066E+02, 0.24774E+02, 0.30309E+02,
0.36357E+02, 0.42745E+02, 0.49371E+02, 0.56168E+02, 0.63093E+02,
0.70116E+02, 0.77217E+02, 0.84380E+02, 0.91594E+02, 0.98850E+02,
0.10614E+03, 0.11346E+03, 0.12081E+03, 0.12818E+03, 0.13557E+03,
0.14298E+03, 0.15041E+03, 0.15785E+03, 0.16531E+03, 0.17278E+03,
0.18027E+03, 0.18778E+03, 0.19530E+03, 0.20284E+03, 0.21040E+03,
0.21797E+03, 0.22556E+03, 0.23318E+03, 0.24082E+03, 0.24848E+03,
0.25617E+03, 0.26389E+03, 0.27163E+03, 0.27941E+03, 0.28721E+03,
0.29505E+03, 0.30292E+03, 0.31084E+03, 0.31878E+03, 0.32677E+03,
0.33480E+03, 0.34287E+03, 0.35099E+03, 0.35915E+03, 0.36736E+03,
0.37561E+03, 0.38391E+03, 0.39227E+03, 0.40067E+03, 0.40913E+03,
0.41764E+03, 0.42620E+03, 0.43482E+03, 0.44350E+03, 0.45223E+03,
0.46102E+03, 0.46987E+03, 0.47878E+03, 0.48775E+03, 0.49679E+03,
0.50588E+03, 0.51503E+03, 0.52425E+03, 0.53354E+03, 0.54288E+03,
0.55229E+03, 0.56177E+03, 0.57132E+03, 0.58092E+03, 0.59060E+03,
0.60035E+03, 0.61016E+03, 0.62004E+03, 0.62999E+03, 0.64001E+03,
0.65010E+03, 0.66025E+03, 0.67049E+03, 0.68078E+03, 0.69115E+03,
0.70160E+03, 0.71211E+03, 0.72269E+03, 0.73335E+03, 0.74408E+03,
0.75488E+03, 0.76576E+03, 0.77671E+03, 0.78773E+03, 0.79883E+03,
0.81000E+03, 0.82124E+03, 0.83256E+03, 0.84396E+03, 0.85542E+03,
0.86696E+03, 0.87858E+03, 0.89027E+03, 0.90204E+03, 0.91389E+03,
0.92580E+03, 0.93781E+03, 0.94988E+03, 0.96203E+03, 0.97425E+03,
0.98656E+03, 0.99893E+03, 0.10114E+04, 0.10239E+04, 0.10365E+04,
0.10492E+04, 0.10620E+04, 0.10748E+04, 0.10878E+04, 0.11007E+04,
0.11138E+04])
# --------------- OH 81: M = 13, I = 2 ---------------------
M = 13
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.20124E+02, 0.24876E+02, 0.30457E+02,
0.36553E+02, 0.42991E+02, 0.49666E+02, 0.56513E+02, 0.63489E+02,
0.70563E+02, 0.77715E+02, 0.84929E+02, 0.92195E+02, 0.99504E+02,
0.10685E+03, 0.11423E+03, 0.12164E+03, 0.12907E+03, 0.13654E+03,
0.14403E+03, 0.15154E+03, 0.15909E+03, 0.16666E+03, 0.17427E+03,
0.18191E+03, 0.18959E+03, 0.19731E+03, 0.20507E+03, 0.21287E+03,
0.22073E+03, 0.22863E+03, 0.23658E+03, 0.24459E+03, 0.25266E+03,
0.26078E+03, 0.26897E+03, 0.27722E+03, 0.28554E+03, 0.29393E+03,
0.30238E+03, 0.31091E+03, 0.31952E+03, 0.32820E+03, 0.33696E+03,
0.34579E+03, 0.35471E+03, 0.36371E+03, 0.37279E+03, 0.38196E+03,
0.39121E+03, 0.40055E+03, 0.40998E+03, 0.41949E+03, 0.42910E+03,
0.43879E+03, 0.44858E+03, 0.45845E+03, 0.46843E+03, 0.47849E+03,
0.48865E+03, 0.49890E+03, 0.50924E+03, 0.51969E+03, 0.53022E+03,
0.54086E+03, 0.55159E+03, 0.56242E+03, 0.57335E+03, 0.58437E+03,
0.59550E+03, 0.60673E+03, 0.61805E+03, 0.62947E+03, 0.64100E+03,
0.65263E+03, 0.66435E+03, 0.67618E+03, 0.68811E+03, 0.70014E+03,
0.71228E+03, 0.72451E+03, 0.73685E+03, 0.74929E+03, 0.76184E+03,
0.77449E+03, 0.78724E+03, 0.80009E+03, 0.81306E+03, 0.82612E+03,
0.83929E+03, 0.85256E+03, 0.86594E+03, 0.87942E+03, 0.89301E+03,
0.90670E+03, 0.92050E+03, 0.93440E+03, 0.94841E+03, 0.96253E+03,
0.97675E+03, 0.99108E+03, 0.10055E+04, 0.10201E+04, 0.10347E+04,
0.10495E+04, 0.10643E+04, 0.10793E+04, 0.10944E+04, 0.11096E+04,
0.11248E+04, 0.11402E+04, 0.11558E+04, 0.11714E+04, 0.11871E+04,
0.12029E+04, 0.12189E+04, 0.12349E+04, 0.12511E+04, 0.12673E+04,
0.12837E+04])
# --------------- OH 62: M = 13, I = 3 ---------------------
M = 13
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.41032E+02, 0.54704E+02, 0.70201E+02,
0.86985E+02, 0.10469E+03, 0.12306E+03, 0.14194E+03, 0.16119E+03,
0.18075E+03, 0.20054E+03, 0.22053E+03, 0.24068E+03, 0.26096E+03,
0.28135E+03, 0.30183E+03, 0.32241E+03, 0.34305E+03, 0.36376E+03,
0.38453E+03, 0.40535E+03, 0.42622E+03, 0.44714E+03, 0.46811E+03,
0.48913E+03, 0.51019E+03, 0.53131E+03, 0.55246E+03, 0.57368E+03,
0.59495E+03, 0.61627E+03, 0.63766E+03, 0.65912E+03, 0.68064E+03,
0.70223E+03, 0.72390E+03, 0.74565E+03, 0.76749E+03, 0.78941E+03,
0.81143E+03, 0.83355E+03, 0.85578E+03, 0.87810E+03, 0.90054E+03,
0.92310E+03, 0.94577E+03, 0.96857E+03, 0.99149E+03, 0.10145E+04,
0.10377E+04, 0.10611E+04, 0.10845E+04, 0.11081E+04, 0.11319E+04,
0.11558E+04, 0.11798E+04, 0.12040E+04, 0.12284E+04, 0.12529E+04,
0.12776E+04, 0.13025E+04, 0.13275E+04, 0.13527E+04, 0.13781E+04,
0.14036E+04, 0.14293E+04, 0.14552E+04, 0.14813E+04, 0.15076E+04,
0.15340E+04, 0.15606E+04, 0.15874E+04, 0.16144E+04, 0.16416E+04,
0.16690E+04, 0.16965E+04, 0.17243E+04, 0.17522E+04, 0.17804E+04,
0.18087E+04, 0.18373E+04, 0.18660E+04, 0.18949E+04, 0.19241E+04,
0.19534E+04, 0.19829E+04, 0.20127E+04, 0.20426E+04, 0.20727E+04,
0.21031E+04, 0.21336E+04, 0.21644E+04, 0.21954E+04, 0.22266E+04,
0.22579E+04, 0.22895E+04, 0.23213E+04, 0.23534E+04, 0.23856E+04,
0.24180E+04, 0.24506E+04, 0.24835E+04, 0.25166E+04, 0.25499E+04,
0.25834E+04, 0.26171E+04, 0.26510E+04, 0.26852E+04, 0.27195E+04,
0.27541E+04, 0.27889E+04, 0.28239E+04, 0.28592E+04, 0.28946E+04,
0.29303E+04, 0.29661E+04, 0.30023E+04, 0.30386E+04, 0.30751E+04,
0.31119E+04])
# --------------- HF 19: M = 14, I = 1 ---------------------
M = 14
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.95958E+01, 0.12933E+02, 0.16295E+02,
0.19666E+02, 0.23043E+02, 0.26425E+02, 0.29809E+02, 0.33195E+02,
0.36584E+02, 0.39974E+02, 0.43366E+02, 0.46759E+02, 0.50154E+02,
0.53550E+02, 0.56947E+02, 0.60346E+02, 0.63746E+02, 0.67148E+02,
0.70550E+02, 0.73955E+02, 0.77361E+02, 0.80769E+02, 0.84179E+02,
0.87591E+02, 0.91006E+02, 0.94424E+02, 0.97846E+02, 0.10127E+03,
0.10470E+03, 0.10813E+03, 0.11157E+03, 0.11502E+03, 0.11847E+03,
0.12193E+03, 0.12540E+03, 0.12888E+03, 0.13236E+03, 0.13586E+03,
0.13936E+03, 0.14288E+03, 0.14641E+03, 0.14995E+03, 0.15351E+03,
0.15708E+03, 0.16066E+03, 0.16426E+03, 0.16788E+03, 0.17151E+03,
0.17516E+03, 0.17882E+03, 0.18251E+03, 0.18621E+03, 0.18994E+03,
0.19368E+03, 0.19745E+03, 0.20123E+03, 0.20504E+03, 0.20887E+03,
0.21272E+03, 0.21659E+03, 0.22049E+03, 0.22441E+03, 0.22836E+03,
0.23233E+03, 0.23632E+03, 0.24034E+03, 0.24439E+03, 0.24846E+03,
0.25255E+03, 0.25668E+03, 0.26083E+03, 0.26501E+03, 0.26921E+03,
0.27344E+03, 0.27770E+03, 0.28199E+03, 0.28631E+03, 0.29066E+03,
0.29503E+03, 0.29944E+03, 0.30387E+03, 0.30833E+03, 0.31282E+03,
0.31735E+03, 0.32190E+03, 0.32648E+03, 0.33110E+03, 0.33574E+03,
0.34042E+03, 0.34512E+03, 0.34986E+03, 0.35463E+03, 0.35943E+03,
0.36426E+03, 0.36913E+03, 0.37402E+03, 0.37895E+03, 0.38391E+03,
0.38891E+03, 0.39393E+03, 0.39899E+03, 0.40408E+03, 0.40921E+03,
0.41436E+03, 0.41955E+03, 0.42478E+03, 0.43004E+03, 0.43533E+03,
0.44065E+03, 0.44601E+03, 0.45140E+03, 0.45683E+03, 0.46229E+03,
0.46779E+03, 0.47332E+03, 0.47888E+03, 0.48448E+03, 0.49011E+03,
0.49578E+03])
# --------------- HF 29: M = 14, I = 2 --------------------- not in TIPS-2011
# Zero placeholder: isotopologue absent from TIPS-2011 (see header comment).
M = 14  # HITRAN molecule number (HF)
I = 2   # isotopologue index
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)  # state-independent degeneracy: unavailable -> 0
TIPS_ISO_HASH[(M,I)] = float32([0.])      # single-element zero table (no tabulated sums)
# --------------- HCl 15: M = 15, I = 1 --------------------
M = 15
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.34775E+02, 0.48060E+02, 0.61370E+02,
0.74692E+02, 0.88024E+02, 0.10136E+03, 0.11471E+03, 0.12806E+03,
0.14141E+03, 0.15478E+03, 0.16814E+03, 0.18151E+03, 0.19489E+03,
0.20827E+03, 0.22166E+03, 0.23506E+03, 0.24847E+03, 0.26189E+03,
0.27533E+03, 0.28878E+03, 0.30225E+03, 0.31575E+03, 0.32928E+03,
0.34284E+03, 0.35645E+03, 0.37009E+03, 0.38378E+03, 0.39753E+03,
0.41134E+03, 0.42521E+03, 0.43914E+03, 0.45316E+03, 0.46725E+03,
0.48142E+03, 0.49568E+03, 0.51003E+03, 0.52448E+03, 0.53902E+03,
0.55368E+03, 0.56843E+03, 0.58330E+03, 0.59829E+03, 0.61339E+03,
0.62862E+03, 0.64396E+03, 0.65944E+03, 0.67504E+03, 0.69078E+03,
0.70665E+03, 0.72265E+03, 0.73880E+03, 0.75508E+03, 0.77151E+03,
0.78809E+03, 0.80481E+03, 0.82168E+03, 0.83870E+03, 0.85587E+03,
0.87320E+03, 0.89068E+03, 0.90832E+03, 0.92611E+03, 0.94407E+03,
0.96218E+03, 0.98046E+03, 0.99889E+03, 0.10175E+04, 0.10363E+04,
0.10552E+04, 0.10743E+04, 0.10936E+04, 0.11130E+04, 0.11326E+04,
0.11524E+04, 0.11723E+04, 0.11924E+04, 0.12127E+04, 0.12332E+04,
0.12538E+04, 0.12746E+04, 0.12956E+04, 0.13168E+04, 0.13381E+04,
0.13597E+04, 0.13814E+04, 0.14032E+04, 0.14253E+04, 0.14475E+04,
0.14700E+04, 0.14926E+04, 0.15153E+04, 0.15383E+04, 0.15615E+04,
0.15848E+04, 0.16083E+04, 0.16320E+04, 0.16559E+04, 0.16800E+04,
0.17043E+04, 0.17287E+04, 0.17533E+04, 0.17782E+04, 0.18032E+04,
0.18284E+04, 0.18538E+04, 0.18794E+04, 0.19051E+04, 0.19311E+04,
0.19573E+04, 0.19836E+04, 0.20102E+04, 0.20369E+04, 0.20638E+04,
0.20910E+04, 0.21183E+04, 0.21458E+04, 0.21735E+04, 0.22014E+04,
0.22295E+04])
# --------------- HCl 17: M = 15, I = 2 ---------------------
M = 15
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.34823E+02, 0.48128E+02, 0.61458E+02,
0.74801E+02, 0.88152E+02, 0.10151E+03, 0.11488E+03, 0.12825E+03,
0.14162E+03, 0.15500E+03, 0.16839E+03, 0.18178E+03, 0.19518E+03,
0.20858E+03, 0.22199E+03, 0.23541E+03, 0.24884E+03, 0.26228E+03,
0.27574E+03, 0.28921E+03, 0.30270E+03, 0.31622E+03, 0.32977E+03,
0.34336E+03, 0.35698E+03, 0.37065E+03, 0.38436E+03, 0.39813E+03,
0.41196E+03, 0.42585E+03, 0.43981E+03, 0.45384E+03, 0.46796E+03,
0.48215E+03, 0.49644E+03, 0.51081E+03, 0.52528E+03, 0.53986E+03,
0.55453E+03, 0.56932E+03, 0.58421E+03, 0.59922E+03, 0.61435E+03,
0.62960E+03, 0.64498E+03, 0.66048E+03, 0.67611E+03, 0.69187E+03,
0.70777E+03, 0.72381E+03, 0.73998E+03, 0.75630E+03, 0.77276E+03,
0.78936E+03, 0.80612E+03, 0.82302E+03, 0.84007E+03, 0.85727E+03,
0.87463E+03, 0.89215E+03, 0.90982E+03, 0.92765E+03, 0.94563E+03,
0.96378E+03, 0.98209E+03, 0.10006E+04, 0.10192E+04, 0.10380E+04,
0.10570E+04, 0.10761E+04, 0.10954E+04, 0.11149E+04, 0.11345E+04,
0.11543E+04, 0.11743E+04, 0.11945E+04, 0.12148E+04, 0.12353E+04,
0.12560E+04, 0.12768E+04, 0.12979E+04, 0.13191E+04, 0.13405E+04,
0.13620E+04, 0.13838E+04, 0.14057E+04, 0.14278E+04, 0.14501E+04,
0.14726E+04, 0.14952E+04, 0.15180E+04, 0.15410E+04, 0.15642E+04,
0.15876E+04, 0.16112E+04, 0.16349E+04, 0.16589E+04, 0.16830E+04,
0.17073E+04, 0.17318E+04, 0.17565E+04, 0.17814E+04, 0.18064E+04,
0.18317E+04, 0.18572E+04, 0.18828E+04, 0.19086E+04, 0.19346E+04,
0.19609E+04, 0.19873E+04, 0.20139E+04, 0.20406E+04, 0.20676E+04,
0.20948E+04, 0.21222E+04, 0.21498E+04, 0.21775E+04, 0.22055E+04,
0.22337E+04])
# --------------- HCl 25: M = 15, I = 3 --------------------- not in TIPS-2011
# Zero placeholder: isotopologue absent from TIPS-2011 (see header comment).
M = 15  # HITRAN molecule number (HCl)
I = 3   # isotopologue index
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)  # state-independent degeneracy: unavailable -> 0
TIPS_ISO_HASH[(M,I)] = float32([0.])      # single-element zero table (no tabulated sums)
# --------------- HCl 27: M = 15, I = 4 --------------------- not in TIPS-2011
# Zero placeholder: isotopologue absent from TIPS-2011 (see header comment).
M = 15  # HITRAN molecule number (HCl)
I = 4   # isotopologue index
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)  # state-independent degeneracy: unavailable -> 0
TIPS_ISO_HASH[(M,I)] = float32([0.])      # single-element zero table (no tabulated sums)
# --------------- HBr 19: M = 16, I = 1 ---------------------
M = 16
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.42744E+02, 0.59373E+02, 0.76023E+02,
0.92685E+02, 0.10936E+03, 0.12604E+03, 0.14272E+03, 0.15942E+03,
0.17612E+03, 0.19282E+03, 0.20954E+03, 0.22626E+03, 0.24299E+03,
0.25973E+03, 0.27648E+03, 0.29325E+03, 0.31004E+03, 0.32686E+03,
0.34371E+03, 0.36060E+03, 0.37753E+03, 0.39451E+03, 0.41156E+03,
0.42868E+03, 0.44587E+03, 0.46314E+03, 0.48051E+03, 0.49798E+03,
0.51556E+03, 0.53325E+03, 0.55106E+03, 0.56900E+03, 0.58708E+03,
0.60530E+03, 0.62367E+03, 0.64219E+03, 0.66088E+03, 0.67972E+03,
0.69874E+03, 0.71793E+03, 0.73730E+03, 0.75685E+03, 0.77659E+03,
0.79652E+03, 0.81664E+03, 0.83696E+03, 0.85748E+03, 0.87820E+03,
0.89914E+03, 0.92028E+03, 0.94163E+03, 0.96319E+03, 0.98498E+03,
0.10070E+04, 0.10292E+04, 0.10516E+04, 0.10743E+04, 0.10972E+04,
0.11203E+04, 0.11437E+04, 0.11673E+04, 0.11911E+04, 0.12151E+04,
0.12394E+04, 0.12640E+04, 0.12887E+04, 0.13137E+04, 0.13390E+04,
0.13645E+04, 0.13902E+04, 0.14162E+04, 0.14424E+04, 0.14689E+04,
0.14956E+04, 0.15226E+04, 0.15498E+04, 0.15773E+04, 0.16050E+04,
0.16330E+04, 0.16612E+04, 0.16897E+04, 0.17185E+04, 0.17475E+04,
0.17767E+04, 0.18062E+04, 0.18360E+04, 0.18660E+04, 0.18963E+04,
0.19269E+04, 0.19577E+04, 0.19888E+04, 0.20202E+04, 0.20518E+04,
0.20837E+04, 0.21158E+04, 0.21482E+04, 0.21809E+04, 0.22139E+04,
0.22471E+04, 0.22806E+04, 0.23143E+04, 0.23484E+04, 0.23827E+04,
0.24173E+04, 0.24521E+04, 0.24873E+04, 0.25227E+04, 0.25584E+04,
0.25943E+04, 0.26306E+04, 0.26671E+04, 0.27039E+04, 0.27409E+04,
0.27783E+04, 0.28159E+04, 0.28538E+04, 0.28920E+04, 0.29305E+04,
0.29693E+04])
# --------------- HBr 11: M = 16, I = 2 ---------------------
M = 16
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.42756E+02, 0.59390E+02, 0.76045E+02,
0.92713E+02, 0.10939E+03, 0.12607E+03, 0.14277E+03, 0.15947E+03,
0.17617E+03, 0.19288E+03, 0.20960E+03, 0.22633E+03, 0.24306E+03,
0.25981E+03, 0.27656E+03, 0.29334E+03, 0.31014E+03, 0.32696E+03,
0.34381E+03, 0.36071E+03, 0.37764E+03, 0.39464E+03, 0.41169E+03,
0.42881E+03, 0.44601E+03, 0.46329E+03, 0.48066E+03, 0.49813E+03,
0.51572E+03, 0.53341E+03, 0.55123E+03, 0.56918E+03, 0.58727E+03,
0.60549E+03, 0.62387E+03, 0.64240E+03, 0.66109E+03, 0.67994E+03,
0.69896E+03, 0.71816E+03, 0.73754E+03, 0.75710E+03, 0.77684E+03,
0.79678E+03, 0.81691E+03, 0.83724E+03, 0.85776E+03, 0.87850E+03,
0.89943E+03, 0.92058E+03, 0.94194E+03, 0.96352E+03, 0.98531E+03,
0.10073E+04, 0.10295E+04, 0.10520E+04, 0.10747E+04, 0.10976E+04,
0.11207E+04, 0.11441E+04, 0.11677E+04, 0.11915E+04, 0.12156E+04,
0.12399E+04, 0.12644E+04, 0.12892E+04, 0.13142E+04, 0.13395E+04,
0.13650E+04, 0.13907E+04, 0.14167E+04, 0.14429E+04, 0.14694E+04,
0.14961E+04, 0.15231E+04, 0.15504E+04, 0.15778E+04, 0.16056E+04,
0.16336E+04, 0.16618E+04, 0.16903E+04, 0.17191E+04, 0.17481E+04,
0.17773E+04, 0.18069E+04, 0.18367E+04, 0.18667E+04, 0.18970E+04,
0.19276E+04, 0.19584E+04, 0.19895E+04, 0.20209E+04, 0.20525E+04,
0.20844E+04, 0.21166E+04, 0.21490E+04, 0.21817E+04, 0.22147E+04,
0.22479E+04, 0.22814E+04, 0.23152E+04, 0.23492E+04, 0.23835E+04,
0.24181E+04, 0.24530E+04, 0.24882E+04, 0.25236E+04, 0.25593E+04,
0.25952E+04, 0.26315E+04, 0.26680E+04, 0.27048E+04, 0.27419E+04,
0.27793E+04, 0.28169E+04, 0.28549E+04, 0.28931E+04, 0.29316E+04,
0.29703E+04])
# --------------- HBr 29: M = 16, I = 3 --------------------- not in TIPS-2011
# Zero placeholder: isotopologue absent from TIPS-2011 (see header comment).
M = 16  # HITRAN molecule number (HBr)
I = 3   # isotopologue index
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)  # state-independent degeneracy: unavailable -> 0
TIPS_ISO_HASH[(M,I)] = float32([0.])      # single-element zero table (no tabulated sums)
# --------------- HBr 21: M = 16, I = 4 --------------------- not in TIPS-2011
# Zero placeholder: isotopologue absent from TIPS-2011 (see header comment).
M = 16  # HITRAN molecule number (HBr)
I = 4   # isotopologue index
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)  # state-independent degeneracy: unavailable -> 0
TIPS_ISO_HASH[(M,I)] = float32([0.])      # single-element zero table (no tabulated sums)
# --------------- HI 17: M = 17, I = 1 ---------------------
M = 17
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.82031E+02, 0.11447E+03, 0.14694E+03,
0.17943E+03, 0.21194E+03, 0.24445E+03, 0.27699E+03, 0.30953E+03,
0.34209E+03, 0.37466E+03, 0.40725E+03, 0.43986E+03, 0.47249E+03,
0.50517E+03, 0.53789E+03, 0.57068E+03, 0.60354E+03, 0.63650E+03,
0.66957E+03, 0.70278E+03, 0.73614E+03, 0.76967E+03, 0.80340E+03,
0.83735E+03, 0.87153E+03, 0.90596E+03, 0.94067E+03, 0.97566E+03,
0.10110E+04, 0.10466E+04, 0.10826E+04, 0.11189E+04, 0.11555E+04,
0.11926E+04, 0.12300E+04, 0.12679E+04, 0.13061E+04, 0.13448E+04,
0.13839E+04, 0.14235E+04, 0.14635E+04, 0.15039E+04, 0.15448E+04,
0.15862E+04, 0.16280E+04, 0.16704E+04, 0.17132E+04, 0.17565E+04,
0.18003E+04, 0.18446E+04, 0.18894E+04, 0.19347E+04, 0.19806E+04,
0.20269E+04, 0.20738E+04, 0.21212E+04, 0.21691E+04, 0.22176E+04,
0.22666E+04, 0.23162E+04, 0.23662E+04, 0.24169E+04, 0.24680E+04,
0.25198E+04, 0.25720E+04, 0.26249E+04, 0.26783E+04, 0.27322E+04,
0.27867E+04, 0.28418E+04, 0.28975E+04, 0.29537E+04, 0.30105E+04,
0.30678E+04, 0.31258E+04, 0.31843E+04, 0.32434E+04, 0.33031E+04,
0.33633E+04, 0.34242E+04, 0.34856E+04, 0.35477E+04, 0.36103E+04,
0.36735E+04, 0.37373E+04, 0.38018E+04, 0.38668E+04, 0.39324E+04,
0.39986E+04, 0.40654E+04, 0.41329E+04, 0.42009E+04, 0.42696E+04,
0.43388E+04, 0.44087E+04, 0.44792E+04, 0.45503E+04, 0.46221E+04,
0.46944E+04, 0.47674E+04, 0.48410E+04, 0.49152E+04, 0.49901E+04,
0.50656E+04, 0.51417E+04, 0.52185E+04, 0.52959E+04, 0.53739E+04,
0.54526E+04, 0.55319E+04, 0.56118E+04, 0.56924E+04, 0.57736E+04,
0.58555E+04, 0.59380E+04, 0.60212E+04, 0.61050E+04, 0.61895E+04,
0.62746E+04])
# --------------- HI 27: M = 17, I = 2 --------------------- not in TIPS-2011
# Zero placeholder: isotopologue absent from TIPS-2011 (see header comment).
M = 17  # HITRAN molecule number (HI)
I = 2   # isotopologue index
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)  # state-independent degeneracy: unavailable -> 0
TIPS_ISO_HASH[(M,I)] = float32([0.])      # single-element zero table (no tabulated sums)
# --------------- ClO 56: M = 18, I = 1 ---------------------
M = 18
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.53847E+03, 0.76580E+03, 0.10017E+04,
0.12511E+04, 0.15168E+04, 0.18001E+04, 0.21014E+04, 0.24206E+04,
0.27577E+04, 0.31127E+04, 0.34857E+04, 0.38765E+04, 0.42854E+04,
0.47124E+04, 0.51575E+04, 0.56208E+04, 0.61025E+04, 0.66026E+04,
0.71211E+04, 0.76582E+04, 0.82138E+04, 0.87882E+04, 0.93813E+04,
0.99932E+04, 0.10624E+05, 0.11273E+05, 0.11942E+05, 0.12629E+05,
0.13336E+05, 0.14061E+05, 0.14806E+05, 0.15570E+05, 0.16353E+05,
0.17155E+05, 0.17976E+05, 0.18816E+05, 0.19676E+05, 0.20555E+05,
0.21453E+05, 0.22371E+05, 0.23308E+05, 0.24264E+05, 0.25240E+05,
0.26236E+05, 0.27250E+05, 0.28284E+05, 0.29338E+05, 0.30412E+05,
0.31505E+05, 0.32617E+05, 0.33749E+05, 0.34901E+05, 0.36072E+05,
0.37263E+05, 0.38474E+05, 0.39705E+05, 0.40955E+05, 0.42225E+05,
0.43515E+05, 0.44825E+05, 0.46154E+05, 0.47504E+05, 0.48873E+05,
0.50262E+05, 0.51672E+05, 0.53101E+05, 0.54549E+05, 0.56019E+05,
0.57508E+05, 0.59017E+05, 0.60546E+05, 0.62095E+05, 0.63665E+05,
0.65254E+05, 0.66864E+05, 0.68494E+05, 0.70144E+05, 0.71814E+05,
0.73504E+05, 0.75215E+05, 0.76946E+05, 0.78698E+05, 0.80470E+05,
0.82261E+05, 0.84074E+05, 0.85907E+05, 0.87760E+05, 0.89633E+05,
0.91527E+05, 0.93442E+05, 0.95377E+05, 0.97333E+05, 0.99309E+05,
0.10131E+06, 0.10332E+06, 0.10536E+06, 0.10742E+06, 0.10950E+06,
0.11160E+06, 0.11372E+06, 0.11586E+06, 0.11802E+06, 0.12020E+06,
0.12241E+06, 0.12463E+06, 0.12688E+06, 0.12914E+06, 0.13143E+06,
0.13374E+06, 0.13607E+06, 0.13842E+06, 0.14079E+06, 0.14318E+06,
0.14559E+06, 0.14802E+06, 0.15048E+06, 0.15295E+06, 0.15545E+06,
0.15797E+06])
# --------------- ClO 76: M = 18, I = 2 ---------------------
M = 18
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.54775E+03, 0.77899E+03, 0.10189E+04,
0.12726E+04, 0.15430E+04, 0.18313E+04, 0.21378E+04, 0.24627E+04,
0.28059E+04, 0.31674E+04, 0.35472E+04, 0.39454E+04, 0.43621E+04,
0.47972E+04, 0.52508E+04, 0.57232E+04, 0.62143E+04, 0.67242E+04,
0.72531E+04, 0.78010E+04, 0.83678E+04, 0.89537E+04, 0.95589E+04,
0.10183E+05, 0.10827E+05, 0.11490E+05, 0.12172E+05, 0.12874E+05,
0.13595E+05, 0.14335E+05, 0.15095E+05, 0.15875E+05, 0.16674E+05,
0.17493E+05, 0.18332E+05, 0.19190E+05, 0.20068E+05, 0.20965E+05,
0.21882E+05, 0.22820E+05, 0.23776E+05, 0.24753E+05, 0.25750E+05,
0.26766E+05, 0.27803E+05, 0.28859E+05, 0.29935E+05, 0.31032E+05,
0.32148E+05, 0.33284E+05, 0.34441E+05, 0.35617E+05, 0.36814E+05,
0.38031E+05, 0.39267E+05, 0.40524E+05, 0.41802E+05, 0.43099E+05,
0.44417E+05, 0.45755E+05, 0.47113E+05, 0.48492E+05, 0.49891E+05,
0.51310E+05, 0.52750E+05, 0.54210E+05, 0.55690E+05, 0.57191E+05,
0.58713E+05, 0.60255E+05, 0.61817E+05, 0.63400E+05, 0.65004E+05,
0.66628E+05, 0.68272E+05, 0.69938E+05, 0.71624E+05, 0.73331E+05,
0.75058E+05, 0.76806E+05, 0.78575E+05, 0.80364E+05, 0.82175E+05,
0.84006E+05, 0.85858E+05, 0.87731E+05, 0.89625E+05, 0.91539E+05,
0.93475E+05, 0.95431E+05, 0.97409E+05, 0.99407E+05, 0.10143E+06,
0.10347E+06, 0.10553E+06, 0.10761E+06, 0.10972E+06, 0.11184E+06,
0.11399E+06, 0.11615E+06, 0.11834E+06, 0.12055E+06, 0.12278E+06,
0.12503E+06, 0.12731E+06, 0.12960E+06, 0.13192E+06, 0.13425E+06,
0.13661E+06, 0.13899E+06, 0.14139E+06, 0.14382E+06, 0.14626E+06,
0.14873E+06, 0.15121E+06, 0.15372E+06, 0.15625E+06, 0.15880E+06,
0.16138E+06])
# --------------- OCS 622: M = 19, I = 1 ---------------------
M = 19
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.20609E+03, 0.29199E+03, 0.37861E+03,
0.46737E+03, 0.56024E+03, 0.65929E+03, 0.76649E+03, 0.88361E+03,
0.10123E+04, 0.11541E+04, 0.13105E+04, 0.14829E+04, 0.16728E+04,
0.18818E+04, 0.21113E+04, 0.23629E+04, 0.26383E+04, 0.29391E+04,
0.32672E+04, 0.36245E+04, 0.40128E+04, 0.44343E+04, 0.48911E+04,
0.53853E+04, 0.59193E+04, 0.64956E+04, 0.71166E+04, 0.77849E+04,
0.85033E+04, 0.92746E+04, 0.10102E+05, 0.10988E+05, 0.11936E+05,
0.12949E+05, 0.14032E+05, 0.15186E+05, 0.16416E+05, 0.17726E+05,
0.19120E+05, 0.20601E+05, 0.22173E+05, 0.23842E+05, 0.25611E+05,
0.27484E+05, 0.29468E+05, 0.31566E+05, 0.33783E+05, 0.36124E+05,
0.38595E+05, 0.41202E+05, 0.43949E+05, 0.46842E+05, 0.49888E+05,
0.53092E+05, 0.56460E+05, 0.59999E+05, 0.63716E+05, 0.67616E+05,
0.71708E+05, 0.75997E+05, 0.80491E+05, 0.85197E+05, 0.90124E+05,
0.95278E+05, 0.10067E+06, 0.10630E+06, 0.11219E+06, 0.11833E+06,
0.12475E+06, 0.13144E+06, 0.13842E+06, 0.14570E+06, 0.15328E+06,
0.16117E+06, 0.16940E+06, 0.17795E+06, 0.18686E+06, 0.19611E+06,
0.20574E+06, 0.21574E+06, 0.22613E+06, 0.23692E+06, 0.24813E+06,
0.25975E+06, 0.27182E+06, 0.28433E+06, 0.29730E+06, 0.31074E+06,
0.32467E+06, 0.33909E+06, 0.35403E+06, 0.36950E+06, 0.38551E+06,
0.40207E+06, 0.41920E+06, 0.43691E+06, 0.45522E+06, 0.47415E+06,
0.49370E+06, 0.51390E+06, 0.53476E+06, 0.55629E+06, 0.57852E+06,
0.60146E+06, 0.62513E+06, 0.64954E+06, 0.67471E+06, 0.70067E+06,
0.72742E+06, 0.75499E+06, 0.78339E+06, 0.81265E+06, 0.84279E+06,
0.87381E+06, 0.90576E+06, 0.93863E+06, 0.97246E+06, 0.10073E+07,
0.10431E+07])
# --------------- OCS 624: M = 19, I = 2 ---------------------
M = 19
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.21125E+03, 0.29930E+03, 0.38809E+03,
0.47911E+03, 0.57437E+03, 0.67603E+03, 0.78610E+03, 0.90643E+03,
0.10387E+04, 0.11846E+04, 0.13456E+04, 0.15231E+04, 0.17188E+04,
0.19342E+04, 0.21709E+04, 0.24304E+04, 0.27145E+04, 0.30250E+04,
0.33638E+04, 0.37328E+04, 0.41339E+04, 0.45694E+04, 0.50415E+04,
0.55524E+04, 0.61045E+04, 0.67004E+04, 0.73427E+04, 0.80340E+04,
0.87773E+04, 0.95755E+04, 0.10432E+05, 0.11349E+05, 0.12330E+05,
0.13380E+05, 0.14500E+05, 0.15696E+05, 0.16970E+05, 0.18327E+05,
0.19770E+05, 0.21305E+05, 0.22934E+05, 0.24663E+05, 0.26497E+05,
0.28439E+05, 0.30495E+05, 0.32669E+05, 0.34968E+05, 0.37396E+05,
0.39958E+05, 0.42661E+05, 0.45510E+05, 0.48511E+05, 0.51669E+05,
0.54993E+05, 0.58487E+05, 0.62159E+05, 0.66014E+05, 0.70061E+05,
0.74306E+05, 0.78757E+05, 0.83421E+05, 0.88305E+05, 0.93418E+05,
0.98767E+05, 0.10436E+06, 0.11021E+06, 0.11632E+06, 0.12270E+06,
0.12936E+06, 0.13631E+06, 0.14355E+06, 0.15111E+06, 0.15898E+06,
0.16718E+06, 0.17572E+06, 0.18460E+06, 0.19385E+06, 0.20346E+06,
0.21346E+06, 0.22385E+06, 0.23464E+06, 0.24585E+06, 0.25748E+06,
0.26956E+06, 0.28209E+06, 0.29509E+06, 0.30856E+06, 0.32252E+06,
0.33699E+06, 0.35198E+06, 0.36750E+06, 0.38357E+06, 0.40020E+06,
0.41741E+06, 0.43521E+06, 0.45362E+06, 0.47264E+06, 0.49231E+06,
0.51263E+06, 0.53362E+06, 0.55529E+06, 0.57768E+06, 0.60078E+06,
0.62462E+06, 0.64922E+06, 0.67459E+06, 0.70075E+06, 0.72773E+06,
0.75554E+06, 0.78419E+06, 0.81372E+06, 0.84413E+06, 0.87546E+06,
0.90771E+06, 0.94092E+06, 0.97509E+06, 0.10103E+07, 0.10464E+07,
0.10837E+07])
# --------------- OCS 632: M = 19, I = 3 ---------------------
M = 19
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.41351E+03, 0.58591E+03, 0.76004E+03,
0.93907E+03, 0.11273E+04, 0.13289E+04, 0.15481E+04, 0.17884E+04,
0.20533E+04, 0.23459E+04, 0.26692E+04, 0.30264E+04, 0.34205E+04,
0.38547E+04, 0.43323E+04, 0.48565E+04, 0.54309E+04, 0.60592E+04,
0.67451E+04, 0.74928E+04, 0.83064E+04, 0.91903E+04, 0.10149E+05,
0.11187E+05, 0.12310E+05, 0.13523E+05, 0.14831E+05, 0.16240E+05,
0.17756E+05, 0.19384E+05, 0.21132E+05, 0.23005E+05, 0.25011E+05,
0.27157E+05, 0.29449E+05, 0.31896E+05, 0.34506E+05, 0.37286E+05,
0.40245E+05, 0.43392E+05, 0.46735E+05, 0.50284E+05, 0.54048E+05,
0.58038E+05, 0.62263E+05, 0.66733E+05, 0.71460E+05, 0.76455E+05,
0.81728E+05, 0.87292E+05, 0.93159E+05, 0.99341E+05, 0.10585E+06,
0.11270E+06, 0.11991E+06, 0.12748E+06, 0.13543E+06, 0.14378E+06,
0.15255E+06, 0.16174E+06, 0.17137E+06, 0.18146E+06, 0.19202E+06,
0.20308E+06, 0.21465E+06, 0.22674E+06, 0.23937E+06, 0.25257E+06,
0.26635E+06, 0.28073E+06, 0.29573E+06, 0.31137E+06, 0.32767E+06,
0.34466E+06, 0.36235E+06, 0.38076E+06, 0.39992E+06, 0.41985E+06,
0.44057E+06, 0.46211E+06, 0.48450E+06, 0.50775E+06, 0.53189E+06,
0.55695E+06, 0.58295E+06, 0.60992E+06, 0.63789E+06, 0.66688E+06,
0.69693E+06, 0.72806E+06, 0.76030E+06, 0.79368E+06, 0.82823E+06,
0.86399E+06, 0.90097E+06, 0.93923E+06, 0.97878E+06, 0.10197E+07,
0.10619E+07, 0.11056E+07, 0.11506E+07, 0.11972E+07, 0.12453E+07,
0.12949E+07, 0.13460E+07, 0.13988E+07, 0.14533E+07, 0.15094E+07,
0.15673E+07, 0.16270E+07, 0.16884E+07, 0.17518E+07, 0.18170E+07,
0.18842E+07, 0.19533E+07, 0.20245E+07, 0.20978E+07, 0.21732E+07,
0.22507E+07])
# --------------- OCS 623: M = 19, I = 4 ---------------------
M = 19
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.83485E+03, 0.11828E+04, 0.15337E+04,
0.18934E+04, 0.22697E+04, 0.26712E+04, 0.31059E+04, 0.35809E+04,
0.41030E+04, 0.46785E+04, 0.53133E+04, 0.60135E+04, 0.67850E+04,
0.76338E+04, 0.85663E+04, 0.95888E+04, 0.10708E+05, 0.11931E+05,
0.13265E+05, 0.14718E+05, 0.16298E+05, 0.18012E+05, 0.19870E+05,
0.21881E+05, 0.24054E+05, 0.26399E+05, 0.28926E+05, 0.31646E+05,
0.34570E+05, 0.37710E+05, 0.41077E+05, 0.44685E+05, 0.48545E+05,
0.52672E+05, 0.57078E+05, 0.61780E+05, 0.66790E+05, 0.72125E+05,
0.77801E+05, 0.83833E+05, 0.90239E+05, 0.97036E+05, 0.10424E+06,
0.11188E+06, 0.11996E+06, 0.12850E+06, 0.13754E+06, 0.14708E+06,
0.15715E+06, 0.16777E+06, 0.17896E+06, 0.19076E+06, 0.20317E+06,
0.21623E+06, 0.22996E+06, 0.24438E+06, 0.25953E+06, 0.27543E+06,
0.29211E+06, 0.30959E+06, 0.32791E+06, 0.34710E+06, 0.36718E+06,
0.38820E+06, 0.41017E+06, 0.43314E+06, 0.45713E+06, 0.48219E+06,
0.50835E+06, 0.53564E+06, 0.56409E+06, 0.59376E+06, 0.62468E+06,
0.65688E+06, 0.69041E+06, 0.72530E+06, 0.76161E+06, 0.79937E+06,
0.83862E+06, 0.87941E+06, 0.92179E+06, 0.96581E+06, 0.10115E+07,
0.10589E+07, 0.11081E+07, 0.11591E+07, 0.12120E+07, 0.12669E+07,
0.13237E+07, 0.13825E+07, 0.14435E+07, 0.15066E+07, 0.15718E+07,
0.16394E+07, 0.17093E+07, 0.17815E+07, 0.18562E+07, 0.19334E+07,
0.20132E+07, 0.20956E+07, 0.21807E+07, 0.22685E+07, 0.23592E+07,
0.24528E+07, 0.25494E+07, 0.26490E+07, 0.27517E+07, 0.28576E+07,
0.29667E+07, 0.30792E+07, 0.31951E+07, 0.33145E+07, 0.34374E+07,
0.35640E+07, 0.36943E+07, 0.38285E+07, 0.39665E+07, 0.41085E+07,
0.42546E+07])
# --------------- OCS 822: M = 19, I = 5 ---------------------
M = 19
I = 5
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.21967E+03, 0.31126E+03, 0.40370E+03,
0.49862E+03, 0.59823E+03, 0.70481E+03, 0.82050E+03, 0.94724E+03,
0.10868E+04, 0.12409E+04, 0.14112E+04, 0.15993E+04, 0.18067E+04,
0.20353E+04, 0.22866E+04, 0.25624E+04, 0.28645E+04, 0.31950E+04,
0.35558E+04, 0.39490E+04, 0.43767E+04, 0.48413E+04, 0.53452E+04,
0.58909E+04, 0.64810E+04, 0.71182E+04, 0.78053E+04, 0.85454E+04,
0.93413E+04, 0.10196E+05, 0.11114E+05, 0.12098E+05, 0.13151E+05,
0.14277E+05, 0.15480E+05, 0.16764E+05, 0.18133E+05, 0.19592E+05,
0.21144E+05, 0.22794E+05, 0.24548E+05, 0.26409E+05, 0.28383E+05,
0.30475E+05, 0.32689E+05, 0.35033E+05, 0.37511E+05, 0.40128E+05,
0.42892E+05, 0.45808E+05, 0.48882E+05, 0.52121E+05, 0.55532E+05,
0.59121E+05, 0.62895E+05, 0.66861E+05, 0.71028E+05, 0.75402E+05,
0.79991E+05, 0.84803E+05, 0.89847E+05, 0.95130E+05, 0.10066E+06,
0.10645E+06, 0.11251E+06, 0.11883E+06, 0.12545E+06, 0.13236E+06,
0.13957E+06, 0.14710E+06, 0.15495E+06, 0.16313E+06, 0.17166E+06,
0.18055E+06, 0.18980E+06, 0.19944E+06, 0.20946E+06, 0.21989E+06,
0.23073E+06, 0.24200E+06, 0.25371E+06, 0.26587E+06, 0.27850E+06,
0.29161E+06, 0.30521E+06, 0.31931E+06, 0.33394E+06, 0.34910E+06,
0.36482E+06, 0.38109E+06, 0.39795E+06, 0.41541E+06, 0.43348E+06,
0.45217E+06, 0.47151E+06, 0.49151E+06, 0.51219E+06, 0.53356E+06,
0.55565E+06, 0.57847E+06, 0.60204E+06, 0.62637E+06, 0.65149E+06,
0.67742E+06, 0.70417E+06, 0.73176E+06, 0.76023E+06, 0.78957E+06,
0.81982E+06, 0.85100E+06, 0.88313E+06, 0.91622E+06, 0.95031E+06,
0.98541E+06, 0.10216E+07, 0.10587E+07, 0.10970E+07, 0.11364E+07,
0.11769E+07])
# --------------- H2CO 126: M = 20, I = 1 ---------------------
M = 20
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.25934E+03, 0.43623E+03, 0.64143E+03,
0.87152E+03, 0.11241E+04, 0.13975E+04, 0.16906E+04, 0.20029E+04,
0.23344E+04, 0.26857E+04, 0.30577E+04, 0.34518E+04, 0.38698E+04,
0.43138E+04, 0.47860E+04, 0.52890E+04, 0.58256E+04, 0.63985E+04,
0.70109E+04, 0.76660E+04, 0.83673E+04, 0.91184E+04, 0.99230E+04,
0.10785E+05, 0.11710E+05, 0.12700E+05, 0.13762E+05, 0.14900E+05,
0.16119E+05, 0.17425E+05, 0.18823E+05, 0.20320E+05, 0.21923E+05,
0.23637E+05, 0.25471E+05, 0.27432E+05, 0.29527E+05, 0.31765E+05,
0.34155E+05, 0.36706E+05, 0.39428E+05, 0.42330E+05, 0.45424E+05,
0.48720E+05, 0.52231E+05, 0.55968E+05, 0.59945E+05, 0.64175E+05,
0.68672E+05, 0.73450E+05, 0.78526E+05, 0.83915E+05, 0.89634E+05,
0.95701E+05, 0.10213E+06, 0.10895E+06, 0.11618E+06, 0.12383E+06,
0.13193E+06, 0.14049E+06, 0.14956E+06, 0.15914E+06, 0.16927E+06,
0.17997E+06, 0.19127E+06, 0.20320E+06, 0.21578E+06, 0.22906E+06,
0.24306E+06, 0.25782E+06, 0.27336E+06, 0.28974E+06, 0.30698E+06,
0.32513E+06, 0.34422E+06, 0.36430E+06, 0.38542E+06, 0.40761E+06,
0.43093E+06, 0.45542E+06, 0.48114E+06, 0.50813E+06, 0.53646E+06,
0.56617E+06, 0.59733E+06, 0.63000E+06, 0.66423E+06, 0.70010E+06,
0.73767E+06, 0.77701E+06, 0.81818E+06, 0.86127E+06, 0.90635E+06,
0.95349E+06, 0.10028E+07, 0.10543E+07, 0.11082E+07, 0.11644E+07,
0.12232E+07, 0.12845E+07, 0.13485E+07, 0.14154E+07, 0.14851E+07,
0.15578E+07, 0.16337E+07, 0.17127E+07, 0.17952E+07, 0.18810E+07,
0.19705E+07, 0.20637E+07, 0.21607E+07, 0.22617E+07, 0.23669E+07,
0.24763E+07, 0.25901E+07, 0.27085E+07, 0.28316E+07, 0.29596E+07,
0.30926E+07])
# --------------- H2CO 136: M = 20, I = 2 ---------------------
M = 20
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.53173E+03, 0.89447E+03, 0.13153E+04,
0.17871E+04, 0.23051E+04, 0.28658E+04, 0.34669E+04, 0.41073E+04,
0.47872E+04, 0.55074E+04, 0.62702E+04, 0.70785E+04, 0.79357E+04,
0.88462E+04, 0.98147E+04, 0.10846E+05, 0.11946E+05, 0.13121E+05,
0.14377E+05, 0.15721E+05, 0.17159E+05, 0.18699E+05, 0.20349E+05,
0.22118E+05, 0.24013E+05, 0.26045E+05, 0.28222E+05, 0.30555E+05,
0.33055E+05, 0.35733E+05, 0.38601E+05, 0.41671E+05, 0.44958E+05,
0.48474E+05, 0.52235E+05, 0.56255E+05, 0.60552E+05, 0.65142E+05,
0.70043E+05, 0.75275E+05, 0.80856E+05, 0.86808E+05, 0.93152E+05,
0.99913E+05, 0.10711E+06, 0.11478E+06, 0.12293E+06, 0.13161E+06,
0.14083E+06, 0.15063E+06, 0.16104E+06, 0.17209E+06, 0.18382E+06,
0.19626E+06, 0.20945E+06, 0.22343E+06, 0.23825E+06, 0.25394E+06,
0.27054E+06, 0.28812E+06, 0.30671E+06, 0.32636E+06, 0.34713E+06,
0.36907E+06, 0.39224E+06, 0.41671E+06, 0.44252E+06, 0.46975E+06,
0.49845E+06, 0.52872E+06, 0.56060E+06, 0.59418E+06, 0.62954E+06,
0.66676E+06, 0.70591E+06, 0.74710E+06, 0.79040E+06, 0.83591E+06,
0.88373E+06, 0.93395E+06, 0.98669E+06, 0.10421E+07, 0.11001E+07,
0.11611E+07, 0.12250E+07, 0.12920E+07, 0.13622E+07, 0.14357E+07,
0.15128E+07, 0.15934E+07, 0.16779E+07, 0.17662E+07, 0.18587E+07,
0.19554E+07, 0.20565E+07, 0.21621E+07, 0.22725E+07, 0.23879E+07,
0.25084E+07, 0.26342E+07, 0.27655E+07, 0.29026E+07, 0.30456E+07,
0.31947E+07, 0.33502E+07, 0.35124E+07, 0.36814E+07, 0.38575E+07,
0.40410E+07, 0.42321E+07, 0.44311E+07, 0.46382E+07, 0.48538E+07,
0.50782E+07, 0.53116E+07, 0.55544E+07, 0.58068E+07, 0.60693E+07,
0.63421E+07])
# --------------- H2CO 128: M = 20, I = 3 ---------------------
M = 20
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.27198E+03, 0.45755E+03, 0.67282E+03,
0.91421E+03, 0.11792E+04, 0.14660E+04, 0.17735E+04, 0.21012E+04,
0.24490E+04, 0.28175E+04, 0.32077E+04, 0.36212E+04, 0.40598E+04,
0.45256E+04, 0.50211E+04, 0.55488E+04, 0.61116E+04, 0.67127E+04,
0.73552E+04, 0.80426E+04, 0.87783E+04, 0.95663E+04, 0.10410E+05,
0.11315E+05, 0.12285E+05, 0.13324E+05, 0.14438E+05, 0.15632E+05,
0.16911E+05, 0.18281E+05, 0.19748E+05, 0.21319E+05, 0.23000E+05,
0.24799E+05, 0.26723E+05, 0.28780E+05, 0.30978E+05, 0.33326E+05,
0.35834E+05, 0.38510E+05, 0.41365E+05, 0.44410E+05, 0.47656E+05,
0.51115E+05, 0.54798E+05, 0.58719E+05, 0.62891E+05, 0.67329E+05,
0.72047E+05, 0.77060E+05, 0.82385E+05, 0.88039E+05, 0.94039E+05,
0.10040E+06, 0.10715E+06, 0.11431E+06, 0.12189E+06, 0.12991E+06,
0.13841E+06, 0.14740E+06, 0.15691E+06, 0.16696E+06, 0.17759E+06,
0.18882E+06, 0.20067E+06, 0.21318E+06, 0.22639E+06, 0.24032E+06,
0.25501E+06, 0.27049E+06, 0.28680E+06, 0.30398E+06, 0.32207E+06,
0.34111E+06, 0.36114E+06, 0.38221E+06, 0.40436E+06, 0.42765E+06,
0.45211E+06, 0.47781E+06, 0.50479E+06, 0.53311E+06, 0.56283E+06,
0.59400E+06, 0.62669E+06, 0.66097E+06, 0.69688E+06, 0.73451E+06,
0.77393E+06, 0.81520E+06, 0.85840E+06, 0.90360E+06, 0.95090E+06,
0.10004E+07, 0.10521E+07, 0.11061E+07, 0.11626E+07, 0.12216E+07,
0.12833E+07, 0.13476E+07, 0.14148E+07, 0.14849E+07, 0.15581E+07,
0.16344E+07, 0.17140E+07, 0.17969E+07, 0.18834E+07, 0.19735E+07,
0.20674E+07, 0.21651E+07, 0.22669E+07, 0.23729E+07, 0.24832E+07,
0.25980E+07, 0.27174E+07, 0.28416E+07, 0.29708E+07, 0.31050E+07,
0.32446E+07])
# --------------- HOCl 165: M = 21, I = 1 ---------------------
M = 21
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.17041E+04, 0.28708E+04, 0.42250E+04,
0.57456E+04, 0.74211E+04, 0.92470E+04, 0.11225E+05, 0.13359E+05,
0.15657E+05, 0.18129E+05, 0.20785E+05, 0.23637E+05, 0.26696E+05,
0.29974E+05, 0.33484E+05, 0.37239E+05, 0.41252E+05, 0.45536E+05,
0.50105E+05, 0.54973E+05, 0.60152E+05, 0.65659E+05, 0.71507E+05,
0.77711E+05, 0.84286E+05, 0.91249E+05, 0.98614E+05, 0.10640E+06,
0.11462E+06, 0.12330E+06, 0.13244E+06, 0.14208E+06, 0.15222E+06,
0.16289E+06, 0.17411E+06, 0.18589E+06, 0.19825E+06, 0.21123E+06,
0.22483E+06, 0.23908E+06, 0.25400E+06, 0.26962E+06, 0.28596E+06,
0.30303E+06, 0.32087E+06, 0.33950E+06, 0.35895E+06, 0.37923E+06,
0.40038E+06, 0.42243E+06, 0.44539E+06, 0.46930E+06, 0.49419E+06,
0.52008E+06, 0.54700E+06, 0.57498E+06, 0.60406E+06, 0.63426E+06,
0.66562E+06, 0.69816E+06, 0.73192E+06, 0.76692E+06, 0.80322E+06,
0.84083E+06, 0.87979E+06, 0.92014E+06, 0.96192E+06, 0.10052E+07,
0.10499E+07, 0.10961E+07, 0.11440E+07, 0.11934E+07, 0.12445E+07,
0.12973E+07, 0.13518E+07, 0.14081E+07, 0.14661E+07, 0.15261E+07,
0.15879E+07, 0.16516E+07, 0.17174E+07, 0.17851E+07, 0.18550E+07,
0.19269E+07, 0.20010E+07, 0.20773E+07, 0.21559E+07, 0.22367E+07,
0.23200E+07, 0.24056E+07, 0.24936E+07, 0.25842E+07, 0.26773E+07,
0.27730E+07, 0.28714E+07, 0.29724E+07, 0.30763E+07, 0.31829E+07,
0.32924E+07, 0.34049E+07, 0.35203E+07, 0.36387E+07, 0.37603E+07,
0.38850E+07, 0.40129E+07, 0.41441E+07, 0.42786E+07, 0.44165E+07,
0.45579E+07, 0.47028E+07, 0.48512E+07, 0.50033E+07, 0.51592E+07,
0.53187E+07, 0.54822E+07, 0.56495E+07, 0.58208E+07, 0.59961E+07,
0.61755E+07])
# --------------- HOCl 167: M = 21, I = 2 ---------------------
M = 21
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.17342E+04, 0.29215E+04, 0.42998E+04,
0.58473E+04, 0.75524E+04, 0.94107E+04, 0.11423E+05, 0.13595E+05,
0.15935E+05, 0.18450E+05, 0.21154E+05, 0.24056E+05, 0.27168E+05,
0.30505E+05, 0.34077E+05, 0.37899E+05, 0.41983E+05, 0.46343E+05,
0.50993E+05, 0.55947E+05, 0.61218E+05, 0.66822E+05, 0.72774E+05,
0.79088E+05, 0.85780E+05, 0.92866E+05, 0.10036E+06, 0.10829E+06,
0.11665E+06, 0.12548E+06, 0.13479E+06, 0.14460E+06, 0.15492E+06,
0.16578E+06, 0.17719E+06, 0.18918E+06, 0.20177E+06, 0.21497E+06,
0.22881E+06, 0.24332E+06, 0.25851E+06, 0.27440E+06, 0.29102E+06,
0.30840E+06, 0.32656E+06, 0.34552E+06, 0.36531E+06, 0.38595E+06,
0.40748E+06, 0.42991E+06, 0.45328E+06, 0.47762E+06, 0.50295E+06,
0.52929E+06, 0.55669E+06, 0.58517E+06, 0.61477E+06, 0.64550E+06,
0.67741E+06, 0.71053E+06, 0.74489E+06, 0.78052E+06, 0.81745E+06,
0.85573E+06, 0.89539E+06, 0.93645E+06, 0.97897E+06, 0.10230E+07,
0.10685E+07, 0.11156E+07, 0.11643E+07, 0.12146E+07, 0.12666E+07,
0.13203E+07, 0.13757E+07, 0.14330E+07, 0.14921E+07, 0.15531E+07,
0.16160E+07, 0.16809E+07, 0.17478E+07, 0.18168E+07, 0.18878E+07,
0.19611E+07, 0.20365E+07, 0.21141E+07, 0.21941E+07, 0.22764E+07,
0.23611E+07, 0.24482E+07, 0.25378E+07, 0.26300E+07, 0.27248E+07,
0.28222E+07, 0.29223E+07, 0.30251E+07, 0.31308E+07, 0.32393E+07,
0.33508E+07, 0.34652E+07, 0.35827E+07, 0.37032E+07, 0.38269E+07,
0.39539E+07, 0.40840E+07, 0.42176E+07, 0.43545E+07, 0.44948E+07,
0.46387E+07, 0.47861E+07, 0.49372E+07, 0.50920E+07, 0.52506E+07,
0.54130E+07, 0.55793E+07, 0.57496E+07, 0.59239E+07, 0.61024E+07,
0.62850E+07])
# --------------- N2 44: M = 22, I = 1 ---------------------
M = 22
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.95487E+02, 0.13466E+03, 0.17386E+03,
0.21307E+03, 0.25230E+03, 0.29154E+03, 0.33080E+03, 0.37008E+03,
0.40937E+03, 0.44868E+03, 0.48800E+03, 0.52736E+03, 0.56674E+03,
0.60616E+03, 0.64562E+03, 0.68515E+03, 0.72475E+03, 0.76445E+03,
0.80426E+03, 0.84420E+03, 0.88430E+03, 0.92457E+03, 0.96505E+03,
0.10057E+04, 0.10467E+04, 0.10879E+04, 0.11293E+04, 0.11711E+04,
0.12132E+04, 0.12556E+04, 0.12984E+04, 0.13416E+04, 0.13851E+04,
0.14291E+04, 0.14734E+04, 0.15182E+04, 0.15635E+04, 0.16091E+04,
0.16553E+04, 0.17019E+04, 0.17490E+04, 0.17965E+04, 0.18446E+04,
0.18932E+04, 0.19422E+04, 0.19918E+04, 0.20419E+04, 0.20926E+04,
0.21437E+04, 0.21954E+04, 0.22477E+04, 0.23004E+04, 0.23538E+04,
0.24077E+04, 0.24621E+04, 0.25171E+04, 0.25727E+04, 0.26288E+04,
0.26856E+04, 0.27428E+04, 0.28007E+04, 0.28591E+04, 0.29181E+04,
0.29777E+04, 0.30379E+04, 0.30986E+04, 0.31600E+04, 0.32219E+04,
0.32844E+04, 0.33475E+04, 0.34112E+04, 0.34755E+04, 0.35404E+04,
0.36059E+04, 0.36720E+04, 0.37387E+04, 0.38060E+04, 0.38739E+04,
0.39424E+04, 0.40115E+04, 0.40812E+04, 0.41515E+04, 0.42224E+04,
0.42939E+04, 0.43661E+04, 0.44388E+04, 0.45122E+04, 0.45861E+04,
0.46607E+04, 0.47359E+04, 0.48117E+04, 0.48882E+04, 0.49652E+04,
0.50428E+04, 0.51211E+04, 0.52000E+04, 0.52795E+04, 0.53596E+04,
0.54404E+04, 0.55217E+04, 0.56037E+04, 0.56863E+04, 0.57695E+04,
0.58533E+04, 0.59378E+04, 0.60229E+04, 0.61086E+04, 0.61950E+04,
0.62819E+04, 0.63695E+04, 0.64577E+04, 0.65465E+04, 0.66360E+04,
0.67261E+04, 0.68168E+04, 0.69081E+04, 0.70001E+04, 0.70927E+04,
0.71859E+04])
# --------------- N2 45: M = 22, I = 2 --------------------- not in TIPS-2011
M = 22
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M,I)] = float32([0.])
# --------------- HCN 124: M = 23, I = 1 ---------------------
M = 23
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.17143E+03, 0.24209E+03, 0.31285E+03,
0.38392E+03, 0.45582E+03, 0.52929E+03, 0.60515E+03, 0.68424E+03,
0.76731E+03, 0.85505E+03, 0.94805E+03, 0.10468E+04, 0.11519E+04,
0.12637E+04, 0.13826E+04, 0.15090E+04, 0.16435E+04, 0.17863E+04,
0.19378E+04, 0.20985E+04, 0.22689E+04, 0.24492E+04, 0.26401E+04,
0.28418E+04, 0.30550E+04, 0.32801E+04, 0.35176E+04, 0.37680E+04,
0.40318E+04, 0.43097E+04, 0.46021E+04, 0.49097E+04, 0.52330E+04,
0.55727E+04, 0.59294E+04, 0.63038E+04, 0.66964E+04, 0.71081E+04,
0.75396E+04, 0.79915E+04, 0.84646E+04, 0.89596E+04, 0.94774E+04,
0.10019E+05, 0.10585E+05, 0.11176E+05, 0.11793E+05, 0.12437E+05,
0.13108E+05, 0.13809E+05, 0.14540E+05, 0.15301E+05, 0.16094E+05,
0.16919E+05, 0.17779E+05, 0.18673E+05, 0.19603E+05, 0.20570E+05,
0.21575E+05, 0.22619E+05, 0.23704E+05, 0.24831E+05, 0.26000E+05,
0.27213E+05, 0.28472E+05, 0.29778E+05, 0.31131E+05, 0.32534E+05,
0.33987E+05, 0.35493E+05, 0.37052E+05, 0.38666E+05, 0.40336E+05,
0.42064E+05, 0.43852E+05, 0.45701E+05, 0.47612E+05, 0.49587E+05,
0.51629E+05, 0.53738E+05, 0.55916E+05, 0.58165E+05, 0.60486E+05,
0.62883E+05, 0.65355E+05, 0.67905E+05, 0.70536E+05, 0.73249E+05,
0.76045E+05, 0.78927E+05, 0.81897E+05, 0.84957E+05, 0.88108E+05,
0.91354E+05, 0.94696E+05, 0.98136E+05, 0.10168E+06, 0.10532E+06,
0.10907E+06, 0.11292E+06, 0.11689E+06, 0.12096E+06, 0.12516E+06,
0.12946E+06, 0.13389E+06, 0.13844E+06, 0.14311E+06, 0.14791E+06,
0.15284E+06, 0.15790E+06, 0.16310E+06, 0.16843E+06, 0.17391E+06,
0.17953E+06, 0.18529E+06, 0.19120E+06, 0.19726E+06, 0.20348E+06,
0.20986E+06])
# --------------- HCN 134: M = 23, I = 2 ---------------------
M = 23
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.35186E+03, 0.49693E+03, 0.64221E+03,
0.78815E+03, 0.93585E+03, 0.10868E+04, 0.12428E+04, 0.14056E+04,
0.15766E+04, 0.17574E+04, 0.19491E+04, 0.21528E+04, 0.23695E+04,
0.26002E+04, 0.28457E+04, 0.31068E+04, 0.33845E+04, 0.36795E+04,
0.39926E+04, 0.43249E+04, 0.46770E+04, 0.50500E+04, 0.54447E+04,
0.58621E+04, 0.63032E+04, 0.67690E+04, 0.72606E+04, 0.77789E+04,
0.83252E+04, 0.89005E+04, 0.95062E+04, 0.10143E+05, 0.10813E+05,
0.11517E+05, 0.12256E+05, 0.13032E+05, 0.13846E+05, 0.14699E+05,
0.15593E+05, 0.16530E+05, 0.17511E+05, 0.18538E+05, 0.19612E+05,
0.20734E+05, 0.21908E+05, 0.23134E+05, 0.24414E+05, 0.25750E+05,
0.27145E+05, 0.28599E+05, 0.30115E+05, 0.31694E+05, 0.33340E+05,
0.35054E+05, 0.36838E+05, 0.38694E+05, 0.40625E+05, 0.42633E+05,
0.44720E+05, 0.46889E+05, 0.49142E+05, 0.51481E+05, 0.53910E+05,
0.56430E+05, 0.59045E+05, 0.61757E+05, 0.64568E+05, 0.67482E+05,
0.70502E+05, 0.73630E+05, 0.76869E+05, 0.80223E+05, 0.83694E+05,
0.87285E+05, 0.91000E+05, 0.94843E+05, 0.98815E+05, 0.10292E+06,
0.10716E+06, 0.11155E+06, 0.11608E+06, 0.12075E+06, 0.12558E+06,
0.13056E+06, 0.13570E+06, 0.14100E+06, 0.14647E+06, 0.15211E+06,
0.15793E+06, 0.16392E+06, 0.17009E+06, 0.17646E+06, 0.18301E+06,
0.18976E+06, 0.19671E+06, 0.20387E+06, 0.21123E+06, 0.21881E+06,
0.22660E+06, 0.23462E+06, 0.24287E+06, 0.25135E+06, 0.26007E+06,
0.26903E+06, 0.27824E+06, 0.28771E+06, 0.29743E+06, 0.30742E+06,
0.31767E+06, 0.32820E+06, 0.33901E+06, 0.35011E+06, 0.36150E+06,
0.37319E+06, 0.38518E+06, 0.39749E+06, 0.41010E+06, 0.42304E+06,
0.43631E+06])
# --------------- HCN 135: M = 23, I = 3 ---------------------
M = 23
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.11863E+03, 0.16755E+03, 0.21653E+03,
0.26576E+03, 0.31559E+03, 0.36656E+03, 0.41926E+03, 0.47428E+03,
0.53214E+03, 0.59333E+03, 0.65824E+03, 0.72727E+03, 0.80074E+03,
0.87898E+03, 0.96227E+03, 0.10509E+04, 0.11452E+04, 0.12454E+04,
0.13518E+04, 0.14647E+04, 0.15844E+04, 0.17112E+04, 0.18455E+04,
0.19875E+04, 0.21377E+04, 0.22962E+04, 0.24636E+04, 0.26402E+04,
0.28263E+04, 0.30224E+04, 0.32289E+04, 0.34461E+04, 0.36745E+04,
0.39145E+04, 0.41667E+04, 0.44314E+04, 0.47092E+04, 0.50005E+04,
0.53059E+04, 0.56259E+04, 0.59609E+04, 0.63116E+04, 0.66785E+04,
0.70622E+04, 0.74633E+04, 0.78823E+04, 0.83200E+04, 0.87769E+04,
0.92536E+04, 0.97509E+04, 0.10269E+05, 0.10810E+05, 0.11373E+05,
0.11959E+05, 0.12570E+05, 0.13205E+05, 0.13866E+05, 0.14554E+05,
0.15268E+05, 0.16011E+05, 0.16782E+05, 0.17583E+05, 0.18415E+05,
0.19279E+05, 0.20174E+05, 0.21103E+05, 0.22067E+05, 0.23065E+05,
0.24100E+05, 0.25172E+05, 0.26282E+05, 0.27432E+05, 0.28622E+05,
0.29853E+05, 0.31127E+05, 0.32445E+05, 0.33807E+05, 0.35215E+05,
0.36670E+05, 0.38174E+05, 0.39727E+05, 0.41330E+05, 0.42986E+05,
0.44695E+05, 0.46459E+05, 0.48278E+05, 0.50155E+05, 0.52091E+05,
0.54086E+05, 0.56143E+05, 0.58263E+05, 0.60447E+05, 0.62696E+05,
0.65013E+05, 0.67399E+05, 0.69856E+05, 0.72384E+05, 0.74986E+05,
0.77663E+05, 0.80416E+05, 0.83249E+05, 0.86161E+05, 0.89156E+05,
0.92233E+05, 0.95397E+05, 0.98648E+05, 0.10199E+06, 0.10542E+06,
0.10894E+06, 0.11256E+06, 0.11627E+06, 0.12009E+06, 0.12400E+06,
0.12802E+06, 0.13214E+06, 0.13636E+06, 0.14070E+06, 0.14515E+06,
0.14971E+06])
# --------------- CH3Cl 215: M = 24, I = 1 ---------------------
M = 24
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.50529E+04, 0.85123E+04, 0.12528E+05,
0.17036E+05, 0.22005E+05, 0.27429E+05, 0.33325E+05, 0.39734E+05,
0.46713E+05, 0.54336E+05, 0.62690E+05, 0.71876E+05, 0.82006E+05,
0.93204E+05, 0.10560E+06, 0.11936E+06, 0.13463E+06, 0.15158E+06,
0.17043E+06, 0.19137E+06, 0.21464E+06, 0.24049E+06, 0.26920E+06,
0.30107E+06, 0.33642E+06, 0.37563E+06, 0.41907E+06, 0.46719E+06,
0.52045E+06, 0.57936E+06, 0.64448E+06, 0.71641E+06, 0.79582E+06,
0.88341E+06, 0.97997E+06, 0.10863E+07, 0.12034E+07, 0.13323E+07,
0.14739E+07, 0.16295E+07, 0.18003E+07, 0.19877E+07, 0.21932E+07,
0.24183E+07, 0.26649E+07, 0.29346E+07, 0.32296E+07, 0.35519E+07,
0.39039E+07, 0.42881E+07, 0.47072E+07, 0.51639E+07, 0.56615E+07,
0.62032E+07, 0.67926E+07, 0.74335E+07, 0.81299E+07, 0.88862E+07,
0.97071E+07, 0.10598E+08, 0.11563E+08, 0.12609E+08, 0.13742E+08,
0.14968E+08, 0.16294E+08, 0.17728E+08, 0.19277E+08, 0.20950E+08,
0.22756E+08, 0.24704E+08, 0.26805E+08, 0.29069E+08, 0.31507E+08,
0.34132E+08, 0.36957E+08, 0.39995E+08, 0.43260E+08, 0.46769E+08,
0.50538E+08, 0.54583E+08, 0.58923E+08, 0.63578E+08, 0.68568E+08,
0.73914E+08, 0.79640E+08, 0.85770E+08, 0.92329E+08, 0.99345E+08,
0.10685E+09, 0.11486E+09, 0.12342E+09, 0.13257E+09, 0.14233E+09,
0.15274E+09, 0.16384E+09, 0.17568E+09, 0.18829E+09, 0.20173E+09,
0.21604E+09, 0.23127E+09, 0.24748E+09, 0.26471E+09, 0.28304E+09,
0.30252E+09, 0.32322E+09, 0.34520E+09, 0.36853E+09, 0.39330E+09,
0.41958E+09, 0.44745E+09, 0.47701E+09, 0.50833E+09, 0.54151E+09,
0.57667E+09, 0.61389E+09, 0.65329E+09, 0.69498E+09, 0.73909E+09,
0.78573E+09])
# --------------- CH3Cl 217: M = 24, I = 2 ---------------------
M = 24
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.51327E+04, 0.86469E+04, 0.12726E+05,
0.17306E+05, 0.22354E+05, 0.27863E+05, 0.33853E+05, 0.40364E+05,
0.47453E+05, 0.55197E+05, 0.63684E+05, 0.73016E+05, 0.83306E+05,
0.94681E+05, 0.10728E+06, 0.12125E+06, 0.13676E+06, 0.15399E+06,
0.17313E+06, 0.19441E+06, 0.21804E+06, 0.24430E+06, 0.27347E+06,
0.30584E+06, 0.34176E+06, 0.38158E+06, 0.42572E+06, 0.47460E+06,
0.52871E+06, 0.58855E+06, 0.65471E+06, 0.72778E+06, 0.80844E+06,
0.89743E+06, 0.99552E+06, 0.11036E+07, 0.12225E+07, 0.13534E+07,
0.14973E+07, 0.16553E+07, 0.18289E+07, 0.20193E+07, 0.22280E+07,
0.24567E+07, 0.27072E+07, 0.29812E+07, 0.32808E+07, 0.36083E+07,
0.39659E+07, 0.43562E+07, 0.47819E+07, 0.52459E+07, 0.57514E+07,
0.63017E+07, 0.69005E+07, 0.75515E+07, 0.82590E+07, 0.90273E+07,
0.98613E+07, 0.10766E+08, 0.11747E+08, 0.12809E+08, 0.13960E+08,
0.15206E+08, 0.16553E+08, 0.18010E+08, 0.19584E+08, 0.21283E+08,
0.23118E+08, 0.25097E+08, 0.27231E+08, 0.29531E+08, 0.32008E+08,
0.34674E+08, 0.37544E+08, 0.40630E+08, 0.43948E+08, 0.47513E+08,
0.51341E+08, 0.55451E+08, 0.59860E+08, 0.64589E+08, 0.69658E+08,
0.75089E+08, 0.80906E+08, 0.87134E+08, 0.93797E+08, 0.10092E+09,
0.10854E+09, 0.11669E+09, 0.12539E+09, 0.13467E+09, 0.14459E+09,
0.15517E+09, 0.16645E+09, 0.17847E+09, 0.19129E+09, 0.20494E+09,
0.21948E+09, 0.23495E+09, 0.25141E+09, 0.26893E+09, 0.28754E+09,
0.30733E+09, 0.32836E+09, 0.35069E+09, 0.37440E+09, 0.39956E+09,
0.42626E+09, 0.45457E+09, 0.48460E+09, 0.51642E+09, 0.55013E+09,
0.58585E+09, 0.62366E+09, 0.66369E+09, 0.70605E+09, 0.75085E+09,
0.79824E+09])
# --------------- H2O2 1661: M = 25, I = 1 ---------------------
M = 25
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.62392E+03, 0.10958E+04, 0.16692E+04,
0.23492E+04, 0.31427E+04, 0.40574E+04, 0.51014E+04, 0.62840E+04,
0.76157E+04, 0.91085E+04, 0.10776E+05, 0.12633E+05, 0.14696E+05,
0.16983E+05, 0.19515E+05, 0.22312E+05, 0.25396E+05, 0.28792E+05,
0.32526E+05, 0.36625E+05, 0.41118E+05, 0.46036E+05, 0.51410E+05,
0.57275E+05, 0.63667E+05, 0.70623E+05, 0.78185E+05, 0.86394E+05,
0.95295E+05, 0.10493E+06, 0.11536E+06, 0.12662E+06, 0.13878E+06,
0.15188E+06, 0.16600E+06, 0.18118E+06, 0.19750E+06, 0.21503E+06,
0.23383E+06, 0.25398E+06, 0.27556E+06, 0.29864E+06, 0.32333E+06,
0.34970E+06, 0.37784E+06, 0.40786E+06, 0.43985E+06, 0.47392E+06,
0.51018E+06, 0.54874E+06, 0.58972E+06, 0.63324E+06, 0.67943E+06,
0.72843E+06, 0.78037E+06, 0.83540E+06, 0.89366E+06, 0.95530E+06,
0.10205E+07, 0.10894E+07, 0.11622E+07, 0.12391E+07, 0.13202E+07,
0.14057E+07, 0.14959E+07, 0.15909E+07, 0.16910E+07, 0.17963E+07,
0.19072E+07, 0.20237E+07, 0.21463E+07, 0.22750E+07, 0.24102E+07,
0.25522E+07, 0.27012E+07, 0.28575E+07, 0.30213E+07, 0.31931E+07,
0.33730E+07, 0.35615E+07, 0.37588E+07, 0.39653E+07, 0.41813E+07,
0.44072E+07, 0.46433E+07, 0.48901E+07, 0.51479E+07, 0.54171E+07,
0.56982E+07, 0.59915E+07, 0.62976E+07, 0.66167E+07, 0.69495E+07,
0.72963E+07, 0.76577E+07, 0.80342E+07, 0.84262E+07, 0.88343E+07,
0.92591E+07, 0.97011E+07, 0.10161E+08, 0.10639E+08, 0.11136E+08,
0.11652E+08, 0.12189E+08, 0.12746E+08, 0.13325E+08, 0.13926E+08,
0.14550E+08, 0.15198E+08, 0.15870E+08, 0.16566E+08, 0.17289E+08,
0.18038E+08, 0.18814E+08, 0.19619E+08, 0.20452E+08, 0.21315E+08,
0.22209E+08])
# --------------- C2H2 1221: M = 26, I = 1 ---------------------
M = 26
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.71617E+02, 0.10121E+03, 0.13092E+03,
0.16104E+03, 0.19218E+03, 0.22509E+03, 0.26062E+03, 0.29959E+03,
0.34281E+03, 0.39103E+03, 0.44503E+03, 0.50558E+03, 0.57346E+03,
0.64950E+03, 0.73457E+03, 0.82960E+03, 0.93557E+03, 0.10535E+04,
0.11846E+04, 0.13301E+04, 0.14911E+04, 0.16692E+04, 0.18658E+04,
0.20825E+04, 0.23211E+04, 0.25833E+04, 0.28711E+04, 0.31867E+04,
0.35323E+04, 0.39102E+04, 0.43230E+04, 0.47735E+04, 0.52645E+04,
0.57991E+04, 0.63807E+04, 0.70127E+04, 0.76988E+04, 0.84430E+04,
0.92495E+04, 0.10123E+05, 0.11067E+05, 0.12088E+05, 0.13191E+05,
0.14381E+05, 0.15664E+05, 0.17047E+05, 0.18536E+05, 0.20137E+05,
0.21859E+05, 0.23710E+05, 0.25696E+05, 0.27827E+05, 0.30112E+05,
0.32561E+05, 0.35183E+05, 0.37990E+05, 0.40991E+05, 0.44199E+05,
0.47626E+05, 0.51285E+05, 0.55189E+05, 0.59353E+05, 0.63791E+05,
0.68518E+05, 0.73551E+05, 0.78908E+05, 0.84604E+05, 0.90661E+05,
0.97095E+05, 0.10393E+06, 0.11118E+06, 0.11888E+06, 0.12704E+06,
0.13569E+06, 0.14486E+06, 0.15457E+06, 0.16485E+06, 0.17572E+06,
0.18722E+06, 0.19938E+06, 0.21223E+06, 0.22581E+06, 0.24014E+06,
0.25527E+06, 0.27123E+06, 0.28807E+06, 0.30582E+06, 0.32452E+06,
0.34423E+06, 0.36498E+06, 0.38683E+06, 0.40982E+06, 0.43401E+06,
0.45944E+06, 0.48618E+06, 0.51428E+06, 0.54380E+06, 0.57480E+06,
0.60735E+06, 0.64151E+06, 0.67735E+06, 0.71494E+06, 0.75436E+06,
0.79568E+06, 0.83898E+06, 0.88434E+06, 0.93184E+06, 0.98158E+06,
0.10336E+07, 0.10881E+07, 0.11451E+07, 0.12047E+07, 0.12670E+07,
0.13321E+07, 0.14002E+07, 0.14713E+07, 0.15455E+07, 0.16231E+07,
0.17040E+07])
# --------------- C2H2 1231: M = 26, I = 2 ---------------------
M = 26
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.28647E+03, 0.40486E+03, 0.52369E+03,
0.64419E+03, 0.76874E+03, 0.90040E+03, 0.10425E+04, 0.11984E+04,
0.13713E+04, 0.15642E+04, 0.17802E+04, 0.20223E+04, 0.22939E+04,
0.25981E+04, 0.29384E+04, 0.33185E+04, 0.37424E+04, 0.42142E+04,
0.47386E+04, 0.53203E+04, 0.59646E+04, 0.66769E+04, 0.74633E+04,
0.83302E+04, 0.92845E+04, 0.10333E+05, 0.11485E+05, 0.12747E+05,
0.14129E+05, 0.15641E+05, 0.17292E+05, 0.19094E+05, 0.21058E+05,
0.23197E+05, 0.25523E+05, 0.28051E+05, 0.30796E+05, 0.33773E+05,
0.36999E+05, 0.40492E+05, 0.44270E+05, 0.48354E+05, 0.52765E+05,
0.57525E+05, 0.62658E+05, 0.68189E+05, 0.74144E+05, 0.80551E+05,
0.87439E+05, 0.94840E+05, 0.10279E+06, 0.11131E+06, 0.12045E+06,
0.13025E+06, 0.14074E+06, 0.15196E+06, 0.16397E+06, 0.17680E+06,
0.19051E+06, 0.20514E+06, 0.22076E+06, 0.23742E+06, 0.25517E+06,
0.27408E+06, 0.29421E+06, 0.31564E+06, 0.33842E+06, 0.36265E+06,
0.38839E+06, 0.41572E+06, 0.44474E+06, 0.47553E+06, 0.50818E+06,
0.54278E+06, 0.57945E+06, 0.61829E+06, 0.65940E+06, 0.70289E+06,
0.74890E+06, 0.79754E+06, 0.84894E+06, 0.90324E+06, 0.96057E+06,
0.10211E+07, 0.10849E+07, 0.11523E+07, 0.12233E+07, 0.12981E+07,
0.13769E+07, 0.14599E+07, 0.15473E+07, 0.16393E+07, 0.17361E+07,
0.18378E+07, 0.19447E+07, 0.20571E+07, 0.21752E+07, 0.22992E+07,
0.24294E+07, 0.25661E+07, 0.27094E+07, 0.28598E+07, 0.30175E+07,
0.31828E+07, 0.33560E+07, 0.35374E+07, 0.37274E+07, 0.39264E+07,
0.41346E+07, 0.43525E+07, 0.45805E+07, 0.48188E+07, 0.50681E+07,
0.53286E+07, 0.56008E+07, 0.58852E+07, 0.61823E+07, 0.64924E+07,
0.68162E+07])
# --------------- C2H2 1222: M = 26, I = 3 ---------------------
M = 26
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.24843E+03, 0.35373E+03, 0.45997E+03,
0.56930E+03, 0.68497E+03, 0.81065E+03, 0.94999E+03, 0.11065E+04,
0.12837E+04, 0.14848E+04, 0.17135E+04, 0.19731E+04, 0.22675E+04,
0.26205E+04, 0.29999E+04, 0.34276E+04, 0.39086E+04, 0.44486E+04,
0.50533E+04, 0.57294E+04, 0.64837E+04, 0.73237E+04, 0.82576E+04,
0.92941E+04, 0.10443E+05, 0.11714E+05, 0.13117E+05, 0.14666E+05,
0.16373E+05, 0.18250E+05, 0.20313E+05, 0.22578E+05, 0.25060E+05,
0.27777E+05, 0.30750E+05, 0.33997E+05, 0.37541E+05, 0.41405E+05,
0.45614E+05, 0.50192E+05, 0.55170E+05, 0.60576E+05, 0.66441E+05,
0.72799E+05, 0.79686E+05, 0.87140E+05, 0.95199E+05, 0.10391E+06,
0.11331E+06, 0.12345E+06, 0.13438E+06, 0.14615E+06, 0.15882E+06,
0.17245E+06, 0.18710E+06, 0.20283E+06, 0.21972E+06, 0.23783E+06,
0.25724E+06, 0.27804E+06, 0.30030E+06, 0.32411E+06, 0.34958E+06,
0.37679E+06, 0.40585E+06, 0.43686E+06, 0.46994E+06, 0.50521E+06,
0.54280E+06, 0.58282E+06, 0.62542E+06, 0.67074E+06, 0.71892E+06,
0.77013E+06, 0.82453E+06, 0.88228E+06, 0.94356E+06, 0.10086E+07,
0.10775E+07, 0.11505E+07, 0.12279E+07, 0.13098E+07, 0.13964E+07,
0.14881E+07, 0.15850E+07, 0.16875E+07, 0.17957E+07, 0.19100E+07,
0.20307E+07, 0.21580E+07, 0.22923E+07, 0.24339E+07, 0.25831E+07,
0.27404E+07, 0.29060E+07, 0.30803E+07, 0.32638E+07, 0.34568E+07,
0.36598E+07, 0.38733E+07, 0.40976E+07, 0.43332E+07, 0.45807E+07,
0.48406E+07, 0.51133E+07, 0.53995E+07, 0.56997E+07, 0.60144E+07,
0.63444E+07, 0.66901E+07, 0.70524E+07, 0.74317E+07, 0.78289E+07,
0.82447E+07, 0.86797E+07, 0.91348E+07, 0.96108E+07, 0.10108E+08,
0.10629E+08])
# --------------- C2H6 1221: M = 27, I = 1 ---------------------
M = 27
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.47267E+04, 0.80011E+04, 0.11928E+05,
0.16564E+05, 0.21985E+05, 0.28287E+05, 0.35590E+05, 0.44049E+05,
0.53862E+05, 0.65277E+05, 0.78597E+05, 0.94191E+05, 0.11250E+06,
0.13407E+06, 0.15952E+06, 0.18962E+06, 0.22526E+06, 0.26751E+06,
0.31763E+06, 0.37714E+06, 0.44780E+06, 0.53174E+06, 0.63145E+06,
0.74989E+06, 0.89056E+06, 0.10576E+07, 0.12559E+07, 0.14912E+07,
0.17704E+07, 0.21013E+07, 0.24936E+07, 0.29582E+07, 0.35083E+07,
0.41591E+07, 0.49286E+07, 0.58379E+07, 0.69116E+07, 0.81787E+07,
0.96728E+07, 0.11433E+08, 0.13506E+08, 0.15945E+08, 0.18812E+08,
0.22180E+08, 0.26134E+08, 0.30770E+08, 0.36204E+08, 0.42565E+08,
0.50008E+08, 0.58708E+08, 0.68868E+08, 0.80725E+08, 0.94548E+08,
0.11065E+09, 0.12940E+09, 0.15119E+09, 0.17652E+09, 0.20593E+09,
0.24003E+09, 0.27956E+09, 0.32533E+09, 0.37829E+09, 0.43951E+09,
0.51021E+09, 0.59180E+09, 0.68588E+09, 0.79427E+09, 0.91904E+09,
0.10625E+10, 0.12275E+10, 0.14168E+10, 0.16341E+10, 0.18831E+10,
0.21684E+10, 0.24949E+10, 0.28684E+10, 0.32951E+10, 0.37823E+10,
0.43382E+10, 0.49719E+10, 0.56938E+10, 0.65156E+10, 0.74502E+10,
0.85125E+10, 0.97190E+10, 0.11088E+11, 0.12641E+11, 0.14401E+11,
0.16393E+11, 0.18648E+11, 0.21198E+11, 0.24079E+11, 0.27332E+11,
0.31003E+11, 0.35142E+11, 0.39807E+11, 0.45060E+11, 0.50972E+11,
0.57620E+11, 0.65091E+11, 0.73483E+11, 0.82902E+11, 0.93467E+11,
0.10531E+12, 0.11858E+12, 0.13343E+12, 0.15005E+12, 0.16864E+12,
0.18941E+12, 0.21260E+12, 0.23849E+12, 0.26737E+12, 0.29957E+12,
0.33545E+12, 0.37541E+12, 0.41987E+12, 0.46934E+12, 0.52432E+12,
0.58542E+12])
# --------------- C2H6 1231: M = 27, I = 2 ---------------------
M = 27
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.24128E+04, 0.40845E+04, 0.60896E+04,
0.84564E+04, 0.11224E+05, 0.14442E+05, 0.18170E+05, 0.22490E+05,
0.27501E+05, 0.33329E+05, 0.40131E+05, 0.48094E+05, 0.57446E+05,
0.68459E+05, 0.81458E+05, 0.96828E+05, 0.11503E+06, 0.13661E+06,
0.16221E+06, 0.19260E+06, 0.22869E+06, 0.27156E+06, 0.32249E+06,
0.38298E+06, 0.45483E+06, 0.54015E+06, 0.64144E+06, 0.76164E+06,
0.90423E+06, 0.10733E+07, 0.12737E+07, 0.15110E+07, 0.17920E+07,
0.21245E+07, 0.25176E+07, 0.29821E+07, 0.35307E+07, 0.41780E+07,
0.49414E+07, 0.58408E+07, 0.68999E+07, 0.81461E+07, 0.96110E+07,
0.11332E+08, 0.13352E+08, 0.15721E+08, 0.18497E+08, 0.21748E+08,
0.25551E+08, 0.29997E+08, 0.35189E+08, 0.41248E+08, 0.48313E+08,
0.56542E+08, 0.66122E+08, 0.77262E+08, 0.90206E+08, 0.10523E+09,
0.12267E+09, 0.14287E+09, 0.16626E+09, 0.19333E+09, 0.22462E+09,
0.26076E+09, 0.30247E+09, 0.35056E+09, 0.40596E+09, 0.46974E+09,
0.54310E+09, 0.62740E+09, 0.72420E+09, 0.83527E+09, 0.96260E+09,
0.11084E+10, 0.12754E+10, 0.14663E+10, 0.16845E+10, 0.19336E+10,
0.22178E+10, 0.25418E+10, 0.29109E+10, 0.33311E+10, 0.38090E+10,
0.43522E+10, 0.49691E+10, 0.56693E+10, 0.64633E+10, 0.73631E+10,
0.83821E+10, 0.95352E+10, 0.10839E+11, 0.12312E+11, 0.13976E+11,
0.15854E+11, 0.17971E+11, 0.20357E+11, 0.23043E+11, 0.26067E+11,
0.29467E+11, 0.33289E+11, 0.37581E+11, 0.42399E+11, 0.47804E+11,
0.53862E+11, 0.60649E+11, 0.68247E+11, 0.76750E+11, 0.86257E+11,
0.96882E+11, 0.10875E+12, 0.12199E+12, 0.13677E+12, 0.15325E+12,
0.17160E+12, 0.19204E+12, 0.21480E+12, 0.24010E+12, 0.26824E+12,
0.29950E+12])
# --------------- PH3 1111: M = 28, I = 1 ---------------------
M = 28
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.29652E+03, 0.49643E+03, 0.72810E+03,
0.98777E+03, 0.12729E+04, 0.15820E+04, 0.19145E+04, 0.22708E+04,
0.26520E+04, 0.30600E+04, 0.34971E+04, 0.39662E+04, 0.44702E+04,
0.50126E+04, 0.55970E+04, 0.62273E+04, 0.69075E+04, 0.76421E+04,
0.84357E+04, 0.92933E+04, 0.10220E+05, 0.11222E+05, 0.12304E+05,
0.13473E+05, 0.14736E+05, 0.16099E+05, 0.17571E+05, 0.19160E+05,
0.20873E+05, 0.22720E+05, 0.24710E+05, 0.26854E+05, 0.29162E+05,
0.31646E+05, 0.34317E+05, 0.37188E+05, 0.40273E+05, 0.43585E+05,
0.47140E+05, 0.50953E+05, 0.55040E+05, 0.59419E+05, 0.64108E+05,
0.69127E+05, 0.74496E+05, 0.80236E+05, 0.86369E+05, 0.92918E+05,
0.99909E+05, 0.10737E+06, 0.11532E+06, 0.12380E+06, 0.13282E+06,
0.14244E+06, 0.15266E+06, 0.16354E+06, 0.17511E+06, 0.18739E+06,
0.20044E+06, 0.21430E+06, 0.22900E+06, 0.24459E+06, 0.26111E+06,
0.27862E+06, 0.29716E+06, 0.31680E+06, 0.33757E+06, 0.35954E+06,
0.38277E+06, 0.40733E+06, 0.43326E+06, 0.46065E+06, 0.48955E+06,
0.52005E+06, 0.55222E+06, 0.58614E+06, 0.62188E+06, 0.65953E+06,
0.69917E+06, 0.74091E+06, 0.78483E+06, 0.83103E+06, 0.87960E+06,
0.93067E+06, 0.98432E+06, 0.10407E+07, 0.10999E+07, 0.11620E+07,
0.12272E+07, 0.12956E+07, 0.13673E+07, 0.14425E+07, 0.15212E+07,
0.16038E+07, 0.16902E+07, 0.17808E+07, 0.18755E+07, 0.19746E+07,
0.20784E+07, 0.21868E+07, 0.23002E+07, 0.24187E+07, 0.25425E+07,
0.26719E+07, 0.28070E+07, 0.29480E+07, 0.30952E+07, 0.32488E+07,
0.34091E+07, 0.35762E+07, 0.37504E+07, 0.39320E+07, 0.41213E+07,
0.43185E+07, 0.45239E+07, 0.47378E+07, 0.49605E+07, 0.51923E+07,
0.54335E+07])
# --------------- COF2 269: M = 29, I = 1 ---------------------
M = 29
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.54999E+04, 0.92749E+04, 0.13668E+05,
0.18643E+05, 0.24224E+05, 0.30487E+05, 0.37547E+05, 0.45543E+05,
0.54639E+05, 0.65019E+05, 0.76886E+05, 0.90462E+05, 0.10600E+06,
0.12377E+06, 0.14407E+06, 0.16723E+06, 0.19363E+06, 0.22367E+06,
0.25780E+06, 0.29650E+06, 0.34031E+06, 0.38982E+06, 0.44568E+06,
0.50859E+06, 0.57932E+06, 0.65872E+06, 0.74770E+06, 0.84724E+06,
0.95844E+06, 0.10825E+07, 0.12205E+07, 0.13741E+07, 0.15446E+07,
0.17336E+07, 0.19428E+07, 0.21742E+07, 0.24296E+07, 0.27113E+07,
0.30214E+07, 0.33626E+07, 0.37373E+07, 0.41484E+07, 0.45989E+07,
0.50921E+07, 0.56313E+07, 0.62202E+07, 0.68626E+07, 0.75628E+07,
0.83251E+07, 0.91542E+07, 0.10055E+08, 0.11033E+08, 0.12093E+08,
0.13242E+08, 0.14486E+08, 0.15831E+08, 0.17284E+08, 0.18853E+08,
0.20546E+08, 0.22371E+08, 0.24335E+08, 0.26450E+08, 0.28724E+08,
0.31167E+08, 0.33790E+08, 0.36605E+08, 0.39623E+08, 0.42856E+08,
0.46318E+08, 0.50022E+08, 0.53983E+08, 0.58215E+08, 0.62735E+08,
0.67558E+08, 0.72702E+08, 0.78186E+08, 0.84028E+08, 0.90247E+08,
0.96865E+08, 0.10390E+09, 0.11138E+09, 0.11933E+09, 0.12777E+09,
0.13672E+09, 0.14622E+09, 0.15629E+09, 0.16695E+09, 0.17825E+09,
0.19021E+09, 0.20287E+09, 0.21625E+09, 0.23039E+09, 0.24534E+09,
0.26113E+09, 0.27779E+09, 0.29538E+09, 0.31392E+09, 0.33348E+09,
0.35409E+09, 0.37580E+09, 0.39867E+09, 0.42274E+09, 0.44806E+09,
0.47470E+09, 0.50271E+09, 0.53215E+09, 0.56308E+09, 0.59557E+09,
0.62968E+09, 0.66548E+09, 0.70304E+09, 0.74243E+09, 0.78374E+09,
0.82703E+09, 0.87240E+09, 0.91992E+09, 0.96967E+09, 0.10218E+10,
0.10763E+10])
# --------------- COF2 369: M = 29, I = 2 --------------------- not in TIPS-2011
M = 29
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M,I)] = float32([0.])
# --------------- SF6 29: M = 30, I = 1 ---------------------
M = 30
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.46373E+05, 0.78844E+05, 0.11939E+06,
0.17183E+06, 0.24247E+06, 0.34059E+06, 0.47963E+06, 0.67906E+06,
0.96713E+06, 0.13848E+07, 0.19911E+07, 0.28714E+07, 0.41481E+07,
0.59956E+07, 0.86617E+07, 0.12496E+08, 0.17991E+08, 0.25832E+08,
0.36971E+08, 0.52724E+08, 0.74895E+08, 0.10595E+09, 0.14923E+09,
0.20925E+09, 0.29208E+09, 0.40582E+09, 0.56124E+09, 0.77259E+09,
0.10586E+10, 0.14439E+10, 0.19605E+10, 0.26500E+10, 0.35662E+10,
0.47781E+10, 0.63747E+10, 0.84689E+10, 0.11205E+11, 0.14765E+11,
0.19378E+11, 0.25336E+11, 0.32998E+11, 0.42819E+11, 0.55361E+11,
0.71323E+11, 0.91569E+11, 0.11716E+12, 0.14941E+12, 0.18992E+12,
0.24065E+12, 0.30398E+12, 0.38283E+12, 0.48069E+12, 0.60182E+12,
0.75136E+12, 0.93546E+12, 0.11615E+13, 0.14384E+13, 0.17767E+13,
0.21890E+13, 0.26903E+13, 0.32984E+13, 0.40344E+13, 0.49232E+13,
0.59942E+13, 0.72819E+13, 0.88272E+13, 0.10678E+14, 0.12889E+14,
0.15527E+14, 0.18666E+14, 0.22397E+14, 0.26823E+14, 0.32062E+14,
0.38253E+14, 0.45558E+14, 0.54161E+14, 0.64277E+14, 0.76153E+14,
0.90072E+14, 0.10636E+15, 0.12539E+15, 0.14759E+15, 0.17345E+15,
0.20354E+15, 0.23848E+15, 0.27902E+15, 0.32597E+15, 0.38028E+15,
0.44303E+15, 0.51542E+15, 0.59883E+15, 0.69482E+15, 0.80516E+15,
0.93182E+15, 0.10770E+16, 0.12434E+16, 0.14336E+16, 0.16511E+16,
0.18992E+16, 0.21821E+16, 0.25043E+16, 0.28709E+16, 0.32875E+16,
0.37604E+16, 0.42968E+16, 0.49046E+16, 0.55925E+16, 0.63704E+16,
0.72492E+16, 0.82411E+16, 0.93596E+16, 0.10620E+17, 0.12038E+17,
0.13633E+17, 0.15425E+17, 0.17438E+17, 0.19694E+17, 0.22224E+17,
0.25057E+17])
# --------------- H2S 121: M = 31, I = 1 ---------------------
M = 31
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.47192E+02, 0.78671E+02, 0.11510E+03,
0.15589E+03, 0.20061E+03, 0.24896E+03, 0.30070E+03, 0.35571E+03,
0.41386E+03, 0.47513E+03, 0.53951E+03, 0.60703E+03, 0.67772E+03,
0.75167E+03, 0.82896E+03, 0.90969E+03, 0.99396E+03, 0.10819E+04,
0.11736E+04, 0.12692E+04, 0.13689E+04, 0.14727E+04, 0.15809E+04,
0.16937E+04, 0.18111E+04, 0.19333E+04, 0.20606E+04, 0.21931E+04,
0.23309E+04, 0.24744E+04, 0.26236E+04, 0.27788E+04, 0.29403E+04,
0.31081E+04, 0.32825E+04, 0.34638E+04, 0.36522E+04, 0.38478E+04,
0.40510E+04, 0.42619E+04, 0.44808E+04, 0.47080E+04, 0.49437E+04,
0.51881E+04, 0.54415E+04, 0.57042E+04, 0.59764E+04, 0.62584E+04,
0.65505E+04, 0.68529E+04, 0.71660E+04, 0.74899E+04, 0.78251E+04,
0.81718E+04, 0.85303E+04, 0.89008E+04, 0.92838E+04, 0.96795E+04,
0.10088E+05, 0.10510E+05, 0.10946E+05, 0.11396E+05, 0.11860E+05,
0.12339E+05, 0.12833E+05, 0.13342E+05, 0.13867E+05, 0.14408E+05,
0.14966E+05, 0.15540E+05, 0.16132E+05, 0.16741E+05, 0.17368E+05,
0.18013E+05, 0.18677E+05, 0.19361E+05, 0.20064E+05, 0.20786E+05,
0.21529E+05, 0.22293E+05, 0.23078E+05, 0.23885E+05, 0.24714E+05,
0.25565E+05, 0.26439E+05, 0.27337E+05, 0.28258E+05, 0.29204E+05,
0.30174E+05, 0.31170E+05, 0.32191E+05, 0.33239E+05, 0.34313E+05,
0.35414E+05, 0.36543E+05, 0.37700E+05, 0.38886E+05, 0.40101E+05,
0.41346E+05, 0.42621E+05, 0.43926E+05, 0.45263E+05, 0.46631E+05,
0.48033E+05, 0.49466E+05, 0.50934E+05, 0.52435E+05, 0.53971E+05,
0.55542E+05, 0.57149E+05, 0.58792E+05, 0.60472E+05, 0.62190E+05,
0.63946E+05, 0.65740E+05, 0.67574E+05, 0.69448E+05, 0.71362E+05,
0.73318E+05])
# --------------- H2S 141: M = 31, I = 2 ---------------------
M = 31
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.47310E+02, 0.78869E+02, 0.11539E+03,
0.15628E+03, 0.20112E+03, 0.24959E+03, 0.30147E+03, 0.35661E+03,
0.41491E+03, 0.47634E+03, 0.54088E+03, 0.60857E+03, 0.67945E+03,
0.75359E+03, 0.83107E+03, 0.91201E+03, 0.99649E+03, 0.10846E+04,
0.11766E+04, 0.12724E+04, 0.13724E+04, 0.14765E+04, 0.15850E+04,
0.16980E+04, 0.18157E+04, 0.19382E+04, 0.20658E+04, 0.21987E+04,
0.23369E+04, 0.24807E+04, 0.26303E+04, 0.27859E+04, 0.29478E+04,
0.31160E+04, 0.32909E+04, 0.34727E+04, 0.36615E+04, 0.38576E+04,
0.40613E+04, 0.42728E+04, 0.44923E+04, 0.47200E+04, 0.49563E+04,
0.52013E+04, 0.54554E+04, 0.57188E+04, 0.59917E+04, 0.62744E+04,
0.65672E+04, 0.68704E+04, 0.71843E+04, 0.75090E+04, 0.78451E+04,
0.81926E+04, 0.85520E+04, 0.89236E+04, 0.93075E+04, 0.97042E+04,
0.10114E+05, 0.10537E+05, 0.10974E+05, 0.11425E+05, 0.11890E+05,
0.12370E+05, 0.12866E+05, 0.13376E+05, 0.13903E+05, 0.14445E+05,
0.15004E+05, 0.15580E+05, 0.16173E+05, 0.16784E+05, 0.17412E+05,
0.18059E+05, 0.18725E+05, 0.19410E+05, 0.20115E+05, 0.20839E+05,
0.21584E+05, 0.22350E+05, 0.23137E+05, 0.23946E+05, 0.24777E+05,
0.25630E+05, 0.26507E+05, 0.27407E+05, 0.28330E+05, 0.29278E+05,
0.30251E+05, 0.31249E+05, 0.32273E+05, 0.33324E+05, 0.34401E+05,
0.35505E+05, 0.36637E+05, 0.37797E+05, 0.38985E+05, 0.40204E+05,
0.41451E+05, 0.42729E+05, 0.44038E+05, 0.45379E+05, 0.46751E+05,
0.48155E+05, 0.49593E+05, 0.51064E+05, 0.52569E+05, 0.54109E+05,
0.55684E+05, 0.57295E+05, 0.58943E+05, 0.60627E+05, 0.62349E+05,
0.64109E+05, 0.65908E+05, 0.67747E+05, 0.69625E+05, 0.71544E+05,
0.73505E+05])
# --------------- H2S 131: M = 31, I = 3 ---------------------
M = 31
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.18901E+03, 0.31509E+03, 0.46102E+03,
0.62437E+03, 0.80349E+03, 0.99713E+03, 0.12044E+04, 0.14247E+04,
0.16576E+04, 0.19030E+04, 0.21609E+04, 0.24313E+04, 0.27145E+04,
0.30106E+04, 0.33202E+04, 0.36436E+04, 0.39811E+04, 0.43332E+04,
0.47005E+04, 0.50835E+04, 0.54827E+04, 0.58987E+04, 0.63321E+04,
0.67836E+04, 0.72538E+04, 0.77434E+04, 0.82532E+04, 0.87838E+04,
0.93360E+04, 0.99106E+04, 0.10508E+05, 0.11130E+05, 0.11777E+05,
0.12449E+05, 0.13147E+05, 0.13874E+05, 0.14628E+05, 0.15412E+05,
0.16225E+05, 0.17070E+05, 0.17947E+05, 0.18857E+05, 0.19801E+05,
0.20780E+05, 0.21795E+05, 0.22847E+05, 0.23937E+05, 0.25067E+05,
0.26236E+05, 0.27448E+05, 0.28702E+05, 0.29999E+05, 0.31342E+05,
0.32730E+05, 0.34166E+05, 0.35650E+05, 0.37184E+05, 0.38769E+05,
0.40406E+05, 0.42097E+05, 0.43842E+05, 0.45644E+05, 0.47503E+05,
0.49421E+05, 0.51399E+05, 0.53439E+05, 0.55542E+05, 0.57709E+05,
0.59942E+05, 0.62242E+05, 0.64611E+05, 0.67051E+05, 0.69563E+05,
0.72148E+05, 0.74808E+05, 0.77545E+05, 0.80360E+05, 0.83255E+05,
0.86232E+05, 0.89291E+05, 0.92435E+05, 0.95667E+05, 0.98986E+05,
0.10240E+06, 0.10590E+06, 0.10949E+06, 0.11318E+06, 0.11697E+06,
0.12086E+06, 0.12484E+06, 0.12893E+06, 0.13313E+06, 0.13743E+06,
0.14184E+06, 0.14637E+06, 0.15100E+06, 0.15575E+06, 0.16062E+06,
0.16560E+06, 0.17071E+06, 0.17594E+06, 0.18129E+06, 0.18677E+06,
0.19238E+06, 0.19813E+06, 0.20400E+06, 0.21002E+06, 0.21617E+06,
0.22246E+06, 0.22890E+06, 0.23548E+06, 0.24221E+06, 0.24909E+06,
0.25612E+06, 0.26331E+06, 0.27065E+06, 0.27816E+06, 0.28583E+06,
0.29366E+06])
# --------------- HCOOH 126: M = 32, I = 1 ---------------------
M = 32
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.31899E+04, 0.53773E+04, 0.79205E+04,
0.10792E+05, 0.13993E+05, 0.17550E+05, 0.21509E+05, 0.25930E+05,
0.30885E+05, 0.36460E+05, 0.42750E+05, 0.49864E+05, 0.57926E+05,
0.67071E+05, 0.77453E+05, 0.89243E+05, 0.10263E+06, 0.11783E+06,
0.13507E+06, 0.15462E+06, 0.17676E+06, 0.20183E+06, 0.23018E+06,
0.26221E+06, 0.29836E+06, 0.33911E+06, 0.38501E+06, 0.43664E+06,
0.49467E+06, 0.55981E+06, 0.63286E+06, 0.71470E+06, 0.80628E+06,
0.90865E+06, 0.10230E+07, 0.11505E+07, 0.12927E+07, 0.14509E+07,
0.16269E+07, 0.18225E+07, 0.20396E+07, 0.22804E+07, 0.25472E+07,
0.28425E+07, 0.31692E+07, 0.35301E+07, 0.39285E+07, 0.43681E+07,
0.48525E+07, 0.53858E+07, 0.59727E+07, 0.66178E+07, 0.73265E+07,
0.81042E+07, 0.89571E+07, 0.98918E+07, 0.10915E+08, 0.12035E+08,
0.13259E+08, 0.14597E+08, 0.16057E+08, 0.17650E+08, 0.19387E+08,
0.21279E+08, 0.23339E+08, 0.25579E+08, 0.28016E+08, 0.30663E+08,
0.33536E+08, 0.36655E+08, 0.40037E+08, 0.43701E+08, 0.47671E+08,
0.51967E+08, 0.56614E+08, 0.61639E+08, 0.67068E+08, 0.72930E+08,
0.79257E+08, 0.86082E+08, 0.93439E+08, 0.10137E+09, 0.10990E+09,
0.11909E+09, 0.12898E+09, 0.13960E+09, 0.15102E+09, 0.16329E+09,
0.17646E+09, 0.19059E+09, 0.20575E+09, 0.22200E+09, 0.23941E+09,
0.25806E+09, 0.27802E+09, 0.29938E+09, 0.32223E+09, 0.34666E+09,
0.37276E+09, 0.40064E+09, 0.43041E+09, 0.46218E+09, 0.49607E+09,
0.53221E+09, 0.57074E+09, 0.61179E+09, 0.65551E+09, 0.70206E+09,
0.75159E+09, 0.80430E+09, 0.86034E+09, 0.91992E+09, 0.98324E+09,
0.10505E+10, 0.11219E+10, 0.11977E+10, 0.12782E+10, 0.13635E+10,
0.14540E+10])
# --------------- HO2 166: M = 33, I = 1 ---------------------
M = 33
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.39277E+03, 0.66062E+03, 0.97123E+03,
0.13194E+04, 0.17014E+04, 0.21148E+04, 0.25578E+04, 0.30296E+04,
0.35297E+04, 0.40585E+04, 0.46167E+04, 0.52055E+04, 0.58264E+04,
0.64809E+04, 0.71707E+04, 0.78978E+04, 0.86641E+04, 0.94715E+04,
0.10322E+05, 0.11218E+05, 0.12161E+05, 0.13154E+05, 0.14198E+05,
0.15296E+05, 0.16449E+05, 0.17661E+05, 0.18933E+05, 0.20267E+05,
0.21666E+05, 0.23133E+05, 0.24669E+05, 0.26277E+05, 0.27960E+05,
0.29720E+05, 0.31560E+05, 0.33482E+05, 0.35489E+05, 0.37584E+05,
0.39769E+05, 0.42048E+05, 0.44423E+05, 0.46898E+05, 0.49475E+05,
0.52157E+05, 0.54948E+05, 0.57850E+05, 0.60868E+05, 0.64003E+05,
0.67261E+05, 0.70643E+05, 0.74154E+05, 0.77797E+05, 0.81575E+05,
0.85492E+05, 0.89553E+05, 0.93760E+05, 0.98118E+05, 0.10263E+06,
0.10730E+06, 0.11213E+06, 0.11713E+06, 0.12230E+06, 0.12765E+06,
0.13317E+06, 0.13888E+06, 0.14478E+06, 0.15086E+06, 0.15715E+06,
0.16363E+06, 0.17032E+06, 0.17723E+06, 0.18434E+06, 0.19168E+06,
0.19924E+06, 0.20704E+06, 0.21506E+06, 0.22333E+06, 0.23185E+06,
0.24061E+06, 0.24963E+06, 0.25891E+06, 0.26846E+06, 0.27828E+06,
0.28838E+06, 0.29876E+06, 0.30943E+06, 0.32039E+06, 0.33166E+06,
0.34323E+06, 0.35512E+06, 0.36732E+06, 0.37985E+06, 0.39271E+06,
0.40590E+06, 0.41944E+06, 0.43333E+06, 0.44758E+06, 0.46219E+06,
0.47717E+06, 0.49252E+06, 0.50826E+06, 0.52439E+06, 0.54091E+06,
0.55784E+06, 0.57518E+06, 0.59293E+06, 0.61112E+06, 0.62973E+06,
0.64878E+06, 0.66828E+06, 0.68824E+06, 0.70866E+06, 0.72955E+06,
0.75091E+06, 0.77276E+06, 0.79511E+06, 0.81795E+06, 0.84131E+06,
0.86518E+06])
# --------------- O 6: M = 34, I = 1 --------------------- not in TIPS-2011
# Placeholder entry: atomic oxygen (molecule number 34) has no partition-sum
# table in TIPS-2011, so the statistical-weight entry is stored as 0 and the
# partition-sum grid collapses to a single zero value. Downstream code
# presumably treats a zero entry as "data unavailable" — TODO confirm.
M = 34
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M,I)] = float32([0.])
# --------------- ClONO2 5646: M = 35, I = 1 ---------------------
M = 35
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.11444E+06, 0.21121E+06, 0.34858E+06,
0.53934E+06, 0.80041E+06, 0.11539E+07, 0.16286E+07, 0.22614E+07,
0.30992E+07, 0.42015E+07, 0.56426E+07, 0.75152E+07, 0.99344E+07,
0.13042E+08, 0.17012E+08, 0.22058E+08, 0.28437E+08, 0.36463E+08,
0.46514E+08, 0.59042E+08, 0.74589E+08, 0.93801E+08, 0.11744E+09,
0.14643E+09, 0.18181E+09, 0.22486E+09, 0.27705E+09, 0.34009E+09,
0.41598E+09, 0.50705E+09, 0.61599E+09, 0.74590E+09, 0.90037E+09,
0.10835E+10, 0.13001E+10, 0.15554E+10, 0.18556E+10, 0.22079E+10,
0.26200E+10, 0.31012E+10, 0.36615E+10, 0.43126E+10, 0.50675E+10,
0.59409E+10, 0.69492E+10, 0.81110E+10, 0.94469E+10, 0.10980E+11,
0.12736E+11, 0.14745E+11, 0.17037E+11, 0.19649E+11, 0.22620E+11,
0.25994E+11, 0.29819E+11, 0.34150E+11, 0.39044E+11, 0.44568E+11,
0.50794E+11, 0.57799E+11, 0.65672E+11, 0.74506E+11, 0.84408E+11,
0.95490E+11, 0.10788E+12, 0.12171E+12, 0.13713E+12, 0.15431E+12,
0.17342E+12, 0.19465E+12, 0.21822E+12, 0.24435E+12, 0.27329E+12,
0.30530E+12, 0.34069E+12, 0.37976E+12, 0.42286E+12, 0.47034E+12,
0.52262E+12, 0.58012E+12, 0.64330E+12, 0.71267E+12, 0.78875E+12,
0.87214E+12, 0.96344E+12, 0.10633E+13, 0.11725E+13, 0.12918E+13,
0.14220E+13, 0.15640E+13, 0.17188E+13, 0.18873E+13, 0.20706E+13,
0.22700E+13, 0.24866E+13, 0.27218E+13, 0.29771E+13, 0.32538E+13,
0.35537E+13, 0.38784E+13, 0.42299E+13, 0.46100E+13, 0.50208E+13,
0.54645E+13, 0.59435E+13, 0.64603E+13, 0.70175E+13, 0.76180E+13,
0.82647E+13, 0.89608E+13, 0.97097E+13, 0.10515E+14, 0.11380E+14,
0.12310E+14, 0.13307E+14, 0.14378E+14, 0.15526E+14, 0.16756E+14,
0.18075E+14])
# --------------- ClONO2 7646: M = 35, I = 2 ---------------------
M = 35
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.11735E+06, 0.21659E+06, 0.35745E+06,
0.55307E+06, 0.82078E+06, 0.11833E+07, 0.16700E+07, 0.23189E+07,
0.31781E+07, 0.43084E+07, 0.57862E+07, 0.77065E+07, 0.10187E+08,
0.13374E+08, 0.17445E+08, 0.22619E+08, 0.29161E+08, 0.37391E+08,
0.47698E+08, 0.60545E+08, 0.76487E+08, 0.96188E+08, 0.12043E+09,
0.15015E+09, 0.18644E+09, 0.23059E+09, 0.28410E+09, 0.34874E+09,
0.42657E+09, 0.51995E+09, 0.63167E+09, 0.76489E+09, 0.92329E+09,
0.11111E+10, 0.13331E+10, 0.15950E+10, 0.19029E+10, 0.22641E+10,
0.26867E+10, 0.31801E+10, 0.37547E+10, 0.44224E+10, 0.51965E+10,
0.60921E+10, 0.71261E+10, 0.83174E+10, 0.96873E+10, 0.11260E+11,
0.13061E+11, 0.15120E+11, 0.17471E+11, 0.20149E+11, 0.23196E+11,
0.26656E+11, 0.30578E+11, 0.35019E+11, 0.40038E+11, 0.45703E+11,
0.52087E+11, 0.59270E+11, 0.67343E+11, 0.76403E+11, 0.86556E+11,
0.97921E+11, 0.11062E+12, 0.12481E+12, 0.14062E+12, 0.15824E+12,
0.17783E+12, 0.19961E+12, 0.22377E+12, 0.25057E+12, 0.28024E+12,
0.31308E+12, 0.34936E+12, 0.38943E+12, 0.43362E+12, 0.48232E+12,
0.53593E+12, 0.59489E+12, 0.65968E+12, 0.73081E+12, 0.80883E+12,
0.89434E+12, 0.98797E+12, 0.10904E+13, 0.12024E+13, 0.13247E+13,
0.14582E+13, 0.16038E+13, 0.17625E+13, 0.19353E+13, 0.21233E+13,
0.23278E+13, 0.25499E+13, 0.27911E+13, 0.30528E+13, 0.33366E+13,
0.36442E+13, 0.39772E+13, 0.43376E+13, 0.47273E+13, 0.51486E+13,
0.56036E+13, 0.60948E+13, 0.66248E+13, 0.71962E+13, 0.78119E+13,
0.84751E+13, 0.91889E+13, 0.99569E+13, 0.10783E+14, 0.11670E+14,
0.12623E+14, 0.13646E+14, 0.14744E+14, 0.15921E+14, 0.17183E+14,
0.18535E+14])
# --------------- NOp 46: M = 36, I = 1 ---------------------
M = 36
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.63956E+02, 0.90185E+02, 0.11642E+03,
0.14265E+03, 0.16889E+03, 0.19513E+03, 0.22138E+03, 0.24763E+03,
0.27388E+03, 0.30013E+03, 0.32639E+03, 0.35266E+03, 0.37894E+03,
0.40523E+03, 0.43155E+03, 0.45790E+03, 0.48429E+03, 0.51074E+03,
0.53725E+03, 0.56383E+03, 0.59052E+03, 0.61731E+03, 0.64422E+03,
0.67127E+03, 0.69846E+03, 0.72582E+03, 0.75335E+03, 0.78108E+03,
0.80901E+03, 0.83715E+03, 0.86552E+03, 0.89413E+03, 0.92298E+03,
0.95208E+03, 0.98144E+03, 0.10111E+04, 0.10410E+04, 0.10712E+04,
0.11017E+04, 0.11325E+04, 0.11636E+04, 0.11950E+04, 0.12268E+04,
0.12588E+04, 0.12912E+04, 0.13239E+04, 0.13570E+04, 0.13903E+04,
0.14241E+04, 0.14581E+04, 0.14926E+04, 0.15273E+04, 0.15624E+04,
0.15979E+04, 0.16337E+04, 0.16699E+04, 0.17065E+04, 0.17434E+04,
0.17806E+04, 0.18183E+04, 0.18563E+04, 0.18947E+04, 0.19334E+04,
0.19725E+04, 0.20120E+04, 0.20519E+04, 0.20921E+04, 0.21327E+04,
0.21737E+04, 0.22151E+04, 0.22568E+04, 0.22990E+04, 0.23415E+04,
0.23844E+04, 0.24276E+04, 0.24713E+04, 0.25153E+04, 0.25598E+04,
0.26046E+04, 0.26497E+04, 0.26953E+04, 0.27413E+04, 0.27876E+04,
0.28343E+04, 0.28815E+04, 0.29290E+04, 0.29769E+04, 0.30251E+04,
0.30738E+04, 0.31229E+04, 0.31723E+04, 0.32222E+04, 0.32724E+04,
0.33230E+04, 0.33740E+04, 0.34254E+04, 0.34772E+04, 0.35294E+04,
0.35819E+04, 0.36349E+04, 0.36883E+04, 0.37420E+04, 0.37961E+04,
0.38507E+04, 0.39056E+04, 0.39609E+04, 0.40166E+04, 0.40727E+04,
0.41292E+04, 0.41861E+04, 0.42434E+04, 0.43010E+04, 0.43591E+04,
0.44176E+04, 0.44764E+04, 0.45357E+04, 0.45953E+04, 0.46554E+04,
0.47158E+04])
# --------------- HOBr 169: M = 37, I = 1 ---------------------
M = 37
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.24445E+04, 0.41206E+04, 0.60683E+04,
0.82610E+04, 0.10689E+05, 0.13352E+05, 0.16261E+05, 0.19427E+05,
0.22867E+05, 0.26600E+05, 0.30643E+05, 0.35018E+05, 0.39745E+05,
0.44844E+05, 0.50338E+05, 0.56249E+05, 0.62599E+05, 0.69410E+05,
0.76706E+05, 0.84509E+05, 0.92845E+05, 0.10174E+06, 0.11121E+06,
0.12128E+06, 0.13199E+06, 0.14335E+06, 0.15540E+06, 0.16815E+06,
0.18165E+06, 0.19591E+06, 0.21096E+06, 0.22684E+06, 0.24358E+06,
0.26120E+06, 0.27974E+06, 0.29922E+06, 0.31969E+06, 0.34118E+06,
0.36372E+06, 0.38735E+06, 0.41210E+06, 0.43800E+06, 0.46511E+06,
0.49345E+06, 0.52307E+06, 0.55400E+06, 0.58628E+06, 0.61997E+06,
0.65509E+06, 0.69170E+06, 0.72984E+06, 0.76954E+06, 0.81087E+06,
0.85386E+06, 0.89856E+06, 0.94502E+06, 0.99329E+06, 0.10434E+07,
0.10955E+07, 0.11495E+07, 0.12055E+07, 0.12636E+07, 0.13238E+07,
0.13862E+07, 0.14508E+07, 0.15177E+07, 0.15870E+07, 0.16587E+07,
0.17328E+07, 0.18095E+07, 0.18888E+07, 0.19707E+07, 0.20554E+07,
0.21428E+07, 0.22331E+07, 0.23263E+07, 0.24225E+07, 0.25217E+07,
0.26241E+07, 0.27296E+07, 0.28385E+07, 0.29506E+07, 0.30662E+07,
0.31853E+07, 0.33079E+07, 0.34341E+07, 0.35641E+07, 0.36979E+07,
0.38355E+07, 0.39771E+07, 0.41228E+07, 0.42725E+07, 0.44265E+07,
0.45848E+07, 0.47474E+07, 0.49145E+07, 0.50862E+07, 0.52624E+07,
0.54435E+07, 0.56293E+07, 0.58201E+07, 0.60159E+07, 0.62168E+07,
0.64229E+07, 0.66343E+07, 0.68511E+07, 0.70734E+07, 0.73013E+07,
0.75349E+07, 0.77742E+07, 0.80196E+07, 0.82709E+07, 0.85283E+07,
0.87920E+07, 0.90620E+07, 0.93385E+07, 0.96215E+07, 0.99112E+07,
0.10208E+08])
# --------------- HOBr 161: M = 37, I = 2 ---------------------
M = 37
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M,I)] = float32([0.24350E+04, 0.41047E+04, 0.60448E+04,
0.82291E+04, 0.10648E+05, 0.13301E+05, 0.16200E+05, 0.19355E+05,
0.22784E+05, 0.26504E+05, 0.30534E+05, 0.34895E+05, 0.39607E+05,
0.44691E+05, 0.50169E+05, 0.56063E+05, 0.62394E+05, 0.69186E+05,
0.76461E+05, 0.84243E+05, 0.92555E+05, 0.10142E+06, 0.11087E+06,
0.12091E+06, 0.13159E+06, 0.14292E+06, 0.15494E+06, 0.16766E+06,
0.18112E+06, 0.19534E+06, 0.21036E+06, 0.22620E+06, 0.24289E+06,
0.26047E+06, 0.27896E+06, 0.29840E+06, 0.31882E+06, 0.34025E+06,
0.36274E+06, 0.38630E+06, 0.41099E+06, 0.43683E+06, 0.46387E+06,
0.49215E+06, 0.52169E+06, 0.55255E+06, 0.58475E+06, 0.61836E+06,
0.65340E+06, 0.68992E+06, 0.72796E+06, 0.76757E+06, 0.80880E+06,
0.85169E+06, 0.89628E+06, 0.94263E+06, 0.99079E+06, 0.10408E+07,
0.10927E+07, 0.11466E+07, 0.12025E+07, 0.12605E+07, 0.13205E+07,
0.13828E+07, 0.14472E+07, 0.15140E+07, 0.15831E+07, 0.16546E+07,
0.17286E+07, 0.18051E+07, 0.18842E+07, 0.19660E+07, 0.20504E+07,
0.21377E+07, 0.22277E+07, 0.23207E+07, 0.24167E+07, 0.25157E+07,
0.26178E+07, 0.27231E+07, 0.28317E+07, 0.29436E+07, 0.30589E+07,
0.31777E+07, 0.33001E+07, 0.34260E+07, 0.35557E+07, 0.36892E+07,
0.38265E+07, 0.39678E+07, 0.41131E+07, 0.42626E+07, 0.44162E+07,
0.45741E+07, 0.47364E+07, 0.49031E+07, 0.50744E+07, 0.52503E+07,
0.54309E+07, 0.56164E+07, 0.58067E+07, 0.60021E+07, 0.62025E+07,
0.64081E+07, 0.66191E+07, 0.68354E+07, 0.70572E+07, 0.72846E+07,
0.75177E+07, 0.77565E+07, 0.80013E+07, 0.82521E+07, 0.85090E+07,
0.87721E+07, 0.90415E+07, 0.93173E+07, 0.95997E+07, 0.98888E+07,
0.10185E+08])
# --------------- C2H4 221: M = 38, I = 1 ---------------------
M = 38
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.95843E+03, 0.16137E+04, 0.23744E+04,
0.32285E+04, 0.41694E+04, 0.51963E+04, 0.63143E+04, 0.75337E+04,
0.88702E+04, 0.10344E+05, 0.11978E+05, 0.13802E+05, 0.15846E+05,
0.18145E+05, 0.20740E+05, 0.23675E+05, 0.27000E+05, 0.30770E+05,
0.35048E+05, 0.39905E+05, 0.45420E+05, 0.51680E+05, 0.58786E+05,
0.66850E+05, 0.75997E+05, 0.86369E+05, 0.98123E+05, 0.11144E+06,
0.12651E+06, 0.14356E+06, 0.16284E+06, 0.18463E+06, 0.20923E+06,
0.23699E+06, 0.26831E+06, 0.30360E+06, 0.34334E+06, 0.38808E+06,
0.43840E+06, 0.49495E+06, 0.55847E+06, 0.62976E+06, 0.70973E+06,
0.79935E+06, 0.89973E+06, 0.10121E+07, 0.11378E+07, 0.12782E+07,
0.14351E+07, 0.16102E+07, 0.18055E+07, 0.20231E+07, 0.22656E+07,
0.25354E+07, 0.28356E+07, 0.31692E+07, 0.35398E+07, 0.39511E+07,
0.44074E+07, 0.49132E+07, 0.54736E+07, 0.60940E+07, 0.67803E+07,
0.75392E+07, 0.83776E+07, 0.93035E+07, 0.10325E+08, 0.11452E+08,
0.12694E+08, 0.14062E+08, 0.15567E+08, 0.17224E+08, 0.19045E+08,
0.21046E+08, 0.23243E+08, 0.25655E+08, 0.28300E+08, 0.31200E+08,
0.34377E+08, 0.37856E+08, 0.41662E+08, 0.45826E+08, 0.50378E+08,
0.55351E+08, 0.60781E+08, 0.66707E+08, 0.73172E+08, 0.80219E+08,
0.87899E+08, 0.96262E+08, 0.10537E+09, 0.11527E+09, 0.12604E+09,
0.13775E+09, 0.15047E+09, 0.16428E+09, 0.17927E+09, 0.19553E+09,
0.21316E+09, 0.23226E+09, 0.25296E+09, 0.27537E+09, 0.29963E+09,
0.32587E+09, 0.35425E+09, 0.38492E+09, 0.41805E+09, 0.45383E+09,
0.49246E+09, 0.53413E+09, 0.57908E+09, 0.62754E+09, 0.67977E+09,
0.73602E+09, 0.79660E+09, 0.86179E+09, 0.93194E+09, 0.10074E+10,
0.10885E+10])
# --------------- C2H4 231: M = 38, I = 2 ---------------------
M = 38
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.39228E+04, 0.66051E+04, 0.97190E+04,
0.13215E+05, 0.17066E+05, 0.21270E+05, 0.25846E+05, 0.30838E+05,
0.36309E+05, 0.42341E+05, 0.49032E+05, 0.56496E+05, 0.64862E+05,
0.74275E+05, 0.84897E+05, 0.96912E+05, 0.11052E+06, 0.12595E+06,
0.14347E+06, 0.16335E+06, 0.18592E+06, 0.21155E+06, 0.24064E+06,
0.27365E+06, 0.31109E+06, 0.35354E+06, 0.40166E+06, 0.45615E+06,
0.51785E+06, 0.58765E+06, 0.66657E+06, 0.75575E+06, 0.85646E+06,
0.97011E+06, 0.10983E+07, 0.12428E+07, 0.14055E+07, 0.15886E+07,
0.17945E+07, 0.20260E+07, 0.22861E+07, 0.25779E+07, 0.29052E+07,
0.32721E+07, 0.36830E+07, 0.41429E+07, 0.46573E+07, 0.52323E+07,
0.58744E+07, 0.65912E+07, 0.73906E+07, 0.82816E+07, 0.92740E+07,
0.10379E+08, 0.11607E+08, 0.12973E+08, 0.14490E+08, 0.16174E+08,
0.18042E+08, 0.20112E+08, 0.22406E+08, 0.24945E+08, 0.27755E+08,
0.30861E+08, 0.34293E+08, 0.38083E+08, 0.42266E+08, 0.46878E+08,
0.51961E+08, 0.57560E+08, 0.63724E+08, 0.70504E+08, 0.77959E+08,
0.86150E+08, 0.95145E+08, 0.10502E+09, 0.11585E+09, 0.12772E+09,
0.14072E+09, 0.15496E+09, 0.17054E+09, 0.18759E+09, 0.20622E+09,
0.22658E+09, 0.24880E+09, 0.27306E+09, 0.29952E+09, 0.32837E+09,
0.35981E+09, 0.39404E+09, 0.43131E+09, 0.47186E+09, 0.51595E+09,
0.56387E+09, 0.61594E+09, 0.67247E+09, 0.73382E+09, 0.80038E+09,
0.87255E+09, 0.95076E+09, 0.10355E+10, 0.11272E+10, 0.12265E+10,
0.13339E+10, 0.14501E+10, 0.15756E+10, 0.17113E+10, 0.18577E+10,
0.20159E+10, 0.21865E+10, 0.23705E+10, 0.25688E+10, 0.27826E+10,
0.30129E+10, 0.32608E+10, 0.35277E+10, 0.38149E+10, 0.41237E+10,
0.44557E+10])
# --------------- CH3OH 2161: M = 39, I = 1 --------------------- not in TIPS-2011
# Placeholder entry: methanol (molecule number 39) has no partition-sum table
# in TIPS-2011, so the statistical-weight entry is stored as 0 and the
# partition-sum grid collapses to a single zero value, mirroring the other
# "not in TIPS-2011" entries in this table (e.g. M = 34).
M = 39
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M,I)] = float32([0.])
# --------------- CH3Br 219: M = 40, I = 1 ---------------------
M = 40
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.70299E+04, 0.11847E+05, 0.17442E+05,
0.23741E+05, 0.30723E+05, 0.38408E+05, 0.46851E+05, 0.56138E+05,
0.66375E+05, 0.77692E+05, 0.90239E+05, 0.10418E+06, 0.11972E+06,
0.13704E+06, 0.15639E+06, 0.17801E+06, 0.20218E+06, 0.22920E+06,
0.25940E+06, 0.29316E+06, 0.33087E+06, 0.37296E+06, 0.41992E+06,
0.47229E+06, 0.53062E+06, 0.59557E+06, 0.66781E+06, 0.74812E+06,
0.83731E+06, 0.93629E+06, 0.10461E+07, 0.11677E+07, 0.13023E+07,
0.14513E+07, 0.16159E+07, 0.17978E+07, 0.19985E+07, 0.22199E+07,
0.24638E+07, 0.27324E+07, 0.30280E+07, 0.33529E+07, 0.37099E+07,
0.41019E+07, 0.45319E+07, 0.50034E+07, 0.55199E+07, 0.60853E+07,
0.67039E+07, 0.73801E+07, 0.81189E+07, 0.89255E+07, 0.98056E+07,
0.10765E+08, 0.11811E+08, 0.12949E+08, 0.14188E+08, 0.15535E+08,
0.17000E+08, 0.18590E+08, 0.20317E+08, 0.22190E+08, 0.24220E+08,
0.26421E+08, 0.28804E+08, 0.31383E+08, 0.34173E+08, 0.37189E+08,
0.40448E+08, 0.43967E+08, 0.47765E+08, 0.51862E+08, 0.56280E+08,
0.61040E+08, 0.66167E+08, 0.71686E+08, 0.77624E+08, 0.84009E+08,
0.90873E+08, 0.98247E+08, 0.10616E+09, 0.11466E+09, 0.12378E+09,
0.13356E+09, 0.14403E+09, 0.15526E+09, 0.16728E+09, 0.18014E+09,
0.19391E+09, 0.20863E+09, 0.22436E+09, 0.24117E+09, 0.25913E+09,
0.27830E+09, 0.29875E+09, 0.32057E+09, 0.34384E+09, 0.36864E+09,
0.39506E+09, 0.42320E+09, 0.45316E+09, 0.48504E+09, 0.51896E+09,
0.55502E+09, 0.59336E+09, 0.63410E+09, 0.67738E+09, 0.72334E+09,
0.77212E+09, 0.82388E+09, 0.87879E+09, 0.93701E+09, 0.99873E+09,
0.10641E+10, 0.11334E+10, 0.12068E+10, 0.12845E+10, 0.13667E+10,
0.14536E+10])
# --------------- CH3Br 211: M = 40, I = 2 ---------------------
M = 40
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.70566E+04, 0.11892E+05, 0.17508E+05,
0.23832E+05, 0.30841E+05, 0.38557E+05, 0.47036E+05, 0.56362E+05,
0.66644E+05, 0.78011E+05, 0.90615E+05, 0.10462E+06, 0.12023E+06,
0.13763E+06, 0.15707E+06, 0.17880E+06, 0.20308E+06, 0.23023E+06,
0.26059E+06, 0.29451E+06, 0.33240E+06, 0.37471E+06, 0.42191E+06,
0.47453E+06, 0.53316E+06, 0.59843E+06, 0.67104E+06, 0.75176E+06,
0.84141E+06, 0.94090E+06, 0.10512E+07, 0.11735E+07, 0.13088E+07,
0.14585E+07, 0.16241E+07, 0.18069E+07, 0.20086E+07, 0.22312E+07,
0.24764E+07, 0.27464E+07, 0.30435E+07, 0.33702E+07, 0.37291E+07,
0.41231E+07, 0.45554E+07, 0.50294E+07, 0.55486E+07, 0.61171E+07,
0.67389E+07, 0.74188E+07, 0.81616E+07, 0.89725E+07, 0.98573E+07,
0.10822E+08, 0.11873E+08, 0.13018E+08, 0.14263E+08, 0.15618E+08,
0.17090E+08, 0.18689E+08, 0.20425E+08, 0.22308E+08, 0.24350E+08,
0.26563E+08, 0.28959E+08, 0.31552E+08, 0.34357E+08, 0.37389E+08,
0.40666E+08, 0.44204E+08, 0.48023E+08, 0.52143E+08, 0.56585E+08,
0.61371E+08, 0.66526E+08, 0.72076E+08, 0.78046E+08, 0.84467E+08,
0.91369E+08, 0.98783E+08, 0.10674E+09, 0.11529E+09, 0.12446E+09,
0.13429E+09, 0.14482E+09, 0.15611E+09, 0.16820E+09, 0.18113E+09,
0.19497E+09, 0.20978E+09, 0.22560E+09, 0.24250E+09, 0.26056E+09,
0.27983E+09, 0.30040E+09, 0.32234E+09, 0.34574E+09, 0.37068E+09,
0.39725E+09, 0.42555E+09, 0.45567E+09, 0.48773E+09, 0.52184E+09,
0.55811E+09, 0.59666E+09, 0.63763E+09, 0.68115E+09, 0.72736E+09,
0.77642E+09, 0.82847E+09, 0.88368E+09, 0.94223E+09, 0.10043E+10,
0.10701E+10, 0.11397E+10, 0.12135E+10, 0.12916E+10, 0.13743E+10,
0.14618E+10])
# --------------- CH3CN 2124: M = 41, I = 1 ---------------------
M = 41
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M,I)] = float32([0.54361E+04, 0.91953E+04, 0.13708E+05,
0.19097E+05, 0.25531E+05, 0.33206E+05, 0.42337E+05, 0.53173E+05,
0.66002E+05, 0.81163E+05, 0.99053E+05, 0.12014E+06, 0.14496E+06,
0.17414E+06, 0.20843E+06, 0.24866E+06, 0.29580E+06, 0.35099E+06,
0.41551E+06, 0.49085E+06, 0.57871E+06, 0.68104E+06, 0.80008E+06,
0.93836E+06, 0.10988E+07, 0.12848E+07, 0.14999E+07, 0.17487E+07,
0.20359E+07, 0.23670E+07, 0.27484E+07, 0.31871E+07, 0.36912E+07,
0.42697E+07, 0.49328E+07, 0.56921E+07, 0.65605E+07, 0.75526E+07,
0.86847E+07, 0.99753E+07, 0.11445E+08, 0.13116E+08, 0.15016E+08,
0.17172E+08, 0.19617E+08, 0.22386E+08, 0.25520E+08, 0.29063E+08,
0.33064E+08, 0.37578E+08, 0.42667E+08, 0.48397E+08, 0.54844E+08,
0.62090E+08, 0.70228E+08, 0.79358E+08, 0.89592E+08, 0.10105E+09,
0.11388E+09, 0.12822E+09, 0.14424E+09, 0.16212E+09, 0.18205E+09,
0.20427E+09, 0.22900E+09, 0.25652E+09, 0.28710E+09, 0.32107E+09,
0.35877E+09, 0.40059E+09, 0.44692E+09, 0.49822E+09, 0.55500E+09,
0.61777E+09, 0.68712E+09, 0.76370E+09, 0.84819E+09, 0.94135E+09,
0.10440E+10, 0.11570E+10, 0.12814E+10, 0.14181E+10, 0.15684E+10,
0.17334E+10, 0.19145E+10, 0.21131E+10, 0.23308E+10, 0.25693E+10,
0.28304E+10, 0.31161E+10, 0.34285E+10, 0.37698E+10, 0.41426E+10,
0.45496E+10, 0.49935E+10, 0.54776E+10, 0.60051E+10, 0.65796E+10,
0.72049E+10, 0.78853E+10, 0.86251E+10, 0.94291E+10, 0.10303E+11,
0.11251E+11, 0.12280E+11, 0.13396E+11, 0.14606E+11, 0.15916E+11,
0.17336E+11, 0.18873E+11, 0.20536E+11, 0.22334E+11, 0.24278E+11,
0.26379E+11, 0.28647E+11, 0.31096E+11, 0.33739E+11, 0.36589E+11,
0.39661E+11])
# --------------- CH3CN 2134: M = 41, I = 2 --------------------- not in HITRAN-2012
M = 41
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.10906E+05, 0.18458E+05, 0.27552E+05,
0.38455E+05, 0.51523E+05, 0.67161E+05, 0.85818E+05, 0.10801E+06,
0.13434E+06, 0.16550E+06, 0.20234E+06, 0.24581E+06, 0.29705E+06,
0.35737E+06, 0.42831E+06, 0.51162E+06, 0.60936E+06, 0.72387E+06,
0.85786E+06, 0.10145E+07, 0.11972E+07, 0.14102E+07, 0.16582E+07,
0.19465E+07, 0.22813E+07, 0.26695E+07, 0.31190E+07, 0.36390E+07,
0.42397E+07, 0.49328E+07, 0.57314E+07, 0.66507E+07, 0.77076E+07,
0.89211E+07, 0.10313E+08, 0.11907E+08, 0.13732E+08, 0.15817E+08,
0.18198E+08, 0.20914E+08, 0.24007E+08, 0.27527E+08, 0.31529E+08,
0.36073E+08, 0.41228E+08, 0.47070E+08, 0.53683E+08, 0.61162E+08,
0.69612E+08, 0.79149E+08, 0.89903E+08, 0.10202E+09, 0.11565E+09,
0.13098E+09, 0.14820E+09, 0.16753E+09, 0.18921E+09, 0.21349E+09,
0.24066E+09, 0.27106E+09, 0.30502E+09, 0.34293E+09, 0.38523E+09,
0.43237E+09, 0.48486E+09, 0.54328E+09, 0.60823E+09, 0.68039E+09,
0.76049E+09, 0.84935E+09, 0.94784E+09, 0.10569E+10, 0.11777E+10,
0.13112E+10, 0.14588E+10, 0.16217E+10, 0.18016E+10, 0.19999E+10,
0.22185E+10, 0.24592E+10, 0.27241E+10, 0.30155E+10, 0.33357E+10,
0.36875E+10, 0.40736E+10, 0.44971E+10, 0.49615E+10, 0.54702E+10,
0.60273E+10, 0.66369E+10, 0.73035E+10, 0.80322E+10, 0.88282E+10,
0.96972E+10, 0.10645E+11, 0.11679E+11, 0.12806E+11, 0.14034E+11,
0.15370E+11, 0.16824E+11, 0.18406E+11, 0.20125E+11, 0.21992E+11,
0.24020E+11, 0.26221E+11, 0.28608E+11, 0.31197E+11, 0.34002E+11,
0.37040E+11, 0.40330E+11, 0.43889E+11, 0.47739E+11, 0.51902E+11,
0.56400E+11, 0.61259E+11, 0.66504E+11, 0.72165E+11, 0.78272E+11,
0.84856E+11])
# --------------- CH3CN 3124: M = 41, I = 3 --------------------- not in HITRAN-2012
M = 41
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.11223E+05, 0.18985E+05, 0.28307E+05,
0.39441E+05, 0.52744E+05, 0.68620E+05, 0.87523E+05, 0.10997E+06,
0.13658E+06, 0.16806E+06, 0.20524E+06, 0.24910E+06, 0.30080E+06,
0.36165E+06, 0.43319E+06, 0.51722E+06, 0.61579E+06, 0.73127E+06,
0.86640E+06, 0.10243E+07, 0.12086E+07, 0.14234E+07, 0.16735E+07,
0.19642E+07, 0.23017E+07, 0.26931E+07, 0.31464E+07, 0.36706E+07,
0.42762E+07, 0.49749E+07, 0.57801E+07, 0.67069E+07, 0.77722E+07,
0.89955E+07, 0.10398E+08, 0.12006E+08, 0.13845E+08, 0.15947E+08,
0.18346E+08, 0.21083E+08, 0.24201E+08, 0.27748E+08, 0.31781E+08,
0.36361E+08, 0.41556E+08, 0.47442E+08, 0.54106E+08, 0.61643E+08,
0.70157E+08, 0.79767E+08, 0.90604E+08, 0.10281E+09, 0.11655E+09,
0.13199E+09, 0.14935E+09, 0.16882E+09, 0.19065E+09, 0.21512E+09,
0.24250E+09, 0.27312E+09, 0.30733E+09, 0.34553E+09, 0.38814E+09,
0.43562E+09, 0.48851E+09, 0.54736E+09, 0.61279E+09, 0.68548E+09,
0.76617E+09, 0.85568E+09, 0.95489E+09, 0.10648E+10, 0.11864E+10,
0.13209E+10, 0.14695E+10, 0.16337E+10, 0.18148E+10, 0.20146E+10,
0.22348E+10, 0.24772E+10, 0.27441E+10, 0.30375E+10, 0.33601E+10,
0.37143E+10, 0.41032E+10, 0.45298E+10, 0.49975E+10, 0.55099E+10,
0.60709E+10, 0.66849E+10, 0.73563E+10, 0.80902E+10, 0.88918E+10,
0.97670E+10, 0.10722E+11, 0.11763E+11, 0.12898E+11, 0.14134E+11,
0.15480E+11, 0.16945E+11, 0.18537E+11, 0.20269E+11, 0.22149E+11,
0.24191E+11, 0.26408E+11, 0.28812E+11, 0.31419E+11, 0.34244E+11,
0.37303E+11, 0.40616E+11, 0.44201E+11, 0.48078E+11, 0.52269E+11,
0.56799E+11, 0.61692E+11, 0.66974E+11, 0.72675E+11, 0.78824E+11,
0.85454E+11])
# --------------- CH3CN 3134: M = 41, I = 4 --------------------- not in HITRAN-2012
M = 41
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.22522E+05, 0.38117E+05, 0.56899E+05,
0.79412E+05, 0.10640E+06, 0.13870E+06, 0.17726E+06, 0.22314E+06,
0.27761E+06, 0.34214E+06, 0.41847E+06, 0.50862E+06, 0.61497E+06,
0.74028E+06, 0.88774E+06, 0.10611E+07, 0.12646E+07, 0.15031E+07,
0.17825E+07, 0.21092E+07, 0.24908E+07, 0.29358E+07, 0.34541E+07,
0.40571E+07, 0.47576E+07, 0.55703E+07, 0.65120E+07, 0.76018E+07,
0.88614E+07, 0.10315E+08, 0.11992E+08, 0.13922E+08, 0.16142E+08,
0.18693E+08, 0.21619E+08, 0.24973E+08, 0.28812E+08, 0.33202E+08,
0.38216E+08, 0.43936E+08, 0.50455E+08, 0.57876E+08, 0.66315E+08,
0.75901E+08, 0.86779E+08, 0.99110E+08, 0.11307E+09, 0.12887E+09,
0.14672E+09, 0.16688E+09, 0.18961E+09, 0.21523E+09, 0.24407E+09,
0.27651E+09, 0.31295E+09, 0.35387E+09, 0.39975E+09, 0.45118E+09,
0.50875E+09, 0.57315E+09, 0.64512E+09, 0.72549E+09, 0.81517E+09,
0.91514E+09, 0.10265E+10, 0.11504E+10, 0.12883E+10, 0.14414E+10,
0.16115E+10, 0.18001E+10, 0.20093E+10, 0.22410E+10, 0.24975E+10,
0.27812E+10, 0.30948E+10, 0.34412E+10, 0.38235E+10, 0.42452E+10,
0.47101E+10, 0.52220E+10, 0.57856E+10, 0.64055E+10, 0.70869E+10,
0.78355E+10, 0.86574E+10, 0.95591E+10, 0.10548E+11, 0.11631E+11,
0.12817E+11, 0.14116E+11, 0.15536E+11, 0.17088E+11, 0.18785E+11,
0.20636E+11, 0.22657E+11, 0.24861E+11, 0.27264E+11, 0.29881E+11,
0.32730E+11, 0.35832E+11, 0.39205E+11, 0.42871E+11, 0.46855E+11,
0.51182E+11, 0.55878E+11, 0.60973E+11, 0.66497E+11, 0.72484E+11,
0.78970E+11, 0.85992E+11, 0.93592E+11, 0.10181E+12, 0.11070E+12,
0.12031E+12, 0.13069E+12, 0.14189E+12, 0.15398E+12, 0.16703E+12,
0.18110E+12])
# --------------- CF4 29: M = 42, I = 1 ---------------------
M = 42
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.76233E+04, 0.12867E+05, 0.19059E+05,
0.26316E+05, 0.34895E+05, 0.45145E+05, 0.57461E+05, 0.72259E+05,
0.89950E+05, 0.11092E+06, 0.13550E+06, 0.16399E+06, 0.19658E+06,
0.23341E+06, 0.27457E+06, 0.32004E+06, 0.36978E+06, 0.42369E+06,
0.48161E+06, 0.54338E+06, 0.60880E+06, 0.67764E+06, 0.55684E+07,
0.71250E+07, 0.90615E+07, 0.11458E+08, 0.14407E+08, 0.18021E+08,
0.22428E+08, 0.27778E+08, 0.34247E+08, 0.42038E+08, 0.51386E+08,
0.62559E+08, 0.75869E+08, 0.91670E+08, 0.11037E+09, 0.13242E+09,
0.15836E+09, 0.18878E+09, 0.22436E+09, 0.26584E+09, 0.31410E+09,
0.37008E+09, 0.43488E+09, 0.50970E+09, 0.59589E+09, 0.69496E+09,
0.80858E+09, 0.93863E+09, 0.10872E+10, 0.12565E+10, 0.14491E+10,
0.16679E+10, 0.19159E+10, 0.21966E+10, 0.25136E+10, 0.28711E+10,
0.32740E+10, 0.37260E+10, 0.42340E+10, 0.48030E+10, 0.54400E+10,
0.61520E+10, 0.69470E+10, 0.78320E+10, 0.88170E+10, 0.99120E+10,
0.11130E+11, 0.12470E+11, 0.13970E+11, 0.15620E+11, 0.17440E+11,
0.19450E+11, 0.21670E+11, 0.24100E+11, 0.26790E+11, 0.29730E+11,
0.33000E+11, 0.36500E+11, 0.40400E+11, 0.44600E+11, 0.49300E+11,
0.54300E+11, 0.59800E+11, 0.65800E+11, 0.72400E+11, 0.79500E+11,
0.87200E+11, 0.95500E+11, 0.10500E+12, 0.11400E+12, 0.12500E+12,
0.13600E+12, 0.14900E+12, 0.16200E+12, 0.17700E+12, 0.19200E+12,
0.21000E+12, 0.23000E+12, 0.25000E+12, 0.27000E+12, 0.29000E+12,
0.31000E+12, 0.34000E+12, 0.36000E+12, 0.39000E+12, 0.42000E+12,
0.46000E+12, 0.49000E+12, 0.53000E+12, 0.57000E+12, 0.61000E+12,
0.66000E+12, 0.70000E+12, 0.75000E+12, 0.81000E+12, 0.86000E+12,
0.93000E+12])
# --------------- C4H2 1221: M = 43, I = 1 ---------------------
M = 43
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.57628E+03, 0.84874E+03, 0.11789E+04,
0.15952E+04, 0.21317E+04, 0.28324E+04, 0.37543E+04, 0.49705E+04,
0.65754E+04, 0.86894E+04, 0.11466E+05, 0.15099E+05, 0.19834E+05,
0.25980E+05, 0.33920E+05, 0.44132E+05, 0.57210E+05, 0.73884E+05,
0.95049E+05, 0.12180E+06, 0.15548E+06, 0.19771E+06, 0.25045E+06,
0.31606E+06, 0.39739E+06, 0.49786E+06, 0.62152E+06, 0.77324E+06,
0.95878E+06, 0.11850E+07, 0.14599E+07, 0.17930E+07, 0.21956E+07,
0.26807E+07, 0.32637E+07, 0.39626E+07, 0.47983E+07, 0.57951E+07,
0.69813E+07, 0.83896E+07, 0.10058E+08, 0.12030E+08, 0.14356E+08,
0.17093E+08, 0.20309E+08, 0.24079E+08, 0.28491E+08, 0.33644E+08,
0.39651E+08, 0.46642E+08, 0.54764E+08, 0.64184E+08, 0.75091E+08,
0.87699E+08, 0.10225E+09, 0.11902E+09, 0.13832E+09, 0.16049E+09,
0.18593E+09, 0.21507E+09, 0.24841E+09, 0.28650E+09, 0.32996E+09,
0.37949E+09, 0.43586E+09, 0.49993E+09, 0.57266E+09, 0.65513E+09,
0.74852E+09, 0.85418E+09, 0.97356E+09, 0.11083E+10, 0.12602E+10,
0.14313E+10, 0.16238E+10, 0.18401E+10, 0.20829E+10, 0.23553E+10,
0.26605E+10, 0.30021E+10, 0.33841E+10, 0.38109E+10, 0.42874E+10,
0.48187E+10, 0.54107E+10, 0.60698E+10, 0.68029E+10, 0.76176E+10,
0.85223E+10, 0.95260E+10, 0.10639E+11, 0.11871E+11, 0.13236E+11,
0.14744E+11, 0.16412E+11, 0.18253E+11, 0.20285E+11, 0.22526E+11,
0.24995E+11, 0.27714E+11, 0.30705E+11, 0.33995E+11, 0.37609E+11,
0.41579E+11, 0.45934E+11, 0.50711E+11, 0.55947E+11, 0.61681E+11,
0.67957E+11, 0.74824E+11, 0.82330E+11, 0.90532E+11, 0.99487E+11,
0.10926E+12, 0.11992E+12, 0.13154E+12, 0.14420E+12, 0.15799E+12,
0.17299E+12])
# Total internal partition sums Q(T) for the HC3N isotopologues (HITRAN
# molecule 44), tabulated on the global Tdat temperature grid (70-3000 K).
# TIPS_GSI_HASH holds the state-independent statistical weight gi; the tables
# are consumed via AtoB interpolation inside BD_TIPS_2011_PYTHON.
# --------------- HC3N 12224: M = 44, I = 1 --------------------- 1224 in HITRAN, 12224 in TIPS
M = 44
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.16683E+04, 0.24538E+04, 0.33995E+04,
0.45769E+04, 0.60637E+04, 0.79533E+04, 0.10360E+05, 0.13422E+05,
0.17311E+05, 0.22232E+05, 0.28434E+05, 0.36215E+05, 0.45932E+05,
0.58011E+05, 0.72958E+05, 0.91370E+05, 0.11395E+06, 0.14153E+06,
0.17507E+06, 0.21570E+06, 0.26475E+06, 0.32372E+06, 0.39440E+06,
0.47881E+06, 0.57930E+06, 0.69856E+06, 0.83968E+06, 0.10062E+07,
0.12021E+07, 0.14320E+07, 0.17011E+07, 0.20153E+07, 0.23812E+07,
0.28065E+07, 0.32996E+07, 0.38701E+07, 0.45287E+07, 0.52876E+07,
0.61602E+07, 0.71616E+07, 0.83088E+07, 0.96206E+07, 0.11118E+08,
0.12824E+08, 0.14765E+08, 0.16969E+08, 0.19469E+08, 0.22299E+08,
0.25498E+08, 0.29110E+08, 0.33181E+08, 0.37763E+08, 0.42914E+08,
0.48697E+08, 0.55180E+08, 0.62440E+08, 0.70558E+08, 0.79627E+08,
0.89743E+08, 0.10102E+09, 0.11356E+09, 0.12752E+09, 0.14301E+09,
0.16020E+09, 0.17925E+09, 0.20035E+09, 0.22367E+09, 0.24945E+09,
0.27790E+09, 0.30928E+09, 0.34385E+09, 0.38191E+09, 0.42376E+09,
0.46975E+09, 0.52023E+09, 0.57562E+09, 0.63632E+09, 0.70279E+09,
0.77553E+09, 0.85506E+09, 0.94195E+09, 0.10368E+10, 0.11403E+10,
0.12531E+10, 0.13759E+10, 0.15097E+10, 0.16552E+10, 0.18133E+10,
0.19851E+10, 0.21715E+10, 0.23738E+10, 0.25931E+10, 0.28307E+10,
0.30879E+10, 0.33662E+10, 0.36672E+10, 0.39926E+10, 0.43439E+10,
0.47233E+10, 0.51325E+10, 0.55738E+10, 0.60493E+10, 0.65615E+10,
0.71129E+10, 0.77061E+10, 0.83441E+10, 0.90298E+10, 0.97664E+10,
0.10557E+11, 0.11406E+11, 0.12317E+11, 0.13293E+11, 0.14339E+11,
0.15459E+11, 0.16659E+11, 0.17942E+11, 0.19316E+11, 0.20784E+11,
0.22353E+11])
# --------------- HC3N 12234: M = 44, I = 2 --------------------- see above
M = 44
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.33507E+04, 0.49290E+04, 0.68293E+04,
0.91959E+04, 0.12185E+05, 0.15986E+05, 0.20828E+05, 0.26993E+05,
0.34824E+05, 0.44739E+05, 0.57239E+05, 0.72931E+05, 0.92539E+05,
0.11693E+06, 0.14713E+06, 0.18435E+06, 0.23004E+06, 0.28588E+06,
0.35384E+06, 0.43625E+06, 0.53580E+06, 0.65562E+06, 0.79933E+06,
0.97115E+06, 0.11759E+07, 0.14191E+07, 0.17073E+07, 0.20476E+07,
0.24486E+07, 0.29196E+07, 0.34716E+07, 0.41169E+07, 0.48696E+07,
0.57453E+07, 0.67621E+07, 0.79402E+07, 0.93022E+07, 0.10874E+08,
0.12684E+08, 0.14764E+08, 0.17150E+08, 0.19884E+08, 0.23009E+08,
0.26576E+08, 0.30641E+08, 0.35265E+08, 0.40518E+08, 0.46477E+08,
0.53225E+08, 0.60856E+08, 0.69475E+08, 0.79195E+08, 0.90143E+08,
0.10246E+09, 0.11629E+09, 0.13182E+09, 0.14921E+09, 0.16868E+09,
0.19045E+09, 0.21477E+09, 0.24189E+09, 0.27211E+09, 0.30575E+09,
0.34316E+09, 0.38471E+09, 0.43083E+09, 0.48196E+09, 0.53858E+09,
0.60125E+09, 0.67052E+09, 0.74704E+09, 0.83148E+09, 0.92459E+09,
0.10272E+10, 0.11401E+10, 0.12643E+10, 0.14007E+10, 0.15506E+10,
0.17150E+10, 0.18953E+10, 0.20928E+10, 0.23090E+10, 0.25456E+10,
0.28042E+10, 0.30867E+10, 0.33951E+10, 0.37316E+10, 0.40984E+10,
0.44981E+10, 0.49332E+10, 0.54067E+10, 0.59216E+10, 0.64812E+10,
0.70890E+10, 0.77488E+10, 0.84645E+10, 0.92405E+10, 0.10081E+11,
0.10992E+11, 0.11978E+11, 0.13044E+11, 0.14197E+11, 0.15443E+11,
0.16789E+11, 0.18243E+11, 0.19810E+11, 0.21501E+11, 0.23324E+11,
0.25288E+11, 0.27403E+11, 0.29680E+11, 0.32130E+11, 0.34764E+11,
0.37596E+11, 0.40639E+11, 0.43907E+11, 0.47416E+11, 0.51181E+11,
0.55220E+11])
# --------------- HC3N 12324: M = 44, I = 3 --------------------- see above
M = 44
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.33506E+04, 0.49280E+04, 0.68267E+04,
0.91901E+04, 0.12174E+05, 0.15966E+05, 0.20793E+05, 0.26936E+05,
0.34734E+05, 0.44598E+05, 0.57026E+05, 0.72612E+05, 0.92071E+05,
0.11625E+06, 0.14616E+06, 0.18298E+06, 0.22813E+06, 0.28323E+06,
0.35022E+06, 0.43133E+06, 0.52918E+06, 0.64677E+06, 0.78761E+06,
0.95571E+06, 0.11557E+07, 0.13929E+07, 0.16734E+07, 0.20041E+07,
0.23929E+07, 0.28488E+07, 0.33820E+07, 0.40040E+07, 0.47280E+07,
0.55686E+07, 0.65423E+07, 0.76678E+07, 0.89661E+07, 0.10460E+08,
0.12177E+08, 0.14145E+08, 0.16397E+08, 0.18970E+08, 0.21903E+08,
0.25242E+08, 0.29036E+08, 0.33339E+08, 0.38214E+08, 0.43726E+08,
0.49949E+08, 0.56965E+08, 0.64864E+08, 0.73743E+08, 0.83711E+08,
0.94886E+08, 0.10740E+09, 0.12139E+09, 0.13701E+09, 0.15443E+09,
0.17384E+09, 0.19543E+09, 0.21943E+09, 0.24607E+09, 0.27561E+09,
0.30832E+09, 0.34452E+09, 0.38453E+09, 0.42870E+09, 0.47742E+09,
0.53110E+09, 0.59020E+09, 0.65518E+09, 0.72659E+09, 0.80496E+09,
0.89092E+09, 0.98510E+09, 0.10882E+10, 0.12010E+10, 0.13242E+10,
0.14588E+10, 0.16056E+10, 0.17657E+10, 0.19401E+10, 0.21299E+10,
0.23363E+10, 0.25606E+10, 0.28043E+10, 0.30687E+10, 0.33553E+10,
0.36660E+10, 0.40024E+10, 0.43665E+10, 0.47601E+10, 0.51856E+10,
0.56450E+10, 0.61408E+10, 0.66756E+10, 0.72520E+10, 0.78729E+10,
0.85413E+10, 0.92604E+10, 0.10034E+11, 0.10864E+11, 0.11757E+11,
0.12714E+11, 0.13742E+11, 0.14843E+11, 0.16023E+11, 0.17287E+11,
0.18640E+11, 0.20087E+11, 0.21634E+11, 0.23288E+11, 0.25054E+11,
0.26939E+11, 0.28950E+11, 0.31096E+11, 0.33382E+11, 0.35819E+11,
0.38413E+11])
# --------------- HC3N 13224: M = 44, I = 4 --------------------- see above
M = 44
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M,I)] = float32([0.34439E+04, 0.50672E+04, 0.70230E+04,
0.94603E+04, 0.12542E+05, 0.16462E+05, 0.21461E+05, 0.27833E+05,
0.35935E+05, 0.46204E+05, 0.59168E+05, 0.75463E+05, 0.95854E+05,
0.12126E+06, 0.15276E+06, 0.19165E+06, 0.23947E+06, 0.29802E+06,
0.36943E+06, 0.45619E+06, 0.56121E+06, 0.68789E+06, 0.84018E+06,
0.10227E+07, 0.12407E+07, 0.15003E+07, 0.18086E+07, 0.21738E+07,
0.26052E+07, 0.31134E+07, 0.37106E+07, 0.44109E+07, 0.52300E+07,
0.61861E+07, 0.72996E+07, 0.85939E+07, 0.10095E+08, 0.11833E+08,
0.13841E+08, 0.16158E+08, 0.18825E+08, 0.21890E+08, 0.25407E+08,
0.29436E+08, 0.34045E+08, 0.39308E+08, 0.45309E+08, 0.52143E+08,
0.59912E+08, 0.68734E+08, 0.78737E+08, 0.90065E+08, 0.10288E+09,
0.11735E+09, 0.13367E+09, 0.15206E+09, 0.17277E+09, 0.19604E+09,
0.22217E+09, 0.25148E+09, 0.28432E+09, 0.32108E+09, 0.36218E+09,
0.40809E+09, 0.45932E+09, 0.51644E+09, 0.58004E+09, 0.65082E+09,
0.72950E+09, 0.81690E+09, 0.91388E+09, 0.10214E+10, 0.11405E+10,
0.12724E+10, 0.14182E+10, 0.15794E+10, 0.17573E+10, 0.19536E+10,
0.21701E+10, 0.24086E+10, 0.26711E+10, 0.29599E+10, 0.32774E+10,
0.36262E+10, 0.40090E+10, 0.44290E+10, 0.48895E+10, 0.53939E+10,
0.59462E+10, 0.65504E+10, 0.72111E+10, 0.79332E+10, 0.87217E+10,
0.95823E+10, 0.10521E+11, 0.11544E+11, 0.12659E+11, 0.13874E+11,
0.15195E+11, 0.16632E+11, 0.18194E+11, 0.19892E+11, 0.21735E+11,
0.23736E+11, 0.25907E+11, 0.28260E+11, 0.30810E+11, 0.33572E+11,
0.36563E+11, 0.39799E+11, 0.43299E+11, 0.47083E+11, 0.51172E+11,
0.55588E+11, 0.60355E+11, 0.65500E+11, 0.71049E+11, 0.77031E+11,
0.83478E+11])
# --------------- HC3N 12225: M = 44, I = 5 --------------------- see above
M = 44
I = 5
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.11455E+04, 0.16850E+04, 0.23345E+04,
0.31432E+04, 0.41647E+04, 0.54630E+04, 0.71168E+04, 0.92219E+04,
0.11895E+05, 0.15279E+05, 0.19545E+05, 0.24897E+05, 0.31584E+05,
0.39899E+05, 0.50190E+05, 0.62871E+05, 0.78428E+05, 0.97434E+05,
0.12056E+06, 0.14859E+06, 0.18243E+06, 0.22314E+06, 0.27194E+06,
0.33026E+06, 0.39972E+06, 0.48219E+06, 0.57983E+06, 0.69509E+06,
0.83077E+06, 0.99009E+06, 0.11767E+07, 0.13946E+07, 0.16487E+07,
0.19441E+07, 0.22868E+07, 0.26836E+07, 0.31420E+07, 0.36704E+07,
0.42786E+07, 0.49770E+07, 0.57776E+07, 0.66938E+07, 0.77404E+07,
0.89339E+07, 0.10293E+08, 0.11837E+08, 0.13590E+08, 0.15576E+08,
0.17823E+08, 0.20362E+08, 0.23227E+08, 0.26454E+08, 0.30085E+08,
0.34166E+08, 0.38745E+08, 0.43877E+08, 0.49622E+08, 0.56046E+08,
0.63219E+08, 0.71222E+08, 0.80138E+08, 0.90062E+08, 0.10110E+09,
0.11335E+09, 0.12695E+09, 0.14202E+09, 0.15870E+09, 0.17716E+09,
0.19756E+09, 0.22009E+09, 0.24493E+09, 0.27232E+09, 0.30247E+09,
0.33565E+09, 0.37211E+09, 0.41217E+09, 0.45613E+09, 0.50433E+09,
0.55714E+09, 0.61497E+09, 0.67823E+09, 0.74739E+09, 0.82293E+09,
0.90540E+09, 0.99536E+09, 0.10934E+10, 0.12002E+10, 0.13165E+10,
0.14430E+10, 0.15805E+10, 0.17299E+10, 0.18922E+10, 0.20682E+10,
0.22591E+10, 0.24660E+10, 0.26901E+10, 0.29326E+10, 0.31951E+10,
0.34788E+10, 0.37854E+10, 0.41166E+10, 0.44741E+10, 0.48598E+10,
0.52758E+10, 0.57240E+10, 0.62069E+10, 0.67269E+10, 0.72864E+10,
0.78882E+10, 0.85352E+10, 0.92305E+10, 0.99773E+10, 0.10779E+11,
0.11639E+11, 0.12562E+11, 0.13552E+11, 0.14612E+11, 0.15748E+11,
0.16964E+11])
# --------------- HC3N 22224: M = 44, I = 6 --------------------- see above
M = 44
I = 6
TIPS_GSI_HASH[(M,I)] = __FloatType__(9.)
TIPS_ISO_HASH[(M,I)] = float32([0.27029E+04, 0.39999E+04, 0.55894E+04,
0.76092E+04, 0.10219E+05, 0.13616E+05, 0.18042E+05, 0.23798E+05,
0.31255E+05, 0.40867E+05, 0.53189E+05, 0.68897E+05, 0.88807E+05,
0.11390E+06, 0.14537E+06, 0.18461E+06, 0.23330E+06, 0.29342E+06,
0.36733E+06, 0.45779E+06, 0.56802E+06, 0.70182E+06, 0.86361E+06,
0.10585E+07, 0.12925E+07, 0.15725E+07, 0.19064E+07, 0.23034E+07,
0.27739E+07, 0.33302E+07, 0.39858E+07, 0.47566E+07, 0.56604E+07,
0.67176E+07, 0.79511E+07, 0.93872E+07, 0.11055E+08, 0.12989E+08,
0.15225E+08, 0.17806E+08, 0.20779E+08, 0.24197E+08, 0.28119E+08,
0.32612E+08, 0.37749E+08, 0.43612E+08, 0.50294E+08, 0.57895E+08,
0.66528E+08, 0.76318E+08, 0.87403E+08, 0.99937E+08, 0.11409E+09,
0.13004E+09, 0.14800E+09, 0.16819E+09, 0.19086E+09, 0.21629E+09,
0.24476E+09, 0.27661E+09, 0.31219E+09, 0.35189E+09, 0.39615E+09,
0.44542E+09, 0.50021E+09, 0.56108E+09, 0.62862E+09, 0.70350E+09,
0.78641E+09, 0.87814E+09, 0.97952E+09, 0.10915E+10, 0.12149E+10,
0.13510E+10, 0.15008E+10, 0.16656E+10, 0.18468E+10, 0.20457E+10,
0.22640E+10, 0.25032E+10, 0.27653E+10, 0.30522E+10, 0.33659E+10,
0.37088E+10, 0.40832E+10, 0.44917E+10, 0.49371E+10, 0.54224E+10,
0.59508E+10, 0.65256E+10, 0.71507E+10, 0.78298E+10, 0.85671E+10,
0.93672E+10, 0.10235E+11, 0.11175E+11, 0.12193E+11, 0.13295E+11,
0.14487E+11, 0.15776E+11, 0.17168E+11, 0.18671E+11, 0.20293E+11,
0.22043E+11, 0.23929E+11, 0.25960E+11, 0.28148E+11, 0.30502E+11,
0.33034E+11, 0.35756E+11, 0.38681E+11, 0.41823E+11, 0.45195E+11,
0.48812E+11, 0.52692E+11, 0.56850E+11, 0.61306E+11, 0.66076E+11,
0.71183E+11])
# Total internal partition sums Q(T) for the H2 isotopologues (HITRAN
# molecule 45), on the global Tdat grid (70-3000 K); interpolated via AtoB
# in BD_TIPS_2011_PYTHON.
# --------------- H2 11: M = 45, I = 1 ---------------------
M = 45
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.15265E+01, 0.22243E+01, 0.29619E+01,
0.36724E+01, 0.43456E+01, 0.49880E+01, 0.56090E+01, 0.62165E+01,
0.68161E+01, 0.74113E+01, 0.80044E+01, 0.85966E+01, 0.91887E+01,
0.97810E+01, 0.10374E+02, 0.10967E+02, 0.11561E+02, 0.12156E+02,
0.12751E+02, 0.13347E+02, 0.13944E+02, 0.14541E+02, 0.15139E+02,
0.15738E+02, 0.16337E+02, 0.16937E+02, 0.17538E+02, 0.18140E+02,
0.18743E+02, 0.19346E+02, 0.19951E+02, 0.20556E+02, 0.21163E+02,
0.21771E+02, 0.22379E+02, 0.22990E+02, 0.23601E+02, 0.24214E+02,
0.24829E+02, 0.25445E+02, 0.26063E+02, 0.26683E+02, 0.27304E+02,
0.27928E+02, 0.28553E+02, 0.29181E+02, 0.29811E+02, 0.30443E+02,
0.31078E+02, 0.31715E+02, 0.32355E+02, 0.32997E+02, 0.33643E+02,
0.34291E+02, 0.34942E+02, 0.35596E+02, 0.36253E+02, 0.36914E+02,
0.37578E+02, 0.38245E+02, 0.38916E+02, 0.39590E+02, 0.40268E+02,
0.40949E+02, 0.41635E+02, 0.42324E+02, 0.43017E+02, 0.43715E+02,
0.44416E+02, 0.45122E+02, 0.45831E+02, 0.46546E+02, 0.47264E+02,
0.47987E+02, 0.48714E+02, 0.49446E+02, 0.50183E+02, 0.50925E+02,
0.51671E+02, 0.52422E+02, 0.53178E+02, 0.53939E+02, 0.54705E+02,
0.55476E+02, 0.56252E+02, 0.57033E+02, 0.57820E+02, 0.58612E+02,
0.59409E+02, 0.60212E+02, 0.61020E+02, 0.61833E+02, 0.62652E+02,
0.63477E+02, 0.64308E+02, 0.65144E+02, 0.65986E+02, 0.66833E+02,
0.67687E+02, 0.68546E+02, 0.69411E+02, 0.70283E+02, 0.71160E+02,
0.72043E+02, 0.72933E+02, 0.73829E+02, 0.74730E+02, 0.75638E+02,
0.76553E+02, 0.77473E+02, 0.78400E+02, 0.79333E+02, 0.80273E+02,
0.81219E+02, 0.82172E+02, 0.83131E+02, 0.84097E+02, 0.85069E+02,
0.86048E+02])
# --------------- H2 12: M = 45, I = 2 ---------------------
M = 45
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M,I)] = float32([0.81692E+01, 0.10308E+02, 0.12557E+02,
0.14848E+02, 0.17159E+02, 0.19482E+02, 0.21815E+02, 0.24153E+02,
0.26497E+02, 0.28845E+02, 0.31197E+02, 0.33552E+02, 0.35910E+02,
0.38272E+02, 0.40636E+02, 0.43002E+02, 0.45372E+02, 0.47744E+02,
0.50119E+02, 0.52496E+02, 0.54877E+02, 0.57261E+02, 0.59649E+02,
0.62040E+02, 0.64435E+02, 0.66835E+02, 0.69240E+02, 0.71650E+02,
0.74066E+02, 0.76489E+02, 0.78918E+02, 0.81354E+02, 0.83799E+02,
0.86252E+02, 0.88715E+02, 0.91187E+02, 0.93669E+02, 0.96163E+02,
0.98668E+02, 0.10118E+03, 0.10371E+03, 0.10626E+03, 0.10881E+03,
0.11138E+03, 0.11397E+03, 0.11657E+03, 0.11919E+03, 0.12182E+03,
0.12447E+03, 0.12714E+03, 0.12982E+03, 0.13252E+03, 0.13524E+03,
0.13798E+03, 0.14074E+03, 0.14352E+03, 0.14632E+03, 0.14914E+03,
0.15198E+03, 0.15484E+03, 0.15772E+03, 0.16062E+03, 0.16355E+03,
0.16649E+03, 0.16946E+03, 0.17246E+03, 0.17547E+03, 0.17851E+03,
0.18157E+03, 0.18466E+03, 0.18777E+03, 0.19090E+03, 0.19406E+03,
0.19725E+03, 0.20045E+03, 0.20369E+03, 0.20695E+03, 0.21023E+03,
0.21354E+03, 0.21687E+03, 0.22024E+03, 0.22362E+03, 0.22704E+03,
0.23048E+03, 0.23394E+03, 0.23744E+03, 0.24096E+03, 0.24451E+03,
0.24808E+03, 0.25169E+03, 0.25532E+03, 0.25897E+03, 0.26266E+03,
0.26638E+03, 0.27012E+03, 0.27389E+03, 0.27769E+03, 0.28152E+03,
0.28537E+03, 0.28926E+03, 0.29317E+03, 0.29712E+03, 0.30109E+03,
0.30509E+03, 0.30913E+03, 0.31319E+03, 0.31728E+03, 0.32140E+03,
0.32555E+03, 0.32974E+03, 0.33395E+03, 0.33819E+03, 0.34246E+03,
0.34677E+03, 0.35110E+03, 0.35547E+03, 0.35987E+03, 0.36429E+03,
0.36875E+03])
# Total internal partition sums Q(T) for the CS isotopologues (HITRAN
# molecule 46), on the global Tdat grid (70-3000 K); interpolated via AtoB
# in BD_TIPS_2011_PYTHON.
# --------------- CS 22: M = 46, I = 1 ---------------------
M = 46
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.51416E+02, 0.72723E+02, 0.94044E+02,
0.11538E+03, 0.13673E+03, 0.15810E+03, 0.17949E+03, 0.20093E+03,
0.22245E+03, 0.24407E+03, 0.26582E+03, 0.28776E+03, 0.30992E+03,
0.33233E+03, 0.35504E+03, 0.37807E+03, 0.40147E+03, 0.42525E+03,
0.44944E+03, 0.47406E+03, 0.49914E+03, 0.52468E+03, 0.55071E+03,
0.57723E+03, 0.60427E+03, 0.63183E+03, 0.65991E+03, 0.68854E+03,
0.71771E+03, 0.74743E+03, 0.77771E+03, 0.80855E+03, 0.83996E+03,
0.87193E+03, 0.90449E+03, 0.93762E+03, 0.97134E+03, 0.10056E+04,
0.10405E+04, 0.10760E+04, 0.11121E+04, 0.11487E+04, 0.11860E+04,
0.12239E+04, 0.12623E+04, 0.13014E+04, 0.13410E+04, 0.13813E+04,
0.14222E+04, 0.14637E+04, 0.15057E+04, 0.15484E+04, 0.15917E+04,
0.16357E+04, 0.16802E+04, 0.17253E+04, 0.17711E+04, 0.18175E+04,
0.18645E+04, 0.19121E+04, 0.19603E+04, 0.20091E+04, 0.20586E+04,
0.21087E+04, 0.21594E+04, 0.22107E+04, 0.22626E+04, 0.23152E+04,
0.23684E+04, 0.24222E+04, 0.24767E+04, 0.25317E+04, 0.25874E+04,
0.26438E+04, 0.27007E+04, 0.27583E+04, 0.28165E+04, 0.28754E+04,
0.29348E+04, 0.29949E+04, 0.30557E+04, 0.31170E+04, 0.31790E+04,
0.32417E+04, 0.33049E+04, 0.33688E+04, 0.34334E+04, 0.34986E+04,
0.35644E+04, 0.36308E+04, 0.36979E+04, 0.37656E+04, 0.38340E+04,
0.39030E+04, 0.39727E+04, 0.40430E+04, 0.41139E+04, 0.41855E+04,
0.42577E+04, 0.43306E+04, 0.44041E+04, 0.44782E+04, 0.45530E+04,
0.46284E+04, 0.47045E+04, 0.47813E+04, 0.48587E+04, 0.49367E+04,
0.50154E+04, 0.50947E+04, 0.51747E+04, 0.52553E+04, 0.53366E+04,
0.54185E+04, 0.55011E+04, 0.55844E+04, 0.56683E+04, 0.57528E+04,
0.58380E+04])
# --------------- CS 24: M = 46, I = 2 ---------------------
M = 46
I = 2
TIPS_GSI_HASH[(M,I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M,I)] = float32([0.52247E+02, 0.73900E+02, 0.95568E+02,
0.11725E+03, 0.13895E+03, 0.16066E+03, 0.18241E+03, 0.20420E+03,
0.22607E+03, 0.24805E+03, 0.27018E+03, 0.29249E+03, 0.31503E+03,
0.33784E+03, 0.36096E+03, 0.38442E+03, 0.40824E+03, 0.43247E+03,
0.45712E+03, 0.48221E+03, 0.50778E+03, 0.53382E+03, 0.56037E+03,
0.58743E+03, 0.61501E+03, 0.64312E+03, 0.67179E+03, 0.70100E+03,
0.73077E+03, 0.76111E+03, 0.79202E+03, 0.82351E+03, 0.85559E+03,
0.88824E+03, 0.92149E+03, 0.95533E+03, 0.98977E+03, 0.10248E+04,
0.10605E+04, 0.10967E+04, 0.11336E+04, 0.11710E+04, 0.12091E+04,
0.12478E+04, 0.12871E+04, 0.13270E+04, 0.13675E+04, 0.14087E+04,
0.14505E+04, 0.14929E+04, 0.15359E+04, 0.15795E+04, 0.16238E+04,
0.16687E+04, 0.17142E+04, 0.17604E+04, 0.18071E+04, 0.18546E+04,
0.19026E+04, 0.19513E+04, 0.20006E+04, 0.20505E+04, 0.21011E+04,
0.21523E+04, 0.22042E+04, 0.22566E+04, 0.23098E+04, 0.23635E+04,
0.24179E+04, 0.24730E+04, 0.25286E+04, 0.25850E+04, 0.26419E+04,
0.26995E+04, 0.27578E+04, 0.28167E+04, 0.28762E+04, 0.29364E+04,
0.29972E+04, 0.30587E+04, 0.31208E+04, 0.31836E+04, 0.32470E+04,
0.33111E+04, 0.33758E+04, 0.34412E+04, 0.35072E+04, 0.35739E+04,
0.36412E+04, 0.37092E+04, 0.37778E+04, 0.38471E+04, 0.39171E+04,
0.39877E+04, 0.40589E+04, 0.41309E+04, 0.42034E+04, 0.42767E+04,
0.43505E+04, 0.44251E+04, 0.45003E+04, 0.45762E+04, 0.46527E+04,
0.47299E+04, 0.48077E+04, 0.48863E+04, 0.49654E+04, 0.50453E+04,
0.51258E+04, 0.52070E+04, 0.52888E+04, 0.53713E+04, 0.54545E+04,
0.55383E+04, 0.56229E+04, 0.57080E+04, 0.57939E+04, 0.58804E+04,
0.59676E+04])
# --------------- CS 32: M = 46, I = 3 ---------------------
M = 46
I = 3
TIPS_GSI_HASH[(M,I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M,I)] = float32([0.10889E+03, 0.15403E+03, 0.19920E+03,
0.24440E+03, 0.28964E+03, 0.33491E+03, 0.38026E+03, 0.42571E+03,
0.47134E+03, 0.51722E+03, 0.56342E+03, 0.61005E+03, 0.65719E+03,
0.70493E+03, 0.75334E+03, 0.80249E+03, 0.85245E+03, 0.90329E+03,
0.95504E+03, 0.10078E+04, 0.10615E+04, 0.11163E+04, 0.11721E+04,
0.12291E+04, 0.12872E+04, 0.13464E+04, 0.14068E+04, 0.14684E+04,
0.15311E+04, 0.15951E+04, 0.16604E+04, 0.17268E+04, 0.17945E+04,
0.18635E+04, 0.19337E+04, 0.20051E+04, 0.20779E+04, 0.21519E+04,
0.22272E+04, 0.23038E+04, 0.23817E+04, 0.24609E+04, 0.25414E+04,
0.26232E+04, 0.27064E+04, 0.27908E+04, 0.28765E+04, 0.29636E+04,
0.30520E+04, 0.31417E+04, 0.32327E+04, 0.33251E+04, 0.34188E+04,
0.35138E+04, 0.36102E+04, 0.37079E+04, 0.38070E+04, 0.39074E+04,
0.40091E+04, 0.41122E+04, 0.42166E+04, 0.43224E+04, 0.44295E+04,
0.45380E+04, 0.46478E+04, 0.47590E+04, 0.48715E+04, 0.49854E+04,
0.51007E+04, 0.52173E+04, 0.53353E+04, 0.54547E+04, 0.55754E+04,
0.56975E+04, 0.58210E+04, 0.59458E+04, 0.60720E+04, 0.61996E+04,
0.63285E+04, 0.64589E+04, 0.65906E+04, 0.67236E+04, 0.68581E+04,
0.69940E+04, 0.71312E+04, 0.72698E+04, 0.74098E+04, 0.75512E+04,
0.76940E+04, 0.78381E+04, 0.79837E+04, 0.81307E+04, 0.82790E+04,
0.84287E+04, 0.85799E+04, 0.87324E+04, 0.88864E+04, 0.90417E+04,
0.91984E+04, 0.93566E+04, 0.95161E+04, 0.96771E+04, 0.98394E+04,
0.10003E+05, 0.10168E+05, 0.10335E+05, 0.10503E+05, 0.10672E+05,
0.10843E+05, 0.11015E+05, 0.11189E+05, 0.11364E+05, 0.11541E+05,
0.11719E+05, 0.11898E+05, 0.12079E+05, 0.12261E+05, 0.12444E+05,
0.12630E+05])
# --------------- CS 23: M = 46, I = 4 ---------------------
M = 46
I = 4
TIPS_GSI_HASH[(M,I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M,I)] = float32([0.20737E+03, 0.29330E+03, 0.37930E+03,
0.46535E+03, 0.55145E+03, 0.63764E+03, 0.72394E+03, 0.81043E+03,
0.89722E+03, 0.98443E+03, 0.10722E+04, 0.11607E+04, 0.12501E+04,
0.13406E+04, 0.14323E+04, 0.15253E+04, 0.16197E+04, 0.17158E+04,
0.18135E+04, 0.19129E+04, 0.20142E+04, 0.21174E+04, 0.22226E+04,
0.23298E+04, 0.24391E+04, 0.25504E+04, 0.26639E+04, 0.27796E+04,
0.28976E+04, 0.30177E+04, 0.31401E+04, 0.32648E+04, 0.33918E+04,
0.35211E+04, 0.36527E+04, 0.37867E+04, 0.39231E+04, 0.40618E+04,
0.42029E+04, 0.43463E+04, 0.44922E+04, 0.46405E+04, 0.47912E+04,
0.49443E+04, 0.50999E+04, 0.52579E+04, 0.54183E+04, 0.55812E+04,
0.57465E+04, 0.59143E+04, 0.60846E+04, 0.62573E+04, 0.64325E+04,
0.66102E+04, 0.67903E+04, 0.69729E+04, 0.71581E+04, 0.73457E+04,
0.75358E+04, 0.77284E+04, 0.79235E+04, 0.81211E+04, 0.83212E+04,
0.85239E+04, 0.87290E+04, 0.89367E+04, 0.91469E+04, 0.93596E+04,
0.95748E+04, 0.97926E+04, 0.10013E+05, 0.10236E+05, 0.10461E+05,
0.10689E+05, 0.10920E+05, 0.11153E+05, 0.11388E+05, 0.11626E+05,
0.11867E+05, 0.12110E+05, 0.12356E+05, 0.12604E+05, 0.12855E+05,
0.13109E+05, 0.13365E+05, 0.13623E+05, 0.13884E+05, 0.14148E+05,
0.14415E+05, 0.14683E+05, 0.14955E+05, 0.15229E+05, 0.15506E+05,
0.15785E+05, 0.16067E+05, 0.16351E+05, 0.16638E+05, 0.16928E+05,
0.17220E+05, 0.17515E+05, 0.17813E+05, 0.18113E+05, 0.18416E+05,
0.18721E+05, 0.19029E+05, 0.19340E+05, 0.19653E+05, 0.19969E+05,
0.20287E+05, 0.20608E+05, 0.20932E+05, 0.21258E+05, 0.21587E+05,
0.21919E+05, 0.22253E+05, 0.22590E+05, 0.22930E+05, 0.23272E+05,
0.23617E+05])
# --------------- SO3 26: M = 47, I = 1 --------------------- not in TIPS-2011
# (original section comment said "M = 46" — the code below clearly sets M = 47)
# Placeholder entry: SO3 has no TIPS-2011 table, so the statistical weight is 0
# and the partition-sum table is a single zero.
M = 47
I = 1
TIPS_GSI_HASH[(M,I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M,I)] = float32([0.])
# NOT IN HITRAN, BUT PRESENT IN TIPS-2011
# ... extracted from iso_comparison
#
# id M I COMMENT TIPS_M TIPS_I iso_name abundance mass mol_name
#101 1001 1 not in HITRAN 45 H \N \N H
#
#102 1002 1 not in HITRAN 45 He \N \N He
#
#104 1018 1 not in HITRAN 45 Ar \N \N Ar
#
# not in HITRAN 45 4224 C2N2
# not in HITRAN 45 5225 C2N2
#
# not in HITRAN 48 26 SO
# not in HITRAN 48 46 SO
# not in HITRAN 48 28 SO
#
# not in HITRAN 49 1221 C3H4
#
# not in HITRAN 50 2111 CH3
#
# not in HITRAN 51 222 CS2
# not in HITRAN 51 224 CS2
# not in HITRAN 51 223 CS2
# not in HITRAN 51 232 CS2
# --------------- TIPS IMPLEMENTATION ----------------------
def BD_TIPS_2011_PYTHON(M,I,T):
    """Return (gi,Qt) for HITRAN molecule M, isotopologue I at temperature T (K).

    gi is the state-independent statistical weight from TIPS_GSI_HASH and Qt
    is the total internal partition sum obtained by AtoB interpolation of the
    TIPS-2011 table TIPS_ISO_HASH[(M,I)] on the Tdat temperature grid.
    Raises Exception when T lies outside the tabulated range or when no table
    exists for (M,I).
    """
    # The TIPS-2011 tables only cover 70-3000 K; refuse anything outside.
    if T < 70. or 3000. < T:
        raise Exception('TIPS: T must be between 70K and 3000K.')
    key = (M,I)
    try:
        gi = TIPS_GSI_HASH[key]                        # statistical weight
        Qt = AtoB(T,Tdat,TIPS_ISO_HASH[key],TIPS_NPT)  # interpolated Q(T)
    except KeyError:
        # Unknown molecule/isotopologue combination.
        raise Exception('TIPS: no data for M,I = %d,%d.' % (M,I))
    return gi,Qt
# Total internal partition sum
# M - molecule number
# I - isotopologue number
# T - temperature (K)
# returns (StatWeight,PartitionSum)
def partitionSum(M,I,T,step=None):
    """
    INPUT PARAMETERS:
        M: HITRAN molecule number (required)
        I: HITRAN isotopologue number (required)
        T: temperature conditions (required)
        step: step to calculate temperatures (optional)
    OUTPUT PARAMETERS:
        TT: list of temperatures (present only if T is a list)
        PartSum: partition sums calculated on a list of temperatures
    ---
    DESCRIPTION:
        Calculate range of partition sums at different temperatures.
        This function uses a python implementation of TIPS-2011 code:

        Reference:
            A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman.
            Total internal partition sums to support planetary remote sensing.
            Icarus, Volume 215, Issue 1, September 2011, Pages 391-400
            http://dx.doi.org/10.1016/j.icarus.2011.06.004

        Output depends on a structure of input parameter T so that:
            1) If T is a scalar/list and step IS NOT provided,
                then calculate partition sums over each value of T.
            2) If T is a list and step parameter IS provided,
                then calculate partition sums between T[0] and T[1]
                with a given step.
    ---
    EXAMPLE OF USAGE:
        PartSum = partitionSum(1,1,[296,1000])
        TT,PartSum = partitionSum(1,1,[296,1000],step=0.1)
    ---
    """
    # Case 2: a step was supplied -> treat T as a [start,stop) pair.
    if step:
        TT = arange(T[0],T[1],step)
        Qgrid = array([BD_TIPS_2011_PYTHON(M,I,temp)[1] for temp in TT])
        return TT,Qgrid
    # Case 1: evaluate at each supplied temperature (list/tuple) ...
    if type(T) in (list,tuple):
        return [BD_TIPS_2011_PYTHON(M,I,temp)[1] for temp in T]
    # ... or at a single scalar temperature.
    return BD_TIPS_2011_PYTHON(M,I,T)[1]
# ------------------ partition sum --------------------------------------
# ------------------ LINESHAPES -----------------------------------------
# ------------------ complex probability function -----------------------
# define static data shared by the complex-probability-function routines below
zone = __ComplexType__(1.0e0 + 0.0e0j)  # complex one
zi = __ComplexType__(0.0e0 + 1.0e0j)    # imaginary unit i
# Half-integer coefficients for the 15-term asymptotic series used in
# cpf3() and the large-|z| branch of cpf().
tt = __FloatType__([0.5e0,1.5e0,2.5e0,3.5e0,4.5e0,5.5e0,6.5e0,7.5e0,8.5e0,9.5e0,10.5e0,11.5e0,12.5e0,13.5e0,14.5e0])
# 1/sqrt(pi), as the variable name ("pi power one half") suggests.
pipwoeronehalf = __FloatType__(0.564189583547756e0)
# "naive" implementation for benchmarks
def cpf3(X,Y):
    """Asymptotic ("naive") evaluation of the complex probability function
    for z = X + i*Y, using a 15-term series in 1/z**2 driven by the
    module-level half-integer coefficients tt.  Kept as a benchmark and as
    the large-|z| (region 3) kernel of cpf().

    X, Y may be scalars, lists/tuples or ndarrays.
    Returns the pair (real part, imaginary part) as ndarrays.
    """
    def _as_ndarray(v):
        # Coerce scalar / list / tuple input into an ndarray.
        if type(v) == ndarray:
            return v
        if type(v) in (list,tuple):
            return array(v)
        return array([v])
    X = _as_ndarray(X)
    Y = _as_ndarray(Y)
    inv_z = zone/__ComplexType__(X + zi*Y)  # 1/z (cast may be redundant)
    inv_z2 = inv_z**2
    total = zone
    term = zone
    # Accumulate the truncated asymptotic series sum_k prod_j (tt_j / z**2).
    for coeff in tt:
        term *= inv_z2*coeff
        total += term
    total *= zi*inv_z*pipwoeronehalf  # overall factor i/(z*sqrt(pi))
    return total.real, total.imag
# Node positions (T) and weights (U, S) of the 6-point rational approximation
# used by the Humlicek-style cpf() routine below in regions 1 and 2.
# NOTE: module-level name T is shadowed locally wherever T means temperature.
T = __FloatType__([0.314240376e0,0.947788391e0,1.59768264e0,2.27950708e0,3.02063703e0,3.8897249e0])
U = __FloatType__([1.01172805e0,-0.75197147e0,1.2557727e-2,1.00220082e-2,-2.42068135e-4,5.00848061e-7])
S = __FloatType__([1.393237e0,0.231152406e0,-0.155351466e0,6.21836624e-3,9.19082986e-5,-6.27525958e-7])
# Complex probability function implementation (Humlicek)
def cpf(X,Y):
# X,Y,WR,WI - numpy arrays
if type(X) != ndarray:
if type(X) not in set([list,tuple]):
X = array([X])
else:
X = array(X)
if type(Y) != ndarray:
if type(Y) not in set([list,tuple]):
Y = array([Y])
else:
Y = array(Y)
# REGION3
index_REGION3 = where(sqrt(X**2 + Y**2) > __FloatType__(8.0e0))
X_REGION3 = X[index_REGION3]
Y_REGION3 = Y[index_REGION3]
zm1 = zone/__ComplexType__(X_REGION3 + zi*Y_REGION3)
zm2 = zm1**2
zsum_REGION3 = zone
zterm=zone
for tt_i in tt:
zterm *= zm2*tt_i
zsum_REGION3 += zterm
zsum_REGION3 *= zi*zm1*pipwoeronehalf
index_REGION12 = setdiff1d(array(arange(len(X))),array(index_REGION3))
X_REGION12 = X[index_REGION12]
Y_REGION12 = Y[index_REGION12]
WR = __FloatType__(0.0e0)
WI = __FloatType__(0.0e0)
# REGION12
Y1_REGION12 = Y_REGION12 + __FloatType__(1.5e0)
Y2_REGION12 = Y1_REGION12**2
# REGION2
subindex_REGION2 = where((Y_REGION12 <= 0.85e0) &
(abs(X_REGION12) >= (18.1e0*Y_REGION12 + 1.65e0)))
index_REGION2 = index_REGION12[subindex_REGION2]
X_REGION2 = X[index_REGION2]
Y_REGION2 = Y[index_REGION2]
Y1_REGION2 = Y1_REGION12[subindex_REGION2]
Y2_REGION2 = Y2_REGION12[subindex_REGION2]
Y3_REGION2 = Y_REGION2 + __FloatType__(3.0e0)
WR_REGION2 = WR
WI_REGION2 = WI
WR_REGION2 = zeros(len(X_REGION2))
ii = abs(X_REGION2) < __FloatType__(12.0e0)
WR_REGION2[ii] = exp(-X_REGION2[ii]**2)
WR_REGION2[~ii] = WR
for I in range(6):
R_REGION2 = X_REGION2 - T[I]
R2_REGION2 = R_REGION2**2
D_REGION2 = __FloatType__(1.0e0) / (R2_REGION2 + Y2_REGION2)
D1_REGION2 = Y1_REGION2 * D_REGION2
D2_REGION2 = R_REGION2 * D_REGION2
WR_REGION2 = WR_REGION2 + Y_REGION2 * (U[I]*(R_REGION2*D2_REGION2 - 1.5e0*D1_REGION2) +
S[I]*Y3_REGION2*D2_REGION2)/(R2_REGION2 + 2.25e0)
R_REGION2 = X_REGION2 + T[I]
R2_REGION2 = R_REGION2**2
D_REGION2 = __FloatType__(1.0e0) / (R2_REGION2 + Y2_REGION2)
D3_REGION2 = Y1_REGION2 * D_REGION2
D4_REGION2 = R_REGION2 * D_REGION2
WR_REGION2 = WR_REGION2 + Y_REGION2 * (U[I]*(R_REGION2*D4_REGION2 - 1.5e0*D3_REGION2) -
S[I]*Y3_REGION2*D4_REGION2)/(R2_REGION2 + 2.25e0)
WI_REGION2 = WI_REGION2 + U[I]*(D2_REGION2 + D4_REGION2) + S[I]*(D1_REGION2 - D3_REGION2)
# REGION3
index_REGION1 = setdiff1d(array(index_REGION12),array(index_REGION2))
X_REGION1 = X[index_REGION1]
Y_REGION1 = X[index_REGION1]
subindex_REGION1 = setdiff1d(array(arange(len(index_REGION12))),array(subindex_REGION2))
Y1_REGION1 = Y1_REGION12[subindex_REGION1]
Y2_REGION1 = Y2_REGION12[subindex_REGION1]
WR_REGION1 = WR
WI_REGION1 = WI
for I in range(6):
R_REGION1 = X_REGION1 - T[I]
D_REGION1 = __FloatType__(1.0e0) / (R_REGION1**2 + Y2_REGION1)
D1_REGION1 = Y1_REGION1 * D_REGION1
D2_REGION1 = R_REGION1 * D_REGION1
R_REGION1 = X_REGION1 + T[I]
D_REGION1 = __FloatType__(1.0e0) / (R_REGION1**2 + Y2_REGION1)
D3_REGION1 = Y1_REGION1 * D_REGION1
D4_REGION1 = R_REGION1 * D_REGION1
WR_REGION1 = WR_REGION1 + U[I]*(D1_REGION1 + D3_REGION1) - S[I]*(D2_REGION1 - D4_REGION1)
WI_REGION1 = WI_REGION1 + U[I]*(D2_REGION1 + D4_REGION1) + S[I]*(D1_REGION1 - D3_REGION1)
# total result
WR_TOTAL = zeros(len(X))
WI_TOTAL = zeros(len(X))
# REGION3
WR_TOTAL[index_REGION3] = zsum_REGION3.real
WI_TOTAL[index_REGION3] = zsum_REGION3.imag
# REGION2
WR_TOTAL[index_REGION2] = WR_REGION2
WI_TOTAL[index_REGION2] = WI_REGION2
# REGION1
WR_TOTAL[index_REGION1] = WR_REGION1
WI_TOTAL[index_REGION1] = WI_REGION1
return WR_TOTAL,WI_TOTAL
# ------------------ Hartmann-Tran Profile (HTP) ------------------------
def pcqsdhc_BACKUP(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,eta,sg):
    #-------------------------------------------------
    # "pCqSDHC": partially-Correlated quadratic-Speed-Dependent Hard-Collision
    # Subroutine to Compute the complex normalized spectral shape of an
    # isolated line by the pCqSDHC model
    #
    # Reference:
    # H. Tran, N.H. Ngo, J.-M. Hartmann.
    # Efficient computation of some speed-dependent isolated line profiles.
    # JQSRT, Volume 129, November 2013, Pages 199–203
    # http://dx.doi.org/10.1016/j.jqsrt.2013.06.015
    #
    # Input/Output Parameters of Routine (Arguments or Common)
    # ---------------------------------
    # T       : Temperature in Kelvin (Input).
    # amM1    : Molar mass of the absorber in g/mol(Input).
    # sg0     : Unperturbed line position in cm-1 (Input).
    # GamD    : Doppler HWHM in cm-1 (Input)
    # Gam0    : Speed-averaged line-width in cm-1 (Input).
    # Gam2    : Speed dependence of the line-width in cm-1 (Input).
    # anuVC   : Velocity-changing frequency in cm-1 (Input).
    # eta     : Correlation parameter, No unit (Input).
    # Shift0  : Speed-averaged line-shift in cm-1 (Input).
    # Shift2  : Speed dependence of the line-shift in cm-1 (Input)
    # sg      : Current WaveNumber of the Computation in cm-1 (Input).
    #
    # Output Quantities (through Common Statements)
    # -----------------
    # LS_pCqSDHC_R: Real part of the normalized spectral shape (cm)
    # LS_pCqSDHC_I: Imaginary part of the normalized spectral shape (cm)
    #
    # Called Routines: 'CPF'  (Complex Probability Function)
    # ---------------  'CPF3' (Complex Probability Function for the region 3)
    #
    # Called By: Main Program
    # ---------
    #
    # Double Precision Version
    #
    #-------------------------------------------------
    # sg is the only vector argument which is passed to fusnction
    # Coerce a scalar wavenumber to a one-element array.
    if type(sg) not in set([array,ndarray,list,tuple]):
        sg = array([sg])
    number_of_points = len(sg)
    Aterm_GLOBAL = zeros(number_of_points,dtype=__ComplexType__)
    Bterm_GLOBAL = zeros(number_of_points,dtype=__ComplexType__)
    cte=sqrt(log(2.0e0))/GamD          # Doppler scaling constant sqrt(ln 2)/GamD
    rpi=sqrt(pi)
    iz = __ComplexType__(0.0e0 + 1.0e0j)
    c0 = __ComplexType__(Gam0 - 1.0e0j*Shift0)   # complex speed-averaged width/shift
    c2=__ComplexType__(Gam2 - 1.0e0j*Shift2)     # complex speed-dependence term
    c0t = __ComplexType__((1.0e0 - eta) * (c0 - 1.5e0 * c2) + anuVC)
    c2t = __ComplexType__((1.0e0 - eta) * c2)
    Y = __ComplexType__(1.0e0 / ((2.0e0*cte*c2t))**2)
    # X - vector, Y - scalar
    X = (iz * (sg - sg0) + c0t) / c2t
    # PART1: no speed dependence (c2t == 0) -> reduces to a Voigt-like case.
    if abs(c2t) == 0.0e0:
        Z1 = (iz*(sg - sg0) + c0t) * cte
        xZ1 = -Z1.imag
        yZ1 = Z1.real
        WR1,WI1 = cpf(xZ1,yZ1)
        Aterm_GLOBAL = rpi*cte*__ComplexType__(WR1 + 1.0e0j*WI1)
        index_Z1 = abs(Z1) <= 4.0e3
        index_NOT_Z1 = ~index_Z1
        # NOTE(review): both branches below assign the WHOLE Bterm_GLOBAL array
        # rather than only the masked elements (Bterm_GLOBAL[index_...]); if
        # both masks are non-empty the second assignment clobbers the first.
        # This is the BACKUP variant — verify against the active pcqsdhc().
        if any(index_Z1):
            Bterm_GLOBAL = rpi*cte*((1.0e0 - Z1**2)*__ComplexType__(WR1 + 1.0e0j*WI1) + Z1/rpi)
        if any(index_NOT_Z1):
            Bterm_GLOBAL = cte*(rpi*__ComplexType__(WR1 + 1.0e0j*WI1) + 0.5e0/Z1 - 0.75e0/(Z1**3))
    else:
        # PART2, PART3 AND PART4 (PART4 IS A MAIN PART)
        index_PART2 = abs(X) < 3.0e-8 * abs(Y)
        index_PART3 = (abs(Y) < 1.0e-15 * abs(X)) & ~index_PART2
        index_PART4 = ~ (index_PART2 | index_PART3)
        # PART4: the generic case.
        if any(index_PART4):
            X_TMP = X[index_PART4]
            Z1 = sqrt(X_TMP + Y) - sqrt(Y)
            Z2 = Z1 + __FloatType__(2.0e0) * sqrt(Y)
            xZ1 = -Z1.imag
            yZ1 = Z1.real
            xZ2 = -Z2.imag
            yZ2 = Z2.real
            SZ1 = sqrt(xZ1**2 + yZ1**2)
            SZ2 = sqrt(xZ2**2 + yZ2**2)
            DSZ = abs(SZ1 - SZ2)
            SZmx = maximum(SZ1,SZ2)
            SZmn = minimum(SZ1,SZ2)
            # index_PART4 is a boolean mask over all sg points, so this length
            # equals number_of_points (buffers are over-allocated, then masked).
            length_PART4 = len(index_PART4)
            WR1_PART4 = zeros(length_PART4)
            WI1_PART4 = zeros(length_PART4)
            WR2_PART4 = zeros(length_PART4)
            WI2_PART4 = zeros(length_PART4)
            # Use the asymptotic cpf3 only where one argument is large and the
            # other small with nearly equal magnitudes; cpf elsewhere.
            index_CPF3 = (DSZ <= 1.0e0) & (SZmx > 8.0e0) & (SZmn <= 8.0e0)
            index_CPF = ~index_CPF3 # can be removed
            if any(index_CPF3):
                WR1,WI1 = cpf3(xZ1[index_CPF3],yZ1[index_CPF3])
                WR2,WI2 = cpf3(xZ2[index_CPF3],yZ2[index_CPF3])
                WR1_PART4[index_CPF3] = WR1
                WI1_PART4[index_CPF3] = WI1
                WR2_PART4[index_CPF3] = WR2
                WI2_PART4[index_CPF3] = WI2
            if any(index_CPF):
                WR1,WI1 = cpf(xZ1[index_CPF],yZ1[index_CPF])
                WR2,WI2 = cpf(xZ2[index_CPF],yZ2[index_CPF])
                WR1_PART4[index_CPF] = WR1
                WI1_PART4[index_CPF] = WI1
                WR2_PART4[index_CPF] = WR2
                WI2_PART4[index_CPF] = WI2
            Aterm = rpi*cte*(__ComplexType__(WR1_PART4 + 1.0e0j*WI1_PART4) - __ComplexType__(WR2_PART4+1.0e0j*WI2_PART4))
            Bterm = (-1.0e0 +
                     rpi/(2.0e0*sqrt(Y))*(1.0e0 - Z1**2)*__ComplexType__(WR1_PART4 + 1.0e0j*WI1_PART4)-
                     rpi/(2.0e0*sqrt(Y))*(1.0e0 - Z2**2)*__ComplexType__(WR2_PART4 + 1.0e0j*WI2_PART4)) / c2t
            Aterm_GLOBAL[index_PART4] = Aterm
            Bterm_GLOBAL[index_PART4] = Bterm
        # PART2: |X| negligible compared to |Y|.
        if any(index_PART2):
            X_TMP = X[index_PART2]
            Z1 = (iz*(sg[index_PART2] - sg0) + c0t) * cte
            Z2 = sqrt(X_TMP + Y) + sqrt(Y)
            xZ1 = -Z1.imag
            yZ1 = Z1.real
            xZ2 = -Z2.imag
            yZ2 = Z2.real
            WR1_PART2,WI1_PART2 = cpf(xZ1,yZ1)
            WR2_PART2,WI2_PART2 = cpf(xZ2,yZ2)
            Aterm = rpi*cte*(__ComplexType__(WR1_PART2 + 1.0e0j*WI1_PART2) - __ComplexType__(WR2_PART2 + 1.0e0j*WI2_PART2))
            Bterm = (-1.0e0 +
                     rpi/(2.0e0*sqrt(Y))*(1.0e0 - Z1**2)*__ComplexType__(WR1_PART2 + 1.0e0j*WI1_PART2)-
                     rpi/(2.0e0*sqrt(Y))*(1.0e0 - Z2**2)*__ComplexType__(WR2_PART2 + 1.0e0j*WI2_PART2)) / c2t
            Aterm_GLOBAL[index_PART2] = Aterm
            Bterm_GLOBAL[index_PART2] = Bterm
        # PART3: |Y| negligible compared to |X|.
        if any(index_PART3):
            X_TMP = X[index_PART3]
            xZ1 = -sqrt(X_TMP + Y).imag
            yZ1 = sqrt(X_TMP + Y).real
            WR1_PART3,WI1_PART3 = cpf(xZ1,yZ1)
            index_ABS = abs(sqrt(X_TMP)) <= 4.0e3
            index_NOT_ABS = ~index_ABS
            Aterm = zeros(len(index_PART3),dtype=__ComplexType__)
            Bterm = zeros(len(index_PART3),dtype=__ComplexType__)
            if any(index_ABS):
                # NOTE(review): the next two lines use the full X rather than
                # X_TMP[index_ABS], unlike the masked usages just below —
                # looks like an indexing inconsistency; confirm against the
                # active pcqsdhc() implementation.
                xXb = -sqrt(X).imag
                yXb = sqrt(X).real
                WRb,WIb = cpf(xXb,yXb)
                Aterm[index_ABS] = (2.0e0*rpi/c2t)*(1.0e0/rpi - sqrt(X_TMP[index_ABS])*__ComplexType__(WRb + 1.0e0j*WIb))
                Bterm[index_ABS] = (1.0e0/c2t)*(-1.0e0+
                                                2.0e0*rpi*(1.0e0 - X_TMP[index_ABS]-2.0e0*Y)*(1.0e0/rpi-sqrt(X_TMP[index_ABS])*__ComplexType__(WRb + 1.0e0j*WIb))+
                                                2.0e0*rpi*sqrt(X_TMP[index_ABS] + Y)*__ComplexType__(WR1_PART3 + 1.0e0j*WI1_PART3))
            if any(index_NOT_ABS):
                Aterm[index_NOT_ABS] = (1.0e0/c2t)*(1.0e0/X_TMP[index_NOT_ABS] - 1.5e0/(X_TMP[index_NOT_ABS]**2))
                # NOTE(review): WR1/WI1 here are leftovers from the PART4/PART2
                # code paths (or undefined if those never ran); presumably
                # WR1_PART3/WI1_PART3 was intended — TODO confirm.
                Bterm[index_NOT_ABS] = (1.0e0/c2t)*(-1.0e0 + (1.0e0 - X_TMP[index_NOT_ABS] - 2.0e0*Y)*
                                                   (1.0e0/X_TMP[index_NOT_ABS] - 1.5e0/(X_TMP[index_NOT_ABS]**2))+
                                                   2.0e0*rpi*sqrt(X_TMP[index_NOT_ABS] + Y)*__ComplexType__(WR1 + 1.0e0j*WI1))
            Aterm_GLOBAL[index_PART3] = Aterm
            Bterm_GLOBAL[index_PART3] = Bterm
    # common part: assemble the pCqSDHC profile from the A and B terms.
    LS_pCqSDHC = (1.0e0/pi) * (Aterm_GLOBAL / (1.0e0 - (anuVC-eta*(c0-1.5e0*c2))*Aterm_GLOBAL + eta*c2*Bterm_GLOBAL))
    return LS_pCqSDHC.real,LS_pCqSDHC.imag
# ------------------ Hartmann-Tran Profile (HTP) ------------------------
def pcqsdhc(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,eta,sg):
    #-------------------------------------------------
    # "pCqSDHC": partially-Correlated quadratic-Speed-Dependent Hard-Collision
    # Subroutine to Compute the complex normalized spectral shape of an
    # isolated line by the pCqSDHC model
    #
    # Reference:
    # H. Tran, N.H. Ngo, J.-M. Hartmann.
    # Efficient computation of some speed-dependent isolated line profiles.
    # JQSRT, Volume 129, November 2013, Pages 199–203
    # http://dx.doi.org/10.1016/j.jqsrt.2013.06.015
    #
    # Input/Output Parameters of Routine (Arguments or Common)
    # ---------------------------------
    # T : Temperature in Kelvin (Input).
    # amM1 : Molar mass of the absorber in g/mol(Input).
    # sg0 : Unperturbed line position in cm-1 (Input).
    # GamD : Doppler HWHM in cm-1 (Input)
    # Gam0 : Speed-averaged line-width in cm-1 (Input).
    # Gam2 : Speed dependence of the line-width in cm-1 (Input).
    # anuVC : Velocity-changing frequency in cm-1 (Input).
    # eta : Correlation parameter, No unit (Input).
    # Shift0 : Speed-averaged line-shift in cm-1 (Input).
    # Shift2 : Speed dependence of the line-shift in cm-1 (Input)
    # sg : Current WaveNumber of the Computation in cm-1 (Input).
    #
    # Output Quantities (through Common Statements)
    # -----------------
    # LS_pCqSDHC_R: Real part of the normalized spectral shape (cm)
    # LS_pCqSDHC_I: Imaginary part of the normalized spectral shape (cm)
    #
    # Called Routines: 'CPF' (Complex Probability Function)
    # --------------- 'CPF3' (Complex Probability Function for the region 3)
    #
    # Called By: Main Program
    # ---------
    #
    # Double Precision Version
    #
    #-------------------------------------------------
    # sg is the only vector argument which is passed to function;
    # scalars are promoted to a 1-element array so the masked-array
    # logic below works uniformly.
    if type(sg) not in set([array,ndarray,list,tuple]):
        sg = array([sg])
    number_of_points = len(sg)
    # A and B terms of the pCqSDHC expression, accumulated per grid point
    Aterm_GLOBAL = zeros(number_of_points,dtype=__ComplexType__)
    Bterm_GLOBAL = zeros(number_of_points,dtype=__ComplexType__)
    cte=sqrt(log(2.0e0))/GamD           # Doppler scaling factor
    rpi=sqrt(pi)
    iz = __ComplexType__(0.0e0 + 1.0e0j)
    # complex zeroth- and second-order collisional parameters
    c0 = __ComplexType__(Gam0 + 1.0e0j*Shift0)
    c2 = __ComplexType__(Gam2 + 1.0e0j*Shift2)
    # effective (eta-corrected) parameters including velocity-changing term
    c0t = __ComplexType__((1.0e0 - eta) * (c0 - 1.5e0 * c2) + anuVC)
    c2t = __ComplexType__((1.0e0 - eta) * c2)
    # PART1: no effective speed dependence (c2t == 0); reduces to a
    # complex-Voigt-like evaluation via the complex probability function.
    if abs(c2t) == 0.0e0:
        Z1 = (iz*(sg0 - sg) + c0t) * cte
        xZ1 = -Z1.imag
        yZ1 = Z1.real
        WR1,WI1 = cpf(xZ1,yZ1)
        Aterm_GLOBAL = rpi*cte*__ComplexType__(WR1 + 1.0e0j*WI1)
        index_Z1 = abs(Z1) <= 4.0e3
        index_NOT_Z1 = ~index_Z1
        # NOTE(review): both branches below assign the FULL Bterm_GLOBAL
        # array rather than only the index_Z1 / index_NOT_Z1 subsets, so
        # the masks are computed but not used in the assignments — confirm
        # against the reference Fortran implementation.
        if any(index_Z1):
            Bterm_GLOBAL = rpi*cte*((1.0e0 - Z1**2)*__ComplexType__(WR1 + 1.0e0j*WI1) + Z1/rpi)
        if any(index_NOT_Z1):
            Bterm_GLOBAL = cte*(rpi*__ComplexType__(WR1 + 1.0e0j*WI1) + 0.5e0/Z1 - 0.75e0/(Z1**3))
    else:
        # PART2, PART3 AND PART4 (PART4 IS A MAIN PART)
        # X - vector, Y - scalar
        X = (iz * (sg0 - sg) + c0t) / c2t
        Y = __ComplexType__(1.0e0 / ((2.0e0*cte*c2t))**2)
        csqrtY = (Gam2 - iz*Shift2) / (2.0e0*cte*(1.0e0-eta) * (Gam2**2 + Shift2**2))
        # region selection per grid point (asymptotic regimes of |X| vs |Y|)
        index_PART2 = abs(X) <= 3.0e-8 * abs(Y)
        index_PART3 = (abs(Y) <= 1.0e-15 * abs(X)) & ~index_PART2
        index_PART4 = ~ (index_PART2 | index_PART3)
        # PART4: general case, evaluated with cpf (or cpf3 near region 3)
        if any(index_PART4):
            X_TMP = X[index_PART4]
            Z1 = sqrt(X_TMP + Y) - csqrtY
            Z2 = Z1 + __FloatType__(2.0e0) * csqrtY
            xZ1 = -Z1.imag
            yZ1 = Z1.real
            xZ2 = -Z2.imag
            yZ2 = Z2.real
            SZ1 = sqrt(xZ1**2 + yZ1**2)
            SZ2 = sqrt(xZ2**2 + yZ2**2)
            DSZ = abs(SZ1 - SZ2)
            SZmx = maximum(SZ1,SZ2)
            SZmn = minimum(SZ1,SZ2)
            length_PART4 = len(index_PART4)
            WR1_PART4 = zeros(length_PART4)
            WI1_PART4 = zeros(length_PART4)
            WR2_PART4 = zeros(length_PART4)
            WI2_PART4 = zeros(length_PART4)
            # use the region-3 probability function where the two roots
            # straddle the |z| = 8 boundary and are close to each other
            index_CPF3 = (DSZ <= 1.0e0) & (SZmx > 8.0e0) & (SZmn <= 8.0e0)
            index_CPF = ~index_CPF3 # can be removed
            if any(index_CPF3):
                WR1,WI1 = cpf3(xZ1[index_CPF3],yZ1[index_CPF3])
                WR2,WI2 = cpf3(xZ2[index_CPF3],yZ2[index_CPF3])
                WR1_PART4[index_CPF3] = WR1
                WI1_PART4[index_CPF3] = WI1
                WR2_PART4[index_CPF3] = WR2
                WI2_PART4[index_CPF3] = WI2
            if any(index_CPF):
                WR1,WI1 = cpf(xZ1[index_CPF],yZ1[index_CPF])
                WR2,WI2 = cpf(xZ2[index_CPF],yZ2[index_CPF])
                WR1_PART4[index_CPF] = WR1
                WI1_PART4[index_CPF] = WI1
                WR2_PART4[index_CPF] = WR2
                WI2_PART4[index_CPF] = WI2
            Aterm = rpi*cte*(__ComplexType__(WR1_PART4 + 1.0e0j*WI1_PART4) - __ComplexType__(WR2_PART4+1.0e0j*WI2_PART4))
            Bterm = (-1.0e0 +
                rpi/(2.0e0*csqrtY)*(1.0e0 - Z1**2)*__ComplexType__(WR1_PART4 + 1.0e0j*WI1_PART4)-
                rpi/(2.0e0*csqrtY)*(1.0e0 - Z2**2)*__ComplexType__(WR2_PART4 + 1.0e0j*WI2_PART4)) / c2t
            Aterm_GLOBAL[index_PART4] = Aterm
            Bterm_GLOBAL[index_PART4] = Bterm
        # PART2: |X| << |Y| asymptotic branch
        if any(index_PART2):
            X_TMP = X[index_PART2]
            Z1 = (iz*(sg0 - sg[index_PART2]) + c0t) * cte
            Z2 = sqrt(X_TMP + Y) + csqrtY
            xZ1 = -Z1.imag
            yZ1 = Z1.real
            xZ2 = -Z2.imag
            yZ2 = Z2.real
            WR1_PART2,WI1_PART2 = cpf(xZ1,yZ1)
            WR2_PART2,WI2_PART2 = cpf(xZ2,yZ2)
            Aterm = rpi*cte*(__ComplexType__(WR1_PART2 + 1.0e0j*WI1_PART2) - __ComplexType__(WR2_PART2 + 1.0e0j*WI2_PART2))
            Bterm = (-1.0e0 +
                rpi/(2.0e0*csqrtY)*(1.0e0 - Z1**2)*__ComplexType__(WR1_PART2 + 1.0e0j*WI1_PART2)-
                rpi/(2.0e0*csqrtY)*(1.0e0 - Z2**2)*__ComplexType__(WR2_PART2 + 1.0e0j*WI2_PART2)) / c2t
            Aterm_GLOBAL[index_PART2] = Aterm
            Bterm_GLOBAL[index_PART2] = Bterm
        # PART3: |Y| << |X| asymptotic branch
        if any(index_PART3):
            X_TMP = X[index_PART3]
            xZ1 = -sqrt(X_TMP + Y).imag
            yZ1 = sqrt(X_TMP + Y).real
            WR1_PART3,WI1_PART3 = cpf(xZ1,yZ1)
            index_ABS = abs(sqrt(X_TMP)) <= 4.0e3
            index_NOT_ABS = ~index_ABS
            # len(index_PART3) is the length of the boolean mask itself,
            # i.e. the full grid size, not the number of selected points
            Aterm = zeros(len(index_PART3),dtype=__ComplexType__)
            Bterm = zeros(len(index_PART3),dtype=__ComplexType__)
            if any(index_ABS):
                xXb = -sqrt(X).imag
                yXb = sqrt(X).real
                WRb,WIb = cpf(xXb,yXb)
                Aterm[index_ABS] = (2.0e0*rpi/c2t)*(1.0e0/rpi - sqrt(X_TMP[index_ABS])*__ComplexType__(WRb + 1.0e0j*WIb))
                Bterm[index_ABS] = (1.0e0/c2t)*(-1.0e0+
                    2.0e0*rpi*(1.0e0 - X_TMP[index_ABS]-2.0e0*Y)*(1.0e0/rpi-sqrt(X_TMP[index_ABS])*__ComplexType__(WRb + 1.0e0j*WIb))+
                    2.0e0*rpi*sqrt(X_TMP[index_ABS] + Y)*__ComplexType__(WR1_PART3 + 1.0e0j*WI1_PART3))
            if any(index_NOT_ABS):
                Aterm[index_NOT_ABS] = (1.0e0/c2t)*(1.0e0/X_TMP[index_NOT_ABS] - 1.5e0/(X_TMP[index_NOT_ABS]**2))
                # NOTE(review): this branch uses WR1/WI1 (stale values from an
                # earlier cpf/cpf3 call, or undefined if PART4 never ran) rather
                # than WR1_PART3/WI1_PART3 computed above — verify against the
                # reference Fortran implementation.
                Bterm[index_NOT_ABS] = (1.0e0/c2t)*(-1.0e0 + (1.0e0 - X_TMP[index_NOT_ABS] - 2.0e0*Y)*
                                        (1.0e0/X_TMP[index_NOT_ABS] - 1.5e0/(X_TMP[index_NOT_ABS]**2))+
                                        2.0e0*rpi*sqrt(X_TMP[index_NOT_ABS] + Y)*__ComplexType__(WR1 + 1.0e0j*WI1))
            Aterm_GLOBAL[index_PART3] = Aterm
            Bterm_GLOBAL[index_PART3] = Bterm
    # common part: combine A and B terms into the normalized pCqSDHC shape
    LS_pCqSDHC = (1.0e0/pi) * (Aterm_GLOBAL / (1.0e0 - (anuVC-eta*(c0-1.5e0*c2))*Aterm_GLOBAL + eta*c2*Bterm_GLOBAL))
    return LS_pCqSDHC.real,LS_pCqSDHC.imag
# ------------------ CROSS-SECTIONS, XSECT.PY --------------------------------
# interface for the total internal partition sums, TIPS(M,I,T)
def PYTIPS(M,I,T):
    """Return the partition sum for molecule M, isotopologue I at temperature T
    (second element of the BD_TIPS_2011_PYTHON result)."""
    return BD_TIPS_2011_PYTHON(M,I,T)[1]
# profile interfaces: see the PROFILE_HT / PROFILE_VOIGT / PROFILE_LORENTZ /
# PROFILE_DOPPLER functions defined below (legacy lambda bindings removed)
def PROFILE_HT(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,eta,sg):
    """
    Hartmann-Tran (pCqSDHC: partially-Correlated quadratic-Speed-Dependent
    Hard-Collision) line profile. Thin wrapper delegating to pcqsdhc().

    Parameters (cm-1 unless noted):
        sg0    - unperturbed line position
        GamD   - Doppler HWHM
        Gam0   - speed-averaged line width
        Gam2   - speed dependence of the line width
        Shift0 - speed-averaged line shift
        Shift2 - speed dependence of the line shift
        anuVC  - velocity-changing frequency
        eta    - correlation parameter (dimensionless)
        sg     - wavenumber(s) at which to evaluate

    Returns a 2-tuple:
        (real part, imaginary part) of the normalized spectral shape (cm).

    References:
        1) N.H. Ngo, D. Lisak, H. Tran, J.-M. Hartmann.
           An isolated line-shape model to go beyond the Voigt profile in
           spectroscopic databases and radiative transfer codes.
           JQSRT 129 (2013) 89-100,
           http://dx.doi.org/10.1016/j.jqsrt.2013.05.034
        2) H. Tran, N.H. Ngo, J.-M. Hartmann.
           Efficient computation of some speed-dependent isolated line
           profiles. JQSRT 129 (2013) 199-203,
           http://dx.doi.org/10.1016/j.jqsrt.2013.06.015
        3) Erratum to (2): JQSRT 134 (2014) 104,
           http://dx.doi.org/10.1016/j.jqsrt.2013.10.015

    Called routines: 'CPF' and 'CPF3' (complex probability functions),
    via pcqsdhc. Based on a double-precision Fortran version.
    """
    return pcqsdhc(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,eta,sg)
PROFILE_HTP = PROFILE_HT # legacy alias kept for backwards compatibility
def PROFILE_VOIGT(sg0,GamD,Gam0,sg):
    """
    Voigt profile, computed as the HT profile with all beyond-Voigt
    parameters (speed dependences, shifts, Dicke narrowing, correlation)
    set to zero.
        sg0:  unperturbed line position (cm-1)
        GamD: Doppler HWHM (cm-1)
        Gam0: speed-averaged line width (cm-1)
        sg:   wavenumber(s) of computation (cm-1)
    Returns the same (real, imaginary) pair as PROFILE_HT.
    """
    z = cZero
    return PROFILE_HTP(sg0,GamD,Gam0,z,z,z,z,z,sg)
def PROFILE_LORENTZ(sg0,Gam0,sg):
    """
    Area-normalized Lorentzian profile.
        sg0:  unperturbed line position (cm-1)
        Gam0: speed-averaged line width, HWHM (cm-1)
        sg:   wavenumber(s) of computation (cm-1)
    """
    detuning = sg-sg0
    return Gam0/(pi*(Gam0**2+detuning**2))
def PROFILE_DOPPLER(sg0,GamD,sg):
    """
    Area-normalized Gaussian (Doppler) profile.
        sg0:  unperturbed line position (cm-1)
        GamD: Doppler HWHM (cm-1)
        sg:   wavenumber(s) of computation (cm-1)
    """
    x = (sg-sg0)/GamD
    return cSqrtLn2divSqrtPi*exp(-cLn2*x**2)/GamD
# Volume concentration of all gas molecules at the pressure p and temperature T
def volumeConcentration(p,T):
    """
    Number density (molecules per cm3, CGS units) of an ideal gas
    at pressure p (atm) and temperature T (K).
    """
    p_cgs = p/9.869233e-7  # atm -> barye (dyn/cm2)
    return p_cgs/(cBolts*T)
# ------------------------------- PARAMETER DEPENDENCIES --------------------------------
# temperature dependence for intensities (HITRAN)
def EnvironmentDependency_Intensity(LineIntensityRef,T,Tref,SigmaT,SigmaTref,
                                    LowerStateEnergy,LineCenter):
    """
    Rescale a reference line intensity from Tref to temperature T using
    the partition sums SigmaT/SigmaTref and Boltzmann plus
    stimulated-emission factors (const = c2 = hc/k in cm*K).
    """
    const = __FloatType__(1.4388028496642257)
    boltz_T = exp(-const*LowerStateEnergy/T)*(1-exp(-const*LineCenter/T))
    boltz_Tref = exp(-const*LowerStateEnergy/Tref)*(1-exp(-const*LineCenter/Tref))
    return LineIntensityRef*SigmaTref/SigmaT*boltz_T/boltz_Tref
# environmental dependence for GammaD (HTP, Voigt) # Tref/T ????
def EnvironmentDependency_GammaD(GammaD_ref,T,Tref):
    """Doppler HWHM at temperature T given its value at Tref.
    Scales as sqrt(T); Doppler parameters do not depend on pressure."""
    temperature_ratio = T/Tref
    return GammaD_ref*sqrt(temperature_ratio)
# environmental dependence for Gamma0 (HTP, Voigt)
def EnvironmentDependency_Gamma0(Gamma0_ref,T,Tref,p,pref,TempRatioPower):
    """Collisional HWHM at (T, p): linear in pressure, power law in Tref/T."""
    pressure_scaled = Gamma0_ref*p/pref
    return pressure_scaled*(Tref/T)**TempRatioPower
# environmental dependence for Gamma2 (HTP)
def EnvironmentDependency_Gamma2(Gamma2_ref,T,Tref,p,pref,TempRatioPower):
    """Speed dependence of the collisional width at (T, p); same scaling
    law as Gamma0 (linear in pressure, power law in Tref/T)."""
    pressure_scaled = Gamma2_ref*p/pref
    return pressure_scaled*(Tref/T)**TempRatioPower
# environmental dependence for Delta0 (HTP)
def EnvironmentDependency_Delta0(Delta0_ref,p,pref):
    """Pressure-induced line shift: scales linearly with pressure."""
    return Delta0_ref*p/pref
# environmental dependence for Delta2 (HTP)
def EnvironmentDependency_Delta2(Delta2_ref,p,pref):
    """Speed dependence of the pressure shift: linear in pressure."""
    return Delta2_ref*p/pref
# environmental dependence for anuVC (HTP)
def EnvironmentDependency_anuVC(anuVC_ref,T,Tref,p,pref):
    """Velocity-changing (Dicke) frequency at (T, p): scales linearly
    with pressure and inversely with temperature."""
    temp_scaled = anuVC_ref*Tref/T
    return temp_scaled*p/pref
# ------------------------------- /PARAMETER DEPENDENCIES --------------------------------
# ------------------------------- BINDINGS --------------------------------
# default parameter bindings
# (empty placeholder; used as the default for the ParameterBindings
#  argument of the absorptionCoefficient_* functions)
DefaultParameterBindings = {}
# default temperature dependencies
# (empty placeholder; used as the default for the
#  EnvironmentDependencyBindings argument of absorptionCoefficient_*)
DefaultEnvironmentDependencyBindings = {}
# ------------------------------- /BINDINGS --------------------------------
# default value for the line intensity threshold (0. keeps all lines)
DefaultIntensityThreshold = 0. # cm*molec
# default value for omega wing in halfwidths (from line center)
DefaultOmegaWingHW = 50. # cm-1 HOTW default
# check an argument for being a tuple or list
# this works around the Python quirk that
# (val) is not a tuple, while (val,) is a tuple
def listOfTuples(a):
    """
    Wrap a scalar argument into a one-element list; pass lists/tuples through.

    Works around the Python quirk that "(val)" is not a tuple (only
    "(val,)" is), so callers passing a single component or table name
    still get sequence semantics downstream.
    """
    # isinstance (instead of exact type comparison) also accepts
    # list/tuple subclasses, which should be passed through unchanged
    if not isinstance(a, (list, tuple)):
        a = [a]
    return a
# determine default parameters from those which are passed to absorptionCoefficient_...
def getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
                             OmegaStep,OmegaWing,IntensityThreshold,Format):
    """
    Fill in default values for the absorptionCoefficient_* input parameters.

    Any argument given as None (or, for Components, [None]) is replaced by
    a default; everything else is passed through unchanged:
      - SourceTables -> ['__BUFFER__']
      - Environment  -> {'T':296., 'p':1.}
      - Components   -> all (molecule, isotopologue) pairs found in the tables
      - OmegaRange   -> (min(nu), max(nu)) over all source tables
      - OmegaStep    -> 0.01 cm-1
      - OmegaWing    -> 0.0 cm-1
      - Format       -> '%.<d>f %e' with enough digits to represent OmegaStep

    Returns the 8 parameters in the order they were passed.
    Raises Exception if a requested table is absent from LOCAL_TABLE_CACHE
    or its molec_id/local_iso_id columns have different lengths.
    """
    if SourceTables[0] is None:
        SourceTables = ['__BUFFER__',]
    if Environment is None:
        Environment = {'T':296., 'p':1.}
    if Components == [None]:
        # collect all distinct (molecule, isotopologue) pairs from the tables
        CompDict = {}
        for TableName in SourceTables:
            # check table existence
            if TableName not in LOCAL_TABLE_CACHE:
                raise Exception('%s: no such table. Check tableList() for more info.' % TableName)
            mol_ids = LOCAL_TABLE_CACHE[TableName]['data']['molec_id']
            iso_ids = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id']
            if len(mol_ids) != len(iso_ids):
                raise Exception('Lengths of mol_ids and iso_ids differ!')
            for mol_id,iso_id in set(zip(mol_ids,iso_ids)):
                CompDict[(mol_id,iso_id)] = None
        Components = CompDict.keys()
    if OmegaRange is None:
        # span the union of line positions over all source tables
        omega_min = float('inf')
        omega_max = float('-inf')
        for TableName in SourceTables:
            nu = LOCAL_TABLE_CACHE[TableName]['data']['nu']
            numin = min(nu)
            numax = max(nu)
            if omega_min > numin:
                omega_min = numin
            if omega_max < numax:
                omega_max = numax
        OmegaRange = (omega_min,omega_max)
    if OmegaStep is None:
        OmegaStep = 0.01 # cm-1
    if OmegaWing is None:
        OmegaWing = 0.0 # cm-1
    if not Format:
        # derive a print format with enough decimal places to represent OmegaStep
        Infinitesimal = 1e-14 # put this to header in next version!
        min_number_of_digits = 4 # minimal number of digits after dec. pnt.
        last_digit_pos = 0
        while modf(OmegaStep * 10**last_digit_pos)[0] > Infinitesimal:
            last_digit_pos += 1
        actual_number_of_digits = max(min_number_of_digits,last_digit_pos)
        Format = '%%.%df %%e' % actual_number_of_digits
    return Components,SourceTables,Environment,OmegaRange,\
           OmegaStep,OmegaWing,IntensityThreshold,Format
# save numpy arrays to file
# arrays must have same dimensions
def save_to_file(fname,fformat,*arg):
    """
    Save several equal-length arrays to a text file, one row per element.

    fname:   output file name
    fformat: C-style format string for one full row (e.g. '%f %e')
    *arg:    the arrays (columns); all must have the same length

    Raises IndexError if any array is shorter than the first one.
    """
    # context manager guarantees the file is closed even if a row
    # fails to format (the original leaked the handle on exceptions)
    with open(fname,'w') as f:
        for i in range(len(arg[0])):
            row = tuple(a[i] for a in arg)
            f.write((fformat+'\n') % row)
# calculate absorption for HT profile
def absorptionCoefficient_HT(Components=None,SourceTables=None,partitionFunction=PYTIPS,
                             Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
                             IntensityThreshold=DefaultIntensityThreshold,
                             OmegaWingHW=DefaultOmegaWingHW,
                             ParameterBindings=DefaultParameterBindings,
                             EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
                             GammaL='gamma_air', HITRAN_units=True, LineShift=True,
                             File=None, Format=None, OmegaGrid=None):
    """
    INPUT PARAMETERS:
        Components:  list of tuples [(M,I,D)], where
                        M - HITRAN molecule number,
                        I - HITRAN isotopologue number,
                        D - abundance (optional)
        SourceTables:  list of tables from which to calculate cross-section   (optional)
        partitionFunction:  pointer to partition function (default is PYTIPS) (optional)
        Environment:  dictionary containing thermodynamic parameters.
                        'p' - pressure in atmospheres,
                        'T' - temperature in Kelvin
                        Default={'p':1.,'T':296.}
        OmegaRange:  wavenumber range to consider.
        OmegaStep:   wavenumber step to consider.
        OmegaWing:   absolute wing for calculating a lineshape (in cm-1)
        IntensityThreshold:  threshold for intensities
        OmegaWingHW:  relative wing for calculating a lineshape (in halfwidths)
        GammaL:  specifies broadening parameter ('gamma_air' or 'gamma_self')
        HITRAN_units:  use cm2/molecule (True) or cm-1 (False) for absorption coefficient
        File:   write output to file (if specified)
        Format:  c-format of file output (accounts significant digits in OmegaStep)
    OUTPUT PARAMETERS:
        Omegas: wavenumber grid with respect to parameters OmegaRange and OmegaStep
        Xsect: absorption coefficient calculated on the grid.
               Units are switched by HITRAN_units
    ---
    DESCRIPTION:
        Calculate absorption coefficient using HT (Hartmann-Tran) profile.
        Absorption coefficient is calculated at arbitrary temperature and pressure.
        User can vary a wide range of parameters to control a process of calculation
        (such as OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, IntensityThreshold).
        The choice of these parameters depends on properties of a particular linelist.
        Default values are a sort of guess which gives a decent precision (on average)
        for a reasonable amount of cpu time. To increase calculation accuracy,
        user should use a trial and error method.
    ---
    EXAMPLE OF USAGE:
        nu,coef = absorptionCoefficient_HT(((2,1),),'co2',OmegaStep=0.01,
                                           HITRAN_units=False,GammaL='gamma_self')
    ---
    """
    # warn user about too large omega step
    if OmegaStep>0.1: warn('Too large omega step: possible accuracy decline')
    # "bug" with 1-element list
    Components = listOfTuples(Components)
    SourceTables = listOfTuples(SourceTables)
    # determine final input values
    Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
    IntensityThreshold,Format = \
       getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
                                OmegaStep,OmegaWing,IntensityThreshold,Format)
    # get uniform linespace for cross-section
    if OmegaGrid is not None:
        Omegas = npsort(OmegaGrid)
    else:
        Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
    number_of_points = len(Omegas)
    Xsect = zeros(number_of_points)
    # reference temperature and pressure
    Tref = __FloatType__(296.) # K
    pref = __FloatType__(1.) # atm
    # actual temperature and pressure
    T = Environment['T'] # K
    p = Environment['p'] # atm
    # create dictionary from Components
    ABUNDANCES = {}
    NATURAL_ABUNDANCES = {}
    for Component in Components:
        M = Component[0]
        I = Component[1]
        if len(Component) >= 3:
            ni = Component[2]
        else:
            try:
                ni = ISO[(M,I)][ISO_INDEX['abundance']]
            except KeyError:
                raise Exception('cannot find component M,I = %d,%d.' % (M,I))
        ABUNDANCES[(M,I)] = ni
        NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]
    # precalculation of volume concentration
    if HITRAN_units:
        factor = __FloatType__(1.0)
    else:
        factor = volumeConcentration(p,T)
    # SourceTables contain multiple tables
    for TableName in SourceTables:
        # get line centers
        nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
        # loop through line centers (single stream)
        for RowID in range(nline):
            # get basic line parameters (lower level)
            LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
            LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
            LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
            MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
            IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
            Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][GammaL][RowID]
            TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
            # optional beyond-Voigt columns default to zero when absent
            try:
                Gamma2DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma2'][RowID]
            except Exception: # column missing in this table
                Gamma2DB = 0
            if LineShift:
                Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
            else:
                Shift0DB = 0
            try:
                if LineShift:
                    Shift2DB = LOCAL_TABLE_CACHE[TableName]['data']['shift2'][RowID]
                else:
                    Shift2DB = 0
            except Exception: # column missing in this table
                Shift2DB = 0
            try:
                anuVCDB = LOCAL_TABLE_CACHE[TableName]['data']['anuVC'][RowID]
            except Exception: # column missing in this table
                anuVCDB = 0
            try:
                eta = LOCAL_TABLE_CACHE[TableName]['data']['eta'][RowID]
            except Exception: # column missing in this table
                eta = 0
            # filter by molecule and isotopologue
            if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue
            # partition functions for T and Tref
            # TODO: optimize
            SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
            SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)
            # get all environment dependences from voigt parameters
            # intensity
            LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
                                                            LowerStateEnergyDB,LineCenterDB)
            # FILTER by LineIntensity: compare it with IntensityThreshold
            # TODO: apply wing narrowing instead of filtering, this would be more appropriate
            if LineIntensity < IntensityThreshold: continue
            # doppler broadening coefficient (GammaD)
            cMassMol = 1.66053873e-27 # hapi
            m = molecularMass(MoleculeNumberDB,IsoNumberDB) * cMassMol * 1000
            GammaD = sqrt(2*cBolts*T*log(2)/m/cc**2)*LineCenterDB
            # lorentz broadening coefficient
            Gamma0 = EnvironmentDependency_Gamma0(Gamma0DB,T,Tref,p,pref,TempRatioPowerDB)
            # quadratic speed dependence of lorentz broadening coefficient
            Gamma2 = Gamma2DB*p/pref*(Tref/T)**TempRatioPowerDB
            # shift coefficient
            Shift0 = Shift0DB*p/pref
            # quadratic speed dependence of shift coefficient
            Shift2 = Shift2DB*p/pref
            # Dicke narrowing coefficient
            anuVC = anuVCDB*p/pref*Tref/T
            # get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
            # XXX min or max?
            OmegaWingF = max(OmegaWing,OmegaWingHW*Gamma0,OmegaWingHW*GammaD)
            # restrict the evaluation to the wavenumber window around the line
            # XXX time?
            BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
            BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
            lineshape_vals = PROFILE_HT(LineCenterDB,GammaD,Gamma0,Gamma2,Shift0,Shift2,anuVC,eta,
                                        Omegas[BoundIndexLower:BoundIndexUpper])[0]
            Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      LineIntensity * lineshape_vals
    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect
# calculate absorption for Voigt profile
def absorptionCoefficient_Voigt(Components=None,SourceTables=None,partitionFunction=PYTIPS,
                                Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
                                IntensityThreshold=DefaultIntensityThreshold,
                                OmegaWingHW=DefaultOmegaWingHW,
                                ParameterBindings=DefaultParameterBindings,
                                EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
                                GammaL='gamma_air', HITRAN_units=True, LineShift=True,
                                File=None, Format=None, OmegaGrid=None):
    """
    INPUT PARAMETERS:
        Components:  list of tuples [(M,I,D)], where
                        M - HITRAN molecule number,
                        I - HITRAN isotopologue number,
                        D - abundance (optional)
        SourceTables:  list of tables from which to calculate cross-section   (optional)
        partitionFunction:  pointer to partition function (default is PYTIPS) (optional)
        Environment:  dictionary containing thermodynamic parameters.
                        'p' - pressure in atmospheres,
                        'T' - temperature in Kelvin
                        Default={'p':1.,'T':296.}
        OmegaRange:  wavenumber range to consider.
        OmegaStep:   wavenumber step to consider.
        OmegaWing:   absolute wing for calculating a lineshape (in cm-1)
        IntensityThreshold:  threshold for intensities
        OmegaWingHW:  relative wing for calculating a lineshape (in halfwidths)
        GammaL:  specifies broadening parameter ('gamma_air' or 'gamma_self')
        HITRAN_units:  use cm2/molecule (True) or cm-1 (False) for absorption coefficient
        File:   write output to file (if specified)
        Format:  c-format of file output (accounts significant digits in OmegaStep)
    OUTPUT PARAMETERS:
        Omegas: wavenumber grid with respect to parameters OmegaRange and OmegaStep
        Xsect: absorption coefficient calculated on the grid
    ---
    DESCRIPTION:
        Calculate absorption coefficient using Voigt profile.
        Absorption coefficient is calculated at arbitrary temperature and pressure.
        User can vary a wide range of parameters to control a process of calculation
        (such as OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, IntensityThreshold).
        The choice of these parameters depends on properties of a particular linelist.
        Default values are a sort of guess which gives a decent precision (on average)
        for a reasonable amount of cpu time. To increase calculation accuracy,
        user should use a trial and error method.
    ---
    EXAMPLE OF USAGE:
        nu,coef = absorptionCoefficient_Voigt(((2,1),),'co2',OmegaStep=0.01,
                                              HITRAN_units=False,GammaL='gamma_self')
    ---
    """
    # warn user about too large omega step
    if OmegaStep>0.1: warn('Too large omega step: possible accuracy decline')
    # "bug" with 1-element list
    Components = listOfTuples(Components)
    SourceTables = listOfTuples(SourceTables)
    # determine final input values
    Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
    IntensityThreshold,Format = \
       getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
                                OmegaStep,OmegaWing,IntensityThreshold,Format)
    # get uniform linespace for cross-section
    if OmegaGrid is not None:
        Omegas = npsort(OmegaGrid)
    else:
        Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
    number_of_points = len(Omegas)
    Xsect = zeros(number_of_points)
    # reference temperature and pressure
    Tref = __FloatType__(296.) # K
    pref = __FloatType__(1.) # atm
    # actual temperature and pressure
    T = Environment['T'] # K
    p = Environment['p'] # atm
    # create dictionary from Components
    ABUNDANCES = {}
    NATURAL_ABUNDANCES = {}
    for Component in Components:
        M = Component[0]
        I = Component[1]
        if len(Component) >= 3:
            ni = Component[2]
        else:
            try:
                ni = ISO[(M,I)][ISO_INDEX['abundance']]
            except KeyError:
                raise Exception('cannot find component M,I = %d,%d.' % (M,I))
        ABUNDANCES[(M,I)] = ni
        NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]
    # precalculation of volume concentration
    if HITRAN_units:
        factor = __FloatType__(1.0)
    else:
        factor = volumeConcentration(p,T)
    # SourceTables contain multiple tables
    for TableName in SourceTables:
        # get line centers
        nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
        # loop through line centers (single stream)
        for RowID in range(nline):
            # get basic line parameters (lower level)
            LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
            LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
            LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
            MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
            IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
            Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][GammaL][RowID]
            TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
            if LineShift:
                Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
            else:
                Shift0DB = 0
            # filter by molecule and isotopologue
            if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue
            # partition functions for T and Tref
            # TODO: optimize
            SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
            SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)
            # get all environment dependences from voigt parameters
            # intensity
            LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
                                                            LowerStateEnergyDB,LineCenterDB)
            # FILTER by LineIntensity: compare it with IntensityThreshold
            # TODO: apply wing narrowing instead of filtering, this would be more appropriate
            if LineIntensity < IntensityThreshold: continue
            # doppler broadening coefficient (GammaD)
            cMassMol = 1.66053873e-27 # hapi
            m = molecularMass(MoleculeNumberDB,IsoNumberDB) * cMassMol * 1000
            GammaD = sqrt(2*cBolts*T*log(2)/m/cc**2)*LineCenterDB
            # lorentz broadening coefficient
            Gamma0 = EnvironmentDependency_Gamma0(Gamma0DB,T,Tref,p,pref,TempRatioPowerDB)
            # get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
            # XXX min or max?
            OmegaWingF = max(OmegaWing,OmegaWingHW*Gamma0,OmegaWingHW*GammaD)
            # shift coefficient
            Shift0 = Shift0DB*p/pref
            # XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
            # restrict the evaluation to the wavenumber window around the line
            # XXX time?
            BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
            BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
            lineshape_vals = PROFILE_VOIGT(LineCenterDB+Shift0,GammaD,Gamma0,Omegas[BoundIndexLower:BoundIndexUpper])[0]
            Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      LineIntensity * lineshape_vals
    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect
# calculate absorption for Lorentz profile
def absorptionCoefficient_Lorentz(Components=None,SourceTables=None,partitionFunction=PYTIPS,
Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
ParameterBindings=DefaultParameterBindings,
EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
OmegaRange: wavenumber range to consider.
OmegaStep: wavenumber step to consider.
OmegaWing: absolute wing for calculating a lineshape (in cm-1)
IntensityThreshold: threshold for intensities
OmegaWingHW: relative wing for calculating a lineshape (in halfwidths)
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts significant digits in OmegaStep)
OUTPUT PARAMETERS:
Omegas: wavenumber grid with respect to parameters OmegaRange and OmegaStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using Lorentz profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
User can vary a wide range of parameters to control a process of calculation
(such as OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, IntensityThreshold).
The choise of these parameters depends on properties of a particular linelist.
Default values are a sort of guess which gives a decent precision (on average)
for a reasonable amount of cpu time. To increase calculation accuracy,
user should use a trial and error method.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_Lorentz(((2,1),),'co2',OmegaStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
# warn user about too large omega step
if OmegaStep>0.1: warn('Too small omega step: possible accuracy decline')
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
IntensityThreshold,Format = \
getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
OmegaStep,OmegaWing,IntensityThreshold,Format)
# get uniform linespace for cross-section
#number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
#Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M,I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M,I))
ABUNDANCES[(M,I)] = ni
NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p,T)
# SourceTables contain multiple tables
for TableName in SourceTables:
# get line centers
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# loop through line centers (single stream)
for RowID in range(nline):
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
#Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_air'][RowID]
#Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_self'][RowID]
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][GammaL][RowID]
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
#TempRatioPowerDB = 1.0 # for planar molecules
if LineShift:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
else:
Shift0DB = 0
# filter by molecule and isotopologue
if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
# TODO: optimize
SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)
# get all environment dependences from voigt parameters
# intensity
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
LowerStateEnergyDB,LineCenterDB)
# FILTER by LineIntensity: compare it with IntencityThreshold
# TODO: apply wing narrowing instead of filtering, this would be more appropriate
if LineIntensity < IntensityThreshold: continue
# lorentz broadening coefficient
Gamma0 = EnvironmentDependency_Gamma0(Gamma0DB,T,Tref,p,pref,TempRatioPowerDB)
# get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing,OmegaWingHW*Gamma0)
# shift coefficient
Shift0 = Shift0DB*p/pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
#PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
lineshape_vals = PROFILE_LORENTZ(LineCenterDB+Shift0,Gamma0,Omegas[BoundIndexLower:BoundIndexUpper])
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
LineIntensity * lineshape_vals
if File: save_to_file(File,Format,Omegas,Xsect)
return Omegas,Xsect
# calculate absorption for Doppler profile
def absorptionCoefficient_Doppler(Components=None,SourceTables=None,partitionFunction=PYTIPS,
                                  Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
                                  IntensityThreshold=DefaultIntensityThreshold,
                                  OmegaWingHW=DefaultOmegaWingHW,
                                  ParameterBindings=DefaultParameterBindings,
                                  EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
                                  GammaL='dummy', HITRAN_units=True, LineShift=True,
                                  File=None, Format=None, OmegaGrid=None):
    """
    INPUT PARAMETERS:
        Components:  list of tuples [(M,I,D)], where
                        M - HITRAN molecule number,
                        I - HITRAN isotopologue number,
                        D - abundance (optional)
        SourceTables:  list of tables from which to calculate cross-section   (optional)
        partitionFunction:  pointer to partition function (default is PYTIPS) (optional)
        Environment:  dictionary containing thermodynamic parameters.
                        'p' - pressure in atmospheres,
                        'T' - temperature in Kelvin
                        Default={'p':1.,'T':296.}
        OmegaRange:  wavenumber range to consider.
        OmegaStep:   wavenumber step to consider.
        OmegaWing:   absolute wing for calculating a lineshape (in cm-1)
        OmegaGrid:   explicit wavenumber grid; overrides OmegaRange/OmegaStep (optional)
        IntensityThreshold:  threshold for intensities
        OmegaWingHW:  relative wing for calculating a lineshape (in halfwidths)
        GammaL:  specifies broadening parameter ('gamma_air' or 'gamma_self')
        HITRAN_units:  use cm2/molecule (True) or cm-1 (False) for absorption coefficient
        LineShift:  apply the pressure-induced line shift (delta_air) if True
        File:   write output to file (if specified)
        Format:  c-format of file output (accounts significant digits in OmegaStep)
    OUTPUT PARAMETERS:
        Omegas: wavenumber grid with respect to parameters OmegaRange and OmegaStep
        Xsect: absorption coefficient calculated on the grid
    ---
    DESCRIPTION:
        Calculate absorption coefficient using Doppler (Gauss) profile.
        Absorption coefficient is calculated at arbitrary temperature and pressure.
        User can vary a wide range of parameters to control a process of calculation
        (such as OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, IntensityThreshold).
        The choice of these parameters depends on properties of a particular linelist.
        Default values are a sort of guess which give a decent precision (on average)
        for a reasonable amount of cpu time. To increase calculation accuracy,
        user should use a trial and error method.
    ---
    EXAMPLE OF USAGE:
        nu,coef = absorptionCoefficient_Doppler(((2,1),),'co2',OmegaStep=0.01,
                                                HITRAN_units=False,GammaL='gamma_self')
    ---
    """
    # warn user about too large an omega step for the narrow Doppler profile
    # BUGFIX: guard against the default OmegaStep=None (comparison with None
    # raises TypeError in Python 3) and report the step as too LARGE, not
    # "too small" as the previous message claimed
    if OmegaStep is not None and OmegaStep > 0.005:
        warn('Too large omega step: possible accuracy decline')
    # "bug" with 1-element list
    Components = listOfTuples(Components)
    SourceTables = listOfTuples(SourceTables)
    # determine final input values
    Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
    IntensityThreshold,Format = \
       getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
                                OmegaStep,OmegaWing,IntensityThreshold,Format)
    # special for Doppler case: set OmegaStep to a smaller value
    if not OmegaStep: OmegaStep = 0.001
    # get uniform linespace for cross-section
    if OmegaGrid is not None:
        Omegas = npsort(OmegaGrid)
    else:
        Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
    number_of_points = len(Omegas)
    Xsect = zeros(number_of_points)
    # reference temperature and pressure
    Tref = __FloatType__(296.) # K
    pref = __FloatType__(1.) # atm
    # actual temperature and pressure
    T = Environment['T'] # K
    p = Environment['p'] # atm
    # create dictionary from Components
    ABUNDANCES = {}
    NATURAL_ABUNDANCES = {}
    for Component in Components:
        M = Component[0]
        I = Component[1]
        if len(Component) >= 3:
            ni = Component[2]
        else:
            try:
                ni = ISO[(M,I)][ISO_INDEX['abundance']]
            except KeyError:
                raise Exception('cannot find component M,I = %d,%d.' % (M,I))
        ABUNDANCES[(M,I)] = ni
        NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]
    # precalculation of volume concentration
    if HITRAN_units:
        factor = __FloatType__(1.0)
    else:
        factor = volumeConcentration(p,T)
    # SourceTables contain multiple tables
    for TableName in SourceTables:
        # get line centers
        nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
        # loop through line centers (single stream)
        for RowID in range(nline):
            # get basic line parameters (lower level)
            LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
            LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
            LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
            MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
            IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
            if LineShift:
                Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
            else:
                Shift0DB = 0
            # filter by molecule and isotopologue
            if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue
            # partition functions for T and Tref
            # TODO: optimize
            SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
            SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)
            # get all environment dependences from voigt parameters
            # intensity
            LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
                                                            LowerStateEnergyDB,LineCenterDB)
            # FILTER by LineIntensity: compare it with IntensityThreshold
            # TODO: apply wing narrowing instead of filtering, this would be more appropriate
            if LineIntensity < IntensityThreshold: continue
            # Doppler broadening HWHM (cm-1): GammaD = (nu0/c)*sqrt(2*ln2*kB*T/m)
            cMassMol = 1.66053873e-27 # atomic mass constant, kg
            cc_ = 2.99792458e8        # speed of light, m/s
            cBolts_ = 1.3806503e-23   # Boltzmann constant, J/K
            fSqrtMass = sqrt(molecularMass(MoleculeNumberDB,IsoNumberDB))
            GammaD = (cSqrt2Ln2/cc_)*sqrt(cBolts_/cMassMol)*sqrt(T) * LineCenterDB/fSqrtMass
            # get final wing of the line according to GammaD, OmegaWingHW and OmegaWing
            OmegaWingF = max(OmegaWing,OmegaWingHW*GammaD)
            # pressure shift coefficient
            Shift0 = Shift0DB*p/pref
            # other parameters (Delta0, Delta2, anuVC etc.) will be included in HTP version
            BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
            BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
            lineshape_vals = PROFILE_DOPPLER(LineCenterDB+Shift0,GammaD,Omegas[BoundIndexLower:BoundIndexUpper])
            Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
                                                      LineIntensity * lineshape_vals
    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect
# ---------------------------------------------------------------------------
# SHORTCUTS AND ALIASES FOR ABSORPTION COEFFICIENTS
# ---------------------------------------------------------------------------
absorptionCoefficient_Gauss = absorptionCoefficient_Doppler
def abscoef_HT(table=None,step=None,grid=None,env=None,file=None):
    """Shortcut for absorptionCoefficient_HT; env defaults to {'T':296.,'p':1.}."""
    # BUGFIX: None sentinel instead of a shared mutable default dict
    if env is None: env = {'T':296.,'p':1.}
    return absorptionCoefficient_HT(SourceTables=table,OmegaStep=step,OmegaGrid=grid,Environment=env,File=file)
def abscoef_Voigt(table=None,step=None,grid=None,env=None,file=None):
    """Shortcut for absorptionCoefficient_Voigt; env defaults to {'T':296.,'p':1.}."""
    # BUGFIX: None sentinel instead of a shared mutable default dict
    if env is None: env = {'T':296.,'p':1.}
    return absorptionCoefficient_Voigt(SourceTables=table,OmegaStep=step,OmegaGrid=grid,Environment=env,File=file)
def abscoef_Lorentz(table=None,step=None,grid=None,env=None,file=None):
    """Shortcut for absorptionCoefficient_Lorentz; env defaults to {'T':296.,'p':1.}."""
    # BUGFIX: None sentinel instead of a shared mutable default dict
    if env is None: env = {'T':296.,'p':1.}
    return absorptionCoefficient_Lorentz(SourceTables=table,OmegaStep=step,OmegaGrid=grid,Environment=env,File=file)
def abscoef_Doppler(table=None,step=None,grid=None,env=None,file=None):
    """Shortcut for absorptionCoefficient_Doppler; env defaults to {'T':296.,'p':1.}."""
    # BUGFIX: None sentinel instead of a shared mutable default dict
    if env is None: env = {'T':296.,'p':1.}
    return absorptionCoefficient_Doppler(SourceTables=table,OmegaStep=step,OmegaGrid=grid,Environment=env,File=file)
abscoef_Gauss = abscoef_Doppler
def abscoef(table=None,step=None,grid=None,env=None,file=None): # default
    """Default shortcut (Lorentz profile); env defaults to {'T':296.,'p':1.}."""
    # BUGFIX: None sentinel instead of a shared mutable default dict
    if env is None: env = {'T':296.,'p':1.}
    return absorptionCoefficient_Lorentz(SourceTables=table,OmegaStep=step,OmegaGrid=grid,Environment=env,File=file)
# ---------------------------------------------------------------------------
def transmittanceSpectrum(Omegas,AbsorptionCoefficient,Environment=None,
                          File=None, Format='%e %e'):
    """
    INPUT PARAMETERS:
        Omegas:                wavenumber grid                 (required)
        AbsorptionCoefficient: absorption coefficient on grid  (required)
        Environment:  dictionary containing path length in cm.
                      Default={'l':100.}
        File:         name of the output file                 (optional)
        Format: c format used in file output, default '%e %e' (optional)
    OUTPUT PARAMETERS:
        Omegas: wavenumber grid
        Xsect:  transmittance spectrum calculated on the grid
    ---
    DESCRIPTION:
        Calculate a transmittance spectrum (dimensionless) based
        on previously calculated absorption coefficient.
        Transmittance spectrum is calculated at an arbitrary
        optical path length 'l' (1 m by default)
    ---
    EXAMPLE OF USAGE:
        nu,trans = transmittanceSpectrum(nu,coef)
    ---
    """
    # BUGFIX: avoid a mutable dict as a default argument value
    if Environment is None: Environment = {'l':100.}
    l = Environment['l']
    # Beer-Lambert law: T = exp(-k*l)
    Xsect = exp(-AbsorptionCoefficient*l)
    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect
def absorptionSpectrum(Omegas,AbsorptionCoefficient,Environment=None,
                       File=None, Format='%e %e'):
    """
    INPUT PARAMETERS:
        Omegas:                wavenumber grid                 (required)
        AbsorptionCoefficient: absorption coefficient on grid  (required)
        Environment:  dictionary containing path length in cm.
                      Default={'l':100.}
        File:         name of the output file                 (optional)
        Format: c format used in file output, default '%e %e' (optional)
    OUTPUT PARAMETERS:
        Omegas: wavenumber grid
        Xsect:  absorption spectrum calculated on the grid
    ---
    DESCRIPTION:
        Calculate an absorption spectrum (dimensionless) based
        on previously calculated absorption coefficient.
        Absorption spectrum is calculated at an arbitrary
        optical path length 'l' (1 m by default)
    ---
    EXAMPLE OF USAGE:
        nu,absorp = absorptionSpectrum(nu,coef)
    ---
    """
    # BUGFIX: avoid a mutable dict as a default argument value
    if Environment is None: Environment = {'l':100.}
    l = Environment['l']
    # absorbed fraction: A = 1 - exp(-k*l)
    Xsect = 1-exp(-AbsorptionCoefficient*l)
    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect
def radianceSpectrum(Omegas,AbsorptionCoefficient,Environment=None,
                     File=None, Format='%e %e'):
    """
    INPUT PARAMETERS:
        Omegas:                wavenumber grid                 (required)
        AbsorptionCoefficient: absorption coefficient on grid  (required)
        Environment:  dictionary containing path length in cm.
                      and temperature in Kelvin.
                      Default={'l':100.,'T':296.}
        File:         name of the output file                 (optional)
        Format: c format used in file output, default '%e %e' (optional)
    OUTPUT PARAMETERS:
        Omegas: wavenumber grid
        Xsect:  radiance spectrum calculated on the grid
    ---
    DESCRIPTION:
        Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
        on previously calculated absorption coefficient.
        Radiance spectrum is calculated at an arbitrary
        optical path length 'l' (1 m by default) and
        temperature 'T' (296 K by default). For obtaining a
        physically meaningful result 'T' must be the same
        as a temperature which was used in absorption coefficient.
    ---
    EXAMPLE OF USAGE:
        nu,radi = radianceSpectrum(nu,coef)
    ---
    """
    # BUGFIX: avoid a mutable dict as a default argument value
    if Environment is None: Environment = {'l':100.,'T':296.}
    l = Environment['l']
    T = Environment['T']
    # emissivity from absorption over the path (Kirchhoff's law)
    Alw = 1-exp(-AbsorptionCoefficient*l)
    # Planck blackbody radiance per wavenumber, scaled to W/sr/cm^2/cm-1
    LBBTw = 2*hh*cc**2*Omegas**3 / (exp(hh*cc*Omegas/(cBolts*T)) - 1) * 1.0E-7
    Xsect = Alw*LBBTw # W/sr/cm**2/cm**-1
    if File: save_to_file(File,Format,Omegas,Xsect)
    return Omegas,Xsect
# GET X,Y FOR FINE PLOTTING OF A STICK SPECTRUM
def stick_xy_arrays(cent, intens):
    """Expand line centers/intensities into (x, y) arrays drawing sticks.

    Each line produces three points sharing the line-center x value with
    intensities (0, intensity, 0), so a continuous line plot renders as
    vertical sticks.
    """
    n = len(cent)
    cent_ = zeros(n*3)
    intens_ = zeros(n*3)
    for i in range(n):
        intens_[3*i+1] = intens[i]       # middle point carries the intensity
        cent_[(3*i):(3*i+3)] = cent[i]   # all three points share the center
    return cent_, intens_
def getStickXY(TableName):
    """
    Get X and Y for fine plotting of a stick spectrum.
    Usage: X,Y = getStickXY(TableName).
    """
    cent, intens = getColumns(TableName, ('nu','sw'))
    return stick_xy_arrays(cent, intens)
# /GET X,Y FOR FINE PLOTTING OF A STICK SPECTRUM
# LOW-RES SPECTRA (CONVOLUTION WITH APPARATUS FUNCTION)
# /LOW-RES SPECTRA (CONVOLUTION WITH APPARATUS FUNCTION)
# /----------------------------------------------------------------------------
# ------------------ HITRAN-ON-THE-WEB COMPATIBILITY -------------------------
def read_hotw(filename):
    """
    Read cross-section file fetched from HITRAN-on-the-Web.
    The format of the file line must be as follows:
      nu, coef
    Other lines are omitted.

    Returns a pair of numpy arrays (nu, coef).
    """
    nu = []
    coef = []
    # BUGFIX: use a context manager so the handle is closed even on error
    # (the previous version never closed the file)
    with open(filename, 'r') as f:
        for line in f:
            pars = line.split()
            # skip lines that do not parse as a "nu coef" pair;
            # catch only the parsing errors instead of a bare except
            try:
                nu.append(float(pars[0]))
                coef.append(float(pars[1]))
            except (ValueError, IndexError):
                pass
    return array(nu), array(coef)
# alias for read_hotw for backwards compatibility
read_xsect = read_hotw
# /----------------------------------------------------------------------------
# ------------------ SPECTRAL CONVOLUTION -------------------------
# rectangular slit function
def SLIT_RECTANGULAR(x,g):
    """
    Instrumental (slit) function.
    B(x) = 1/γ , if |x| ≤ γ/2 & B(x) = 0, if |x| > γ/2,
    where γ is a slit width or the instrumental resolution.
    """
    # start from an all-zero profile and fill only the points inside the slit
    y = zeros(len(x))
    inside = abs(x) <= g/2
    y[inside] = 1/g
    return y
# triangular slit function
def SLIT_TRIANGULAR(x,g):
    """
    Instrumental (slit) function.
    B(x) = 1/γ*(1-|x|/γ), if |x| ≤ γ & B(x) = 0, if |x| > γ,
    where γ is the line width equal to the half base of the triangle.
    """
    # zero outside the triangle; linear ramp towards the apex inside
    y = zeros(len(x))
    inside = abs(x) <= g
    y[inside] = 1/g * (1 - abs(x[inside])/g)
    return y
# gaussian slit function
def SLIT_GAUSSIAN(x,g):
    """
    Instrumental (slit) function.
    B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2),
    where γ/2 is a gaussian half-width at half-maximum.
    """
    # use a local half-width instead of mutating the argument in place
    hwhm = g/2
    return sqrt(log(2))/(sqrt(pi)*hwhm)*exp(-log(2)*(x/hwhm)**2)
# dispersion slit function
def SLIT_DISPERSION(x,g):
    """
    Instrumental (slit) function.
    B(x) = γ/pi/(x**2+γ**2),
    where γ/2 is a lorentzian half-width at half-maximum.
    """
    # use a local half-width instead of mutating the argument in place
    hwhm = g/2
    return hwhm/pi/(x**2+hwhm**2)
# cosinus slit function
def SLIT_COSINUS(x,g):
    """
    Instrumental (slit) function: raised-cosine lobe,
    B(x) = (cos(pi*x/γ)+1)/(2γ).
    """
    return (1 + cos(pi/g*x))/(2*g)
# diffraction slit function
def SLIT_DIFFRACTION(x,g):
    """
    Instrumental (slit) function: sinc^2 diffraction pattern.
    """
    y = zeros(len(x))
    nonzero = x != 0
    arg = (pi/g)*x[nonzero]
    y[nonzero] = (sin(arg)**2/arg**2)/g
    # NOTE(review): the x->0 limit of sinc^2/γ is 1/γ, yet 1 is assigned at
    # x==0 exactly (discontinuous unless γ==1) — preserved as in the original;
    # confirm against upstream intent.
    y[~nonzero] = 1
    return y
# apparatus function of the ideal Michelson interferometer
def SLIT_MICHELSON(x,g):
    """
    Instrumental (slit) function.
    B(x) = 2/γ*sin(2pi*x/γ)/(2pi*x/γ) if x!=0 else 1,
    where 1/γ is the maximum optical path difference.
    """
    y = zeros(len(x))
    nonzero = x != 0
    arg = (2*pi/g)*x[nonzero]
    y[nonzero] = 2/g*sin(arg)/arg
    # NOTE(review): the x->0 limit of the expression above is 2/γ, yet 1 is
    # assigned at x==0 (matching the documented formula) — preserved as-is.
    y[~nonzero] = 1
    return y
# spectral convolution with an apparatus (slit) function
def convolveSpectrum(Omega,CrossSection,Resolution=0.1,AF_wing=10.,SlitFunction=SLIT_RECTANGULAR):
    """
    INPUT PARAMETERS:
        Omega:         wavenumber grid                                  (required)
        CrossSection:  high-res cross section calculated on grid        (required)
        Resolution:    instrumental resolution γ                        (optional)
        AF_wing:       instrumental function wing                       (optional)
        SlitFunction:  instrumental function for low-res spectra calculation (optional)
    OUTPUT PARAMETERS:
        Omega: wavenumber grid
        CrossSection: low-res cross section calculated on grid
        i1: lower index in Omega input
        i2: higher index in Omega input
        slit: slit function calculated over grid [-AF_wing; AF_wing]
                with the step equal to instrumental resolution.
    ---
    DESCRIPTION:
        Produce a simulation of experimental spectrum via the convolution
        of a "dry" spectrum with an instrumental function.
        Instrumental function is provided as a parameter and
        is calculated in a grid with the width=AF_wing and step=Resolution.
    ---
    EXAMPLE OF USAGE:
        nu_,radi_,i,j,slit = convolveSpectrum(nu,radi,Resolution=2.0,AF_wing=10.0,
                                              SlitFunction=SLIT_MICHELSON)
    ---
    """
    step = Omega[1]-Omega[0]
    if step>=Resolution: raise Exception('step must be less than resolution')
    x = arange(-AF_wing,AF_wing+step,step)
    slit = SlitFunction(x,Resolution)
    # normalize the slit so its discrete integral equals unity
    slit /= sum(slit)*step # simple normalization
    # BUGFIX: use floor division — "/" yields a float in Python 3,
    # which is not a valid slice index
    left_bnd = len(slit)//2
    right_bnd = len(Omega) - len(slit)//2
    # trim edges affected by the finite slit support
    CrossSectionLowRes = convolve(CrossSection,slit,mode='same')*step
    return Omega[left_bnd:right_bnd],CrossSectionLowRes[left_bnd:right_bnd],left_bnd,right_bnd,slit
# DEBUG
# spectral convolution with an apparatus (slit) function
def convolveSpectrumSame(Omega,CrossSection,Resolution=0.1,AF_wing=10.,SlitFunction=SLIT_RECTANGULAR):
    """
    Convolves cross section with a slit function with given parameters.

    Debug variant of convolveSpectrum: performs no slit normalization and
    no edge trimming (the result keeps the full input grid), and prints
    diagnostics. Returns (Omega, low-res cross section, None, None, slit).
    """
    step = Omega[1]-Omega[0]
    x = arange(-AF_wing,AF_wing+step,step)
    slit = SlitFunction(x,Resolution)
    # diagnostic output (deliberate: this is a debug helper)
    print('step=')
    print(step)
    print('x=')
    print(x)
    print('slitfunc=')
    print(SlitFunction)
    CrossSectionLowRes = convolve(CrossSection,slit,mode='same')*step
    return Omega,CrossSectionLowRes,None,None,slit
# DEBUG
def convolveSpectrumFull(Omega,CrossSection,Resolution=0.1,AF_wing=10.,SlitFunction=SLIT_RECTANGULAR):
    """
    Convolves cross section with a slit function with given parameters.

    Debug variant using mode='full' convolution, so the result array is
    longer than the input grid. NOTE(review): returns a 4-tuple (no slit),
    unlike the 5-tuple of convolveSpectrum/convolveSpectrumSame — confirm
    this asymmetry is intended.
    """
    step = Omega[1]-Omega[0]
    x = arange(-AF_wing,AF_wing+step,step)
    slit = SlitFunction(x,Resolution)
    # diagnostic output (deliberate: this is a debug helper)
    print('step=')
    print(step)
    print('x=')
    print(x)
    print('slitfunc=')
    print(SlitFunction)
    CrossSectionLowRes = convolve(CrossSection,slit,mode='full')*step
    return Omega,CrossSectionLowRes,None,None
# ------------------------------------------------------------------
|
michaelaye/hapi
|
hapi/hapi.py
|
Python
|
bsd-3-clause
| 541,258
|
[
"Gaussian"
] |
f0b42587786374a604316b674bffcd3b166fc8602c639616e25a203350642910
|
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
# TODO: class largely copied from test_calc
from math import pi
from mock import Mock
from nose.tools import raises
from diffcalc import settings
try:
from numpy import matrix
except ImportError:
from numjy import matrix
from diffcalc.hkl.willmott.calc import \
WillmottHorizontalUbCalcStrategy, WillmottHorizontalCalculator, \
WillmottHorizontalPosition as Pos, WillmottHorizontalGeometry
from test.tools import assert_array_almost_equal, \
assert_second_dict_almost_in_first, matrixeq_
from diffcalc.ub.calc import UBCalculation
from diffcalc.ub.crystal import CrystalUnderTest
from diffcalc.ub.persistence import UbCalculationNonPersister
from diffcalc.util import DiffcalcException
from test.diffcalc.test_hardware import SimpleHardwareAdapter
from test.diffcalc.hkl.vlieg.test_calc import createMockUbcalc, \
createMockDiffractometerGeometry
import diffcalc.hkl.willmott.calc # @UnusedImport
TORAD = pi / 180
TODEG = 180 / pi
class _BaseTest():
    """Base fixture for Willmott horizontal-geometry hkl calculation tests.

    Wires mocked ubcalc/geometry/hardware into diffcalc settings and
    provides round-trip helpers comparing hklToAngles/anglesToHkl results
    against expected values to ``self.places`` decimal places.
    """
    def setup_method(self):
        self.mock_ubcalc = createMockUbcalc(None)
        self.mock_geometry = createMockDiffractometerGeometry()
        self.mock_hardware = SimpleHardwareAdapter(
            ['delta', 'gamma', 'omegah', 'phi'])
        self.constraints = Mock()
        settings.geometry = self.mock_geometry
        settings.hardware = self.mock_hardware
        self.calc = WillmottHorizontalCalculator(self.mock_ubcalc, self.constraints)
        # default comparison precision (decimal places); tests override as needed
        self.places = 12
    def _check_hkl_to_angles(self, testname, zrot, yrot, hkl, pos_expected,
                             wavelength, virtual_expected={}):
        # forward check: hkl -> diffractometer angles
        print ('_check_hkl_to_angles(%s, %.1f, %.1f, %s, %s, %.2f, %s)'
               % (testname, zrot, yrot, hkl, pos_expected, wavelength,
                  virtual_expected))
        self.zrot, self.yrot = zrot, yrot
        self._configure_ub()
        pos, virtual = self.calc.hklToAngles(hkl[0], hkl[1], hkl[2],
                                             wavelength)
        assert_array_almost_equal(pos.totuple(), pos_expected.totuple(),
                                  self.places)
        assert_second_dict_almost_in_first(virtual, virtual_expected)
    def _check_angles_to_hkl(self, testname, zrot, yrot, hkl_expected, pos,
                             wavelength, virtual_expected={}):
        # inverse check: diffractometer angles -> hkl
        print ('_check_angles_to_hkl(%s, %.1f, %.1f, %s, %s, %.2f, %s)' %
               (testname, zrot, yrot, hkl_expected, pos, wavelength,
                virtual_expected))
        self.zrot, self.yrot = zrot, yrot
        self._configure_ub()
        hkl, virtual = self.calc.anglesToHkl(pos, wavelength)
        assert_array_almost_equal(hkl, hkl_expected, self.places)
        assert_second_dict_almost_in_first(virtual, virtual_expected)
    @raises(DiffcalcException)
    def _check_hkl_to_angles_fails(self, *args):
        # same as _check_hkl_to_angles but expects DiffcalcException
        self._check_hkl_to_angles(*args)
# Primary and secondary reflections found with the help of DDIF on Diamond's
# i07 on Jan 27 2010
Si_5_5_12_WAVELENGTH = 0.6358
# (h, k, l) indices and measured positions of the two orienting reflections
Si_5_5_12_HKL0 = 2, 19, 32
Si_5_5_12_REF0 = Pos(delta=21.975, gamma=4.419, omegah=2, phi=326.2)
Si_5_5_12_HKL1 = 0, 7, 22
Si_5_5_12_REF1 = Pos(delta=11.292, gamma=2.844, omegah=2, phi=124.1)
# This is U matrix displayed by DDIF
U_FROM_DDIF = matrix([[0.233140, 0.510833, 0.827463],
                      [-0.65596, -0.545557, 0.521617],
                      [0.717888, -0.664392, 0.207894]])
# This is the version that Diffcalc comes up with ( see following test)
# (same matrix as U_FROM_DDIF up to row order and signs — see the
# conclusion comments further down)
Si_5_5_12_U_DIFFCALC = matrix([[-0.7178876, 0.6643924, -0.2078944],
                               [-0.6559596, -0.5455572, 0.5216170],
                               [0.2331402, 0.5108327, 0.8274634]])
class TestUBCalculationWithWillmotStrategy_Si_5_5_12():
    """Checks the U matrix computed from the two Si(5 5 12) reflections."""
    def setup_method(self):
        hardware = Mock()
        hardware.get_axes_names.return_value = ('d', 'g', 'oh', 'p')
        settings.geometry = WillmottHorizontalGeometry()
        settings.hardware = hardware
        self.ubcalc = UBCalculation(UbCalculationNonPersister(),
                                    WillmottHorizontalUbCalcStrategy())
    def testAgainstResultsFromJan_27_2010(self):
        self.ubcalc.start_new('test')
        self.ubcalc.set_lattice('Si_5_5_12', 7.68, 53.48, 75.63, 90, 90, 90)
        # 12.39842 / wavelength: presumably converts wavelength to energy
        # (keV*Angstrom relation) -- TODO confirm units
        self.ubcalc.add_reflection(
            Si_5_5_12_HKL0[0], Si_5_5_12_HKL0[1], Si_5_5_12_HKL0[2],
            Si_5_5_12_REF0, 12.39842 / Si_5_5_12_WAVELENGTH, 'ref0', None)
        self.ubcalc.add_reflection(
            Si_5_5_12_HKL1[0], Si_5_5_12_HKL1[1], Si_5_5_12_HKL1[2],
            Si_5_5_12_REF1, 12.39842 / Si_5_5_12_WAVELENGTH, 'ref1', None)
        self.ubcalc.calculate_UB()
        print "U: ", self.ubcalc.U
        print "UB: ", self.ubcalc.UB
        matrixeq_(self.ubcalc.U, Si_5_5_12_U_DIFFCALC)
class TestSurfaceNormalVertical_Si_5_5_12_PosGamma(_BaseTest):
    """hkl <-> angle round trips for Si(5 5 12) with positive gamma chosen."""
    def setup_method(self):
        _BaseTest.setup_method(self)
        # fixed incidence-angle constraint betain=2 (units assumed degrees
        # here; the NegGamma subclass multiplies by TORAD -- TODO confirm)
        self.constraints.reference = {'betain': 2}
        self.wavelength = 0.6358
        B = CrystalUnderTest('xtal', 7.68, 53.48,
                             75.63, 90, 90, 90).B
        # UB built from the U matrix established by the UB-calculation test
        self.UB = Si_5_5_12_U_DIFFCALC * B
        diffcalc.hkl.willmott.calc.CHOOSE_POSITIVE_GAMMA = True
    def _configure_ub(self):
        # install the precomputed UB on the mocked ubcalc
        self.mock_ubcalc.UB = self.UB
    def _check(self, hkl, pos, virtual_expected={}, fails=False):
        # round trip: angles -> hkl, then hkl -> angles (or expected failure)
        self._check_angles_to_hkl('', 999, 999, hkl, pos, self.wavelength,
                                  virtual_expected)
        if fails:
            self._check_hkl_to_angles_fails('', 999, 999, hkl, pos,
                                            self.wavelength, virtual_expected)
        else:
            self._check_hkl_to_angles('', 999, 999, hkl, pos, self.wavelength,
                                      virtual_expected)
    def testHkl_2_19_32_found_orientation_setting(self):
        '''Check that the or0 reflection maps back to the assumed hkl'''
        self.places = 2
        self._check_angles_to_hkl('', 999, 999, Si_5_5_12_HKL0,
                                  Si_5_5_12_REF0,
                                  self.wavelength, {'betain': 2})
    def testHkl_0_7_22_found_orientation_setting(self):
        '''Check that the or1 reflection maps back to the assumed hkl'''
        self.places = 0
        self._check_angles_to_hkl('', 999, 999, Si_5_5_12_HKL1,
                                  Si_5_5_12_REF1,
                                  self.wavelength, {'betain': 2})
    def testHkl_2_19_32_calculated_from_DDIF(self):
        # positions below were calculated by DDIF (3 decimal places recorded)
        self.places = 3
        self._check((2, 19, 32),
                    Pos(delta=21.974, gamma=4.419, omegah=2, phi=-33.803),
                    {'betain': 2})
    def testHkl_0_7_22_calculated_from_DDIF(self):
        self.places = 3
        self._check((0, 7, 22),
                    Pos(delta=11.242, gamma=3.038, omegah=2, phi=123.064),
                    {'betain': 2})
    def testHkl_2_m5_12_calculated_from_DDIF(self):
        self.places = 3
        self._check((2, -5, 12),
                    Pos(delta=5.224, gamma=10.415, omegah=2, phi=-1.972),
                    {'betain': 2})
# conclusion:
# given or0 from testHkl_2_19_32_found_orientation_setting and
# or1 from testHkl_0_7_22_found_orientation_setting,
# we can calculate a U matrix which agrees with that from DDIF except for
# signs and row order.
# We can also calculate values for 2_19_32 and 0_7_22 that match those
# calculated by DDIF to the number of recorded decimal places (3).
class SkipTestSurfaceNormalVertical_Si_5_5_12_NegGamma(
    TestSurfaceNormalVertical_Si_5_5_12_PosGamma):
    """When choosing -ve gamma delta ends up being -ve too"""
    # Prefixed with "SkipTest" so the test runner does not collect it.
    def setup_method(self):
        _BaseTest.setup_method(self)
        # NOTE(review): betain is converted with TORAD here, unlike the
        # parent class which uses the bare value 2 -- confirm intended units
        self.constraints.reference = {'betain': 2 * TORAD}
        self.wavelength = 0.6358
        B = CrystalUnderTest('xtal', 7.68, 53.48,
                             75.63, 90, 90, 90).B
        self.UB = Si_5_5_12_U_DIFFCALC * B
        diffcalc.hkl.willmott.calc.CHOOSE_POSITIVE_GAMMA = False
##################################################################
# Primary and secondary reflections found with the help of DDIF on Diamond's
# i07 on Jan 28/29 2010
Pt531_HKL0 = -1.000, 1.000, 6.0000
Pt531_REF0 = Pos(delta=9.465, gamma=16.301, omegah=2,
                 phi=307.94 - 360)
# positions Diffcalc predicts for the same reflections (see tests below)
Pt531_REF0_DIFFCALC = Pos(
    9.397102509657, 16.181230279320, 2.000000000000, -52.139290474913)
Pt531_HKL1 = -2.000, -1.000, 7.0000
Pt531_REF1 = Pos(delta=11.094, gamma=11.945, omegah=2, phi=238.991 - 360)
Pt531_REF1_DIFFCALC = Pos(
    11.012695836306, 11.863612760237, 2.000000000000, -121.215597507237)
Pt531_HKL2 = 1, 1, 9
Pt531_REF2 = Pos(delta=14.272, gamma=7.806, omegah=2,
                 phi=22.9)
Pt531_REF2_DIFFCALC = Pos(
    14.188161709766, 7.758593908726, 2.000000000000, 23.020313153847)
Pt531_WAVELENGTH = 0.6358
# This is U matrix displayed by DDIF
# (rebinds the module-level U_FROM_DDIF defined for Si(5 5 12) above)
U_FROM_DDIF = matrix([[-0.00312594, -0.00063417, 0.99999491],
                      [0.99999229, -0.00237817, 0.00312443],
                      [0.00237618, 0.99999697, 0.00064159]])
# This is the version that Diffcalc comes up with ( see following test)
Pt531_U_DIFFCALC = matrix([[-0.0023763, -0.9999970, -0.0006416],
                           [0.9999923, -0.0023783, 0.0031244],
                           [-0.0031259, -0.0006342, 0.9999949]])
class TestUBCalculationWithWillmotStrategy_Pt531():
    """Checks the U matrix computed from the two Pt531 reflections."""
    def setup_method(self):
        hardware = Mock()
        hardware.get_axes_names.return_value = ('d', 'g', 'oh', 'p')
        settings.geometry = WillmottHorizontalGeometry()
        settings.hardware = hardware
        self.ubcalc = UBCalculation(UbCalculationNonPersister(),
                                    WillmottHorizontalUbCalcStrategy())
    def testAgainstResultsFromJan_27_2010(self):
        self.ubcalc.start_new('test')
        self.ubcalc.set_lattice('Pt531', 6.204, 4.806, 23.215, 90, 90, 49.8)
        # 12.39842 / wavelength: presumably converts wavelength to energy
        # (keV*Angstrom relation) -- TODO confirm units
        self.ubcalc.add_reflection(
            Pt531_HKL0[0], Pt531_HKL0[1], Pt531_HKL0[2], Pt531_REF0,
            12.39842 / Pt531_WAVELENGTH, 'ref0', None)
        self.ubcalc.add_reflection(
            Pt531_HKL1[0], Pt531_HKL1[1], Pt531_HKL1[2], Pt531_REF1,
            12.39842 / Pt531_WAVELENGTH, 'ref1', None)
        self.ubcalc.calculate_UB()
        print "U: ", self.ubcalc.U
        print "UB: ", self.ubcalc.UB
        matrixeq_(self.ubcalc.U, Pt531_U_DIFFCALC)
class TestSurfaceNormalVertical_Pt531_PosGamma(_BaseTest):
    """hkl <-> angle predictions for Pt531 with positive gamma chosen."""
    def setup_method(self):
        _BaseTest.setup_method(self)
        # fixed incidence-angle constraint (units assumed degrees -- TODO confirm)
        self.constraints.reference = {'betain': 2}
        self.wavelength = Pt531_WAVELENGTH
        cut = CrystalUnderTest('Pt531', 6.204, 4.806, 23.215, 90, 90, 49.8)
        B = cut.B
        # UB built from the U matrix established by the UB-calculation test
        self.UB = Pt531_U_DIFFCALC * B
        diffcalc.hkl.willmott.calc.CHOOSE_POSITIVE_GAMMA = True
    def _configure_ub(self):
        # install the precomputed UB on the mocked ubcalc
        self.mock_ubcalc.UB = self.UB
    def _check(self, hkl, pos, virtual_expected={}, fails=False):
        # NOTE(review): unlike the Si(5 5 12) variant, the angles -> hkl leg
        # is commented out here; only hkl -> angles is exercised.
        # self._check_angles_to_hkl('', 999, 999, hkl, pos, self.wavelength,
        #                          virtual_expected)
        if fails:
            self._check_hkl_to_angles_fails('', 999, 999, hkl, pos,
                                            self.wavelength, virtual_expected)
        else:
            self._check_hkl_to_angles('', 999, 999, hkl, pos, self.wavelength,
                                      virtual_expected)
    def testHkl_0_found_orientation_setting(self):
        '''Check that the or0 reflection maps back to the assumed hkl'''
        self.places = 1
        self._check_angles_to_hkl('', 999, 999, Pt531_HKL0,
                                  Pt531_REF0,
                                  self.wavelength, {'betain': 2})
    def testHkl_1_found_orientation_setting(self):
        '''Check that the or1 reflection maps back to the assumed hkl'''
        self.places = 0
        self._check_angles_to_hkl('', 999, 999, Pt531_HKL1,
                                  Pt531_REF1,
                                  self.wavelength, {'betain': 2})
    def testHkl_0_predicted_versus_found_during_oriantation_phase(self):
        self._check(Pt531_HKL0,
                    Pt531_REF0_DIFFCALC, # inspected as close to Pt531_REF0
                    {'betain': 2})
    def testHkl_1_predicted_versus_found_during_oriantation_phase(self):
        self._check(Pt531_HKL1,
                    Pt531_REF1_DIFFCALC, # inspected as close to Pt531_REF1,
                    {'betain': 2})
    def testHkl_2_predicted_versus_found_during_oriantation_phase(self):
        self._check(Pt531_HKL2,
                    Pt531_REF2_DIFFCALC, # inspected as close to Pt531_REF2
                    {'betain': 2})
|
DiamondLightSource/diffcalc
|
test/diffcalc/hkl/willmot/test_calcwill.py
|
Python
|
gpl-3.0
| 13,345
|
[
"CRYSTAL"
] |
9e05d99a0b1fe007ca391e996fb0edc7cdd90c086ddc9980931aaaee492166d8
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import copy
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio.util._testing import assert_data_frame_almost_equal
class MetadataMixinTests:
    """Shared tests for objects exposing a dict-based ``metadata`` property.

    Mixed into concrete test cases that must provide
    ``_metadata_constructor_`` (builds the object under test from an
    optional ``metadata`` dict) and the ``assertReallyEqual`` /
    ``assertReallyNotEqual`` helpers.
    """
    def test_constructor_invalid_type(self):
        # Anything that is not a dict must be rejected with a TypeError.
        for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
            with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
                self._metadata_constructor_(metadata=md)
    def test_constructor_no_metadata(self):
        # None and {} both mean "no metadata".
        for md in None, {}:
            obj = self._metadata_constructor_(metadata=md)
            self.assertFalse(obj.has_metadata())
            self.assertEqual(obj.metadata, {})
    def test_constructor_with_metadata(self):
        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
        self.assertEqual(obj.metadata, {'foo': 'bar'})
        # Arbitrary hashable keys (including '' and ints) are permitted.
        obj = self._metadata_constructor_(
            metadata={'': '', 123: {'a': 'b', 'c': 'd'}})
        self.assertEqual(obj.metadata, {'': '', 123: {'a': 'b', 'c': 'd'}})
    def test_constructor_handles_missing_metadata_efficiently(self):
        # Missing metadata is stored internally as None, not an empty dict.
        self.assertIsNone(self._metadata_constructor_()._metadata)
        self.assertIsNone(self._metadata_constructor_(metadata=None)._metadata)
    def test_constructor_makes_shallow_copy_of_metadata(self):
        md = {'foo': 'bar', 42: []}
        obj = self._metadata_constructor_(metadata=md)
        self.assertEqual(obj.metadata, md)
        self.assertIsNot(obj.metadata, md)
        # Rebinding a key in the caller's dict does not affect the object...
        md['foo'] = 'baz'
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
        # ...but mutating a shared value does, because the copy is shallow.
        md[42].append(True)
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
    def test_eq(self):
        self.assertReallyEqual(
            self._metadata_constructor_(metadata={'foo': 42}),
            self._metadata_constructor_(metadata={'foo': 42}))
        self.assertReallyEqual(
            self._metadata_constructor_(metadata={'foo': 42, 123: {}}),
            self._metadata_constructor_(metadata={'foo': 42, 123: {}}))
    def test_eq_missing_metadata(self):
        # Absent metadata compares equal to empty metadata.
        self.assertReallyEqual(self._metadata_constructor_(),
                               self._metadata_constructor_())
        self.assertReallyEqual(self._metadata_constructor_(),
                               self._metadata_constructor_(metadata={}))
        self.assertReallyEqual(self._metadata_constructor_(metadata={}),
                               self._metadata_constructor_(metadata={}))
    def test_eq_handles_missing_metadata_efficiently(self):
        # Comparing two objects without metadata must not materialize dicts.
        obj1 = self._metadata_constructor_()
        obj2 = self._metadata_constructor_()
        self.assertReallyEqual(obj1, obj2)
        self.assertIsNone(obj1._metadata)
        self.assertIsNone(obj2._metadata)
    def test_ne(self):
        # Both have metadata.
        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
        obj2 = self._metadata_constructor_(metadata={'id': 'bar'})
        self.assertReallyNotEqual(obj1, obj2)
        # One has metadata.
        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
        obj2 = self._metadata_constructor_()
        self.assertReallyNotEqual(obj1, obj2)
    def test_copy_metadata_none(self):
        obj = self._metadata_constructor_()
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._metadata)
        self.assertIsNone(obj_copy._metadata)
    def test_copy_metadata_empty(self):
        obj = self._metadata_constructor_(metadata={})
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertEqual(obj._metadata, {})
        # The copy normalizes empty metadata back to None.
        self.assertIsNone(obj_copy._metadata)
    def test_copy_with_metadata(self):
        obj = self._metadata_constructor_(metadata={'foo': [1]})
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        # Shallow copy: new outer dict, shared inner values.
        self.assertIsNot(obj._metadata, obj_copy._metadata)
        self.assertIs(obj._metadata['foo'], obj_copy._metadata['foo'])
        obj_copy.metadata['foo'].append(2)
        obj_copy.metadata['foo2'] = 42
        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
        # The shared list mutation is visible on the original; the new key
        # is not.
        self.assertEqual(obj.metadata, {'foo': [1, 2]})
    def test_deepcopy_metadata_none(self):
        obj = self._metadata_constructor_()
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._metadata)
        self.assertIsNone(obj_copy._metadata)
    def test_deepcopy_metadata_empty(self):
        obj = self._metadata_constructor_(metadata={})
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertEqual(obj._metadata, {})
        self.assertIsNone(obj_copy._metadata)
    def test_deepcopy_with_metadata(self):
        obj = self._metadata_constructor_(metadata={'foo': [1]})
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        # Deep copy: inner values are independent too.
        self.assertIsNot(obj._metadata, obj_copy._metadata)
        self.assertIsNot(obj._metadata['foo'], obj_copy._metadata['foo'])
        obj_copy.metadata['foo'].append(2)
        obj_copy.metadata['foo2'] = 42
        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
        self.assertEqual(obj.metadata, {'foo': [1]})
    def test_deepcopy_memo_is_respected(self):
        # Basic test to ensure deepcopy's memo is passed through to recursive
        # deepcopy calls.
        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
        memo = {}
        copy.deepcopy(obj, memo)
        self.assertGreater(len(memo), 2)
    def test_metadata_getter(self):
        obj = self._metadata_constructor_(
            metadata={42: 'foo', ('hello', 'world'): 43})
        self.assertIsInstance(obj.metadata, dict)
        self.assertEqual(obj.metadata, {42: 'foo', ('hello', 'world'): 43})
        # The getter exposes the live dict: in-place edits stick.
        obj.metadata[42] = 'bar'
        self.assertEqual(obj.metadata, {42: 'bar', ('hello', 'world'): 43})
    def test_metadata_getter_no_metadata(self):
        obj = self._metadata_constructor_()
        self.assertIsNone(obj._metadata)
        # First access lazily materializes an empty dict.
        self.assertIsInstance(obj.metadata, dict)
        self.assertEqual(obj.metadata, {})
        self.assertIsNotNone(obj._metadata)
    def test_metadata_setter(self):
        obj = self._metadata_constructor_()
        self.assertFalse(obj.has_metadata())
        obj.metadata = {'hello': 'world'}
        self.assertTrue(obj.has_metadata())
        self.assertEqual(obj.metadata, {'hello': 'world'})
        obj.metadata = {}
        self.assertFalse(obj.has_metadata())
        self.assertEqual(obj.metadata, {})
    def test_metadata_setter_makes_shallow_copy(self):
        obj = self._metadata_constructor_()
        md = {'foo': 'bar', 42: []}
        obj.metadata = md
        self.assertEqual(obj.metadata, md)
        self.assertIsNot(obj.metadata, md)
        # Same shallow-copy semantics as the constructor (see above).
        md['foo'] = 'baz'
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
        md[42].append(True)
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
    def test_metadata_setter_invalid_type(self):
        obj = self._metadata_constructor_(metadata={123: 456})
        for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
                   pd.DataFrame()):
            with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
                obj.metadata = md
            # A failed assignment leaves the existing metadata untouched.
            self.assertEqual(obj.metadata, {123: 456})
    def test_metadata_deleter(self):
        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
        self.assertEqual(obj.metadata, {'foo': 'bar'})
        del obj.metadata
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())
        # Delete again.
        del obj.metadata
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())
        # Deleting when nothing was ever set is also a no-op.
        obj = self._metadata_constructor_()
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())
        del obj.metadata
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())
    def test_has_metadata(self):
        obj = self._metadata_constructor_()
        self.assertFalse(obj.has_metadata())
        # Handles metadata efficiently.
        self.assertIsNone(obj._metadata)
        self.assertFalse(
            self._metadata_constructor_(metadata={}).has_metadata())
        self.assertTrue(
            self._metadata_constructor_(metadata={'': ''}).has_metadata())
        self.assertTrue(
            self._metadata_constructor_(
                metadata={'foo': 42}).has_metadata())
class PositionalMetadataMixinTests:
    """Shared tests for objects exposing a DataFrame-based
    ``positional_metadata`` property.

    Mixed into concrete test cases that must provide
    ``_positional_metadata_constructor_(length, positional_metadata=None)``
    (builds the object under test with the given number of positions) and
    the ``assertReallyEqual`` / ``assertReallyNotEqual`` helpers.

    The ``assertRaisesRegex`` patterns below that match parenthesized
    lengths are raw strings: ``\\(`` is a regex escape, and writing it in a
    non-raw literal produces an invalid-escape warning on modern CPython.
    """

    def test_constructor_invalid_positional_metadata_type(self):
        with self.assertRaisesRegex(TypeError,
                                    'Invalid positional metadata. Must be '
                                    'consumable by `pd.DataFrame` constructor.'
                                    ' Original pandas error message: '):
            self._positional_metadata_constructor_(0, positional_metadata=2)

    def test_constructor_positional_metadata_len_mismatch(self):
        # The error message reports (actual).*(expected) lengths.
        # Zero elements.
        with self.assertRaisesRegex(ValueError, r'\(0\).*\(4\)'):
            self._positional_metadata_constructor_(4, positional_metadata=[])
        # Not enough elements.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=[2, 3, 4])
        # Too many elements.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=[2, 3, 4, 5, 6])
        # Series not enough rows.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.Series(range(3)))
        # Series too many rows.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.Series(range(5)))
        # DataFrame not enough rows.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.DataFrame({'quality': range(3)}))
        # DataFrame too many rows.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.DataFrame({'quality': range(5)}))
        # Empty DataFrame wrong size.
        with self.assertRaisesRegex(ValueError, r'\(2\).*\(3\)'):
            self._positional_metadata_constructor_(
                3, positional_metadata=pd.DataFrame(index=range(2)))

    def test_constructor_no_positional_metadata(self):
        # Length zero with missing/empty positional metadata.
        for empty in None, {}, pd.DataFrame():
            obj = self._positional_metadata_constructor_(
                0, positional_metadata=empty)
            self.assertFalse(obj.has_positional_metadata())
            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
            assert_data_frame_almost_equal(obj.positional_metadata,
                                           pd.DataFrame(index=range(0)))
        # Nonzero length with missing positional metadata.
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=None)
        self.assertFalse(obj.has_positional_metadata())
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(3)))

    def test_constructor_with_positional_metadata_len_zero(self):
        for data in [], (), np.array([]):
            obj = self._positional_metadata_constructor_(
                0, positional_metadata={'foo': data})
            self.assertTrue(obj.has_positional_metadata())
            assert_data_frame_almost_equal(
                obj.positional_metadata,
                pd.DataFrame({'foo': data}, index=range(0)))

    def test_constructor_with_positional_metadata_len_one(self):
        for data in [2], (2, ), np.array([2]):
            obj = self._positional_metadata_constructor_(
                1, positional_metadata={'foo': data})
            self.assertTrue(obj.has_positional_metadata())
            assert_data_frame_almost_equal(
                obj.positional_metadata,
                pd.DataFrame({'foo': data}, index=range(1)))

    def test_constructor_with_positional_metadata_len_greater_than_one(self):
        for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
                     (0, 42, 42, 1, 0, 8, 100, 0, 0),
                     np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
            obj = self._positional_metadata_constructor_(
                9, positional_metadata={'foo': data})
            self.assertTrue(obj.has_positional_metadata())
            assert_data_frame_almost_equal(
                obj.positional_metadata,
                pd.DataFrame({'foo': data}, index=range(9)))

    def test_constructor_with_positional_metadata_multiple_columns(self):
        obj = self._positional_metadata_constructor_(
            5, positional_metadata={'foo': np.arange(5),
                                    'bar': np.arange(5)[::-1]})
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))

    def test_constructor_with_positional_metadata_custom_index(self):
        # A custom index is discarded and replaced with a RangeIndex.
        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
                          index=['a', 'b', 'c', 'd', 'e'])
        obj = self._positional_metadata_constructor_(
            5, positional_metadata=df)
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))

    def test_constructor_with_positional_metadata_int64_index(self):
        # Test that memory-inefficient index is converted to memory-efficient
        # index.
        # NOTE(review): pd.Int64Index was removed in pandas 2.0; this suite
        # assumes pandas < 2 — confirm the pinned pandas version.
        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
                          index=np.arange(5))
        self.assertIsInstance(df.index, pd.Int64Index)
        obj = self._positional_metadata_constructor_(
            5, positional_metadata=df)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)

    def test_constructor_handles_missing_positional_metadata_efficiently(self):
        # Missing positional metadata is stored internally as None.
        obj = self._positional_metadata_constructor_(4)
        self.assertIsNone(obj._positional_metadata)
        obj = self._positional_metadata_constructor_(
            4, positional_metadata=None)
        self.assertIsNone(obj._positional_metadata)

    def test_constructor_makes_shallow_copy_of_positional_metadata(self):
        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                          index=['a', 'b', 'c'])
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=df)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        self.assertIsNot(obj.positional_metadata, df)
        # Original df is not mutated.
        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                               index=['a', 'b', 'c'])
        assert_data_frame_almost_equal(df, orig_df)
        # Change values of column (using same dtype).
        df['foo'] = [42, 42, 42]
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Change single value of underlying data.
        df.values[0][0] = 10
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Mutate list (not a deep copy).
        df['bar'][0].append(42)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
                         index=range(3)))

    def test_eq_basic(self):
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        obj2 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        self.assertReallyEqual(obj1, obj2)

    def test_eq_from_different_source(self):
        # Equality only depends on the resulting values, not on whether the
        # metadata came from an ndarray or a DataFrame with a custom index.
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': np.array([1, 2, 3])})
        obj2 = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame({'foo': [1, 2, 3]},
                                                index=['foo', 'bar', 'baz']))
        self.assertReallyEqual(obj1, obj2)

    def test_eq_missing_positional_metadata(self):
        for empty in None, {}, pd.DataFrame(), pd.DataFrame(index=[]):
            obj = self._positional_metadata_constructor_(
                0, positional_metadata=empty)
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(0))
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(
                    0, positional_metadata=empty))
        for empty in None, pd.DataFrame(index=['a', 'b']):
            obj = self._positional_metadata_constructor_(
                2, positional_metadata=empty)
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(2))
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(
                    2, positional_metadata=empty))

    def test_eq_handles_missing_positional_metadata_efficiently(self):
        # Comparing two objects without positional metadata must not
        # materialize DataFrames.
        obj1 = self._positional_metadata_constructor_(1)
        obj2 = self._positional_metadata_constructor_(1)
        self.assertReallyEqual(obj1, obj2)
        self.assertIsNone(obj1._positional_metadata)
        self.assertIsNone(obj2._positional_metadata)

    def test_ne_len_zero(self):
        # Both have positional metadata.
        obj1 = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        obj2 = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': [], 'bar': []})
        self.assertReallyNotEqual(obj1, obj2)
        # One has positional metadata.
        obj1 = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        obj2 = self._positional_metadata_constructor_(0)
        self.assertReallyNotEqual(obj1, obj2)

    def test_ne_len_greater_than_zero(self):
        # Both have positional metadata.
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        obj2 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 2]})
        self.assertReallyNotEqual(obj1, obj2)
        # One has positional metadata.
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        obj2 = self._positional_metadata_constructor_(3)
        self.assertReallyNotEqual(obj1, obj2)

    def test_ne_len_mismatch(self):
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame(index=range(3)))
        obj2 = self._positional_metadata_constructor_(
            2, positional_metadata=pd.DataFrame(index=range(2)))
        self.assertReallyNotEqual(obj1, obj2)

    def test_copy_positional_metadata_none(self):
        obj = self._positional_metadata_constructor_(3)
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._positional_metadata)
        self.assertIsNone(obj_copy._positional_metadata)

    def test_copy_positional_metadata_empty(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame(index=range(3)))
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        assert_data_frame_almost_equal(obj._positional_metadata,
                                       pd.DataFrame(index=range(3)))
        # The copy normalizes empty positional metadata back to None.
        self.assertIsNone(obj_copy._positional_metadata)

    def test_copy_with_positional_metadata(self):
        obj = self._positional_metadata_constructor_(
            4, positional_metadata={'bar': [[], [], [], []],
                                    'baz': [42, 42, 42, 42]})
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        # Shallow copy: new DataFrame and values array, shared cell objects.
        self.assertIsNot(obj._positional_metadata,
                         obj_copy._positional_metadata)
        self.assertIsNot(obj._positional_metadata.values,
                         obj_copy._positional_metadata.values)
        self.assertIs(obj._positional_metadata.loc[0, 'bar'],
                      obj_copy._positional_metadata.loc[0, 'bar'])
        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
        obj_copy.positional_metadata.loc[0, 'baz'] = 43
        assert_data_frame_almost_equal(
            obj_copy.positional_metadata,
            pd.DataFrame({'bar': [[1], [], [], []],
                          'baz': [43, 42, 42, 42]}))
        # The shared list mutation shows through; the scalar write does not.
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'bar': [[1], [], [], []],
                          'baz': [42, 42, 42, 42]}))

    def test_copy_preserves_range_index(self):
        for pm in None, {'foo': ['a', 'b', 'c']}:
            obj = self._positional_metadata_constructor_(
                3, positional_metadata=pm)
            obj_copy = copy.copy(obj)
            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
            self.assertIsInstance(obj_copy.positional_metadata.index,
                                  pd.RangeIndex)

    def test_deepcopy_positional_metadata_none(self):
        obj = self._positional_metadata_constructor_(3)
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._positional_metadata)
        self.assertIsNone(obj_copy._positional_metadata)

    def test_deepcopy_positional_metadata_empty(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame(index=range(3)))
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        assert_data_frame_almost_equal(obj._positional_metadata,
                                       pd.DataFrame(index=range(3)))
        self.assertIsNone(obj_copy._positional_metadata)

    def test_deepcopy_with_positional_metadata(self):
        obj = self._positional_metadata_constructor_(
            4, positional_metadata={'bar': [[], [], [], []],
                                    'baz': [42, 42, 42, 42]})
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        # Deep copy: cell objects are independent too.
        self.assertIsNot(obj._positional_metadata,
                         obj_copy._positional_metadata)
        self.assertIsNot(obj._positional_metadata.values,
                         obj_copy._positional_metadata.values)
        self.assertIsNot(obj._positional_metadata.loc[0, 'bar'],
                         obj_copy._positional_metadata.loc[0, 'bar'])
        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
        obj_copy.positional_metadata.loc[0, 'baz'] = 43
        assert_data_frame_almost_equal(
            obj_copy.positional_metadata,
            pd.DataFrame({'bar': [[1], [], [], []],
                          'baz': [43, 42, 42, 42]}))
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'bar': [[], [], [], []],
                          'baz': [42, 42, 42, 42]}))

    def test_deepcopy_preserves_range_index(self):
        for pm in None, {'foo': ['a', 'b', 'c']}:
            obj = self._positional_metadata_constructor_(
                3, positional_metadata=pm)
            obj_copy = copy.deepcopy(obj)
            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
            self.assertIsInstance(obj_copy.positional_metadata.index,
                                  pd.RangeIndex)

    def test_deepcopy_memo_is_respected(self):
        # Basic test to ensure deepcopy's memo is passed through to recursive
        # deepcopy calls.
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        memo = {}
        copy.deepcopy(obj, memo)
        self.assertGreater(len(memo), 2)

    def test_positional_metadata_getter(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [22, 22, 0]})
        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [22, 22, 0]}))
        # Update existing column.
        obj.positional_metadata['foo'] = [42, 42, 43]
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [42, 42, 43]}))
        # Add new column.
        obj.positional_metadata['foo2'] = [True, False, True]
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [42, 42, 43],
                          'foo2': [True, False, True]}))

    def test_positional_metadata_getter_no_positional_metadata(self):
        obj = self._positional_metadata_constructor_(4)
        self.assertIsNone(obj._positional_metadata)
        # First access lazily materializes an empty DataFrame of the right
        # length.
        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame(index=range(4)))
        self.assertIsNotNone(obj._positional_metadata)

    def test_positional_metadata_getter_set_column_series(self):
        length = 8
        obj = self._positional_metadata_constructor_(
            length, positional_metadata={'foo': range(length)})
        obj.positional_metadata['bar'] = pd.Series(range(length-3))
        # pandas.Series will be padded with NaN if too short.
        npt.assert_equal(obj.positional_metadata['bar'],
                         np.array(list(range(length-3)) + [np.nan]*3))
        obj.positional_metadata['baz'] = pd.Series(range(length+3))
        # pandas.Series will be truncated if too long.
        npt.assert_equal(obj.positional_metadata['baz'],
                         np.array(range(length)))

    def test_positional_metadata_getter_set_column_array(self):
        length = 8
        obj = self._positional_metadata_constructor_(
            length, positional_metadata={'foo': range(length)})
        # array-like objects will fail if wrong size.
        for array_like in (np.array(range(length-1)), range(length-1),
                           np.array(range(length+1)), range(length+1)):
            with self.assertRaisesRegex(ValueError,
                                        "Length of values does not match "
                                        "length of index"):
                obj.positional_metadata['bar'] = array_like

    def test_positional_metadata_setter_pandas_consumable(self):
        obj = self._positional_metadata_constructor_(3)
        self.assertFalse(obj.has_positional_metadata())
        obj.positional_metadata = {'foo': [3, 2, 1]}
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [3, 2, 1]}))
        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
        self.assertFalse(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(3)))

    def test_positional_metadata_setter_data_frame(self):
        obj = self._positional_metadata_constructor_(3)
        self.assertFalse(obj.has_positional_metadata())
        obj.positional_metadata = pd.DataFrame({'foo': [3, 2, 1]},
                                               index=['a', 'b', 'c'])
        self.assertTrue(obj.has_positional_metadata())
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [3, 2, 1]}))
        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
        self.assertFalse(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(3)))

    def test_positional_metadata_setter_none(self):
        obj = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': []}))
        # `None` behavior differs from constructor.
        obj.positional_metadata = None
        self.assertFalse(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(0)))

    def test_positional_metadata_setter_int64_index(self):
        # Test that memory-inefficient index is converted to memory-efficient
        # index.
        # NOTE(review): pd.Int64Index was removed in pandas 2.0; this suite
        # assumes pandas < 2 — confirm the pinned pandas version.
        obj = self._positional_metadata_constructor_(5)
        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
                          index=np.arange(5))
        self.assertIsInstance(df.index, pd.Int64Index)
        obj.positional_metadata = df
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)

    def test_positional_metadata_setter_makes_shallow_copy(self):
        obj = self._positional_metadata_constructor_(3)
        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                          index=['a', 'b', 'c'])
        obj.positional_metadata = df
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        self.assertIsNot(obj.positional_metadata, df)
        # Original df is not mutated.
        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                               index=['a', 'b', 'c'])
        assert_data_frame_almost_equal(df, orig_df)
        # Change values of column (using same dtype).
        df['foo'] = [42, 42, 42]
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Change single value of underlying data.
        df.values[0][0] = 10
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Mutate list (not a deep copy).
        df['bar'][0].append(42)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
                         index=range(3)))

    def test_positional_metadata_setter_invalid_type(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 42]})
        with self.assertRaisesRegex(TypeError,
                                    'Invalid positional metadata. Must be '
                                    'consumable by `pd.DataFrame` constructor.'
                                    ' Original pandas error message: '):
            obj.positional_metadata = 2
        # A failed assignment leaves the existing metadata untouched.
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 42]}))

    def test_positional_metadata_setter_len_mismatch(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 42]})
        # `None` behavior differs from constructor.
        with self.assertRaisesRegex(ValueError, r'\(0\).*\(3\)'):
            obj.positional_metadata = None
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 42]}))
        with self.assertRaisesRegex(ValueError, r'\(4\).*\(3\)'):
            obj.positional_metadata = [1, 2, 3, 4]
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 42]}))

    def test_positional_metadata_deleter(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 3]}))
        del obj.positional_metadata
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())
        # Delete again.
        del obj.positional_metadata
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())
        # Deleting when nothing was ever set is also a no-op.
        obj = self._positional_metadata_constructor_(3)
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())
        del obj.positional_metadata
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())

    def test_has_positional_metadata(self):
        obj = self._positional_metadata_constructor_(4)
        self.assertFalse(obj.has_positional_metadata())
        self.assertIsNone(obj._positional_metadata)
        obj = self._positional_metadata_constructor_(0, positional_metadata={})
        self.assertFalse(obj.has_positional_metadata())
        # Column-less DataFrames count as "no positional metadata".
        obj = self._positional_metadata_constructor_(
            4, positional_metadata=pd.DataFrame(index=np.arange(4)))
        self.assertFalse(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            4, positional_metadata=pd.DataFrame(index=['a', 'b', 'c', 'd']))
        self.assertFalse(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        self.assertTrue(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            4, positional_metadata={'foo': [1, 2, 3, 4]})
        self.assertTrue(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            2, positional_metadata={'foo': [1, 2], 'bar': ['abc', 'def']})
        self.assertTrue(obj.has_positional_metadata())
|
kdmurray91/scikit-bio
|
skbio/metadata/_testing.py
|
Python
|
bsd-3-clause
| 36,975
|
[
"scikit-bio"
] |
0689eb1e55a4ec4a01f2f45499a7fa002a070ea9ae1d89019d9b55c78e939094
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2006, 2008 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""Device Settings listing dialog """
from kiwi.ui.objectlist import Column
from stoqlib.database.runtime import get_current_station
from stoqlib.domain.devices import DeviceSettings
from stoqlib.gui.base.lists import ModelListDialog, ModelListSlave
from stoqlib.gui.editors.deviceseditor import DeviceSettingsEditor
from stoqlib.lib.translation import stoqlib_gettext
# Conventional gettext alias: user-visible strings are wrapped in _() so
# they can be translated.
_ = stoqlib_gettext
class DeviceSettingsListSlave(ModelListSlave):
    """List slave showing the DeviceSettings stored in the database.

    Used by DeviceSettingsDialog to list device configurations and to open
    the editor for adding/editing entries.
    """
    # Columns of the device list: device type, free-text description, the
    # computer (station) the device is attached to, and the active flag.
    columns = [
        Column('device_type_name', title=_('Device Type'),
               data_type=str, sorted=True, width=180),
        Column('description', title=_('Description'),
               data_type=str, expand=True),
        Column('station.name', title=_('Computer'),
               data_type=str, width=150, searchable=True),
        Column('is_active', title=_("Active"),
               data_type=bool, width=70)]
    model_type = DeviceSettings
    # FIXME: This should be 'def populate', verify if this is working
    def _populate(self):
        # Lists every DeviceSettings row, regardless of station.
        return self.parent.store.find(DeviceSettings)
    def run_editor(self, store, model):
        # NOTE(review): model is presumably None when adding a new device —
        # confirm against ModelListSlave.  The editor is always bound to the
        # station this code runs on.
        return self.run_dialog(DeviceSettingsEditor, store=store,
                               model=model,
                               station=get_current_station(store))
class DeviceSettingsDialog(ModelListDialog):
    """Dialog listing device settings; rows are managed by DeviceSettingsListSlave."""
    list_slave_class = DeviceSettingsListSlave
    title = _('Device settings')
    size = (750, 300)  # (width, height) in pixels
|
andrebellafronte/stoq
|
stoqlib/gui/dialogs/devices.py
|
Python
|
gpl-2.0
| 2,339
|
[
"VisIt"
] |
c8caa8d2b1683759053cc357289fa44491bc8eddb7b3b14937f0397ef12ce1e5
|
""" DIRAC Basic MySQL Class
It provides access to the basic MySQL methods in a multithread-safe mode
keeping used connections in a python Queue for further reuse.
These are the coded methods:
__init__( host, user, passwd, name, [maxConnsInQueue=10] )
Initializes the Queue and tries to connect to the DB server,
using the _connect method.
"maxConnsInQueue" defines the size of the Queue of open connections
that are kept for reuse. It also defined the maximum number of open
connections available from the object.
maxConnsInQueue = 0 means unlimited and it is not supported.
_except( methodName, exception, errorMessage )
Helper method for exceptions: the "methodName" and the "errorMessage"
are printed with ERROR level, then the "exception" is printed (with
full description if it is a MySQL Exception) and S_ERROR is returned
with the errorMessage and the exception.
_connect()
Attempts connection to DB and sets the _connected flag to True upon success.
Returns S_OK or S_ERROR.
_query( cmd, [conn] )
Executes SQL command "cmd".
Gets a connection from the Queue (or open a new one if none is available),
the used connection is back into the Queue.
If a connection to the the DB is passed as second argument this connection
is used and is not in the Queue.
Returns S_OK with fetchall() out in Value or S_ERROR upon failure.
_update( cmd, [conn] )
Executes SQL command "cmd" and issue a commit
Gets a connection from the Queue (or open a new one if none is available),
the used connection is back into the Queue.
If a connection to the the DB is passed as second argument this connection
is used and is not in the Queue
Returns S_OK with number of updated registers in Value or S_ERROR upon failure.
_createTables( tableDict )
Create a new Table in the DB
_getConnection()
Gets a connection from the Queue (or open a new one if none is available)
Returns S_OK with connection in Value or S_ERROR
the calling method is responsible for closing this connection once it is no
longer needed.
Some high level methods have been added to avoid the need to write SQL
statement in most common cases. They should be used instead of low level
_insert, _update methods when ever possible.
buildCondition( self, condDict = None, older = None, newer = None,
timeStamp = None, orderAttribute = None, limit = False,
greater = None, smaller = None ):
Build SQL condition statement from provided condDict and other extra check on
a specified time stamp.
The conditions dictionary specifies for each attribute one or a List of possible
values
greater and smaller are dictionaries in which the keys are the names of the fields,
that are requested to be >= or < than the corresponding value.
For compatibility with current usage it uses Exceptions to exit in case of
invalid arguments
insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
Insert a new row in "tableName" assigning the values "inValues" to the
fields "inFields".
Alternatively inDict can be used
String type values will be appropriately escaped.
updateFields( self, tableName, updateFields = None, updateValues = None,
condDict = None,
limit = False, conn = None,
updateDict = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Update "updateFields" from "tableName" with "updateValues".
updateDict alternative way to provide the updateFields and updateValues
N records can match the condition
return S_OK( number of updated rows )
if limit is not False, the given limit is set
String type values will be appropriately escaped.
deleteEntries( self, tableName,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Delete rows from "tableName" with
N records can match the condition
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
getFields( self, tableName, outFields = None,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Select "outFields" from "tableName" with condDict
N records can match the condition
return S_OK( tuple(Field,Value) )
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
for compatibility with other methods condDict keyed argument is added
getCounters( self, table, attrList, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Count the number of records on each distinct combination of AttrList, selected
with condition defined by condDict and time stamps
getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Get distinct values of a table attribute under specified conditions
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.DataStructures import MutableStruct
from DIRAC.Core.Utilities import Time
# Get rid of the annoying Deprecation warning of the current MySQLdb
# FIXME: compile a newer MySQLdb version
import warnings
with warnings.catch_warnings():
warnings.simplefilter( 'ignore', DeprecationWarning )
import MySQLdb
# Get rid of the annoying Deprecation warning of the current MySQLdb
# FIXME: compile a newer MySQLdb version
import warnings
with warnings.catch_warnings():
warnings.simplefilter( 'ignore', DeprecationWarning )
import MySQLdb
# This is for proper initialization of embeded server, it should only be called once
MySQLdb.server_init( ['--defaults-file=/opt/dirac/etc/my.cnf', '--datadir=/opt/mysql/db'], ['mysqld'] )
gInstancesCount = 0
gDebugFile = None
import collections
import time
import threading
from types import StringTypes, DictType, ListType, TupleType, BooleanType
MAXCONNECTRETRY = 10
def _checkQueueSize( maxQueueSize ):
"""
Helper to check maxQueueSize
"""
if maxQueueSize <= 0:
raise Exception( 'MySQL.__init__: maxQueueSize must positive' )
try:
maxQueueSize - 1
except Exception:
raise Exception( 'MySQL.__init__: wrong type for maxQueueSize' )
def _checkFields( inFields, inValues ):
  """
  Verify that inFields and inValues have matching lengths.

  Both being None is accepted. Returns S_OK on a match, S_ERROR otherwise
  (including when either argument has no length).
  """
  if inFields == None and inValues == None:
    return S_OK()
  try:
    if len( inFields ) == len( inValues ):
      return S_OK()
  except:
    pass
  return S_ERROR( 'Mismatch between inFields and inValues.' )
def _quotedList( fieldList = None ):
"""
Quote a list of MySQL Field Names with "`"
Return a comma separated list of quoted Field Names
To be use for Table and Field Names
"""
if fieldList == None:
return None
quotedFields = []
try:
for field in fieldList:
quotedFields.append( '`%s`' % field.replace( '`', '' ) )
except Exception:
return None
if not quotedFields:
return None
return ', '.join( quotedFields )
class MySQL:
"""
Basic multithreaded DIRAC MySQL Client Class
"""
__initialized = False
class ConnectionPool( object ):
    """
    Management of connections per thread.

    Each thread gets its own MySQLdb connection, tracked in __assigned;
    connections released by dead/idle threads are parked in __spares for
    reuse (up to __maxSpares). Python 2 code: uses old except syntax and
    Thread.isAlive().
    """
    # Mutable record describing one pooled connection:
    # conn = MySQLdb connection, dbName = currently selected db,
    # last = last-use timestamp, intrans = True while inside a transaction.
    __connData = MutableStruct( 'ConnData', [ 'conn', 'dbName', 'last', 'intrans' ] )
    def __init__( self, host, user, passwd, port = 3306, graceTime = 600 ):
      # graceTime: seconds of inactivity after which a thread's connection
      # is reclaimed by clean().
      self.__host = host
      self.__user = user
      self.__passwd = passwd
      self.__port = port
      self.__graceTime = graceTime
      self.__spares = collections.deque()
      self.__maxSpares = 10
      self.__lastClean = 0
      self.__assigned = {}
    @property
    def __thid( self ):
      # Thread object itself is used as the dictionary key.
      return threading.current_thread()
    def __newConn( self ):
      """Open a fresh connection with autocommit enabled; no db selected yet."""
      conn = MySQLdb.connect( host = self.__host,
                              port = self.__port,
                              user = self.__user,
                              passwd = self.__passwd )
      self.__execute( conn, "SET AUTOCOMMIT=1" )
      return conn
    def __execute( self, conn, cmd ):
      """Run a single statement on conn and return cursor.execute's result."""
      cursor = conn.cursor()
      res = cursor.execute( cmd )
      cursor.close()
      return res
    def get( self, dbName, retries = 10 ):
      """Return S_OK(connection) bound to dbName for the calling thread."""
      retries = max( 0, min( MAXCONNECTRETRY, retries ) )
      self.clean()
      result = self.__getWithRetry( dbName, retries, retries )
      if not result[ 'OK' ]:
        return result
      return S_OK( result[ 'Value' ].conn )
    def __getWithRetry( self, dbName, totalRetries = 10, retriesLeft = 10 ):
      """Recursive retry wrapper; sleeps progressively longer on each retry."""
      sleepTime = 5 * ( totalRetries - retriesLeft )
      if sleepTime > 0:
        time.sleep( sleepTime )
      try:
        connData, thid = self.__innerGet()
      except MySQLdb.MySQLError, excp:
        if retriesLeft >= 0:
          return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
        return S_ERROR( "Could not connect: %s" % excp )
      # A dead connection outside a transaction is dropped and re-acquired.
      if not connData.intrans and not self.__ping( connData.conn ):
        try:
          self.__assigned.pop( thid )
        except KeyError:
          pass
        if retriesLeft >= 0:
          return self.__getWithRetry( dbName, totalRetries, retriesLeft )
        return S_ERROR( "Could not connect" )
      # Switch database only when the cached connection points elsewhere.
      if connData.dbName != dbName:
        try:
          connData.conn.select_db( dbName )
          connData.dbName = dbName
        except MySQLdb.MySQLError, excp:
          try:
            self.__assigned.pop( thid ).conn.close()
          except Exception:
            pass
          if retriesLeft >= 0:
            return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
          return S_ERROR( "Could not select db %s: %s" % ( dbName, excp ) )
      return S_OK( connData )
    def __ping( self, conn ):
      """True if conn answers a ping (reconnect=True lets MySQLdb reconnect)."""
      try:
        conn.ping( True )
        return True
      except:
        return False
    def __innerGet( self ):
      """Return (connData, thread) for the caller, creating/reusing as needed."""
      thid = self.__thid
      now = time.time()
      try:
        data = self.__assigned[ thid ]
        data.last = now
        return data, thid
      except KeyError:
        pass
      #Not cached
      try:
        connData = self.__spares.pop()
      except IndexError:
        connData = self.__connData( self.__newConn(), "", now, False )
      self.__assigned[ thid ] = connData
      return self.__assigned[ thid ], thid
    def __pop( self, thid ):
      """Reclaim thid's connection: park it as a spare or close it."""
      try:
        connData = self.__assigned.pop( thid )
      except KeyError:
        return
      # Connections mid-transaction are never reused; close them instead.
      if not connData.intrans and len( self.__spares ) < self.__maxSpares:
        self.__spares.append( connData )
      else:
        connData.conn.close()
    def clean( self, now = False ):
      """Reclaim connections of dead threads and of threads idle > graceTime."""
      if not now:
        now = time.time()
      self.__lastClean = now
      for thid in list( self.__assigned ):
        if not thid.isAlive():
          self.__pop( thid )
          continue
        try:
          data = self.__assigned[ thid ]
        except KeyError:
          continue
        if now - data.last > self.__graceTime:
          self.__pop( thid )
    def transactionStart( self, dbName ):
      """Begin a transaction on the calling thread's connection (autocommit off)."""
      print "TRANS START"
      result = self.__getWithRetry( dbName )
      if not result[ 'OK' ]:
        return result
      connData = result[ 'Value' ]
      try:
        # Nested transactions are not supported.
        if connData.intrans:
          raise RuntimeError( "Staring a MySQL transaction inside another one" )
        self.__execute( connData.conn, "SET AUTOCOMMIT=0" )
        self.__execute( connData.conn, "START TRANSACTION WITH CONSISTENT SNAPSHOT" )
        connData.intrans = True
        return S_OK()
      except MySQLdb.MySQLError, excp:
        return S_ERROR( "Could not begin transaction: %s" % excp )
    def transactionCommit( self, dbName ):
      print "TRANS COMMIT"
      return self.__endTransaction( dbName, True )
    def transactionRollback( self, dbName ):
      print "TRANS ROLLBACK"
      return self.__endTransaction( dbName, False )
    def __endTransaction( self, dbName, commit ):
      """Commit or roll back, restore autocommit and clear the intrans flag."""
      result = self.__getWithRetry( dbName )
      if not result[ 'OK' ]:
        return result
      connData = result[ 'Value' ]
      try:
        if not connData.intrans:
          # A reconnect may have silently dropped the transaction.
          gLogger.warn( "MySQL connection has reconnected. Transaction may be inconsistent" )
        if commit:
          result = connData.conn.commit()
        else:
          result = connData.conn.rollback()
        self.__execute( connData.conn, "SET AUTOCOMMIT=1" )
        connData.conn.commit()
        connData.intrans = False
        return S_OK( result )
      except MySQLdb.MySQLError, excp:
        return S_ERROR( "Could not end transaction: %s" % excp )
__connectionPools = {}
def __init__( self, hostName, userName, passwd, dbName, port = 3306, maxQueueSize = 3, debug = False ):
  """
  Set MySQL connection parameters and try to connect.

  A ConnectionPool is shared between all MySQL instances created with the
  same (host, user, passwd, port) key. With debug=True, executed commands
  are appended to '<dbName>.debug.log'.
  """
  global gInstancesCount, gDebugFile
  gInstancesCount += 1
  self._connected = False
  # A derived class may already have installed its own logger.
  if 'log' not in dir( self ):
    self.log = gLogger.getSubLogger( 'MySQL' )
  self.logger = self.log
  # let the derived class decide what to do with if is not 1
  self._threadsafe = MySQLdb.thread_safe()
  self.log.debug( 'thread_safe = %s' % self._threadsafe )
  # Raises if maxQueueSize is not a positive number.
  _checkQueueSize( maxQueueSize )
  self.__hostName = str( hostName )
  self.__userName = str( userName )
  self.__passwd = str( passwd )
  self.__dbName = str( dbName )
  self.__port = port
  # Pool connections per server credentials, shared across instances.
  cKey = ( self.__hostName, self.__userName, self.__passwd, self.__port )
  if cKey not in MySQL.__connectionPools:
    MySQL.__connectionPools[ cKey ] = MySQL.ConnectionPool( *cKey )
  self.__connectionPool = MySQL.__connectionPools[ cKey ]
  self.__initialized = True
  result = self._connect()
  if not result[ 'OK' ]:
    # Connection failure is logged but not fatal here.
    gLogger.error( "Cannot connect to to DB: %s" % result[ 'Message' ] )
  if debug:
    try:
      gDebugFile = open( "%s.debug.log" % self.__dbName, "w" )
    except IOError:
      pass
def __del__( self ):
global gInstancesCount
try:
gInstancesCount -= 1
except Exception:
pass
def _except( self, methodName, x, err ):
  """
  Print MySQL error or exception and return S_ERROR with the details.

  :param methodName: name of the calling method, used as log prefix
  :param x: the caught exception object
  :param err: human-readable error message
  """
  # Re-raise the passed exception so the except clauses below can
  # discriminate MySQLdb errors (which carry (errno, message) args)
  # from generic exceptions.
  try:
    raise x
  except MySQLdb.Error, e:
    self.log.debug( '%s: %s' % ( methodName, err ),
                    '%d: %s' % ( e.args[0], e.args[1] ) )
    return S_ERROR( '%s: ( %d: %s )' % ( err, e.args[0], e.args[1] ) )
  except Exception, e:
    self.log.debug( '%s: %s' % ( methodName, err ), str( e ) )
    return S_ERROR( '%s: (%s)' % ( err, str( e ) ) )
def __escapeString( self, myString ):
  """
  To be used for escaping any MySQL string before passing it to the DB;
  this should prevent passing non-MySQL accepted characters to the DB.
  It also includes quotation marks " around the given string.

  Values starting with one of the recognized SQL time functions are
  returned unquoted so they are evaluated by the server.
  """
  retDict = self.__getConnection()
  if not retDict['OK']:
    return retDict
  connection = retDict['Value']
  # SQL functions that must be passed through without quoting/escaping.
  specialValues = ( 'UTC_TIMESTAMP', 'TIMESTAMPADD', 'TIMESTAMPDIFF' )
  try:
    myString = str( myString )
  except ValueError:
    return S_ERROR( "Cannot escape value!" )
  try:
    for sV in specialValues:
      # Only a prefix match (position 0) counts as a special value.
      if myString.find( sV ) == 0:
        return S_OK( myString )
    escape_string = connection.escape_string( str( myString ) )
    self.log.debug( '__escape_string: returns', '"%s"' % escape_string )
    # The escaped value is returned already wrapped in double quotes.
    return S_OK( '"%s"' % escape_string )
  except Exception, x:
    self.log.debug( '__escape_string: Could not escape string', '"%s"' % myString )
    return self._except( '__escape_string', x, 'Could not escape string' )
def __checkTable( self, tableName, force = False ):
  """
  Check whether tableName already exists in the current database.

  With force=True an existing table is dropped; with force=False an
  existing table yields S_ERROR. Returns S_OK when the table is absent
  (or was successfully dropped).
  """
  table = _quotedList( [tableName] )
  if not table:
    return S_ERROR( 'Invalid tableName argument' )
  retDict = self._query( 'SHOW TABLES', debug = True )
  if not retDict['OK']:
    return retDict
  if ( tableName, ) in retDict['Value']:
    if not force:
      # the requested exist and table creation is not force, return with error
      return S_ERROR( 'Requested table %s already exists' % tableName )
    retDict = self._update( 'DROP TABLE %s' % table, debug = True )
    if not retDict['OK']:
      return retDict
  return S_OK()
def _escapeString( self, myString, conn = None ):
  """
  Public wrapper around the private __escapeString.

  The ``conn`` argument is accepted for backward compatibility only and
  is ignored.
  """
  self.log.debug( '_escapeString:', '"%s"' % str( myString ) )
  escaped = self.__escapeString( myString )
  return escaped
def _escapeValues( self, inValues = None ):
  """
  Escape all strings in the list of values provided.

  Strings are escaped individually; lists/tuples become a parenthesized,
  comma-separated group of escaped values; booleans and any other type
  are stringified (booleans unescaped, others escaped). Returns S_OK with
  the list of escaped values, or S_ERROR on the first escaping failure.
  """
  self.log.debug( '_escapeValues:', inValues )
  inEscapeValues = []
  if not inValues:
    return S_OK( inEscapeValues )
  for value in inValues:
    if type( value ) in StringTypes:
      retDict = self.__escapeString( value )
      if not retDict['OK']:
        return retDict
      inEscapeValues.append( retDict['Value'] )
    elif type( value ) == TupleType or type( value ) == ListType:
      tupleValues = []
      for v in list( value ):
        retDict = self.__escapeString( v )
        if not retDict['OK']:
          return retDict
        tupleValues.append( retDict['Value'] )
      inEscapeValues.append( '(' + ', '.join( tupleValues ) + ')' )
    elif type( value ) == BooleanType:
      # FIX: this used to be `inEscapeValues = [str(value)]`, which threw
      # away every previously escaped value in the list; append instead.
      inEscapeValues.append( str( value ) )
    else:
      retDict = self.__escapeString( str( value ) )
      if not retDict['OK']:
        return retDict
      inEscapeValues.append( retDict['Value'] )
  return S_OK( inEscapeValues )
def _connect( self ):
  """
  Open connection to MySQL DB and put Connection into Queue;
  set connected flag to True and return S_OK.
  Return S_ERROR upon failure.

  NOTE: no physical connection is attempted here — the try block only
  logs and sets the flag; actual connections are created lazily by the
  ConnectionPool on first use.
  """
  if not self.__initialized:
    error = 'DB not properly initialized'
    gLogger.error( error )
    return S_ERROR( error )
  self.log.debug( '_connect:', self._connected )
  if self._connected:
    return S_OK()
  self.log.debug( '_connect: Attempting to access DB',
                  '[%s@%s] by user %s/%s.' %
                  ( self.__dbName, self.__hostName, self.__userName, self.__passwd ) )
  try:
    self.log.verbose( '_connect: Connected.' )
    self._connected = True
    return S_OK()
  except Exception, x:
    return self._except( '_connect', x, 'Could not connect to DB.' )
def _query( self, cmd, conn = None, debug = False ):
  """
  Execute MySQL query command.

  Return S_OK with the fetchall() result as a tuple (empty tuple when no
  rows match), or S_ERROR upon error. The ``conn`` argument is kept for
  backward compatibility and is not used — a pooled connection is always
  obtained via __getConnection.
  """
  if debug:
    self.logger.debug( '_query:', cmd )
  else:
    # Truncate very long statements unless DEBUG level is active.
    if self.logger._minLevel == self.logger._logLevels.getLevelValue( 'DEBUG' ):
      self.logger.verbose( '_query:', cmd )
    else:
      self.logger.verbose( '_query:', cmd[:min( len( cmd ) , 512 )] )
  if gDebugFile:
    start = time.time()
  retDict = self.__getConnection()
  if not retDict['OK']:
    return retDict
  connection = retDict[ 'Value' ]
  try:
    cursor = connection.cursor()
    if cursor.execute( cmd ):
      res = cursor.fetchall()
    else:
      res = ()
    # Log the result limiting it to just 10 records
    if len( res ) <= 10:
      if debug:
        self.logger.debug( '_query: returns', res )
      else:
        self.logger.verbose( '_query: returns', res )
    else:
      if debug:
        self.logger.debug( '_query: Total %d records returned' % len( res ) )
        self.logger.debug( '_query: %s ...' % str( res[:10] ) )
      else:
        self.logger.verbose( '_query: Total %d records returned' % len( res ) )
        self.logger.verbose( '_query: %s ...' % str( res[:10] ) )
    retDict = S_OK( res )
  except Exception , x:
    self.log.warn( '_query:', cmd )
    retDict = self._except( '_query', x, 'Execution failed.' )
  # Best-effort close; cursor may be undefined if cursor() itself failed.
  try:
    cursor.close()
  except Exception:
    pass
  if gDebugFile:
    # Timing line: elapsed seconds followed by the flattened statement.
    print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
    gDebugFile.flush()
  return retDict
def _update( self, cmd, conn = None, debug = False ):
  """ Execute MySQL update command.

  Return S_OK with the number of updated registers upon success (plus a
  'lastRowId' key when the cursor reports one), or S_ERROR upon error.
  ``conn`` is forwarded to __getConnection for backward compatibility
  (where it is presumably ignored — see __getConnection).
  """
  if debug:
    self.logger.debug( '_update:', cmd )
  else:
    # Truncate very long statements unless DEBUG level is active.
    if self.logger._minLevel == self.logger._logLevels.getLevelValue( 'DEBUG' ):
      self.logger.verbose( '_update:', cmd )
    else:
      self.logger.verbose( '_update:', cmd[:min( len( cmd ) , 512 )] )
  if gDebugFile:
    start = time.time()
  retDict = self.__getConnection( conn = conn )
  if not retDict['OK']:
    return retDict
  connection = retDict['Value']
  try:
    cursor = connection.cursor()
    res = cursor.execute( cmd )
    # connection.commit()
    if debug:
      self.log.debug( '_update:', res )
    else:
      self.log.verbose( '_update:', res )
    retDict = S_OK( res )
    if cursor.lastrowid:
      retDict[ 'lastRowId' ] = cursor.lastrowid
  except Exception, x:
    self.log.warn( '_update: %s: %s' % ( cmd, str( x ) ) )
    retDict = self._except( '_update', x, 'Execution failed.' )
  # Best-effort close; cursor may be undefined if cursor() itself failed.
  try:
    cursor.close()
  except Exception:
    pass
  if gDebugFile:
    print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
    gDebugFile.flush()
  return retDict
def _transaction( self, cmdList, conn = None ):
""" dummy transaction support
:param self: self reference
:param list cmdList: list of queries to be executed within the transaction
:param MySQLDB.Connection conn: connection
:return: S_OK( [ ( cmd1, ret1 ), ... ] ) or S_ERROR
"""
if type( cmdList ) != ListType:
return S_ERROR( "_transaction: wrong type (%s) for cmdList" % type( cmdList ) )
# # get connection
connection = conn
if not connection:
retDict = self.__getConnection()
if not retDict['OK']:
return retDict
connection = retDict[ 'Value' ]
# # list with cmds and their results
cmdRet = []
try:
cursor = connection.cursor()
for cmd in cmdList:
cmdRet.append( ( cmd, cursor.execute( cmd ) ) )
connection.commit()
except Exception, error:
self.logger.execption( error )
# # rollback, put back connection to the pool
connection.rollback()
return S_ERROR( error )
# # close cursor, put back connection to the pool
cursor.close()
return S_OK( cmdRet )
def _createViews( self, viewsDict, force = False ):
  """ Create views based on queries.

  :param dict viewsDict: { 'ViewName': { "Fields" : { "`a`": "`tblA.a`", "`sumB`" : "SUM(`tblB.b`)" },
                           "SelectFrom" : "tblA join tblB on tblA.id = tblB.id",
                           "Clauses" : [ "`tblA.a` > 10", "`tblB.Status` = 'foo'" ],  ## WILL USE AND CLAUSE
                           "GroupBy": [ "`a`" ],
                           "OrderBy": [ "`b` DESC" ] } }
  :param force: when True the whole viewsDict is logged at debug level
  :return: S_OK() or the failing query's S_ERROR
  """
  if force:
    gLogger.debug( viewsDict )
  for viewName, viewDict in viewsDict.items():
    # CREATE OR REPLACE makes the operation idempotent per view.
    viewQuery = [ "CREATE OR REPLACE VIEW `%s`.`%s` AS" % ( self.__dbName, viewName ) ]
    columns = ",".join( [ "%s AS %s" % ( colDef, colName )
                          for colName, colDef in viewDict.get( "Fields", {} ).items() ] )
    tables = viewDict.get( "SelectFrom", "" )
    # Both Fields and SelectFrom must be present to emit the SELECT part.
    if columns and tables:
      viewQuery.append( "SELECT %s FROM %s" % ( columns, tables ) )
    # Clauses are always AND-ed together.
    where = " AND ".join( viewDict.get( "Clauses", [] ) )
    if where:
      viewQuery.append( "WHERE %s" % where )
    groupBy = ",".join( viewDict.get( "GroupBy", [] ) )
    if groupBy:
      viewQuery.append( "GROUP BY %s" % groupBy )
    orderBy = ",".join( viewDict.get( "OrderBy", [] ) )
    if orderBy:
      viewQuery.append( "ORDER BY %s" % orderBy )
    viewQuery.append( ";" )
    viewQuery = " ".join( viewQuery )
    self.log.debug( "`%s` VIEW QUERY IS: %s" % ( viewName, viewQuery ) )
    createView = self._query( viewQuery )
    if not createView["OK"]:
      # Abort on the first failing view.
      gLogger.error( createView["Message"] )
      return createView
  return S_OK()
def _createTables( self, tableDict, force = False, okIfTableExists = True ):
  """
  tableDict:
    tableName: { 'Fields' : { 'Field': 'Description' },
                 'ForeignKeys': {'Field': 'Table.key' },
                 'PrimaryKey': 'Id',
                 'Indexes': { 'Index': [] },
                 'UniqueIndexes': { 'Index': [] },
                 'Engine': 'InnoDB' }
  Only 'Fields' is a mandatory key.

  Creates a new Table for each key in tableDict, "tableName" in the DB with
  the provided description.
  It allows to create:
    - flat tables if no "ForeignKeys" key defined.
    - tables with foreign keys to auxiliary tables holding the values
      of some of the fields
  Arguments:
    tableDict: dictionary of dictionary with description of tables to be created.
    Only "Fields" is a mandatory key in the table description.
      "Fields": Dictionary with Field names and description of the fields
      "ForeignKeys": Dictionary with Field names and name of auxiliary tables.
        The auxiliary tables must be defined in tableDict.
      "PrimaryKey": Name of PRIMARY KEY for the table (if exist).
      "Indexes": Dictionary with definition of indexes, the value for each
        index is the list of fields to be indexed.
      "UniqueIndexes": Dictionary with definition of indexes, the value for each
        index is the list of fields to be indexed. This indexes will declared
        unique.
      "Engine": use the given DB engine, InnoDB is the default if not present.
    force:
      if True, requested tables are DROP if they exist.
      if False (default), tables are not overwritten
    okIfTableExists:
      if True (default), returns S_OK if table exists
      if False, returns S_ERROR if table exists
  """
  # First check consistency of request
  if type( tableDict ) != DictType:
    return S_ERROR( 'Argument is not a dictionary: %s( %s )'
                    % ( type( tableDict ), tableDict ) )
  tableList = tableDict.keys()
  if len( tableList ) == 0:
    return S_OK( 0 )
  for table in tableList:
    thisTable = tableDict[table]
    # Check if Table is properly described with a dictionary
    if type( thisTable ) != DictType:
      return S_ERROR( 'Table description is not a dictionary: %s( %s )'
                      % ( type( thisTable ), thisTable ) )
    if not 'Fields' in thisTable:
      return S_ERROR( 'Missing `Fields` key in `%s` table dictionary' % table )
  # Topologically order tables so that every table is created after the
  # auxiliary tables its foreign keys reference. tableCreationList[i]
  # holds the tables creatable at "level" i.
  tableCreationList = [[]]
  auxiliaryTableList = []
  i = 0
  extracted = True
  while tableList and extracted:
    # iterate extracting tables from list if they only depend on
    # already extracted tables.
    extracted = False
    auxiliaryTableList += tableCreationList[i]
    i += 1
    tableCreationList.append( [] )
    for table in list( tableList ):
      toBeExtracted = True
      thisTable = tableDict[table]
      if 'ForeignKeys' in thisTable:
        thisKeys = thisTable['ForeignKeys']
        for key, auxTable in thisKeys.items():
          # auxTable is either "Table" (key name reused) or "Table.key".
          forTable = auxTable.split( '.' )[0]
          forKey = key
          if forTable != auxTable:
            forKey = auxTable.split( '.' )[1]
          if forTable not in auxiliaryTableList:
            toBeExtracted = False
            break
          # Both ends of the foreign key must exist as declared fields.
          if not key in thisTable['Fields']:
            return S_ERROR( 'ForeignKey `%s` -> `%s` not defined in Primary table `%s`.'
                            % ( key, forKey, table ) )
          if not forKey in tableDict[forTable]['Fields']:
            return S_ERROR( 'ForeignKey `%s` -> `%s` not defined in Auxiliary table `%s`.'
                            % ( key, forKey, forTable ) )
      if toBeExtracted:
        self.log.debug( 'Table %s ready to be created' % table )
        extracted = True
        tableList.remove( table )
        tableCreationList[i].append( table )
  if tableList:
    # Anything left over could not be ordered -> dependency cycle.
    return S_ERROR( 'Recursive Foreign Keys in %s' % ', '.join( tableList ) )
  createdTablesList = []
  for tableList in tableCreationList:
    for table in tableList:
      # Check if Table exists
      retDict = self.__checkTable( table, force = force )
      if not retDict['OK']:
        if 'already exists' in retDict['Message'] and okIfTableExists:
          continue
        return retDict
      thisTable = tableDict[table]
      # Assemble the column/constraint clauses of the CREATE TABLE.
      cmdList = []
      for field in thisTable['Fields'].keys():
        cmdList.append( '`%s` %s' % ( field, thisTable['Fields'][field] ) )
      if thisTable.has_key( 'PrimaryKey' ):
        # PrimaryKey may be a single field name or a list of fields.
        if type( thisTable['PrimaryKey'] ) in StringTypes:
          cmdList.append( 'PRIMARY KEY ( `%s` )' % thisTable['PrimaryKey'] )
        else:
          cmdList.append( 'PRIMARY KEY ( %s )' % ", ".join( [ "`%s`" % str( f ) for f in thisTable['PrimaryKey'] ] ) )
      if thisTable.has_key( 'Indexes' ):
        indexDict = thisTable['Indexes']
        for index in indexDict:
          indexedFields = '`, `'.join( indexDict[index] )
          cmdList.append( 'INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
      if thisTable.has_key( 'UniqueIndexes' ):
        indexDict = thisTable['UniqueIndexes']
        for index in indexDict:
          indexedFields = '`, `'.join( indexDict[index] )
          cmdList.append( 'UNIQUE INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
      if 'ForeignKeys' in thisTable:
        thisKeys = thisTable['ForeignKeys']
        for key, auxTable in thisKeys.items():
          forTable = auxTable.split( '.' )[0]
          forKey = key
          if forTable != auxTable:
            forKey = auxTable.split( '.' )[1]
          # cmdList.append( '`%s` %s' % ( forTable, tableDict[forTable]['Fields'][forKey] )
          cmdList.append( 'FOREIGN KEY ( `%s` ) REFERENCES `%s` ( `%s` )'
                          ' ON DELETE RESTRICT' % ( key, forTable, forKey ) )
      if thisTable.has_key( 'Engine' ):
        engine = thisTable['Engine']
      else:
        engine = 'InnoDB'
      cmd = 'CREATE TABLE `%s` (\n%s\n) ENGINE=%s' % (
        table, ',\n'.join( cmdList ), engine )
      retDict = self._update( cmd, debug = True )
      if not retDict['OK']:
        return retDict
      self.log.debug( 'Table %s created' % table )
      createdTablesList.append( table )
  return S_OK( createdTablesList )
def _getFields( self, tableName, outFields = None,
                inFields = None, inValues = None,
                limit = False, conn = None,
                older = None, newer = None,
                timeStamp = None, orderAttribute = None ):
  """
  Wrapper to the new method for backward compatibility.

  Converts the parallel (inFields, inValues) lists into the condDict
  expected by getFields and delegates to it.
  """
  self.log.warn( '_getFields:', 'deprecation warning, use getFields methods instead of _getFields.' )
  retDict = _checkFields( inFields, inValues )
  if not retDict['OK']:
    self.log.warn( '_getFields:', retDict['Message'] )
    return retDict
  condDict = {}
  if inFields != None:
    try:
      # Zip the two parallel lists into a condition dictionary.
      condDict.update( [ ( inFields[k], inValues[k] ) for k in range( len( inFields ) )] )
    except Exception, x:
      return S_ERROR( x )
  return self.getFields( tableName, outFields, condDict, limit, conn, older, newer, timeStamp, orderAttribute )
def _insert( self, tableName, inFields = None, inValues = None, conn = None ):
  """
  Deprecated alias kept for backward compatibility; delegates to insertFields.
  """
  self.log.warn( '_insert:', 'deprecation warning, use insertFields methods instead of _insert.' )
  result = self.insertFields( tableName, inFields, inValues, conn )
  return result
def _to_value( self, param ):
"""
Convert to string
"""
return str( param[0] )
def _to_string( self, param ):
"""
"""
return param[0].tostring()
def _getConnection( self ):
  """
  Return a new connection to the DB via the private __getConnection.

  Per the module contract, the caller is responsible for closing the
  returned connection when it is no longer needed.
  """
  self.log.debug( '_getConnection:' )
  return self.__getConnection( trial = 0 )
def __getConnection( self, conn = None, trial = 0 ):
  """
  Return a connection to the configured database from the shared pool.

  The ``conn`` and ``trial`` arguments exist only for backward
  compatibility and are ignored.
  TODO: Remove ALL references to those arguments
  """
  self.log.debug( '__getConnection:' )
  if self.__initialized:
    return self.__connectionPool.get( self.__dbName )
  error = 'DB not properly initialized'
  gLogger.error( error )
  return S_ERROR( error )
########################################################################################
#
# Transaction functions
#
########################################################################################
def transactionStart( self ):
  """Begin a transaction for this thread on the configured db (delegates to the pool)."""
  return self.__connectionPool.transactionStart( self.__dbName )
def transactionCommit( self ):
  """Commit the current thread's transaction (delegates to the pool)."""
  return self.__connectionPool.transactionCommit( self.__dbName )
def transactionRollback( self ):
  """Roll back the current thread's transaction (delegates to the pool)."""
  return self.__connectionPool.transactionRollback( self.__dbName )
@property
def transaction( self ):
  """ Transaction guard.

  Usage: ``with db.transaction as commit: ...; commit()``.
  __enter__ starts a transaction and returns a callable; invoking it
  marks the block as successful. On exit, the transaction is committed
  only if that callable was invoked and no exception occurred; otherwise
  it is rolled back.
  """
  class TransactionGuard( object ):
    def __init__( self, db ):
      self.__db = db
      self.__ok = False
    def __enter__( self ):
      self.__db.transactionStart()
      # The returned closure flips __ok; it passes its arguments through
      # so it can wrap a return value.
      def commitWard( *args ):
        self.__ok = True
        return args
      return commitWard
    def __exit__( self, exType, exValue, traceback ):
      if exValue or not self.__ok:
        self.__db.transactionRollback()
      else:
        self.__db.transactionCommit()
  return TransactionGuard( self )
########################################################################################
#
# Utility functions
#
########################################################################################
def countEntries( self, table, condDict, older = None, newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Count the number of entries wit the given conditions
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'countEntries:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
greater = None, smaller = None )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT COUNT(*) FROM %s %s' % ( table, cond )
res = self._query( cmd , connection, debug = True )
if not res['OK']:
return res
return S_OK( res['Value'][0][0] )
########################################################################################
def getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Count the number of records on each distinct combination of AttrList, selected
with condition defined by condDict and time stamps
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'getCounters:', error )
return S_ERROR( error )
attrNames = _quotedList( attrList )
if attrNames == None:
error = 'Invalid updateFields argument'
self.log.debug( 'getCounters:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
greater = None, smaller = None )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT %s, COUNT(*) FROM %s %s GROUP BY %s ORDER BY %s' % ( attrNames, table, cond, attrNames, attrNames )
res = self._query( cmd , connection, debug = True )
if not res['OK']:
return res
resultList = []
for raw in res['Value']:
attrDict = {}
for i in range( len( attrList ) ):
attrDict[attrList[i]] = raw[i]
item = ( attrDict, raw[len( attrList )] )
resultList.append( item )
return S_OK( resultList )
#########################################################################################
def getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Get distinct values of a table attribute under specified conditions
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'getDistinctAttributeValues:', error )
return S_ERROR( error )
attributeName = _quotedList( [attribute] )
if not attributeName:
error = 'Invalid attribute argument'
self.log.debug( 'getDistinctAttributeValues:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
greater = None, smaller = None )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT DISTINCT( %s ) FROM %s %s ORDER BY %s' % ( attributeName, table, cond, attributeName )
res = self._query( cmd, connection, debug = True )
if not res['OK']:
return res
attr_list = [ x[0] for x in res['Value'] ]
return S_OK( attr_list )
#############################################################################
  def buildCondition( self, condDict = None, older = None, newer = None,
                      timeStamp = None, orderAttribute = None, limit = False,
                      greater = None, smaller = None, offset = None ):
    """ Build SQL condition statement from provided condDict and other extra check on
        a specified time stamp.
        The conditions dictionary specifies for each attribute one or a List of possible
        values
        greater and smaller are dictionaries in which the keys are the names of the fields,
        that are requested to be >= or < than the corresponding value.
        For compatibility with current usage it uses Exceptions to exit in case of
        invalid arguments
    """
    condition = ''
    # "conjunction" is the keyword emitted before the next clause: WHERE for
    # the first clause, AND afterwards.
    conjunction = "WHERE"
    # Equality / IN conditions from condDict. A tuple key produces a
    # multi-column "( a, b ) IN ( ... )" clause.
    if condDict != None:
      for aName, attrValue in condDict.items():
        if type( aName ) in StringTypes:
          attrName = _quotedList( [aName] )
        elif type( aName ) == TupleType:
          attrName = '('+_quotedList( list( aName ) )+')'
        if not attrName:
          error = 'Invalid condDict argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
        if type( attrValue ) == ListType:
          # List value -> "attr IN ( v1, v2, ... )"
          retDict = self._escapeValues( attrValue )
          if not retDict['OK']:
            self.log.warn( 'buildCondition:', retDict['Message'] )
            raise Exception( retDict['Message'] )
          else:
            escapeInValues = retDict['Value']
            multiValue = ', '.join( escapeInValues )
            condition = ' %s %s %s IN ( %s )' % ( condition,
                                                  conjunction,
                                                  attrName,
                                                  multiValue )
            conjunction = "AND"
        else:
          # Scalar value -> "attr = value"
          retDict = self._escapeValues( [ attrValue ] )
          if not retDict['OK']:
            self.log.warn( 'buildCondition:', retDict['Message'] )
            raise Exception( retDict['Message'] )
          else:
            escapeInValue = retDict['Value'][0]
            condition = ' %s %s %s = %s' % ( condition,
                                             conjunction,
                                             attrName,
                                             escapeInValue )
            conjunction = "AND"
    # Time-window conditions on the timeStamp column: newer -> ">=",
    # older -> "<".
    if timeStamp:
      timeStamp = _quotedList( [timeStamp] )
      if not timeStamp:
        error = 'Invalid timeStamp argument'
        self.log.warn( 'buildCondition:', error )
        raise Exception( error )
      if newer:
        retDict = self._escapeValues( [ newer ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          condition = ' %s %s %s >= %s' % ( condition,
                                            conjunction,
                                            timeStamp,
                                            escapeInValue )
          conjunction = "AND"
      if older:
        retDict = self._escapeValues( [ older ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          # NOTE(review): unlike every other branch, conjunction is NOT reset
          # to "AND" here; if "older" adds the first clause and greater/smaller
          # follow, a second WHERE would be emitted — confirm intended.
          condition = ' %s %s %s < %s' % ( condition,
                                           conjunction,
                                           timeStamp,
                                           escapeInValue )
    # Per-field lower bounds: "field >= value" for each entry of greater
    if type( greater ) == DictType:
      for attrName, attrValue in greater.items():
        attrName = _quotedList( [attrName] )
        if not attrName:
          error = 'Invalid greater argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
        retDict = self._escapeValues( [ attrValue ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          condition = ' %s %s %s >= %s' % ( condition,
                                            conjunction,
                                            attrName,
                                            escapeInValue )
          conjunction = "AND"
    # Per-field upper bounds: "field < value" for each entry of smaller
    if type( smaller ) == DictType:
      for attrName, attrValue in smaller.items():
        attrName = _quotedList( [attrName] )
        if not attrName:
          error = 'Invalid smaller argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
        retDict = self._escapeValues( [ attrValue ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          condition = ' %s %s %s < %s' % ( condition,
                                           conjunction,
                                           attrName,
                                           escapeInValue )
          conjunction = "AND"
    # ORDER BY: orderAttribute is a string or list of strings of the form
    # "field" or "field:ASC" / "field:DESC".
    orderList = []
    orderAttrList = orderAttribute
    if type( orderAttrList ) != ListType:
      orderAttrList = [ orderAttribute ]
    for orderAttr in orderAttrList:
      if orderAttr == None:
        continue
      if type( orderAttr ) not in StringTypes:
        error = 'Invalid orderAttribute argument'
        self.log.warn( 'buildCondition:', error )
        raise Exception( error )
      orderField = _quotedList( orderAttr.split( ':' )[:1] )
      if not orderField:
        error = 'Invalid orderAttribute argument'
        self.log.warn( 'buildCondition:', error )
        raise Exception( error )
      if len( orderAttr.split( ':' ) ) == 2:
        orderType = orderAttr.split( ':' )[1].upper()
        if orderType in [ 'ASC', 'DESC']:
          orderList.append( '%s %s' % ( orderField, orderType ) )
        else:
          error = 'Invalid orderAttribute argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
      else:
        orderList.append( orderAttr )
    if orderList:
      condition = "%s ORDER BY %s" % ( condition, ', '.join( orderList ) )
    # LIMIT / OFFSET clause
    if limit:
      if offset:
        condition = "%s LIMIT %d OFFSET %d" % ( condition, limit, offset )
      else:
        condition = "%s LIMIT %d" % ( condition, limit )
    return condition
#############################################################################
def getFields( self, tableName, outFields = None,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Select "outFields" from "tableName" with condDict
N records can match the condition
return S_OK( tuple(Field,Value) )
if outFields == None all fields in "tableName" are returned
if limit is not False, the given limit is set
inValues are properly escaped using the _escape_string method, they can be single values or lists of values.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'getFields:', error )
return S_ERROR( error )
quotedOutFields = '*'
if outFields:
quotedOutFields = _quotedList( outFields )
if quotedOutFields == None:
error = 'Invalid outFields arguments'
self.log.warn( 'getFields:', error )
return S_ERROR( error )
self.log.verbose( 'getFields:', 'selecting fields %s from table %s.' %
( quotedOutFields, table ) )
if condDict == None:
condDict = {}
try:
try:
mylimit = limit[0]
myoffset = limit[1]
except:
mylimit = limit
myoffset = None
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = mylimit,
greater = None, smaller = None, offset = myoffset )
except Exception, x:
return S_ERROR( x )
return self._query( 'SELECT %s FROM %s %s' %
( quotedOutFields, table, condition ), conn, debug = True )
#############################################################################
def deleteEntries( self, tableName,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Delete rows from "tableName" with
N records can match the condition
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'deleteEntries:', error )
return S_ERROR( error )
self.log.verbose( 'deleteEntries:', 'deleting rows from table %s.' % table )
try:
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
greater = None, smaller = None )
except Exception, x:
return S_ERROR( x )
return self._update( 'DELETE FROM %s %s' % ( table, condition ), conn, debug = True )
#############################################################################
def updateFields( self, tableName, updateFields = None, updateValues = None,
condDict = None,
limit = False, conn = None,
updateDict = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Update "updateFields" from "tableName" with "updateValues".
updateDict alternative way to provide the updateFields and updateValues
N records can match the condition
return S_OK( number of updated rows )
if limit is not False, the given limit is set
String type values will be appropriately escaped.
"""
if not updateFields and not updateDict:
return S_OK( 0 )
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
retDict = _checkFields( updateFields, updateValues )
if not retDict['OK']:
error = 'Mismatch between updateFields and updateValues.'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
if updateFields == None:
updateFields = []
updateValues = []
if updateDict:
if type( updateDict ) != DictType:
error = 'updateDict must be a of Type DictType'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
try:
updateFields += updateDict.keys()
updateValues += [updateDict[k] for k in updateDict.keys()]
except TypeError:
error = 'updateFields and updateValues must be a list'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
updateValues = self._escapeValues( updateValues )
if not updateValues['OK']:
self.log.warn( 'updateFields:', updateValues['Message'] )
return updateValues
updateValues = updateValues['Value']
self.log.verbose( 'updateFields:', 'updating fields %s from table %s.' %
( ', '.join( updateFields ), table ) )
try:
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
greater = None, smaller = None )
except Exception, x:
return S_ERROR( x )
updateString = ','.join( ['%s = %s' % ( _quotedList( [updateFields[k]] ),
updateValues[k] ) for k in range( len( updateFields ) ) ] )
return self._update( 'UPDATE %s SET %s %s' %
( table, updateString, condition ), conn, debug = True )
#############################################################################
  def insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
    """
    Insert a new row in "tableName" assigning the values "inValues" to the
    fields "inFields".
    String type values will be appropriately escaped.

    :param tableName: name of the table to insert into
    :param inFields: list of column names
    :param inValues: list of values, parallel to inFields
    :param conn: optional existing DB connection
    :param inDict: alternative/additional field -> value mapping
    :return: result of _update (S_OK/S_ERROR)
    """
    table = _quotedList( [tableName] )
    if not table:
      error = 'Invalid tableName argument'
      self.log.warn( 'insertFields:', error )
      return S_ERROR( error )
    retDict = _checkFields( inFields, inValues )
    if not retDict['OK']:
      self.log.warn( 'insertFields:', retDict['Message'] )
      return retDict
    if inFields == None:
      inFields = []
      inValues = []
    if inDict:
      if type( inDict ) != DictType:
        error = 'inDict must be a of Type DictType'
        self.log.warn( 'insertFields:', error )
        return S_ERROR( error )
      try:
        # Merge the dict form into the parallel field/value lists
        inFields += inDict.keys()
        inValues += [inDict[k] for k in inDict.keys()]
      except TypeError:
        error = 'inFields and inValues must be a list'
        self.log.warn( 'insertFields:', error )
        return S_ERROR( error )
    inFieldString = _quotedList( inFields )
    if inFieldString == None:
      error = 'Invalid inFields arguments'
      self.log.warn( 'insertFields:', error )
      return S_ERROR( error )
    inFieldString = '( %s )' % inFieldString
    retDict = self._escapeValues( inValues )
    if not retDict['OK']:
      self.log.warn( 'insertFields:', retDict['Message'] )
      return retDict
    inValueString = ', '.join( retDict['Value'] )
    inValueString = '( %s )' % inValueString
    self.log.verbose( 'insertFields:', 'inserting %s into table %s'
                      % ( inFieldString, table ) )
    return self._update( 'INSERT INTO %s %s VALUES %s' %
                         ( table, inFieldString, inValueString ), conn, debug = True )
#####################################################################################
#
# This is a test code for this class, it requires access to a MySQL DB
#
if __name__ == '__main__':
  # Integration self-test for the MySQL wrapper: requires a running MySQL
  # server reachable with the HOST/USER/PWD/DB credentials below.
  import os
  import sys
  from DIRAC.Core.Utilities import Time
  from DIRAC.Core.Base.Script import parseCommandLine
  parseCommandLine()
  # asserts are stripped under python -O, so the test cannot run then
  if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
    gLogger.info( 'Unset python optimization "PYTHONOPTIMIZE"' )
    sys.exit( 0 )
  gLogger.info( 'Testing MySQL class...' )
  HOST = '127.0.0.1'
  USER = 'Dirac'
  PWD = 'Dirac'
  DB = 'AccountingDB'
  TESTDB = MySQL( HOST, USER, PWD, DB )
  assert TESTDB._connect()['OK']
  TESTDICT = { 'TestTable' : { 'Fields': { 'ID' : "INTEGER UNIQUE NOT NULL AUTO_INCREMENT",
                                           'Name' : "VARCHAR(255) NOT NULL DEFAULT 'Yo'",
                                           'Surname' : "VARCHAR(255) NOT NULL DEFAULT 'Tu'",
                                           'Count' : "INTEGER NOT NULL DEFAULT 0",
                                           'Time' : "DATETIME",
                                         },
                               'PrimaryKey': 'ID'
                             }
             }
  NAME = 'TestTable'
  FIELDS = [ 'Name', 'Surname' ]
  NEWVALUES = [ 'Name2', 'Surn2' ]
  SOMEFIELDS = [ 'Name', 'Surname', 'Count' ]
  ALLFIELDS = [ 'ID', 'Name', 'Surname', 'Count', 'Time' ]
  ALLVALUES = [ 1, 'Name1', 'Surn1', 1, 'UTC_TIMESTAMP()' ]
  ALLDICT = dict( Name = 'Name1', Surname = 'Surn1', Count = 1, Time = 'UTC_TIMESTAMP()' )
  COND0 = {}
  COND10 = {'Count': range( 10 )}
  try:
    # Exercise create / count / select / update / delete round trips
    RESULT = TESTDB._createTables( TESTDICT, force = True )
    assert RESULT['OK']
    print 'Table Created'
    RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
    assert RESULT['OK']
    assert RESULT['Value'] == []
    RESULT = TESTDB.getDistinctAttributeValues( NAME, FIELDS[0], COND0 )
    assert RESULT['OK']
    assert RESULT['Value'] == []
    RESULT = TESTDB.getFields( NAME, FIELDS )
    assert RESULT['OK']
    assert RESULT['Value'] == ()
    print 'Inserting'
    for J in range( 100 ):
      RESULT = TESTDB.insertFields( NAME, SOMEFIELDS, ['Name1', 'Surn1', J] )
      assert RESULT['OK']
      assert RESULT['Value'] == 1
      assert RESULT['lastRowId'] == J + 1
    print 'Querying'
    RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
    assert RESULT['OK']
    assert RESULT['Value'] == [( {'Surname': 'Surn1', 'Name': 'Name1'}, 100L )]
    RESULT = TESTDB.getDistinctAttributeValues( NAME, FIELDS[0], COND0 )
    assert RESULT['OK']
    assert RESULT['Value'] == ['Name1']
    RESULT = TESTDB.getFields( NAME, FIELDS )
    assert RESULT['OK']
    assert len( RESULT['Value'] ) == 100
    RESULT = TESTDB.getFields( NAME, SOMEFIELDS, COND10 )
    assert RESULT['OK']
    assert len( RESULT['Value'] ) == 10
    RESULT = TESTDB.getFields( NAME, limit = 1 )
    assert RESULT['OK']
    assert len( RESULT['Value'] ) == 1
    RESULT = TESTDB.getFields( NAME, ['Count'], orderAttribute = 'Count:DESC', limit = 1 )
    assert RESULT['OK']
    assert RESULT['Value'] == ( ( 99, ), )
    RESULT = TESTDB.getFields( NAME, ['Count'], orderAttribute = 'Count:ASC', limit = 1 )
    assert RESULT['OK']
    assert RESULT['Value'] == ( ( 0, ), )
    RESULT = TESTDB.getCounters( NAME, FIELDS, COND10 )
    assert RESULT['OK']
    assert RESULT['Value'] == [( {'Surname': 'Surn1', 'Name': 'Name1'}, 10L )]
    RESULT = TESTDB._getFields( NAME, FIELDS, COND10.keys(), COND10.values() )
    assert RESULT['OK']
    assert len( RESULT['Value'] ) == 10
    RESULT = TESTDB.updateFields( NAME, FIELDS, NEWVALUES, COND10 )
    assert RESULT['OK']
    assert RESULT['Value'] == 10
    RESULT = TESTDB.updateFields( NAME, FIELDS, NEWVALUES, COND10 )
    assert RESULT['OK']
    assert RESULT['Value'] == 0
    print 'Removing'
    RESULT = TESTDB.deleteEntries( NAME, COND10 )
    assert RESULT['OK']
    assert RESULT['Value'] == 10
    RESULT = TESTDB.deleteEntries( NAME )
    assert RESULT['OK']
    assert RESULT['Value'] == 90
    RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
    assert RESULT['OK']
    assert RESULT['Value'] == []
    RESULT = TESTDB.insertFields( NAME, inFields = ALLFIELDS, inValues = ALLVALUES )
    assert RESULT['OK']
    assert RESULT['Value'] == 1
    # NOTE(review): "time" is not imported in this block — presumably the
    # module imports it at the top of the file; confirm, else this is a
    # NameError at runtime.
    time.sleep( 1 )
    RESULT = TESTDB.insertFields( NAME, inDict = ALLDICT )
    assert RESULT['OK']
    assert RESULT['Value'] == 1
    time.sleep( 2 )
    RESULT = TESTDB.getFields( NAME, older = 'UTC_TIMESTAMP()', timeStamp = 'Time' )
    assert RESULT['OK']
    assert len( RESULT['Value'] ) == 2
    RESULT = TESTDB.getFields( NAME, newer = 'UTC_TIMESTAMP()', timeStamp = 'Time' )
    assert len( RESULT['Value'] ) == 0
    RESULT = TESTDB.getFields( NAME, older = Time.toString(), timeStamp = 'Time' )
    assert RESULT['OK']
    assert len( RESULT['Value'] ) == 2
    RESULT = TESTDB.getFields( NAME, newer = Time.dateTime(), timeStamp = 'Time' )
    assert RESULT['OK']
    assert len( RESULT['Value'] ) == 0
    RESULT = TESTDB.deleteEntries( NAME )
    assert RESULT['OK']
    assert RESULT['Value'] == 2
    print 'OK'
  except AssertionError:
    # Report the failing call's payload for debugging
    print 'ERROR ',
    if not RESULT['OK']:
      print RESULT['Message']
    else:
      print RESULT
|
rajanandakumar/DIRAC
|
Core/Utilities/MySQL.py
|
Python
|
gpl-3.0
| 58,684
|
[
"DIRAC"
] |
b422f9440046e902880e19be9bfd31d951e7cd19ba343792aab9ab03a187a69e
|
#
# Copyright (C) 2017-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Testmodule for the time series accumulator.
"""
import unittest as ut
import numpy as np
import pickle
import espressomd
import espressomd.observables
import espressomd.accumulators
N_PART = 100
class TimeSeriesTest(ut.TestCase):

    def test_time_series(self):
        """Check that accumulator results are the same as the respective numpy result.
        """
        system = espressomd.System(box_l=3 * [1.])
        system.part.add(pos=np.random.random((N_PART, 3)))
        obs = espressomd.observables.ParticlePositions(ids=system.part[:].id)
        acc = espressomd.accumulators.TimeSeries(obs=obs)

        recorded = []
        for _ in range(10):
            new_pos = np.random.random((N_PART, 3))
            recorded.append(new_pos)
            system.part[:].pos = new_pos
            acc.update()

        series = acc.time_series()

        # A pickle round-trip must preserve the recorded data
        clone = pickle.loads(pickle.dumps(acc))
        np.testing.assert_array_equal(series, clone.time_series())

        # Every stored frame must equal the positions set at that step
        for got, want in zip(series, recorded):
            np.testing.assert_array_equal(got, want)

        # Clearing the accumulator empties the series
        acc.clear()
        self.assertEqual(len(acc.time_series()), 0)
if __name__ == "__main__":
    # Run the test suite when executed directly
    ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/time_series.py
|
Python
|
gpl-3.0
| 1,973
|
[
"ESPResSo"
] |
57309b4934e20c491e003af82d00a76c004a70aeea20734733cbaa125eebe76f
|
"""
Module to set up run time parameters for Clawpack -- classic code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
from clawpack.pyclaw import io
#------------------------------
def setrun(claw_pkg='classic'):
#------------------------------

    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "classic" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData

    """

    from clawpack.clawutil import data

    assert claw_pkg.lower() == 'classic', "Expected claw_pkg = 'classic'"

    num_dim = 1
    rundata = data.ClawRunData(claw_pkg, num_dim)

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------

    # Sample setup to write one line to setprob.data ...
    probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
    probdata.add_param('ic',    3,   'Initial condition type')
    probdata.add_param('beta',  50., 'Gaussian hump width parameter')
    # Piecewise-constant acoustics medium: (rhol, cl) left of the interface,
    # (rhor, cr) right of it.
    probdata.add_param('rhol',  1.,  'Density left of interface')
    probdata.add_param('cl',    1.,  'Sound speed left of interface')
    probdata.add_param('rhor',  4.,  'Density right of interface')
    probdata.add_param('cr',    0.5, 'Sound speed right of interface')

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #------------------------------------------------------------------

    clawdata = rundata.clawdata  # initialized when rundata instantiated

    # ---------------
    # Spatial domain:
    # ---------------

    # Number of space dimensions:
    clawdata.num_dim = num_dim

    # Lower and upper edge of computational domain:
    clawdata.lower[0] = -5.          # xlower
    clawdata.upper[0] = 3.           # xupper

    # Number of grid cells:
    clawdata.num_cells[0] = 1000     # mx

    # ---------------
    # Size of system:
    # ---------------

    # Number of equations in the system:
    clawdata.num_eqn = 2

    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 2

    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 0

    # -------------
    # Initial time:
    # -------------

    clawdata.t0 = 0.

    # Restart from checkpoint file of a previous run?
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.qNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.

    clawdata.restart = False               # True to restart from prior results
    clawdata.restart_file = 'fort.q0006'   # File to use for restart data

    # -------------
    # Output times:
    #--------------

    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.

    clawdata.output_style = 1

    if clawdata.output_style==1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        clawdata.num_output_times = 200
        clawdata.tfinal = 20.
        clawdata.output_t0 = True  # output at initial (or restart) time?

    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = [0., 0.1]

    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 2
        clawdata.total_steps = 4
        clawdata.output_t0 = True  # output at initial (or restart) time?

    clawdata.output_format = 'ascii'       # 'ascii', 'binary', 'netcdf'

    clawdata.output_q_components = 'all'   # could be list such as [True,True]
    clawdata.output_aux_components = 'none'  # could be list
    clawdata.output_aux_onlyonce = True    # output aux arrays only at t0

    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0

    # --------------
    # Time stepping:
    # --------------

    # if dt_variable==True:  variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True

    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 1.

    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1.e9

    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.9
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.0

    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 50000

    # ------------------
    # Method to be used:
    # ------------------

    # Order of accuracy:  1 => Godunov,  2 => Lax-Wendroff plus limiters
    clawdata.order = 2

    # Number of waves in the Riemann solution:
    clawdata.num_waves = 2

    # List of limiters to use for each wave family:
    # Required:  len(limiter) == num_waves
    # Some options:
    #   0 or 'none'     ==> no limiter (Lax-Wendroff)
    #   1 or 'minmod'   ==> minmod
    #   2 or 'superbee' ==> superbee
    #   3 or 'vanleer'  ==> van Leer
    #   4 or 'mc'       ==> MC limiter
    clawdata.limiter = [4,4]

    clawdata.use_fwaves = False    # True ==> use f-wave version of algorithms

    # Source terms splitting:
    #   src_split == 0 or 'none'    ==> no source term (src routine never called)
    #   src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    #   src_split == 2 or 'strang'  ==> Strang (2nd order) splitting used,  not recommended.
    clawdata.source_split = 'none'

    # --------------------
    # Boundary conditions:
    # --------------------

    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2

    # Choice of BCs at xlower and xupper:
    #   0 or 'user'     => user specified (must modify bcNamr.f to use this option)
    #   1 or 'extrap'   => extrapolation (non-reflecting outflow)
    #   2 or 'periodic' => periodic (must specify this at both boundaries)
    #   3 or 'wall'     => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'wall'   # at xlower
    clawdata.bc_upper[0] = 'wall'   # at xupper

    return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    # An optional command-line argument overrides the claw_pkg default.
    import sys
    rundata = setrun(*sys.argv[1:])
    rundata.write()
|
clawpack/adjoint
|
examples/acoustics_1d_heterogeneous/forward/setrun.py
|
Python
|
bsd-2-clause
| 7,368
|
[
"Gaussian",
"NetCDF"
] |
f44bef74b6c6eef9996b6617be5093de81045cc8107f25eec3362b795b63db62
|
#!/usr/bin/python
import os
import sys
import Bio
import vcf
import pandas as pd
from mrbait import sequence_tools as s
from mrbait import misc_utils as utils
from Bio import AlignIO
"""Functions for parsing and manipulating sequence alignment files
Functions by Zach Zbinden and Tyler Chafin"""
#Write FASTA from pandas df where col1 is index, col2 is sequence
#seqs must be a pandas df
def writeFasta(seqs, fas):
	"""Write a two-column pandas DataFrame to a FASTA file.

	Each row becomes one record: the first column supplies the record id
	(written as ">id_<value>") and the second column the sequence.

	seqs -- pandas DataFrame; via itertuples a[0] is the index, a[1] the id
	        column and a[2] the sequence column
	fas  -- output file path
	Exits the program with status 1 on I/O or unexpected errors.
	"""
	with open(fas, 'w') as fh:
		try:
			for a in seqs.itertuples():
				name = ">id_" + str(a[1]) + "\n"
				seq = a[2] + "\n"
				fh.write(name)
				fh.write(seq)
		except IOError as e:
			# Bug fix: this is a write path, not a read
			print("Could not write file:",e)
			sys.exit(1)
		except Exception as e:
			print("Unexpected error:",e)
			sys.exit(1)
		# No explicit close needed: the with-statement closes fh
#Write FASTA from pandas df where col1 is index, col2 is sequence
#seqs must be a pandas df
def writeFastaNoprefix(seqs, fas):
	"""Write a two-column pandas DataFrame to FASTA without the "id_" prefix.

	Each row becomes one record: the first column supplies the header
	(written as ">(value)") and the second column the sequence.

	seqs -- pandas DataFrame; via itertuples a[0] is the index, a[1] the
	        header column and a[2] the sequence column
	fas  -- output file path
	Exits the program with status 1 on I/O or unexpected errors.
	"""
	with open(fas, 'w') as fh:
		try:
			for a in seqs.itertuples():
				name = ">" + str(a[1]) + "\n"
				seq = a[2] + "\n"
				fh.write(name)
				fh.write(seq)
		except IOError as e:
			# Bug fix: this is a write path, not a read
			print("Could not write file:",e)
			sys.exit(1)
		except Exception as e:
			print("Unexpected error:",e)
			sys.exit(1)
		# No explicit close needed: the with-statement closes fh
#Write FASTA from pandas df where col1 is index, col2 is sequence
#seqs must be a pandas df
#this version replaces gaps with N characters
def writeFastaNogap(seqs, fas):
	"""Write a two-column pandas DataFrame to FASTA, masking gaps as N.

	Like writeFasta, but every "-" in the sequence column is replaced by
	"N" before writing.

	seqs -- pandas DataFrame; via itertuples a[0] is the index, a[1] the id
	        column and a[2] the sequence column
	fas  -- output file path
	Exits the program with status 1 on I/O or unexpected errors.
	"""
	with open(fas, 'w') as fh:
		try:
			for a in seqs.itertuples():
				name = ">id_" + str(a[1]) + "\n"
				seq = str(a[2]) + "\n"
				seq = seq.replace("-","N")
				fh.write(name)
				fh.write(seq)
		except IOError as e:
			# Bug fix: this is a write path, not a read
			print("Could not write file:",e)
			sys.exit(1)
		except Exception as e:
			print("Unexpected error:",e)
			sys.exit(1)
		# No explicit close needed: the with-statement closes fh
#function to reverse complement a fasta file
def reverseComplementFasta(infile, outfile):
	"""Reverse-complement every record of a FASTA file.

	Reads [header, sequence] records from infile, reverse-complements each
	sequence, and writes the result to outfile (headers unchanged).
	Returns 0 on success.
	"""
	records = list()
	# Fix: local was named "tuple", shadowing the builtin
	for record in read_fasta(infile):
		record[1] = s.reverseComplement(record[1])
		records.append(record)
	writeFastaNoprefix(pd.DataFrame(records), outfile)
	return(0)
#Read genome as FASTA. FASTA header will be used
#This is a generator function
#Doesn't matter if sequences are interleaved or not.
def read_fasta(fas):
	"""GENERATOR over the records of a FASTA file.

	Yields [header, sequence] lists; works whether sequences are
	interleaved or single-line. Raises FileNotFoundError when the file
	does not exist. FASTA headers are used verbatim (minus the ">").
	"""
	if not utils.fileCheck(fas):
		raise FileNotFoundError("Fatal exception, file %s not found."%fas)
	handle = open(fas)
	try:
		with handle as stream:
			header = ""
			sequence = ""
			for raw in stream:
				stripped = raw.strip()
				if not stripped:
					continue
				stripped = stripped.replace(" ","")
				if stripped.startswith(">"):
					# New header: flush the previous record first
					if header:
						yield([header,sequence])
						header = ""
						sequence = ""
					header = stripped.replace(">","")
				else:
					sequence += stripped
			# Flush the last record, if it has both a header and sequence
			if header and sequence:
				yield([header,sequence])
	finally:
		handle.close()
#This is a GENERATOR function to read through a .loci file
#.loci is the RAD alignment output from the promgram pyRAD
#YIELDS: BioPython MultipleSeqAlignment object
def read_loci(infile):
	"""GENERATOR over the alignments of a pyRAD .loci file.

	Yields one BioPython MultipleSeqAlignment per locus; loci are
	delimited by lines whose first character is "/".
	Raises FileNotFoundError when the file does not exist.
	"""
	if not utils.fileCheck(infile):
		raise FileNotFoundError("Fatal exception, file %s not found."%infile)
	loci = Bio.Align.MultipleSeqAlignment([])
	try:
		f = open(infile)
	except IOError as err:
		print("I/O error({0}): {1}".format(err.errno, err.strerror))
		# Bug fix: previously execution fell through to use an undefined
		# handle (and the generic handler called the nonexistent
		# sys.exec_info); re-raise so the caller sees the real error.
		raise
	with f as file_object:
		for line in file_object:
			line = line.strip()
			if not line:
				continue
			if line[0] != "/":
				# Sample line: "<identifier> <sequence>"
				identifier = line.split()[0]
				sequence = line.split()[1]
				loci.add_sequence(identifier, sequence)
			else:
				# Locus separator: emit the accumulated alignment
				yield(loci)
				loci = Bio.Align.MultipleSeqAlignment([])
#Function to remove existing CHUNK files
def removeChunks(dir_name):
	"""Delete every leftover *.chunk file from the given directory."""
	for entry in os.listdir(dir_name):
		if entry.endswith(".chunk"):
			os.remove(os.path.join(dir_name, entry))
#function to count number of loci alignments in file
def countLoci(loci):
	"""Count the number of locus alignments in a pyRAD .loci file.

	Each alignment is terminated by a "//" separator line, so the count of
	such lines equals the number of loci. Blank lines are ignored.
	"""
	count=0
	# Fix: use a with-statement so the handle is closed (original leaked it)
	with open(loci, 'r') as fh:
		for l in fh:
			line = l.strip()
			if not line:
				continue
			if line.startswith("//"):
				count+=1
	return(count)
#function to count number of loci in FASTA file (by headers)
def countMAF(loci):
	"""Count the number of alignment blocks in a MAF file.

	Each MAF alignment block starts with an "a" line, so the count of such
	lines equals the number of alignments. Blank lines are ignored.
	"""
	count=0
	# Fix: use a with-statement so the handle is closed (original leaked it)
	with open(str(loci), 'r') as fh:
		for l in fh:
			line = l.strip()
			if not line:
				continue
			if line.startswith("a"):
				count+=1
	return(count)
#function to count number of loci in FASTA file (by headers)
def countXMFA(loci):
	"""Count the number of alignment blocks in an XMFA file.

	Each XMFA alignment block is terminated by an "=" line, so the count
	of such lines equals the number of alignments. Blank lines are ignored.
	"""
	count=0
	# Fix: use a with-statement so the handle is closed (original leaked it)
	with open(str(loci), 'r') as fh:
		for l in fh:
			line = l.strip()
			if not line:
				continue
			if line.startswith("="):
				count+=1
	return(count)
#Function split a file into chunks, skipping commented lines
def generic_chunker(infile, chunks, wd):
	"""Split infile into (up to) `chunks` hidden chunk files in directory wd.

	Blank lines are skipped; remaining lines are distributed so each chunk
	holds roughly line_count // chunks lines, with the final chunk absorbing
	any remainder. Pre-existing *.chunk files in wd are removed first.
	Returns the list of chunk file paths (wd/.<i>.chunk).
	"""
	chunks = int(chunks)
	line_count = utils.fileLength(infile, skip=True)
	# Never create more chunks than there are lines
	if line_count < chunks:
		chunks = line_count
	chunk_size = line_count // chunks
	removeChunks(wd)
	files = list()
	#write .loci file into chunk files
	with open(infile) as file_object:
		max_chunks = chunks
		chunks = 1
		line_number = 1
		chunk_file = wd + "/." + str(chunks) + ".chunk"
		out_object = open(chunk_file, "w")
		files.append(chunk_file)
		for l in file_object:
			line = l.strip()
			if not line:
				continue
			if chunks < max_chunks:
				if line_number <= chunk_size:
					line_number = line_number + 1
					out = line + "\n"
					out_object.write(out)
				else:
					# Current chunk is full: open the next chunk file
					line_number = 1
					chunks = chunks + 1
					out_object.close()
					chunk_file = wd + "/." + str(chunks) + ".chunk"
					out_object = open(chunk_file, "w")
					files.append(chunk_file)
					out = line + "\n"
					out_object.write(out)
			else:
				#If last chunk, keep writing to final chunk file
				out = line + "\n"
				out_object.write(out)
		out_object.close()
		file_object.close()
	return(files)
#Function split .loci file into n chunks
def loci_chunker(infile, chunks, wd):
	"""Split a pyRAD .loci file into (up to) `chunks` hidden chunk files in wd.

	Whole loci are kept together: only the "//" separator lines advance the
	per-chunk locus counter, so each chunk gets roughly
	countLoci(infile) // chunks complete loci, with the last chunk absorbing
	the remainder. Pre-existing *.chunk files in wd are removed first.
	Returns the list of chunk file paths (wd/.<i>.chunk).
	"""
	chunks = int(chunks)
	loci_count = countLoci(infile)
	# Never create more chunks than there are loci
	if loci_count < chunks:
		chunks = loci_count
	chunk_size = loci_count // chunks
	removeChunks(wd)
	files = list()
	#write .loci file into chunk files
	with open(infile) as file_object:
		max_chunks = chunks
		chunks = 1
		loci_number = 1
		chunk_file = wd + "/." + str(chunks) + ".chunk"
		out_object = open(chunk_file, "w")
		files.append(chunk_file)
		for l in file_object:
			line = l.strip()
			if not line:
				continue
			if chunks < max_chunks:
				if loci_number <= chunk_size:
					if line[0] == ">":
						# Sample line: belongs to the current locus
						out = line + "\n"
						out_object.write(out)
					else:
						# "//" separator: one more locus completed
						loci_number = loci_number + 1
						out = line + "\n"
						out_object.write(out)
				else:
					# Current chunk is full: open the next chunk file
					loci_number = 1
					chunks = chunks + 1
					out_object.close()
					chunk_file = wd + "/." + str(chunks) + ".chunk"
					out_object = open(chunk_file, "w")
					files.append(chunk_file)
					out = line + "\n"
					out_object.write(out)
			else:
				#If last chunk, keep writing to final chunk file
				out = line + "\n"
				out_object.write(out)
		out_object.close()
		file_object.close()
	return(files)
#Function to split maf file into n chunks
def maf_chunker(infile, chunks, wd):
    """Split a MAF file into up to `chunks` chunk files of ~equal alignment
    counts.

    "#" header lines seen before the first alignment are accumulated and
    replayed at the top of every chunk file. Alignments start at "a" lines;
    each is written with a preceding blank line. The final chunk absorbs any
    remainder. Returns the list of chunk file paths created.
    """
    chunks = int(chunks)
    loci_count = countMAF(infile)
    if loci_count < chunks:
        chunks = loci_count
    chunk_size = loci_count // chunks
    removeChunks(wd) #clear any existing chunkfiles
    files = list()
    #write .loci file into chunk files
    with open(infile) as file_object:
        max_chunks = chunks
        chunks = 1  # reused as the current chunk index from here on
        loci_number = 0
        chunk_file = wd + "/." + str(chunks) + ".chunk"
        out_object = open(chunk_file, "w")
        files.append(chunk_file)
        header = ""
        hset = 0  # flips to 1 once the first alignment line is seen
        for l in file_object:
            line = l.strip()
            if not line:
                continue
            #First, get header information
            if hset == 0:
                if line[0] == "a":
                    loci_number = 1
                    hset=1
                    header += "\n"
                    out_object.write(header)
                    out = "\n" + line + "\n"
                    out_object.write(out)
                elif line[0] =="#":
                    header +=str(line+"\n")
            else:
                #Write chunk_size alignments to each chunk
                if chunks < max_chunks:
                    #If starting new alignment
                    if line[0] == "a":
                        loci_number += 1 #increment locus number
                        #If current chunk not full, add locus to chunk
                        if loci_number <= chunk_size:
                            out = "\n" + line + "\n"
                            out_object.write(out)
                        #Otherwise, start new chunk
                        else:
                            # rollover: this "a" line opens the new chunk and
                            # counts as its first locus (loci_number reset to 1)
                            loci_number = 1
                            chunks = chunks + 1
                            out_object.close()
                            chunk_file = wd + "/." + str(chunks) + ".chunk"
                            out_object = open(chunk_file, "w")
                            out_object.write(header)
                            files.append(chunk_file)
                            out = line + "\n"
                            out_object.write(out)
                    #If not new alignment, write to current chunk
                    else:
                        out = line + "\n"
                        out_object.write(out)
                #If last chunk, keep writing to final chunk file
                else:
                    if line[0] == "a":
                        out = "\n" + line + "\n"
                    else:
                        out = line + "\n"
                    out_object.write(out)
        out_object.close()
        file_object.close()  # redundant inside `with`, but harmless
    return(files)
#Function to split xmfa file into n chunks
def xmfa_chunker(infile, chunks, wd):
    """Split an XMFA file into up to `chunks` chunk files of ~equal alignment
    counts.

    "#" header lines seen before the first sequence are accumulated and
    replayed at the top of every chunk file. Sequences start at ">" lines and
    alignments end at "=" lines (alignments are counted on the "=" lines).
    The final chunk absorbs any remainder. Returns the list of chunk file
    paths created.
    """
    chunks = int(chunks)
    loci_count = countXMFA(infile)
    if loci_count < chunks:
        chunks = loci_count
    chunk_size = loci_count // chunks
    removeChunks(wd)
    files = list()
    #write .loci file into chunk files
    with open(infile) as file_object:
        max_chunks = chunks
        chunks = 1  # reused as the current chunk index from here on
        loci_number = 1
        chunk_file = wd + "/." + str(chunks) + ".chunk"
        out_object = open(chunk_file, "w")
        files.append(chunk_file)
        header = ""
        hset = 0  # flips to 1 once the first ">" line is seen
        for l in file_object:
            line = l.strip()
            if not line:
                continue
            #First, get header information
            if hset == 0:
                if line[0] == ">":
                    loci_number = 1
                    hset=1
                    out_object.write(header)
                    out = line + "\n"
                    out_object.write(out)
                elif line[0] =="#":
                    header +=str(line+"\n")
            else:
                if chunks < max_chunks:
                    if loci_number <= chunk_size:
                        #If its the header for a sequence, start seq
                        if line[0] == ">":
                            out = line + "\n"
                            out_object.write(out)
                        #If end of alignment, deposit it
                        elif line[0] == "=":
                            loci_number = loci_number + 1
                            out = line + "\n"
                            out_object.write(out)
                        #otherwise its a sequence!
                        else:
                            out = line + "\n"
                            out_object.write(out)
                    else:
                        # rollover: quota reached, so this line (the first of
                        # the next alignment) opens a new chunk file
                        loci_number = 1
                        chunks = chunks + 1
                        out_object.close()
                        chunk_file = wd + "/." + str(chunks) + ".chunk"
                        out_object = open(chunk_file, "w")
                        out_object.write(header)
                        files.append(chunk_file)
                        out = line + "\n"
                        out_object.write(out)
                else:
                    #If last chunk, keep writing to final chunk file
                    out = line + "\n"
                    out_object.write(out)
        out_object.close()
        file_object.close()  # redundant inside `with`, but harmless
    # else:
    #     chunks = max_chunks
    #     out_object.write(line.strip())
    return(files)
|
tkchafin/mrbait
|
mrbait/aln_file_tools.py
|
Python
|
gpl-3.0
| 11,249
|
[
"Biopython"
] |
1a71b86a69dd6485a2c064ce2cf8ceb85bb8d1f680a12b9ca392ce3c34af1f22
|
# Compare global vs. adaptive thresholding on a median-blurred grayscale image
# (OpenCV tutorial-style demo).
import cv2
import numpy as np
from matplotlib import pyplot as plt

# flag 0 -> load as grayscale; blur to suppress salt-and-pepper noise
img = cv2.imread('/home/pi/Desktop/image.jpg',0)
img = cv2.medianBlur(img,5)

# Fixed global threshold at 127
ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
# Adaptive thresholds: local mean / Gaussian-weighted mean over an 11x11
# neighborhood, minus constant 2
th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
            cv2.THRESH_BINARY,11,2)
th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)

titles = ['Original Image', 'Global Thresholding (v = 127)','Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]

# xrange is Python 2 only (NameError on Python 3); range behaves identically here.
for i in range(4):
    plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])
plt.show()
|
agdal1125/sicp2
|
document-scanner/please.py
|
Python
|
mit
| 743
|
[
"Gaussian"
] |
b44870f5544a040f26978d6f25c6fac04d59c05d022d0f22ce13573e723f0416
|
__version__="v3.2 beta1"
welcome_block="""
# Multi-Echo ICA, Version %s
#
# Kundu, P., Brenowitz, N.D., Voon, V., Worbe, Y., Vertes, P.E., Inati, S.J., Saad, Z.S.,
# Bandettini, P.A. & Bullmore, E.T. Integrated strategy for improving functional
# connectivity mapping using multiecho fMRI. PNAS (2013).
#
# Kundu, P., Inati, S.J., Evans, J.W., Luh, W.M. & Bandettini, P.A. Differentiating
# BOLD and non-BOLD signals in fMRI time series using multi-echo EPI. NeuroImage (2011).
# https://doi.org/10.1016/j.neuroimage.2011.12.028
#
# PROCEDURE 2a: Model fitting and component selection routines
"""
import numpy as np
import scipy.stats as stats
import scipy.signal as SS
from numpy import random
from sklearn import svm
import scipy.optimize
def fitmodels_direct(catd,mmix,mask,t2s,tes,fout=None,reindex=False,mmixN=None,full_sel=True,debugout=False):
    """Fit TE-dependence (R2*) and TE-independence (S0) models to each ICA
    component and compute the selection metrics Kappa and Rho.

    Inputs:
        catd:  multi-echo data, (nx,ny,nz,Ne,nt) -- presumably; verify caller
        mmix:  ICA mixing matrix (time x components)
        mask:  boolean brain mask, (nx,ny,nz)
        t2s:   T2* map, (nx,ny,nz); voxels with t2s==0 are excluded
        tes:   1d array of echo times
        fout:  flag/affine for per-component TE-dependence map output
        reindex: if True, re-order components by descending Kappa
        mmixN: optional normalized mixing matrix for weight computation
        full_sel: also compute clustered maps needed by selcomps()
        debugout: dump pickled selection variables for debugging

    Returns:
        (seldict, comptab, betas, mmix_new) where comptab columns are
        [index, Kappa, Rho, varex, varex_norm].

    NOTE(review): relies on module-level names (optcom, computefeats2,
    unmask, fmask, get_coeffs, cat2echos, uncat2echos, getfbounds, niwrite,
    spatclust, rankvec, Z_MAX, F_MAX, aff, head, args, nib, os) and shells
    out to AFNI (3drefit/3dcalc/3dmerge).
    """
    #Compute opt. com. raw data
    tsoc = np.array(optcom(catd,t2s,tes,mask),dtype=float)[mask]
    tsoc_mean = tsoc.mean(axis=-1)
    tsoc_dm = tsoc-tsoc_mean[:,np.newaxis]
    #Compute un-normalized weight dataset (features)
    if mmixN == None: mmixN=mmix
    #WTS = computefeats2(unmask(unmask(tsoc,mask)[t2s!=0],t2s!=0),mmixN,t2s!=0,normalize=False)
    WTS = computefeats2(unmask(tsoc,mask),mmixN,mask,normalize=False)
    #Compute PSC dataset - shouldn't have to refit data
    tsoc_B = get_coeffs(unmask(tsoc_dm,mask),mask,mmix)[mask]
    tsoc_Babs = np.abs(tsoc_B)
    PSC = tsoc_B/tsoc.mean(axis=-1)[:,np.newaxis]*100
    #Compute skews to determine signs based on unnormalized weights, correct mmix & WTS signs based on spatial distribution tails
    from scipy.stats import skew
    signs = skew(WTS,axis=0)
    signs /= np.abs(signs)
    mmix = mmix.copy()
    mmix*=signs
    WTS*=signs
    PSC*=signs
    totvar = (tsoc_B**2).sum()
    totvar_norm = (WTS**2).sum()
    #Compute Betas and means over TEs for TE-dependence analysis
    Ne = tes.shape[0]
    betas = cat2echos(get_coeffs(uncat2echos(catd,Ne),np.tile(mask,(1,1,Ne)),mmix),Ne)
    nx,ny,nz,Ne,nc = betas.shape
    Nm = mask.sum()
    NmD = (t2s!=0).sum()
    mu = catd.mean(axis=-1)
    tes = np.reshape(tes,(Ne,1))
    # NOTE(review): `ne` (lowercase) is not defined in this function -- `Ne`
    # is. Presumably `ne` is a module-level global; confirm, else this raises
    # NameError at runtime.
    fmin,fmid,fmax = getfbounds(ne)
    #Mask arrays
    mumask = fmask(mu,t2s!=0)
    #t2smask = fmask(t2s,mask)
    t2smask = fmask(t2s,t2s!=0)
    betamask = fmask(betas,t2s!=0)
    if debugout: fout=aff
    #Setup Xmats
    #Model 1: TE-independence (S0) -- betas proportional to mean signal
    X1 = mumask.transpose()
    #Model 2: TE-dependence (R2*) -- betas scale with TE/T2*
    X2 = np.tile(tes,(1,NmD))*mumask.transpose()/t2smask.transpose()
    #Tables for component selection
    Kappas = np.zeros([nc])
    Rhos = np.zeros([nc])
    varex = np.zeros([nc])
    varex_norm = np.zeros([nc])
    Z_maps = np.zeros([Nm,nc])
    F_R2_maps = np.zeros([NmD,nc])
    F_S0_maps = np.zeros([NmD,nc])
    Z_clmaps = np.zeros([Nm,nc])
    F_R2_clmaps = np.zeros([NmD,nc])
    F_S0_clmaps = np.zeros([NmD,nc])
    Br_clmaps_R2 = np.zeros([Nm,nc])
    Br_clmaps_S0 = np.zeros([Nm,nc])
    for i in range(nc):
        #size of B is (nc, nx*ny*nz)
        B = np.atleast_3d(betamask)[:,:,i].transpose()
        alpha = (np.abs(B)**2).sum(axis=0)
        varex[i] = (tsoc_B[:,i]**2).sum()/totvar*100.
        varex_norm[i] = (unmask(WTS,mask)[t2s!=0][:,i]**2).sum()/totvar_norm*100.
        #S0 Model: per-voxel least-squares fit, then F-statistic
        coeffs_S0 = (B*X1).sum(axis=0)/(X1**2).sum(axis=0)
        SSE_S0 = (B - X1*np.tile(coeffs_S0,(Ne,1)))**2
        SSE_S0 = SSE_S0.sum(axis=0)
        F_S0 = (alpha - SSE_S0)*2/(SSE_S0)
        F_S0_maps[:,i] = F_S0
        #R2 Model: per-voxel least-squares fit, then F-statistic
        coeffs_R2 = (B*X2).sum(axis=0)/(X2**2).sum(axis=0)
        SSE_R2 = (B - X2*np.tile(coeffs_R2,(Ne,1)))**2
        SSE_R2 = SSE_R2.sum(axis=0)
        F_R2 = (alpha - SSE_R2)*2/(SSE_R2)
        F_R2_maps[:,i] = F_R2
        #Compute weights as Z-values (clipped to +/-Z_MAX, a module global)
        wtsZ=(WTS[:,i]-WTS[:,i].mean())/WTS[:,i].std()
        wtsZ[np.abs(wtsZ)>Z_MAX]=(Z_MAX*(np.abs(wtsZ)/wtsZ))[np.abs(wtsZ)>Z_MAX]
        Z_maps[:,i] = wtsZ
        #Compute Kappa and Rho: F-stats averaged with Z^2 weights
        F_S0[F_S0>F_MAX] = F_MAX
        F_R2[F_R2>F_MAX] = F_MAX
        Kappas[i] = np.average(F_R2,weights=np.abs(np.squeeze(unmask(wtsZ,mask)[t2s!=0]**2.)))
        Rhos[i] = np.average(F_S0,weights=np.abs(np.squeeze(unmask(wtsZ,mask)[t2s!=0]**2.)))
    #Tabulate component values
    comptab_pre = np.vstack([np.arange(nc),Kappas,Rhos,varex,varex_norm]).T
    if reindex:
        #Re-index all components in Kappa order
        comptab = comptab_pre[comptab_pre[:,1].argsort()[::-1],:]
        Kappas = comptab[:,1]; Rhos = comptab[:,2]; varex = comptab[:,3]; varex_norm = comptab[:,4]
        nnc = np.array(comptab[:,0],dtype=np.int)
        mmix_new = mmix[:,nnc]
        F_S0_maps = F_S0_maps[:,nnc]; F_R2_maps = F_R2_maps[:,nnc]; Z_maps = Z_maps[:,nnc]
        WTS = WTS[:,nnc]; PSC=PSC[:,nnc]; tsoc_B=tsoc_B[:,nnc]; tsoc_Babs=tsoc_Babs[:,nnc]
        comptab[:,0] = np.arange(comptab.shape[0])
    else:
        comptab = comptab_pre
        mmix_new = mmix
    #Full selection including clustering criteria
    seldict=None
    if full_sel:
        for i in range(nc):
            #Save out files
            out = np.zeros((nx,ny,nz,4))
            if fout!=None:
                ccname = "cc%.3d.nii" % i
            else: ccname = ".cc_temp.nii.gz"
            out[:,:,:,0] = np.squeeze(unmask(PSC[:,i],mask))
            out[:,:,:,1] = np.squeeze(unmask(F_R2_maps[:,i],t2s!=0))
            out[:,:,:,2] = np.squeeze(unmask(F_S0_maps[:,i],t2s!=0))
            out[:,:,:,3] = np.squeeze(unmask(Z_maps[:,i],mask))
            #import ipdb; ipdb.set_trace()
            niwrite(out,fout,ccname)
            os.system('3drefit -sublabel 0 PSC -sublabel 1 F_R2 -sublabel 2 F_SO -sublabel 3 Z_sn %s 2> /dev/null > /dev/null'%ccname)
            csize = np.max([int(Nm*0.0005)+5,20])
            #csize = 10
            #Do simple clustering on F
            os.system("3dcalc -overwrite -a %s[1..2] -expr 'a*step(a-%i)' -prefix .fcl_in.nii.gz -overwrite" % (ccname,fmin))
            os.system('3dmerge -overwrite -dxyz=1 -1clust 1 %i -doall -prefix .fcl_out.nii.gz .fcl_in.nii.gz' % (csize))
            sel = fmask(nib.load('.fcl_out.nii.gz').get_data(),t2s!=0)!=0
            sel = np.array(sel,dtype=np.int)
            F_R2_clmaps[:,i] = sel[:,0]
            F_S0_clmaps[:,i] = sel[:,1]
            #Do simple clustering on Z at p<0.05
            sel = spatclust(None,mask,csize,1.95,head,aff,infile=ccname,dindex=3,tindex=3)
            Z_clmaps[:,i] = sel
            #Do simple clustering on ranked signal-change map
            countsigFR2 = F_R2_clmaps[:,i].sum()
            countsigFS0 = F_S0_clmaps[:,i].sum()
            Br_clmaps_R2[:,i] = spatclust(rankvec(tsoc_Babs[:,i]),mask,csize,max(tsoc_Babs.shape)-countsigFR2,head,aff)
            Br_clmaps_S0[:,i] = spatclust(rankvec(tsoc_Babs[:,i]),mask,csize,max(tsoc_Babs.shape)-countsigFS0,head,aff)
        # Bundle the selection variables by name for selcomps()
        seldict = {}
        selvars = ['Kappas','Rhos','WTS','varex','Z_maps','F_R2_maps','F_S0_maps',\
        'Z_clmaps','F_R2_clmaps','F_S0_clmaps','tsoc_B','Br_clmaps_R2','Br_clmaps_S0','PSC']
        for vv in selvars:
            seldict[vv] = eval(vv)
        if debugout or ('DEBUGOUT' in args):
            #Package for debug
            import cPickle as cP
            import zlib
            try: os.system('mkdir compsel.debug')
            except: pass
            selvars = ['Kappas','Rhos','WTS','varex','Z_maps','Z_clmaps','F_R2_clmaps','F_S0_clmaps','Br_clmaps_R2','Br_clmaps_S0','PSC']
            for vv in selvars:
                with open('compsel.debug/%s.pkl.gz' % vv,'wb') as ofh:
                    print "Writing debug output: compsel.debug/%s.pkl.gz" % vv
                    ofh.write(zlib.compress(cP.dumps(eval(vv))))
                ofh.close()
    return seldict,comptab,betas,mmix_new
def do_svm(train_set, train_labs, test_set, svmtype=0):
    """Train a linear SVM on (train_set, train_labs) and predict test_set.

    svmtype: 0 -> plain linear SVC; 1 -> L1-penalized LinearSVC;
             2 -> linear SVC with probability estimates enabled.

    Returns (predictions, fitted classifier).
    """
    if svmtype == 1:
        clf = svm.LinearSVC(loss='squared_hinge', penalty='l1', dual=False)
    else:
        clf = svm.SVC(kernel='linear', probability=(svmtype == 2))
    clf.fit(train_set, train_labs)
    return clf.predict(test_set), clf
def fft_variance(fproj_arr,fproj_arr_val,A,B):
    """Sum spatial-FFT magnitude values over pixels whose ranks differ
    significantly (t-test, p<0.05) between component groups A and B.

    Returns (sum over pixels higher in A, sum over pixels higher in B).

    NOTE(review): `andb` and `mask` are not parameters or locals -- both are
    resolved from the enclosing module's globals at call time; verify `mask`
    is set before this is called.
    """
    fproj_sel_T = stats.ttest_ind(fproj_arr[:,A].T,fproj_arr[:,B].T)
    # ==2 means both conditions hold: correct sign AND significant p-value
    fproj_sel_A = (andb([fproj_sel_T[0]>0,fproj_sel_T[1]<0.05])==2).reshape(mask.shape[0:2])
    fproj_sel_B = (andb([fproj_sel_T[0]<0,fproj_sel_T[1]<0.05])==2).reshape(mask.shape[0:2])
    return fproj_arr_val[fproj_sel_A.flatten()].sum(0),fproj_arr_val[fproj_sel_B.flatten()].sum(0)
def gaussian(height, center_x, center_y, width_x, width_y):
    """Return a callable f(x, y) evaluating a 2-D Gaussian surface with the
    given peak height, center and per-axis widths."""
    width_x = float(width_x)
    width_y = float(width_y)

    def evaluate(x, y):
        dx = (center_x - x) / width_x
        dy = (center_y - y) / width_y
        return height * np.exp(-(dx ** 2 + dy ** 2) / 2)

    return evaluate
def moments(data):
    """Return (height, x, y, width_x, width_y): moment-based gaussian
    parameters of a 2D distribution.

    x is the centroid along axis 0, y along axis 1; widths are the standard
    deviations of a column/row profile through the centroid. Used as the
    initial guess for fitgaussian().
    """
    total = data.sum()
    X, Y = np.indices(data.shape)
    x = (X*data).sum()/total
    y = (Y*data).sum()/total
    col = data[:, int(y)]
    # col varies along axis 0, so its spread is measured about x.
    # (The original centered it on y -- wrong whenever x != y.)
    width_x = np.sqrt(abs((np.arange(col.size)-x)**2*col).sum()/col.sum())
    row = data[int(x), :]
    # row varies along axis 1, so its spread is measured about y.
    width_y = np.sqrt(abs((np.arange(row.size)-y)**2*row).sum()/row.sum())
    height = data.max()
    return height, x, y, width_x, width_y
def fitgaussian(data):
    """Fit a 2D gaussian to `data` by least squares.

    Returns (height, x, y, width_x, width_y); the moment estimates from
    moments() seed the optimization.
    """
    initial_guess = moments(data)

    def residuals(p):
        # Flattened pointwise difference between the model surface and the data.
        return np.ravel(gaussian(*p)(*np.indices(data.shape)) - data)

    fitted, _success = scipy.optimize.leastsq(residuals, initial_guess)
    return fitted
def selcomps(seldict,debug=False,olevel=2,oversion=99,knobargs='',filecsdata=False,savecsdiag=True,group0_only=False,strict_mode=False):
    """Classify ICA components into accepted / rejected / mid-Kappa / ignored.

    Works from the metric maps produced by fitmodels_direct() (passed in
    `seldict`). Builds a feature space per component, seeds a "good" cluster
    via DBSCAN (falling back to Kappa/Rho elbow guesses), then uses SVMs and
    vein/artifact heuristics to sort the remainder.

    Returns (accepted, rejected, midk, ignored) as sorted lists of component
    indices.

    NOTE(review): relies on module-level names (options, andb, dice, unmask,
    fmask, rankvec, getelbow*, getfbounds, spatclust, niwrite, mask, t2s, s0,
    mmix, ne, aff, head, nib, os, scoreatpercentile) and unpacks `seldict`
    into local names via exec -- Kappas, Rhos, WTS, varex, Z_maps, etc.
    """
    selmodelversion='fft20c.051517'
    #import ipdb
    import numpy.fft as fft
    from sklearn import svm
    from sklearn.cluster import DBSCAN
    # `options` is a module global (argparse result); absent in some callers
    try:
        if options.filecsdata: filecsdata=True
    except:
        pass
    if filecsdata:
        import cPickle as pickle
        import bz2
        if seldict!=None:
            print "Saving component selection data"
            csstate_f = bz2.BZ2File('compseldata.pklbz','wb')
            pickle.dump(seldict,csstate_f)
            csstate_f.close()
        else:
            try:
                csstate_f = bz2.BZ2File('compseldata.pklbz','rb')
                seldict = pickle.load(csstate_f)
                csstate_f.close()
            except:
                print "No component data found!"
                return None
    #Dump dictionary into variable names (Kappas, Rhos, WTS, ...)
    for key in seldict.keys(): exec("%s=seldict['%s']" % (key,key))
    #List of components
    midk = []
    ign = []
    nc = np.arange(len(Kappas))
    ncl = np.arange(len(Kappas))
    #If user has specified components to accept manually
    try:
        if options.manacc:
            acc = sorted([int(vv) for vv in options.manacc.split(',')])
            midk = []
            rej = sorted(np.setdiff1d(ncl,acc))
            return acc,rej,midk,[] #Add string for ign
    except:
        pass
    """
    Set knobs
    """
    if knobargs!='':
        for knobarg in ''.join(knobargs).split(','): exec(knobarg)
    """
    Do some tallies for no. of significant voxels
    """
    countsigZ = Z_clmaps.sum(0)
    countsigFS0 = F_S0_clmaps.sum(0)
    countsigFR2 = F_R2_clmaps.sum(0)
    countnoise = np.zeros(len(nc))
    """
    Make table of dice values
    """
    dice_table = np.zeros([nc.shape[0],2])
    csize = np.max([int(mask.sum()*0.0005)+5,20])
    for ii in ncl:
        dice_FR2 = dice(unmask(Br_clmaps_R2[:,ii],mask)[t2s!=0],F_R2_clmaps[:,ii])
        dice_FS0 = dice(unmask(Br_clmaps_S0[:,ii],mask)[t2s!=0],F_S0_clmaps[:,ii])
        dice_table[ii,:] = [dice_FR2,dice_FS0] #step 3a here and above
    dice_table[np.isnan(dice_table)]=0
    if debug:
        import pdb
        pdb.set_trace()
        #import IPython
        #from IPython.core.debugger import Tracer; Tracer()()
    """
    Make table of noise gain
    """
    tt_table = np.zeros([len(nc),4])
    counts_FR2_Z = np.zeros([len(nc),2])
    for ii in nc:
        # "noise" voxels: high |Z| but not in the Z cluster map
        comp_noise_sel = andb([np.abs(Z_maps[:,ii])>1.95,Z_clmaps[:,ii]==0])==2
        countnoise[ii] = np.array(comp_noise_sel,dtype=np.int).sum()
        noise_FR2_Z = np.log10(np.unique(F_R2_maps[unmask(comp_noise_sel,mask)[t2s!=0],ii]))
        signal_FR2_Z = np.log10(np.unique(F_R2_maps[unmask(Z_clmaps[:,ii],mask)[t2s!=0]==1,ii]))
        counts_FR2_Z[ii,:] = [len(signal_FR2_Z),len(noise_FR2_Z)]
        try:
            ttest = stats.ttest_ind(signal_FR2_Z,noise_FR2_Z,equal_var=True)
            mwu = stats.norm.ppf(stats.mannwhitneyu(signal_FR2_Z,noise_FR2_Z)[1])
            tt_table[ii,0] = np.abs(mwu)*ttest[0]/np.abs(ttest[0])
            tt_table[ii,1] = ttest[1]
        except: pass
    tt_table[np.isnan(tt_table)]=0
    #import pdb; pdb.set_trace()
    tt_table[np.isinf(tt_table[:,0]),0]=np.percentile(tt_table[~np.isinf(tt_table[:,0]),0],98)
    #Time series derivative kurtosis
    mmix_dt = (mmix[:-1]-mmix[1:])
    mmix_dt2 = (mmix_dt[:-1]-mmix_dt[1:])
    mmix_kurt = stats.kurtosis(mmix_dt)
    #Polynomial detrend of mmix
    p0base = np.array([(np.arange(mmix.shape[0])-np.mean(np.arange(mmix.shape[0])))/np.std(np.arange(mmix.shape[0])),np.ones(mmix.shape[0])])
    mmixp0 = mmix-np.dot(np.linalg.lstsq(mmix,p0base.T)[0],p0base).T
    mmixp0_dt = (mmixp0[:-1]-mmixp0[1:])
    mmixp0_dt2 = (mmixp0_dt[:-1]-mmixp0_dt[1:])
    mmixp0_kurt = stats.kurtosis(mmixp0_dt)
    if debug:
        import ipdb
        ipdb.set_trace()
    """
    Step 1: Reject anything that's obviously an artifact
    a. Estimate a null variance
    """
    rej = ncl[andb([Rhos>Kappas,countsigFS0>countsigFR2])>0]
    ncl = np.setdiff1d(ncl,rej)
    if debug:
        import ipdb
        ipdb.set_trace()
    """
    Step 2: Compute 3-D spatial FFT of Beta maps to detect high-spatial frequency artifacts
    """
    fproj_arr = np.zeros([np.prod(mask.shape[0:2]),len(nc)])
    fproj_arr_val = np.zeros([np.prod(mask.shape[0:2]),len(nc)])
    spr = []
    fdist = []
    for ii in nc:
        fproj = np.fft.fftshift(np.abs(np.fft.rfftn(unmask(seldict['PSC'],mask)[:,:,:,ii])))
        fproj_z = fproj.max(2)
        fproj[fproj==fproj.max()] = 0
        fproj_arr[:,ii] = rankvec(fproj_z.flatten())
        fproj_arr_val[:,ii] = fproj_z.flatten()
        spr.append(np.array(fproj_z>fproj_z.max()/4,dtype=np.int).sum())
        fprojr = np.array([fproj,fproj[:,:,::-1]]).max(0)
        fdist.append(np.max([ fitgaussian(fproj.max(jj))[3:].max() for jj in range(len(fprojr.shape)) ]))
    fdist = np.array(fdist)
    spr = np.array(spr)
    if debug:
        import ipdb
        ipdb.set_trace()
    """
    Step 3: Create feature space of component properties
    """
    fdist_pre = fdist.copy()
    fdist_pre[fdist>np.median(fdist)*3] = np.median(fdist)*3
    fdist_z = (fdist_pre - np.median(fdist_pre) ) / fdist_pre.std()
    spz = (spr-spr.mean())/spr.std()
    Tz = (tt_table[:,0]-tt_table[:,0].mean())/tt_table[:,0].std()
    varex_ = np.log(varex)
    Vz = (varex_-varex_.mean())/varex_.std()
    Kz = (Kappas-Kappas.mean())/Kappas.std()
    Rz = (Rhos-Rhos.mean())/Rhos.std()
    Ktz = np.log(Kappas)/2
    Ktz = (Ktz-Ktz.mean())/Ktz.std()
    Rtz = np.log(Rhos)/2
    Rtz = (Rtz-Rtz.mean())/Rtz.std()
    KRr = stats.zscore(np.log(Kappas)/np.log(Rhos))
    cnz = (countnoise-countnoise.mean())/countnoise.std()
    Dz = stats.zscore(np.arctanh(dice_table[:,0]+0.001))
    fz = np.array([Tz,Vz,Ktz,KRr,cnz,Rz,mmix_kurt,fdist_z])
    if debug:
        import ipdb
        ipdb.set_trace()
    """
    Step 3: Make initial guess of where BOLD components are and use DBSCAN to exclude noise components and find a sample set of 'good' components
    """
    #epsmap is [index,level of overlap with dicemask,number of high Rho components]
    F05,F025,F01 = getfbounds(ne)
    epsmap = []
    Rhos_sorted = np.array(sorted(Rhos))[::-1]
    #Make an initial guess as to number of good components based on consensus of control points across Rhos and Kappas
    KRcutguesses = [getelbow(Rhos),getelbow2(Rhos),getelbow3(Rhos),getelbow(Kappas),getelbow2(Kappas),getelbow3(Kappas)]
    Kelbowval = np.median([getelbow(Kappas,True),getelbow2(Kappas,True),getelbow3(Kappas,True)]+list(getfbounds(ne)))
    Khighelbowval = stats.scoreatpercentile([getelbow(Kappas,True),getelbow2(Kappas,True),getelbow3(Kappas,True)]+list(getfbounds(ne)),75)
    KRcut = np.median(KRcutguesses)
    #only use exclusive when inclusive is extremely inclusive - double KRcut
    if getelbow2(Kappas) > KRcut*2 and getelbow(Kappas,True)<F01: Kcut = getelbow(Kappas,True)
    else: Kcut = getelbow2(Kappas,True)
    #only use inclusive when exclusive is extremely exclusive - half KRcut (remember for Rho inclusive is higher, so want both Kappa and Rho to defaut to lower)
    if getelbow2(Rhos) > KRcut*2 : Rcut = getelbow(Rhos,True) #consider something like min([getelbow(Rhos,True),sorted(Rhos)[::-1][KRguess] ])
    else: Rcut = getelbow2(Rhos,True)
    if Rcut > Kcut: Kcut = Rcut #Rcut should never be higher than Kcut
    KRelbow = andb([Kappas>Kcut,Rhos<Rcut ] )
    #Make guess of Kundu et al 2011 plus remove high frequencies, generally high variance, and high variance given low Kappa
    tt_lim = scoreatpercentile(tt_table[tt_table[:,0]>0,0],75)/3
    KRguess = np.setdiff1d(np.setdiff1d(nc[KRelbow==2],rej),np.union1d(nc[tt_table[:,0]<tt_lim],np.union1d(np.union1d(nc[spz>1],nc[Vz>2]),nc[andb([varex>0.5*sorted(varex)[::-1][int(KRcut)],Kappas<2*Kcut])==2])))
    guessmask = np.zeros(len(nc))
    guessmask[KRguess] = 1
    #Throw lower-risk bad components out
    rejB = ncl[andb([tt_table[ncl,0]<0,varex[ncl]>np.median(varex),ncl > KRcut])==3]
    rej = np.union1d(rej,rejB)
    ncl = np.setdiff1d(ncl,rej)
    if debug:
        import ipdb
        ipdb.set_trace()
    # Scan DBSCAN eps values for a clustering consistent with the elbow guess
    for ii in range(20000):
        db = DBSCAN(eps=.005+ii*.005, min_samples=3).fit(fz.T)
        if db.labels_.max() > 1 and db.labels_.max() < len(nc)/6 and np.intersect1d(rej,nc[db.labels_==0]).shape[0]==0 and np.array(db.labels_==-1,dtype=int).sum()/float(len(nc))<.5:
            epsmap.append([ii, dice(guessmask,db.labels_==0),np.intersect1d(nc[db.labels_==0],nc[Rhos>getelbow(Rhos_sorted,True)]).shape[0] ])
            if debug: print "found solution", ii, db.labels_
        db = None
    if debug:
        import pdb
        pdb.set_trace()
    epsmap = np.array(epsmap)
    group0 = []
    dbscanfailed=False
    if len(epsmap)!=0 :
        #Select index that maximizes Dice with guessmask but first minimizes number of higher Rho components
        ii = epsmap[np.argmax(epsmap[epsmap[:,2]==np.min(epsmap[:,2]),1],0),0]
        print 'Component selection tuning: ' , epsmap[:,1].max()
        db = DBSCAN(eps=.005+ii*.005, min_samples=3).fit(fz.T)
        ncl = nc[db.labels_==0]
        ncl = np.setdiff1d(ncl,rej)
        ncl = np.setdiff1d(ncl,ncl[ncl>len(nc)-len(rej)])
        group0 = ncl.copy()
        group_n1 = nc[db.labels_==-1]
        to_clf = np.setdiff1d(nc,np.union1d(ncl,rej))
    if len(group0)==0 or len(group0) < len(KRguess)*.5:
        dbscanfailed=True
        print "DBSCAN bassed guess failed. Using elbow guess method."
        ncl = np.setdiff1d(np.setdiff1d(nc[KRelbow==2],rej),np.union1d(nc[tt_table[:,0]<tt_lim],np.union1d(np.union1d(nc[spz>1],nc[Vz>2]),nc[andb([varex>0.5*sorted(varex)[::-1][int(KRcut)],Kappas<2*Kcut])==2])))
        group0 = ncl.copy()
        group_n1 = []
        to_clf = np.setdiff1d(nc,np.union1d(group0,rej))
    if len(group0)<2 or (len(group0)<4 and float(len(rej))/len(group0)>3):
        print "WARNING: Extremely limited reliable BOLD signal space. Not filtering further into midk etc."
        midkfailed = True
        min_acc = np.array([])
        if len(group0)!=0:
            toacc_hi = np.setdiff1d(nc [andb([ fdist <= np.max(fdist[group0]), Rhos<F025, Vz>-2 ])==3 ],np.union1d(group0,rej)) #For extremes, building in a 20% tolerance
            min_acc = np.union1d(group0,toacc_hi)
            to_clf = np.setdiff1d(nc , np.union1d(min_acc,rej) )
        diagstepkeys=['rej','KRcut','Kcut','Rcut','dbscanfailed','midkfailed','KRguess','group0','min_acc','toacc_hi']
        diagstepout=[]
        for ddk in diagstepkeys: diagstepout.append("%s: %s" % (ddk,eval('str(%s)' % ddk) ) )
        with open('csstepdata.txt','w') as ofh:
            ofh.write('\n'.join(diagstepout))
        ofh.close()
        return list(sorted(min_acc)),list(sorted(rej)),[],list(sorted(to_clf))
    if group0_only: return list(sorted(group0)),list(sorted(rej)),[],list(sorted(to_clf))
    if debug:
        import ipdb
        ipdb.set_trace()
    #Find additional components to reject based on Dice - doing this here since Dice is a little unstable, need to reference group0
    rej_supp = []
    dice_rej = False
    if not dbscanfailed and len(rej)+len(group0)<0.75*len(nc):
        dice_rej = True
        rej_supp = np.setdiff1d(np.setdiff1d(np.union1d(rej,nc[dice_table[nc,0]<=dice_table[nc,1]] ),group0),group_n1)
        rej = np.union1d(rej,rej_supp)
    if debug:
        import ipdb
        ipdb.set_trace()
    #Temporal features (kurtosis z-scored against the good-component group)
    mmix_kurt_z = (mmix_kurt-mmix_kurt[group0].mean())/mmix_kurt[group0].std()
    mmixp0_kurt_z = (mmixp0_kurt-mmixp0_kurt[group0].mean())/mmixp0_kurt[group0].std()
    mmix_kurt_z_max = np.max([mmix_kurt_z,mmixp0_kurt_z],0)
    """
    Step 2: Classifiy midk and ignore using separte SVMs for difference variance regimes
    #To render hyperplane:
    min_x = np.min(spz2);max_x=np.max(spz2)
    # plotting separating hyperplane
    ww = clf_.coef_[0]
    aa = -ww[0] / ww[1]
    xx = np.linspace(min_x - 2, max_x + 2)  # make sure the line is long enough
    yy = aa * xx - (clf_.intercept_[0]) / ww[1]
    plt.plot(xx, yy, '-')
    """
    if debug:
        import pdb
        pdb.set_trace()
    toacc_hi = np.setdiff1d(nc [andb([ fdist <= np.max(fdist[group0]), Rhos<F025, Vz>-2 ])==3 ],np.union1d(group0,rej)) #Tried getting rid of accepting based on SVM altogether, now using only rejecting
    toacc_lo = np.intersect1d(to_clf,nc[andb([spz<1,Rz<0,mmix_kurt_z_max<5,Dz>-1,Tz>-1,Vz<0,Kappas>=F025,fdist<3*np.percentile(fdist[group0],98)])==8])
    midk_clf,clf_ = do_svm(fproj_arr_val[:,np.union1d(group0,rej)].T,[0]*len(group0) + [1]*len(rej),fproj_arr_val[:,to_clf].T,svmtype=2)
    midk = np.setdiff1d(to_clf[andb([midk_clf==1,varex[to_clf]>np.median(varex[group0]) ])==2],np.union1d(toacc_hi,toacc_lo))
    if len(np.intersect1d(to_clf[andb([midk_clf==1,Vz[to_clf]>0 ])==2],toacc_hi))==0:
        svm_acc_fail = True
        toacc_hi = np.union1d(toacc_hi,to_clf[midk_clf==0]) #only use SVM to augment toacc_hi only if toacc_hi isn't already conflicting with SVM choice
    else: svm_acc_fail = False
    """
    Step 3: Compute variance associated with low T2* areas (e.g. draining veins and low T2* areas)
    #To write out veinmask
    veinout = np.zeros(t2s.shape)
    veinout[t2s!=0] = veinmaskf
    niwrite(veinout,aff,'veinmaskf.nii',header=head)
    veinBout = unmask(veinmaskB,mask)
    niwrite(veinBout,aff,'veins50.nii',header=head)
    """
    tsoc_B_Zcl = np.zeros(tsoc_B.shape)
    tsoc_B_Zcl[Z_clmaps!=0] = np.abs(tsoc_B)[Z_clmaps!=0]
    sig_B = [ stats.scoreatpercentile(tsoc_B_Zcl[tsoc_B_Zcl[:,ii]!=0,ii],25) if len(tsoc_B_Zcl[tsoc_B_Zcl[:,ii]!=0,ii]) != 0 else 0 for ii in nc ]
    sig_B = np.abs(tsoc_B)>np.tile(sig_B,[tsoc_B.shape[0],1])
    veinmask = andb([t2s<scoreatpercentile(t2s[t2s!=0],15),t2s!=0])==2
    veinmaskf = veinmask[t2s!=0]
    veinR = np.array(sig_B[veinmaskf].sum(0),dtype=float)/sig_B[~veinmaskf].sum(0)
    veinR[np.isnan(veinR)] = 0
    veinc = np.union1d(rej,midk)
    rej_veinRZ = ((veinR-veinR[veinc].mean())/veinR[veinc].std())[veinc]
    rej_veinRZ[rej_veinRZ<0] = 0
    rej_veinRZ[ countsigFR2[veinc] > np.array(veinmaskf,dtype=int).sum()] =0
    t2s_lim = [stats.scoreatpercentile(t2s[t2s!=0],50),stats.scoreatpercentile(t2s[t2s!=0],80)/2]
    phys_var_zs = []
    for t2sl_i in range(len(t2s_lim)):
        t2sl = t2s_lim[t2sl_i]
        veinW = sig_B[:,veinc]*np.tile(rej_veinRZ,[sig_B.shape[0],1])
        veincand = fmask(unmask(andb([s0[t2s!=0]<np.median(s0[t2s!=0]),t2s[t2s!=0]<t2sl])>=1,t2s!=0),mask)
        veinW[~veincand]=0
        invein = veinW.sum(1)[fmask(unmask(veinmaskf,t2s!=0)*unmask(veinW.sum(1)>1,mask),mask)]
        minW = 10*(np.log10(invein).mean())-1*10**(np.log10(invein).std())
        veinmaskB = veinW.sum(1)>minW
        tsoc_Bp = tsoc_B.copy()
        tsoc_Bp[tsoc_Bp<0]=0
        sig_Bp = sig_B*tsoc_Bp>0
        vvex = np.array([(tsoc_Bp[veinmaskB,ii]**2.).sum()/(tsoc_Bp[:,ii]**2.).sum() for ii in nc])
        group0_res = np.intersect1d(KRguess,group0)
        phys_var_zs.append( (vvex-vvex[group0_res].mean())/vvex[group0_res].std() )
        veinBout = unmask(veinmaskB,mask)
        niwrite(veinBout,aff,'veins_l%i.nii' % t2sl_i,header=head)
    #Mask to sample veins
    phys_var_z = np.array(phys_var_zs).max(0)
    Vz2 = (varex_ - varex_[group0].mean())/varex_[group0].std()
    """
    Step 4: Learn joint TE-dependence spatial and temporal models to move remaining artifacts to ignore class
    """
    if debug:
        import ipdb
        ipdb.set_trace()
    to_ign = []
    minK_ign = np.max([F05,getelbow2(Kappas,True)])
    newcest = len(group0)+len(toacc_hi[ Kappas[toacc_hi]>minK_ign ])
    phys_art = np.setdiff1d(nc[andb([phys_var_z>3.5,Kappas<minK_ign])==2],group0)
    phys_art = np.union1d(np.setdiff1d(nc[andb([phys_var_z>2,rankvec(phys_var_z)-rankvec(Kappas)>newcest/2,Vz2>-1])==3],group0),phys_art)
    #Want to replace field_art with an acf/SVM based approach instead of a kurtosis/filter one
    field_art = np.setdiff1d(nc[andb([mmix_kurt_z_max>5,Kappas<minK_ign])==2],group0)
    field_art = np.union1d(np.setdiff1d(nc[andb([mmix_kurt_z_max>2,rankvec(mmix_kurt_z_max)-rankvec(Kappas)>newcest/2,Vz2>1,Kappas<F01])==4],group0),field_art)
    misc_art = np.setdiff1d(nc[andb([(rankvec(Vz)-rankvec(Ktz))>newcest/2,Kappas<Khighelbowval])==2],group0)
    ign_cand = np.unique(list(field_art)+list(phys_art)+list(misc_art))
    g0_red = np.setdiff1d(group0,ign_cand)
    midkrej = np.union1d(midk,rej)
    to_ign = np.setdiff1d(list(ign_cand),midkrej)
    toacc = np.union1d(toacc_hi,toacc_lo)
    ncl = np.setdiff1d(np.union1d(ncl,toacc),np.union1d(to_ign,midkrej))
    ign = np.setdiff1d(nc,list(ncl)+list(midk)+list(rej))
    orphan = np.setdiff1d(nc,list(ncl)+list(to_ign)+list(midk)+list(rej))
    #Last ditch effort to save some transient components
    if not strict_mode:
        Vz3 = (varex_ - varex_[ncl].mean())/varex_[ncl].std()
        ncl = np.union1d(ncl,np.intersect1d(orphan,nc[andb([Kappas>F05,Rhos<F025,Kappas>Rhos,Vz3<=-1,Vz3>-3,mmix_kurt_z_max<2.5])==6]))
        ign = np.setdiff1d(nc,list(ncl)+list(midk)+list(rej))
        orphan = np.setdiff1d(nc,list(ncl)+list(to_ign)+list(midk)+list(rej))
    if debug:
        import pdb
        pdb.set_trace()
    if savecsdiag:
        diagstepkeys=['selmodelversion','rej','KRcut','Kcut','Rcut','dbscanfailed','KRguess','group0','dice_rej','rej_supp','to_clf','midk', 'svm_acc_fail', 'toacc_hi','toacc_lo','field_art','phys_art','misc_art','ncl','ign']
        diagstepout=[]
        for ddk in diagstepkeys: diagstepout.append("%s: %s" % (ddk,eval('str(%s)' % ddk) ) )
        with open('csstepdata.txt','w') as ofh:
            ofh.write('\n'.join(diagstepout))
        allfz = np.array([Tz,Vz,Ktz,KRr,cnz,Rz,mmix_kurt,fdist_z])
        np.savetxt('csdata.txt',allfz)
    return list(sorted(ncl)),list(sorted(rej)),list(sorted(midk)),list(sorted(ign))
|
ME-ICA/me-ica
|
meica.libs/select_model_fft20d.py
|
Python
|
lgpl-2.1
| 25,477
|
[
"Gaussian"
] |
a473b69124309b773660c2daefe990f276e5c9d8b5d91d9a7e72e5cd59eec80f
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007-2008 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
from decimal import Decimal
from stoqlib.database.runtime import get_default_store
from stoqlib.domain.commission import CommissionSource
from stoqlib.domain.person import Supplier
from stoqlib.domain.product import (Product, ProductSupplierInfo,
Storable)
from stoqlib.domain.sellable import (Sellable,
SellableCategory,
SellableUnit)
from stoqlib.importers.csvimporter import CSVImporter
from stoqlib.lib.parameters import sysparam
class ProductImporter(CSVImporter):
    """CSV importer that creates products for the Stoq domain model.

    For each row it gets-or-creates the base and child sellable categories
    (with markup/commission), a commission source, then creates the Sellable,
    Product, main-supplier ProductSupplierInfo and Storable records.
    """
    # Required CSV columns, in file order.
    fields = ['base_category',
              'barcode',
              'category',
              'description',
              'price',
              'cost',
              'commission',
              'commission2',
              'markup',
              'markup2'
              ]
    # Columns that may be absent from the file.
    optional_fields = [
        'unit',
        ]

    def __init__(self):
        """Cache lookups shared by all rows: first supplier (used as main
        supplier for every imported product), known sellable units by
        description, the default product tax constant, and a running code
        counter for Sellable.code."""
        super(ProductImporter, self).__init__()
        default_store = get_default_store()
        suppliers = default_store.find(Supplier)
        if not suppliers.count():
            raise ValueError(u'You must have at least one suppliers on your '
                             u'database at this point.')
        self.supplier = suppliers[0]
        self.units = {}
        for unit in default_store.find(SellableUnit):
            self.units[unit.description] = unit
        self.tax_constant_id = sysparam.get_object_id(
            'DEFAULT_PRODUCT_TAX_CONSTANT')
        self._code = 1

    def _get_or_create(self, table, store, **attributes):
        """Return the row of `table` matching `attributes`, creating it in
        `store` if no match exists."""
        obj = store.find(table, **attributes).one()
        if obj is None:
            obj = table(store=store, **attributes)
        return obj

    def process_one(self, data, fields, store):
        """Import a single CSV row `data` into `store`.

        Raises ValueError when the optional 'unit' column names an unknown
        sellable unit.
        """
        base_category = self._get_or_create(
            SellableCategory, store,
            suggested_markup=int(data.markup),
            salesperson_commission=int(data.commission),
            category=None,
            description=data.base_category)
        # create a commission source
        self._get_or_create(
            CommissionSource, store,
            direct_value=int(data.commission),
            installments_value=int(data.commission2),
            category=base_category)
        category = self._get_or_create(
            SellableCategory, store,
            description=data.category,
            suggested_markup=int(data.markup2),
            category=base_category)
        sellable = Sellable(store=store,
                            cost=Decimal(data.cost),
                            category=category,
                            description=data.description,
                            price=int(data.price))
        sellable.barcode = data.barcode
        # Sequential zero-padded code, unique within this import run.
        sellable.code = u'%02d' % self._code
        self._code += 1
        if u'unit' in fields:
            if not data.unit in self.units:
                raise ValueError(u"invalid unit: %s" % data.unit)
            sellable.unit = store.fetch(self.units[data.unit])
        sellable.tax_constant_id = self.tax_constant_id
        product = Product(sellable=sellable, store=store)
        supplier = store.fetch(self.supplier)
        ProductSupplierInfo(store=store,
                            supplier=supplier,
                            is_main_supplier=True,
                            base_cost=Decimal(data.cost),
                            product=product)
        Storable(product=product, store=store)
|
andrebellafronte/stoq
|
stoqlib/importers/productimporter.py
|
Python
|
gpl-2.0
| 4,371
|
[
"VisIt"
] |
41400bc5928067163b3bdbd3d61e3422285bd20dfcb617462e20976d7e4d6de1
|
"""
:Authors: - Iason
"""
from collections import defaultdict
import numpy as np
def inside(forest, topsort, omega=lambda edge: edge.log_prob):
    """
    Inside (bottom-up) recursion over an acyclic hypergraph, in log space.

    :param forest: acyclic hypergraph mapping a node to its incoming edges.
    :param topsort: nodes in (partial) bottom-up order.
    :param omega: edge-weight function (defaults to the edge's log probability).
    :return: dict mapping each node to its inside log-weight.
    """
    values = defaultdict(float)
    for node in topsort:
        edges = forest.get(node, frozenset())
        if not edges:
            # A leaf contributes weight 1, i.e. log(1) = 0.
            values[node] = 0
            continue
        # Accumulate log(sum_e exp(w_e)) starting from log(0) = -inf.
        acc = -float('inf')
        for edge in edges:
            edge_weight = omega(edge)
            for child in edge.rhs:
                edge_weight += values[child]
            # log(a) + log(b) = log(exp(a) + exp(b))
            acc = np.logaddexp(acc, edge_weight)
        values[node] = acc
    return values
|
wilkeraziz/pcfg-sampling
|
inference.py
|
Python
|
apache-2.0
| 1,152
|
[
"VisIt"
] |
b2be1a92d24e21a25517f42006043d6cb34299095c2dc8934139f8e12934bf8e
|
"""
Objects with No values
"""
from galaxy.datatypes.metadata import MetadataCollection
from galaxy.datatypes.registry import Registry
class RecursiveNone:
    """A None-like placeholder: prints as "None", is falsy under Python 2,
    and lazily grows attributes that are themselves RecursiveNone."""

    def __str__(self):
        return "None"

    def __repr__(self):
        return str(self)

    def __getattr__(self, name):
        # Cache the child placeholder so repeated access to the same
        # attribute returns the same object.
        child = RecursiveNone()
        setattr(self, name, child)
        return child

    def __nonzero__(self):
        # Python 2 truth value (ignored by Python 3).
        return False
class NoneDataset( RecursiveNone ):
    # Dataset stand-in used where a real dataset is absent.  Unlike its
    # base class, unknown attribute reads return the string "None"
    # rather than spawning new placeholder objects.
    def __init__( self, datatypes_registry = None, ext = 'data', dbkey = '?' ):
        """Build a fake dataset carrying only extension/dbkey/datatype info.

        :param datatypes_registry: registry used to resolve *ext*; a fresh
            Registry is created when omitted.
        :param ext: datatype extension (also exposed as ``extension``).
        :param dbkey: genome build key; '?' means unknown.
        """
        self.ext = self.extension = ext
        self.dbkey = dbkey
        if datatypes_registry is None: datatypes_registry = Registry()
        self.datatype = datatypes_registry.get_datatype_by_extension( ext )
        self._metadata = None
        self.metadata = MetadataCollection( self )
    def __getattr__( self, name ):
        # Only called for attributes not set in __init__ (or by the
        # metadata collection); everything else reads as "None".
        return "None"
    def missing_meta( self ):
        # Report no missing metadata so callers skip metadata checks.
        return False
|
volpino/Yeps-EURAC
|
lib/galaxy/util/none_like.py
|
Python
|
mit
| 952
|
[
"Galaxy"
] |
8524ab96f43277eeb3bbc19e01951e4896990035ab39a6b19bc1f08128661706
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create pipeline
#
# create sphere to color
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(20)
sphere.SetPhiResolution(40)
def colorCells (__vtk__temp0=0,__vtk__temp1=0):
    """Execute callback for the programmable filter below: assign one
    random scalar in [0, 1) to every cell of the input polydata."""
    randomColorGenerator = vtk.vtkMath()
    # NOTE: reads the module-level `randomColors` filter defined after
    # this function; VTK invokes the callback only once the pipeline runs.
    input = randomColors.GetInput()
    output = randomColors.GetOutput()
    numCells = input.GetNumberOfCells()
    colors = vtk.vtkFloatArray()
    colors.SetNumberOfTuples(numCells)
    i = 0
    while i < numCells:
        colors.SetValue(i,randomColorGenerator.Random(0,1))
        i = i + 1
    # Copy all cell data through except scalars, which are replaced.
    output.GetCellData().CopyScalarsOff()
    output.GetCellData().PassData(input.GetCellData())
    output.GetCellData().SetScalars(colors)
    del colors
    #reference counting - it's ok
    del randomColorGenerator
# Compute random scalars (colors) for each cell
randomColors = vtk.vtkProgrammableAttributeDataFilter()
randomColors.SetInputConnection(sphere.GetOutputPort())
randomColors.SetExecuteMethod(colorCells)
# mapper and actor
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(randomColors.GetOutputPort())
mapper.SetScalarRange(randomColors.GetPolyDataOutput().GetScalarRange())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(mapper)
# Create a scalar bar (horizontal, along the bottom of the viewport)
scalarBar = vtk.vtkScalarBarActor()
scalarBar.SetLookupTable(mapper.GetLookupTable())
scalarBar.SetTitle("Temperature")
scalarBar.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
scalarBar.GetPositionCoordinate().SetValue(0.1,0.01)
scalarBar.SetOrientationToHorizontal()
scalarBar.SetWidth(0.8)
scalarBar.SetHeight(0.17)
# Test the Get/Set Position
scalarBar.SetPosition(scalarBar.GetPosition())
# Create graphics stuff
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(sphereActor)
ren1.AddActor2D(scalarBar)
renWin.SetSize(350,350)
# render the image
#
ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(1.5)
renWin.Render()
# Re-render with a different label count to exercise the update path.
scalarBar.SetNumberOfLabels(8)
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Rendering/Core/Testing/Python/ScalarBar.py
|
Python
|
bsd-3-clause
| 2,359
|
[
"VTK"
] |
e99b3f9860cd5bd179f47bb1dd8a0a23b3b4ad338f3319cade8edd7f88b8193b
|
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import nnabla as nn
class Module(object):
    """Module mix-in for the parametric function classes.

    Provides discovery of nested modules and their parameters plus
    save/load helpers built on top of ``nn.save_parameters`` /
    ``nn.load_parameters``.
    """

    def __init__(self):
        pass

    def get_parameters(self, grad_only=True):
        """Get parameters.

        Args:
            grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
                If you set this option as `False`, All parameters are returned. Default is `True`.

        Returns:
            dict: The dictionary of parameter name (`str`) to Variable (:obj:`~nnabla.Variable`).
        """
        params = OrderedDict()
        for entry in self.get_modules():
            if not isinstance(entry, tuple):
                continue
            prefix, module = entry
            for attr_name, variable in module.__dict__.items():
                if not isinstance(variable, nn.Variable):
                    continue
                if grad_only and not variable.need_grad:
                    continue
                # Parameter names are "<module-prefix>/<attribute>".
                params["{}/{}".format(prefix, attr_name)] = variable
        return params

    def get_modules(self, memo=None, prefix=""):
        """Get modules.

        This function is internally used as the helper method for other methods.

        Args:
            memo (set, optional): Module set in order to memorize to visit.
            prefix (str, optional): Prefix to a specific parameter name.

        Yields:
            `Module`: The module class.
        """
        if memo is None:
            memo = set()
        if self not in memo:
            memo.add(self)
            yield prefix, self
            for attr_name, submodule in self.__dict__.items():
                if not isinstance(submodule, Module):
                    continue
                # Build "parent/child"-style prefixes for nested modules.
                submodule_prefix = "{}/{}".format(prefix,
                                                  attr_name) if prefix != "" else attr_name
                for m in submodule.get_modules(memo, submodule_prefix):
                    yield m

    def save_parameters(self, path, grad_only=False):
        """Save all parameters into a file with the specified format.

        Currently hdf5 and protobuf formats are supported.

        Args:
            path : path or file object
            grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
        """
        params = self.get_parameters(grad_only=grad_only)
        nn.save_parameters(path, params)

    def load_parameters(self, path):
        """Load parameters from a file with the specified format.

        Args:
            path : path or file object

        Raises:
            ValueError: if the loaded file does not contain a parameter
                for one of this module's variables.
        """
        nn.load_parameters(path)
        for entry in self.get_modules():
            if not isinstance(entry, tuple):
                continue
            prefix, module = entry
            for attr_name, variable in module.__dict__.items():
                if not isinstance(variable, nn.Variable):
                    continue
                name = "{}/{}".format(prefix, attr_name)
                # Pop the freshly loaded parameter out of the global
                # parameter scope and copy its data into this module's
                # existing variable.
                loaded = nn.parameter.pop_parameter(name)
                # BUG FIX: the popped parameter (which is None when the
                # file lacks it) must be checked -- the original tested
                # the existing variable, which is never None here.
                if loaded is None:
                    raise ValueError(
                        "Model does not have {} parameter.".format(name))
                variable.d = loaded.d.copy()
                nn.logger.info("`{}` loaded.".format(name))
|
sony/nnabla
|
python/src/nnabla/experimental/parametric_function_class/module.py
|
Python
|
apache-2.0
| 4,013
|
[
"VisIt"
] |
253a691228f72be20f922a74ffbdad929d19b2d203494a0626eb9be99dd4ce2f
|
import sys
import os.path
import logging
import asyncio
from ._requires import click
from .http import HTTPError
log = logging.getLogger(__name__)
class _VolumeBinds:
def visit(self, obj):
return obj.accept(self)
def visit_RO(self, _):
return 'ro'
def visit_RW(self, _):
return 'rw'
def visit_localpath(self, obj):
from_ = os.path.abspath(obj.from_)
to = os.path.abspath(obj.to)
# FIXME: implement proper errors reporting
assert os.path.exists(from_),\
'Local path does not exists: {}'.format(from_)
return '{}:{}:{}'.format(from_, to, self.visit(obj.mode))
def visit_namedvolume(self, obj):
to = os.path.abspath(obj.to)
return '{}:{}:{}'.format(obj.name, to, self.visit(obj.mode))
def _volumes(volumes):
return {os.path.abspath(vol.to): {} for vol in volumes}
def _volume_binds(volumes):
transformer = _VolumeBinds()
return [transformer.visit(v) for v in volumes]
def _exposed_ports(ports):
return {'{}/{}'.format(p.port, p.proto): {} for p in ports}
def _bind_to(port):
return [{'HostPort': str(port.as_), 'HostIp': port.addr}]
def _port_binds(ports):
return {
'{}/{}'.format(port.port, port.proto): _bind_to(port)
for port in ports
}
async def start(docker, image, command, *, init=None, tty=True,
                entrypoint=None, volumes=None, ports=None, environ=None,
                work_dir=None, network=None, network_alias=None, label=None):
    """Create (but do not start) a container; return docker's response.

    Builds the "create container" spec from the given options.  Only
    options that are set end up in the request, so the spec stays
    minimal.

    :param docker: docker API client with an async ``create_container``
    :param image: object whose ``name`` names the image to use
    :param command: command (list) to run inside the container
    """
    spec = {
        'Image': image.name,
        'Cmd': command,
        'OpenStdin': True,
        'Tty': tty,
    }
    if ports:
        spec['ExposedPorts'] = _exposed_ports(ports)
    if environ:
        # environ is truthy here, so no fallback iterable is needed
        # (removed the original dead `environ.items() or ()`).
        spec['Env'] = ['{}={}'.format(k, v) for k, v in environ.items()]
    if volumes:
        spec['Volumes'] = _volumes(volumes)
    if entrypoint is not None:
        spec['Entrypoint'] = entrypoint
    if work_dir:
        spec['WorkingDir'] = os.path.abspath(work_dir)
    if label:
        spec['Labels'] = {label: ''}

    # Host-level settings live under a nested 'HostConfig' key.
    host_config = {}
    if init:
        host_config['Init'] = True
    if volumes:
        host_config['Binds'] = _volume_binds(volumes)
    if ports:
        host_config['PortBindings'] = _port_binds(ports)
    if network:
        host_config['NetworkMode'] = network
    if host_config:
        spec['HostConfig'] = host_config

    networking_config = {}
    if network and network_alias:
        networking_config['EndpointsConfig'] = {
            network: {'Aliases': [network_alias]},
        }
    if networking_config:
        spec['NetworkingConfig'] = networking_config

    return await docker.create_container(spec)
async def start_service(docker, *args, **kwargs):
    """Create a container (see :func:`start`) and immediately start it."""
    container = await start(docker, *args, **kwargs)
    await docker.start(container['Id'])
async def resize(docker, id_):
    """Resize the container's TTY to the local terminal size.

    Best-effort: an HTTP failure is logged at debug level and ignored.
    """
    # TODO: maybe set also $LINES and $COLUMNS variables, add SIGWINCH handler
    width, height = click.get_terminal_size()
    try:
        await docker.resize(id_, params={'w': str(width), 'h': str(height)})
    except HTTPError as e:
        log.debug('Failed to resize terminal: %s', e)
class StdIOProtocol(asyncio.Protocol):
    """Bridges one side of the local stdio to an attached HTTP transport,
    propagating flow-control (pause/resume) between the two."""

    transport: asyncio.Transport

    def __init__(self, http_proto=None):
        # The HTTP protocol is usually wired in after construction.
        self.http_proto = http_proto

    def connection_made(self, transport):
        self.transport = transport

    def pause_writing(self):
        # Our write buffer is full: stop reading from the HTTP side.
        self.http_proto.transport.pause_reading()

    def resume_writing(self):
        self.http_proto.transport.resume_reading()

    def data_received(self, data):
        # Forward everything read on this pipe to the HTTP transport.
        self.http_proto.transport.write(data)
async def attach(docker, id_):
    """Attach local stdin/stdout to the container's I/O stream.

    Wires two StdIOProtocol bridges (stdin -> container, container ->
    stdout), resizes the remote TTY, then waits for the attach stream
    to close.
    """
    loop = asyncio.get_running_loop()
    stdin_proto = StdIOProtocol()
    await loop.connect_read_pipe(lambda: stdin_proto, sys.stdin)
    # Don't consume stdin until the HTTP stream is wired up below.
    stdin_proto.transport.pause_reading()
    stdout_proto = StdIOProtocol()
    await loop.connect_write_pipe(lambda: stdout_proto, sys.stdout)
    async with docker.attach(
        id_, stdin_proto, stdout_proto,
        params={'logs': '1', 'stream': '1',
                'stdin': '1', 'stdout': '1', 'stderr': '1'}
    ) as http_proto:
        # Cross-link both bridges with the HTTP protocol so flow
        # control works in both directions, then start reading stdin.
        stdin_proto.http_proto = http_proto
        stdout_proto.http_proto = http_proto
        stdin_proto.transport.resume_reading()
        await resize(docker, id_)
        await http_proto.wait_closed()
async def run(docker, tty, image, command, *, init=None,
              volumes=None, ports=None, environ=None, work_dir=None,
              network=None, network_alias=None):
    """Create, start and attach to a one-off container; return its exit code.

    The container is always force-removed (together with its volumes)
    on the way out, even when starting or attaching fails.
    """
    # entrypoint='' deliberately overrides the image's entrypoint.
    c = await start(docker, image, command, init=init, tty=tty,
                    volumes=volumes,
                    ports=ports, environ=environ, work_dir=work_dir,
                    network=network, network_alias=network_alias,
                    entrypoint='')
    try:
        await docker.start(c['Id'])
        await attach(docker, c['Id'])
        exit_code = await docker.wait(c['Id'])
        return exit_code['StatusCode']
    finally:
        # Best-effort cleanup: remove the container and its volumes.
        await docker.remove_container(c['Id'],
                                      params={'v': 'true', 'force': 'true'})
|
vmagamedov/pi
|
pi/run.py
|
Python
|
bsd-3-clause
| 5,105
|
[
"VisIt"
] |
796daef18b7cfc97ef17973d63d62dd2b6ff48879895a9cffe526ef545edd034
|
import os
import subprocess
import sys
import pandas as pd
from minedatabase import utils
from minedatabase.databases import MINE
from rdkit.Chem import AllChem
def load_cdmine_rxns(mine_db, excel_file, pic_dir=""):
    """Load compounds and reactions from a CD-MINE Excel workbook.

    Sheet index 1 holds compounds, sheet index 0 holds reactions.
    Compound abbreviations are mapped to their inserted MINE compound
    ids so that reaction equations written with abbreviations can be
    parsed into reactant/product lists.

    :param mine_db: MINE database wrapper to insert compounds/reactions into
    :param excel_file: path to the Excel workbook
    :param pic_dir: optional directory; when given, a PNG is rendered for
        each compound via ChemAxon's molconvert (hard-coded macOS path).
        NOTE(review): joined by plain "+" below, so it must end with a
        path separator -- confirm against callers.
    """
    abrv = {"hn": "[*]"}
    if pic_dir and not os.path.exists(pic_dir):
        os.mkdir(pic_dir)
    compounds = pd.read_excel(excel_file, 1, skiprows=1).fillna("")
    reactions = pd.read_excel(excel_file, 0, skiprows=1).fillna("")
    for i, row in compounds.iterrows():
        if row['SMILES']:
            mol = AllChem.MolFromSmiles(row['SMILES'])
            if mol:
                c_id = mine_db.insert_compound(mol, {"Generation": 0})
                # Remember abbreviation -> compound id for equation parsing.
                abrv[row['Abbreviation'].strip()] = c_id
                if pic_dir:
                    rc = subprocess.call("/Applications/ChemAxon/JChem/bin/molconvert -o %s/temp.png png:-a,w500 -s "
                                         "'%s'" % (pic_dir, row['SMILES'].strip()), shell=True)
                    if not rc:
                        os.rename(pic_dir + "temp.png", pic_dir + c_id + ".png")
            else:
                print("Failed to parse %s" % row['SMILES'])
        else:
            print('SMILES missing from %s' % row.name)
    # NOTE(review): this fills NaNs with the literal string 'ffill', not a
    # forward-fill (that would be fillna(method='ffill')).  Since the frame
    # was already .fillna("")-ed above it is a no-op either way -- confirm
    # the original intent before changing.
    reactions['Type of Reaction'].fillna('ffill', inplace=True)
    for i, row in reactions.iterrows():
        if row['Equation (Abbreviations)']:
            rxn = row[['Metabolite', 'Equation (full names)']].to_dict()
            if isinstance(row['PMID or doi'], str):
                rxn['References'] = row['PMID or doi'].strip().split('; ')
            else:
                rxn['References'] = [str(row['PMID or doi'])]
            rxn['Type'] = str(row['Type of Reaction']).strip()
            rxn['Notes'] = str(row['Comments']).strip()
            rxn['Reactants'], rxn['Products'] = utils.parse_text_rxn(row['Equation (Abbreviations)'], ' = ', ' + ', abrv)
            rxn['InChI_hash'] = utils._calculate_rxn_hash(mine_db, rxn['Reactants'], rxn['Products'])
            mine_db.insert_reaction(rxn)
        else:
            print('RXN missing from %s' % row.name)
if __name__ == '__main__':
    # Usage: add_rxns_from_excel.py <mine-db-name> <excel-workbook>
    mine = MINE(sys.argv[1])
    load_cdmine_rxns(mine, sys.argv[2])
|
JamesJeffryes/MINE-Database
|
Scripts/add_rxns_from_excel.py
|
Python
|
mit
| 2,201
|
[
"RDKit"
] |
9cb9ef63c4e2ab432f600ae9647ac048c3bd77f60f427bdc629cef1cbc4099e5
|
import os
import pickle
import pylab as pl
from operator import itemgetter
import netcdf
import numpy as np
import sys
from operator import mul
from ncdftools import nccopydimension
from Scientific.IO import NetCDF
from array import array
import struct
def nctypecode(dtype):
    """Return the netcdf typecode character for a numpy dtype.

    Accepts either a numpy dtype object or a dtype name string.
    Returns None (implicitly) for unsupported dtypes, matching the
    original behaviour.
    """
    # BUG FIX: the original compared the numpy *class* to a string
    # ("np.dtype == 'float32'", always False); the intent was to also
    # accept plain string dtype names, i.e. "dtype == 'float32'".
    if (dtype == np.dtype('float32')) or (dtype == 'float32'):
        return 'f'
    elif (dtype == np.dtype('float64')) or (dtype == 'float64'):
        return 'd'
    elif (dtype == np.dtype('int32')) or (dtype == 'int32'):
        return 'i'
    elif (dtype == np.dtype('int64')) or (dtype == 'int64'):
        return 'l'
class SomeError(Exception):
    """Generic error carrying an arbitrary payload in ``value``."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
def ncvartypeoffset(ncfile,var):
    """ purpose: get binary data type and offset of a variable in netcdf file
    unfortunately, getting these properties are not explicitely implemented in scipy, but most of this code is stolen from scipy: /usr/lib/python2.7/dist-packages/scipy/io/netcdf.py
    ncfile is a scipy.io.netcdf.netcdf_file
    var variable we want to calculate the offset from
    """
    # Remember the current file position so it can be restored at the end.
    oripos=ncfile.fp.tell()
    ncfile.fp.seek(0)
    magic = ncfile.fp.read(3)
    ncfile.__dict__['version_byte'] = np.fromstring(ncfile.fp.read(1), '>b')[0]
    # Read file headers and set data.
    ncfile._read_numrecs()
    ncfile._read_dim_array()
    ncfile._read_gatt_array()
    header = ncfile.fp.read(4)
    count = ncfile._unpack_int()
    # Re-parse every variable header record (scipy private API).
    vars = []
    for ic in range(count):
        vars.append(list(ncfile._read_var()))
    # Locate the requested variable by name among the parsed records.
    ivar = np.where(np.array(vars) == var)[0][0]
    ncfile.fp.seek(oripos)
    # Entries 6 and 7 of scipy's _read_var() tuple -- presumably the
    # typecode and the byte offset; verify against the scipy version in use.
    return vars[ivar][6] , vars[ivar][7]
def rwicecube(filestream,shp,refiter,dimiter,dimpos,refnoiter,dimnoiter,icecube,vtype,vsize,voffset,rwchsize,mode):
    """
    read or write data icecube from binary data and put it in an array
    filestream: binary file reference
    shp: shape of the filestream
    refiter: reference to dimensions over which no slice is performed
    pos: current index position of the non-sliced dimensions

    NOTE(review): Python 2 code (uses long()).  In 'write' mode this
    function writes through the module-level globals fout, fpointout and
    testdata rather than the filestream argument -- confirm intent.
    """
    # e.g. shp = (200,100,50,50,20)
    # refiter = (1,3,4)
    # dimpos = (5,10,9)
    # extend so that structured arrays are read at once
    # lennoiter = total number of elements in one icecube.
    lennoiter = long(1)
    for irefnoiter,erefnoiter in enumerate(refnoiter):
        lennoiter = lennoiter*dimnoiter[irefnoiter]
    # fpos = flat element offset of the current iteration position.
    fpos = 0
    # e.g. fpos = (9)+ 20*(10) + 50*50*20*(5)
    for idimpos,edimpos in enumerate(dimpos):
        curadd = np.mod(edimpos,dimiter[idimpos])
        #e.g. if edimpos == (5): curadd = 50*50*20*(5)
        # exclude trivial special case of only 1 iteration step
        # --> in that case fpos is just zero.
        if refiter != [-1]:
            if ((refiter[idimpos] + 1) < len(shp)):
                for i in range(refiter[idimpos] + 1,len(shp)) :
                    curadd = curadd * shp[i]
        fpos = fpos + curadd
    # Initialize (for reading) or prepare (for writing) icecube array
    if mode == 'read':
        icecube = np.zeros((lennoiter,),dtype=vtype)*np.nan
    elif mode == 'write':
        icecube = np.reshape(icecube,(lennoiter,))
    # Odometer-style index over the non-iterated (icecube) dimensions.
    dimnoiterpos = [0]*len(dimnoiter)
    # print icecube,dimnoiterpos
    j = 0
    while j < lennoiter:
        # Flat file offset of the current strip inside the icecube.
        fposicecube = fpos
        for idimpos,edimpos in enumerate(dimnoiterpos):
            curadd = np.mod(edimpos,dimnoiter[idimpos])
            # e.g. fposicecube = (1)*52
            # e.g. fposicecube = (9)+ 20*(10) + 50*50*20*(5)
            if ((refnoiter[idimpos] + 1) < len(shp)):
                for i in range(refnoiter[idimpos] + 1,len(shp)) :
                    curadd = curadd * shp[i]
            fposicecube = fposicecube + curadd
        if mode == 'read':
            # Big-endian on disk: read natively then byteswap in place.
            filestream.seek(voffset+vsize*fposicecube)
            temp = np.fromfile(filestream,dtype='='+vtype[1],count=rwchsize)
            temp.byteswap(True)
            icecube[j:(j+rwchsize)] = temp
        elif mode == 'write':
            fout.fp.seek(voffset+vsize*fposicecube)
            fpointout.seek(voffset+vsize*fposicecube)
            # filestream.seek(voffset+vsize*fposicecube)
            testdata[fposicecube:(fposicecube+rwchsize)] = np.array(icecube[j:(j+rwchsize)],dtype=vtype[1])
            # little = struct.pack('>'+'d'*len(icecube[j:(j+rwchsize)]), *icecube[j:(j+rwchsize)])
            # # Seek to offset based on piece index
            # #print little
            # filestream.write(little)
            # filestream.write(np.array(icecube[j:(j+rwchsize)],dtype=vtype))
            # # np.array(icecube[j:(j+rwchsize)],dtype=vtype[1]).byteswap().tofile(filestream)
            temp = np.array(icecube[j:(j+rwchsize)],dtype='>d')
            fout.fp.write(temp)
            fpointout.write(temp)
            # # print temp
            # # filestream.write(temp[:])
            # # little = struct.pack('<'+'B'*len(temp), *temp)
            # # print icecube.byteswap().dtype
            # # print voffset, vsize, fposicecube, vtype, rwchsize, icecube.dtype# ,icecube[j:(j+rwchsize)]
        # go to next data strip
        if dimnoiterpos != []:
            # rwchsize: allow reading of chunks for the inner dimensions
            dimnoiterpos[-1] = dimnoiterpos[-1] + rwchsize
            # Carry-propagate the odometer from the innermost dimension.
            for idimidx,edimidx in enumerate(reversed(dimnoiterpos)):
                if idimidx > 0:
                    while dimnoiterpos[idimidx] >= dimnoiter[idimidx]:
                        dimnoiterpos[idimidx-1] = dimnoiterpos[idimidx-1] + 1
                        dimnoiterpos[idimidx] -= dimnoiter[idimidx]
        j = j+rwchsize
    icecube.shape = dimnoiter
    if mode == 'read':
        return icecube
def writeicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,data,vtype,vsize,voffset,rwchsize):
    """
    write an icecube and perform an in-memory Post Swap of dimensions before (very fast)
    hereby, we acquire the order of the icecube dimensions
    """
    # Sort the non-iterated dimension references (keeping the original
    # positions as the permutation) and write the transposed data.
    ordered = sorted(zip(refnoiter, range(len(refnoiter)), dimnoiter),
                     key=itemgetter(0, 1))
    refsort, perm, dimsort = zip(*ordered)
    rwicecube(fstream, shp, refiter, dimiter, dimiterpos, refsort, dimsort,
              np.transpose(data, perm), vtype, vsize, voffset, rwchsize,
              'write')
def readicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,vtype,vsize,voffset,rwchsize):
    """
    read an icecube by sorting the indices (highest at the back).
    perform an in-memory Post Swap of dimensions (very fast) to compensate for the sorting.
    we allow reading in chunks according to the inner dimensions. They will be mostly there because we allow an max-icecubesize
    """
    refnoitersort,trns,dimnoitersort = zip(*sorted(zip(refnoiter,range(len(refnoiter)),dimnoiter),key=itemgetter(0,1)))
    icecube =rwicecube(fstream,shp,refiter,dimiter,dimiterpos,refnoitersort,dimnoitersort,None,vtype,vsize,voffset,rwchsize,'read')
    # build the 'inverse permutation' operator for tranposition before writeout
    # NOTE(review): Python 2 semantics -- range() returns a list that is
    # mutated in place below; under Python 3 this raises TypeError.
    inv = range(len(trns))
    for itrns, etrns in enumerate(trns):
        inv[etrns] = itrns
    return np.transpose(icecube,inv)
fnin = '/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf.nc'
print fnin
# fobjin = open(fnin,'rb')
fin = netcdf.netcdf_file(fnin,'r')
fnout = '/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf2.nc'
os.system('rm '+fnout)
print fnout
# fobjout = open(fnout,'wb+')
fout = NetCDF.NetCDFFile(fnout,'w')
fnpointout = '/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf4.nc'
os.system('rm '+fnpointout)
print fnpointout
# fobjout = open(fnpointout,'wb+')
fpointout = open(fnpointout,'w')
# we kunnen eens proberen om een variabele aan te maken met een vooraf gespecifieerde dimensie!
datin = [[fin,'QV'],[fin,'rlat']]
datout = [[fout,'QV'],[fout,'TEST']]
# adtypeoutspec = [None,None] # to be obtained automatically from the data output stream (if it already exists)
# selection of function dimension input
func = lambda x, y: (np.array([[[np.mean(x)]],[[np.mean(x)]]],dtype=np.float) , np.array([[[np.mean(x)]],[[np.mean(x)]]],dtype=np.float)) # *(1.+np.zeros(x.shape))
dnamsel = ('rlon','time','t')
# obtain definitions of the variable stream input
vsdin = [] # input variable stream definitions
for idatin,edatin in enumerate(datin):
vsdin.append(dict())
vsdin[idatin]['dnams'] = []
for idim,edim in enumerate(datin[idatin][0].variables[datin[idatin][1]].dimensions):
vsdin[idatin]['dnams'].append(str(edim))
vsdin[idatin]['dims'] = list(datin[idatin][0].variables[datin[idatin][1]].shape)
vsdin[idatin]['itemsize'] = datin[idatin][0].variables[datin[idatin][1]].itemsize()
vsdin[idatin]['dtype'] = datin[idatin][0].variables[datin[idatin][1]]._dtype
vsdin[idatin]['voffset'] = datin[idatin][0].variables[datin[idatin][1]]._voffset
# obtain definitions of the variable stream output
vsdout = [] # input variable stream definitions
for idatout,edatout in enumerate(datout):
vsdout.append(dict())
if edatout[1] in edatout[0].variables:
vsdout[idatout]['dnams'] = []
for idim,edim in enumerate(datout[idatout][0].variables[datout[idatout][1]].dimensions):
vsdout[idatout]['dnams'].append(str(edim))
vsdout[idatout]['dims'] = list(datout[idatout][0].variables[datout[idatout][1]].shape)
vsdout[idatout]['itemsize'] = datout[idatout][0].variables[datout[idatout][1]].itemsize()
vsdout[idatout]['dtype']= datout[idatout][0].variables[datout[idatout][1]]._dtype
vsdout[idatout]['voffset'] = datout[idatout][0].variables[datout[idatout][1]]._voffset
else:
# the variable doesn't exists (we will create it afterwards)
vsdout[idatout]['dnams'] = None
vsdout[idatout]['dims'] = None
vsdout[idatout]['itemsize'] = None
vsdout[idatout]['dtype'] = None
# collecting the involved dimensions (will be considered as the standard output dimensions)
dnamsstd = [] # standard output dimensions: list of all output dimensions: this is collected from the input dimensions, the output dimensions and the selected/processed dimensions
dimsstd = [] # maximum length of an output dimension
idimsstd = 0
for ivsdin,evsdin in enumerate(vsdin):
dnaminlast = None
index = 0
for idnam,ednam in reversed(list(enumerate(evsdin['dnams']))):
if ednam not in dnamsstd:
# In dnamsstd, ednam should be just after the dimensions preceding ednams in dnams
# # actually, we also want that, in dnamsstd, ednam should be just before the dimensions succeeding ednams in dnams. Sometimes, this is not possible at the same time. But it will be the case if that is possible when applying one of the criteria
index = 0
# print 'dnamsstd: ', evsdin,dnamsstd
for idnam2,ednam2 in enumerate(dnamsstd):
# print ednam,ednam2,idnam2,evsdin['dnams'][0:idnam2+1]
if ednam2 in evsdin['dnams'][0:(idnam+1)]:
# print index
index = max(index,dnamsstd.index(ednam2) + 1)
dnamsstd.insert(index,ednam)
if ednam not in dnamsel:
dimsstd.insert(index,int(vsdin[ivsdin]['dims'][idnam]))
else:
# In this case, wait for assigning the output dimensions. This actually depends on the specified function
dimsstd.insert(index,None)
else:
if ((vsdin[ivsdin]['dims'][idnam] != 1) & (dimsstd[dnamsstd.index(ednam)] != 1) & \
# we allow non-equal dimension lengths, as long as the dimension is covered/captured by the function
# maybe still allow non-equal dimension length not covered by the function????
(dimsstd[dnamsstd.index(ednam)] != None) & \
(vsdin[ivsdin]['dims'][idnam] != dimsstd[dnamsstd.index(ednam)])):
raise SomeError("The corresponding output dnamensions (index: "+str(dnamsstd.index(ednam))+") of the input variable "+str(ivsdin)+ " "+ str(idnam)+ " "+" have a different length and not equal to 1.")
else:
# None means it's considered by the function
if (dimsstd[dnamsstd.index(ednam)] != None):
dimsstd[dnamsstd.index(ednam)] = max(dimsstd[dnamsstd.index(ednam)],vsdin[ivsdin]['dims'][idnam])
print 'Preliminary output dimensions: ', zip(dnamsstd,dimsstd)
idnam = 0
# add the missing dimensions selected for the function
for idnamsel,ednamsel in enumerate(dnamsel):
if ednamsel not in dnamsstd:
dnamsstd.insert(idnam,ednamsel)
dimsstd.insert(idnam,None) # to be defined from the function
idnam = idnam+1 # moet dit ook hier niet boven geimplementeerd worden?
else:
idnam = dnamsstd.index(ednam)+1
# adimsstd: list the specific output dimensions
# if function dimension: data output dimension should be the same as the function output dimension, but this should be checked afterwards.
# if not function dimension:
# # look what's the output dimension like. If the dimension is not in the output variable, we add a dummy 1-dimension
# we need to create/list adimsstd also before!! And then append them with the missing dimensions, as dummy 1-dimensions. If that is not sufficient, we will just get an error message.
# get references to the standard output dimensions on which the function is applied
refdfuncstd = []
for idnamsel,ednamsel in enumerate(dnamsel):
refdfuncstd.append(dnamsstd.index(ednamsel))
# all output dimensions are now collected...
# add the standard output dimensions that are missing in each seperate input variable as a dummy 1-dimension
for ivsdin,evsdin in enumerate(vsdin):
idnam = 0
for idnamsstd,ednamsstd in enumerate(dnamsstd):
if ednamsstd not in vsdin[ivsdin]['dnams']:
vsdin[ivsdin]['dnams'].insert(idnam,ednamsstd)
vsdin[ivsdin]['dims'].insert(idnam,1)
idnam = idnam + 1
else:
idnam = vsdin[ivsdin]['dnams'].index(ednamsstd) + 1
# do the same for the data output variables
# # vsdin[ivsdin]['refdstd']: references of data stream dimensions (vsdin[..]['dnams'] to the standard dimensions (dnamsstd)
for ivsdin,evsdin in enumerate(vsdin):
vsdin[ivsdin]['refdstd']= list([])
for idim,edim in enumerate(vsdin[ivsdin]['dnams']):
vsdin[ivsdin]['refdstd'].append(dnamsstd.index(edim))
for ivsdout,evsdout in enumerate(vsdout):
if vsdout[ivsdout]['dnams'] == None:
vsdout[ivsdout]['dnams'] = dnamsstd
# adimfuncin: the input dimensions of the function based on the refdfuncstd
# arefapply = [list([])]*len(arefsin)
# adimfuncin = np.array([list([None]*len(refdfuncstd))]*len(arefsin))
# adimfuncin: the dimensions of the function input
adimfuncin = np.zeros((len(vsdin),len(refdfuncstd)),dtype='int32') - 1
alenfuncin = []
for ivsdout in range(len(vsdout)):
if vsdout[ivsdout]['dnams'] == None:
vsdout[ivsdout]['dnams'] == dnamsstd
# vsdout[..]['refdstd']: references of data stream dimensions (vsdout[..]['dnams'] to the standard dimensions (dnamsstd)
for ivsdout,evsdout in enumerate(vsdout):
vsdout[ivsdout]['refdstd'] = list([])
for idim,edim in enumerate(vsdout[ivsdout]['dnams']):
vsdout[ivsdout]['refdstd'].append(dnamsstd.index(edim))
# arefdfuncout: references of the function dimensions to the data output stream dimensions
arefdfuncout = []
for ivsdout,evsdout in enumerate(vsdout):
arefdfuncout.append([])
for idnamsel,ednamsel in enumerate(dnamsel):
arefdfuncout[ivsdout].append(vsdout[ivsdout]['dnams'].index(ednamsel))
# is arefdfuncout[ivsdout][irefdfuncout] == vsdout[ivsdout]['refdstd'].index(erefdfuncstd) ???
# # maybe this needs to be
adimapplyout = np.zeros((len(vsdout),len(refdfuncstd)),dtype='int32') - 1
# arefdfuncin: references of the function dimensions to the data input stream dimensions
arefdfuncin = []
for ivsdin,evsdin in enumerate(vsdin):
arefdfuncin.append([])
for idnamsel,ednamsel in enumerate(dnamsel):
arefdfuncin[ivsdin].append(vsdin[ivsdin]['dnams'].index(ednamsel))
# to do next:::...
for ivsdin,evsdin in enumerate(vsdin):
for irefdfuncstd,erefdfuncstd in enumerate(refdfuncstd):
adimfuncin[ivsdin,irefdfuncstd] = evsdin['dims'][vsdin[ivsdin]['refdstd'].index(erefdfuncstd)]
alenfuncin.append(reduce(mul,adimfuncin[ivsdin]))
# 'probe' function output dimensions
dummydat = []
for ivsdin,evsdin in enumerate(vsdin):
dummydat.append(np.zeros(adimfuncin[ivsdin]))
ddout = func(*dummydat)
if (type(ddout).__name__ == 'tuple'):
ddout = list(ddout)
if (type(ddout).__name__ != 'list'):
ddout = list([ddout])
# obtain output data type. If not specified, we obtain it from the function output.
# meanwhile, check whether the number of input dimensions are the same as the number of output dimensions.
if len(ddout) != len(vsdout):
raise SomeError('the amount of output variables in from '+ str(func) + ' ('+str(len(ddout))+') is not the same as specified ('+str(len(vsdout))+')')
for iddout in range(len(ddout)):
if type(ddout[iddout] ) != np.ndarray:
ddout[iddout] = np.array(ddout[iddout])
if (len(np.array(ddout[iddout]).shape) != len(adimfuncin[iddout])):
raise SomeError('The amount of input ('+str(len(adimfuncin[iddout]))+') and output dimensions ('+str(len(ddout[iddout].shape))+') of function is not the same')
if vsdout[iddout]['dims'] == None:
vsdout[iddout]['dims'] = dimsstd
# overwrite dimensions with the function output dimensions
for irefdfuncout,erefdfuncout in enumerate(arefdfuncout[iddout]):
vsdout[iddout]['dims'][erefdfuncout] = ddout[iddout].shape[irefdfuncout]
if vsdout[iddout]['dtype'] == None:
# output netcdf variable does not exist... creating
# why does this needs to be little endian????
vsdout[iddout]['dtype'] = '>'+nctypecode(ddout[iddout].dtype)
# try to copy dimension from data input
for idim,edim in enumerate(vsdout[iddout]['dnams']):
if edim not in datout[iddout][0].dimensions:
dimensionfound = False
idatin = 0
# # try to copy the dimension from the input data
# while ((not dimensionfound) & (idatin < (len(datin) ))):
# if edim in datin[idatin][0].dimensions:
# if (vsdout[iddout]['dims'][idim] == datin[idatin][0].dimensions[edim]):
# print datin[idatin][0],datout[iddout][0], edim
# nccopydimension(datin[idatin][0],datout[iddout][0], edim)
# dimensionfound = True
# idatin = idatin + 1
if dimensionfound == False:
datout[iddout][0].createDimension(edim,vsdout[iddout]['dims'][idim])
datout[iddout][0].createVariable(datout[iddout][1],vsdout[iddout]['dtype'][1],vsdout[iddout]['dnams'])
# we should check this at the time the dimensions are not created
if (vsdout[iddout]['dims'] != list(datout[iddout][0].variables[datout[iddout][1]].shape)):
raise SomeError("dimensions of output file ( "+str(vsdout[iddout]['dims'])+"; "+ str(vsdout[iddout]['dnams'])+") do not correspond with intended output dimension "+str(datout[iddout][0].variables[datout[iddout][1]].shape)+"; "+str(datout[iddout][0].variables[datout[iddout][1]].dimensions))
for idatout,edatout in enumerate(datout):
datout[idatout][0].sync()
# print 'iddout:', iddout
# oripos = datout[iddout][0].fp.tell()
# vsdout[iddout]['dtype'], vsdout[iddout]['voffset'] = ncvartypeoffset(datout[iddout][0],datout[iddout][1])
# vsdout[iddout]['itemsize'] = datout[iddout][0].variables[datout[iddout][1]].itemsize()
# # a few updates in the variable descriptions
# for iddout in range(len(ddout)):
# datout[iddout][0].sync() # to be able to use _voffset
# vsdout[iddout]['itemsize'] = datout[iddout][0].variables[datout[iddout][1]].itemsize()
# vsdout[iddout]['voffset'] = datout[iddout][0].variables[datout[iddout][1]]._voffset
#
# # # next: check whether the output variable dimensions (if already present) are not too large, otherwise raise error. + Construct final output dimension specs
#
#
# # to do next:::...
# # adimfuncout: the dimensions of the function output
# adimfuncout = np.zeros((len(vsdout),len(refdfuncstd)),dtype='int32') - 1
# alenfuncout = []
# for ivsdout,evsdout in enumerate(vsdout):
# for irefdfuncstd,erefdfuncstd in enumerate(refdfuncstd):
# adimfuncout[ivsdout,irefdfuncstd] = evsdout['dims'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)]
# # # or ...
# # for irefdfuncout,erefdfuncout in enumerate(arefdfuncout[ivsdout]):
# # adimfuncout[ivsdout,irefdfuncstd] = evsdout['dims'][erefdfuncout]
# alenfuncout.append(reduce(mul,adimfuncout[ivsdout]))
# # ???arefdfuncout[ivsdout][irefdfuncout] == vsdout[ivsdout]['refdstd'].index(erefdfuncstd)
#
# # make copies of adimfunc*, alenfunc*, arefdfunc*
#
# # lennoiterstd = list(lenfuncstd)
# # dimnoiterstd = list(dimdfuncstd)
# refdnoiterstd = list(refdfuncstd)
#
# alendnoiterin = list(alenfuncin)
# adimnoiterin = []
# arefdnoiterin = []
# for ivsdin,evsdin in enumerate(vsdin):
# adimnoiterin.append(list(adimfuncin[ivsdin]))
# arefdnoiterin.append(list(arefdfuncin[ivsdin]))
#
# alendnoiterout = list(alenfuncout)
# adimnoiterout = []
# arefdnoiterout = []
# for ivsdout,evsdout in enumerate(vsdout):
# adimnoiterout.append(list(adimfuncout[ivsdout]))
# arefdnoiterout.append(list(arefdfuncout[ivsdout]))
#
#
#
# # arefsin: references of the standard dimensions to the data stream dimensions
#
# arefsin = []
# for ivsdin,evsdin in enumerate(vsdin):
# arefsin.append([None]*len(vsdin[ivsdin]['refdstd']))
# # loop over the data stream dimensions
#
# for irefdstd,erefdstd in enumerate(vsdin[ivsdin]['refdstd']):
# arefsin[ivsdin][erefdstd] = irefdstd
#
# # arefsout: references of the standard dimensions to the data stream dimensions
#
# arefsout = []
# for ivsdout,evsdout in enumerate(vsdout):
# arefsout.append([None]*len(vsdout[ivsdout]['refdstd']))
# # loop over the data stream dimensions
#
# for irefdstd,erefdstd in enumerate(vsdout[ivsdout]['refdstd']):
# arefsout[ivsdout][erefdstd] = irefdstd
#
#
# dnamselnoiter = list(dnamsel)
#
# # membytes: minimum total memory that will be used. We will the increase usage when possible/allowed.
# membytes = 0
# for ivsdin,evsdin in enumerate(vsdin):
# membytes = membytes + alenfuncin[ivsdin] * vsdin[ivsdin]['itemsize']
#
# for ivsdout,evsdout in enumerate(vsdout):
# membytes = membytes + alenfuncout[ivsdout] * vsdout[ivsdout]['itemsize']
#
# maxmembytes = 1000000
# if membytes > maxmembytes:
# print 'Warning, used memory ('+str(membytes)+') exceeds maximum memory ('+str(maxmembytes)+').'
# else:
#
# # a temporary copy of alennoiter*
# alendnoiterin_tmp = list(alendnoiterin)
# alendnoiterout_tmp = list(alendnoiterout)
# # we try will to read the data in even larger icecubes to reduce disk access!
# idnam = len(dnamsstd) - 1
#
#
# cont = True
# while ((idnam >= 0) & (membytes <= maxmembytes) & cont):
# # while loop quite extensive but does what is should-> should be reduced and simplified
# cont = False # only continue to the next loop if idnam+1 (in previous loop) was (inserted) in refdnoiterstd
# if idnam not in refdnoiterstd:
# for ivsdin,evsdin in enumerate(vsdin):
# alendnoiterin_tmp[ivsdin] = alendnoiterin_tmp[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
# for ivsdout,evsdout in enumerate(vsdout):
# alendnoiterout_tmp[ivsdout] = alendnoiterout_tmp[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
#
# # recalculate the amount of bytes
# tmpmembytes = 0
# for ivsdin,evsdin in enumerate(vsdin):
# tmpmembytes = tmpmembytes + alendnoiterin_tmp[ivsdin] * vsdin[ivsdin]['itemsize']
#
# for ivsdout,evsdout in enumerate(vsdout):
# tmpmembytes = tmpmembytes + alendnoiterout_tmp[ivsdout] * vsdout[ivsdout]['itemsize']
#
# print 'tmpmembytes', tmpmembytes, membytes
# # if used memory still below threshold, we add it to the current dimension to the icecubes
# if tmpmembytes <= maxmembytes:
# refdnoiterstd.insert(0,idnam)
# for ivsdin,evsdin in enumerate(vsdin):
# arefdnoiterin[ivsdin].insert(0, arefsin[ivsdin][idnam])
# adimnoiterin[ivsdin].insert(0,vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]])
# alendnoiterin[ivsdin] = alendnoiterin[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
# for ivsdout,evsdout in enumerate(vsdout):
# arefdnoiterout[ivsdout].insert(0, arefsout[ivsdout][idnam])
# adimnoiterout[ivsdout].insert(0,vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]])
# alendnoiterout[ivsdout] = alendnoiterout[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
# dnamselnoiter.insert(0,dnamsstd[idnam])
#
# # recalculate the amount of bytes
# membytes = 0
# for ivsdin,evsdin in enumerate(vsdin):
# membytes = membytes + alendnoiterin[ivsdin] * vsdin[ivsdin]['itemsize']
#
# for ivsdout,evsdout in enumerate(vsdout):
# membytes = membytes + alendnoiterout[ivsdout] * vsdout[ivsdout]['itemsize']
#
# print 'membytes',membytes
# cont = True
# # if used memory still below threshold, we add it to the current dimension to the icecubes
#
# else:
# cont = True
# idnam = idnam - 1
#
#
# # adimnoiterin[ivsdin,irefdnoiterstd] = evsdin['dims'][vsdin[ivsdin]['refdstd'].index(erefdnoiterstd)]
#
#
# # arefdfuncin: references of the function dimensions to the data input stream dimensions
# # arefdnoiterin: references of the icecube dimensions to the data input stream dimensions
# # # vsdin[ivsdin]['refdstd']: references of data stream dimensions (vsdin[..]['dnams'] to the standard dimensions (dnamsstd)
# # dnamselnoiter: references
#
#
#
# # guess from residual dimensions that are not in refnoiterin
# refditerstd = []
# dimiterstd = []
# for idim,edim in enumerate(dimsstd):
# if idim not in refdnoiterstd:
# refditerstd.append(idim)
# dimiterstd.append(edim)
#
# # guess from residual dimensions that are not in refnoiterin
# arefditerin = []
# adimiterin = []
# for ivsdin,evsdin in enumerate(vsdin):
# arefditerin.append([])
# adimiterin.append([])
# for idim,edim in enumerate(vsdin[ivsdin]['dims']):
# if idim not in arefdnoiterin[ivsdin]:
# arefditerin[ivsdin].append(idim)
# adimiterin[ivsdin].append(edim)
#
#
# # guess from residual dimensions that are not in refnoiterin
# arefditerout = []
# adimiterout = []
# for ivsdout,evsdout in enumerate(vsdout):
# arefditerout.append([])
# adimiterout.append([])
# for idim,edim in enumerate(vsdout[ivsdout]['dims']):
# if idim not in arefdnoiterout[ivsdout]:
# arefditerout[ivsdout].append(idim)
# adimiterout[ivsdout].append(edim)
#
# dimitermax = []
# for iref,eref in enumerate(refditerstd):
# dimitermax.append(1)
# for ivsdin,evsdin in enumerate(vsdin):
# dimitermax[iref] = max(dimitermax[iref],adimiterin[ivsdin][iref])
# print dimitermax[iref], adimiterin[ivsdin][iref]
# for ivsdout,evsdout in enumerate(vsdout):
# dimitermax[iref] = max(dimitermax[iref],adimiterout[ivsdout][iref])
#
#
# rwchunksizein = [1]*len(vsdin)
# for ivsdin,evsdin in enumerate(vsdin):
# idim = len(vsdin[ivsdin]['dims']) -1
# while ((idim in arefdnoiterin[ivsdin]) & (idim >= 0)):
# # The inner dimensions just have to be referenced so not in correct order. We know that they will be read in the correct order in the end
# rwchunksizein[ivsdin] = rwchunksizein[ivsdin]*vsdin[ivsdin]['dims'][idim]
# idim = idim - 1
#
# rwchunksizeout = [1]*len(vsdout)
# for ivsdout,evsdout in enumerate(vsdout):
# idim = len(vsdout[ivsdout]['dims']) -1
# while ((idim in arefdnoiterout[ivsdout]) & (idim >= 0)):
# # The inner dimensions just have to be referenced so not in correct order. We know that they will be read in the correct order in the end
# rwchunksizeout[ivsdout] = rwchunksizeout[ivsdout]*vsdout[ivsdout]['dims'][idim]
# idim = idim - 1
#
#
# adimnoapplyout = []
# alennoapplyout = []
# for ivsdout,evsdout in enumerate(vsdout):
# adimnoapplyout.append([])
# alennoapplyout.append(1)
# for irefdnoiterout in range(len(arefdnoiterout[ivsdout])-len(arefdfuncout[ivsdout])):
# adimnoapplyout[ivsdout].append(adimnoiterout[ivsdout][irefdnoiterout])
# alennoapplyout[ivsdout] =alennoapplyout[ivsdout]*adimnoapplyout[ivsdout][-1]
#
# if adimnoapplyout[ivsdout] == []:
# adimnoapplyout[ivsdout] = [1]
#
# adimnoapplyin = []
# alennoapplyin = []
# for ivsdin,evsdin in enumerate(vsdin):
# adimnoapplyin.append([])
# alennoapplyin.append(1)
# for irefdnoiterin in range(len(arefdnoiterin[ivsdin])-len(arefdfuncin[ivsdin])):
# adimnoapplyin[ivsdin].append(adimnoiterin[ivsdin][irefdnoiterin])
# alennoapplyin[ivsdin] =alennoapplyin[ivsdin]*adimnoapplyin[ivsdin][-1]
#
# if adimnoapplyin[ivsdin] == []:
# adimnoapplyin[ivsdin] = [1]
#
# dimnoapplymax = []
# for iref in range(len(arefdnoiterout[ivsdout])-len(arefdfuncout[ivsdout])):
# dimnoapplymax.append(1)
# for ivsdin,evsdin in enumerate(vsdin):
# dimnoapplymax[iref] = max(dimnoapplymax[iref],adimnoapplyin[ivsdin][iref])
# print dimnoapplymax[iref], adimnoapplyin[ivsdin][iref]
# for ivsdout,evsdout in enumerate(vsdout):
# dimnoapplymax[iref] = max(dimnoapplymax[iref],adimnoapplyout[ivsdout][iref])
#
# lennoapplymax = reduce(mul,dimnoapplymax)
#
#
#
# testdata = np.zeros(vsdout[0]['dims']).ravel()
#
#
# lenitermax = reduce(mul,dimitermax)
# dimiterpos = [0]*len(dimitermax)
# print str(0)+'/'+str(lenitermax),
# for j in range(lenitermax):
# # reading icecube, rearranged in the order of dimensions specified by arefnoiterin
# dataicecubein = []
# for ivsdin,evsdin in enumerate(vsdin):
# # dataicecubein.append(np.zeros((elendnoiterin,),dtype=vsdin[ilendnoiterin]['dtype']))
# dataicecubein.append(np.array(readicecubeps(\
# datin[ivsdin][0].fp,vsdin[ivsdin]['dims'],\
# arefditerin[ivsdin],\
# adimiterin[ivsdin],\
# dimiterpos,\
# arefdnoiterin[ivsdin],\
# adimnoiterin[ivsdin],\
# vsdin[ivsdin]['dtype'],\
# vsdin[ivsdin]['itemsize'],\
# vsdin[ivsdin]['voffset'],\
# rwchunksizein[ivsdin],\
# ), dtype=vsdin[ivsdin]['dtype']).ravel())
#
# dataicecubeout = []
# for ilendnoiterout,elendnoiterout in enumerate(alendnoiterout):
# dataicecubeout.append(np.zeros((elendnoiterout,),dtype=vsdout[ilendnoiterout]['dtype'][1]))
#
# dimnoapplypos = [0]*len(dimnoapplymax)
# for k in range(lennoapplymax):
# # actually, this is just the end of the file output already written
# ahunkin = []
# for ivsdin, evsdin in enumerate(vsdin):
# pos = 0
# # e.g. pos = (9)+ 20*(10) + 50*50*20*(5)
# for idimpos,edimpos in enumerate(dimnoapplypos):
# curadd = np.mod(edimpos,adimnoapplyin[ivsdin][idimpos])
# #e.g. if edimpos == (5): curadd = 50*50*20*(5)
# if ((idimpos + 1) < len(arefdnoiterin[ivsdin])):
# for i in range(idimpos + 1,len(arefdnoiterin[ivsdin])) :
# # here, we assume that the dimensions of the chunk are already in the order considered by adimsnoiter(out) etc. (cfr. preceeded transposition in readicecubeps)
# curadd = curadd * adimnoiterin[ivsdin][i]
# # curaddout = curaddout * dimnoiteroutref[i]
# pos = pos + curadd
# ahunkin.append(dataicecubein[ivsdin][pos:(pos+alenfuncin[ivsdin])])
# ahunkin[ivsdin].shape = adimfuncin[ivsdin]
#
# # apply the function
#
#
# ahunkout = func(*ahunkin)
# if (type(ahunkout).__name__ == 'tuple'):
# ahunkout = list(ahunkout)
# if (type(ahunkout).__name__ != 'list'):
# ahunkout = list([ahunkout])
#
# # print ahunkout
#
#
# # ahunkout = np.array(func(*ahunkin)) #np.array((np.zeros(hunk.shape) + 1)*np.mean(hunk),dtype=vtype)
# # print type(ahunkout).__name__
# # if (type(ahunkout).__name__ != 'list'): # tbi: nog te bekijken of dit wel de handigste voorwaarde is!
# # ahunkout = list([ahunkout])
#
# for ihunkout in range(len(ahunkout)):
# ahunkout[ihunkout] = np.array(ahunkout[ihunkout])
# # e.g. posout = (9)+ 20*(10) + 50*50*20*(5)
# posout = 0
# for idimpos,edimpos in enumerate(dimnoapplypos):
# curadd = np.mod(edimpos,adimnoapplyout[ihunkout][idimpos])
# #e.g. if edimpos == (5): curadd = 50*50*20*(5)
# if ((idimpos + 1) < len(arefdnoiterout[ihunkout])):
# for i in range(idimpos + 1,len(arefdnoiterout[ihunkout])) :
# # here, we assume that the idims are in the intended order (cfr. subsequent transposition in writeicecubeps)
# curadd = curadd * adimnoiterout[ihunkout][i]
# # curaddout = curaddout * dimnoiteroutref[i]
# posout = posout + curadd
#
# dataicecubeout[ihunkout][posout:(posout+alenfuncout[ihunkout])] = np.array(ahunkout[ihunkout].ravel(),dtype=vsdout[ihunkout]['dtype'][1])
#
# # go to next data slice
# dimnoapplypos[-1] = dimnoapplypos[-1] + 1
# for idimidx,edimidx in enumerate(reversed(dimnoapplypos)):
# # # alternative (makes 'dimiter' redundant)
# # if dimiterpos[idimidx] == shp[refiter[idimidx]]:
# if idimidx > 0:
# if dimnoapplypos[idimidx] == dimnoapply[idimidx]:
# dimnoapplypos[idimidx-1] = dimnoapplypos[idimidx-1] + 1
# dimnoapplypos[idimidx] = 0
#
# for idimsout in range(len(dataicecubeout)):
# dataicecubeout[idimsout].shape = adimnoiterout[idimsout]
# #print dataicecubeout[idimsout].shape
#
#
# for ivsdout in range(len(vsdout)):
# # print dataicecubeout[ivsdout].shape,vsdout[ivsdout]
# # print 'ivsdout', ivsdout
# writeicecubeps(\
# datout[ivsdout][0].fp,
# vsdout[ivsdout]['dims'],\
# arefditerout[ivsdout],\
# adimiterout[ivsdout],\
# dimiterpos,\
# arefdnoiterout[ivsdout],\
# adimnoiterout[ivsdout],\
# dataicecubeout[ivsdout],\
# vsdout[ivsdout]['dtype'],\
# vsdout[ivsdout]['itemsize'],\
# vsdout[ivsdout]['voffset'],\
# rwchunksizeout[ivsdout])
#
# # writeicecubeps(fout[idimsout],\
# # adimsout[idimsout],\
# # arefsnoiter[idimsout],\
# # adimiterout[idimsout],\
# # dimiterposout[idimsout],\
# # arefnoiterout[idimsout],\
# # adimnoiterout[idimsout],\
# # dataicecubeout[idimsout],\
# # vtype[idimsout],\
# # vsize[idimsout],\
# # voffset[idimsout],\
# # rwchunksizeout[idimsout])
#
#
#
# # go to next data slice
# dimiterpos[-1] = dimiterpos[-1] + 1
# for idimidx,edimidx in enumerate(reversed(dimiterpos)):
# # # alternative (makes 'dimiter' redundant)
# # if dimiterpos[idimidx] == shp[refiter[idimidx]]:
# if dimiterpos[idimidx] == dimitermax[idimidx]:
# if idimidx > 0:
# dimiterpos[idimidx-1] = dimiterpos[idimidx-1] + 1
# dimiterpos[idimidx] = 0
#
# sys.stdout.write ('\b'*(len(str(j)+'/'+str(lenitermax))+1))
# sys.stdout.write (str(j+1)+'/'+str(lenitermax))
#
# import pylab as pl
# fout.close()
# # fin.close()
# # fout = NetCDF.NetCDFFile(fnout,'r')
# fout = netcdf.netcdf_file(fnout,'r')
# fout.fp.seek(vsdout[0]['voffset'])
# fpointout.seek(vsdout[0]['voffset'])
# test = np.fromfile(fpointout,dtype=vsdout[0]['dtype'],count=reduce(mul,vsdout[0]['dims']))
# test.shape = (40,340)
# fig = pl.figure()
# pl.imshow(test)
# fig.show()
#
# fig = pl.figure()
# testdata.shape = vsdout[0]['dims']
# pl.imshow(testdata[0,:,:,0,1])
# fig.show()
#
# fout.close()
# fout = NetCDF.NetCDFFile(fnout,'r')
#
# fig = pl.figure()
# pl.imshow(fout.variables['QV'][0,:,:,0,0])
# fig.show()
# fout.close()
|
hendrikwout/pynacolada
|
trash/pynacolada-2013-11-1.py
|
Python
|
gpl-3.0
| 37,771
|
[
"NetCDF"
] |
cd38e04b5ede47eec45455e074591fd2182d67b3bca1bc8225b5bbbdf30bb97e
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Christian Schwantes
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function
import itertools
import mdtraj as md
import numpy as np
from mdtraj.testing import eq
def test_contact_0(get_fn):
    """Check every contact scheme against its reference-distance file for BPTI.

    For each scheme, the computed distances must match the corresponding
    ``cc_<scheme>.dat`` reference data and the returned pair list must echo
    the residue pairs that were requested.
    """
    pdb = md.load(get_fn('bpti.pdb'))
    residue_pairs = np.loadtxt(get_fn('contacts.dat')).astype(int)

    for scheme in ('ca', 'closest', 'closest-heavy', 'sidechain', 'sidechain-heavy'):
        distances, computed_pairs = md.compute_contacts(pdb, residue_pairs, scheme=scheme)
        reference = np.loadtxt(get_fn('cc_%s.dat' % scheme))
        eq(reference, distances.flatten())
        eq(residue_pairs, computed_pairs)
def test_contact_1(get_fn):
    """Default compute_contacts: indices are valid, pairs are non-adjacent,
    and the squareform map agrees with the flat distance array."""
    pdb = md.load(get_fn('bpti.pdb'))
    dists, pairs = md.compute_contacts(pdb)

    for res_i, res_j in pairs:
        # topology.residue raises if the index is out of range
        pdb.topology.residue(res_i)
        pdb.topology.residue(res_j)
        assert abs(res_i - res_j) >= 3

    contact_maps = md.geometry.squareform(dists, pairs)
    for pair_idx, (res_i, res_j) in enumerate(pairs):
        for frame in range(pdb.n_frames):
            eq(contact_maps[frame, res_i, res_j], dists[frame, pair_idx])
def test_contact_2(get_fn):
    """'closest' scheme on a solvated system: water residues must be excluded,
    one pair's distance is spot-checked against an explicit atom-pair minimum,
    and the squareform map agrees with the flat distance array."""
    pdb = md.load(get_fn('1vii_sustiva_water.pdb'))
    dists, pairs = md.compute_contacts(pdb, scheme='closest')

    for res_i, res_j in pairs:
        assert 'HOH' not in (pdb.topology.residue(res_i).name,
                             pdb.topology.residue(res_j).name)

    # spot check one of the pairs: the 'closest' distance should equal the
    # minimum over all cross-residue atom-atom distances
    res_i, res_j = pairs[10]
    atoms_i = [a.index for a in pdb.topology.residue(res_i).atoms]
    atoms_j = [a.index for a in pdb.topology.residue(res_j).atoms]
    atom_dists = md.compute_distances(pdb, list(itertools.product(atoms_i, atoms_j)))
    np.testing.assert_array_equal(dists[:, 10], atom_dists.min(axis=1))

    contact_maps = md.geometry.squareform(dists, pairs)
    for pair_idx, (res_i, res_j) in enumerate(pairs):
        for frame in range(pdb.n_frames):
            eq(contact_maps[frame, res_i, res_j], dists[frame, pair_idx])
def test_contact_3(get_fn):
    """soft_min contacts: each reported distance must equal the beta-weighted
    soft minimum reconstructed from the squareform map."""
    pdb = md.load(get_fn('bpti.pdb'))
    beta = 20
    dists, pairs = md.compute_contacts(pdb, soft_min=True, soft_min_beta=beta)
    contact_maps = md.geometry.squareform(dists, pairs)

    for pair_idx, (res_i, res_j) in enumerate(pairs):
        for frame in range(pdb.n_frames):
            expected = beta / np.log(np.sum(np.exp(beta / contact_maps[frame, res_i, res_j])))
            assert np.allclose(expected, dists[frame, pair_idx])
|
leeping/mdtraj
|
tests/test_contact.py
|
Python
|
lgpl-2.1
| 4,069
|
[
"MDTraj"
] |
4bd9a7d944318e047219fc72cf3abd1b0d0a48e11fce861f5438e587d0b4bf5b
|
# $Id$
#
# Copyright (C) 2004-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the AnalyzeComposite functionality
"""
import os
import unittest
from rdkit import RDConfig
from rdkit.ML import AnalyzeComposite
import pickle
class TestCase(unittest.TestCase):
    """Regression test for AnalyzeComposite.ProcessIt (Issue 163)."""

    def setUp(self):
        # Directory containing the pickled composite fixtures.
        self.baseDir = os.path.join(RDConfig.RDCodeDir, 'ML', 'test_data')

    def _loadComposite(self, filename):
        """Unpickle a composite fixture from baseDir.

        Returns the loaded object, or None if the file is missing or cannot
        be unpickled (the caller asserts on the result, so a failure here
        surfaces as a test failure rather than an unhandled exception).
        """
        try:
            with open(os.path.join(self.baseDir, filename), 'rb') as pklF:
                return pickle.load(pklF)
        except Exception:
            return None

    def test1_Issue163(self):
        # Deduplicated: both fixtures are loaded through the same helper.
        c1 = self._loadComposite('humanoral.1.pkl')
        self.assertTrue(c1)
        c2 = self._loadComposite('humanoral.2.pkl')
        self.assertTrue(c2)

        try:
            res = sorted(AnalyzeComposite.ProcessIt([c1, c2], verbose=-1))
        except Exception:
            import traceback
            traceback.print_exc()
            ok = 0
        else:
            ok = 1
        self.assertTrue(ok)

        # Each result row is a 5-tuple keyed by descriptor name; the sorted
        # order pins the first, second and last descriptors.
        self.assertEqual(res[0][0], 'BALABANJ')
        self.assertEqual(res[1][0], 'BERTZCT')
        self.assertEqual(res[-1][0], 'VSA_ESTATE9')
        for entry in res:
            self.assertEqual(len(entry), 5)
# Allow running this test module directly from the command line.
if __name__ == '__main__':  # pragma: nocover
    unittest.main()
|
bp-kelley/rdkit
|
rdkit/ML/UnitTestAnalyzeComposite.py
|
Python
|
bsd-3-clause
| 1,647
|
[
"RDKit"
] |
13bf3ec6814f97ad812a3d45a75418f6fcbbc895f21c56aba0b6c490446de662
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import copy
import functools
import sys
import pasta
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class UnaliasedTFImport(ast_edits.AnalysisResult):
    """Analysis result raised when the script sees a bare `import tensorflow`.

    The upgrader can only rewrite symbols accessed through the `tf` alias, so
    an unaliased import is reported at ERROR level.
    """

    def __init__(self):
        self.log_message = (
            "The tf_upgrade_v2 script detected an unaliased `import tensorflow`. "
            "The script can only run when importing with `import tensorflow as tf`.")
        self.log_level = ast_edits.ERROR
class VersionedTFImport(ast_edits.AnalysisResult):
    """Analysis result raised when `tf` is bound to a versioned compat module.

    Code importing `tensorflow.compat.v1` or `tensorflow.compat.v2` as `tf`
    is already pinned to an API version, so no upgrade is performed; this is
    reported at INFO level only.
    """

    def __init__(self, version):
        self.log_message = (
            "Not upgrading symbols because `tensorflow.%s`"
            " was directly imported as `tf`." % version)
        self.log_level = ast_edits.INFO
class TFAPIImportAnalysisSpec(ast_edits.APIAnalysisSpec):
    """Declares which import forms should trigger an analysis result.

    No symbols are watched; only the three interesting `import tensorflow`
    spellings are mapped to their corresponding analysis results.
    """

    def __init__(self):
        self.symbols_to_detect = {}
        # Insertion order preserved: bare import first, then the two
        # version-pinned compat aliases.
        self.imports_to_detect = {}
        self.imports_to_detect[("tensorflow", None)] = UnaliasedTFImport()
        self.imports_to_detect[("tensorflow.compat.v1", "tf")] = VersionedTFImport("compat.v1")
        self.imports_to_detect[("tensorflow.compat.v2", "tf")] = VersionedTFImport("compat.v2")
class TFAPIChangeSpec(ast_edits.NoUpdateSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
self.function_keyword_renames = {
# TODO(b/129398290)
# "tf.string_split": {
# "delimiter": "sep",
# },
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
"hash_table_shared_name": None,
},
"tf.autograph.to_code": {
"arg_types": None,
"arg_values": None,
"indentation": None,
},
"tf.autograph.to_graph": {
"arg_types": None,
"arg_values": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.resize": {
"align_corners": None,
},
"tf.image.resize_images": {
"align_corners": None,
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.nn.avg_pool": {
"value": "input"
},
"tf.nn.avg_pool2d": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"use_cudnn_on_gpu": None,
"input_sizes": "output_shape",
"out_backprop": "input",
"filter": "filters",
},
"tf.contrib.summary.audio": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.create_file_writer": {
"name": None,
},
"tf.contrib.summary.generic": {
"name": "tag",
"tensor": "data",
"family": None,
},
"tf.contrib.summary.histogram": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.image": {
"tensor": "data",
"bad_color": None,
"max_images": "max_outputs",
"family": None,
},
"tf.contrib.summary.scalar": {
"tensor": "data",
"family": None,
},
"tf.nn.weighted_cross_entropy_with_logits": {
"targets": "labels",
},
"tf.decode_raw": {
"bytes": "input_bytes",
},
"tf.io.decode_raw": {
"bytes": "input_bytes",
},
"tf.contrib.framework.load_variable": {
"checkpoint_dir": "ckpt_dir_or_file",
}
}
# Mapping from function to the new name of the function
# Add additional renames not in renames_v2.py to all_renames_v2.py.
self.symbol_renames = all_renames_v2.symbol_renames
# Presumably maps old import paths to new ones; empty for this spec, so no
# module-level import rewriting is performed -- TODO confirm in ast_edits.
self.import_renames = {}
# Variables that should be changed to functions.
# Empty: this spec performs no variable-to-function conversions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
# TODO(b/129398290)
# "tf.string_split",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
"tf.nn.avg_pool",
"tf.estimator.LinearClassifier",
"tf.estimator.LinearRegressor",
"tf.estimator.DNNLinearCombinedClassifier",
"tf.estimator.DNNLinearCombinedRegressor",
"tf.estimator.DNNRegressor",
"tf.estimator.DNNClassifier",
"tf.estimator.BaselineClassifier",
"tf.estimator.BaselineRegressor",
"tf.initializers.uniform_unit_scaling",
"tf.uniform_unit_scaling_initializer",
"tf.train.sdca_fprint",
"tf.train.sdca_optimizer",
"tf.train.sdca_shrink_l1",
}
# Manual mapping of function names to be reordered to their list of argument
# names, in order. Only use this if argument names cannot be autodetected,
# e.g. if the functions are in contrib.
# Each value lists the function's full positional argument order, so
# positional call sites can be rewritten to keyword form.
self.manual_function_reorders = {
    "tf.contrib.summary.audio": [
        "name", "tensor", "sample_rate", "max_outputs", "family", "step"],
    "tf.contrib.summary.create_file_writer": [
        "logdir", "max_queue", "flush_millis", "filename_suffix", "name"],
    "tf.contrib.summary.generic": [
        "name", "tensor", "metadata", "family", "step"],
    "tf.contrib.summary.histogram": [
        "name", "tensor", "family", "step"],
    "tf.contrib.summary.image": [
        "name", "tensor", "bad_color", "max_images", "family", "step"],
    "tf.contrib.summary.scalar": [
        "name", "tensor", "family", "step"],
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
# Start from the autogenerated table, then overlay the manual entries
# (manual entries win on key collisions because update() runs last).
self.function_reorders = dict(reorders_v2.reorders)
self.function_reorders.update(self.manual_function_reorders)
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
# Shared INFO messages for the tf.assert_* / tf.debugging.assert_* family,
# whose TF2 versions return None and dropped some arguments.
# (Fixed: the second literal previously began with a space, so the adjacent
# string concatenation emitted "even though  they" with a double space.)
assert_return_type_comment = (
    ast_edits.INFO,
    "<function name> has been changed to return None, the "
    "data argument has been removed, and arguments have been reordered."
    "\nThe calls have been converted to compat.v1 for safety (even though "
    "they may already have been correct)."
)
assert_rank_comment = (
    ast_edits.INFO,
    "<function name> has been changed to return None, and"
    " the data and summarize arguments have been removed."
    "\nThe calls have been converted to compat.v1 for safety (even though "
    "they may already have been correct)."
)
contrib_layers_layer_norm_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.layers.layer_norm` has been "
"deprecated, and its implementation has been integrated with "
"`tf.keras.layers.LayerNormalization` in TensorFlow 2.0. "
"Note that, the default value of `epsilon` is changed to `1e-3` in the "
"new API from `1e-12`, and this may introduce numerical differences. "
"Please check the new API and use that instead."
)
initializers_no_dtype_comment = (
ast_edits.INFO, "Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for "
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
# (Fixed: ". " + " The" previously produced a double space in the message.)
deprecate_partition_strategy_comment = (
    ast_edits.WARNING,
    "`partition_strategy` has been removed from <function name>. "
    "The 'div' strategy will be used by default.")
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
# Error message for the Estimator.export_savedmodel -> export_saved_model
# rename. (Fixed typo in the user-facing text: "effects" -> "affects".)
export_saved_model_renamed = (
    ast_edits.ERROR,
    "(Manual edit required) Please rename the method export_savedmodel() "
    "to export_saved_model(). Two things to note:\n\t(1) The argument "
    "strip_default_attributes has been removed. The function will always "
    "strip the default attributes from ops. If this breaks your code, "
    "please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
    "only affects core estimator. If you are using "
    "tf.contrib.learn.Estimator, please switch to using core estimator.")
summary_api_comment = (
ast_edits.INFO,
"The TF 1.x summary API cannot be automatically migrated to TF 2.0, so "
"symbols have been converted to tf.compat.v1.summary.* and must be "
"migrated manually. Typical usage will only require changes to the "
"summary writing logic, not to individual calls like scalar(). "
"For examples of the new summary API, see the Effective TF 2.0 "
"migration document or check the TF 2.0 TensorBoard tutorials.")
contrib_summary_comment = (
ast_edits.WARNING,
"tf.contrib.summary.* functions have been migrated best-effort to "
"tf.compat.v2.summary.* equivalents where possible, but the resulting "
"code is not guaranteed to work, so please check carefully. For more "
"information about the new summary API, see the Effective TF 2.0 "
"migration document or check the updated TensorBoard tutorials.")
contrib_summary_family_arg_comment = (
ast_edits.WARNING,
"<function name> replacement does not accept a 'family' argument; "
"instead regular name scoping should be used. This call site specifies "
"a family argument that has been removed on conversion, so the emitted "
"tag names may be incorrect without manual editing.")
contrib_create_file_writer_comment = (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() has been ported to the new "
"tf.compat.v2.summary.create_file_writer(), which no longer re-uses "
"existing event files for the same logdir; instead it always opens a "
"new writer/file. The python writer objects must be re-used explicitly "
"if the reusing behavior is desired.")
contrib_summary_record_every_n_comment = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.summary.record_summaries_every_n_global_steps(n, step) "
"should be replaced by a call to tf.compat.v2.summary.record_if() with "
"the argument `lambda: tf.math.equal(0, global_step % n)` (or in graph "
"mode, the lambda body can be used directly). If no global step was "
"passed, instead use tf.compat.v1.train.get_or_create_global_step().")
contrib_summary_graph_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.graph() has no direct "
"equivalent in TF 2.0 because manual graph construction has been "
"superseded by use of tf.function. To log tf.function execution graphs "
"to the summary writer, use the new tf.compat.v2.summary.trace_* "
"functions instead.")
contrib_summary_import_event_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.import_event() has no "
"direct equivalent in TF 2.0. For a similar experimental feature, try "
"tf.compat.v2.summary.experimental.write_raw_pb() which also accepts "
"serialized summary protocol buffer input, but for tf.Summary "
"protobufs rather than tf.Events.")
keras_default_save_format_comment = (
ast_edits.WARNING,
"(This warning is only applicable if the code saves a tf.Keras model) "
"Keras model.save now saves to the Tensorflow SavedModel format by "
"default, instead of HDF5. To continue saving to HDF5, add the "
"argument save_format='h5' to the save() function.")
# Shared suffix appended to every distribution-strategy migration warning.
# The method renames are a comma-separated list; a separator was missing
# between the "unwrap" item and the "experimental_initialize" item, which
# made the two run together in the emitted message (fixed below).
distribute_strategy_api_changes = (
    "If you're using the strategy with a "
    "custom training loop, note the following changes in methods: "
    "make_dataset_iterator->experimental_distribute_dataset, "
    "experimental_make_numpy_iterator->experimental_make_numpy_dataset, "
    "extended.call_for_each_replica->experimental_run_v2, "
    "reduce requires an axis argument, "
    "unwrap->experimental_local_results, "
    "experimental_initialize and experimental_finalize no longer needed ")
contrib_mirrored_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.MirroredStrategy has "
"been migrated to tf.distribute.MirroredStrategy. Things to note: "
"Constructor arguments have changed. If you are using "
"MirroredStrategy with Keras training framework, the input provided to "
"`model.fit` will be assumed to have global batch size and split "
"across the replicas. " + distribute_strategy_api_changes)
core_mirrored_strategy_warning = (
ast_edits.WARNING,
"(Manual edit may be required) tf.distribute.MirroredStrategy API has "
"changed. " + distribute_strategy_api_changes)
contrib_one_device_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.OneDeviceStrategy has "
"been migrated to tf.distribute.OneDeviceStrategy. " +
distribute_strategy_api_changes)
contrib_tpu_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.TPUStrategy has "
"been migrated to tf.distribute.experimental.TPUStrategy. Note the "
"slight changes in constructor. " + distribute_strategy_api_changes)
contrib_collective_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.CollectiveAllReduceStrategy has "
"been migrated to "
"tf.distribute.experimental.MultiWorkerMirroredStrategy. Note the "
"changes in constructor. " + distribute_strategy_api_changes)
contrib_ps_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.ParameterServerStrategy has "
"been migrated to "
"tf.distribute.experimental.ParameterServerStrategy (multi machine) "
" and tf.distribute.experimental.CentralStorageStrategy (one machine). "
"Note the changes in constructors. " + distribute_strategy_api_changes)
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"*.save":
keras_default_save_format_comment,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.contrib.layers.layer_norm":
contrib_layers_layer_norm_comment,
"tf.contrib.summary.all_summary_ops":
contrib_summary_comment,
"tf.contrib.summary.audio":
contrib_summary_comment,
"tf.contrib.summary.create_file_writer":
contrib_create_file_writer_comment,
"tf.contrib.summary.generic":
contrib_summary_comment,
"tf.contrib.summary.graph":
contrib_summary_graph_comment,
"tf.contrib.summary.histogram":
contrib_summary_comment,
"tf.contrib.summary.import_event":
contrib_summary_import_event_comment,
"tf.contrib.summary.image":
contrib_summary_comment,
"tf.contrib.summary.record_summaries_every_n_global_steps":
contrib_summary_record_every_n_comment,
"tf.contrib.summary.scalar":
contrib_summary_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.nn.embedding_lookup":
deprecate_partition_strategy_comment,
"tf.nn.embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.estimator.model_to_estimator":
(ast_edits.WARNING,
"Estimators from <function name> will save object-based "
"checkpoints (format used by `keras_model.save_weights` and "
"`keras_model.load_weights`) by default in 2.0. To continue "
"saving name-based checkpoints, set `checkpoint_format='saver'`."),
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
"tf.get_variable":
(ast_edits.WARNING,
"<function name> returns ResourceVariables by default in 2.0, "
"which have well-defined semantics and are stricter about shapes. "
"You can disable this behavior by passing use_resource=False, or "
"by calling tf.compat.v1.disable_resource_variables()."),
"tf.pywrap_tensorflow":
(ast_edits.ERROR,
"<function name> cannot be converted automatically. "
"`tf.pywrap_tensorflow` will not be distributed with "
"TensorFlow 2.0, please consider an alternative in public "
"TensorFlow APIs."),
"tf.contrib.distribute.MirroredStrategy":
contrib_mirrored_strategy_warning,
"tf.distribute.MirroredStrategy":
core_mirrored_strategy_warning,
"tf.contrib.distribute.OneDeviceStrategy":
contrib_one_device_strategy_warning,
"tf.contrib.distribute.TPUStrategy":
contrib_tpu_strategy_warning,
"tf.contrib.distribute.CollectiveAllReduceStrategy":
contrib_collective_strategy_warning,
"tf.contrib.distribute.ParameterServerStrategy":
contrib_ps_strategy_warning,
"tf.summary.FileWriter": summary_api_comment,
"tf.summary.FileWriterCache": summary_api_comment,
"tf.summary.Summary": summary_api_comment,
"tf.summary.audio": summary_api_comment,
"tf.summary.histogram": summary_api_comment,
"tf.summary.image": summary_api_comment,
"tf.summary.merge": summary_api_comment,
"tf.summary.merge_all": summary_api_comment,
"tf.summary.scalar": summary_api_comment,
"tf.summary.tensor_summary": summary_api_comment,
"tf.summary.text": summary_api_comment,
}
# Warnings that are emitted only if a specific arg is found.
# Structure: function FQN (or "*.method" wildcard) ->
#   {(arg_name, index): (ast_edits level, message)}.
# NOTE(review): the integer appears to be the argument's positional index in
# the v1 signature, so the warning also fires on positional use -- confirm
# against ast_edits.
# Fixed in this revision: four "use_cudnn_on_gpu" messages previously
# concatenated "...equivalent" + "to setting..." into "equivalentto", and the
# tf.cond message read "as if was set" instead of "as if it was set".
self.function_arg_warnings = {
    "tf.nn.conv1d": {
        ("use_cudnn_on_gpu", 4): (
            ast_edits.WARNING,
            "use_cudnn_on_gpu has been removed, behavior is now equivalent "
            "to setting it to True."),
    },
    "tf.nn.conv2d": {
        ("use_cudnn_on_gpu", 4): (
            ast_edits.WARNING,
            "use_cudnn_on_gpu has been removed, behavior is now equivalent "
            "to setting it to True."),
    },
    "tf.nn.conv2d_backprop_filter": {
        ("use_cudnn_on_gpu", 5): (
            ast_edits.WARNING,
            "use_cudnn_on_gpu has been removed, behavior is now equivalent "
            "to setting it to True."),
    },
    "tf.nn.conv2d_backprop_input": {
        ("use_cudnn_on_gpu", 5): (
            ast_edits.WARNING,
            "use_cudnn_on_gpu has been removed, behavior is now equivalent "
            "to setting it to True."),
    },
    "tf.gradients": {
        ("colocate_gradients_with_ops", 4): (
            ast_edits.INFO,
            "tf.gradients no longer takes "
            "'colocate_gradients_with_ops' argument, it behaves as if it "
            "was set to True."),
    },
    "*.minimize": {
        ("colocate_gradients_with_ops", 5): (
            ast_edits.INFO,
            "Optimizer.minimize no longer takes "
            "'colocate_gradients_with_ops' argument, it behaves as if it "
            "was set to True."),
    },
    "*.compute_gradients": {
        ("colocate_gradients_with_ops", 4): (
            ast_edits.INFO,
            "Optimizer.compute_gradients no "
            "longer takes 'colocate_gradients_with_ops' argument, it "
            "behaves as if it was set to True."),
    },
    "tf.cond": {
        ("strict", 3): (
            ast_edits.WARNING,
            "tf.cond no longer takes 'strict' argument, it behaves as "
            "if it was set to True.")
    },
    "tf.contrib.summary.audio": {
        ("family", 4): contrib_summary_family_arg_comment,
    },
    "tf.contrib.summary.create_file_writer": {
        ("name", 4): (
            ast_edits.WARNING,
            "tf.contrib.summary.create_file_writer() no longer supports "
            "implicit writer re-use based on shared logdirs or resource "
            "names; this call site passed a 'name' argument that has been "
            "removed. The new tf.compat.v2.summary.create_file_writer() "
            "replacement has a 'name' parameter but the semantics are "
            "the usual ones to name the op itself and do not control "
            "writer re-use; writers must be manually re-used if desired.")
    },
    "tf.contrib.summary.generic": {
        ("name", 0): (
            ast_edits.WARNING,
            "tf.contrib.summary.generic() takes a 'name' argument for the "
            "op name that also determines the emitted tag (prefixed by any "
            "active name scopes), but tf.compat.v2.summary.write(), which "
            "replaces it, separates these into 'tag' and 'name' arguments. "
            "The 'name' argument here has been converted to 'tag' to "
            "preserve a meaningful tag, but any name scopes will not be "
            "reflected in the tag without manual editing."),
        ("family", 3): contrib_summary_family_arg_comment,
    },
    "tf.contrib.summary.histogram": {
        ("family", 2): contrib_summary_family_arg_comment,
    },
    "tf.contrib.summary.image": {
        ("bad_color", 2): (
            ast_edits.WARNING,
            "tf.contrib.summary.image no longer takes the 'bad_color' "
            "argument; caller must now preprocess if needed. This call "
            "site specifies a bad_color argument so it cannot be converted "
            "safely."),
        ("family", 4): contrib_summary_family_arg_comment,
    },
    "tf.contrib.summary.scalar": {
        ("family", 2): contrib_summary_family_arg_comment,
    },
    "tf.image.resize": {
        ("align_corners", 3): (
            ast_edits.WARNING,
            "align_corners is not supported by tf.image.resize, the new "
            "default transformation is close to what v1 provided. If you "
            "require exactly the same transformation as before, use "
            "compat.v1.image.resize."),
    },
    "tf.image.resize_bilinear": {
        ("align_corners", 2): (
            ast_edits.WARNING,
            "align_corners is not supported by tf.image.resize, the new "
            "default transformation is close to what v1 provided. If you "
            "require exactly the same transformation as before, use "
            "compat.v1.image.resize_bilinear."),
    },
    "tf.image.resize_area": {
        ("align_corners", 2): (
            ast_edits.WARNING,
            "align_corners is not supported by tf.image.resize, the new "
            "default transformation is close to what v1 provided. If you "
            "require exactly the same transformation as before, use "
            "compat.v1.image.resize_area."),
    },
    "tf.image.resize_bicubic": {
        ("align_corners", 2): (
            ast_edits.WARNING,
            "align_corners is not supported by tf.image.resize, the new "
            "default transformation is close to what v1 provided. If you "
            "require exactly the same transformation as before, use "
            "compat.v1.image.resize_bicubic."),
    },
    "tf.image.resize_nearest_neighbor": {
        ("align_corners", 2): (
            ast_edits.WARNING,
            "align_corners is not supported by tf.image.resize, the new "
            "default transformation is close to what v1 provided. If you "
            "require exactly the same transformation as before, use "
            "compat.v1.image.resize_nearest_neighbor."),
    },
}
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
# unknown), name is the name of the function called (or None is that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
# - none, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
canned_estimator_msg_optimizer = (
"tf.keras.optimizers.* only, so the call was converted to compat.v1. "
"Please note that tf.train.Optimizers have one-to-one correspondents "
"in tf.keras.optimizers, so you may be able to convert to the new "
"optimizers directly (See https://www.tensorflow.org/api_docs/python"
"/tf/keras/optimizers). Checkpoint compatibility is not guaranteed, "
"but there is a checkpoint converter tool that you can use.")
canned_estimator_msg = (
"no longer takes `input_layer_partitioner` arg, and it supports "
+ canned_estimator_msg_optimizer)
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.name_scope": _name_scope_transformer,
# TODO(b/129398290)
# "tf.string_split": _string_split_transformer,
"tf.strings.split": _string_split_rtype_transformer,
"tf.estimator.BaselineEstimator":
functools.partial(
_rename_if_arg_found_transformer,
arg_name="optimizer",
message=("tf.estimator.BaselineEstimator supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["optimizer"],
message=("tf.estimator.BaselineClassifier supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message=("tf.estimator.BaselineRegressor supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.DNNEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNEstimator no longer takes "
"input_layer_partitioner, so the call was converted to "
"compat.v1."
),
"tf.estimator.DNNClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNClassifier " + canned_estimator_msg,
),
"tf.estimator.DNNRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNRegressor " + canned_estimator_msg,
),
"tf.estimator.LinearEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearEstimator " + canned_estimator_msg,
),
"tf.estimator.LinearClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearClassifier " + canned_estimator_msg,
),
"tf.estimator.LinearRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearRegressor " + canned_estimator_msg,
),
"tf.estimator.DNNLinearCombinedEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedEstimator "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedClassifier "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedRegressor "
+ canned_estimator_msg),
),
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.contrib.summary.always_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="True"),
"tf.contrib.summary.audio": _add_summary_step_transformer,
"tf.contrib.summary.generic": _add_summary_step_transformer,
"tf.contrib.summary.histogram": _add_summary_step_transformer,
"tf.contrib.summary.image": _add_summary_step_transformer,
"tf.contrib.summary.never_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="False"),
"tf.contrib.summary.scalar": _add_summary_step_transformer,
"tf.contrib.layers.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"tf.contrib.layers.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"tf.contrib.layers.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.initializers.uniform_unit_scaling":
_add_uniform_scaling_initializer_transformer,
"tf.uniform_unit_scaling_initializer":
_add_uniform_scaling_initializer_transformer,
"slim.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"slim.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"slim.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"slim.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"slim.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.keras.models.save_model": functools.partial(
_add_argument_transformer,
arg_name="save_format",
arg_value_ast=ast.Str("h5")),
}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
  def preprocess(self, root_node):
    """Analyzes the tree and disables conversions if a pinned TF import exists.

    Runs a PastaAnalyzeVisitor over the tree. If the analysis detects imports
    of specific TF versions, all conversion tables on this spec are emptied so
    that only module deprecation checks remain active.

    Args:
      root_node: AST root of the file being upgraded.

    Returns:
      A (log, warnings_and_errors) pair produced by the analysis visitor.
    """
    visitor = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
    visitor.visit(root_node)
    detections = set(visitor.results)
    # If we have detected the presence of imports of specific TF versions,
    # We want to modify the update spec to check only module deprecations
    # and skip all other conversions.
    if detections:
      self.function_handle = {}
      self.function_reorders = {}
      self.function_keyword_renames = {}
      self.symbol_renames = {}
      self.function_warnings = {}
      self.change_to_function = {}
      self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
      self.function_transformers = {}
      self.import_renames = {}
    return visitor.log, visitor.warnings_and_errors
  def clear_preprocessing(self):
    """Restores the full conversion spec by re-running __init__."""
    self.__init__()
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
return isinstance(node, allowed_types)
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
                                     arg_name=None,
                                     arg_ok_predicate=None,
                                     remove_if_ok=False,
                                     message=None):
  """Replaces the given call with tf.compat.v1 if the given arg is found.

  This requires the function to be called with all named args, so for using
  this transformer, the function should also be added to renames.

  If the arg is not found, the call site is left alone.

  If the arg is found, and if arg_ok_predicate is given, it is called with
  the ast Expression representing the argument value found. If it returns
  True, the function is left alone.

  If the arg is found, arg_ok_predicate is not None and returns ok, and
  remove_if_ok is True, the argument is removed from the call.

  Otherwise, `compat.v1` is inserted between tf and the function name.

  Args:
    parent: Parent of node.
    node: ast.Call node to maybe modify.
    full_name: full name of function to modify
    name: name of function to modify
    logs: list of logs to append to
    arg_name: name of the argument to look for
    arg_ok_predicate: predicate callable with the ast of the argument value,
      returns whether the argument value is allowed.
    remove_if_ok: remove the argument if present and ok as determined by
      arg_ok_predicate.
    message: message to print if a non-ok arg is found (and hence, the function
      is renamed to its compat.v1 version).

  Returns:
    node, if it was modified, else None.
  """
  # Check whether arg is there.
  arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
  if not arg_present:
    return
  # Check whether arg is problematic (and if not, maybe remove it).
  if arg_ok_predicate and arg_ok_predicate(arg_value):
    if remove_if_ok:
      # Drop the keyword in place; a keyword can appear at most once in a
      # valid call, so stop at the first match.
      for i, kw in enumerate(node.keywords):
        if kw.arg == arg_name:
          node.keywords.pop(i)
          logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                       "Removed argument %s for function %s" % (
                           arg_name, full_name or name)))
          break
      return node
    else:
      # Arg is present and acceptable: leave the call site untouched.
      return
  # All conditions met, insert v1 and log what we did.
  # We must have a full name, so the func is an attribute.
  new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
  node.func = ast_edits.full_name_node(new_name)
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "Renaming %s to %s because argument %s is present. %s" %
      (full_name, new_name, arg_name, message if message is not None else "")
  ))
  return node
def _add_argument_transformer(parent, node, full_name, name, logs,
                              arg_name, arg_value_ast):
  """Appends the keyword argument `arg_name=arg_value_ast` to the call."""
  new_kwarg = ast.keyword(arg=arg_name, value=arg_value_ast)
  node.keywords.append(new_kwarg)
  log_entry = (ast_edits.INFO, node.lineno, node.col_offset,
               "Adding argument '%s' to call to %s." % (pasta.dump(new_kwarg),
                                                        full_name or name))
  logs.append(log_entry)
  return node
def _iterator_transformer(parent, node, full_name, name, logs):
  """Transform iterator methods to compat function calls."""
  # Calls already under tf.compat.v1.data, and calls under tf.data (handled
  # by the rename tables), are not rewritten here; this transformer only
  # performs the method-call to function-call conversion.
  if full_name:
    if full_name.startswith("tf.compat.v1.data") or full_name.startswith(
        "tf.data"):
      return
  # Only method calls (Attribute funcs) can be converted; we should never be
  # invoked on anything else.
  if not isinstance(node.func, ast.Attribute):
    return
  # Rewrite x.f(y) as tf.compat.v1.data.f(x, y). The receiver node already
  # carries valid position info, so it can be reused as the first argument.
  receiver = node.func.value
  node.args.insert(0, receiver)
  node.func.value = ast_edits.full_name_node("tf.compat.v1.data")
  logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
               "Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
               "Please check this transformation.\n" % (name, name)))
  return node
def _dropout_transformer(parent, node, full_name, name, logs):
  """Replace keep_prob with 1-rate.

  tf.nn.dropout's second argument changed meaning from keep_prob to rate,
  so the value is rewritten as `1 - (old_value)` whether it was passed as a
  keyword or positionally.
  """
  def _replace_keep_prob_node(parent, old_value):
    """Replaces old_value with 1-(old_value)."""
    one = ast.Num(n=1)
    one.lineno = 0
    one.col_offset = 0
    new_value = ast.BinOp(left=one, op=ast.Sub(),
                          right=old_value)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    # Put parentheses around keep_prob.value (and remove the old prefix/
    # suffix, they should only be around new_value).
    pasta.base.formatting.set(old_value, "prefix", "(")
    pasta.base.formatting.set(old_value, "suffix", ")")
  # Check if we have a keep_prob keyword arg
  for keep_prob in node.keywords:
    if keep_prob.arg == "keep_prob":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Changing keep_prob arg of tf.nn.dropout to rate\n"))
      keep_prob.arg = "rate"
      _replace_keep_prob_node(keep_prob, keep_prob.value)
      return node
  # Maybe it was a positional arg
  if len(node.args) < 2:
    # Fewer than two positional args and no keyword: keep_prob is missing,
    # so no automatic rewrite is possible.
    logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
                 "tf.nn.dropout called without arguments, so "
                 "automatic fix was disabled. tf.nn.dropout has changed "
                 "the semantics of the second argument."))
  else:
    # keep_prob is the second positional argument.
    _replace_keep_prob_node(node, node.args[1])
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Changing keep_prob arg of tf.nn.dropout to rate, and "
                 "recomputing value.\n"))
  return node
def _cast_transformer(parent, node, full_name, name, logs):
  """Transforms to_int and to_float to cast(..., dtype=...)."""
  # Derive the target dtype from the function name ("to_int32" -> "int32");
  # "float" and "double" are shorthands that need expanding.
  dtype_str = name[3:]
  dtype_str = {"float": "float32", "double": "float64"}.get(dtype_str,
                                                            dtype_str)
  tf_name = ast.Name(id="tf", ctx=ast.Load())
  dtype_attr = ast.Attribute(value=tf_name, attr=dtype_str, ctx=ast.Load())
  new_arg = ast.keyword(arg="dtype", value=dtype_attr)
  # A second positional argument is the `name` arg; turn it into a keyword
  # so the appended dtype keyword stays valid.
  if len(node.args) == 2:
    node.keywords.append(ast.keyword(arg="name", value=node.args.pop()))
  # Python3 ast requires the args for the Attribute, but codegen will mess up
  # the arg order if we just set them to 0.
  dtype_attr.lineno = node.lineno
  dtype_attr.col_offset = node.col_offset+100
  node.keywords.append(new_arg)
  # Retarget the call itself to `cast`, whether it was written as an
  # attribute access (tf.to_int32) or a bare name (to_int32).
  if isinstance(node.func, ast.Attribute):
    node.func.attr = "cast"
  else:
    assert isinstance(node.func, ast.Name)
    node.func.id = "cast"
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
                                                                  dtype_str)))
  return node
def _softmax_cross_entropy_with_logits_transformer(
    parent, node, full_name, name, logs):
  """Wrap labels argument with stop_gradients."""
  def _wrap_label(parent, old_value):
    """Wrap labels with tf.stop_gradient."""
    # Avoid double-wrapping when the caller already passed
    # tf.stop_gradient(...) as the labels value.
    already_stop_grad = (isinstance(old_value, ast.Call) and
                         isinstance(old_value.func, ast.Attribute) and
                         old_value.func.attr == "stop_gradient" and
                         isinstance(old_value.func.value, ast.Name) and
                         old_value.func.value.id == "tf")
    if already_stop_grad:
      return False
    # NOTE(review): the fallback presumably supports older ast.Call
    # signatures that required the extra starargs/kwargs slots — confirm.
    try:
      new_value = ast.Call(
          ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
          [old_value], [])
    except TypeError:
      new_value = ast.Call(
          ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
          [old_value], [], None, None)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    return True
  # Check if we have a labels keyword arg
  for karg in node.keywords:
    if karg.arg == "labels":
      if _wrap_label(karg, karg.value):
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "Changing labels arg of "
                     "tf.nn.softmax_cross_entropy_with_logits to "
                     "tf.stop_gradient(labels). Please check this "
                     "transformation.\n"))
      return node
  return node
def _image_resize_transformer(parent, node, full_name, name, logs):
  """Transforms image.resize_* to image.resize(..., method=*, ...)."""
  # The resize method is encoded in the function name:
  # "resize_bilinear" -> "BILINEAR" (name[7:] strips "resize_").
  resize_method = name[7:].upper()
  new_arg = ast.keyword(arg="method",
                        value=ast.Attribute(
                            value=ast.Attribute(
                                value=ast.Attribute(
                                    value=ast.Name(id="tf", ctx=ast.Load()),
                                    attr="image", ctx=ast.Load()),
                                attr="ResizeMethod", ctx=ast.Load()),
                            attr=resize_method, ctx=ast.Load()))
  # Ensures a valid transformation when a positional name arg is given
  if len(node.args) == 4:
    pos_arg = ast.keyword(arg="preserve_aspect_ratio",
                          value=node.args[-1])
    node.args = node.args[:-1]
    node.keywords.append(pos_arg)
  if len(node.args) == 3:
    # NOTE(review): this pos_arg is built but never appended; together with
    # the keyword filter below, the align_corners argument is dropped
    # entirely. Presumably intentional, but confirm.
    pos_arg = ast.keyword(arg="align_corners",
                          value=node.args[-1])
    node.args = node.args[:-1]
  # Drop any keyword align_corners argument as well.
  new_keywords = []
  for kw in node.keywords:
    if kw.arg != "align_corners":
      new_keywords.append(kw)
  node.keywords = new_keywords
  # Python3 ast requires the args for the Attribute, but codegen will mess up
  # the arg order if we just set them to 0.
  new_arg.value.lineno = node.lineno
  new_arg.value.col_offset = node.col_offset+100
  node.keywords.append(new_arg)
  # Retarget the call to `resize`, attribute or bare-name form alike.
  if isinstance(node.func, ast.Attribute):
    node.func.attr = "resize"
  else:
    assert isinstance(node.func, ast.Name)
    node.func.id = "resize"
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changed %s call to tf.image.resize(..., "
               "method=tf.image.ResizeMethod.%s)." % (full_name,
                                                      resize_method)))
  return node
def _pool_seed_transformer(parent, node, full_name, name, logs):
  """Removes seed2 and deterministic, and adds non-zero seed if needed.

  Args:
    parent: Parent of node.
    node: ast.Call node to maybe modify.
    full_name: full name of the called function, if known.
    name: short name of the called function.
    logs: list of (severity, line, col, msg) tuples to append to.

  Returns:
    node, if it was modified, else None.
  """
  # This requires that this function uses all kwargs (add to renames!).
  seed_arg = None
  deterministic = False
  modified = False
  new_keywords = []
  for kw in node.keywords:
    # **kwargs expansions (ast.Starred keywords, Python 3.5+) cannot be
    # inspected; skip them entirely (they are also dropped from the call).
    if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
      pass
    elif kw.arg == "seed":
      seed_arg = kw
    elif kw.arg == "seed2" or kw.arg == "deterministic":
      lineno = getattr(kw, "lineno", node.lineno)
      col_offset = getattr(kw, "col_offset", node.col_offset)
      logs.append((ast_edits.INFO, lineno, col_offset,
                   "Removed argument %s for function %s" % (
                       kw.arg, full_name or name)))
      if kw.arg == "deterministic":
        # Anything other than a literal False may request determinism.
        if not _is_ast_false(kw.value):
          deterministic = True
      modified = True
      continue
    new_keywords.append(kw)
  if deterministic:
    if seed_arg is None:
      new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
      # `logs` is a list; the previous `logs.add(...)` (a set method) raised
      # AttributeError whenever this path was taken.
      logs.append((
          ast_edits.INFO, node.lineno, node.col_offset,
          "Adding seed=42 to call to %s since determinism was requested" % (
              full_name or name)
      ))
    else:
      # The message contains a %s placeholder, so it must actually be
      # formatted with the function name.
      logs.append((
          ast_edits.WARNING, node.lineno, node.col_offset,
          "The deterministic argument is deprecated for %s, pass a "
          "non-zero seed for determinism. The deterministic argument is "
          "present, possibly not False, and the seed is already set. The "
          "converter cannot determine whether it is nonzero, please check."
          % (full_name or name)
      ))
  if modified:
    node.keywords = new_keywords
    return node
  else:
    return
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
  """Rewrites the uniform_noise arg of tf.image.extract_glimpse as noise."""

  def _replace_uniform_noise_node(parent, old_value):
    """Replaces old_value with 'uniform' or 'gaussian'."""
    uniform = ast.Str(s="uniform")
    gaussian = ast.Str(s="gaussian")
    new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    # Put parentheses around noise.value.test (and remove the old prefix/
    # suffix, they should only be around new_value.test), so that:
    # "uniform" if (a if b else c) else "gaussian" is valid.
    pasta.base.formatting.set(new_value.test, "prefix", "(")
    pasta.base.formatting.set(new_value.test, "suffix", ")")

  # Check if we have a uniform_noise keyword arg
  for uniform_noise in node.keywords:
    if uniform_noise.arg == "uniform_noise":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Changing uniform_noise arg of tf.image.extract_glimpse "
                   "to noise, and recomputing value. Please check this "
                   "transformation.\n"))
      uniform_noise.arg = "noise"
      _replace_uniform_noise_node(uniform_noise, uniform_noise.value)
      return node
  # `uniform_noise` is the *sixth* positional arg (index 5), so nothing
  # needs to be done if len(node.args) < 6; the previous `>= 5` guard
  # raised IndexError on calls with exactly five positional args.
  if len(node.args) >= 6:
    _replace_uniform_noise_node(node, node.args[5])
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Changing uniform_noise arg of tf.image.extract_glimpse to "
                 "noise, and recomputing value.\n"))
  return node
def _add_summary_step_transformer(parent, node, full_name, name, logs):
  """Adds a step argument to the summary API call if not specified.

  The inserted argument value is tf.compat.v1.train.get_or_create_global_step().

  Args:
    parent: Parent of node.
    node: ast.Call node to maybe modify.
    full_name: full name of the called function, if known.
    name: short name of the called function.
    logs: list of (severity, line, col, msg) tuples to append to.

  Returns:
    The (possibly modified) node.
  """
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "step":
      # Caller already passes an explicit step; nothing to do.
      return node
  default_value = "tf.compat.v1.train.get_or_create_global_step()"
  # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
  ast_value = pasta.parse(default_value)
  node.keywords.append(ast.keyword(arg="step", value=ast_value))
  logs.append((
      ast_edits.WARNING, node.lineno, node.col_offset,
      "Summary API writing function %s now requires a 'step' argument; "
      "inserting default of %s." % (full_name or name, default_value)))
  return node
def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs,
                                            cond):
  """Adds cond argument to tf.contrib.summary.xxx_record_summaries().

  This is in anticipation of them being renamed to tf.summary.record_if(), which
  requires the cond argument.

  Args:
    parent: Parent of node.
    node: ast.Call node to modify.
    full_name: full name of the called function, if known.
    name: short name of the called function.
    logs: list of (severity, line, col, msg) tuples to append to.
    cond: source text of the condition to append (e.g. "True").

  Returns:
    The modified node.
  """
  # Parse with pasta so the appended argument keeps clean formatting.
  node.args.append(pasta.parse(cond))
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "Adding `%s` argument to %s in anticipation of it being renamed to "
      "tf.compat.v2.summary.record_if()" % (cond, full_name or name)))
  return node
def _add_loss_reduction_transformer(parent, node, full_name, name, logs):
  """Adds a loss_reduction argument if not specified.

  Default value for tf.estimator.*Classifier and tf.estimator.*Regressor
  loss_reduction argument changed to SUM_OVER_BATCH_SIZE. So, we update
  existing calls to use the old default value `tf.losses.Reduction.SUM`.

  Note: to apply this transformation, symbol must be added
  to reordered_function_names above.

  Args:
    parent: Parent of node.
    node: ast.Call node to maybe modify.
    full_name: full name of the called function, if known.
    name: short name of the called function.
    logs: list of (severity, line, col, msg) tuples to append to.

  Returns:
    The (possibly modified) node.
  """
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "loss_reduction":
      # Caller passes an explicit loss_reduction; nothing to do.
      return node
  # TODO(annarev): this should be updated to tf.keras.losses.Reduction.SUM
  # once b/125525822 is fixed.
  default_value = "tf.compat.v1.losses.Reduction.SUM"
  # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
  ast_value = pasta.parse(default_value)
  node.keywords.append(ast.keyword(arg="loss_reduction", value=ast_value))
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "%s: Default value of loss_reduction has been changed to "
      "SUM_OVER_BATCH_SIZE; inserting old default value %s.\n"
      % (full_name or name, default_value)))
  return node
def _rename_if_any_arg_found_transformer(
    parent,
    node,
    full_name,
    name,
    logs,
    arg_names=None,
    arg_ok_predicate=None,
    remove_if_ok=False,
    message=None):
  """Replaces the given call with tf.compat.v1 if any of the arg_names is found.

  Delegates to _rename_if_arg_found_transformer once per candidate argument,
  keeping the latest modified node.

  Args:
    parent: Parent of node.
    node: ast.Call node to modify.
    full_name: full name of function to modify.
    name: name of function to modify.
    logs: list of logs to append to.
    arg_names: list of names of the argument to look for.
    arg_ok_predicate: predicate callable with the ast of the argument value,
      returns whether the argument value is allowed.
    remove_if_ok: remove the argument if present and ok as determined by
      arg_ok_predicate.
    message: message to print if a non-ok arg is found (and hence, the function
      is renamed to its compat.v1 version).

  Returns:
    node, if it was modified, else None.
  """
  for candidate_arg in arg_names:
    maybe_renamed = _rename_if_arg_found_transformer(
        parent, node, full_name, name, logs, candidate_arg,
        arg_ok_predicate, remove_if_ok, message)
    if maybe_renamed is not None:
      node = maybe_renamed
  return node
def _rename_if_arg_found_and_add_loss_reduction_transformer(
    parent,
    node,
    full_name,
    name,
    logs,
    arg_names=None,
    arg_ok_predicate=None,
    remove_if_ok=False,
    message=None):
  """Combination of _rename_if_arg_found and _add_loss_reduction transformers.

  Args:
    parent: Parent of node.
    node: ast.Call node to maybe modify.
    full_name: full name of function to modify
    name: name of function to modify
    logs: list of logs to append to
    arg_names: list of names of the argument to look for
    arg_ok_predicate: predicate callable with the ast of the argument value,
      returns whether the argument value is allowed.
    remove_if_ok: remove the argument if present and ok as determined by
      arg_ok_predicate.
    message: message to print if a non-ok arg is found (and hence, the function
      is renamed to its compat.v1 version).

  Returns:
    node, if it was modified, else None.
  """
  # First inject the legacy loss_reduction default, then run the
  # per-argument rename check for every candidate argument.
  node = _add_loss_reduction_transformer(parent, node, full_name, name, logs)
  for candidate_arg in arg_names:
    maybe_renamed = _rename_if_arg_found_transformer(
        parent, node, full_name, name, logs, candidate_arg,
        arg_ok_predicate, remove_if_ok, message)
    if maybe_renamed is not None:
      node = maybe_renamed
  return node
def _add_uniform_scaling_initializer_transformer(
    parent, node, full_name, name, logs):
  """Updates references to uniform_unit_scaling_initializer.

  Transforms:
  tf.uniform_unit_scaling_initializer(factor, seed, dtype) to
  tf.compat.v1.keras.initializers.VarianceScaling(
      scale=factor, distribution="uniform", seed=seed)

  Note: to apply this transformation, symbol must be added
  to reordered_function_names above.
  """
  # `factor` is called `scale` in the keras initializer.
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "factor":
      keyword_arg.arg = "scale"
  distribution_value = "\"uniform\""
  # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
  ast_value = pasta.parse(distribution_value)
  node.keywords.append(ast.keyword(arg="distribution", value=ast_value))
  # Preserve the original call's position info while retargeting it to
  # tf.compat.v1.keras.initializers.VarianceScaling.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "VarianceScaling"
  return node
def _contrib_layers_xavier_initializer_transformer(
    parent, node, full_name, name, logs):
  """Updates references to contrib.layers.xavier_initializer.

  Transforms:
  tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
  tf.compat.v1.keras.initializers.VarianceScaling(
      scale=1.0, mode="fan_avg",
      distribution=("uniform" if uniform else "truncated_normal"),
      seed=seed, dtype=dtype)

  Returns: The new node
  """
  def _get_distribution(old_value):
    """Returns an AST matching the following:
    ("uniform" if (old_value) else "truncated_normal")
    """
    dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"")
    ifexpr = dist.body[0].value
    pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
    # Parenthesize the whole conditional expression.
    pasta.base.formatting.set(dist, "prefix", "(")
    pasta.base.formatting.set(dist, "suffix", ")")
    return dist
  found_distribution = False
  # A keyword `uniform` arg becomes the `distribution` conditional.
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "uniform":
      found_distribution = True
      keyword_arg.arg = "distribution"
      old_value = keyword_arg.value
      new_value = _get_distribution(keyword_arg.value)
      pasta.ast_utils.replace_child(keyword_arg, old_value, new_value)
      pasta.base.formatting.set(keyword_arg.value, "prefix", "(")
      pasta.base.formatting.set(keyword_arg.value, "suffix", ")")
  new_keywords = []
  scale = pasta.parse("1.0")
  new_keywords.append(ast.keyword(arg="scale", value=scale))
  mode = pasta.parse("\"fan_avg\"")
  new_keywords.append(ast.keyword(arg="mode", value=mode))
  # Positional args are (uniform, seed, dtype) in the contrib signature.
  if len(node.args) >= 1:
    found_distribution = True
    dist = _get_distribution(node.args[0])
    new_keywords.append(ast.keyword(arg="distribution", value=dist))
  if not found_distribution:
    # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
    uniform_dist = pasta.parse("\"uniform\"")
    new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist))
  if len(node.args) >= 2:
    new_keywords.append(ast.keyword(arg="seed", value=node.args[1]))
  if len(node.args) >= 3:
    new_keywords.append(ast.keyword(arg="dtype", value=node.args[2]))
  node.args = []
  node.keywords = new_keywords + node.keywords
  # Preserve position info while retargeting the call.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "VarianceScaling"
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changing tf.contrib.layers xavier initializer"
               " to a tf.compat.v1.keras.initializers.VarianceScaling and"
               " converting arguments.\n"))
  return node
def _contrib_layers_variance_scaling_initializer_transformer(
    parent, node, full_name, name, logs):
  """Updates references to contrib.layers.variance_scaling_initializer.

  Transforms:
  tf.contrib.layers.variance_scaling_initializer(
    factor, mode, uniform, seed, dtype
  ) to
  tf.compat.v1.keras.initializers.VarianceScaling(
      scale=factor, mode=mode.lower(),
      distribution=("uniform" if uniform else "truncated_normal"),
      seed=seed, dtype=dtype)

  And handles the case where no factor is provided and scale needs to be
  set to 2.0 to match contrib's default instead of tf.keras.initializer's
  default of 1.0
  """
  def _replace_distribution(parent, old_value):
    """Replaces old_value: ("uniform" if (old_value) else "truncated_normal")"""
    new_value = pasta.parse(
        "\"uniform\" if old_value else \"truncated_normal\"")
    ifexpr = new_value.body[0].value
    pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    # Parenthesize the whole conditional expression.
    pasta.base.formatting.set(new_value, "prefix", "(")
    pasta.base.formatting.set(new_value, "suffix", ")")
  def _replace_mode(parent, old_value):
    """Replaces old_value with (old_value).lower()."""
    new_value = pasta.parse("mode.lower()")
    mode = new_value.body[0].value.func
    pasta.ast_utils.replace_child(mode, mode.value, old_value)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    # Put parentheses around keep_prob.value (and remove the old prefix/
    # suffix, they should only be around new_value).
    pasta.base.formatting.set(old_value, "prefix", "(")
    pasta.base.formatting.set(old_value, "suffix", ")")
  # Need to keep track of scale because slim & keras
  # have different defaults
  found_scale = False
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "factor":
      keyword_arg.arg = "scale"
      found_scale = True
    if keyword_arg.arg == "mode":
      _replace_mode(keyword_arg, keyword_arg.value)
    if keyword_arg.arg == "uniform":
      keyword_arg.arg = "distribution"
      _replace_distribution(keyword_arg, keyword_arg.value)
  # Handle any detected positional arguments
  if len(node.args) >= 1:
    # First positional arg is `factor` (becomes `scale`).
    found_scale = True
  if len(node.args) >= 2:
    _replace_mode(node, node.args[1])
  if len(node.args) >= 3:
    _replace_distribution(node, node.args[2])
  # If no scale was provided, make tf 2.0 use slim's default factor
  if not found_scale:
    # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
    scale_value = pasta.parse("2.0")
    node.keywords = ([ast.keyword(arg="scale", value=scale_value)]
                     + node.keywords)
  # Preserve position info while retargeting the call.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "VarianceScaling"
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changing tf.contrib.layers.variance_scaling_initializer"
               " to a tf.compat.v1.keras.initializers.VarianceScaling and"
               " converting arguments.\n"))
  return node
def _contrib_layers_l1_regularizer_transformer(
    parent, node, full_name, name, logs):
  """Replace slim l1 regularizer with Keras one.

  This entails renaming the 'scale' arg to 'l' and dropping any
  provided scope arg.
  """
  # Check if we have a scale or scope keyword arg
  scope_keyword = None
  for keyword in node.keywords:
    if keyword.arg == "scale":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Renaming scale arg of regularizer\n"))
      keyword.arg = "l"
    if keyword.arg == "scope":
      scope_keyword = keyword
  # Remove the scope keyword or arg if it is present
  if scope_keyword:
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l1_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l1\n"))
    node.keywords.remove(scope_keyword)
  if len(node.args) > 1:
    # scope was the second positional arg; keep only scale.
    node.args = node.args[:1]
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l1_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l1\n"))
  # Preserve position info while retargeting to tf.keras.regularizers.l1.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "l1"
  return node
def _contrib_layers_l2_regularizer_transformer(
    parent, node, full_name, name, logs):
  """Replace slim l2 regularizer with Keras one, with l=0.5*scale.

  Also drops the scope argument.
  """
  def _replace_scale_node(parent, old_value):
    """Replaces old_value with 0.5*(old_value)."""
    half = ast.Num(n=0.5)
    half.lineno = 0
    half.col_offset = 0
    new_value = ast.BinOp(left=half, op=ast.Mult(),
                          right=old_value)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    # Put parentheses around scale.value (and remove the old prefix/
    # suffix, they should only be around new_value).
    pasta.base.formatting.set(old_value, "prefix", "(")
    pasta.base.formatting.set(old_value, "suffix", ")")
  # Check if we have a scale or scope keyword arg
  scope_keyword = None
  for keyword in node.keywords:
    if keyword.arg == "scale":
      keyword.arg = "l"
      _replace_scale_node(keyword, keyword.value)
    if keyword.arg == "scope":
      scope_keyword = keyword
  # Maybe it was a positional arg
  if len(node.args) >= 1:
    _replace_scale_node(node, node.args[0])
  # Remove the scope keyword or arg if it is present
  if scope_keyword:
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l2_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l2\n"))
    node.keywords.remove(scope_keyword)
  if len(node.args) > 1:
    # scope was the second positional arg; keep only scale.
    node.args = node.args[:1]
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l2_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l2\n"))
  # NOTE(review): this log is emitted unconditionally, even when no scale
  # argument was found above — confirm that is intended.
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Multiplying scale arg of tf.contrib.layers.l2_regularizer"
               " by half to what tf.keras.regularizers.l2 expects.\n"))
  # Preserve position info while retargeting to tf.keras.regularizers.l2.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "l2"
  return node
def _name_scope_transformer(parent, node, full_name, name, logs):
  """Fix name scope invocation to use 'default_name' and omit 'values' args.

  Three outcomes depending on the arguments of the `name_scope` call:
  a real `name` -> rewrite to tf.compat.v1.name_scope; only `default_name`
  -> use it as `name` and drop everything else; neither -> log an error.
  """

  name_found, name = ast_edits.get_arg_value(node, "name", 0)
  default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1)

  # If an actual name was given...
  if name_found and pasta.dump(name) != "None":
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "`name` passed to `name_scope`. Because you may be re-entering"
                 " an existing scope, it is not safe to convert automatically, "
                 " the v2 name_scope does not support re-entering scopes by"
                 " name.\n"))
    # Rename to compat.v1
    new_name = "tf.compat.v1.name_scope"
    logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset,
                 "Renamed %r to %r" % (full_name, new_name)))
    new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
    ast.copy_location(new_name_node, node.func)
    pasta.ast_utils.replace_child(node, node.func, new_name_node)
    return node

  if default_found:
    # New name scope doesn't have name, but it has a default name. We use
    # name=default_name, and values can be dropped (it's only for
    # error reporting and useless outside of graph mode).
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Using default_name as name in call to name_scope.\n"))
    # Remove all args other than name
    node.args = []
    node.keywords = [ast.keyword(arg="name", value=default_name)]
    return node

  logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
               "name_scope call with neither name nor default_name cannot be "
               "converted properly."))
  # NOTE(review): falls through returning None here — presumably the caller
  # treats None as "leave the call unchanged"; confirm against ast_edits.
def _rename_to_compat_v1(node, full_name, logs, reason):
  """Rewrite `node`'s call target to its tf.compat.v1 twin, logging `reason`."""
  compat_name = full_name.replace("tf.", "tf.compat.v1.", 1)
  return _rename_func(node, full_name, compat_name, logs, reason)
def _rename_func(node, full_name, new_name, logs, reason):
  """Point the function reference of call `node` at `new_name`.

  Appends an INFO entry explaining the rename, then swaps node.func for a
  freshly-built name node carrying the original source location.
  """
  message = "Renamed %r to %r: %s" % (full_name, new_name, reason)
  logs.append((ast_edits.INFO, node.lineno, node.col_offset, message))
  replacement = ast_edits.full_name_node(new_name, node.func.ctx)
  ast.copy_location(replacement, node.func)
  pasta.ast_utils.replace_child(node, node.func, replacement)
  return node
def _string_split_transformer(parent, node, full_name, name, logs):
  """Update tf.string_split arguments: skip_empty, sep, result_type, source.

  Rewrites the call only when the v2 behavior is provably equivalent;
  otherwise bails out by renaming the call to tf.compat.v1.string_split.
  """

  # Check the skip_empty parameter: if not false, then use compat.v1.
  for i, kw in enumerate(node.keywords):
    if kw.arg == "skip_empty":
      # _is_ast_false is a module-level helper defined elsewhere in this file.
      if _is_ast_false(kw.value):
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "removed argument skip_empty for tf.string_split."))
        node.keywords.pop(i)
        break
      else:
        return _rename_to_compat_v1(
            node, full_name, logs, "tf.string_split's replacement no longer "
            "takes the skip_empty argument.")

  # Check the sep parameter: if it's definitely an empty string, use
  # tf.strings.bytes_split(). If we can't tell, then use compat.v1.
  found_sep = False
  for i, kw in enumerate(node.keywords):
    if kw.arg == "sep":
      found_sep = True
      if isinstance(kw.value, ast.Str):
        if kw.value.s == "":
          # sep="" meant byte-splitting, which has a dedicated v2 endpoint.
          node = _rename_func(
              node, full_name, "tf.strings.bytes_split", logs,
              "Splitting bytes is not handled by tf.strings.bytes_split().")
          node.keywords.pop(i)
      else:
        # sep is some non-literal expression; its runtime value is unknowable
        # here, so fall back to the v1 endpoint.
        return _rename_to_compat_v1(
            node, full_name, logs,
            "The semantics for tf.string_split's sep parameter have changed "
            "when sep is the empty string; but sep is not a string literal, "
            "so we can't tell if it's an empty string.")
  if not found_sep:
    return _rename_to_compat_v1(
        node, full_name, logs,
        "The semantics for tf.string_split's sep parameter have changed "
        "when sep unspecified: it now splits on all whitespace, not just "
        "the space character.")

  # Check the result_type parameter
  return _string_split_rtype_transformer(parent, node, full_name, name, logs)
def _string_split_rtype_transformer(parent, node, full_name, name, logs):
  """Update tf.strings.split arguments: result_type, source.

  Drops a literal result_type kwarg, renames source -> input, and (unless
  result_type was "RaggedTensor") wraps the call in `.to_sparse()` to match
  the v1 SparseTensor return.
  """

  # Remove the "result_type" argument.
  need_to_sparse = True
  for i, kw in enumerate(node.keywords):
    if kw.arg == "result_type":
      if (isinstance(kw.value, ast.Str) and
          kw.value.s in ("RaggedTensor", "SparseTensor")):
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "Removed argument result_type=%r for function %s" %
                     (kw.value.s, full_name or name)))
        node.keywords.pop(i)
        if kw.value.s == "RaggedTensor":
          # v2 already returns a RaggedTensor, so no conversion is needed.
          need_to_sparse = False
      else:
        # result_type is not a recognized string literal; punt to compat.v1.
        return _rename_to_compat_v1(
            node, full_name, logs,
            "%s no longer takes the result_type parameter." % full_name)
      break

  for i, kw in enumerate(node.keywords):
    if kw.arg == "source":
      kw.arg = "input"

  # If necessary, add a call to .to_sparse() to convert the output of
  # strings.split from a RaggedTensor to a SparseTensor.
  if need_to_sparse:
    if (isinstance(parent, ast.Attribute) and parent.attr == "to_sparse"):
      return  # Prevent infinite recursion (since child nodes are transformed)
    logs.append(
        (ast_edits.INFO, node.lineno, node.col_offset,
         "Adding call to RaggedTensor.to_sparse() to result of strings.split, "
         "since it now returns a RaggedTensor."))
    node = ast.Attribute(value=copy.deepcopy(node), attr="to_sparse")
    try:
      # ast.Call's constructor signature differs across Python versions; the
      # except branch supplies the older five-argument form.
      node = ast.Call(node, [], [])
    except TypeError:
      node = ast.Call(node, [], [], None, None)
    return node
|
ghchinoy/tensorflow
|
tensorflow/tools/compatibility/tf_upgrade_v2.py
|
Python
|
apache-2.0
| 98,859
|
[
"Gaussian",
"VisIt"
] |
ee046f88496e5f8053d673ce88ed2bae48fc830b3f938e38f264d57abf2e8766
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._vim_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file is autogenerated by scripts/get_vimkw.py
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Split up in multiple functions so it's importable by jython, which has a
# per-method size limit.
def _getauto():
    """Return the (abbreviation, full name) pairs for Vim autocommand events.

    For autocommand events the minimal abbreviation equals the full name,
    so every pair simply repeats the event name.
    """
    event_names = (
        'BufAdd', 'BufCreate', 'BufDelete', 'BufEnter', 'BufFilePost',
        'BufFilePre', 'BufHidden', 'BufLeave', 'BufNew', 'BufNewFile',
        'BufRead', 'BufReadCmd', 'BufReadPost', 'BufReadPre', 'BufUnload',
        'BufWinEnter', 'BufWinLeave', 'BufWipeout', 'BufWrite', 'BufWriteCmd',
        'BufWritePost', 'BufWritePre', 'Cmd', 'CmdwinEnter', 'CmdwinLeave',
        'ColorScheme', 'CompleteDone', 'CursorHold', 'CursorHoldI',
        'CursorMoved', 'CursorMovedI', 'EncodingChanged', 'FileAppendCmd',
        'FileAppendPost', 'FileAppendPre', 'FileChangedRO', 'FileChangedShell',
        'FileChangedShellPost', 'FileEncoding', 'FileReadCmd', 'FileReadPost',
        'FileReadPre', 'FileType', 'FileWriteCmd', 'FileWritePost',
        'FileWritePre', 'FilterReadPost', 'FilterReadPre', 'FilterWritePost',
        'FilterWritePre', 'FocusGained', 'FocusLost', 'FuncUndefined',
        'GUIEnter', 'GUIFailed', 'InsertChange', 'InsertCharPre',
        'InsertEnter', 'InsertLeave', 'MenuPopup', 'QuickFixCmdPost',
        'QuickFixCmdPre', 'QuitPre', 'RemoteReply', 'SessionLoadPost',
        'ShellCmdPost', 'ShellFilterPost', 'SourceCmd', 'SourcePre',
        'SpellFileMissing', 'StdinReadPost', 'StdinReadPre', 'SwapExists',
        'Syntax', 'TabEnter', 'TabLeave', 'TermChanged', 'TermResponse',
        'TextChanged', 'TextChangedI', 'User', 'UserGettingBored', 'VimEnter',
        'VimLeave', 'VimLeavePre', 'VimResized', 'WinEnter', 'WinLeave',
        'event',
    )
    return tuple((name, name) for name in event_names)

# Module-level tuple consumed by the Vim lexer.
auto = _getauto()
def _getcommand():
    """Return (minimal abbreviation, full name) pairs for Vim ex commands.

    Autogenerated by scripts/get_vimkw.py — do not edit the data by hand.
    An abbreviation may map to several full names (e.g. 'ar' -> 'ar'/'args');
    pairs where both members are equal have no longer form in Vim's table.
    """
    var = (
        ('a','a'),
        ('ab','ab'),
        ('abc','abclear'),
        ('abo','aboveleft'),
        ('al','all'),
        ('ar','ar'),
        ('ar','args'),
        ('arga','argadd'),
        ('argd','argdelete'),
        ('argdo','argdo'),
        ('arge','argedit'),
        ('argg','argglobal'),
        ('argl','arglocal'),
        ('argu','argument'),
        ('as','ascii'),
        ('au','au'),
        ('b','buffer'),
        ('bN','bNext'),
        ('ba','ball'),
        ('bad','badd'),
        ('bd','bdelete'),
        ('bel','belowright'),
        ('bf','bfirst'),
        ('bl','blast'),
        ('bm','bmodified'),
        ('bn','bnext'),
        ('bo','botright'),
        ('bp','bprevious'),
        ('br','br'),
        ('br','brewind'),
        ('brea','break'),
        ('breaka','breakadd'),
        ('breakd','breakdel'),
        ('breakl','breaklist'),
        ('bro','browse'),
        ('bu','bu'),
        ('buf','buf'),
        ('bufdo','bufdo'),
        ('buffers','buffers'),
        ('bun','bunload'),
        ('bw','bwipeout'),
        ('c','c'),
        ('c','change'),
        ('cN','cN'),
        ('cN','cNext'),
        ('cNf','cNf'),
        ('cNf','cNfile'),
        ('cabc','cabclear'),
        ('cad','cad'),
        ('cad','caddexpr'),
        ('caddb','caddbuffer'),
        ('caddf','caddfile'),
        ('cal','call'),
        ('cat','catch'),
        ('cb','cbuffer'),
        ('cc','cc'),
        ('ccl','cclose'),
        ('cd','cd'),
        ('ce','center'),
        ('cex','cexpr'),
        ('cf','cfile'),
        ('cfir','cfirst'),
        ('cg','cgetfile'),
        ('cgetb','cgetbuffer'),
        ('cgete','cgetexpr'),
        ('changes','changes'),
        ('chd','chdir'),
        ('che','checkpath'),
        ('checkt','checktime'),
        ('cl','cl'),
        ('cl','clist'),
        ('cla','clast'),
        ('clo','close'),
        ('cmapc','cmapclear'),
        ('cn','cn'),
        ('cn','cnext'),
        ('cnew','cnewer'),
        ('cnf','cnf'),
        ('cnf','cnfile'),
        ('co','copy'),
        ('col','colder'),
        ('colo','colorscheme'),
        ('com','com'),
        ('comc','comclear'),
        ('comp','compiler'),
        ('con','con'),
        ('con','continue'),
        ('conf','confirm'),
        ('cope','copen'),
        ('cp','cprevious'),
        ('cpf','cpfile'),
        ('cq','cquit'),
        ('cr','crewind'),
        ('cs','cs'),
        ('cscope','cscope'),
        ('cstag','cstag'),
        ('cuna','cunabbrev'),
        ('cw','cwindow'),
        ('d','d'),
        ('d','delete'),
        ('de','de'),
        ('debug','debug'),
        ('debugg','debuggreedy'),
        ('del','del'),
        ('delc','delcommand'),
        ('delel','delel'),
        ('delep','delep'),
        ('deletel','deletel'),
        ('deletep','deletep'),
        ('deletl','deletl'),
        ('deletp','deletp'),
        ('delf','delf'),
        ('delf','delfunction'),
        ('dell','dell'),
        ('delm','delmarks'),
        ('delp','delp'),
        ('dep','dep'),
        ('di','di'),
        ('di','display'),
        ('diffg','diffget'),
        ('diffo','diffoff'),
        ('diffp','diffpatch'),
        ('diffpu','diffput'),
        ('diffs','diffsplit'),
        ('difft','diffthis'),
        ('diffu','diffupdate'),
        ('dig','dig'),
        ('dig','digraphs'),
        ('dir','dir'),
        ('dj','djump'),
        ('dl','dl'),
        ('dli','dlist'),
        ('do','do'),
        ('doau','doau'),
        ('dp','dp'),
        ('dr','drop'),
        ('ds','dsearch'),
        ('dsp','dsplit'),
        ('e','e'),
        ('e','edit'),
        ('ea','ea'),
        ('earlier','earlier'),
        ('ec','ec'),
        ('echoe','echoerr'),
        ('echom','echomsg'),
        ('echon','echon'),
        ('el','else'),
        ('elsei','elseif'),
        ('em','emenu'),
        ('en','en'),
        ('en','endif'),
        ('endf','endf'),
        ('endf','endfunction'),
        ('endfo','endfor'),
        ('endfun','endfun'),
        ('endt','endtry'),
        ('endw','endwhile'),
        ('ene','enew'),
        ('ex','ex'),
        ('exi','exit'),
        ('exu','exusage'),
        ('f','f'),
        ('f','file'),
        ('files','files'),
        ('filet','filet'),
        ('filetype','filetype'),
        ('fin','fin'),
        ('fin','find'),
        ('fina','finally'),
        ('fini','finish'),
        ('fir','first'),
        ('fix','fixdel'),
        ('fo','fold'),
        ('foldc','foldclose'),
        ('foldd','folddoopen'),
        ('folddoc','folddoclosed'),
        ('foldo','foldopen'),
        ('for','for'),
        ('fu','fu'),
        ('fu','function'),
        ('fun','fun'),
        ('g','g'),
        ('go','goto'),
        ('gr','grep'),
        ('grepa','grepadd'),
        ('gui','gui'),
        ('gvim','gvim'),
        ('h','h'),
        ('h','help'),
        ('ha','hardcopy'),
        ('helpf','helpfind'),
        ('helpg','helpgrep'),
        ('helpt','helptags'),
        ('hi','hi'),
        ('hid','hide'),
        ('his','history'),
        ('i','i'),
        ('ia','ia'),
        ('iabc','iabclear'),
        ('if','if'),
        ('ij','ijump'),
        ('il','ilist'),
        ('imapc','imapclear'),
        ('in','in'),
        ('intro','intro'),
        ('is','isearch'),
        ('isp','isplit'),
        ('iuna','iunabbrev'),
        ('j','join'),
        ('ju','jumps'),
        ('k','k'),
        ('kee','keepmarks'),
        ('keepa','keepa'),
        ('keepalt','keepalt'),
        ('keepj','keepjumps'),
        ('keepp','keeppatterns'),
        ('l','l'),
        ('l','list'),
        ('lN','lN'),
        ('lN','lNext'),
        ('lNf','lNf'),
        ('lNf','lNfile'),
        ('la','la'),
        ('la','last'),
        ('lad','lad'),
        ('lad','laddexpr'),
        ('laddb','laddbuffer'),
        ('laddf','laddfile'),
        ('lan','lan'),
        ('lan','language'),
        ('lat','lat'),
        ('later','later'),
        ('lb','lbuffer'),
        ('lc','lcd'),
        ('lch','lchdir'),
        ('lcl','lclose'),
        ('lcs','lcs'),
        ('lcscope','lcscope'),
        ('le','left'),
        ('lefta','leftabove'),
        ('lex','lexpr'),
        ('lf','lfile'),
        ('lfir','lfirst'),
        ('lg','lgetfile'),
        ('lgetb','lgetbuffer'),
        ('lgete','lgetexpr'),
        ('lgr','lgrep'),
        ('lgrepa','lgrepadd'),
        ('lh','lhelpgrep'),
        ('ll','ll'),
        ('lla','llast'),
        ('lli','llist'),
        ('lmak','lmake'),
        ('lmapc','lmapclear'),
        ('lne','lne'),
        ('lne','lnext'),
        ('lnew','lnewer'),
        ('lnf','lnf'),
        ('lnf','lnfile'),
        ('lo','lo'),
        ('lo','loadview'),
        ('loadk','loadk'),
        ('loadkeymap','loadkeymap'),
        ('loc','lockmarks'),
        ('lockv','lockvar'),
        ('lol','lolder'),
        ('lop','lopen'),
        ('lp','lprevious'),
        ('lpf','lpfile'),
        ('lr','lrewind'),
        ('ls','ls'),
        ('lt','ltag'),
        ('lua','lua'),
        ('luado','luado'),
        ('luafile','luafile'),
        ('lv','lvimgrep'),
        ('lvimgrepa','lvimgrepadd'),
        ('lw','lwindow'),
        ('m','move'),
        ('ma','ma'),
        ('ma','mark'),
        ('mak','make'),
        ('marks','marks'),
        ('mat','match'),
        ('menut','menut'),
        ('menut','menutranslate'),
        ('mes','mes'),
        ('messages','messages'),
        ('mk','mk'),
        ('mk','mkexrc'),
        ('mks','mksession'),
        ('mksp','mkspell'),
        ('mkv','mkv'),
        ('mkv','mkvimrc'),
        ('mkvie','mkview'),
        ('mo','mo'),
        ('mod','mode'),
        ('mz','mz'),
        ('mz','mzscheme'),
        ('mzf','mzfile'),
        ('n','n'),
        ('n','next'),
        ('nb','nbkey'),
        ('nbc','nbclose'),
        ('nbs','nbstart'),
        ('ne','ne'),
        ('new','new'),
        ('nmapc','nmapclear'),
        ('noa','noa'),
        ('noautocmd','noautocmd'),
        ('noh','nohlsearch'),
        ('nu','number'),
        ('o','o'),
        ('o','open'),
        ('ol','oldfiles'),
        ('omapc','omapclear'),
        ('on','only'),
        ('opt','options'),
        ('ownsyntax','ownsyntax'),
        ('p','p'),
        ('p','print'),
        ('pc','pclose'),
        ('pe','pe'),
        ('pe','perl'),
        ('ped','pedit'),
        ('perld','perldo'),
        ('po','pop'),
        ('popu','popu'),
        ('popu','popup'),
        ('pp','ppop'),
        ('pr','pr'),
        ('pre','preserve'),
        ('prev','previous'),
        ('pro','pro'),
        ('prof','profile'),
        ('profd','profdel'),
        ('promptf','promptfind'),
        ('promptr','promptrepl'),
        ('ps','psearch'),
        ('ptN','ptN'),
        ('ptN','ptNext'),
        ('pta','ptag'),
        ('ptf','ptfirst'),
        ('ptj','ptjump'),
        ('ptl','ptlast'),
        ('ptn','ptn'),
        ('ptn','ptnext'),
        ('ptp','ptprevious'),
        ('ptr','ptrewind'),
        ('pts','ptselect'),
        ('pu','put'),
        ('pw','pwd'),
        ('py','py'),
        ('py','python'),
        ('py3','py3'),
        ('py3','py3'),
        ('py3do','py3do'),
        ('pydo','pydo'),
        ('pyf','pyfile'),
        ('python3','python3'),
        ('q','q'),
        ('q','quit'),
        ('qa','qall'),
        ('quita','quitall'),
        ('r','r'),
        ('r','read'),
        ('re','re'),
        ('rec','recover'),
        ('red','red'),
        ('red','redo'),
        ('redi','redir'),
        ('redr','redraw'),
        ('redraws','redrawstatus'),
        ('reg','registers'),
        ('res','resize'),
        ('ret','retab'),
        ('retu','return'),
        ('rew','rewind'),
        ('ri','right'),
        ('rightb','rightbelow'),
        ('ru','ru'),
        ('ru','runtime'),
        ('rub','ruby'),
        ('rubyd','rubydo'),
        ('rubyf','rubyfile'),
        ('rundo','rundo'),
        ('rv','rviminfo'),
        ('sN','sNext'),
        ('sa','sargument'),
        ('sal','sall'),
        ('san','sandbox'),
        ('sav','saveas'),
        ('sb','sbuffer'),
        ('sbN','sbNext'),
        ('sba','sball'),
        ('sbf','sbfirst'),
        ('sbl','sblast'),
        ('sbm','sbmodified'),
        ('sbn','sbnext'),
        ('sbp','sbprevious'),
        ('sbr','sbrewind'),
        ('scrip','scrip'),
        ('scrip','scriptnames'),
        ('scripte','scriptencoding'),
        ('scs','scs'),
        ('scscope','scscope'),
        ('se','set'),
        ('setf','setfiletype'),
        ('setg','setglobal'),
        ('setl','setlocal'),
        ('sf','sfind'),
        ('sfir','sfirst'),
        ('sh','shell'),
        ('si','si'),
        ('sig','sig'),
        ('sign','sign'),
        ('sil','silent'),
        ('sim','simalt'),
        ('sl','sl'),
        ('sl','sleep'),
        ('sla','slast'),
        ('sm','smagic'),
        ('sm','smap'),
        ('sme','sme'),
        ('smenu','smenu'),
        ('sn','snext'),
        ('sni','sniff'),
        ('sno','snomagic'),
        ('snoreme','snoreme'),
        ('snoremenu','snoremenu'),
        ('so','so'),
        ('so','source'),
        ('sor','sort'),
        ('sp','split'),
        ('spe','spe'),
        ('spe','spellgood'),
        ('spelld','spelldump'),
        ('spelli','spellinfo'),
        ('spellr','spellrepall'),
        ('spellu','spellundo'),
        ('spellw','spellwrong'),
        ('spr','sprevious'),
        ('sre','srewind'),
        ('st','st'),
        ('st','stop'),
        ('sta','stag'),
        ('star','star'),
        ('star','startinsert'),
        ('start','start'),
        ('startg','startgreplace'),
        ('startr','startreplace'),
        ('stj','stjump'),
        ('stopi','stopinsert'),
        ('sts','stselect'),
        ('sun','sunhide'),
        ('sunme','sunme'),
        ('sunmenu','sunmenu'),
        ('sus','suspend'),
        ('sv','sview'),
        ('sw','swapname'),
        ('sy','sy'),
        ('syn','syn'),
        ('sync','sync'),
        ('syncbind','syncbind'),
        ('syntime','syntime'),
        ('t','t'),
        ('tN','tN'),
        ('tN','tNext'),
        ('ta','ta'),
        ('ta','tag'),
        ('tab','tab'),
        ('tabN','tabN'),
        ('tabN','tabNext'),
        ('tabc','tabclose'),
        ('tabd','tabdo'),
        ('tabe','tabedit'),
        ('tabf','tabfind'),
        ('tabfir','tabfirst'),
        ('tabl','tablast'),
        ('tabm','tabmove'),
        ('tabn','tabnext'),
        ('tabnew','tabnew'),
        ('tabo','tabonly'),
        ('tabp','tabprevious'),
        ('tabr','tabrewind'),
        ('tabs','tabs'),
        ('tags','tags'),
        ('tc','tcl'),
        ('tcld','tcldo'),
        ('tclf','tclfile'),
        ('te','tearoff'),
        ('tf','tfirst'),
        ('th','throw'),
        ('tj','tjump'),
        ('tl','tlast'),
        ('tm','tm'),
        ('tm','tmenu'),
        ('tn','tn'),
        ('tn','tnext'),
        ('to','topleft'),
        ('tp','tprevious'),
        ('tr','tr'),
        ('tr','trewind'),
        ('try','try'),
        ('ts','tselect'),
        ('tu','tu'),
        ('tu','tunmenu'),
        ('u','u'),
        ('u','undo'),
        ('un','un'),
        ('una','unabbreviate'),
        ('undoj','undojoin'),
        ('undol','undolist'),
        ('unh','unhide'),
        ('unl','unl'),
        ('unlo','unlockvar'),
        ('uns','unsilent'),
        ('up','update'),
        ('v','v'),
        ('ve','ve'),
        ('ve','version'),
        ('verb','verbose'),
        ('vert','vertical'),
        ('vi','vi'),
        ('vi','visual'),
        ('vie','view'),
        ('vim','vimgrep'),
        ('vimgrepa','vimgrepadd'),
        ('viu','viusage'),
        ('vmapc','vmapclear'),
        ('vne','vnew'),
        ('vs','vsplit'),
        ('w','w'),
        ('w','write'),
        ('wN','wNext'),
        ('wa','wall'),
        ('wh','while'),
        ('win','win'),
        ('win','winsize'),
        ('winc','wincmd'),
        ('windo','windo'),
        ('winp','winpos'),
        ('wn','wnext'),
        ('wp','wprevious'),
        ('wq','wq'),
        ('wqa','wqall'),
        ('ws','wsverb'),
        ('wundo','wundo'),
        ('wv','wviminfo'),
        ('x','x'),
        ('x','xit'),
        ('xa','xall'),
        ('xmapc','xmapclear'),
        ('xme','xme'),
        ('xmenu','xmenu'),
        ('xnoreme','xnoreme'),
        ('xnoremenu','xnoremenu'),
        ('xunme','xunme'),
        ('xunmenu','xunmenu'),
        ('xwininfo','xwininfo'),
        ('y','yank'),
    )
    return var

# Module-level tuple consumed by the Vim lexer.
command = _getcommand()
def _getoption():
var = (
('acd','acd'),
('ai','ai'),
('akm','akm'),
('al','al'),
('aleph','aleph'),
('allowrevins','allowrevins'),
('altkeymap','altkeymap'),
('ambiwidth','ambiwidth'),
('ambw','ambw'),
('anti','anti'),
('antialias','antialias'),
('ar','ar'),
('arab','arab'),
('arabic','arabic'),
('arabicshape','arabicshape'),
('ari','ari'),
('arshape','arshape'),
('autochdir','autochdir'),
('autoindent','autoindent'),
('autoread','autoread'),
('autowrite','autowrite'),
('autowriteall','autowriteall'),
('aw','aw'),
('awa','awa'),
('background','background'),
('backspace','backspace'),
('backup','backup'),
('backupcopy','backupcopy'),
('backupdir','backupdir'),
('backupext','backupext'),
('backupskip','backupskip'),
('balloondelay','balloondelay'),
('ballooneval','ballooneval'),
('balloonexpr','balloonexpr'),
('bdir','bdir'),
('bdlay','bdlay'),
('beval','beval'),
('bex','bex'),
('bexpr','bexpr'),
('bg','bg'),
('bh','bh'),
('bin','bin'),
('binary','binary'),
('biosk','biosk'),
('bioskey','bioskey'),
('bk','bk'),
('bkc','bkc'),
('bl','bl'),
('bomb','bomb'),
('breakat','breakat'),
('brk','brk'),
('browsedir','browsedir'),
('bs','bs'),
('bsdir','bsdir'),
('bsk','bsk'),
('bt','bt'),
('bufhidden','bufhidden'),
('buflisted','buflisted'),
('buftype','buftype'),
('casemap','casemap'),
('cb','cb'),
('cc','cc'),
('ccv','ccv'),
('cd','cd'),
('cdpath','cdpath'),
('cedit','cedit'),
('cf','cf'),
('cfu','cfu'),
('ch','ch'),
('charconvert','charconvert'),
('ci','ci'),
('cin','cin'),
('cindent','cindent'),
('cink','cink'),
('cinkeys','cinkeys'),
('cino','cino'),
('cinoptions','cinoptions'),
('cinw','cinw'),
('cinwords','cinwords'),
('clipboard','clipboard'),
('cmdheight','cmdheight'),
('cmdwinheight','cmdwinheight'),
('cmp','cmp'),
('cms','cms'),
('co','co'),
('cocu','cocu'),
('cole','cole'),
('colorcolumn','colorcolumn'),
('columns','columns'),
('com','com'),
('comments','comments'),
('commentstring','commentstring'),
('compatible','compatible'),
('complete','complete'),
('completefunc','completefunc'),
('completeopt','completeopt'),
('concealcursor','concealcursor'),
('conceallevel','conceallevel'),
('confirm','confirm'),
('consk','consk'),
('conskey','conskey'),
('copyindent','copyindent'),
('cot','cot'),
('cp','cp'),
('cpo','cpo'),
('cpoptions','cpoptions'),
('cpt','cpt'),
('crb','crb'),
('cryptmethod','cryptmethod'),
('cscopepathcomp','cscopepathcomp'),
('cscopeprg','cscopeprg'),
('cscopequickfix','cscopequickfix'),
('cscoperelative','cscoperelative'),
('cscopetag','cscopetag'),
('cscopetagorder','cscopetagorder'),
('cscopeverbose','cscopeverbose'),
('cspc','cspc'),
('csprg','csprg'),
('csqf','csqf'),
('csre','csre'),
('cst','cst'),
('csto','csto'),
('csverb','csverb'),
('cuc','cuc'),
('cul','cul'),
('cursorbind','cursorbind'),
('cursorcolumn','cursorcolumn'),
('cursorline','cursorline'),
('cwh','cwh'),
('debug','debug'),
('deco','deco'),
('def','def'),
('define','define'),
('delcombine','delcombine'),
('dex','dex'),
('dg','dg'),
('dict','dict'),
('dictionary','dictionary'),
('diff','diff'),
('diffexpr','diffexpr'),
('diffopt','diffopt'),
('digraph','digraph'),
('dip','dip'),
('dir','dir'),
('directory','directory'),
('display','display'),
('dy','dy'),
('ea','ea'),
('ead','ead'),
('eadirection','eadirection'),
('eb','eb'),
('ed','ed'),
('edcompatible','edcompatible'),
('ef','ef'),
('efm','efm'),
('ei','ei'),
('ek','ek'),
('enc','enc'),
('encoding','encoding'),
('endofline','endofline'),
('eol','eol'),
('ep','ep'),
('equalalways','equalalways'),
('equalprg','equalprg'),
('errorbells','errorbells'),
('errorfile','errorfile'),
('errorformat','errorformat'),
('esckeys','esckeys'),
('et','et'),
('eventignore','eventignore'),
('ex','ex'),
('expandtab','expandtab'),
('exrc','exrc'),
('fcl','fcl'),
('fcs','fcs'),
('fdc','fdc'),
('fde','fde'),
('fdi','fdi'),
('fdl','fdl'),
('fdls','fdls'),
('fdm','fdm'),
('fdn','fdn'),
('fdo','fdo'),
('fdt','fdt'),
('fen','fen'),
('fenc','fenc'),
('fencs','fencs'),
('fex','fex'),
('ff','ff'),
('ffs','ffs'),
('fic','fic'),
('fileencoding','fileencoding'),
('fileencodings','fileencodings'),
('fileformat','fileformat'),
('fileformats','fileformats'),
('fileignorecase','fileignorecase'),
('filetype','filetype'),
('fillchars','fillchars'),
('fk','fk'),
('fkmap','fkmap'),
('flp','flp'),
('fml','fml'),
('fmr','fmr'),
('fo','fo'),
('foldclose','foldclose'),
('foldcolumn','foldcolumn'),
('foldenable','foldenable'),
('foldexpr','foldexpr'),
('foldignore','foldignore'),
('foldlevel','foldlevel'),
('foldlevelstart','foldlevelstart'),
('foldmarker','foldmarker'),
('foldmethod','foldmethod'),
('foldminlines','foldminlines'),
('foldnestmax','foldnestmax'),
('foldopen','foldopen'),
('foldtext','foldtext'),
('formatexpr','formatexpr'),
('formatlistpat','formatlistpat'),
('formatoptions','formatoptions'),
('formatprg','formatprg'),
('fp','fp'),
('fs','fs'),
('fsync','fsync'),
('ft','ft'),
('gcr','gcr'),
('gd','gd'),
('gdefault','gdefault'),
('gfm','gfm'),
('gfn','gfn'),
('gfs','gfs'),
('gfw','gfw'),
('ghr','ghr'),
('go','go'),
('gp','gp'),
('grepformat','grepformat'),
('grepprg','grepprg'),
('gtl','gtl'),
('gtt','gtt'),
('guicursor','guicursor'),
('guifont','guifont'),
('guifontset','guifontset'),
('guifontwide','guifontwide'),
('guiheadroom','guiheadroom'),
('guioptions','guioptions'),
('guipty','guipty'),
('guitablabel','guitablabel'),
('guitabtooltip','guitabtooltip'),
('helpfile','helpfile'),
('helpheight','helpheight'),
('helplang','helplang'),
('hf','hf'),
('hh','hh'),
('hi','hi'),
('hid','hid'),
('hidden','hidden'),
('highlight','highlight'),
('history','history'),
('hk','hk'),
('hkmap','hkmap'),
('hkmapp','hkmapp'),
('hkp','hkp'),
('hl','hl'),
('hlg','hlg'),
('hls','hls'),
('hlsearch','hlsearch'),
('ic','ic'),
('icon','icon'),
('iconstring','iconstring'),
('ignorecase','ignorecase'),
('im','im'),
('imactivatefunc','imactivatefunc'),
('imactivatekey','imactivatekey'),
('imaf','imaf'),
('imak','imak'),
('imc','imc'),
('imcmdline','imcmdline'),
('imd','imd'),
('imdisable','imdisable'),
('imi','imi'),
('iminsert','iminsert'),
('ims','ims'),
('imsearch','imsearch'),
('imsf','imsf'),
('imstatusfunc','imstatusfunc'),
('inc','inc'),
('include','include'),
('includeexpr','includeexpr'),
('incsearch','incsearch'),
('inde','inde'),
('indentexpr','indentexpr'),
('indentkeys','indentkeys'),
('indk','indk'),
('inex','inex'),
('inf','inf'),
('infercase','infercase'),
('inoremap','inoremap'),
('insertmode','insertmode'),
('invacd','invacd'),
('invai','invai'),
('invakm','invakm'),
('invallowrevins','invallowrevins'),
('invaltkeymap','invaltkeymap'),
('invanti','invanti'),
('invantialias','invantialias'),
('invar','invar'),
('invarab','invarab'),
('invarabic','invarabic'),
('invarabicshape','invarabicshape'),
('invari','invari'),
('invarshape','invarshape'),
('invautochdir','invautochdir'),
('invautoindent','invautoindent'),
('invautoread','invautoread'),
('invautowrite','invautowrite'),
('invautowriteall','invautowriteall'),
('invaw','invaw'),
('invawa','invawa'),
('invbackup','invbackup'),
('invballooneval','invballooneval'),
('invbeval','invbeval'),
('invbin','invbin'),
('invbinary','invbinary'),
('invbiosk','invbiosk'),
('invbioskey','invbioskey'),
('invbk','invbk'),
('invbl','invbl'),
('invbomb','invbomb'),
('invbuflisted','invbuflisted'),
('invcf','invcf'),
('invci','invci'),
('invcin','invcin'),
('invcindent','invcindent'),
('invcompatible','invcompatible'),
('invconfirm','invconfirm'),
('invconsk','invconsk'),
('invconskey','invconskey'),
('invcopyindent','invcopyindent'),
('invcp','invcp'),
('invcrb','invcrb'),
('invcscoperelative','invcscoperelative'),
('invcscopetag','invcscopetag'),
('invcscopeverbose','invcscopeverbose'),
('invcsre','invcsre'),
('invcst','invcst'),
('invcsverb','invcsverb'),
('invcuc','invcuc'),
('invcul','invcul'),
('invcursorbind','invcursorbind'),
('invcursorcolumn','invcursorcolumn'),
('invcursorline','invcursorline'),
('invdeco','invdeco'),
('invdelcombine','invdelcombine'),
('invdg','invdg'),
('invdiff','invdiff'),
('invdigraph','invdigraph'),
('invea','invea'),
('inveb','inveb'),
('inved','inved'),
('invedcompatible','invedcompatible'),
('invek','invek'),
('invendofline','invendofline'),
('inveol','inveol'),
('invequalalways','invequalalways'),
('inverrorbells','inverrorbells'),
('invesckeys','invesckeys'),
('invet','invet'),
('invex','invex'),
('invexpandtab','invexpandtab'),
('invexrc','invexrc'),
('invfen','invfen'),
('invfic','invfic'),
('invfileignorecase','invfileignorecase'),
('invfk','invfk'),
('invfkmap','invfkmap'),
('invfoldenable','invfoldenable'),
('invgd','invgd'),
('invgdefault','invgdefault'),
('invguipty','invguipty'),
('invhid','invhid'),
('invhidden','invhidden'),
('invhk','invhk'),
('invhkmap','invhkmap'),
('invhkmapp','invhkmapp'),
('invhkp','invhkp'),
('invhls','invhls'),
('invhlsearch','invhlsearch'),
('invic','invic'),
('invicon','invicon'),
('invignorecase','invignorecase'),
('invim','invim'),
('invimc','invimc'),
('invimcmdline','invimcmdline'),
('invimd','invimd'),
('invimdisable','invimdisable'),
('invincsearch','invincsearch'),
('invinf','invinf'),
('invinfercase','invinfercase'),
('invinsertmode','invinsertmode'),
('invis','invis'),
('invjoinspaces','invjoinspaces'),
('invjs','invjs'),
('invlazyredraw','invlazyredraw'),
('invlbr','invlbr'),
('invlinebreak','invlinebreak'),
('invlisp','invlisp'),
('invlist','invlist'),
('invloadplugins','invloadplugins'),
('invlpl','invlpl'),
('invlz','invlz'),
('invma','invma'),
('invmacatsui','invmacatsui'),
('invmagic','invmagic'),
('invmh','invmh'),
('invml','invml'),
('invmod','invmod'),
('invmodeline','invmodeline'),
('invmodifiable','invmodifiable'),
('invmodified','invmodified'),
('invmore','invmore'),
('invmousef','invmousef'),
('invmousefocus','invmousefocus'),
('invmousehide','invmousehide'),
('invnu','invnu'),
('invnumber','invnumber'),
('invodev','invodev'),
('invopendevice','invopendevice'),
('invpaste','invpaste'),
('invpi','invpi'),
('invpreserveindent','invpreserveindent'),
('invpreviewwindow','invpreviewwindow'),
('invprompt','invprompt'),
('invpvw','invpvw'),
('invreadonly','invreadonly'),
('invrelativenumber','invrelativenumber'),
('invremap','invremap'),
('invrestorescreen','invrestorescreen'),
('invrevins','invrevins'),
('invri','invri'),
('invrightleft','invrightleft'),
('invrl','invrl'),
('invrnu','invrnu'),
('invro','invro'),
('invrs','invrs'),
('invru','invru'),
('invruler','invruler'),
('invsb','invsb'),
('invsc','invsc'),
('invscb','invscb'),
('invscrollbind','invscrollbind'),
('invscs','invscs'),
('invsecure','invsecure'),
('invsft','invsft'),
('invshellslash','invshellslash'),
('invshelltemp','invshelltemp'),
('invshiftround','invshiftround'),
('invshortname','invshortname'),
('invshowcmd','invshowcmd'),
('invshowfulltag','invshowfulltag'),
('invshowmatch','invshowmatch'),
('invshowmode','invshowmode'),
('invsi','invsi'),
('invsm','invsm'),
('invsmartcase','invsmartcase'),
('invsmartindent','invsmartindent'),
('invsmarttab','invsmarttab'),
('invsmd','invsmd'),
('invsn','invsn'),
('invsol','invsol'),
('invspell','invspell'),
('invsplitbelow','invsplitbelow'),
('invsplitright','invsplitright'),
('invspr','invspr'),
('invsr','invsr'),
('invssl','invssl'),
('invsta','invsta'),
('invstartofline','invstartofline'),
('invstmp','invstmp'),
('invswapfile','invswapfile'),
('invswf','invswf'),
('invta','invta'),
('invtagbsearch','invtagbsearch'),
('invtagrelative','invtagrelative'),
('invtagstack','invtagstack'),
('invtbi','invtbi'),
('invtbidi','invtbidi'),
('invtbs','invtbs'),
('invtermbidi','invtermbidi'),
('invterse','invterse'),
('invtextauto','invtextauto'),
('invtextmode','invtextmode'),
('invtf','invtf'),
('invtgst','invtgst'),
('invtildeop','invtildeop'),
('invtimeout','invtimeout'),
('invtitle','invtitle'),
('invto','invto'),
('invtop','invtop'),
('invtr','invtr'),
('invttimeout','invttimeout'),
('invttybuiltin','invttybuiltin'),
('invttyfast','invttyfast'),
('invtx','invtx'),
('invudf','invudf'),
('invundofile','invundofile'),
('invvb','invvb'),
('invvisualbell','invvisualbell'),
('invwa','invwa'),
('invwarn','invwarn'),
('invwb','invwb'),
('invweirdinvert','invweirdinvert'),
('invwfh','invwfh'),
('invwfw','invwfw'),
('invwic','invwic'),
('invwildignorecase','invwildignorecase'),
('invwildmenu','invwildmenu'),
('invwinfixheight','invwinfixheight'),
('invwinfixwidth','invwinfixwidth'),
('invwiv','invwiv'),
('invwmnu','invwmnu'),
('invwrap','invwrap'),
('invwrapscan','invwrapscan'),
('invwrite','invwrite'),
('invwriteany','invwriteany'),
('invwritebackup','invwritebackup'),
('invws','invws'),
('is','is'),
('isf','isf'),
('isfname','isfname'),
('isi','isi'),
('isident','isident'),
('isk','isk'),
('iskeyword','iskeyword'),
('isp','isp'),
('isprint','isprint'),
('joinspaces','joinspaces'),
('js','js'),
('key','key'),
('keymap','keymap'),
('keymodel','keymodel'),
('keywordprg','keywordprg'),
('km','km'),
('kmp','kmp'),
('kp','kp'),
('langmap','langmap'),
('langmenu','langmenu'),
('laststatus','laststatus'),
('lazyredraw','lazyredraw'),
('lbr','lbr'),
('lcs','lcs'),
('linebreak','linebreak'),
('lines','lines'),
('linespace','linespace'),
('lisp','lisp'),
('lispwords','lispwords'),
('list','list'),
('listchars','listchars'),
('lm','lm'),
('lmap','lmap'),
('loadplugins','loadplugins'),
('lpl','lpl'),
('ls','ls'),
('lsp','lsp'),
('lw','lw'),
('lz','lz'),
('ma','ma'),
('macatsui','macatsui'),
('magic','magic'),
('makeef','makeef'),
('makeprg','makeprg'),
('mat','mat'),
('matchpairs','matchpairs'),
('matchtime','matchtime'),
('maxcombine','maxcombine'),
('maxfuncdepth','maxfuncdepth'),
('maxmapdepth','maxmapdepth'),
('maxmem','maxmem'),
('maxmempattern','maxmempattern'),
('maxmemtot','maxmemtot'),
('mco','mco'),
('mef','mef'),
('menuitems','menuitems'),
('mfd','mfd'),
('mh','mh'),
('mis','mis'),
('mkspellmem','mkspellmem'),
('ml','ml'),
('mls','mls'),
('mm','mm'),
('mmd','mmd'),
('mmp','mmp'),
('mmt','mmt'),
('mod','mod'),
('modeline','modeline'),
('modelines','modelines'),
('modifiable','modifiable'),
('modified','modified'),
('more','more'),
('mouse','mouse'),
('mousef','mousef'),
('mousefocus','mousefocus'),
('mousehide','mousehide'),
('mousem','mousem'),
('mousemodel','mousemodel'),
('mouses','mouses'),
('mouseshape','mouseshape'),
('mouset','mouset'),
('mousetime','mousetime'),
('mp','mp'),
('mps','mps'),
('msm','msm'),
('mzq','mzq'),
('mzquantum','mzquantum'),
('nf','nf'),
('nnoremap','nnoremap'),
('noacd','noacd'),
('noai','noai'),
('noakm','noakm'),
('noallowrevins','noallowrevins'),
('noaltkeymap','noaltkeymap'),
('noanti','noanti'),
('noantialias','noantialias'),
('noar','noar'),
('noarab','noarab'),
('noarabic','noarabic'),
('noarabicshape','noarabicshape'),
('noari','noari'),
('noarshape','noarshape'),
('noautochdir','noautochdir'),
('noautoindent','noautoindent'),
('noautoread','noautoread'),
('noautowrite','noautowrite'),
('noautowriteall','noautowriteall'),
('noaw','noaw'),
('noawa','noawa'),
('nobackup','nobackup'),
('noballooneval','noballooneval'),
('nobeval','nobeval'),
('nobin','nobin'),
('nobinary','nobinary'),
('nobiosk','nobiosk'),
('nobioskey','nobioskey'),
('nobk','nobk'),
('nobl','nobl'),
('nobomb','nobomb'),
('nobuflisted','nobuflisted'),
('nocf','nocf'),
('noci','noci'),
('nocin','nocin'),
('nocindent','nocindent'),
('nocompatible','nocompatible'),
('noconfirm','noconfirm'),
('noconsk','noconsk'),
('noconskey','noconskey'),
('nocopyindent','nocopyindent'),
('nocp','nocp'),
('nocrb','nocrb'),
('nocscoperelative','nocscoperelative'),
('nocscopetag','nocscopetag'),
('nocscopeverbose','nocscopeverbose'),
('nocsre','nocsre'),
('nocst','nocst'),
('nocsverb','nocsverb'),
('nocuc','nocuc'),
('nocul','nocul'),
('nocursorbind','nocursorbind'),
('nocursorcolumn','nocursorcolumn'),
('nocursorline','nocursorline'),
('nodeco','nodeco'),
('nodelcombine','nodelcombine'),
('nodg','nodg'),
('nodiff','nodiff'),
('nodigraph','nodigraph'),
('noea','noea'),
('noeb','noeb'),
('noed','noed'),
('noedcompatible','noedcompatible'),
('noek','noek'),
('noendofline','noendofline'),
('noeol','noeol'),
('noequalalways','noequalalways'),
('noerrorbells','noerrorbells'),
('noesckeys','noesckeys'),
('noet','noet'),
('noex','noex'),
('noexpandtab','noexpandtab'),
('noexrc','noexrc'),
('nofen','nofen'),
('nofic','nofic'),
('nofileignorecase','nofileignorecase'),
('nofk','nofk'),
('nofkmap','nofkmap'),
('nofoldenable','nofoldenable'),
('nogd','nogd'),
('nogdefault','nogdefault'),
('noguipty','noguipty'),
('nohid','nohid'),
('nohidden','nohidden'),
('nohk','nohk'),
('nohkmap','nohkmap'),
('nohkmapp','nohkmapp'),
('nohkp','nohkp'),
('nohls','nohls'),
('nohlsearch','nohlsearch'),
('noic','noic'),
('noicon','noicon'),
('noignorecase','noignorecase'),
('noim','noim'),
('noimc','noimc'),
('noimcmdline','noimcmdline'),
('noimd','noimd'),
('noimdisable','noimdisable'),
('noincsearch','noincsearch'),
('noinf','noinf'),
('noinfercase','noinfercase'),
('noinsertmode','noinsertmode'),
('nois','nois'),
('nojoinspaces','nojoinspaces'),
('nojs','nojs'),
('nolazyredraw','nolazyredraw'),
('nolbr','nolbr'),
('nolinebreak','nolinebreak'),
('nolisp','nolisp'),
('nolist','nolist'),
('noloadplugins','noloadplugins'),
('nolpl','nolpl'),
('nolz','nolz'),
('noma','noma'),
('nomacatsui','nomacatsui'),
('nomagic','nomagic'),
('nomh','nomh'),
('noml','noml'),
('nomod','nomod'),
('nomodeline','nomodeline'),
('nomodifiable','nomodifiable'),
('nomodified','nomodified'),
('nomore','nomore'),
('nomousef','nomousef'),
('nomousefocus','nomousefocus'),
('nomousehide','nomousehide'),
('nonu','nonu'),
('nonumber','nonumber'),
('noodev','noodev'),
('noopendevice','noopendevice'),
('nopaste','nopaste'),
('nopi','nopi'),
('nopreserveindent','nopreserveindent'),
('nopreviewwindow','nopreviewwindow'),
('noprompt','noprompt'),
('nopvw','nopvw'),
('noreadonly','noreadonly'),
('norelativenumber','norelativenumber'),
('noremap','noremap'),
('norestorescreen','norestorescreen'),
('norevins','norevins'),
('nori','nori'),
('norightleft','norightleft'),
('norl','norl'),
('nornu','nornu'),
('noro','noro'),
('nors','nors'),
('noru','noru'),
('noruler','noruler'),
('nosb','nosb'),
('nosc','nosc'),
('noscb','noscb'),
('noscrollbind','noscrollbind'),
('noscs','noscs'),
('nosecure','nosecure'),
('nosft','nosft'),
('noshellslash','noshellslash'),
('noshelltemp','noshelltemp'),
('noshiftround','noshiftround'),
('noshortname','noshortname'),
('noshowcmd','noshowcmd'),
('noshowfulltag','noshowfulltag'),
('noshowmatch','noshowmatch'),
('noshowmode','noshowmode'),
('nosi','nosi'),
('nosm','nosm'),
('nosmartcase','nosmartcase'),
('nosmartindent','nosmartindent'),
('nosmarttab','nosmarttab'),
('nosmd','nosmd'),
('nosn','nosn'),
('nosol','nosol'),
('nospell','nospell'),
('nosplitbelow','nosplitbelow'),
('nosplitright','nosplitright'),
('nospr','nospr'),
('nosr','nosr'),
('nossl','nossl'),
('nosta','nosta'),
('nostartofline','nostartofline'),
('nostmp','nostmp'),
('noswapfile','noswapfile'),
('noswf','noswf'),
('nota','nota'),
('notagbsearch','notagbsearch'),
('notagrelative','notagrelative'),
('notagstack','notagstack'),
('notbi','notbi'),
('notbidi','notbidi'),
('notbs','notbs'),
('notermbidi','notermbidi'),
('noterse','noterse'),
('notextauto','notextauto'),
('notextmode','notextmode'),
('notf','notf'),
('notgst','notgst'),
('notildeop','notildeop'),
('notimeout','notimeout'),
('notitle','notitle'),
('noto','noto'),
('notop','notop'),
('notr','notr'),
('nottimeout','nottimeout'),
('nottybuiltin','nottybuiltin'),
('nottyfast','nottyfast'),
('notx','notx'),
('noudf','noudf'),
('noundofile','noundofile'),
('novb','novb'),
('novisualbell','novisualbell'),
('nowa','nowa'),
('nowarn','nowarn'),
('nowb','nowb'),
('noweirdinvert','noweirdinvert'),
('nowfh','nowfh'),
('nowfw','nowfw'),
('nowic','nowic'),
('nowildignorecase','nowildignorecase'),
('nowildmenu','nowildmenu'),
('nowinfixheight','nowinfixheight'),
('nowinfixwidth','nowinfixwidth'),
('nowiv','nowiv'),
('nowmnu','nowmnu'),
('nowrap','nowrap'),
('nowrapscan','nowrapscan'),
('nowrite','nowrite'),
('nowriteany','nowriteany'),
('nowritebackup','nowritebackup'),
('nows','nows'),
('nrformats','nrformats'),
('nu','nu'),
('number','number'),
('numberwidth','numberwidth'),
('nuw','nuw'),
('odev','odev'),
('oft','oft'),
('ofu','ofu'),
('omnifunc','omnifunc'),
('opendevice','opendevice'),
('operatorfunc','operatorfunc'),
('opfunc','opfunc'),
('osfiletype','osfiletype'),
('pa','pa'),
('para','para'),
('paragraphs','paragraphs'),
('paste','paste'),
('pastetoggle','pastetoggle'),
('patchexpr','patchexpr'),
('patchmode','patchmode'),
('path','path'),
('pdev','pdev'),
('penc','penc'),
('pex','pex'),
('pexpr','pexpr'),
('pfn','pfn'),
('ph','ph'),
('pheader','pheader'),
('pi','pi'),
('pm','pm'),
('pmbcs','pmbcs'),
('pmbfn','pmbfn'),
('popt','popt'),
('preserveindent','preserveindent'),
('previewheight','previewheight'),
('previewwindow','previewwindow'),
('printdevice','printdevice'),
('printencoding','printencoding'),
('printexpr','printexpr'),
('printfont','printfont'),
('printheader','printheader'),
('printmbcharset','printmbcharset'),
('printmbfont','printmbfont'),
('printoptions','printoptions'),
('prompt','prompt'),
('pt','pt'),
('pumheight','pumheight'),
('pvh','pvh'),
('pvw','pvw'),
('qe','qe'),
('quoteescape','quoteescape'),
('rdt','rdt'),
('re','re'),
('readonly','readonly'),
('redrawtime','redrawtime'),
('regexpengine','regexpengine'),
('relativenumber','relativenumber'),
('remap','remap'),
('report','report'),
('restorescreen','restorescreen'),
('revins','revins'),
('ri','ri'),
('rightleft','rightleft'),
('rightleftcmd','rightleftcmd'),
('rl','rl'),
('rlc','rlc'),
('rnu','rnu'),
('ro','ro'),
('rs','rs'),
('rtp','rtp'),
('ru','ru'),
('ruf','ruf'),
('ruler','ruler'),
('rulerformat','rulerformat'),
('runtimepath','runtimepath'),
('sb','sb'),
('sbo','sbo'),
('sbr','sbr'),
('sc','sc'),
('scb','scb'),
('scr','scr'),
('scroll','scroll'),
('scrollbind','scrollbind'),
('scrolljump','scrolljump'),
('scrolloff','scrolloff'),
('scrollopt','scrollopt'),
('scs','scs'),
('sect','sect'),
('sections','sections'),
('secure','secure'),
('sel','sel'),
('selection','selection'),
('selectmode','selectmode'),
('sessionoptions','sessionoptions'),
('sft','sft'),
('sh','sh'),
('shcf','shcf'),
('shell','shell'),
('shellcmdflag','shellcmdflag'),
('shellpipe','shellpipe'),
('shellquote','shellquote'),
('shellredir','shellredir'),
('shellslash','shellslash'),
('shelltemp','shelltemp'),
('shelltype','shelltype'),
('shellxescape','shellxescape'),
('shellxquote','shellxquote'),
('shiftround','shiftround'),
('shiftwidth','shiftwidth'),
('shm','shm'),
('shortmess','shortmess'),
('shortname','shortname'),
('showbreak','showbreak'),
('showcmd','showcmd'),
('showfulltag','showfulltag'),
('showmatch','showmatch'),
('showmode','showmode'),
('showtabline','showtabline'),
('shq','shq'),
('si','si'),
('sidescroll','sidescroll'),
('sidescrolloff','sidescrolloff'),
('siso','siso'),
('sj','sj'),
('slm','slm'),
('sm','sm'),
('smartcase','smartcase'),
('smartindent','smartindent'),
('smarttab','smarttab'),
('smc','smc'),
('smd','smd'),
('sn','sn'),
('so','so'),
('softtabstop','softtabstop'),
('sol','sol'),
('sp','sp'),
('spc','spc'),
('spell','spell'),
('spellcapcheck','spellcapcheck'),
('spellfile','spellfile'),
('spelllang','spelllang'),
('spellsuggest','spellsuggest'),
('spf','spf'),
('spl','spl'),
('splitbelow','splitbelow'),
('splitright','splitright'),
('spr','spr'),
('sps','sps'),
('sr','sr'),
('srr','srr'),
('ss','ss'),
('ssl','ssl'),
('ssop','ssop'),
('st','st'),
('sta','sta'),
('stal','stal'),
('startofline','startofline'),
('statusline','statusline'),
('stl','stl'),
('stmp','stmp'),
('sts','sts'),
('su','su'),
('sua','sua'),
('suffixes','suffixes'),
('suffixesadd','suffixesadd'),
('sw','sw'),
('swapfile','swapfile'),
('swapsync','swapsync'),
('swb','swb'),
('swf','swf'),
('switchbuf','switchbuf'),
('sws','sws'),
('sxe','sxe'),
('sxq','sxq'),
('syn','syn'),
('synmaxcol','synmaxcol'),
('syntax','syntax'),
('t_AB','t_AB'),
('t_AF','t_AF'),
('t_AL','t_AL'),
('t_CS','t_CS'),
('t_CV','t_CV'),
('t_Ce','t_Ce'),
('t_Co','t_Co'),
('t_Cs','t_Cs'),
('t_DL','t_DL'),
('t_EI','t_EI'),
('t_F1','t_F1'),
('t_F2','t_F2'),
('t_F3','t_F3'),
('t_F4','t_F4'),
('t_F5','t_F5'),
('t_F6','t_F6'),
('t_F7','t_F7'),
('t_F8','t_F8'),
('t_F9','t_F9'),
('t_IE','t_IE'),
('t_IS','t_IS'),
('t_K1','t_K1'),
('t_K3','t_K3'),
('t_K4','t_K4'),
('t_K5','t_K5'),
('t_K6','t_K6'),
('t_K7','t_K7'),
('t_K8','t_K8'),
('t_K9','t_K9'),
('t_KA','t_KA'),
('t_KB','t_KB'),
('t_KC','t_KC'),
('t_KD','t_KD'),
('t_KE','t_KE'),
('t_KF','t_KF'),
('t_KG','t_KG'),
('t_KH','t_KH'),
('t_KI','t_KI'),
('t_KJ','t_KJ'),
('t_KK','t_KK'),
('t_KL','t_KL'),
('t_RI','t_RI'),
('t_RV','t_RV'),
('t_SI','t_SI'),
('t_Sb','t_Sb'),
('t_Sf','t_Sf'),
('t_WP','t_WP'),
('t_WS','t_WS'),
('t_ZH','t_ZH'),
('t_ZR','t_ZR'),
('t_al','t_al'),
('t_bc','t_bc'),
('t_cd','t_cd'),
('t_ce','t_ce'),
('t_cl','t_cl'),
('t_cm','t_cm'),
('t_cs','t_cs'),
('t_da','t_da'),
('t_db','t_db'),
('t_dl','t_dl'),
('t_fs','t_fs'),
('t_k1','t_k1'),
('t_k2','t_k2'),
('t_k3','t_k3'),
('t_k4','t_k4'),
('t_k5','t_k5'),
('t_k6','t_k6'),
('t_k7','t_k7'),
('t_k8','t_k8'),
('t_k9','t_k9'),
('t_kB','t_kB'),
('t_kD','t_kD'),
('t_kI','t_kI'),
('t_kN','t_kN'),
('t_kP','t_kP'),
('t_kb','t_kb'),
('t_kd','t_kd'),
('t_ke','t_ke'),
('t_kh','t_kh'),
('t_kl','t_kl'),
('t_kr','t_kr'),
('t_ks','t_ks'),
('t_ku','t_ku'),
('t_le','t_le'),
('t_mb','t_mb'),
('t_md','t_md'),
('t_me','t_me'),
('t_mr','t_mr'),
('t_ms','t_ms'),
('t_nd','t_nd'),
('t_op','t_op'),
('t_se','t_se'),
('t_so','t_so'),
('t_sr','t_sr'),
('t_te','t_te'),
('t_ti','t_ti'),
('t_ts','t_ts'),
('t_u7','t_u7'),
('t_ue','t_ue'),
('t_us','t_us'),
('t_ut','t_ut'),
('t_vb','t_vb'),
('t_ve','t_ve'),
('t_vi','t_vi'),
('t_vs','t_vs'),
('t_xs','t_xs'),
('ta','ta'),
('tabline','tabline'),
('tabpagemax','tabpagemax'),
('tabstop','tabstop'),
('tag','tag'),
('tagbsearch','tagbsearch'),
('taglength','taglength'),
('tagrelative','tagrelative'),
('tags','tags'),
('tagstack','tagstack'),
('tal','tal'),
('tb','tb'),
('tbi','tbi'),
('tbidi','tbidi'),
('tbis','tbis'),
('tbs','tbs'),
('tenc','tenc'),
('term','term'),
('termbidi','termbidi'),
('termencoding','termencoding'),
('terse','terse'),
('textauto','textauto'),
('textmode','textmode'),
('textwidth','textwidth'),
('tf','tf'),
('tgst','tgst'),
('thesaurus','thesaurus'),
('tildeop','tildeop'),
('timeout','timeout'),
('timeoutlen','timeoutlen'),
('title','title'),
('titlelen','titlelen'),
('titleold','titleold'),
('titlestring','titlestring'),
('tl','tl'),
('tm','tm'),
('to','to'),
('toolbar','toolbar'),
('toolbariconsize','toolbariconsize'),
('top','top'),
('tpm','tpm'),
('tr','tr'),
('ts','ts'),
('tsl','tsl'),
('tsr','tsr'),
('ttimeout','ttimeout'),
('ttimeoutlen','ttimeoutlen'),
('ttm','ttm'),
('tty','tty'),
('ttybuiltin','ttybuiltin'),
('ttyfast','ttyfast'),
('ttym','ttym'),
('ttymouse','ttymouse'),
('ttyscroll','ttyscroll'),
('ttytype','ttytype'),
('tw','tw'),
('tx','tx'),
('uc','uc'),
('udf','udf'),
('udir','udir'),
('ul','ul'),
('undodir','undodir'),
('undofile','undofile'),
('undolevels','undolevels'),
('undoreload','undoreload'),
('updatecount','updatecount'),
('updatetime','updatetime'),
('ur','ur'),
('ut','ut'),
('vb','vb'),
('vbs','vbs'),
('vdir','vdir'),
('ve','ve'),
('verbose','verbose'),
('verbosefile','verbosefile'),
('vfile','vfile'),
('vi','vi'),
('viewdir','viewdir'),
('viewoptions','viewoptions'),
('viminfo','viminfo'),
('virtualedit','virtualedit'),
('visualbell','visualbell'),
('vnoremap','vnoremap'),
('vop','vop'),
('wa','wa'),
('wak','wak'),
('warn','warn'),
('wb','wb'),
('wc','wc'),
('wcm','wcm'),
('wd','wd'),
('weirdinvert','weirdinvert'),
('wfh','wfh'),
('wfw','wfw'),
('wh','wh'),
('whichwrap','whichwrap'),
('wi','wi'),
('wic','wic'),
('wig','wig'),
('wildchar','wildchar'),
('wildcharm','wildcharm'),
('wildignore','wildignore'),
('wildignorecase','wildignorecase'),
('wildmenu','wildmenu'),
('wildmode','wildmode'),
('wildoptions','wildoptions'),
('wim','wim'),
('winaltkeys','winaltkeys'),
('window','window'),
('winfixheight','winfixheight'),
('winfixwidth','winfixwidth'),
('winheight','winheight'),
('winminheight','winminheight'),
('winminwidth','winminwidth'),
('winwidth','winwidth'),
('wiv','wiv'),
('wiw','wiw'),
('wm','wm'),
('wmh','wmh'),
('wmnu','wmnu'),
('wmw','wmw'),
('wop','wop'),
('wrap','wrap'),
('wrapmargin','wrapmargin'),
('wrapscan','wrapscan'),
('write','write'),
('writeany','writeany'),
('writebackup','writebackup'),
('writedelay','writedelay'),
('ws','ws'),
('ww','ww'),
)
return var
# Materialize the option table once at import time: `option` is a tuple of
# (full-or-abbreviated vim option name, canonical name) pairs built by
# _getoption() above.  Auto-generated data — do not edit entries by hand.
option = _getoption()
|
wakatime/wakatime
|
wakatime/packages/py27/pygments/lexers/_vim_builtins.py
|
Python
|
bsd-3-clause
| 57,090
|
[
"BLAST"
] |
225fe98eb3f43d652330b0913ce64fa0b81dff78dabafbfd486b413a4ce65360
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.