| text (string, 12 – 1.05M chars) | repo_name (string, 5 – 86 chars) | path (string, 4 – 191 chars) | language (string, 1 class) | license (string, 15 values) | size (int32, 12 – 1.05M) | keyword (list, 1 – 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# Orca
#
# Copyright 2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for gnome-screensaver-dialog."""
from script import Script
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/gnome-screensaver-dialog/__init__.py | Python | gpl-3.0 | 854 | ["ORCA"] | 46c676609fb3c72d486a6db2ebbe2f54c5a276d885006aefcaa790fdf6131ca1 |
#!/usr/bin/env python
import sys
import math
from simtk.openmm.app import Element
import simtk.unit as unit
import subprocess
import datetime
from six.moves import cStringIO
import mdtraj as md
import logging
logger = logging.getLogger(__name__)
def fix(atomClass):
if atomClass == 'X':
return ''
return atomClass
elements = {}
for elem in Element._elements_by_symbol.values():
num = elem.atomic_number
if num not in elements or elem.mass < elements[num].mass:
elements[num] = elem
OTHER = 0
ATOMS = 1
CONNECT = 2
CONNECTIVITY = 3
RESIDUECONNECT = 4
section = OTHER
charge14scale = 1.0 / 1.2
epsilon14scale = 0.5
skipResidues = ['CIO', 'IB'] # "Generic" ions defined by Amber, which are identical to other real ions
skipClasses = ['OW', 'HW'] # Skip water atoms, since we define these in separate files
# Manually get the hydrogen element
hydrogen = Element.getBySymbol("H")
class AmberParser(object):
def __init__(self, override_mol2_residue_name=None):
"""Create an AmberParser object for converting amber force field files to XML format.
Parameters
----------
override_mol2_residue_name : str, default=None
If given, use this name to override mol2 residue names.
Useful to ensure that multiple ligands have unique residue
names, as required by the OpenMM ffXML parser.
"""
self.override_mol2_residue_name = override_mol2_residue_name
self.current_mol2 = 0
self.residueAtoms = {}
self.residueBonds = {}
self.residueConnections = {}
self.types = []
self.type_names = []
self.masses = {}
self.resAtomTypes = {}
self.vdwEquivalents = {}
self.vdw = {}
self.charge = {}
self.bonds = []
self.angles = []
self.torsions = []
self.impropers = []
self.set_provenance()
def addAtom(self, residue, atomName, atomClass, element, charge, use_numeric_types=True):
"""Add an atom to the database of FF data.
Notes
-----
use_numeric_types was not originally present in the OpenMM AMBER
parsers. It was added so that we can have atom types of the form
"RES-X", where RES is the name of the molecule or residue and X
is the atom numbering within that molecule. use_numeric_types is
set to False when processing mol2 files--e.g. for ligands.
"""
if residue is None:
return
type_id = len(self.types)
self.residueAtoms[residue].append([atomName, type_id])
self.types.append((atomClass, element, charge))
if use_numeric_types:
self.type_names.append("%d" % (type_id))
else:
self.type_names.append("%s-%s" % (residue, atomName))
def addBond(self, residue, atom1, atom2):
"""Add a bond to the database of FF data."""
if residue is None:
return
self.residueBonds[residue].append((atom1, atom2))
def addExternalBond(self, residue, atom):
"""Add an external bond to the database of FF data."""
if residue is None:
return
if atom != -1:
self.residueConnections[residue] += [atom]
def process_mol2_file(self, inputfile):
"""Process an AMBER GAFF-compatible mol2 file.
Parameters
----------
inputfile : str
filename of an .mol2 file
Notes
-----
Antechamber is known to produce NONSTANDARD mol2 files. This function
is designed to work with those nonstandard mol2 files, not
Tripos standard mol2 files. We are forced to live with the poor
decisions of our predecessors...
"""
atoms, bonds = md.formats.mol2.mol2_to_dataframes(inputfile)
if self.override_mol2_residue_name is None:
residue_name = atoms.resName[1] # To Do: Add check for consistency
else:
residue_name = self.override_mol2_residue_name
# Give each mol2 file a unique numbering to avoid conflicts.
residue_name = "%s-%d" % (residue_name, self.current_mol2)
self.current_mol2 += 1
self.residueAtoms[residue_name] = []
self.residueBonds[residue_name] = []
self.residueConnections[residue_name] = []
for (i0, i1, name, x, y, z, atype, code, resname, charge) in atoms.itertuples(index=True):
# i0 and i1 are zero-based and one-based indices, respectively
full_name = residue_name + "_" + name
element_symbol = md.formats.mol2.gaff_elements[atype]
e = Element.getBySymbol(element_symbol)
self.addAtom(residue_name, name, atype, e, charge, use_numeric_types=False) # use_numeric_types set to false to use string-based atom names, rather than numbers
self.vdwEquivalents[full_name] = atype
for (id0, id1, bond_type) in bonds.itertuples(False):
i = id0 - 1 # Subtract 1 for zero based indexing in OpenMM???
j = id1 - 1 # Subtract 1 for zero based indexing in OpenMM???
self.addBond(residue_name, i, j)
def process_library_file(self, inputfile):
"""Process an AMBER .lib file.
Parameters
----------
inputfile : str
filename of an .lib file
"""
for line in open(inputfile):
if line.startswith('!entry'):
fields = line.split('.')
residue = fields[1]
if residue in skipResidues:
residue = None
continue
key = fields[3].split()[0]
if key == 'atoms':
section = ATOMS
self.residueAtoms[residue] = []
self.residueBonds[residue] = []
self.residueConnections[residue] = []
elif key == 'connect':
section = CONNECT
elif key == 'connectivity':
section = CONNECTIVITY
elif key == 'residueconnect':
section = RESIDUECONNECT
else:
section = OTHER
elif section == ATOMS:
fields = line.split()
atomName = fields[0][1:-1]
atomClass = fields[1][1:-1]
if fields[6] == '-1':
# Workaround for bug in some Amber files.
if atomClass[0] == 'C':
elem = elements[6]
elif atomClass[0] == 'H':
elem = elements[1]
else:
raise ValueError('Illegal atomic number: ' + line)
else:
elem = elements[int(fields[6])]
# Use a local value here; self.charge is the charge dictionary set up in __init__.
charge = float(fields[7])
self.addAtom(residue, atomName, atomClass, elem, charge)
elif section == CONNECT:
self.addExternalBond(residue, int(line) - 1)
elif section == CONNECTIVITY:
fields = line.split()
self.addBond(residue, int(fields[0]) - 1, int(fields[1]) - 1)
elif section == RESIDUECONNECT:
# Some Amber files have errors in them, incorrectly listing atoms that should not be
# connected in the first two positions. We therefore rely on the "connect" section for
# those, using this block only for other external connections.
for atom in [int(x) - 1 for x in line.split()[2:]]:
self.addExternalBond(residue, atom)
def process_dat_file(self, inputfile):
"""Process an AMBER .dat file.
Parameters
----------
inputfile : str
filename of an .dat file
"""
block = 0
continueTorsion = False
for line in open(inputfile):
# Use to detect blank lines
line_length = len(line.strip())
if block == 0: # Title
block += 1
elif block == 1: # Mass
if line_length == 0:
block += 1
else:
params = self._parse_dat_atom_symbols_and_masses(line)
self.masses[params['kndsym']] = float(params['amass'])
elif block == 2: # Hydrophilic atoms
block += 1
elif block == 3: # Bonds
if line_length == 0:
block += 1
else:
params = self._parse_dat_bond_length_parameters(line)
self.bonds.append([params['ibt'], params['jbt'], params['rk'], params['req']])
elif block == 4: # Angles
if line_length == 0:
block += 1
else:
params = self._parse_dat_bond_angle_parameters(line)
self.angles.append([params['itt'], params['jtt'], params['ktt'], params['tk'], params['teq']])
elif block == 5: # Torsions
if line_length == 0:
block += 1
else:
params = self._parse_dat_dihedral_parameters(line)
# Periodicity parameter pn is an int stored as a float,
# and a negative sign indicates additional dihedral terms are added on the next line
pn = int(float(params['pn']))
pk_over_idivf = float(params['pk']) / float(params['idivf'])
if continueTorsion:
self.torsions[-1] += [pk_over_idivf, params['phase'], abs(pn)]
else:
self.torsions.append([params['ipt'], params['jpt'], params['kpt'], params['lpt'], pk_over_idivf, params['phase'], abs(pn)])
continueTorsion = (pn < 0)
elif block == 6: # Improper torsions
if line_length == 0:
block += 1
else:
params = self._parse_dat_improper_dihedral_parameters(line)
self.impropers.append((params['ipt'], params['jpt'], params['kpt'], params['lpt'],params['pk'], params['phase'], params['pn']))
elif block == 7: # 10-12 hbond potential
if line_length == 0:
block += 1
elif block == 8: # VDW equivalents
if line_length == 0:
block += 1
else:
symbols = self._parse_dat_6_12_equivalence_symbols(line)
for atom in symbols['ieqv']:
self.vdwEquivalents[atom] = symbols['iorg']
elif block == 9: # VDW type
block += 1
spec = self._parse_dat_6_12_potential_kind(line)
self.vdwType = spec['kindnb'].upper()
if self.vdwType not in ['RE', 'AC']:
raise ValueError('Nonbonded type (KINDNB) must be RE or AC')
elif block == 10: # VDW parameters
if line_length == 0:
block += 1
else:
params = self._parse_dat_6_12_nb_parameters(line, spec['kindnb'])
if self.vdwType == "RE":
self.vdw[params['ltynb']] = (params['r'], params['edep'])
elif self.vdwType == "AC":
self.vdw[params['ltynb']] = (params['a'], params['c'])
@staticmethod
def _parse_dat_atom_symbols_and_masses(line):
"""
Parse a line in a parm.dat file using atom symbol and mass specification.
Parameters
----------
line : str
Single line string containing atom symbol and mass parameters in a parm.dat file.
Returns
-------
dict containing
kndsym : str
amass : str
atpol : str
line :str
Notes
-----
Original format specification http://ambermd.org/formats.html#parm.dat
- 2 - ***** INPUT FOR ATOM SYMBOLS AND MASSES *****
KNDSYM , AMASS, ATPOL
FORMAT(A2,2X,F10.2x,f10.2)
KNDSYM The unique atom symbol used in the system.
AMASS Atomic mass of the center having the symbol "KNDSYM".
ATPOL The atomic polarizability for each atom (in A**3)
This is the type of polarizability used in sander
and gibbs. No parameters are supplied for this since
the feature is still in development (Amber 4.1).
NOTE: All the unique atomic symbols and their masses must
be read. The input is terminated by a blank card.
Examples
--------
c3 12.01 0.878 Sp3 C
ca 12.01 0.360 Sp2 C in pure aromatic systems
"""
kndsym = line[0:2].strip()
amass = line[4:14].strip()
# prevent potential IndexError from line being too short
try:
atpol = line[14:24].split()[0].strip()
except IndexError:
atpol = line[14:-1].split()[0].strip()
return locals()
@staticmethod
def _parse_dat_bond_length_parameters(line):
"""
Parse a line in a parm.dat file using bond length format specification.
Parameters
----------
line : str
Single line string containing bond length parameters in a parm.dat file.
Returns
-------
dict containing
ibt : str
jbt : str
rk : str
req : str
line : str
Notes
-----
Original format specification http://ambermd.org/formats.html#parm.dat
- 4 - ***** INPUT FOR BOND LENGTH PARAMETERS *****
IBT , JBT , RK , REQ
FORMAT(A2,1X,A2,2F10.2)
IBT,JBT Atom symbols for the two bonded atoms.
RK The harmonic force constant for the bond "IBT"-"JBT".
The unit is kcal/mol/(A**2).
REQ The equilibrium bond length for the above bond in Angstroms
The input is terminated by a blank card.
Examples
--------
n -os 395.0 1.4103 SOURCE4 30 0.0112
no-s4 143.0 1.9960 SOURCE3 3 0.0313
no-s6 149.6 1.9760 SOURCE3 3 0.0520
"""
ibt = line[0:2].strip()
jbt = line[3:5].strip()
rk = line[5:15].strip()
# prevent potential IndexError from line being too short
try:
req = line[15:25].split()[0].strip()
except IndexError:
req = line[15:-1].split()[0].strip()
return locals()
@staticmethod
def _parse_dat_bond_angle_parameters(line):
"""
Parse a line in a parm.dat file using bond angle format specification.
Parameters
----------
line : str
Single line string containing bond angle parameters in a parm.dat file.
Returns
-------
dict containing
itt : str
jtt : str
ktt : str
tk : str
teq : str
line : str
Notes
-----
Original format specification http://ambermd.org/formats.html#parm.dat
- 5 - ***** INPUT FOR BOND ANGLE PARAMETERS *****
ITT , JTT , KTT , TK , TEQ
FORMAT(A2,1X,A2,1X,A2,2F10.2)
ITT,... The atom symbols for the atoms making an angle.
TK The harmonic force constants for the angle "ITT"-"JTT"-
"KTT" in units of kcal/mol/(rad**2) (radians are the
traditional unit for angle parameters in force fields).
TEQ The equilibrium bond angle for the above angle in degrees.
The input is terminated by a blank card.
Examples
--------
n3-c3-n3 69.61 109.59 SOURCE4 27 1.8125
n3-c3-nc 68.79 113.29 SOURCE3 1 0.0000
n3-c3-nd 68.79 113.29 SOURCE3 1 same_as_n3-c3-nc
c1-sh-hs 48.23 95.99 calculated_based_on_C#C-SH 0
"""
itt = line[0:2].strip()
jtt = line[3:5].strip()
ktt = line[6:8].strip()
tk = line[8:18].strip()
# prevent potential IndexError from line being too short
try:
teq = line[18:28].split()[0].strip()
except IndexError:
teq = line[18:-1].split()[0].strip()
return locals()
@staticmethod
def _parse_dat_dihedral_parameters(line):
"""
Parse a line in a parm.dat file using dihedral format specification.
Parameters
----------
line : str
Single line string containing dihedral parameters in a parm.dat file.
Returns
-------
dict containing
ipt : str
jpt : str
kpt : str
lpt : str
idivf : str
pk : str
phase : str
pn : str
line : str
Notes
-----
Original format specification http://ambermd.org/formats.html#parm.dat
- 6 - ***** INPUT FOR DIHEDRAL PARAMETERS *****
IPT , JPT , KPT , LPT , IDIVF , PK , PHASE , PN
FORMAT(A2,1X,A2,1X,A2,1X,A2,I4,3F15.2)
IPT, ... The atom symbols for the atoms forming a dihedral
angle. If IPT .eq. 'X ' .and. LPT .eq. 'X ' then
any dihedrals in the system involving the atoms "JPT" and
and "KPT" are assigned the same parameters. This is
called the general dihedral type and is of the form
"X "-"JPT"-"KPT"-"X ".
IDIVF The factor by which the torsional barrier is divided.
Consult Weiner, et al., JACS 106:765 (1984) p. 769 for
details. Basically, the actual torsional potential is
(PK/IDIVF) * (1 + cos(PN*phi - PHASE))
PK The barrier height divided by a factor of 2.
PHASE The phase shift angle in the torsional function.
The unit is degrees.
PN The periodicity of the torsional barrier.
NOTE: If PN .lt. 0.0 then the torsional potential
is assumed to have more than one term, and the
values of the rest of the terms are read from the
next cards until a positive PN is encountered. The
negative value of pn is used only for identifying
the existence of the next term and only the
absolute value of PN is kept.
The input is terminated by a blank card.
Examples
--------
X -c -cy-X 6 0.000 180.000 2.000 JCC, 7, (1986), 230
X -c -ca-X 4 4.000 180.000 2.000 optimized by Junmei Wang, Jan-2013
X -c -cc-X 4 11.500 180.000 2.000 statistic value
"""
ipt = line[0:2].strip()
jpt = line[3:5].strip()
kpt = line[6:8].strip()
lpt = line[9:11].strip()
idivf = line[11:15].strip()
pk = line[15:30].strip()
phase = line[30:45].strip()
# prevent potential IndexError from line being too short
try:
pn = line[45:60].split()[0].strip()
except IndexError:
pn = line[45:-1].split()[0].strip()
return locals()
@staticmethod
def _parse_dat_improper_dihedral_parameters(line):
"""
Parse a line in a parm.dat file using improper dihedral format specification.
Parameters
----------
line : str
Single line string containing dihedral parameters in a parm.dat file.
Returns
-------
dict containing
ipt : str
jpt : str
kpt : str
lpt : str
idivf : str
pk : str
phase : str
pn : str
line : str
Notes
-----
Original format specification http://ambermd.org/formats.html#parm.dat
- 7 - ***** INPUT FOR IMPROPER DIHEDRAL PARAMETERS *****
IPT , JPT , KPT , LPT , IDIVF , PK , PHASE , PN
FORMAT(A2,1X,A2,1X,A2,1X,A2,I4,3F15.2)
The input is the same as in for the dihedrals except that
the torsional barrier height is NOT divided by the factor
idivf. The improper torsions are defined between any four
atoms not bonded (in a successive fashion) with each other
as in the case of "regular" or "proper" dihedrals. Improper
dihedrals are used to keep certain groups planar and to
prevent the racemization of certain centers in the united
atom model. Consult the above reference for details.
Important note: all general type improper dihedrals
(e.g. x -x -ct-hc) should appear before all
specifics (ct-ct-ct-hc) in the parm list.
Otherwise the generals will override the
specific with no warning.
The input is terminated by a blank card.
Examples
--------
X -o -c -o 1.1 180. 2. JCC,7,(1986),230
X -X -c -o 10.5 180. 2. JCC,7,(1986),230
"""
ipt = line[0:2].strip()
jpt = line[3:5].strip()
kpt = line[6:8].strip()
lpt = line[9:11].strip()
idivf = line[11:15].strip()
pk = line[15:30].strip()
phase = line[30:45].strip()
# prevent potential IndexError from line being too short
try:
pn = line[45:60].split()[0].strip()
except IndexError:
pn = line[45:-1].split()[0].strip()
return locals()
@staticmethod
def _parse_dat_6_12_equivalence_symbols(line):
"""
Parse a line in a parm.dat file using equivalencing symbols for non-bonded 6-12 potential specification.
Parameters
----------
line : str
Single line string containing equivalencing symbols for 6-12 non-bonded parameters in a parm.dat file.
Returns
-------
dict containing
iorg : str
ieqv : list of str
line : str
Notes
-----
Original format specification http://ambermd.org/formats.html#parm.dat
- 9 - ***** INPUT FOR EQUIVALENCING ATOM SYMBOLS FOR
THE NON-BONDED 6-12 POTENTIAL PARAMETERS *****
IORG , IEQV(I) , I = 1 , 19
FORMAT(20(A2,2X))
IORG The atom symbols to which other atom symbols are to be
equivalenced in generating the 6-12 potential parameters.
IEQV(I) The atoms symbols which are to be equivalenced to the
atom symbol "IORG". If more than 19 atom symbols have
to be equivalenced to a given atom symbol they can be
included as extra cards.
It is advisable not to equivalence any hydrogen bond
atom type atoms with any other atom types.
NOTE: The input is terminated by a blank card.
"""
ieqv = list()
iorg = line[0:2].strip()
# continue adding names till line runs out,
# or reaches 19 which is the maximum according to format
try:
for n in range(1,20):
ieqv.append(line[4*n:4*n+2].split()[0].strip())
except IndexError:
pass
return dict(ieqv=ieqv, iorg=iorg, line=line)
@staticmethod
def _parse_dat_6_12_potential_kind(line):
"""
Parse a line in a parm.dat file using input for non-bonded 6-12 potential specification.
Parameters
----------
line : str
Single line string containing the input format for 6-12 non-bonded parameters in a parm.dat file.
Returns
-------
dict containing
label : str
kindnb : str
line : str
Notes
-----
Original format specification http://ambermd.org/formats.html#parm.dat
- 10 - ***** INPUT FOR THE 6-12 POTENTIAL PARAMETERS *****
LABEL , KINDNB
FORMAT(A4,6X,A2)
LABEL The name of the non-bonded input parameter to be
used. It has to be matched with "NAMNB" read through
unit 5. The program searches the file to load the
the required non-bonded parameters. If that name is
not found the run will be terminated.
KINDNB Flag for the type of 6-12 parameters.
'SK' Slater-Kirkwood parameters are input.
see "caution" below.
'RE' van der Waals radius and the potential well depth
parameters are read.
'AC' The 6-12 potential coefficients are read.
NOTE: All the non equivalenced atoms' parameters have to
be given.
The input is terminated when label .eq. 'END'
Examples
--------
MOD4 RE
"""
label = line[0:4].strip()
kindnb = line[10:12].strip()
if kindnb not in ["SK", "RE", "AC"]:
raise ValueError("Unsupported 6-12 potential format {kindb}".format(**locals()))
return locals()
@staticmethod
def _parse_dat_6_12_nb_parameters(line, kindnb):
"""
Parse a line in a parm.dat file using RE format for 6-12 potential specification.
Parameters
----------
line : str
Single line string containing equivalencing symbols for 6-12 non-bonded parameters in a parm.dat file.
kindnb : str
The kind of format for the nonbonded parameter line ("SK", "RE", or "AC")
Returns
-------
dict containing
ltynb : str
line : str
kindnb : str
and for SK
pol : str
xneff : str
rmin :str
or for RE
r : str
edep : str
or for AC
a : str
c : str
Notes
-----
This code assumes the format is FORMAT(2X,A2,6X,2F10.6) for 10B and 10C
Original format specification http://ambermd.org/formats.html#parm.dat
- 10A - ***** ONLY IF KINDNB .EQ. 'SK' *****
LTYNB , POL , XNEFF , RMIN
FORMAT(2X,A2,6X,3F10.6)
LTYNB Atom symbol.
POL Atomic polarizability for the atom centers having the
the above symbol.
XNEFF Effective number of electrons on the atom centers having
the above symbol.
RMIN van der Waals radius of the atom center having the above
symbol.
- 10B - ***** ONLY IF KINDNB .EQ. 'RE' *****
LTYNB , R , EDEP
LTYNB Atom symbol.
R The van der Waals radius of the atoms having the symbol
"LTYNB" (Angstoms)
EDEP The 6-12 potential well depth. (kcal/mol)
------------------------------------------------------------------------
- 10C - ***** ONLY IF KINDNB .EQ. 'AC' *****
LTYNB , A , C
LTYNB Atom symbol.
A The coefficient of the 12th power term (A/r**12).
C The coefficient of the 6th power term (-C/r**6).
Examples
--------
c1 1.9080 0.2100 cp C DLM 11/2007 well depth from OPLS replacing 0.0860
c2 1.9080 0.0860 sp2 atom in the middle of C=CD-CD=C
"""
ltynb = line[2:4].strip()
if kindnb.upper() == "SK":
pol = line[10:20].strip()
xneff = line[20:30].strip()
# prevent IndexError from line being too short
try:
rmin = line[30:40].split()[0].strip()
except IndexError:
rmin = line[30:-1].split()[0].strip()
elif kindnb.upper() == "RE":
r = line[10:20].strip()
# prevent IndexError from line being too short
try:
edep = line[20:30].split()[0].strip()
except IndexError:
edep = line[20:-1].split()[0].strip()
elif kindnb.upper() == "AC":
a = line[10:20].strip()
# prevent IndexError from line being too short
try:
c = line[20:30].split()[0].strip()
except IndexError:
c = line[20:-1].split()[0].strip()
else:
raise ValueError("Unsupported NB format {nbformat}".format(**locals()))
return locals()
def process_frc_file(self, inputfile):
"""Process an AMBER .frc file.
Parameters
----------
inputfile : str
filename of an .frc file
"""
block = ''
continueTorsion = False
first = True
for line in open(inputfile):
line = line.strip()
if len(line) == 0 or first:
block = None
first = False
elif block is None:
block = line
elif block.startswith('MASS'):
fields = line.split()
self.masses[fields[0]] = float(fields[1])
elif block.startswith('BOND'):
fields = line[5:].split()
self.bonds.append((line[:2].strip(), line[3:5].strip(), fields[0], fields[1]))
elif block.startswith('ANGL'):
fields = line[8:].split()
self.angles.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), fields[0], fields[1]))
elif block.startswith('DIHE'):
fields = line[11:].split()
periodicity = int(float(fields[3]))
if continueTorsion:
self.torsions[-1] += [float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)]
else:
self.torsions.append([line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)])
continueTorsion = (periodicity < 0)
elif block.startswith('IMPR'):
fields = line[11:].split()
self.impropers.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), fields[0], fields[1], fields[2]))
elif block.startswith('NONB'):
fields = line.split()
self.vdw[fields[0]] = (fields[1], fields[2])
def generate_xml(self):
"""Return the processed forcefield files as an XML stream.
Returns
-------
stream : cStringIO
The text of the output XML forcefield data.
Notes
-----
The stream can be written to disk via:
outfile = open("my_forcefield.xml", 'w')
outfile.write(stream.read())
outfile.close()
"""
stream = cStringIO()
write_stream = lambda x: stream.write(x + "\n")
write_stream(self.provenance)
write_stream("<ForceField>")
write_stream(" <AtomTypes>")
for index, type in enumerate(self.types):
write_stream(""" <Type name="%s" class="%s" element="%s" mass="%s"/>""" % (self.type_names[index], type[0], type[1].symbol, type[1].mass.value_in_unit(unit.amu)))
write_stream(" </AtomTypes>")
write_stream(" <Residues>")
for res in sorted(self.residueAtoms):
write_stream(""" <Residue name="%s">""" % res)
for atom in self.residueAtoms[res]:
atom_name, type_id = tuple(atom)
atom_type = self.type_names[type_id]
write_stream(" <Atom name=\"%s\" type=\"%s\"/>" % (atom_name, atom_type))
if res in self.residueBonds:
for bond in self.residueBonds[res]:
write_stream(""" <Bond from="%d" to="%d"/>""" % bond)
if res in self.residueConnections:
for bond in self.residueConnections[res]:
write_stream(""" <ExternalBond from="%d"/>""" % bond)
write_stream(" </Residue>")
write_stream(" </Residues>")
write_stream(" <HarmonicBondForce>")
processed = set()
for bond in self.bonds:
signature = (bond[0], bond[1])
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
length = float(bond[3]) * 0.1
k = float(bond[2]) * 2 * 100 * 4.184
write_stream(""" <Bond class1="%s" class2="%s" length="%s" k="%s"/>""" % (bond[0], bond[1], str(length), str(k)))
write_stream(" </HarmonicBondForce>")
write_stream(" <HarmonicAngleForce>")
processed = set()
for angle in self.angles:
signature = (angle[0], angle[1], angle[2])
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
theta = float(angle[4]) * math.pi / 180.0
k = float(angle[3]) * 2 * 4.184
write_stream(""" <Angle class1="%s" class2="%s" class3="%s" angle="%s" k="%s"/>""" % (angle[0], angle[1], angle[2], str(theta), str(k)))
write_stream(" </HarmonicAngleForce>")
write_stream(" <PeriodicTorsionForce>")
processed = set()
for tor in reversed(self.torsions):
signature = (fix(tor[0]), fix(tor[1]), fix(tor[2]), fix(tor[3]))
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
tag = " <Proper class1=\"%s\" class2=\"%s\" class3=\"%s\" class4=\"%s\"" % signature
i = 4
while i < len(tor):
index = i // 3
periodicity = int(float(tor[i + 2]))
phase = float(tor[i + 1]) * math.pi / 180.0
k = tor[i] * 4.184
tag += " periodicity%d=\"%d\" phase%d=\"%s\" k%d=\"%s\"" % (index, periodicity, index, str(phase), index, str(k))
i += 3
tag += "/>"
write_stream(tag)
processed = set()
for tor in reversed(self.impropers):
signature = (fix(tor[2]), fix(tor[0]), fix(tor[1]), fix(tor[3]))
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
tag = " <Improper class1=\"%s\" class2=\"%s\" class3=\"%s\" class4=\"%s\"" % signature
i = 4
while i < len(tor):
index = i // 3
periodicity = int(float(tor[i + 2]))
phase = float(tor[i + 1]) * math.pi / 180.0
k = float(tor[i]) * 4.184
tag += " periodicity%d=\"%d\" phase%d=\"%s\" k%d=\"%s\"" % (index, periodicity, index, str(phase), index, str(k))
i += 3
tag += "/>"
write_stream(tag)
write_stream(" </PeriodicTorsionForce>")
write_stream(""" <NonbondedForce coulomb14scale="%g" lj14scale="%s">""" % (charge14scale, epsilon14scale))
sigmaScale = 0.1 * 2.0 / (2.0 ** (1.0 / 6.0))
for index, type in enumerate(self.types):
atomClass = type[0]
q = type[2]
if atomClass in self.vdwEquivalents:
atomClass = self.vdwEquivalents[atomClass]
if atomClass in self.vdw:
params = [float(x) for x in self.vdw[atomClass]]
if self.vdwType == 'RE':
sigma = params[0] * sigmaScale
epsilon = params[1] * 4.184
else:
sigma = (params[0] / params[1]) ** (1.0 / 6.0)
epsilon = 4.184 * params[1] * params[1] / (4 * params[0])
else:
sigma = 1.0
epsilon = 0
if q != 0 or epsilon != 0:
write_stream(""" <Atom type="%s" charge="%s" sigma="%s" epsilon="%s"/>""" % (self.type_names[index], q, sigma, epsilon))
write_stream(" </NonbondedForce>")
write_stream("</ForceField>")
stream.seek(0)
return stream
def parse_filenames(self, filenames):
"""Process a list of filenames according to their filetype suffixes
Parameters
----------
filenames : list (of strings)
List of filenames of type (lib, off, dat, or mol2)
Notes
-----
When parameterizing small molecules, the correct order of inputs is:
$AMBER_LIB_PATH/gaff.dat ligand_name.mol2 ligand_name.frcmod
"""
for inputfile in filenames:
if inputfile.endswith('.lib') or inputfile.endswith('.off'):
self.process_library_file(inputfile)
elif inputfile.endswith('.dat'):
self.process_dat_file(inputfile)
elif inputfile.endswith("mol2"):
self.process_mol2_file(inputfile)
else:
self.process_frc_file(inputfile)
self.reduce_atomtypes()
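# Usage sketch (added comment; file names are hypothetical and follow the input
# ordering described in the parse_filenames docstring):
#   parser = AmberParser(override_mol2_residue_name="LIG")
#   parser.parse_filenames(["gaff.dat", "ligand_name.mol2", "ligand_name.frcmod"])
#   with open("ligand_name.xml", "w") as outfile:
#       outfile.write(parser.generate_xml().read())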
def reduce_atomtypes(self, symmetrize_protons=False):
"""Reduce the list of atom self.types.
Parameters
----------
symmetrize_protons : bool, default=False
if True, multiple hydrogens bound to the same heavy atom
should all use the same type.
Notes
-----
The default behavior of symmetrize_protons differs from the
original OpenMM version of this script. For arbitrary small
molecules, we can not assume symmetric protons.
"""
removeType = [False] * len(self.types)
for res in self.residueAtoms:
if res not in self.residueBonds:
continue
atomBonds = [[] for atom in self.residueAtoms[res]]
for bond in self.residueBonds[res]:
atomBonds[bond[0]].append(bond[1])
atomBonds[bond[1]].append(bond[0])
if symmetrize_protons is True:
for index, atom in enumerate(self.residueAtoms[res]):
hydrogens = [x for x in atomBonds[index] if self.types[self.residueAtoms[res][x][1]][1] == hydrogen]
for h in hydrogens[1:]:
removeType[self.residueAtoms[res][h][1]] = True
self.residueAtoms[res][h][1] = self.residueAtoms[res][hydrogens[0]][1]
newTypes = []
replaceWithType = [0] * len(self.types)
for i in range(len(self.types)):
if not removeType[i]:
newTypes.append(self.types[i])
replaceWithType[i] = len(newTypes) - 1
self.types = newTypes
for res in self.residueAtoms:
for atom in self.residueAtoms[res]:
atom[1] = replaceWithType[atom[1]]
def set_provenance(self):
"""Set the provenance attribute with information about the current python session."""
self.provenance = []
line = """<!-- %s -->\n""" % "Time and parameters of origin:"
self.provenance.append(line)
now = datetime.datetime.now()
line = """<!-- %s -->\n""" % str(now)
self.provenance.append(line)
cmd_string = subprocess.list2cmdline(sys.argv[1:])
cmd_string = cmd_string.replace("-", " ") # Replace XML specific characters that can break some XML parsers
cmd_string = cmd_string.replace(">", " ") #
cmd_string = cmd_string.replace("<", " ") #
line = """<!-- %s -->\n""" % cmd_string
self.provenance.append(line)
self.provenance = "".join(self.provenance)
| choderalab/openmoltools | openmoltools/amber_parser.py | Python | mit | 40,943 | ["Amber", "MDTraj", "OpenMM"] | 46bdda48242aebca7863269fe045bc5d315a8465e7bd445f3899a36fe08dac83 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions that act on molecule objects."""
from typing import Dict, Tuple, Union
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver.p4util import temp_circular_import_blocker
from psi4.driver import qcdb
from psi4.driver.p4util.exceptions import *
def molecule_set_attr(self, name, value):
"""Function to redefine __setattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "set_variable")
fxn(name, value)
return
object.__setattr__(self, name, value)
def molecule_get_attr(self, name):
"""Function to redefine __getattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "get_variable")
return fxn(name)
return object.__getattribute__(self, name)
@classmethod
def _molecule_from_string(cls,
molstr,
dtype=None,
name=None,
fix_com=None,
fix_orientation=None,
fix_symmetry=None,
return_dict=False,
enable_qm=True,
enable_efp=True,
missing_enabled_return_qm='none',
missing_enabled_return_efp='none',
verbose=1):
molrec = qcel.molparse.from_string(
molstr=molstr,
dtype=dtype,
name=name,
fix_com=fix_com,
fix_orientation=fix_orientation,
fix_symmetry=fix_symmetry,
return_processed=False,
enable_qm=enable_qm,
enable_efp=enable_efp,
missing_enabled_return_qm=missing_enabled_return_qm,
missing_enabled_return_efp=missing_enabled_return_efp,
verbose=verbose)
if return_dict:
return core.Molecule.from_dict(molrec['qm']), molrec
else:
return core.Molecule.from_dict(molrec['qm'])
@classmethod
def _molecule_from_arrays(cls,
geom=None,
elea=None,
elez=None,
elem=None,
mass=None,
real=None,
elbl=None,
name=None,
units='Angstrom',
input_units_to_au=None,
fix_com=None,
fix_orientation=None,
fix_symmetry=None,
fragment_separators=None,
fragment_charges=None,
fragment_multiplicities=None,
molecular_charge=None,
molecular_multiplicity=None,
comment=None,
provenance=None,
connectivity=None,
missing_enabled_return='error',
tooclose=0.1,
zero_ghost_fragments=False,
nonphysical=False,
mtol=1.e-3,
verbose=1,
return_dict=False):
"""Construct Molecule from unvalidated arrays and variables.
Light wrapper around :py:func:`~qcelemental.molparse.from_arrays`
that is a full-featured constructor to dictionary representation of
Molecule. This follows one step further to return a Molecule instance.
Parameters
----------
See :py:func:`~qcelemental.molparse.from_arrays`.
Returns
-------
:py:class:`psi4.core.Molecule`
"""
molrec = qcel.molparse.from_arrays(
geom=geom,
elea=elea,
elez=elez,
elem=elem,
mass=mass,
real=real,
elbl=elbl,
name=name,
units=units,
input_units_to_au=input_units_to_au,
fix_com=fix_com,
fix_orientation=fix_orientation,
fix_symmetry=fix_symmetry,
fragment_separators=fragment_separators,
fragment_charges=fragment_charges,
fragment_multiplicities=fragment_multiplicities,
molecular_charge=molecular_charge,
molecular_multiplicity=molecular_multiplicity,
comment=comment,
provenance=provenance,
connectivity=connectivity,
domain='qm',
missing_enabled_return=missing_enabled_return,
tooclose=tooclose,
zero_ghost_fragments=zero_ghost_fragments,
nonphysical=nonphysical,
mtol=mtol,
verbose=verbose)
if return_dict:
return core.Molecule.from_dict(molrec), molrec
else:
return core.Molecule.from_dict(molrec)
@classmethod
def _molecule_from_schema(cls, molschema: Dict, return_dict: bool = False, nonphysical: bool = False, verbose: int = 1) -> Union[core.Molecule, Tuple[core.Molecule, Dict]]:
"""Construct Molecule from non-Psi4 schema.
Light wrapper around :py:func:`~psi4.core.Molecule.from_arrays`.
Parameters
----------
molschema
Dictionary form of Molecule following known schema.
return_dict
Additionally return Molecule dictionary intermediate.
nonphysical
Do allow masses outside an element's natural range to pass validation?
verbose
Amount of printing.
Returns
-------
mol : :py:class:`psi4.core.Molecule`
molrec : dict
Dictionary representation of instance.
Only provided if `return_dict` is True.
"""
molrec = qcel.molparse.from_schema(molschema, nonphysical=nonphysical, verbose=verbose)
qmol = core.Molecule.from_dict(molrec)
geom = np.array(molrec["geom"]).reshape((-1, 3))
qmol._initial_cartesian = core.Matrix.from_array(geom)
if return_dict:
return qmol, molrec
else:
return qmol
def dynamic_variable_bind(cls):
"""Function to dynamically add extra members to
the core.Molecule class.
"""
cls.__setattr__ = molecule_set_attr
cls.__getattr__ = molecule_get_attr
cls.to_arrays = qcdb.Molecule.to_arrays
cls.to_dict = qcdb.Molecule.to_dict
cls.BFS = qcdb.Molecule.BFS
cls.B787 = qcdb.Molecule.B787
cls.scramble = qcdb.Molecule.scramble
cls.from_arrays = _molecule_from_arrays
cls.from_string = _molecule_from_string
cls.to_string = qcdb.Molecule.to_string
cls.from_schema = _molecule_from_schema
cls.to_schema = qcdb.Molecule.to_schema
cls.run_dftd3 = qcdb.Molecule.run_dftd3
cls.run_gcp = qcdb.Molecule.run_gcp
cls.format_molecule_for_mol = qcdb.Molecule.format_molecule_for_mol
dynamic_variable_bind(core.Molecule) # pass class type, not class instance
#
# Define geometry to be used by PSI4.
# The molecule created by this will be set in options.
#
# geometry("
# O 1.0 0.0 0.0
# H 0.0 1.0 0.0
# H 0.0 0.0 0.0
# ")
def geometry(geom, name="default"):
"""Function to create a molecule object of name *name* from the
geometry in string *geom*. Permitted for user use but deprecated
in driver in favor of explicit molecule-passing. Comments within
the string are filtered.
"""
molrec = qcel.molparse.from_string(
geom, enable_qm=True, missing_enabled_return_qm='minimal', enable_efp=True, missing_enabled_return_efp='none')
molecule = core.Molecule.from_dict(molrec['qm'])
if "geom" in molrec["qm"]:
geom = np.array(molrec["qm"]["geom"]).reshape((-1, 3))
if molrec["qm"]["units"] == "Angstrom":
geom = geom / qcel.constants.bohr2angstroms
molecule._initial_cartesian = core.Matrix.from_array(geom)
molecule.set_name(name)
if 'efp' in molrec:
try:
import pylibefp
except ImportError as e: # py36 ModuleNotFoundError
raise ImportError("""Install pylibefp to use EFP functionality. `conda install pylibefp -c psi4` Or build with `-DENABLE_libefp=ON`""") from e
#print('Using pylibefp: {} (version {})'.format(pylibefp.__file__, pylibefp.__version__))
efpobj = pylibefp.from_dict(molrec['efp'])
# pylibefp.core.efp rides along on molecule
molecule.EFP = efpobj
# Attempt to go ahead and construct the molecule
try:
molecule.update_geometry()
except Exception:
core.print_out("Molecule: geometry: Molecule is not complete, please use 'update_geometry'\n"
" once all variables are set.\n")
activate(molecule)
return molecule
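# Usage sketch (added comment; coordinates are illustrative, not from the original file):
#   h2o = geometry("""
#   0 1
#   O  0.000  0.000  0.000
#   H  0.757  0.586  0.000
#   H -0.757  0.586  0.000
#   """, name="water")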
def activate(mol):
"""Function to set molecule object *mol* as the current active molecule.
Permitted for user use but deprecated in driver in favor of explicit
molecule-passing.
"""
core.set_active_molecule(mol)
| psi-rking/psi4 | psi4/driver/molutil.py | Python | lgpl-3.0 | 9,830 | ["Psi4"] | 1adbddd434f2d3cfe4444e29bf4cc888f2817ff8fedf50799a4c1fc18e1bb67e |
import tables
import glob
import os
import mdtraj as md
import mdtraj.utils.fah
n_runs = 1
n_clones = 500 # To do: look this up via glob
project = 10468
codename = {10466:"T4", 10467:"src", 10468:"abl", 10469:"EGFR"}[project]
input_data_path = "/data/choderalab/fah/analysis/%d/concatenated_trajectories/" % project
filenames = glob.glob(os.path.join(input_data_path, "run*-clone*.h5"))
for filename in filenames:
file = tables.File(filename, 'r')
# Close the HDF5 handle before deleting the file on disk.
has_coordinates = "coordinates" in file.root
file.close()
if not has_coordinates:
os.unlink(filename)
print("deleted")
| kyleabeauchamp/fah-projects | code/analysis/delete_bad_trajectories.py | Python | gpl-2.0 | 550 | ["MDTraj"] | 3b2e0fd3b89b4f9413ea83a101b1f8a52af61046218c0a571d124b4aa5341426 |
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Furthermore, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance matrix, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model, is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores_, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| jaidevd/scikit-learn | examples/covariance/plot_sparse_cov.py | Python | bsd-3-clause | 5,079 | ["Gaussian"] | 6fef3fb0a6d887d8549d085b8975ccbe4aad5d9e175b59ccc7cdb9fd58cb0556 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, print_function
import os
import re
import sys
import uuid
import codecs
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from pip.req import parse_requirements
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
pypi_readme_note = """\
.. note::
For the latest source, discussions, etc., please visit the
`GitHub repository <https://github.com/OohlaLabs/uniauth>`_
"""
setup(
name='uniauth',
version=find_version('uniauth', '__init__.py'),
author='OohlaLabs Limited',
author_email='packages@oohlalabs.com',
maintainer='Thierry Jossermoz',
maintainer_email='thierry.jossermoz@oohlalabs.com',
url="https://github.com/OohlaLabs/uniauth",
description="Minimalist and framework independent OAuth(1 & 2) consumers",
long_description="\n\n".join([pypi_readme_note, read('README.rst')]),
install_requires=[str(ir.req) for ir in parse_requirements('requirements.txt', session=uuid.uuid1())],
packages=find_packages(),
tests_require=["tox"],
cmdclass={"test": Tox},
license='MIT',
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
]
)
| jthi3rry/uniauth | setup.py | Python | mit | 2,280 | ["VisIt"] | 34bfd2d9d80fdc3ad627bbc3e88fe8b6886f4060e8c0ce9a83da3d4f6f114613 |
# -*- coding: utf-8 -*-
import hashlib
from .sailthru_http import sailthru_http_request
try:
import simplejson as json
except ImportError:
import json
def extract_params(params):
"""
Extracts the values of a set of parameters, recursing into nested dictionaries.
"""
values = []
if isinstance(params, dict):
for key, value in params.items():
values.extend(extract_params(value))
elif isinstance(params, list):
for value in params:
values.extend(extract_params(value))
else:
values.append(params)
return values
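# Example (added for illustration):
#   extract_params({"a": 1, "b": [2, {"c": 3}]})  ->  [1, 2, 3]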
def get_signature_string(params, secret):
"""
Returns the unhashed signature string (secret + sorted list of param values) for an API call.
@param params: dictionary values to generate signature string
@param secret: secret string
"""
str_list = [str(item) for item in extract_params(params)]
str_list.sort()
return (secret + ''.join(str_list)).encode('utf-8')
def get_signature_hash(params, secret):
"""
Returns an MD5 hash of the signature string for an API call.
@param params: dictionary values to generate signature hash
@param secret: secret string
"""
return hashlib.md5(get_signature_string(params, secret)).hexdigest()
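# Worked example (added for illustration; values are hypothetical): with
# params = {"api_key": "key", "format": "json"} and secret = "secret", the sorted
# parameter values are ["json", "key"], so get_signature_string returns
# b"secretjsonkey" and get_signature_hash returns the MD5 hex digest of that string.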
class SailthruClient(object):
"""
This class makes HTTP Request to Sailthru API server
Response from server depends on the format being queried
Usage:
from sailthru import SailthruClient
api_key = "your-api-key"
api_secret = "api-secret"
client = SailthruClient(api_key, api_secret)
"""
def __init__(self, api_key, secret, api_url=None, request_timeout=10):
self.api_key = api_key
self.secret = secret
self.api_url = api_url if api_url else 'https://api.sailthru.com'
self.request_timeout = request_timeout
self.last_rate_limit_info = {}
def send(self, template, email, _vars=None, options=None, schedule_time=None, limit=None):
"""
Remotely send an email template to a single email address.
http://docs.sailthru.com/api/send
@param template: template string
@param email: Email value
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param limit: optional dictionary to name, time, and handle conflicts of limits
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
"""
_vars = _vars or {}
options = options or {}
data = {'template': template,
'email': email,
'vars': _vars,
'options': options.copy()}
if limit:
data['limit'] = limit.copy()
if schedule_time is not None:
data['schedule_time'] = schedule_time
return self.api_post('send', data)
def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None):
"""
Remotely send an email template to multiple email addresses.
http://docs.sailthru.com/api/send
@param template: template string
@param emails: List with email values or comma separated email string
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
"""
_vars = _vars or {}
evars = evars or {}
options = options or {}
data = {'template': template,
'email': ','.join(emails) if isinstance(emails, list) else emails,
'vars': _vars.copy(),
'evars': evars.copy(),
'options': options.copy()}
if schedule_time is not None:
data['schedule_time'] = schedule_time
return self.api_post('send', data)
def get_send(self, send_id):
"""
Get the status of a send
"""
return self.api_get('send', {'send_id': send_id})
def cancel_send(self, send_id):
"""
Cancels an email that you had previously scheduled for future sending with the schedule_time parameter. It is not possible to cancel an email that has not been scheduled.
"""
return self.api_delete('send', {'send_id': send_id})
def get_email(self, email):
"""
DEPRECATED!
get user email data
http://docs.sailthru.com/api/email
"""
data = {'email': email}
return self._api_request('email', data, 'GET')
def set_email(self, email, _vars=None, lists=None, templates=None, verified=0, optout=None, send=None, send_vars=None):
"""
DEPRECATED!
Update information about one of your users, including adding and removing the user from lists.
http://docs.sailthru.com/api/email
"""
_vars = _vars or {}
lists = lists or []
templates = templates or []
send_vars = send_vars or []
data = {'email': email,
'vars': _vars.copy(),
'lists': lists,
'templates': templates,
'verified': int(verified)}
if optout is not None:
data['optout'] = optout
if send is not None:
data['send'] = send
if send_vars:
data['send_vars'] = send_vars
return self.api_post('email', data)
def get_user(self, idvalue, options=None):
"""
get user by a given id
http://getstarted.sailthru.com/api/user
"""
options = options or {}
data = options.copy()
data['id'] = idvalue
return self.api_get('user', data)
def save_user(self, idvalue, options=None):
"""
save user by a given id
http://getstarted.sailthru.com/api/user
"""
options = options or {}
data = options.copy()
data['id'] = idvalue
return self.api_post('user', data)
def schedule_blast(self, name, list, schedule_time, from_name, from_email, subject, content_html, content_text, options=None):
"""
Schedule a mass mail blast
http://docs.sailthru.com/api/blast
@param name: name to give to this new blast
@param list: mailing list name to send to
@param schedule_time: when the blast should send. Dates in the past will be scheduled for immediate delivery. Any English textual datetime format known to PHP's strtotime function is acceptable, such as 2009-03-18 23:57:22 UTC, now (immediate delivery), +3 hours (3 hours from now), or February 14, 9:30 EST. Be sure to specify a timezone if you use an exact time.
@param from_name: name appearing in the "From" of the email
@param from_email: email address to use as the "from" - choose from any of your verified emails
@param subject: subject line of the email
@param content_html: HTML format version of the email
@param content_text: Text format version of the email
@param options: optional parameters dictionary
blast_id
copy_blast
copy_template
replyto
report_email
is_link_tracking
is_google_analytics
is_public
suppress_list
test_vars
email_hour_range
abtest
test_percent
data_feed_url
"""
options = options or {}
data = options.copy()
data['name'] = name
data['list'] = list
data['schedule_time'] = schedule_time
data['from_name'] = from_name
data['from_email'] = from_email
data['subject'] = subject
data['content_html'] = content_html
data['content_text'] = content_text
return self.api_post('blast', data)
def schedule_blast_from_template(self, template, list_name, schedule_time, options=None):
"""
Schedule a mass mail blast from template
http://docs.sailthru.com/api/blast
@param template: template to copy from
@param list_name: list to send to
@param schedule_time
@param options: additional optional params
"""
options = options or {}
data = options.copy()
data['copy_template'] = template
data['list'] = list_name
data['schedule_time'] = schedule_time
return self.api_post('blast', data)
def schedule_blast_from_blast(self, blast_id, schedule_time, options=None):
"""
Schedule a mass mail blast from previous blast
http://docs.sailthru.com/api/blast
@param blast_id: blast_id to copy from
@param schedule_time
@param options: additional optional params
"""
options = options or {}
data = options.copy()
data['copy_blast'] = blast_id
data['schedule_time'] = schedule_time
return self.api_post('blast', data)
def update_blast(self, blast_id, name=None, list=None, schedule_time=None, from_name=None, from_email=None,
subject=None, content_html=None, content_text=None, options=None):
"""
updates existing blast
http://docs.sailthru.com/api/blast
@param blast_id: blast id
@param name: name of the blast
@param list: blast list
@param schedule_time: new schedule time
@param from_name: name appearing in the "From" of the email
@param from_email: email address to use as the "from" - choose from any of your verified emails
@param subject: subject line of the email
@param content_html: HTML format version of the email
@param content_text: Text format version of the email
@param options: optional parameters dictionary
blast_id
copy_blast
copy_template
replyto
report_email
is_link_tracking
is_google_analytics
is_public
suppress_list
test_vars
email_hour_range
abtest
test_percent
data_feed_url
"""
options = options or {}
data = options.copy()
data['blast_id'] = blast_id
if name is not None:
data['name'] = name
if list is not None:
data['list'] = list
if schedule_time is not None:
data['schedule_time'] = schedule_time
if from_name is not None:
data['from_name'] = from_name
if from_email is not None:
data['from_email'] = from_email
if subject is not None:
data['subject'] = subject
if content_html is not None:
data['content_html'] = content_html
if content_text is not None:
data['content_text'] = content_text
return self.api_post('blast', data)
def get_blast(self, blast_id):
"""
Get Blast information
http://docs.sailthru.com/api/blast
"""
return self.api_get('blast', {'blast_id': blast_id})
def delete_blast(self, blast_id):
"""
delete existing blast
"""
return self.api_delete('blast', {'blast_id': blast_id})
def cancel_blast(self, blast_id):
"""
Cancel a scheduled Blast
"""
data = {'blast_id': blast_id,
'schedule_time': ''}
return self.api_post('blast', data)
def get_template(self, template_name):
"""
get information of a given template
"""
return self.api_get('template', {'template': template_name})
def get_templates(self):
"""
get metadata for all user templates
"""
return self.api_get('template', {})
def delete_template(self, template_name):
"""
delete existing template
"""
data = {'template': template_name}
return self.api_delete('template', data)
def save_template(self, template, template_fields=None):
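        """
        Save or update a template
        @param template: template name
        @param template_fields: optional dictionary of template fields to set
        """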
data = {'template': template}
if template_fields:
data.update(template_fields)
return self.api_post('template', data)
def get_list(self, list_name, options=None):
"""
Get detailed metadata information about a list.
"""
options = options or {}
data = {'list': list_name}
data.update(options)
return self.api_get('list', data)
def get_lists(self):
"""
Get metadata for all lists
"""
return self.api_get('list', {})
def save_list(self, list_name, emails):
"""
Upload a list. The list import job is queued and will happen shortly after the API request.
http://docs.sailthru.com/api/list
        @param list_name: list name
@param emails: List of email values or comma separated string
"""
data = {'list': list_name,
'emails': ','.join(emails) if isinstance(emails, list) else emails}
return self.api_post('list', data)
def delete_list(self, list_name):
"""
delete given list
http://docs.sailthru.com/api/list
"""
return self.api_delete('list', {'list': list_name})
def import_contacts(self, email, password, include_name=False):
"""
Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail.
"""
data = {'email': email,
'password': password}
if include_name:
data['names'] = 1
return self.api_post('contacts', data)
def push_content(self, title, url,
images=None, date=None, expire_date=None,
description=None, location=None, price=None,
tags=None,
author=None, site_name=None,
spider=None, vars=None):
"""
Push a new piece of content to Sailthru.
Expected names for the `images` argument's map are "full" and "thumb"
Expected format for `location` should be [longitude,latitude]
@param title: title string for the content
@param url: URL string for the content
@param images: map of image names
@param date: date string
@param expire_date: date string for when the content expires
@param description: description for the content
@param location: location of the content
@param price: price for the content
@param tags: list or comma separated string values
@param author: author for the content
@param site_name: site name for the content
@param spider: truthy value to force respidering content
@param vars: replaceable vars dictionary
"""
vars = vars or {}
data = {'title': title,
'url': url}
if images is not None:
data['images'] = images
if date is not None:
data['date'] = date
if expire_date is not None:
            data['expire_date'] = expire_date
if location is not None:
            data['location'] = location
if price is not None:
data['price'] = price
if description is not None:
data['description'] = description
if site_name is not None:
            data['site_name'] = site_name
if author is not None:
data['author'] = author
if spider:
data['spider'] = 1
if tags is not None:
data['tags'] = ",".join(tags) if isinstance(tags, list) else tags
if len(vars) > 0:
data['vars'] = vars.copy()
return self.api_post('content', data)
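    # Illustrative usage sketch (not part of the client API; the client
    # instance name, URLs and tag values below are made-up examples of the
    # formats described in the docstring -- "full"/"thumb" image keys and a
    # [longitude, latitude] location):
    #
    #   sc.push_content('Example title', 'http://example.com/article',
    #                   images={'full': 'http://example.com/full.jpg',
    #                           'thumb': 'http://example.com/thumb.jpg'},
    #                   location=[-73.99, 40.73],
    #                   tags=['example', 'demo'])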
def get_alert(self, email):
"""
Retrieve a user's alert settings.
"""
return self.api_get('alert', {'email': email})
def save_alert(self, email, type, template, when=None, options=None):
"""
Add a new alert to a user. You can add either a realtime or a summary alert (daily/weekly).
http://docs.sailthru.com/api/alert
Usage:
email = 'praj@sailthru.com'
type = 'weekly'
template = 'default'
when = '+5 hours'
alert_options = {'match': {}, 'min': {}, 'max': {}, 'tags': []}
alert_options['match']['type'] = 'shoes'
alert_options['min']['price'] = 20000 #cents
alert_options['tags'] = ['red', 'blue', 'green']
response = client.save_alert(email, type, template, when, alert_options)
@param email: Email value
@param type: daily|weekly|realtime
@param template: template name
@param when: date string required for summary alert (daily/weekly)
@param options: dictionary value for adding tags, max price, min price, match type
"""
options = options or {}
data = options.copy()
data['email'] = email
data['type'] = type
data['template'] = template
if type in ['weekly', 'daily']:
data['when'] = when
return self.api_post('alert', data)
def delete_alert(self, email, alert_id):
"""
delete user alert
"""
data = {'email': email,
'alert_id': alert_id}
return self.api_delete('alert', data)
def purchase(self, email, items=None, incomplete=None, message_id=None, options=None, extid=None):
"""
Record that a user has made a purchase, or has added items to their purchase total.
http://docs.sailthru.com/api/purchase
@param email: Email string
        @param items: list of item dictionaries with keys: id, title, price, qty, and url
@param message_id: message_id string
@param extid: external ID to track purchases
@param options: other options that can be set as per the API documentation
"""
        items = items or []
options = options or {}
data = options.copy()
data['email'] = email
data['items'] = items
if incomplete is not None:
data['incomplete'] = incomplete
if message_id is not None:
data['message_id'] = message_id
if extid is not None:
data['extid'] = extid
return self.api_post('purchase', data)
def get_purchase(self, purchase_id, purchase_key='sid'):
"""
Retrieve information about a purchase using the system's unique ID or a client's ID
        @param purchase_id: a string that represents a unique_id or an extid.
        @param purchase_key: a string that is either 'sid' or 'extid'.
"""
data = {'purchase_id': purchase_id,
'purchase_key': purchase_key}
return self.api_get('purchase', data)
def stats_list(self, list=None, date=None, headers=None):
"""
Retrieve information about your subscriber counts on a particular list, on a particular day.
http://docs.sailthru.com/api/stat
"""
data = {'stat': 'list'}
if list is not None:
data['list'] = list
if date is not None:
data['date'] = date
return self._stats(data, headers)
def stats_blast(self, blast_id=None, start_date=None, end_date=None, options=None):
"""
Retrieve information about a particular blast or aggregated information from all of blasts over a specified date range.
http://docs.sailthru.com/api/stat
"""
options = options or {}
data = options.copy()
if blast_id is not None:
data['blast_id'] = blast_id
if start_date is not None:
data['start_date'] = start_date
if end_date is not None:
data['end_date'] = end_date
data['stat'] = 'blast'
return self._stats(data)
def stats_send(self, template, start_date, end_date, options=None):
"""
Retrieve information about a particular transactional or aggregated information
from transactionals from that template over a specified date range.
http://docs.sailthru.com/api/stat
"""
options = options or {}
data = options.copy()
        data['template'] = template
        data['start_date'] = start_date
        data['end_date'] = end_date
        data['stat'] = 'send'
return self._stats(data)
def _stats(self, data, headers=None):
"""
Make Stats API Request
"""
return self.api_get('stats', data, headers)
def receive_verify_post(self, post_params):
"""
Returns true if the incoming request is an authenticated verify post.
"""
if isinstance(post_params, dict):
required_params = ['action', 'email', 'send_id', 'sig']
if not self.check_for_valid_postback_actions(required_params, post_params):
return False
else:
return False
if post_params['action'] != 'verify':
return False
sig = post_params['sig']
post_params = post_params.copy()
del post_params['sig']
if sig != get_signature_hash(post_params, self.secret):
return False
send_response = self.get_send(post_params['send_id'])
try:
send_body = send_response.get_body()
send_json = json.loads(send_body)
if 'email' not in send_body:
return False
if send_json['email'] != post_params['email']:
return False
except ValueError:
return False
return True
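    # Sketch of the verify-postback check above, with hypothetical values:
    # the 'sig' field is stripped, the remaining params are re-hashed with the
    # shared secret via get_signature_hash(), and the send is looked up to
    # confirm the email matches.
    #
    #   params = {'action': 'verify', 'email': 'user@example.com',
    #             'send_id': 'abc123', 'sig': '<signature from Sailthru>'}
    #   is_authentic = sc.receive_verify_post(params)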
def receive_update_post(self, post_params):
"""
        Returns true if the incoming request is an authenticated update postback.
"""
if isinstance(post_params, dict):
required_params = ['action', 'email', 'sig']
if not self.check_for_valid_postback_actions(required_params, post_params):
return False
else:
return False
if post_params['action'] != 'update':
return False
signature = post_params['sig']
post_params = post_params.copy()
del post_params['sig']
if signature != get_signature_hash(post_params, self.secret):
return False
return True
def receive_optout_post(self, post_params):
"""
        Returns true if the incoming request is an authenticated optout postback.
"""
if isinstance(post_params, dict):
required_params = ['action', 'email', 'sig']
if not self.check_for_valid_postback_actions(required_params, post_params):
return False
else:
return False
if post_params['action'] != 'optout':
return False
signature = post_params['sig']
post_params = post_params.copy()
del post_params['sig']
if signature != get_signature_hash(post_params, self.secret):
return False
return True
def receive_hardbounce_post(self, post_params):
"""
        Returns true if the incoming request is an authenticated hardbounce postback.
"""
if isinstance(post_params, dict):
required_params = ['action', 'email', 'sig']
if not self.check_for_valid_postback_actions(required_params, post_params):
return False
else:
return False
if post_params['action'] != 'hardbounce':
return False
signature = post_params['sig']
post_params = post_params.copy()
del post_params['sig']
if signature != get_signature_hash(post_params, self.secret):
return False
# for sends
if 'send_id' in post_params:
send_id = post_params['send_id']
send_response = self.get_send(send_id)
if not send_response.is_ok():
return False
send_obj = send_response.get_body()
if not send_obj or 'email' not in send_obj:
return False
# for blasts
if 'blast_id' in post_params:
blast_id = post_params['blast_id']
blast_response = self.get_blast(blast_id)
if not blast_response.is_ok():
return False
blast_obj = blast_response.get_body()
if not blast_obj:
return False
return True
def check_for_valid_postback_actions(self, required_keys, post_params):
"""
checks if post_params contain required keys
"""
for key in required_keys:
if key not in post_params:
return False
return True
def api_get(self, action, data, headers=None):
"""
Perform an HTTP GET request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
"""
return self._api_request(action, data, 'GET', headers)
def api_post(self, action, data, binary_data_param=None):
"""
Perform an HTTP POST request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
"""
binary_data_param = binary_data_param or []
if binary_data_param:
return self.api_post_multipart(action, data, binary_data_param)
else:
return self._api_request(action, data, 'POST')
def api_post_multipart(self, action, data, binary_data_param):
"""
Perform an HTTP Multipart POST request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
        @param binary_data_param: list of keys whose values are file paths to be sent as multipart data
"""
binary_data = {}
data = data.copy()
try:
file_handles = []
for param in binary_data_param:
if param in data:
                    binary_data[param] = file_handle = open(data[param], 'rb')
file_handles.append(file_handle)
del data[param]
json_payload = self._prepare_json_payload(data)
return self._http_request(action, json_payload, "POST", binary_data)
finally:
for file_handle in file_handles:
file_handle.close()
def api_delete(self, action, data):
"""
Perform an HTTP DELETE request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
"""
return self._api_request(action, data, 'DELETE')
def _api_request(self, action, data, request_type, headers=None):
"""
Make Request to Sailthru API with given data and api key, format and signature hash
"""
if 'file' in data:
file_data = {'file': open(data['file'], 'rb')}
else:
file_data = None
return self._http_request(action, self._prepare_json_payload(data), request_type, file_data, headers)
def _http_request(self, action, data, method, file_data=None, headers=None):
url = self.api_url + '/' + action
file_data = file_data or {}
response = sailthru_http_request(url, data, method, file_data, headers, self.request_timeout)
if (action in self.last_rate_limit_info):
self.last_rate_limit_info[action][method] = response.get_rate_limit_headers()
else:
self.last_rate_limit_info[action] = { method : response.get_rate_limit_headers() }
return response
def _prepare_json_payload(self, data):
payload = {'api_key': self.api_key,
'format': 'json',
'json': json.dumps(data)}
signature = get_signature_hash(payload, self.secret)
payload['sig'] = signature
return payload
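    # For reference, the payload built above has the following shape (values
    # are illustrative only): all request data travels in the 'json' field and
    # 'sig' is computed over the other fields with the shared secret.
    #
    #   {'api_key': '<key>',
    #    'format': 'json',
    #    'json': '{"email": "user@example.com"}',
    #    'sig': '<signature hash>'}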
def get_last_rate_limit_info(self, action, method):
"""
Get rate limit information for last API call
:param action: API endpoint
:param method: Http method, GET, POST or DELETE
:return: dict|None
"""
method = method.upper()
if (action in self.last_rate_limit_info and method in self.last_rate_limit_info[action]):
return self.last_rate_limit_info[action][method]
return None
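    # Example (illustrative): after issuing a GET against the 'user' endpoint,
    #   info = sc.get_last_rate_limit_info('user', 'GET')
    # returns the rate-limit headers captured from that response, or None if
    # no such call has been made yet.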
|
sailthru/sailthru-python-client
|
sailthru/sailthru_client.py
|
Python
|
mit
| 28,899
|
[
"BLAST"
] |
efc10c43d3b4092959bb9f41384e8927b12d11cc1d2495df8ed95221fdaa3224
|
""" :mod: DataManager
=======================
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
    This module contains the DataManager and related classes.
"""
# # RCSID
__RCSID__ = "$Id$"
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
from types import StringTypes, ListType, DictType, StringType, TupleType
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import sortList, randomize
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite, isSameSiteSE, getSEsForCountry
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Resources.Utilities import Utils
###############################
# The code commented under was design originally to modify the code of the ReplicaManager
# in a backward compatible way. Finally, it will normally not be used, but I keep it here for a little
# while, just in case...
#
# class StorageBackwardCompatibility( object ):
# """
# :class: This class ensures the backward compatibility with the previous implementation
# of the RM. It forwards the calls to the StorageElement. It is meant to disappear
# asap.
# """
#
# __deprecatedArguments = ["singleFile", "singleDirectory"] # Arguments that are now useless
#
# # Some methods have a different name in the ReplicaManager and the StorageElement...
# # We could avoid this static list in the __getattr__ by checking the StorageElement object and so on
# # but fine... let's not be too smart, otherwise it becomes unreadable :-)
# __equivalentMethodNames = {"getLfnForPfn" : "getPfnPath",
# "getPfnForLfn" : "getPfnForLfn",
# "getPfnForProtocol" : "getPfnForProtocol",
# "getPrestageStorageFileStatus" : "prestageFileStatus",
# "getStorageDirectory" : "getDirectory",
# "getStorageDirectoryIsDirectory" : "isDirectory",
# "getStorageDirectoryMetadata" : "getDirectoryMetadata",
# "getStorageDirectorySize" : "getDirectorySize",
# "getStorageFileAccessUrl" : "getAccessUrl",
# "getStorageFileExists" : "exists",
# "getStorageFile" : "getFile",
# "getStorageFileIsFile" : "isFile",
# "getStorageFileMetadata" : "getFileMetadata",
# "getStorageFileSize" : "getFileSize",
# "getStorageListDirectory" : "listDirectory",
# "pinStorageFile" : "pinFile",
# "prestageStorageFile" : "prestageFile",
# "putStorageDirectory" : "putDirectory",
# "putStorageFile" : "putFile",
# "releaseStorageFile" : "releaseFile",
# "removeStorageDirectory" : "removeDirectory",
# "removeStorageFile" : "removeFile",
# }
#
#
# # We can set default argument in the __executeFunction which impacts all plugins
# __defaultsArguments = { "getAccessUrl" : { "protocol" : None },
# "getPfnForProtocol" : { "protocol" : "SRM2", "withPort" : True },
# "prestageFile" : { "lifetime" : 86400 },
# "pinFile" : { "lifetime" : 86400 },
# "getFile" : { "localPath": False },
# "getDirectory" : { "localPath" : False },
# "removeDirectory" : { "recursive" : False },
# }
#
# def __init__( self ):
# super( StorageBackwardCompatibility, self ).__init__()
# self.methodName = None
#
#
#
# # We need to keep this because it makes the loop... stupid
# def getPfnForLfn( self, lfns, storageElementName ):
# """ get PFNs for supplied LFNs at :storageElementName: SE
#
# :param self: self reference
# :param list lfns: list of LFNs
# :param str stotrageElementName: DIRAC SE name
# """
# if type( lfns ) == type( '' ):
# lfns = [lfns]
# storageElement = StorageElement( storageElementName )
# res = storageElement.isValid( "getPfnForLfn" )
# if not res['OK']:
# self.log.debug( "getPfnForLfn: Failed to instantiate StorageElement at %s" % storageElementName )
# return res
# retDict = { "Successful" : {}, "Failed" : {} }
# for lfn in lfns:
# res = storageElement.getPfnForLfn( lfn )
# if res["OK"]:
# retDict["Successful"][lfn] = res["Value"]
# else:
# retDict["Failed"][lfn] = res["Message"]
# return S_OK( retDict )
#
# # We need to keep this because it makes the loop... stupid
# def getPfnForProtocol( self, pfns, storageElementName, protocol = "SRM2", withPort = True ):
# """ create PFNs strings at :storageElementName: SE using protocol :protocol:
#
# :param self: self reference
# :param list pfns: list of PFNs
# :param str storageElementName: DIRAC SE name
# :param str protocol: protocol name (default: 'SRM2')
# :param bool withPort: flag to include port in PFN (default: True)
# """
# storageElement = StorageElement( storageElementName )
# res = storageElement.isValid( "getPfnForProtocol" )
# if not res["OK"]:
# self.log.debug( "getPfnForProtocol: Failed to instantiate StorageElement at %s" % storageElementName )
# return res
# retDict = { "Successful" : {}, "Failed" : {}}
# for pfn in pfns:
# res = storageElement.getPfnForProtocol( pfn, protocol, withPort = withPort )
# if res["OK"]:
# retDict["Successful"][pfn] = res["Value"]
# else:
# retDict["Failed"][pfn] = res["Message"]
# return S_OK( retDict )
#
# # different order of argument....
# def replicateStorageFile( self, physicalFile, size, storageElementName, singleFile = False ):
# """ replicate a physical file to a storage element
#
# :param self: self reference
# :param mixed physicalFile: dictionary with PFN information
# :param int size: size of PFN in bytes
# :param str storageElementName: DIRAC SE name
# :param bool singleFile: execute for the first PFN only
# """
# self.methodName = 'replicateFile'
# return self.__executeMethod( physicalFile, storageElementName, sourceSize = size, singleFile = singleFile )
#
#
#
# def _callStorageElementFcn( self, storageElementName, pfn, method, argsDict = None ):
#
# print "StorageBackwardCompatibility _callStorageElementFcn %s" % method
#
# # We take either the equivalent name, or the name itself
# self.methodName = method
#
# kwargs = {}
# if argsDict:
# kwargs.update( argsDict )
# return self.__executeMethod( pfn, storageElementName, **kwargs )
#
# def __executeMethod( self, lfn, storageElementName, *args, **kwargs ):
# """ a simple wrapper around the :StorageElement: functionality
#
# :param self: self reference
# :param str storageElementName: DIRAC SE name to be accessed e.g. CERN-DST
# :param mixed lfn: contains a single LFN string or a list of LFNs or dictionary containing LFNs
# :param dict argsDict: additional keyword arguments that are required for the :method:
# """
#
# print "EXECUTE METHOD OF STORAGE BACKWAARD"
#
# removedArgs = {}
#
# self.log.debug( "StorageBackwardCompatibility.__executeMethod : preparing the execution of %s" % ( self.methodName ) )
#
# # args should normaly be empty to avoid problem...
# if len( args ):
# self.log.debug( "StorageBackwardCompatibility.__executeMethod: args should be empty!%s" % args )
#
#
# # We check the deprecated arguments
# for depArg in StorageBackwardCompatibility.__deprecatedArguments:
# if depArg in kwargs:
# self.log.debug( "StorageBackwardCompatibility.__executeMethod: %s is not an allowed argument anymore. Please change your code!" % depArg )
# removedArgs[depArg] = kwargs[depArg]
# del kwargs[depArg]
#
#
#
# # Set default argument if any
# methDefaultArgs = StorageBackwardCompatibility.__defaultsArguments.get( self.methodName, {} )
# for argName in methDefaultArgs:
# if argName not in kwargs:
# self.log.debug( "StorageBackwardCompatibility.__executeMethod : default argument %s for %s not present.\
# Setting value %s." % ( argName, self.methodName, methDefaultArgs[argName] ) )
# kwargs[argName] = methDefaultArgs[argName]
#
#
#
# storageElement = StorageElement( storageElementName )
# res = storageElement.isValid( self.methodName )
# if not res['OK']:
# errStr = "StorageBackwardCompatibility.__executeMethod: Failed to instantiate Storage Element"
# self.log.debug( errStr, "for performing %s at %s." % ( self.methodName, storageElementName ) )
# return res
#
# # # get sybmbol
# fcFcn = getattr( storageElement, self.methodName ) if hasattr( storageElement, self.methodName ) else None
# # # make sure it is callable
# fcFcn = fcFcn if callable( fcFcn ) else None
# if not fcFcn:
# errMsg = "StorageBackwardCompatibility.__executeMethod: '%s' isn't a member function in StorageElement." % self.methodName
# self.log.debug( errMsg )
# return S_ERROR( errMsg )
#
# # # call it at least
# res = fcFcn( lfn, *args, **kwargs )
# # # return the output
# if not res["OK"]:
# errStr = "StorageBackwardCompatibility.__executeMethod: Completely failed to perform %s." % self.methodName
# self.log.debug( errStr, '%s : %s' % ( storageElementName, res["Message"] ) )
#
# return res
#
#
# def __getattr__( self, name ):
# print "storage backward : %s" % name
# # We take either the equivalent name, or the name itself
# self.methodName = StorageBackwardCompatibility.__equivalentMethodNames.get( name, None )
#
# if self.methodName:
# return self.__executeMethod
#
# return super( StorageBackwardCompatibility, self ).__getattr__( name )
#
#
#
#
#
#
# class CatalogBackwardCompatibility( object ):
# """
# :class: This class ensures the backward compatibility with the previous implementation
# of the RM. It forwards the calls to the FileCatalog. It is meant to disappear
# asap.
# """
#
# __deprecatedArguments = ["singleFile", "singleDirectory", "catalogs"] # Arguments that are now useless
#
# # Some methods have a different name in the ReplicaManager and the StorageElement...
# # We could avoid this static list in the __getattr__ by checking the StorageElement object and so on
# # but fine... let's not be too smart, otherwise it becomes unreadable :-)
# __equivalentMethodNames = { "addCatalogFile" : "addFile",
# "addCatalogReplica" : "addReplica",
# "createCatalogDirectory" : "createDirectory",
# "createCatalogLink" : "createLink",
# "getCatalogDirectoryMetadata" : "getDirectoryMetadata",
# "getCatalogDirectoryReplicas" : "getDirectoryReplicas",
# "getCatalogDirectorySize" : "getDirectorySize",
# "getCatalogExists" : "exists",
# "getCatalogFileMetadata" : "getFileMetadata",
# "getCatalogFileSize" : "getFileSize",
# "getCatalogIsDirectory" : "isDirectory",
# "getCatalogIsFile" : "isFile",
# "getCatalogIsLink" : "isLink",
# "getCatalogLFNForPFN" : "getLFNForPFN",
# "getCatalogListDirectory" : "listDirectory",
# "getCatalogReadLink" : "readLink",
# "getCatalogReplicas" : "getReplicas",
# "getCatalogReplicaStatus" : "getReplicaStatus",
# "removeCatalogDirectory" : "removeDirectory",
# "removeCatalogLink" : "removeLink",
# "removeCatalogReplica" : "removeReplica",
# "setCatalogReplicaHost" : "setReplicaHost",
# "setCatalogReplicaStatus" : "setReplicaStatus",
# }
#
#
# # We can set default argument in the __executeFunction which impacts all plugins
# __defaultsArguments = { "getReplicas" : { "allStatus" : False },
# "listDirectory" : { "verbose" : False },
# "removeDirectory" : { "recursive" : False },
# }
#
# def __init__( self ):
# super( CatalogBackwardCompatibility, self ).__init__()
# self.methodName = None
#
#
#
#
#
#
# def removeCatalogFile( self, lfn, singleFile = False, catalogs = None ):
# """ remove a file from the FileCatalog
#
# :param self: self reference
# :param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
# :param bool singleFile: execute for the first LFN only
# :param list catalogs: catalogs' names
# """
# # # make sure lfns are sorted from the longest to the shortest
# if type( lfn ) == ListType:
# lfn = sorted( lfn, reverse = True )
#
# self.methodName = "removeFile"
# return self.__executeMethod( lfn, singleFile = singleFile, catalogs = catalogs )
#
#
# def _callFileCatalogFcn( self, lfn, method, argsDict = None, catalogs = None ):
#
# print "CatalogBackwardCompatibility _callFileCatalogFcn %s" % method
#
# # We take either the equivalent name, or the name itself
# self.methodName = method
# kwargs = {"catalogs" : catalogs}
# if argsDict:
# kwargs.update( argsDict )
#
# return self.__executeMethod( lfn, **kwargs )
#
#
#
#
# def __executeMethod( self, lfn, *args, **kwargs ):
# """ a simple wrapper around the :StorageElement: functionality
#
# :param self: self reference
# :param str storageElementName: DIRAC SE name to be accessed e.g. CERN-DST
# :param mixed lfn: contains a single LFN string or a list of LFNs or dictionary containing LFNs
# :param dict argsDict: additional keyword arguments that are required for the :method:
# """
#
# print "EXECUTE METHOD OF CATALOGBACKWAARD"
#
# removedArgs = {}
#
# self.log.debug( "CatalogBackwardCompatibility.__executeMethod : preparing the execution of %s" % ( self.methodName ) )
#
# # args should normaly be empty to avoid problem...
# if len( args ):
# self.log.debug( "CatalogBackwardCompatibility.__executeMethod: args should be empty!%s" % args )
#
#
# # We check the deprecated arguments
# for depArg in CatalogBackwardCompatibility.__deprecatedArguments:
# if depArg in kwargs:
# self.log.debug( "CatalogBackwardCompatibility.__executeMethod: %s is not an allowed argument anymore. Please change your code!" % depArg )
# removedArgs[depArg] = kwargs[depArg]
# del kwargs[depArg]
#
#
#
# # Set default argument if any
# methDefaultArgs = CatalogBackwardCompatibility.__defaultsArguments.get( self.methodName, {} )
# for argName in methDefaultArgs:
# if argName not in kwargs:
# self.log.debug( "CatalogBackwardCompatibility.__executeMethod : default argument %s for %s not present.\
# Setting value %s." % ( argName, self.methodName, methDefaultArgs[argName] ) )
# kwargs[argName] = methDefaultArgs[argName]
#
#
# catalogs = removedArgs.get( "catalogs", list() )
# if not catalogs:
# catalogs = list()
#
#
#
# lfns = None
# if not lfn or type( lfn ) not in StringTypes + ( ListType, DictType ):
# errStr = "_callFileCatalogFcn: Wrong 'lfn' argument."
# self.log.debug( errStr )
# return S_ERROR( errStr )
# elif type( lfn ) in StringTypes:
# lfns = { lfn : False }
# elif type( lfn ) == ListType:
# lfns = dict.fromkeys( lfn, False )
# elif type( lfn ) == DictType:
# lfns = lfn.copy()
#
# # # lfns supplied?
# if not lfns:
# errMsg = "CatalogBackwardCompatibility.__executeMethod: No lfns supplied."
# self.log.debug( errMsg )
# return S_ERROR( errMsg )
# self.log.debug( "CatalogBackwardCompatibility.__executeMethod: Will execute '%s' method with %s lfns." % ( self.methodName, len( lfns ) ) )
# # # create FileCatalog instance
# fileCatalog = FileCatalog( catalogs = catalogs )
# if not fileCatalog.isOK():
# return S_ERROR( "CatalogBackwardCompatibility.__executeMethod: Can't get FileCatalogs %s" % catalogs )
# # # get symbol
# fcFcn = getattr( fileCatalog, self.methodName ) if hasattr( fileCatalog, self.methodName ) else None
# # # check if it is callable
# fcFcn = fcFcn if callable( fcFcn ) else None
# if not fcFcn:
# errMsg = "CatalogBackwardCompatibility.__executeMethod: '%s' isn't a member function in FileCatalog." % self.methodName
# self.log.debug( errMsg )
# return S_ERROR( errMsg )
# # # call it at least
# res = fcFcn( lfns, **kwargs )
# if not res["OK"]:
# self.log.debug( "CatalogBackwardCompatibility.__executeMethod: Failed to execute '%s'." % self.methodName, res["Message"] )
# return res
#
# def __getattr__( self, name ):
#
# # We take either the equivalent name, or the name itself
# self.methodName = CatalogBackwardCompatibility.__equivalentMethodNames.get( name, None )
# if self.methodName:
# return self.__executeMethod
#
# return super( CatalogBackwardCompatibility, self ).__getattr__( name )
###############################################################################################################################################
class DataManager( object ):
"""
.. class:: DataManager
  A DataManager performs all the actions that involve both the FileCatalog and the StorageElement
"""
def __init__( self, catalogs = [] ):
""" c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
self.fc = FileCatalog( catalogs )
self.accountingClient = None
self.registrationProtocol = ['SRM2', 'DIP']
self.thirdPartyProtocols = ['SRM2', 'DIP']
self.resourceStatus = ResourceStatus()
self.ignoreMissingInFC = Operations().getValue( 'DataManagement/IgnoreMissingInFC', False )
def setAccountingClient( self, client ):
""" Set Accounting Client instance
"""
self.accountingClient = client
def __verifyOperationWritePermission( self, path ):
""" Check if we have write permission to the given directory
"""
if type( path ) in StringTypes:
paths = [ path ]
else:
paths = path
res = self.fc.getPathPermissions( paths )
if not res['OK']:
return res
for path in paths:
if not res['Value']['Successful'].get( path, {} ).get( 'Write', False ):
return S_OK( False )
return S_OK( True )
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory( self, lfnDir ):
""" Clean the logical directory from the catalog and storage
"""
if type( lfnDir ) in StringTypes:
lfnDir = [ lfnDir ]
retDict = { "Successful" : {}, "Failed" : {} }
for folder in lfnDir:
res = self.__cleanDirectory( folder )
if not res['OK']:
self.log.debug( "Failed to clean directory.", "%s %s" % ( folder, res['Message'] ) )
retDict["Failed"][folder] = res['Message']
else:
self.log.debug( "Successfully removed directory.", folder )
retDict["Successful"][folder] = res['Value']
return S_OK( retDict )
def __cleanDirectory( self, folder ):
""" delete all files from directory :folder: in FileCatalog and StorageElement
:param self: self reference
:param str folder: directory name
"""
res = self.__verifyOperationWritePermission( folder )
if not res['OK']:
return res
if not res['Value']:
errStr = "__cleanDirectory: Write access not permitted for this credential."
self.log.debug( errStr, folder )
return S_ERROR( errStr )
res = self.__getCatalogDirectoryContents( [ folder ] )
if not res['OK']:
return res
res = self.removeFile( res['Value'].keys() + [ '%s/dirac_directory' % folder ] )
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].items():
gLogger.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
storageElements = gConfig.getValue( 'Resources/StorageElementGroups/SE_Cleaning_List', [] )
failed = False
for storageElement in sorted( storageElements ):
res = self.__removeStorageDirectory( folder, storageElement )
if not res['OK']:
failed = True
if failed:
return S_ERROR( "Failed to clean storage directory at all SEs" )
res = Utils.executeSingleFileOrDirWrapper( self.fc.removeDirectory( folder, recursive = True ) )
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory( self, directory, storageElement ):
""" delete SE directory
:param self: self reference
:param str directory: folder to be removed
:param str storageElement: DIRAC SE name
"""
se = StorageElement( storageElement )
res = Utils.executeSingleFileOrDirWrapper( se.exists( directory ) )
if not res['OK']:
self.log.debug( "Failed to obtain existance of directory", res['Message'] )
return res
exists = res['Value']
if not exists:
self.log.debug( "The directory %s does not exist at %s " % ( directory, storageElement ) )
return S_OK()
res = Utils.executeSingleFileOrDirWrapper( se.removeDirectory( directory, recursive = True ) )
if not res['OK']:
self.log.debug( "Failed to remove storage directory", res['Message'] )
return res
self.log.debug( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
directory,
storageElement ) )
return S_OK()
def __getCatalogDirectoryContents( self, directories ):
""" ls recursively all files in directories
:param self: self reference
:param list directories: folder names
"""
self.log.debug( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
activeDirs = directories
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = Utils.executeSingleFileOrDirWrapper( self.fc.listDirectory( currentDir ) )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.debug( "Problem getting the %s directory content" % currentDir, res['Message'] )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.debug( "Found %d files" % len( allFiles ) )
return S_OK( allFiles )
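  # Note: the loop above is a breadth-first walk over the catalogue -- every
  # listed directory's sub-directories are appended to activeDirs and
  # processed in turn, so allFiles ends up containing the metadata of every
  # file found below the starting directories.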
def getReplicasFromDirectory( self, directory ):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents( directories )
if not res['OK']:
return res
allReplicas = {}
for lfn, metadata in res['Value'].items():
allReplicas[lfn] = metadata['Replicas']
return S_OK( allReplicas )
def getFilesFromDirectory( self, directory, days = 0, wildcard = '*' ):
""" get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
self.log.debug( "Obtaining the files older than %d days in %d directories:" % ( days, len( directories ) ) )
for folder in directories:
self.log.debug( folder )
activeDirs = directories
allFiles = []
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = Utils.executeSingleFileOrDirWrapper( self.fc.listDirectory( currentDir, verbose = ( days != 0 ) ) )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.debug( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
subdirs = dirContents['SubDirs']
files = dirContents['Files']
self.log.debug( "%s: %d files, %d sub-directories" % ( currentDir, len( files ), len( subdirs ) ) )
for subdir in subdirs:
if ( not days ) or self.__isOlderThan( subdirs[subdir]['CreationDate'], days ):
if subdir[0] != '/':
subdir = currentDir + '/' + subdir
activeDirs.append( subdir )
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get( 'Metadata', fileInfo )
if ( not days ) or not fileInfo.get( 'CreationDate' ) or self.__isOlderThan( fileInfo['CreationDate'], days ):
if wildcard == '*' or fnmatch.fnmatch( fileName, wildcard ):
fileName = fileInfo.get( 'LFN', fileName )
allFiles.append( fileName )
return S_OK( allFiles )
def __isOlderThan( self, stringTime, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
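  # Illustrative example of the cutoff above: with days = 7 and
  # datetime.utcnow() == 2014-01-10, maxCTime is 2014-01-03, so a catalogue
  # CreationDate of 2014-01-01 is reported as "older than" 7 days while
  # 2014-01-05 is not.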
##########################################################################
#
# These are the data transfer methods
#
def getFile( self, lfn, destinationDir = '' ):
""" Get a local copy of a LFN from Storage Elements.
'lfn' is the logical file name for the desired file
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "getFile: Supplied lfn must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "getFile: Attempting to get %s files." % len( lfns ) )
res = self.getActiveReplicas( lfns )
if not res['OK']:
return res
failed = res['Value']['Failed']
lfnReplicas = res['Value']['Successful']
res = self.fc.getFileMetadata( lfnReplicas.keys() )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
fileMetadata = res['Value']['Successful']
successful = {}
for lfn in fileMetadata:
res = self.__getFile( lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = res['Value']
gDataStoreClient.commit()
return S_OK( { 'Successful': successful, 'Failed' : failed } )
def __getFile( self, lfn, replicas, metadata, destinationDir ):
if not replicas:
self.log.debug( "No accessible replicas found" )
return S_ERROR( "No accessible replicas found" )
# Determine the best replicas
res = self._getSEProximity( replicas.keys() )
if not res['OK']:
return res
for storageElementName in res['Value']:
se = StorageElement( storageElementName )
physicalFile = se.getPfnForLfn( lfn ).get( 'Value', {} ).get( 'Successful', {} ).get( lfn, replicas[storageElementName] )
# print '__getFile', physicalFile, replicas[storageElementName]
oDataOperation = self.__initialiseAccountingObject( 'getFile', storageElementName, 1 )
oDataOperation.setStartTime()
startTime = time.time()
res = Utils.executeSingleFileOrDirWrapper( se.getFile( physicalFile, localPath = os.path.realpath( destinationDir ) ) )
getTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', getTime )
if not res['OK']:
self.log.debug( "Failed to get %s from %s" % ( lfn, storageElementName ), res['Message'] )
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
else:
oDataOperation.setValueByKey( 'TransferSize', res['Value'] )
localFile = os.path.realpath( os.path.join( destinationDir, os.path.basename( lfn ) ) )
localAdler = fileAdler( localFile )
if ( metadata['Size'] != res['Value'] ):
oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
self.log.debug( "Size of downloaded file (%d) does not match catalog (%d)" % ( res['Value'],
metadata['Size'] ) )
elif ( metadata['Checksum'] ) and ( not compareAdler( metadata['Checksum'], localAdler ) ):
oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
self.log.debug( "Checksum of downloaded file (%s) does not match catalog (%s)" % ( localAdler,
metadata['Checksum'] ) )
else:
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
return S_OK( localFile )
gDataStoreClient.addRegister( oDataOperation )
self.log.debug( "getFile: Failed to get local copy from any replicas.", lfn )
return S_ERROR( "DataManager.getFile: Failed to get local copy from any replicas." )
def _getSEProximity( self, ses ):
""" get SE proximity """
siteName = DIRAC.siteName()
localSEs = [se for se in getSEsForSite( siteName )['Value'] if se in ses]
countrySEs = []
countryCode = str( siteName ).split( '.' )[-1]
res = getSEsForCountry( countryCode )
if res['OK']:
countrySEs = [se for se in res['Value'] if se in ses and se not in localSEs]
sortedSEs = randomize( localSEs ) + randomize( countrySEs )
sortedSEs += randomize( [se for se in ses if se not in sortedSEs] )
return S_OK( sortedSEs )
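  # The resulting preference order is: SEs local to the current DIRAC site
  # (randomised), then other SEs with the same country suffix (randomised),
  # then all remaining SEs (randomised). For example, for a hypothetical site
  # "LCG.SomeLab.ch", SEs attached to that site are tried first and ".ch"
  # SEs before SEs elsewhere.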
def putAndRegister( self, lfn, fileName, diracSE, guid = None, path = None, checksum = None ):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
        'fileName' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
        'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
        'checksum' is the adler32 checksum of the file (if not provided it will be calculated)
    """
# ancestors = ancestors if ancestors else list()
res = self.__verifyOperationWritePermission( os.path.dirname( lfn ) )
if not res['OK']:
return res
if not res['Value']:
errStr = "putAndRegister: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "putAndRegister: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "putAndRegister: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid( fileName )
if not checksum:
self.log.debug( "putAndRegister: Checksum information not provided. Calculating adler32." )
checksum = fileAdler( fileName )
self.log.debug( "putAndRegister: Checksum calculated to be %s." % checksum )
res = self.fc.exists( {lfn:guid} )
if not res['OK']:
errStr = "putAndRegister: Completey failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return res
if lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
errStr = "putAndRegister: The supplied LFN already exists in the File Catalog."
self.log.debug( errStr, lfn )
else:
errStr = "putAndRegister: This file GUID already exists for another file. " \
"Please remove it and try again."
self.log.debug( errStr, res['Value']['Successful'][lfn] )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Successful'][lfn] ) )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "putAndRegister: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
destinationSE = storageElement.getStorageElementName()['Value']
res = Utils.executeSingleFileOrDirWrapper( storageElement.getPfnForLfn( lfn ) )
if not res['OK']:
errStr = "putAndRegister: Failed to generate destination PFN."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
destPfn = res['Value']
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = self.__initialiseAccountingObject( 'putAndRegister', diracSE, 1 )
oDataOperation.setStartTime()
oDataOperation.setValueByKey( 'TransferSize', size )
startTime = time.time()
res = storageElement.putFile( fileDict, singleFile = True )
putTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', putTime )
if not res['OK']:
errStr = "putAndRegister: Failed to put file to Storage Element."
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
oDataOperation.setValueByKey( 'RegistrationTotal', 1 )
fileTuple = ( lfn, destPfn, size, destinationSE, guid, checksum )
registerDict = {'LFN':lfn, 'PFN':destPfn, 'Size':size, 'TargetSE':destinationSE, 'GUID':guid, 'Addler':checksum}
startTime = time.time()
res = self.registerFile( fileTuple )
registerTime = time.time() - startTime
oDataOperation.setValueByKey( 'RegistrationTime', registerTime )
if not res['OK']:
errStr = "putAndRegister: Completely failed to register file."
self.log.debug( errStr, res['Message'] )
failed[lfn] = { 'register' : registerDict }
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
elif lfn in res['Value']['Failed']:
errStr = "putAndRegister: Failed to register file."
self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
failed[lfn] = { 'register' : registerDict }
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey( 'RegistrationOK', 1 )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
return S_OK( {'Successful': successful, 'Failed': failed } )
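  # Minimal usage sketch (the LFN, local path and SE name are hypothetical):
  #
  #   dm = DataManager()
  #   res = dm.putAndRegister( '/vo/user/j/jdoe/test.txt', '/tmp/test.txt', 'SOME-USER-SE' )
  #   if res['OK'] and '/vo/user/j/jdoe/test.txt' in res['Value']['Successful']:
  #     # the returned dict gives the put and registration times in seconds
  #     pass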
def replicateAndRegister( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' , catalog = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
successful = {}
failed = {}
self.log.debug( "replicateAndRegister: Attempting to replicate %s to %s." % ( lfn, destSE ) )
startReplication = time.time()
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
replicationTime = time.time() - startReplication
if not res['OK']:
errStr = "DataManager.replicateAndRegister: Completely failed to replicate file."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicateAndRegister: %s already present at %s." % ( lfn, destSE ) )
successful[lfn] = { 'replicate' : 0, 'register' : 0 }
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
successful[lfn] = { 'replicate' : replicationTime }
destPfn = res['Value']['DestPfn']
destSE = res['Value']['DestSE']
self.log.debug( "replicateAndRegister: Attempting to register %s at %s." % ( destPfn, destSE ) )
replicaTuple = ( lfn, destPfn, destSE )
startRegistration = time.time()
res = self.registerReplica( replicaTuple, catalog = catalog )
registrationTime = time.time() - startRegistration
if not res['OK']:
# Need to return to the client that the file was replicated but not registered
errStr = "replicateAndRegister: Completely failed to register replica."
self.log.debug( errStr, res['Message'] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
else:
if lfn in res['Value']['Successful']:
self.log.debug( "replicateAndRegister: Successfully registered replica." )
successful[lfn]['register'] = registrationTime
else:
errStr = "replicateAndRegister: Failed to register replica."
self.log.debug( errStr, res['Value']['Failed'][lfn] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
return S_OK( {'Successful': successful, 'Failed': failed} )
def replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
self.log.debug( "replicate: Attempting to replicate %s to %s." % ( lfn, destSE ) )
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
if not res['OK']:
errStr = "replicate: Replication failed."
self.log.debug( errStr, "%s %s" % ( lfn, destSE ) )
return res
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicate: %s already present at %s." % ( lfn, destSE ) )
return res
return S_OK( lfn )
def __replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
"""
###########################################################
# Check that we have write permissions to this directory.
res = self.__verifyOperationWritePermission( lfn )
if not res['OK']:
return res
if not res['Value']:
errStr = "__replicate: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
self.log.debug( "__replicate: Performing replication initialization." )
res = self.__initializeReplication( lfn, sourceSE, destSE )
if not res['OK']:
self.log.debug( "__replicate: Replication initialisation failed.", lfn )
return res
destStorageElement = res['Value']['DestStorage']
lfnReplicas = res['Value']['Replicas']
destSE = res['Value']['DestSE']
catalogueSize = res['Value']['CatalogueSize']
###########################################################
# If the LFN already exists at the destination we have nothing to do
if destSE in lfnReplicas:
self.log.debug( "__replicate: LFN is already registered at %s." % destSE )
return S_OK()
###########################################################
# Resolve the best source storage elements for replication
self.log.debug( "__replicate: Determining the best source replicas." )
res = self.__resolveBestReplicas( lfn, sourceSE, lfnReplicas, catalogueSize )
if not res['OK']:
self.log.debug( "__replicate: Best replica resolution failed.", lfn )
return res
replicaPreference = res['Value']
###########################################################
# Now perform the replication for the file
if destPath:
destPath = '%s/%s' % ( destPath, os.path.basename( lfn ) )
else:
destPath = lfn
res = Utils.executeSingleFileOrDirWrapper( destStorageElement.getPfnForLfn( destPath ) )
if not res['OK']:
errStr = "__replicate: Failed to generate destination PFN."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
destPfn = res['Value']
# Find out if there is a replica already at the same site
localReplicas = []
otherReplicas = []
for sourceSE, sourcePfn in replicaPreference:
if sourcePfn == destPfn:
continue
res = isSameSiteSE( sourceSE, destSE )
if res['OK'] and res['Value']:
localReplicas.append( ( sourceSE, sourcePfn ) )
else:
otherReplicas.append( ( sourceSE, sourcePfn ) )
replicaPreference = localReplicas + otherReplicas
for sourceSE, sourcePfn in replicaPreference:
self.log.debug( "__replicate: Attempting replication from %s to %s." % ( sourceSE, destSE ) )
fileDict = {destPfn:sourcePfn}
if sourcePfn == destPfn:
continue
localFile = ''
#FIXME: this should not be hardcoded!!!
if sourcePfn.find( 'srm' ) == -1 or destPfn.find( 'srm' ) == -1:
# No third party transfer is possible, we have to replicate through the local cache
localDir = '.'
if localCache:
localDir = localCache
self.getFile( lfn, destinationDir = localDir )
localFile = os.path.join( localDir, os.path.basename( lfn ) )
fileDict = {destPfn:localFile}
res = destStorageElement.replicateFile( fileDict, sourceSize = catalogueSize, singleFile = True )
if localFile and os.path.exists( localFile ):
os.remove( localFile )
if res['OK']:
self.log.debug( "__replicate: Replication successful." )
resDict = {'DestSE':destSE, 'DestPfn':destPfn}
return S_OK( resDict )
else:
errStr = "__replicate: Replication failed."
self.log.debug( errStr, "%s from %s to %s." % ( lfn, sourceSE, destSE ) )
##########################################################
# If the replication failed for all sources give up
errStr = "__replicate: Failed to replicate with all sources."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
def __initializeReplication( self, lfn, sourceSE, destSE ):
# Horrible, but kept to not break current log messages
logStr = "__initializeReplication:"
###########################################################
# Check the sourceSE if specified
self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE:
if not self.__SEActive( sourceSE ).get( 'Value', {} ).get( 'Read' ):
infoStr = "%s Supplied source Storage Element is not currently allowed for Read." % ( logStr )
self.log.info( infoStr, sourceSE )
return S_ERROR( infoStr )
###########################################################
# Check that the destination storage element is sane and resolve its name
self.log.debug( "%s Verifying dest StorageElement validity (%s)." % ( logStr, destSE ) )
destStorageElement = StorageElement( destSE )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "%s The storage element is not currently valid." % logStr
self.log.debug( errStr, "%s %s" % ( destSE, res['Message'] ) )
return S_ERROR( errStr )
destSE = destStorageElement.getStorageElementName()['Value']
self.log.info( "%s Destination Storage Element verified." % logStr )
###########################################################
# Check whether the destination storage element is banned
self.log.verbose( "%s Determining whether %s ( destination ) is Write-banned." % ( logStr, destSE ) )
if not self.__SEActive( destSE ).get( 'Value', {} ).get( 'Write' ):
infoStr = "%s Supplied destination Storage Element is not currently allowed for Write." % ( logStr )
self.log.debug( infoStr, destSE )
return S_ERROR( infoStr )
###########################################################
# Get the LFN replicas from the file catalogue
self.log.debug( "%s Attempting to obtain replicas for %s." % ( logStr, lfn ) )
res = self.fc.getReplicas( lfn )
if not res[ 'OK' ]:
errStr = "%s Completely failed to get replicas for LFN." % logStr
self.log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
return res
if lfn not in res['Value']['Successful']:
errStr = "%s Failed to get replicas for LFN." % logStr
self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
self.log.debug( "%s Successfully obtained replicas for LFN." % logStr )
lfnReplicas = res['Value']['Successful'][lfn]
###########################################################
# Check the file is at the sourceSE
self.log.debug( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE and sourceSE not in lfnReplicas:
errStr = "%s LFN does not exist at supplied source SE." % logStr
self.log.error( errStr, "%s %s" % ( lfn, sourceSE ) )
return S_ERROR( errStr )
###########################################################
# If the file catalogue size is zero fail the transfer
self.log.debug( "%s Attempting to obtain size for %s." % ( logStr, lfn ) )
res = self.fc.getFileSize( lfn )
if not res['OK']:
errStr = "%s Completely failed to get size for LFN." % logStr
self.log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
return res
if lfn not in res['Value']['Successful']:
errStr = "%s Failed to get size for LFN." % logStr
self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
catalogueSize = res['Value']['Successful'][lfn]
if catalogueSize == 0:
errStr = "%s Registered file size is 0." % logStr
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
self.log.debug( "%s File size determined to be %s." % ( logStr, catalogueSize ) )
self.log.info( "%s Replication initialization successful." % logStr )
resDict = {
'DestStorage' : destStorageElement,
'DestSE' : destSE,
'Replicas' : lfnReplicas,
'CatalogueSize' : catalogueSize
}
return S_OK( resDict )
def __resolveBestReplicas( self, lfn, sourceSE, lfnReplicas, catalogueSize ):
""" find best replicas """
###########################################################
# Determine the best replicas (remove banned sources, invalid storage elements and file with the wrong size)
logStr = "__resolveBestReplicas:"
replicaPreference = []
for diracSE, pfn in lfnReplicas.items():
if sourceSE and diracSE != sourceSE:
self.log.debug( "%s %s replica not requested." % ( logStr, diracSE ) )
continue
if not self.__SEActive( diracSE ).get( 'Value', {} ).get( 'Read' ):
self.log.debug( "%s %s is currently not allowed as a source." % ( logStr, diracSE ) )
else:
self.log.debug( "%s %s is available for use." % ( logStr, diracSE ) )
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "%s The storage element is not currently valid." % logStr
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
else:
pfn = Utils.executeSingleFileOrDirWrapper( storageElement.getPfnForLfn( lfn ) ).get( 'Value', pfn )
if storageElement.getRemoteProtocols()['Value']:
self.log.debug( "%s Attempting to get source pfns for remote protocols." % logStr )
res = Utils.executeSingleFileOrDirWrapper( storageElement.getPfnForProtocol( pfn, protocol = self.thirdPartyProtocols ) )
if res['OK']:
sourcePfn = res['Value']
self.log.debug( "%s Attempting to get source file size." % logStr )
res = storageElement.getFileSize( sourcePfn )
if res['OK']:
if sourcePfn in res['Value']['Successful']:
sourceFileSize = res['Value']['Successful'][sourcePfn]
self.log.debug( "%s Source file size determined to be %s." % ( logStr, sourceFileSize ) )
if catalogueSize == sourceFileSize:
fileTuple = ( diracSE, sourcePfn )
replicaPreference.append( fileTuple )
else:
errStr = "%s Catalogue size and physical file size mismatch." % logStr
self.log.debug( errStr, "%s %s" % ( diracSE, sourcePfn ) )
else:
errStr = "%s Failed to get physical file size." % logStr
self.log.debug( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Value']['Failed'][sourcePfn] ) )
else:
errStr = "%s Completely failed to get physical file size." % logStr
self.log.debug( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Message'] ) )
else:
errStr = "%s Failed to get PFN for replication for StorageElement." % logStr
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
else:
errStr = "%s Source Storage Element has no remote protocols." % logStr
self.log.debug( errStr, diracSE )
if not replicaPreference:
errStr = "%s Failed to find any valid source Storage Elements." % logStr
self.log.debug( errStr )
return S_ERROR( errStr )
else:
return S_OK( replicaPreference )
###################################################################
#
# These are the file catalog write methods
#
def registerFile( self, fileTuple, catalog = '' ):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
if type( fileTuple ) == ListType:
fileTuples = fileTuple
elif type( fileTuple ) == TupleType:
fileTuples = [fileTuple]
else:
errStr = "registerFile: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerFile: Attempting to register %s files." % len( fileTuples ) )
res = self.__registerFile( fileTuples, catalog )
if not res['OK']:
errStr = "registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
return res
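# Illustrative usage of registerFile (a sketch only: the LFN, PFN, SE name,
# GUID and checksum below are placeholders, not values from a real setup):
#
#   dm = DataManager()
#   fileTuple = ( '/vo/user/some/file.dat', 'srm://se.example.org/some/file.dat',
#                 1024, 'SOME-DISK-SE', '12345678-1234-1234-1234-123456789012', '01234567' )
#   res = dm.registerFile( fileTuple )
#   if not res['OK']:
#     print res['Message']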
def __registerFile( self, fileTuples, catalog ):
""" register file to cataloge """
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
fileDict[lfn] = {'PFN':physicalFile, 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
fileCatalog = self.fc
if catalog:
fileCatalog = FileCatalog( catalog )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
res = fileCatalog.addFile( fileDict )
if not res['OK']:
errStr = "__registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
return S_OK( {'Successful':res['Value']['Successful'], 'Failed':res['Value']['Failed']} )
def registerReplica( self, replicaTuple, catalog = '' ):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [ replicaTuple ]
else:
errStr = "registerReplica: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerReplica: Attempting to register %s replicas." % len( replicaTuples ) )
res = self.__registerReplica( replicaTuples, catalog )
if not res['OK']:
errStr = "registerReplica: Completely failed to register replicas."
self.log.debug( errStr, res['Message'] )
return res
def __registerReplica( self, replicaTuples, catalog ):
""" register replica to catalogue """
seDict = {}
for lfn, pfn, storageElementName in replicaTuples:
seDict.setdefault( storageElementName, [] ).append( ( lfn, pfn ) )
failed = {}
replicaTuples = []
for storageElementName, replicaTuple in seDict.items():
destStorageElement = StorageElement( storageElementName )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "__registerReplica: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
for lfn, pfn in replicaTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.getStorageElementName()['Value']
for lfn, pfn in replicaTuple:
res = Utils.executeSingleFileOrDirWrapper( destStorageElement.getPfnForProtocol( pfn, protocol = self.registrationProtocol, withPort = False ) )
if not res['OK']:
failed[lfn] = res['Message']
else:
replicaTuple = ( lfn, res['Value'], storageElementName, False )
replicaTuples.append( replicaTuple )
self.log.debug( "__registerReplica: Successfully resolved %s replicas for registration." % len( replicaTuples ) )
# HACK!
replicaDict = {}
for lfn, pfn, se, _master in replicaTuples:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
if catalog:
fileCatalog = FileCatalog( catalog )
res = fileCatalog.addReplica( replicaDict )
else:
res = self.fc.addReplica( replicaDict )
if not res['OK']:
errStr = "__registerReplica: Completely failed to register replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile( self, lfn, force = None ):
""" Remove the file (all replicas) from Storage Elements and file catalogue
'lfn' is the file to be removed
"""
if force is None:
force = self.ignoreMissingInFC
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeFile: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
# First check if the file exists in the FC
res = self.fc.exists( lfns )
if not res['OK']:
return res
success = res['Value']['Successful']
lfns = [lfn for lfn in success if success[lfn] ]
if force:
# Files that don't exist are removed successfully
successful = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], True )
failed = {}
else:
successful = {}
failed = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], 'No such file or directory' )
# Check that we have write permissions to this directory.
if lfns:
res = self.__verifyOperationWritePermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removeFile: Write access not permitted for this credential."
self.log.error( errStr, lfns )
return S_ERROR( errStr )
self.log.debug( "removeFile: Attempting to remove %s files from Storage and Catalogue. Get replicas first" % len( lfns ) )
res = self.fc.getReplicas( lfns, True )
if not res['OK']:
errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
lfnDict = res['Value']['Successful']
for lfn, reason in res['Value'].get( 'Failed', {} ).items():
# Ignore files missing in FC if force is set
if reason == 'No such file or directory' and force:
successful[lfn] = True
elif reason == 'File has zero replicas':
lfnDict[lfn] = {}
else:
failed[lfn] = reason
res = self.__removeFile( lfnDict )
if not res['OK']:
errStr = "removeFile: Completely failed to remove files."
self.log.debug( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
gDataStoreClient.commit()
return S_OK( resDict )
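# A minimal sketch of calling removeFile (the LFN below is a placeholder).
# With force=True, files already absent from the catalogue are reported as
# successfully removed instead of being flagged as failed:
#
#   res = dm.removeFile( [ '/vo/user/some/file.dat' ], force = True )
#   if res['OK']:
#     print res['Value']['Successful'], res['Value']['Failed']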
def __removeFile( self, lfnDict ):
""" remove file """
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted( lfnDict.items(), reverse = True ):
for se, pfn in repDict.items():
storageElementDict.setdefault( se, [] ).append( ( lfn, pfn ) )
failed = {}
successful = {}
for storageElementName in sorted( storageElementDict ):
fileTuple = storageElementDict[storageElementName]
res = self.__removeReplica( storageElementName, fileTuple )
if not res['OK']:
errStr = res['Message']
for lfn, pfn in fileTuple:
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
else:
for lfn, errStr in res['Value']['Failed'].items():
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
completelyRemovedFiles = []
for lfn in [lfn for lfn in lfnDict if lfn not in failed]:
completelyRemovedFiles.append( lfn )
if completelyRemovedFiles:
res = self.fc.removeFile( completelyRemovedFiles )
if not res['OK']:
for lfn in completelyRemovedFiles:
failed[lfn] = "Failed to remove file from the catalog: %s" % res['Message']
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplica( self, storageElementName, lfn ):
""" Remove replica at the supplied Storage Element from Storage Element then file catalogue
'storageElementName' is the storage where the file is to be removed
'lfn' is the file to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplica: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationWritePermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removaReplica: Write access not permitted for this credential."
self.log.debug( errStr, lfns )
return S_ERROR( errStr )
self.log.debug( "removeReplica: Will remove catalogue entry for %s lfns at %s." % ( len( lfns ),
storageElementName ) )
res = self.fc.getReplicas( lfns, True )
if not res['OK']:
errStr = "removeReplica: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
elif len( repDict ) == 1:
# The file has only a single replica so don't remove
self.log.debug( "The replica you are trying to remove is the only one.", "%s @ %s" % ( lfn,
storageElementName ) )
failed[lfn] = "Failed to remove sole replica"
else:
replicaTuples.append( ( lfn, repDict[storageElementName] ) )
res = self.__removeReplica( storageElementName, replicaTuples )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
gDataStoreClient.commit()
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def __removeReplica( self, storageElementName, fileTuple ):
""" remove replica """
lfnDict = {}
failed = {}
for lfn, pfn in fileTuple:
res = self.__verifyOperationWritePermission( lfn )
if not res['OK'] or not res['Value']:
errStr = "__removeReplica: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
failed[lfn] = errStr
else:
# This is the PFN as registered in the FC
lfnDict[lfn] = pfn
res = self.__removePhysicalReplica( storageElementName, lfnDict.keys() )
if not res['OK']:
errStr = "__removeReplica: Failed to remove catalog replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
for lfn, error in res['Value']['Failed'].items():
failed[lfn] = error
replicaTuples = [( lfn, lfnDict[lfn], storageElementName ) for lfn in res['Value']['Successful']]
successful = {}
res = self.__removeCatalogReplica( replicaTuples )
if not res['OK']:
errStr = "__removeReplica: Completely failed to remove physical files."
self.log.debug( errStr, res['Message'] )
failed.update( dict.fromkeys( [lfn for lfn in lfnDict if lfn not in failed], errStr ) )
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplicaFromCatalog( self, storageElementName, lfn ):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# Remove the replica from the file catalogue: 'lfn' is the file (or files)
# to be removed, 'storageElementName' is the storage from which the replica is removed
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplicaFromCatalog: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "removeReplicaFromCatalog: Will remove catalogue entry for %s lfns at %s." % \
( len( lfns ), storageElementName ) )
res = self.fc.getReplicas( lfns, allStatus = True )
if not res['OK']:
errStr = "removeReplicaFromCatalog: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].items():
if reason in ( 'No such file or directory', 'File has zero replicas' ):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
replicaTuples.append( ( lfn, repDict[storageElementName], storageElementName ) )
self.log.debug( "removeReplicaFromCatalog: Resolved %s pfns for catalog removal at %s." % ( len( replicaTuples ),
storageElementName ) )
res = self.__removeCatalogReplica( replicaTuples )
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeCatalogPhysicalFileNames( self, replicaTuple ):
""" Remove replicas from the file catalog specified by replica tuple
'replicaTuple' is a tuple containing the replica to be removed and is of the form ( lfn, pfn, se )
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [replicaTuple]
else:
errStr = "removeCatalogPhysicalFileNames: Supplied info must be tuple or list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
return self.__removeCatalogReplica( replicaTuples )
def __removeCatalogReplica( self, replicaTuple ):
""" remove replica form catalogue """
oDataOperation = self.__initialiseAccountingObject( 'removeCatalogReplica', '', len( replicaTuple ) )
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuple:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
res = self.fc.removeReplica( replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'RegistrationTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'RegistrationOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removeCatalogReplica: Completely failed to remove replica."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
for lfn in res['Value']['Successful']:
infoStr = "__removeCatalogReplica: Successfully removed replica."
self.log.debug( infoStr, lfn )
if res['Value']['Successful']:
self.log.debug( "__removeCatalogReplica: Removed %d replicas" % len( res['Value']['Successful'] ) )
for lfn, error in res['Value']['Failed'].items():
errStr = "__removeCatalogReplica: Failed to remove replica."
self.log.debug( errStr, "%s %s" % ( lfn, error ) )
oDataOperation.setValueByKey( 'RegistrationOK', len( res['Value']['Successful'] ) )
gDataStoreClient.addRegister( oDataOperation )
return res
def removePhysicalReplica( self, storageElementName, lfn ):
""" Remove replica from Storage Element.
'lfn' is the file (or list of files) to be removed
'storageElementName' is the storage where the file is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removePhysicalReplica: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationWritePermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removePhysicalReplica: Write access not permitted for this credential."
self.log.debug( errStr, lfns )
return S_ERROR( errStr )
self.log.debug( "removePhysicalReplica: Attempting to remove %s lfns at %s." % ( len( lfns ),
storageElementName ) )
self.log.debug( "removePhysicalReplica: Attempting to resolve replicas." )
res = self.fc.getReplicas( lfns )
if not res['OK']:
errStr = "removePhysicalReplica: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
lfnsToRemove = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
lfnsToRemove.append( lfn )
self.log.debug( "removePhysicalReplica: Resolved %s pfns for removal at %s." % ( len( lfnsToRemove ),
storageElementName ) )
res = self.__removePhysicalReplica( storageElementName, lfnsToRemove )
for lfn, error in res['Value']['Failed'].items():
failed[lfn] = error
for lfn in res['Value']['Successful']:
successful[lfn] = True
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
def __removePhysicalReplica( self, storageElementName, lfnsToRemove ):
""" remove replica from storage element """
self.log.debug( "__removePhysicalReplica: Attempting to remove %s pfns at %s." % ( len( lfnsToRemove ),
storageElementName ) )
storageElement = StorageElement( storageElementName )
pfnsToRemove = dict( [( storageElement.getPfnForLfn( lfn )['Value'].get( 'Successful', {} ).get( lfn ), lfn ) for lfn in lfnsToRemove] )
res = storageElement.isValid()
if not res['OK']:
errStr = "__removePhysicalReplica: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
return S_ERROR( errStr )
oDataOperation = self.__initialiseAccountingObject( 'removePhysicalReplica',
storageElementName,
len( pfnsToRemove ) )
oDataOperation.setStartTime()
start = time.time()
res = storageElement.removeFile( pfnsToRemove.keys() )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removePhysicalReplica: Failed to remove replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
else:
result = {'Failed':{}, 'Successful':{}}
for surl, value in res['Value']['Failed'].items():
lfn = pfnsToRemove[surl]
if 'No such file or directory' in value:
result['Successful'][lfn] = surl
else:
result['Failed'][lfn] = value
for surl in res['Value']['Successful']:
lfn = pfnsToRemove[surl]
ret = Utils.executeSingleFileOrDirWrapper( storageElement.getPfnForProtocol( surl, protocol = self.registrationProtocol, withPort = False ) )
if not ret['OK']:
result['Successful'][lfn] = surl
else:
result['Successful'][lfn] = ret['Value']
ret = storageElement.getFileSize( res['Value']['Successful'] )
deletedSize = sum( ret.get( 'Value', {} ).get( 'Successful', {} ).values() )
oDataOperation.setValueByKey( 'TransferOK', deletedSize )
gDataStoreClient.addRegister( oDataOperation )
infoStr = "__removePhysicalReplica: Successfully issued accounting removal request."
self.log.debug( infoStr )
return S_OK( result )
#########################################################################
#
# File transfer methods
#
def put( self, lfn, fileName, diracSE, path = None ):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: the LFN of the file
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN path is used)
"""
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "put: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "put: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "put: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
res = Utils.executeSingleFileOrDirWrapper( storageElement.getPfnForLfn( lfn ) )
if not res['OK']:
errStr = "put: Failed to generate destination PFN."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
destPfn = res['Value']
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = storageElement.putFile( fileDict, singleFile = True )
putTime = time.time() - startTime
if not res['OK']:
errStr = "put: Failed to put file to Storage Element."
failed[lfn] = res['Message']
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
else:
self.log.debug( "put: Put file to storage in %s seconds." % putTime )
successful[lfn] = destPfn
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
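# Hypothetical example of uploading a local file with put (the paths and the
# SE name below are placeholders):
#
#   res = dm.put( '/vo/user/some/file.dat', '/tmp/file.dat', 'SOME-DISK-SE' )
#   if res['OK'] and '/vo/user/some/file.dat' in res['Value']['Successful']:
#     print 'upload OK'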
# def removeReplica(self,lfn,storageElementName,singleFile=False):
# def putReplica(self,lfn,storageElementName,singleFile=False):
# def replicateReplica(self,lfn,size,storageElementName,singleFile=False):
def getActiveReplicas( self, lfns ):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
res = self.fc.getReplicas( lfns, allStatus = False )
if not res['OK']:
return res
replicas = res['Value']
return self.checkActiveReplicas( replicas )
def checkActiveReplicas( self, replicaDict ):
""" Check a replica dictionary for active replicas
"""
if type( replicaDict ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict ) )
for key in [ 'Successful', 'Failed' ]:
if not key in replicaDict:
return S_ERROR( 'Missing key "%s" in replica dictionary' % key )
if type( replicaDict[key] ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict[key] ) )
seReadStatus = {}
for lfn, replicas in replicaDict['Successful'].items():
if type( replicas ) != DictType:
del replicaDict['Successful'][ lfn ]
replicaDict['Failed'][lfn] = 'Wrong replica info'
continue
for se in replicas.keys():
if not seReadStatus.setdefault( se, self.__SEActive( se ).get( 'Value', {} ).get( 'Read', False ) ):
replicas.pop( se )
return S_OK( replicaDict )
def __SEActive( self, se ):
""" check is SE is active """
result = StorageFactory().getStorageName( se )
if not result['OK']:
return S_ERROR( 'SE not known' )
resolvedName = result['Value']
res = self.resourceStatus.getStorageElementStatus( resolvedName, default = None )
if not res[ 'OK' ]:
return S_ERROR( 'SE not known' )
seStatus = { 'Read' : True, 'Write' : True }
if res['Value'][se].get( 'ReadAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Read' ] = False
if res['Value'][se].get( 'WriteAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Write' ] = False
return S_OK( seStatus )
def __initialiseAccountingObject( self, operation, se, files ):
""" create accouting record """
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'DataManager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
##########################################
#
# Defunct methods kept only for backward compatibility
#
def getReplicas( self, lfns ):
""" get replicas from catalogue """
res = FileCatalog().getReplicas( lfns, allStatus = True )
if res['OK']:
for lfn, replicas in res['Value']['Successful'].items():
for se in replicas:
replicas[se] = StorageElement( se ).getPfnForLfn( lfn ).get( 'Value', {} ).get( 'Successful', {} ).get( lfn, replicas[se] )
return res
##################################################################################################
# Methods from the catalogToStorage. It would all work with the direct call to the SE, but this checks
# first if the replica is known to the catalog
def __executeIfReplicaExists( self, storageElementName, lfn, method, **argsDict ):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
# # default value
argsDict = argsDict if argsDict else {}
# # get replicas for lfn
res = FileCatalog().getReplicas( lfn )
if not res["OK"]:
errStr = "_callReplicaSEFcn: Completely failed to get replicas for LFNs."
self.log.debug( errStr, res["Message"] )
return res
# # returned dict, get failed replicas
retDict = { "Failed": res["Value"]["Failed"],
"Successful" : {} }
# # print errors
for lfn, reason in retDict["Failed"].items():
self.log.error( "_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % ( lfn, reason ) )
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
existingReplicas = []
for lfn, replicas in lfnReplicas.items():
if storageElementName in replicas:
existingReplicas.append( lfn )
else:
errStr = "_callReplicaSEFcn: File hasn't got replica at supplied Storage Element."
self.log.error( errStr, "%s %s" % ( lfn, storageElementName ) )
retDict["Failed"][lfn] = errStr
# # call StorageElement function at least
se = StorageElement( storageElementName )
fcn = getattr( se, method )
res = fcn( existingReplicas, **argsDict )
# # check result
if not res["OK"]:
errStr = "_callReplicaSEFcn: Failed to execute %s StorageElement method." % method
self.log.error( errStr, res["Message"] )
return res
# # filter out failed and successful
for lfn, lfnRes in res["Value"]["Successful"].items():
retDict["Successful"][lfn] = lfnRes
for lfn, errorMessage in res["Value"]["Failed"].items():
retDict["Failed"][lfn] = errorMessage
return S_OK( retDict )
def getReplicaIsFile( self, lfn, storageElementName ):
""" determine whether the supplied lfns are files at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "isFile" )
def getReplicaSize( self, lfn, storageElementName ):
""" get the size of files for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "getFileSize" )
def getReplicaAccessUrl( self, lfn, storageElementName ):
""" get the access url for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "getAccessUrl" )
def getReplicaMetadata( self, lfn, storageElementName ):
""" get the file metadata for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "getFileMetadata" )
def prestageReplica( self, lfn, storageElementName, lifetime = 86400 ):
""" issue a prestage requests for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: prestage request lifetime in seconds (default 86400, i.e. 24h)
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn,
"prestageFile", lifetime = lifetime )
def pinReplica( self, lfn, storageElementName, lifetime = 86400 ):
""" pin the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: pin lifetime in seconds (default 86400, i.e. 24h)
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn,
"pinFile", lifetime = lifetime )
def releaseReplica( self, lfn, storageElementName ):
""" release pins for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "releaseFile" )
def getReplica( self, lfn, storageElementName, localPath = False ):
""" copy replicas from DIRAC SE to local directory
:param self: self reference
:param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: path in the local file system, if False, os.getcwd() will be used
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn,
"getFile", localPath = localPath )
# we should do something to get rid of this one
def removeCatalogFile( self, lfn ):
""" remove a file from the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
# # make sure lfns are sorted from the longest to the shortest
if type( lfn ) == ListType:
lfn = sorted( lfn, reverse = True )
return FileCatalog().removeFile( lfn )
|
avedaee/DIRAC
|
DataManagementSystem/Client/DataManager.py
|
Python
|
gpl-3.0
| 87,813
|
[
"DIRAC"
] |
5e4ea5ddf96c095df2a945092e1f25f6cb147938261d38b80ead1ee499e8ff39
|
## \package GONode Both node and term type vertex classes
# \author Brian Muller <mullerb@musc.edu>
from GOError import GOError
class GONode:
TERM_TYPE = "term_type"
PROTEIN_TYPE = "protein_type"
## Constructor
def __init__ (self, nodetype, dbid, storage=None, name=None, description=None):
self.goError = GOError()
self.storage = storage
self.dbid = dbid
self.type = nodetype
self.name = name
self.description = description
self.leaf = False
def __ne__(self, other):
return not self == other
def __repr__(self):
props = ['dbid','storage', 'name','description']
return "<GONode {%s}>" % ", ".join(["%s: %s" % (n, self.__dict__[n]) for n in props])
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return hash(self) == hash(other)
def __cmp__(self, other):
# hash(self) - hash(other) was sometimes a long, but the following seems to always
# be an int - must be int, because of heapq restrictions
return hash(hash(self) - hash(other))
def requireStorage(self, fname):
if self.storage is None:
self.goError.handleFatal("The method %s cannot be called because no storage was specified" % fname)
def isTermNode(self):
return not self.isProteinNode()
class GOTermNode(GONode):
## Constructor
def __init__ (self, goid, dbid=None, name=None, description=None, storage=None):
GONode.__init__(self, nodetype=GONode.TERM_TYPE, storage=storage, dbid=dbid, name=name, description=description)
self.goid = goid
self._proteins = {}
self._pmids = {}
if self.storage is not None and self.dbid is None:
self.dbid = self.storage.getTermsID(self.goid)
def __str__(self):
if hasattr(self, "str"):
return self.str
props = ['name', 'description', 'goid']
self.str = "<GOTermNode {%s}>" % ", ".join(["%s: %s" % (n, self.__dict__[n]) for n in props])
return self.str
# Get all of the proteins associated with this term node. Results are cached.
# @param species The gene products will be given per specified species
def getProteins(self, species):
self.requireStorage("getProteins")
if not self._proteins.has_key(species):
protein_ids = self.storage.getTermsProteinIDs(self, species)
self._proteins[species] = self.storage.makeProteins(protein_ids)
return self._proteins[species]
## Get a list of the PMID's used as evidence for this term.
# @param species The species to restrict the search to.
# \return A list of PMIDs
def getPMIDReferences(self, species):
self.requireStorage("getPMIDReferences")
if not self._pmids.has_key(species):
self._pmids[species] = self.storage.getPMIDReferences(self, species)
return self._pmids[species]
def isProteinNode(self):
return False
def setProteins(self, prots, species):
self._proteins[species] = prots
class GOProteinNode(GONode):
## Constructor
def __init__ (self, symbol, dbid=None, name=None, description=None, storage=None):
GONode.__init__(self, nodetype=GONode.PROTEIN_TYPE, storage=storage, dbid=dbid, name=name, description=description)
self.symbol = symbol
self._terms = {}
if self.storage is not None and self.dbid is None:
self.dbid = self.storage.getProteinsID(self.symbol)
def __str__(self):
props = ['name', 'description', 'symbol']
return "<GOProteinNode {%s}>" % ", ".join(["%s: %s" % (n, self.__dict__[n]) for n in props])
# Get all of the terms associated with this protein node. Results are cached.
# @param species The terms will be given per specified species
def getTerms(self, species):
self.requireStorage("getTermNodes")
if not self._terms.has_key(species):
term_ids = self.storage.getProteinsTermIDs(self, species)
self._terms[species] = self.storage.makeTerms(term_ids)
return self._terms[species]
def isProteinNode(self):
return True
|
aswarren/GOGranny
|
GOGranny/GONode.py
|
Python
|
gpl-2.0
| 4,304
|
[
"Brian"
] |
2a693f965e14dfdd06d29256c0736dac4db5b6f3645fa162619f457a42395738
|
# 2D channel example
# ==================
#
# .. highlight:: python
#
# This example demonstrates a depth-averaged 2D simulation in a closed
# rectangular domain, where the flow is forced by an initial perturbation in the
# water elevation field.
#
# We begin by importing Thetis and creating a rectangular mesh with :py:func:`~.firedrake.utility_meshes.RectangleMesh`.
# The domain is 40 km long and 2 km wide.
# We generate 25 elements in the along-channel direction and 2 in the
# cross-channel direction::
from thetis import *
lx = 40e3
ly = 2e3
nx = 25
ny = 2
mesh2d = RectangleMesh(nx, ny, lx, ly)
# Next we define a bathymetry function in the 2D mesh, using continuous linear
# elements. In this example we set the bathymetry to constant 20 m depth::
P1_2d = FunctionSpace(mesh2d, 'CG', 1)
bathymetry_2d = Function(P1_2d, name='Bathymetry')
depth = 20.0
bathymetry_2d.assign(depth)
# .. note::
#
# See
# `Firedrake manual <http://firedrakeproject.org/variational-problems.html>`_
# for more information on mesh generation, functions and function spaces.
#
# We are now ready to create a 2D solver object, and set some options::
# total duration in seconds
t_end = 2 * 3600
# export interval in seconds
t_export = 100.0
solver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d)
options = solver_obj.options
options.simulation_export_time = t_export
options.simulation_end_time = t_end
# Here we simply define the total duration of the run, and the
# export interval. See :py:class:`~.ModelOptions` for more information about the
# available options.
#
# Next we define the used time integrator, and set the time step::
options.timestepper_type = 'CrankNicolson'
options.timestep = 50.0
# Because Crank-Nicolson is an unconditionally stable method, we can set
# the time step freely.
#
# We then define the initial condition for elevation. We begin by creating a
# function (in the same linear continuous function space)::
elev_init = Function(P1_2d, name='initial elevation')
# We then need to define an analytical expression in terms of the x,y coordinates of the
# mesh. To this end, we use
# :py:class:`~.ufl.classes.SpatialCoordinate` and define a `UFL <http://fenics-ufl.readthedocs.io/en/latest/>`_ expression (see
# `Firedrake's interpolation manual <http://firedrakeproject.org/interpolation.html>`_
# for more information)::
xy = SpatialCoordinate(mesh2d)
gauss_width = 4000.
gauss_ampl = 2.0
gauss_expr = gauss_ampl * exp(-((xy[0]-lx/2)/gauss_width)**2)
# This defines a 2 m tall Gaussian hill in the x-direction in the middle of the
# domain. We can then interpolate this expression onto the function::
elev_init.interpolate(gauss_expr)
# and set this function as an initial condition to the elevation field::
solver_obj.assign_initial_conditions(elev=elev_init)
# Model setup is now complete. We run the model by issuing::
solver_obj.iterate()
# While the model is running, Thetis prints some statistics on the command line:
#
# .. code-block:: none
#
# 0 0 T= 0.00 eta norm: 6251.2574 u norm: 0.0000 0.00
# 1 2 T= 100.00 eta norm: 5905.0262 u norm: 1398.1128 0.76
# 2 4 T= 200.00 eta norm: 5193.5227 u norm: 2377.8512 0.03
# 3 6 T= 300.00 eta norm: 4656.5334 u norm: 2856.5165 0.03
# ...
#
# The first column is the export index, the second one the number of executed
# time steps, followed by the simulation time. ``eta norm`` and ``u norm`` are
# the L2 norms of the elevation and depth averaged velocity fields, respectively.
# The last column stands for the (approximate) wall-clock time between exports.
#
# The simulation terminates once the end time is reached.
# See :doc:`outputs and visualization <../outputs_and_visu>` page on how to
# visualize the results.
#
# This tutorial can be downloaded as a Python script `here <demo_2d_channel.py>`__.
|
tkarna/cofs
|
demos/demo_2d_channel.py
|
Python
|
mit
| 3,870
|
[
"Gaussian"
] |
f2b9bb59222f78d0df5b489adcae99a8f188756e0eae2691d012ca61f3b9b206
|
from ase import Atoms
from ase.calculators.test import numeric_force
from gpaw import GPAW, FermiDirac, PoissonSolver
from gpaw.test import equal
a = 5.404
bulk = Atoms(symbols='Si8',
positions=[(0, 0, 0.1 / a),
(0, 0.5, 0.5),
(0.5, 0, 0.5),
(0.5, 0.5, 0),
(0.25, 0.25, 0.25),
(0.25, 0.75, 0.75),
(0.75, 0.25, 0.75),
(0.75, 0.75, 0.25)],
pbc=True)
bulk.set_cell((a, a, a), scale_atoms=True)
n = 20
calc = GPAW(gpts=(n, n, n),
nbands=8*3,
occupations=FermiDirac(width=0.01),
poissonsolver=PoissonSolver(nn='M', relax='J'),
kpts=(2, 2, 2),
convergence={'energy': 1e-7}
)
bulk.set_calculator(calc)
f1 = bulk.get_forces()[0, 2]
e1 = bulk.get_potential_energy()
niter1 = calc.get_number_of_iterations()
f2 = numeric_force(bulk, 0, 2)
print f1, f2, f1 - f2
equal(f1, f2, 0.005)
# Volume per atom:
vol = a**3 / 8
de = calc.get_electrostatic_corrections() / vol
print de
assert abs(de[0] - -2.190) < 0.001
print e1, f1, niter1
energy_tolerance = 0.00025
force_tolerance = 0.0001
niter_tolerance = 0
equal(e1, -46.6596470348, energy_tolerance) # svnversion 5252
equal(f1, -1.38242356123, force_tolerance) # svnversion 5252
|
robwarm/gpaw-symm
|
gpaw/test/8Si.py
|
Python
|
gpl-3.0
| 1,380
|
[
"ASE",
"GPAW"
] |
53ad9534c42ee8f73a4122b25bb80519fd7c538f1a18b6bfad5020a3c2380f08
|
"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows the minimal number of
dimensions ``n_components`` to be reduced drastically for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
3manuek/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
Python
|
bsd-3-clause
| 7,452
|
[
"Gaussian"
] |
404a8f2b1b55a010b794e2851d69c4628ffb11844e99129d40813ea36791402a
|
# -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
Scrapers for embedding images
=============================
Collect images that have been produced by code blocks.
The only scrapers we support are Matplotlib and Mayavi, others should
live in modules that will support them (e.g., PyVista, Plotly).
"""
import os
import sys
import re
from distutils.version import LooseVersion
from textwrap import indent
from warnings import filterwarnings
from sphinx.errors import ExtensionError
from .utils import scale_image, optipng
__all__ = ['save_figures', 'figure_rst', 'ImagePathIterator', 'clean_modules',
'matplotlib_scraper', 'mayavi_scraper']
###############################################################################
# Scrapers
def _import_matplotlib():
"""Import matplotlib safely."""
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('agg')
matplotlib_backend = matplotlib.get_backend().lower()
filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
if matplotlib_backend != 'agg':
raise ExtensionError(
"Sphinx-Gallery relies on the matplotlib 'agg' backend to "
"render figures and write them to files. You are "
"currently using the {} backend. Sphinx-Gallery will "
"terminate the build now, because changing backends is "
"not well supported by matplotlib. We advise you to move "
"sphinx_gallery imports before any matplotlib-dependent "
"import. Moving sphinx_gallery imports at the top of "
"your conf.py file should fix this issue"
.format(matplotlib_backend))
import matplotlib.pyplot as plt
return matplotlib, plt
def _matplotlib_fig_titles(fig):
titles = []
# get supertitle if exists
suptitle = getattr(fig, "_suptitle", None)
if suptitle is not None:
titles.append(suptitle.get_text())
# get titles from all axes, for all locs
title_locs = ['left', 'center', 'right']
for ax in fig.axes:
for loc in title_locs:
text = ax.get_title(loc=loc)
if text:
titles.append(text)
fig_titles = ', '.join(titles)
return fig_titles
_ANIMATION_RST = '''
.. container:: sphx-glr-animation
.. raw:: html
{0}
'''
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs):
"""Scrape Matplotlib images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
**kwargs : dict
Additional keyword arguments to pass to
:meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
The ``format`` kwarg in particular is used to set the file extension
of the output file (currently only 'png', 'jpg', and 'svg' are
supported).
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`.
"""
matplotlib, plt = _import_matplotlib()
from matplotlib.animation import Animation
image_path_iterator = block_vars['image_path_iterator']
image_rsts = []
# Check for animations
anims = list()
if gallery_conf.get('matplotlib_animations', False):
for ani in block_vars['example_globals'].values():
if isinstance(ani, Animation):
anims.append(ani)
# Then standard images
for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
if 'format' in kwargs:
image_path = '%s.%s' % (os.path.splitext(image_path)[0],
kwargs['format'])
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_num)
# Deal with animations
cont = False
for anim in anims:
if anim._fig is fig:
image_rsts.append(_anim_rst(anim, image_path, gallery_conf))
cont = True
break
if cont:
continue
# get fig titles
fig_titles = _matplotlib_fig_titles(fig)
to_rgba = matplotlib.colors.colorConverter.to_rgba
# shallow copy should be fine here, just want to avoid changing
# "kwargs" for subsequent figures processed by the loop
these_kwargs = kwargs.copy()
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr) and \
attr not in kwargs:
these_kwargs[attr] = fig_attr
try:
fig.savefig(image_path, **these_kwargs)
except Exception:
plt.close('all')
raise
if 'images' in gallery_conf['compress_images']:
optipng(image_path, gallery_conf['compress_images_args'])
image_rsts.append(
figure_rst([image_path], gallery_conf['src_dir'], fig_titles))
plt.close('all')
rst = ''
if len(image_rsts) == 1:
rst = image_rsts[0]
elif len(image_rsts) > 1:
image_rsts = [re.sub(r':class: sphx-glr-single-img',
':class: sphx-glr-multi-img',
image) for image in image_rsts]
image_rsts = [HLIST_IMAGE_MATPLOTLIB + indent(image, u' ' * 6)
for image in image_rsts]
rst = HLIST_HEADER + ''.join(image_rsts)
return rst
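# A sketch of how a project could reuse this scraper with different savefig
# keyword arguments (the wrapper name and the conf.py snippet below are
# illustrative, not part of this module):
#
#     def matplotlib_svg_scraper(*args, **kwargs):
#         return matplotlib_scraper(*args, format='svg', **kwargs)
#
#     sphinx_gallery_conf = {
#         'image_scrapers': (matplotlib_svg_scraper,),
#     }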
def _anim_rst(anim, image_path, gallery_conf):
import matplotlib
from matplotlib.animation import FFMpegWriter, ImageMagickWriter
# output the thumbnail as the image, as it will just be copied
# if it's the file thumbnail
fig = anim._fig
image_path = image_path.replace('.png', '.gif')
fig_size = fig.get_size_inches()
thumb_size = gallery_conf['thumbnail_size']
use_dpi = round(
min(t_s / f_s for t_s, f_s in zip(thumb_size, fig_size)))
# FFmpeg is buggy for GIFs before Matplotlib 3.3.1
if LooseVersion(matplotlib.__version__) >= LooseVersion('3.3.1') and \
FFMpegWriter.isAvailable():
writer = 'ffmpeg'
elif ImageMagickWriter.isAvailable():
writer = 'imagemagick'
else:
writer = None
anim.save(image_path, writer=writer, dpi=use_dpi)
html = anim._repr_html_()
if html is None: # plt.rcParams['animation.html'] == 'none'
html = anim.to_jshtml()
html = indent(html, ' ')
return _ANIMATION_RST.format(html)
def mayavi_scraper(block, block_vars, gallery_conf):
"""Scrape Mayavi images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`.
"""
from mayavi import mlab
image_path_iterator = block_vars['image_path_iterator']
image_paths = list()
e = mlab.get_engine()
for scene, image_path in zip(e.scenes, image_path_iterator):
try:
mlab.savefig(image_path, figure=scene)
except Exception:
mlab.close(all=True)
raise
# make sure the image is not too large
scale_image(image_path, image_path, 850, 999)
if 'images' in gallery_conf['compress_images']:
optipng(image_path, gallery_conf['compress_images_args'])
image_paths.append(image_path)
mlab.close(all=True)
return figure_rst(image_paths, gallery_conf['src_dir'])
_scraper_dict = dict(
matplotlib=matplotlib_scraper,
mayavi=mayavi_scraper,
)
class ImagePathIterator(object):
"""Iterate over image paths for a given example.
Parameters
----------
image_path : str
The template image path.
"""
def __init__(self, image_path):
self.image_path = image_path
self.paths = list()
self._stop = 1000000
def __len__(self):
"""Return the number of image paths used.
Returns
-------
n_paths : int
The number of paths.
"""
return len(self.paths)
def __iter__(self):
"""Iterate over paths.
Returns
-------
paths : iterable of str
This enables the use of this Python pattern::
>>> for path in image_path_iterator: # doctest: +SKIP
>>> print(path) # doctest: +SKIP
Where ``path`` is given by successive outputs of
:meth:`ImagePathIterator.next`.
"""
# we should really never have 1e6, let's prevent some user pain
for ii in range(self._stop):
yield self.next()
else:
raise ExtensionError('Generated over %s images' % (self._stop,))
def next(self):
return self.__next__()
def __next__(self):
# The +1 here is because we start image numbering at 1 in filenames
path = self.image_path.format(len(self) + 1)
self.paths.append(path)
return path
# For now, these are what we support
_KNOWN_IMG_EXTS = ('png', 'svg', 'jpg', 'gif')
def _find_image_ext(path):
"""Find an image, tolerant of different file extensions."""
path = os.path.splitext(path)[0]
for ext in _KNOWN_IMG_EXTS:
this_path = '%s.%s' % (path, ext)
if os.path.isfile(this_path):
break
else:
ext = 'png'
return ('%s.%s' % (path, ext), ext)
def save_figures(block, block_vars, gallery_conf):
"""Save all open figures of the example code-block.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
Returns
-------
images_rst : str
rst code to embed the images in the document.
"""
image_path_iterator = block_vars['image_path_iterator']
all_rst = u''
prev_count = len(image_path_iterator)
for scraper in gallery_conf['image_scrapers']:
rst = scraper(block, block_vars, gallery_conf)
if not isinstance(rst, str):
raise ExtensionError('rst from scraper %r was not a string, '
'got type %s:\n%r'
% (scraper, type(rst), rst))
n_new = len(image_path_iterator) - prev_count
for ii in range(n_new):
current_path, _ = _find_image_ext(
image_path_iterator.paths[prev_count + ii])
if not os.path.isfile(current_path):
raise ExtensionError(
'Scraper %s did not produce expected image:'
'\n%s' % (scraper, current_path))
all_rst += rst
return all_rst
def figure_rst(figure_list, sources_dir, fig_titles=''):
"""Generate RST for a list of image filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
fig_titles : str
Titles of figures, empty string if no titles found. Currently
only supported for matplotlib figures, default = ''.
Returns
-------
images_rst : str
rst code to embed the images in the document
"""
figure_paths = [os.path.relpath(figure_path, sources_dir)
.replace(os.sep, '/').lstrip('/')
for figure_path in figure_list]
# Get alt text
alt = ''
if fig_titles:
alt = fig_titles
elif figure_list:
file_name = os.path.split(figure_list[0])[1]
# remove ext & 'sphx_glr_' from start & n#'s from end
file_name_noext = os.path.splitext(file_name)[0][9:-4]
# replace - & _ with \s
file_name_final = re.sub(r'[-,_]', ' ', file_name_noext)
alt = file_name_final
alt = _single_line_sanitize(alt)
images_rst = ""
if len(figure_paths) == 1:
figure_name = figure_paths[0]
images_rst = SINGLE_IMAGE % (figure_name, alt)
elif len(figure_paths) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_paths:
images_rst += HLIST_IMAGE_TEMPLATE % (figure_name, alt)
return images_rst
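# A rough sketch of the two output shapes (paths are hypothetical):
#
#   >>> figure_rst(['/doc/sphx_glr_plot_demo_001.png'], '/doc')  # doctest: +SKIP
#   # -> one SINGLE_IMAGE directive, alt text derived from the file name
#   >>> figure_rst(['/doc/sphx_glr_plot_demo_001.png',
#   ...             '/doc/sphx_glr_plot_demo_002.png'], '/doc')  # doctest: +SKIP
#   # -> HLIST_HEADER followed by one HLIST_IMAGE_TEMPLATE entry per figure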
def _single_line_sanitize(s):
"""Remove problematic newlines."""
# For example, when setting a :alt: for an image, it shouldn't have \n
# This is a function in case we end up finding other things to replace
return s.replace('\n', ' ')
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: sphx-glr-horizontal
"""
HLIST_IMAGE_MATPLOTLIB = """
*
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: /%s
:alt: %s
:class: sphx-glr-multi-img
"""
SINGLE_IMAGE = """
.. image:: /%s
:alt: %s
:class: sphx-glr-single-img
"""
###############################################################################
# Module resetting
def _reset_matplotlib(gallery_conf, fname):
"""Reset matplotlib."""
_, plt = _import_matplotlib()
plt.rcdefaults()
def _reset_seaborn(gallery_conf, fname):
"""Reset seaborn."""
    # Horrible code to 'unload' seaborn, so that it resets
    # its defaults the next time it is imported
# Python does not support unloading of modules
# https://bugs.python.org/issue9072
for module in list(sys.modules.keys()):
if 'seaborn' in module:
del sys.modules[module]
_reset_dict = {
'matplotlib': _reset_matplotlib,
'seaborn': _reset_seaborn,
}
def clean_modules(gallery_conf, fname):
"""Remove, unload, or reset modules after running each example.
After a script is executed it can load a variety of settings that one
does not want to influence in other examples in the gallery.
Parameters
----------
gallery_conf : dict
The gallery configuration.
fname : str or None
The example being run. Will be None when this is called entering
a directory of examples to be built.
"""
for reset_module in gallery_conf['reset_modules']:
reset_module(gallery_conf, fname)
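# A minimal sketch of how resetters are usually selected from conf.py; the
# custom callable shown is hypothetical:
#
#   sphinx_gallery_conf = {
#       'reset_modules': ('matplotlib', 'seaborn'),  # names resolved via _reset_dict
#   }
#
#   def reset_my_state(gallery_conf, fname):
#       """Custom resetter: any callable taking (gallery_conf, fname)."""
#       pass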
|
Eric89GXL/sphinx-gallery
|
sphinx_gallery/scrapers.py
|
Python
|
bsd-3-clause
| 15,061
|
[
"Mayavi"
] |
bd966265ab697fd600422d8df5abcca919e5bc1130bc352ce78a1fc41f8f55b1
|
import unittest, time, sys, os, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_exec
import h2o_glm, h2o_gbm, h2o_rf
class ModelManagementTestCase(unittest.TestCase):
tear_down_cloud = True
# tear_down_cloud = False
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
cloud_size = 5
if h2o.clone_cloud_json != None:
print "NOTE: Connecting to existing cloud, and leaving the cloud running afterwards: " + \
os.path.abspath(h2o.clone_cloud_json)
print "Calling h2o.init(" + str(cloud_size) + "). . ."
h2o.init(cloud_size, java_heap_GB=2, timeoutSecs=120)
@classmethod
def tearDownClass(cls):
if h2o.clone_cloud_json == None:
if ModelManagementTestCase.tear_down_cloud:
h2o.tear_down_cloud()
else:
pass
else:
h2o.check_sandbox_for_errors(sandboxIgnoreErrors=False, python_test_name="test_model_management")
already_set_up = False
''' Lazy setup of the common frames and models used by the test cases. '''
def setUp(self):
if ModelManagementTestCase.already_set_up:
return
self.create_models(self.import_frames())
ModelManagementTestCase.already_set_up = True
def import_frame(self, target_key, bucket, csvFilename, csvPathname, expected_rows, expected_cols):
path = csvPathname + '/' + csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=path, hex_key=target_key, schema='put') # upload the file
destination_key = parseResult['destination_key'] # we block until it's actually ready
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
actual_rows = inspect['numRows']
actual_cols = inspect['numCols']
print 'loaded frame "' + target_key +'" from path: ' + path
print 'rows: ', actual_rows
print 'cols: ', actual_cols
# Don't have access to the testCase assert methods here because they aren't class methods. :-(
assert expected_rows == actual_rows, "Expected " + str(expected_rows) + " but got " + str(actual_rows) + " for path: " + path
assert expected_cols == actual_cols, "Expected " + str(expected_cols) + " but got " + str(actual_cols) + " for path: " + path
# TODO: other info we could check
# (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
# h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=True)
#
# summaryResult = h2o_cmd.runSummary(key=parseResult['destination_key'])
# h2o_cmd.infoFromSummary(summaryResult) # , noPrint=True
return destination_key
# TODO: generalize by passing in the exec2 expression
def create_new_boolean(self, frame, old_col_name, new_col_name):
node = h2o.nodes[0]
# NOTE: 1-based column indexing!
resultExec, ncols = h2o_exec.exec_expr(execExpr='ncol(' + frame + ')')
# print 'before doing anything, ncols: ', int(ncols)
resultExec, dontcare = h2o_exec.exec_expr(execExpr="{0}[, ncol({0}) + 1] = ({0}${1} == 1)".format(frame, old_col_name))
resultExec, ncols = h2o_exec.exec_expr(execExpr="ncol({0})".format(frame))
ncols = int(ncols)
# print 'after allegedly creating new column ncols: ', ncols
node.set_column_names(source=frame, cols='C' + str(ncols), comma_separated_list=new_col_name)
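# A rough sketch of what create_new_boolean does for a call such as
# create_new_boolean('airlines_train.hex', 'IsDepDelayed_REC', 'IsDepDelayed_REC_recoded'):
# it runs the Exec2 expression
#   airlines_train.hex[, ncol(airlines_train.hex) + 1] = (airlines_train.hex$IsDepDelayed_REC == 1)
# to append a 0/1 column, then renames that new last column ("C" + ncol) to the
# requested name via set_column_names.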
def import_frames(self):
node = h2o.nodes[0]
prostate_hex = self.import_frame('prostate.hex', 'smalldata', 'prostate.csv', 'logreg', 380, 9)
airlines_train_hex = self.import_frame('airlines_train.hex', 'smalldata', 'AirlinesTrain.csv.zip', 'airlines', 24421, 12)
airlines_test_hex = self.import_frame('airlines_test.hex', 'smalldata', 'AirlinesTest.csv.zip', 'airlines', 2691, 12)
has_uuid_hex = self.import_frame('has_uuid.hex', 'smalldata', 'test_all_raw_top10rows.csv', 'test', 12, 89)
# get the hashes
print "Checking " + str(len(h2o.nodes)) + " nodes for frames: "
for a_node in h2o.nodes:
print " " + a_node.http_addr + ":" + str(a_node.port)
test_hash_before = -1
train_hash_before = -1
for a_node in h2o.nodes:
print " Checking " + a_node.http_addr + ":" + str(a_node.port)
frames = a_node.frames()
self.assertKeysExist(frames, 'frames', ['airlines_train.hex'])
self.assertKeysExist(frames, 'frames', ['airlines_test.hex'])
self.assertKeysExist(frames, 'frames/airlines_test.hex', ['id'])
self.assertKeysExist(frames, 'frames', ['has_uuid.hex'])
# Make sure we have the same checksums everywhere:
tmp = frames['frames']['airlines_test.hex']['id']
if test_hash_before != -1:
self.assertEquals(tmp, test_hash_before, "Same hash on every node for airlines_test.hex")
test_hash_before = tmp
# Make sure we have the same checksums everywhere:
tmp = frames['frames']['airlines_train.hex']['id']
if train_hash_before != -1:
self.assertEquals(tmp, train_hash_before, "Same hash on every node for airlines_train.hex")
train_hash_before = tmp
self.assertNotEqual("ffffffffffffffff", test_hash_before);
self.assertNotEqual("ffffffffffffffff", train_hash_before);
self.assertNotEqual("0", test_hash_before);
self.assertNotEqual("0", train_hash_before);
# Add new proper boolean response columns
self.create_new_boolean('airlines_train.hex', 'IsDepDelayed_REC', 'IsDepDelayed_REC_recoded')
self.create_new_boolean('airlines_test.hex', 'IsDepDelayed_REC', 'IsDepDelayed_REC_recoded')
# get the hashes and ensure they've changed
frames = node.frames()
self.assertKeysExist(frames, 'frames', ['airlines_train.hex'])
self.assertKeysExist(frames, 'frames', ['airlines_test.hex'])
self.assertKeysExist(frames, 'frames/airlines_test.hex', ['id'])
train_hash_after = frames['frames']['airlines_train.hex']['id']
test_hash_after = frames['frames']['airlines_test.hex']['id']
self.assertNotEqual(train_hash_before, train_hash_after, "Expected airlines_train hash to change. . . Before and after were both: " + train_hash_after)
self.assertNotEqual(test_hash_before, test_hash_after, "Expected airlines_test hash to change. . . Before and after were both: " + test_hash_after)
print "airlines_train hash before: ", train_hash_before, ", after: ", train_hash_after
print "airlines_test hash before: ", test_hash_before, ", after: ", test_hash_after
return (prostate_hex, airlines_train_hex, airlines_test_hex)
def create_models(self, frame_keys):
prostate_hex, airlines_train_hex, airlines_test_hex = frame_keys
self.assertIsNotNone(prostate_hex)
self.assertIsNotNone(airlines_train_hex)
self.assertIsNotNone(airlines_test_hex)
node = h2o.nodes[0]
num_models = 0
durations = {}
print "##############################################################"
print "Generating AirlinesTrain GLM2 binary classification model. . ."
# R equivalent: h2o.glm.FV(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, family = "binomial", alpha=0.05, lambda=1.0e-2, standardize=FALSE, nfolds=0)
before = time.time() * 1000
glm_AirlinesTrain_1_params = {
'destination_key': 'glm_AirlinesTrain_binary_1',
'response': 'IsDepDelayed',
'ignored_cols': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'family': 'binomial',
'alpha': 0.5,
'standardize': 0,
'lambda': 1.0e-2,
'n_folds': 0,
'use_all_factor_levels': 1,
'variable_importances': 1
}
glm_AirlinesTrain_1 = node.GLM(airlines_train_hex, **glm_AirlinesTrain_1_params)
durations['glm_AirlinesTrain_binary_1'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_AirlinesTrain_1, None, **glm_AirlinesTrain_1_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################################################"
print "Generating AirlinesTrain GLM2 binary classification model with nfold crossvalidation. . ."
# R equivalent: h2o.glm.FV(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, family = "binomial", alpha=0.05, lambda=1.0e-2, standardize=FALSE, nfolds=3)
before = time.time() * 1000
glm_AirlinesTrain_3fold_params = {
'destination_key': 'glm_AirlinesTrain_binary_3fold',
'response': 'IsDepDelayed',
'ignored_cols': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'family': 'binomial',
'alpha': 0.5,
'standardize': 0,
'lambda': 1.0e-2,
'n_folds': 3,
'use_all_factor_levels': 1,
'variable_importances': 1
}
glm_AirlinesTrain_3fold = node.GLM(airlines_train_hex, **glm_AirlinesTrain_3fold_params)
durations['glm_AirlinesTrain_binary_3fold'] = time.time() * 1000 - before
num_models = num_models + 1 # TODO: interesting that the xval models aren't visible as they are in GBM
h2o_glm.simpleCheckGLM(self, glm_AirlinesTrain_3fold, None, **glm_AirlinesTrain_3fold_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
# print "##############################################################"
# print "Grid search: Generating AirlinesTrain GLM2 binary classification models. . ."
# before = time.time() * 1000
# glm_AirlinesTrain_grid_params = {
# 'destination_key': 'glm_AirlinesTrain_binary_grid_',
# 'response': 'IsDepDelayed',
# 'ignored_cols': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
# 'family': 'binomial',
# 'alpha': '0.5, 1.0',
# 'standardize': 0,
# 'lambda': '1.0e-2,1.0e-3,1.0e-4',
# 'n_folds': 2,
# 'use_all_factor_levels': 1,
# 'variable_importances': 1
# }
# glm_AirlinesTrain_grid = node.GLMGrid(airlines_train_hex, **glm_AirlinesTrain_grid_params)
# durations['glm_AirlinesTrain_binary_grid'] = time.time() * 1000 - before
# num_models = num_models + 6
# h2o_glm.simpleCheckGLMGrid(self, glm_AirlinesTrain_grid, None, **glm_AirlinesTrain_grid_params)
# for a_node in h2o.nodes:
# print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
# dummy = a_node.frames()
# dummy = a_node.models()
print "####################################################################"
print "Generating AirlinesTrain simple GBM binary classification model. . ."
# R equivalent: h2o.gbm(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, n.trees=3, interaction.depth=1, distribution="multinomial", n.minobsinnode=2, shrinkage=.1)
before = time.time() * 1000
gbm_AirlinesTrain_1_params = {
'destination_key': 'gbm_AirlinesTrain_binary_1',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 3,
'max_depth': 1,
'classification': 1,
'n_folds': 0
# TODO: what about minobsinnode and shrinkage?!
}
gbm_AirlinesTrain_1 = node.gbm(airlines_train_hex, **gbm_AirlinesTrain_1_params)
durations['gbm_AirlinesTrain_binary_1'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#####################################################################"
print "Generating AirlinesTrain complex GBM binary classification model. . ."
# R equivalent: h2o.gbm(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, n.trees=50, interaction.depth=5, distribution="multinomial", n.minobsinnode=2, shrinkage=.1)
before = time.time() * 1000
gbm_AirlinesTrain_2_params = {
'destination_key': 'gbm_AirlinesTrain_binary_2',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 50,
'max_depth': 5,
'classification': 1,
'n_folds': 0
# TODO: what about minobsinnode and shrinkage?!
}
gbm_AirlinesTrain_2 = node.gbm(airlines_train_hex, **gbm_AirlinesTrain_2_params)
durations['gbm_AirlinesTrain_binary_2'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "###############################################################################################"
print "Generating AirlinesTrain simple GBM binary classification model with nfold crossvalidation. . ."
# R equivalent: h2o.gbm(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, n.trees=3, interaction.depth=1, distribution="multinomial", n.minobsinnode=2, shrinkage=.1)
before = time.time() * 1000
gbm_AirlinesTrain_3fold_params = {
'destination_key': 'gbm_AirlinesTrain_binary_3fold',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 3,
'max_depth': 1,
'classification': 1,
'n_folds': 3
# TODO: what about minobsinnode and shrinkage?!
}
gbm_AirlinesTrain_3fold = node.gbm(airlines_train_hex, **gbm_AirlinesTrain_3fold_params)
durations['gbm_AirlinesTrain_binary_3fold'] = time.time() * 1000 - before
num_models = num_models + 4 # 1 main model and 3 xval models
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "####################################################################"
print "Generating AirlinesTrain simple DRF binary classification model. . ."
# R equivalent: h2o.randomForest.FV(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, ntree=5, depth=2)
before = time.time() * 1000
rf_AirlinesTrain_1_params = {
'destination_key': 'rf_AirlinesTrain_binary_1',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 5,
'max_depth': 2,
'classification': 1,
'seed': 1234567890123456789L
}
rf_AirlinesTrain_1 = node.random_forest(airlines_train_hex, **rf_AirlinesTrain_1_params)
durations['rf_AirlinesTrain_binary_1'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#####################################################################"
print "Generating AirlinesTrain complex DRF binary classification model. . ."
# R equivalent: h2o.randomForest.FV(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, ntree=50, depth=10)
before = time.time() * 1000
rf_AirlinesTrain_2_params = {
'destination_key': 'rf_AirlinesTrain_binary_2',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 50,
'max_depth': 10,
'classification': 1,
'seed': 1234567890123456789L
}
rf_AirlinesTrain_2 = node.random_forest(airlines_train_hex, **rf_AirlinesTrain_2_params)
durations['rf_AirlinesTrain_binary_2'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "###############################################################################################"
print "Generating AirlinesTrain simple DRF binary classification model with nfold crossvalidation. . ."
# R equivalent: h2o.randomForest.FV(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, ntree=5, depth=2)
before = time.time() * 1000
rf_AirlinesTrain_3fold_params = {
'destination_key': 'rf_AirlinesTrain_binary_3fold',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 5,
'max_depth': 2,
'classification': 1,
'seed': 1234567890123456789L,
'n_folds': 3
}
rf_AirlinesTrain_3fold = node.random_forest(airlines_train_hex, **rf_AirlinesTrain_3fold_params)
durations['rf_AirlinesTrain_binary_3fold'] = time.time() * 1000 - before
num_models = num_models + 4
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#####################################################################"
print "Generating AirlinesTrain complex SpeeDRF binary classification model. . ."
# what is the R binding?
before = time.time() * 1000
speedrf_AirlinesTrain_1_params = {
'destination_key': 'speedrf_AirlinesTrain_binary_1',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 50,
'max_depth': 10,
'classification': 1,
'importance': 1,
'seed': 1234567890123456789L
}
speedrf_AirlinesTrain_1 = node.speedrf(airlines_train_hex, **speedrf_AirlinesTrain_1_params)
durations['speedrf_AirlinesTrain_binary_1'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "####################################################################################################"
print "Generating AirlinesTrain complex SpeeDRF binary classification model with nfold crossvalidation. . ."
# what is the R binding?
before = time.time() * 1000
speedrf_AirlinesTrain_3fold_params = {
'destination_key': 'speedrf_AirlinesTrain_binary_3fold',
'response': 'IsDepDelayed',
'ignored_cols_by_name': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'ntrees': 50,
'max_depth': 10,
'classification': 1,
'importance': 1,
'seed': 1234567890123456789L,
'n_folds': 3
}
speedrf_AirlinesTrain_3fold = node.speedrf(airlines_train_hex, **speedrf_AirlinesTrain_3fold_params)
durations['speedrf_AirlinesTrain_binary_3fold'] = time.time() * 1000 - before
num_models = num_models + 4 # 1 main model and 3 xval models
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "######################################################################"
print "Generating AirlinesTrain DeepLearning binary classification model. . ."
# R equivalent: h2o.deeplearning(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, classification=TRUE, hidden=c(10, 10))
before = time.time() * 1000
dl_AirlinesTrain_1_params = {
'destination_key': 'dl_AirlinesTrain_binary_1',
'response': 'IsDepDelayed',
'ignored_cols': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'hidden': [10, 10],
'classification': 1,
'variable_importances': 1
}
dl_AirlinesTrain_1 = node.deep_learning(airlines_train_hex, **dl_AirlinesTrain_1_params)
durations['dl_AirlinesTrain_binary_1'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "##############################################################################################"
print "Generating AirlinesTrain GLM2 binary classification model with different response column. . ."
# R equivalent: h2o.glm.FV(y = "IsDepDelayed_REC", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, family = "binomial", alpha=0.05, lambda=1.0e-2, standardize=FALSE, nfolds=0)
before = time.time() * 1000
glm_AirlinesTrain_A_params = {
'destination_key': 'glm_AirlinesTrain_binary_A',
'response': 'IsDepDelayed_REC_recoded',
'ignored_cols': 'IsDepDelayed, IsDepDelayed_REC',
'family': 'binomial',
'alpha': 0.5,
'standardize': 0,
'lambda': 1.0e-2,
'n_folds': 0,
'use_all_factor_levels': 1,
'variable_importances': 1
}
glm_AirlinesTrain_A = node.GLM(airlines_train_hex, **glm_AirlinesTrain_A_params)
durations['glm_AirlinesTrain_binary_A'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_AirlinesTrain_A, None, **glm_AirlinesTrain_A_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#################################################################################################"
print "Generating AirlinesTrain DeepLearning binary classification model with nfold crossvalidation. . ."
# R equivalent: h2o.deeplearning(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, classification=TRUE, hidden=c(10, 10), nfolds=3)
before = time.time() * 1000
dl_AirlinesTrain_3fold_params = {
'destination_key': 'dl_AirlinesTrain_binary_3fold',
'response': 'IsDepDelayed',
'ignored_cols': 'IsDepDelayed_REC, IsDepDelayed_REC_recoded',
'hidden': [10, 10],
'classification': 1,
'variable_importances': 1,
'n_folds': 3
}
dl_AirlinesTrain_3fold = node.deep_learning(airlines_train_hex, **dl_AirlinesTrain_3fold_params)
durations['dl_AirlinesTrain_binary_3fold'] = time.time() * 1000 - before
num_models = num_models + 4 # 1 main model and 3 xval models
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "##############################################################################################"
print "Generating AirlinesTrain Naive Bayes binary classification model. . ."
# R equivalent: h2o.naive_bayes(y = "IsDepDelayed", x = c("Origin", "Dest", "fDayofMonth", "fYear", "UniqueCarrier", "fDayOfWeek", "fMonth", "DepTime", "ArrTime", "Distance"), data = airlines_train.hex, family = "binomial", alpha=0.05, lambda=1.0e-2, standardize=FALSE, nfolds=0)
before = time.time() * 1000
nb_AirlinesTrain_params = {
'destination_key': 'nb_AirlinesTrain_binary_1',
'response': 'IsDepDelayed',
'ignored_cols': 'IsDepDelayed_REC_recoded, IsDepDelayed_REC',
}
nb_AirlinesTrain = node.naive_bayes(source=airlines_train_hex, timeoutSecs=120, **nb_AirlinesTrain_params)
durations['nb_AirlinesTrain_binary_1'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
# These Prostate GLM models are also used to test that we get a warning only if variable_importances == 1 and use_all_factor_levels = 0. The defaults for these are now both 0. There are 9 combinations to test:
# num   variable_importances   use_all_factor_levels   warning expected?
# -----------------------------------------------------------------------
# 00    0                      0                       False
# 01    0                      1                       False
# 10    1                      0                       True
# 11    1                      1                       False
# xx    default (0)            default (0)             False
# x0    default (0)            0                       False
# x1    default (0)            1                       False
# 0x    0                      default (0)             False
# 1x    1                      default (0)             True
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances false and use_all_factor_levels false (should have no warnings) . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_00_params = {
'destination_key': 'glm_Prostate_binary_00',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
'variable_importances': 0,
'use_all_factor_levels': 0
}
glm_Prostate_00 = node.GLM(prostate_hex, **glm_Prostate_00_params)
durations['glm_Prostate_binary_00'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_00, None, **glm_Prostate_00_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances false and use_all_factor_levels true (should have no warnings) . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_01_params = {
'destination_key': 'glm_Prostate_binary_01',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
'variable_importances': 0,
'use_all_factor_levels': 1
}
glm_Prostate_01 = node.GLM(prostate_hex, **glm_Prostate_01_params)
durations['glm_Prostate_binary_01'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_01, None, **glm_Prostate_01_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances true and use_all_factor_levels false (should have a warning) . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_10_params = {
'destination_key': 'glm_Prostate_binary_10',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
'variable_importances': 1,
'use_all_factor_levels': 0
}
glm_Prostate_10 = node.GLM(prostate_hex, **glm_Prostate_10_params)
durations['glm_Prostate_binary_10'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_10, None, **glm_Prostate_10_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances true and use_all_factor_levels true (should have no warnings) . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_11_params = {
'destination_key': 'glm_Prostate_binary_11',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
'variable_importances': 1,
'use_all_factor_levels': 1
}
glm_Prostate_11 = node.GLM(prostate_hex, **glm_Prostate_11_params)
durations['glm_Prostate_binary_11'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_11, None, **glm_Prostate_11_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances default (should default to false) and use_all_factor_levels default (should default to false), should have no warnings . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_xx_params = {
'destination_key': 'glm_Prostate_binary_xx',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
# 'variable_importances': 0,
# 'use_all_factor_levels': 0
}
glm_Prostate_xx = node.GLM(prostate_hex, **glm_Prostate_xx_params)
durations['glm_Prostate_binary_xx'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_xx, None, **glm_Prostate_xx_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances default (should default to false) and use_all_factor_levels false (should have no warnings) . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_x0_params = {
'destination_key': 'glm_Prostate_binary_x0',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
# 'variable_importances': 0,
'use_all_factor_levels': 0
}
glm_Prostate_x0 = node.GLM(prostate_hex, **glm_Prostate_x0_params)
durations['glm_Prostate_binary_x0'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_x0, None, **glm_Prostate_x0_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances default (should default to false) and use_all_factor_levels true (should have no warnings) . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_x1_params = {
'destination_key': 'glm_Prostate_binary_x1',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
# 'variable_importances': 0,
'use_all_factor_levels': 1
}
glm_Prostate_x1 = node.GLM(prostate_hex, **glm_Prostate_x1_params)
durations['glm_Prostate_binary_x1'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_x1, None, **glm_Prostate_x1_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances false and use_all_factor_levels default (should default to false), should have no warnings . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_0x_params = {
'destination_key': 'glm_Prostate_binary_0x',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
'variable_importances': 0,
# 'use_all_factor_levels': 0
}
glm_Prostate_0x = node.GLM(prostate_hex, **glm_Prostate_0x_params)
durations['glm_Prostate_binary_0x'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_0x, None, **glm_Prostate_0x_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#########################################################"
print "Generating Prostate GLM2 binary classification model with variable_importances True and use_all_factor_levels default (should default to false), should have a warning . . ."
# R equivalent: h2o.glm.FV(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_1x_params = {
'destination_key': 'glm_Prostate_binary_1x',
'response': 'CAPSULE',
'ignored_cols': None,
'family': 'binomial',
'alpha': 0.5,
'n_folds': 0,
'variable_importances': 1,
# 'use_all_factor_levels': 0
}
glm_Prostate_1x = node.GLM(prostate_hex, **glm_Prostate_1x_params)
durations['glm_Prostate_binary_1x'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_1x, None, **glm_Prostate_1x_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
#
# END OF 9 PROSTATE GLM2 VARIATIONS
#
print "###############################################################"
print "Generating Prostate simple DRF binary classification model. . ."
# R equivalent: h2o.randomForest.FV(y = "CAPSULE", x = c("AGE","RACE","DCAPS"), data = prostate.hex, ntree=10, depth=5)
before = time.time() * 1000
rf_Prostate_1_params = {
'destination_key': 'rf_Prostate_binary_1',
'response': 'CAPSULE',
'ignored_cols_by_name': None,
'ntrees': 10,
'max_depth': 5,
'classification': 1,
'seed': 1234567890123456789L
}
rf_Prostate_1 = node.random_forest(prostate_hex, **rf_Prostate_1_params)
durations['rf_Prostate_binary_1'] = time.time() * 1000 - before
num_models = num_models + 1
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "#####################################################################"
print "Generating Prostate complex SpeeDRF binary classification model. . ."
before = time.time() * 1000
speedrf_Prostate_1_params = {
'destination_key': 'speedrf_Prostate_binary_1',
'response': 'CAPSULE',
'ignored_cols_by_name': None,
'ntrees': 50,
'max_depth': 10,
'classification': 1,
'importance': 1,
'seed': 1234567890123456789L
}
speedrf_Prostate_1 = node.speedrf(prostate_hex, **speedrf_Prostate_1_params)
num_models = num_models + 1
durations['speedrf_Prostate_binary_1'] = time.time() * 1000 - before
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
print "##############################################"
print "Generating Prostate GLM2 regression model. . ."
# R equivalent: h2o.glm.FV(y = "AGE", x = c("CAPSULE","RACE","PSA","DCAPS"), data = prostate.hex, family = "gaussian", nfolds = 0, alpha = 0.5)
before = time.time() * 1000
glm_Prostate_regression_1_params = {
'destination_key': 'glm_Prostate_regression_1',
'response': 'AGE',
'ignored_cols': None,
'family': 'gaussian',
'alpha': 0.5,
'n_folds': 0,
'use_all_factor_levels': 1,
'variable_importances': 1
}
glm_Prostate_regression_1 = node.GLM(prostate_hex, **glm_Prostate_regression_1_params)
durations['glm_Prostate_regression_1'] = time.time() * 1000 - before
num_models = num_models + 1
h2o_glm.simpleCheckGLM(self, glm_Prostate_regression_1, None, **glm_Prostate_regression_1_params)
for a_node in h2o.nodes:
print "Checking /Frames and /Models on: " + a_node.http_addr + ":" + str(a_node.port)
dummy = a_node.frames()
dummy = a_node.models()
# Done building models!
# We were getting different results for each node. Bad, bad bad. . .
print "########################################################"
print "Checking " + str(len(h2o.nodes)) + " nodes for " + str(num_models) + " models: "
for a_node in h2o.nodes:
print " " + a_node.http_addr + ":" + str(a_node.port)
found_problem = False
for a_node in h2o.nodes:
print " Checking: " + a_node.http_addr + ":" + str(a_node.port)
models = a_node.models()
got = len(models['models'])
print "For node: " + a_node.http_addr + ":" + str(a_node.port) + " checking that we got ", str(num_models), " models. . ."
if num_models != got:
print "p00p, not enough. . ."
found_problem = True
print "Got these models: " + repr(models['models'].keys())
print "Expected " + str(num_models) + ", got: " + str(got)
for key, value in models['models'].iteritems():
self.assertEquals(value['state'], 'DONE', "Expected state to be DONE for model: " + key)
idx = key.find('_xval')
# For cross-validation models use the time recorded for the parent model, since each
# xval model should take less time than its parent
if -1 == idx:
expected = durations[key]
else:
expected = durations[key[0:idx]]
self.assertTrue(value['training_duration_in_ms'] < expected, "Expected training duration as computed by the server (" + str(value['training_duration_in_ms']) + ") to be less than we compute in the test (" + str(expected) + ") for model: " + key)
self.assertKeysExistAndNonNull(value, "", ['expert_parameters'])
# TODO: put back when Long serialization is fixed (probably not until h2o-dev)
# if 'seed' in value['expert_parameters']:
# self.assertEquals(long(value['expert_parameters']['seed']), 1234567890123456789L, "Seed incorrect for model: " + key + ". Expected: 1234567890123456789; got: " + str(long(value['expert_parameters']['seed'])))
# if '_seed' in value['expert_parameters']:
# self.assertEquals(long(value['expert_parameters']['_seed']), 1234567890123456789L, "Seed incorrect for model: " + key + ". Expected: 1234567890123456789; got: " + str(long(value['expert_parameters']['_seed'])))
self.assertNotEqual(found_problem, True, "Missing models on at least one node.")
class ApiTestCase(ModelManagementTestCase):
def followPath(self, d, path_elems):
for path_elem in path_elems:
if "" != path_elem:
idx = -1
if path_elem.endswith("]"):
idx = int(path_elem[path_elem.find("[") + 1:path_elem.find("]")])
path_elem = path_elem[:path_elem.find("[")]
assert path_elem in d, "Failed to find key: " + path_elem + " in dict: " + repr(d)
if -1 == idx:
d = d[path_elem]
else:
d = d[path_elem][idx]
return d
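# A small sketch of the path syntax followPath understands (data hypothetical):
# a "/"-separated key path where a trailing "[idx]" indexes into a list, e.g.
#   self.followPath(result, 'metrics[0]/model'.split('/'))
# walks result['metrics'][0]['model']; this is how paths like 'metrics[0]/model'
# are resolved by the assertKeys* helpers below.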
def assertKeysExist(self, d, path, keys):
path_elems = path.split("/")
d = self.followPath(d, path_elems)
for key in keys:
assert key in d, "Failed to find key: " + key + " in dict: " + repr(d)
def assertKeysExistAndNonNull(self, d, path, keys):
path_elems = path.split("/")
d = self.followPath(d, path_elems)
for key in keys:
assert key in d, "Failed to find key: " + key + " in dict: " + repr(d)
assert d[key] != None, "Value unexpectedly null: " + key + " in dict: " + repr(d)
def assertKeysDontExist(self, d, path, keys):
path_elems = path.split("/")
d = self.followPath(d, path_elems)
for key in keys:
assert key not in d, "Unexpectedly found key: " + key + " in dict: " + repr(d)
# TODO: look more inside the auc and cm elements
def validate_binomial_classifier_metrics(self, metrics, model, frame):
self.assertKeysExistAndNonNull(metrics, "", ['cm', 'auc', 'model', 'model_category', 'frame', 'duration_in_ms', 'scoring_time']) # TODO: HitRatio
# test auc object
self.assertNotEqual(None, metrics['auc'])
# What fields should we find in the AUC object? Well. . . the criteria:
criteria = ['F1', 'F2', 'F0point5', 'accuracy', 'precision', 'recall', 'specificity', 'mcc', 'max_per_class_error']
# And the "accuracy_for_criteria" and so on:
criteria_max_min = [criterion + '_for_criteria' for criterion in criteria ]
# And now a hack, because the error field is called "errorr" due to limitations of R:
criteria += ['errorr']
criteria_max_min += ['error_for_criteria']
# then a bunch of other fields:
misc_fields = ['thresholds', 'threshold_criterion', 'actual_domain', 'AUC', 'Gini', 'confusion_matrices', 'threshold_criteria', 'threshold_for_criteria', 'confusion_matrix_for_criteria']
self.assertKeysExistAndNonNull(metrics, "auc", criteria + criteria_max_min + misc_fields)
# So far so good. Now, what about what's in the fields? First the criteria lists that contain a value for each threshold:
assert type(metrics['auc']['thresholds']) is list, "thresholds value is a list."
assert len(metrics['auc']['thresholds']) > 0, "thresholds value is a list of more than 0 elements."
num_thresholds = len(metrics['auc']['thresholds'])
for criterion in criteria:
assert len(metrics['auc'][criterion]) == num_thresholds, criterion + " list is the same length as thresholds list."
assert metrics['auc'][criterion][num_thresholds / 2] != 0.0, criterion + " list has a non-zero median element."
# Now the criteria lists that contain a value for each criterion:
assert type(metrics['auc']['threshold_criteria']) is list, "threshold_criteria value is a list."
assert len(metrics['auc']['threshold_criteria']) > 0, "threshold_criteria value is a list of more than 0 elements."
num_threshold_criteria = len(metrics['auc']['threshold_criteria'])
# Are we testing all of them? Note that the threshold criteria sections don't include error / 'errorr'
assert num_threshold_criteria == len(criteria) - 1, "We are testing all the threshold criteria (test a)."
assert num_threshold_criteria == len(criteria_max_min) - 1, "We are testing all the threshold criteria (test b)."
for criterion_mm in criteria_max_min:
assert len(metrics['auc'][criterion_mm]) == num_threshold_criteria, criterion_mm + " list is the same length as threshold_criteria list."
assert metrics['auc'][criterion_mm][num_threshold_criteria / 2] != 0.0, criterion_mm + " list has a non-zero median element."
# confusion_matrix_for_criteria:
assert len(metrics['auc']['confusion_matrix_for_criteria']) == num_threshold_criteria, "confusion_matrix_for_criteria list is the same length as threshold_criteria list."
assert type(metrics['auc']['confusion_matrix_for_criteria']) is list, "confusion_matrix_for_criteria is a list."
assert type(metrics['auc']['confusion_matrix_for_criteria'][num_threshold_criteria / 2]) is list, "confusion_matrix_for_criteria is a list of lists."
assert type(metrics['auc']['confusion_matrix_for_criteria'][num_threshold_criteria / 2][0]) is list, "confusion_matrix_for_criteria is a list of lists of lists."
assert metrics['auc']['confusion_matrix_for_criteria'][num_threshold_criteria / 2][0][0] != 0.0, "confusion_matrix_for_criteria list has a non-zero median element."
# test cm object
self.assertNotEqual(None, metrics['cm'])
self.assertKeysExistAndNonNull(metrics, "cm", ['actual_domain', 'predicted_domain', 'domain', 'cm', 'mse'])
assert type(metrics['cm']['actual_domain']) is list, "actual_domain is a list."
assert type(metrics['cm']['predicted_domain']) is list, "predicted_domain is a list."
assert type(metrics['cm']['domain']) is list, "domain is a list."
assert len(metrics['cm']['actual_domain']) > 0, "actual_domain is a list of more than 0 elements."
assert len(metrics['cm']['predicted_domain']) > 0, "predicted_domain is a list of more than 0 elements."
assert len(metrics['cm']['domain']) > 0, "domain is a list of more than 0 elements."
assert type(metrics['cm']['cm']) is list, "cm is a list."
assert type(metrics['cm']['cm'][0]) is list, "cm is a list of lists."
assert sum(metrics['cm']['cm'][0]) > 0, "first domain of cm has at least one non-zero value."
self.assertNotEqual(0.0, metrics['cm']['mse'])
# misc fields
self.assertEquals(metrics['model_category'], 'Binomial')
# self.assertNotEqual(0, metrics['duration_in_ms']) # NOTE: it's possible, but unlikely, for this to be 0 legitimately
self.assertNotEqual(0, metrics['scoring_time'])
# check model fields
assert type(metrics['model']) is dict, "model field is an object."
self.assertKeysExistAndNonNull(metrics, 'model', ['key', 'creation_epoch_time_millis', 'id', 'model_category'])
self.assertEquals(metrics['model']['model_category'], 'Binomial')
self.assertEquals(metrics['model']['key'], model)
self.assertNotEqual(0, metrics['model']['creation_epoch_time_millis'])
assert re.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}").match(metrics['model']['id'])
# check frame fields
assert type(metrics['frame']) is dict, "frame field is an object."
self.assertKeysExistAndNonNull(metrics, 'frame', ['key', 'creation_epoch_time_millis', 'id'])
self.assertEquals(metrics['frame']['key'], frame)
self.assertNotEqual(0, metrics['frame']['creation_epoch_time_millis'])
assert re.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}").match(metrics['frame']['id'])
def test_endpoints(self):
node = h2o.nodes[0]
print "##############################################"
print "Testing /2/Frames with various options. . ."
print "##############################################"
print ""
print "##############################################"
print "Testing /2/Frames list. . ."
frames = node.frames()
self.assertKeysExist(frames, 'frames', ['airlines_train.hex', 'airlines_test.hex', 'prostate.hex'])
self.assertKeysDontExist(frames, 'frames', ['glm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_2', 'rf_AirlinesTrain_binary_1', 'rf_AirlinesTrain_binary_2', 'dl_AirlinesTrain_binary_1', 'glm_AirlinesTrain_binary_A', 'glm_Prostate_binary_xx', 'rf_Prostate_binary_1', 'glm_Prostate_regression_1'])
self.assertKeysDontExist(frames, '', ['models'])
print "##############################################"
print "Testing /2/Frames?key=airlines_test.hex. . ."
frames = node.frames(key='airlines_test.hex')
self.assertKeysExist(frames, 'frames', ['airlines_test.hex'])
self.assertKeysDontExist(frames, 'frames', ['glm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_2', 'rf_AirlinesTrain_binary_1', 'rf_AirlinesTrain_binary_2', 'dl_AirlinesTrain_binary_1', 'glm_AirlinesTrain_binary_A', 'glm_Prostate_binary_xx', 'rf_Prostate_binary_1', 'glm_Prostate_regression_1', 'airlines_train.hex', 'prostate.hex'])
self.assertKeysDontExist(frames, '', ['models'])
self.assertKeysExist(frames, 'frames/airlines_test.hex', ['creation_epoch_time_millis', 'id', 'key', 'column_names', 'compatible_models'])
self.assertEqual(frames['frames']['airlines_test.hex']['id'], "fffffffffffff38d", msg="The airlines_test.hex frame hash should be deterministic. Expected fffffffffffff38d, got: " + frames['frames']['airlines_test.hex']['id'])
self.assertEqual(frames['frames']['airlines_test.hex']['key'], "airlines_test.hex", msg="The airlines_test.hex key should be airlines_test.hex.")
print "##############################################"
print "Testing /2/Frames?key=airlines_test.hex&find_compatible_models=true. . ."
frames = node.frames(key='airlines_test.hex', find_compatible_models=1)
self.assertKeysExist(frames, 'frames', ['airlines_test.hex'])
self.assertKeysDontExist(frames, 'frames', ['glm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_2', 'rf_AirlinesTrain_binary_1', 'rf_AirlinesTrain_binary_2', 'dl_AirlinesTrain_binary_1', 'glm_AirlinesTrain_binary_A', 'glm_Prostate_binary_xx', 'rf_Prostate_binary_1', 'glm_Prostate_regression_1', 'airlines_train.hex', 'prostate.hex'])
self.assertKeysExist(frames, '', ['models'])
self.assertKeysExist(frames, 'models', ['glm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_2', 'rf_AirlinesTrain_binary_1', 'rf_AirlinesTrain_binary_2', 'dl_AirlinesTrain_binary_1', 'glm_AirlinesTrain_binary_A'])
self.assertKeysDontExist(frames, 'models', ['glm_Prostate_binary_xx', 'rf_Prostate_binary_1', 'glm_Prostate_regression_1', 'airlines_train.hex', 'airlines_train.hex', 'airlines_test.hex', 'prostate.hex'])
print "##############################################"
print "Testing /2/Frames with various options. . ."
print "##############################################"
print ""
print "##############################################"
print "Testing /2/Models list. . ."
models = node.models()
self.assertKeysExist(models, 'models', ['glm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_2', 'rf_AirlinesTrain_binary_1', 'rf_AirlinesTrain_binary_2', 'dl_AirlinesTrain_binary_1', 'glm_AirlinesTrain_binary_A', 'glm_Prostate_binary_xx', 'rf_Prostate_binary_1', 'glm_Prostate_regression_1'])
self.assertKeysExist(models, 'models/glm_AirlinesTrain_binary_1', ['id', 'key', 'creation_epoch_time_millis', 'model_category', 'state', 'input_column_names', 'response_column_name', 'critical_parameters', 'secondary_parameters', 'expert_parameters', 'compatible_frames', 'warnings'])
self.assertEqual(0, len(models['models']['glm_AirlinesTrain_binary_1']['warnings']), msg="Expect no warnings for glm_AirlinesTrain_binary_1.")
self.assertEqual(models['models']['glm_AirlinesTrain_binary_1']['key'], 'glm_AirlinesTrain_binary_1', "key should equal our key: " + "glm_AirlinesTrain_binary_1")
self.assertKeysDontExist(models, 'models', ['airlines_train.hex', 'airlines_test.hex', 'prostate.hex'])
self.assertKeysDontExist(models, '', ['frames'])
print "##############################################"
print "Testing /2/Models?key=rf_Prostate_binary_1. . ."
models = node.models(key='rf_Prostate_binary_1')
self.assertKeysExist(models, 'models', ['rf_Prostate_binary_1'])
self.assertKeysExist(models, 'models/rf_Prostate_binary_1', ['warnings'])
self.assertEqual(0, len(models['models']['rf_Prostate_binary_1']['warnings']), msg="Expect no warnings for rf_Prostate_binary_1.")
self.assertKeysDontExist(models, 'models', ['airlines_train.hex', 'airlines_test.hex', 'prostate.hex', 'glm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_2', 'rf_AirlinesTrain_binary_1', 'rf_AirlinesTrain_binary_2', 'dl_AirlinesTrain_binary_1', 'glm_AirlinesTrain_binary_A', 'glm_Prostate_binary_xx', 'glm_Prostate_regression_1'])
self.assertKeysDontExist(models, '', ['frames'])
print "##############################################"
print "Testing /2/Models?key=rf_Prostate_binary_1&find_compatible_frames=true. . ."
models = node.models(key='rf_Prostate_binary_1', find_compatible_frames=1)
self.assertKeysExist(models, 'models', ['rf_Prostate_binary_1'])
self.assertKeysDontExist(models, 'models', ['airlines_train.hex', 'airlines_test.hex', 'prostate.hex', 'glm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_1', 'gbm_AirlinesTrain_binary_2', 'rf_AirlinesTrain_binary_1', 'rf_AirlinesTrain_binary_2', 'dl_AirlinesTrain_binary_1', 'glm_AirlinesTrain_binary_A', 'glm_Prostate_binary_xx', 'glm_Prostate_regression_1'])
self.assertKeysExist(models, '', ['frames'])
self.assertKeysExist(models, 'frames', ['prostate.hex'])
self.assertKeysDontExist(models, 'frames', ['airlines_train.hex', 'airlines_test.hex'])
print "##############################################"
print "Testing /2/Models?key=glm_Prostate_binary_* variable importance warnings. . ."
should_have_warnings = ['glm_Prostate_binary_10', 'glm_Prostate_binary_1x']
should_not_have_warnings = ['glm_Prostate_binary_00', 'glm_Prostate_binary_01', 'glm_Prostate_binary_11', 'glm_Prostate_binary_xx', 'glm_Prostate_binary_x0', 'glm_Prostate_binary_x1', 'glm_Prostate_binary_0x']
for m in should_have_warnings:
models = node.models(key=m)
self.assertKeysExist(models, 'models', [m])
self.assertKeysExist(models, 'models/' + m, ['warnings'])
self.assertEqual(1, len(models['models'][m]['warnings']), msg="Expect one warning for " + m + ": " + repr(models['models'][m]['warnings']))
self.assertTrue("use_all_factor_levels" in models['models'][m]['warnings'][0], "Expect variable importances warning since we aren't using use_all_factor_levels.")
for m in should_not_have_warnings:
models = node.models(key=m)
self.assertKeysExist(models, 'models', [m])
self.assertKeysExist(models, 'models/' + m, ['warnings'])
self.assertEqual(0, len(models['models'][m]['warnings']), msg="Expect zero warnings for " + m + ": " + repr(models['models'][m]['warnings']))
def test_binary_classifiers(self):
node = h2o.nodes[0]
print "##############################################"
print "Testing /2/Models with scoring. . ."
print "##############################################"
print ""
print "##############################################"
test_frames = ["prostate.hex", "airlines_train.hex"]
for test_frame in test_frames:
print "Scoring compatible frames for compatible models for /2/Models?key=" + test_frame + "&find_compatible_models=true. . ."
frames = node.frames(key=test_frame, find_compatible_models=1)
compatible_models = frames['frames'][test_frame]['compatible_models']
# NOTE: we start with frame airlines_train.hex and find the compatible models.
# Then for each of those models we find all the compatible frames (there are at least two)
# and score them.
for model_key in compatible_models:
# find all compatible frames
models = node.models(key=model_key, find_compatible_frames=1)
compatible_frames = models['models'][model_key]['compatible_frames']
self.assertKeysExist(models, 'models/' + model_key, ['training_duration_in_ms'])
self.assertNotEqual(models['models'][model_key]['training_duration_in_ms'], 0, "Expected non-zero training time for model: " + model_key)
should_not_have_varimp = ['glm_Prostate_binary_00', 'glm_Prostate_binary_01', 'glm_Prostate_binary_0x', 'glm_Prostate_binary_xx', 'glm_Prostate_binary_x0', 'glm_Prostate_binary_x1']
if models['models'][model_key]['model_algorithm'] != 'Naive Bayes' and model_key not in should_not_have_varimp:
self.assertKeysExistAndNonNull(models, 'models/' + model_key, ['variable_importances'])
self.assertKeysExistAndNonNull(models, 'models/' + model_key + '/variable_importances', ['varimp', 'method', 'max_var', 'scaled'])
for frame_key in compatible_frames:
print "Scoring: /2/Models?key=" + model_key + "&score_frame=" + frame_key
scoring_result = node.models(key=model_key, score_frame=frame_key)
self.assertKeysExist(scoring_result, '', ['metrics'])
self.assertKeysExist(scoring_result, 'metrics[0]', ['model', 'frame', 'duration_in_ms'])
self.assertKeysExist(scoring_result, 'metrics[0]/model', ['key', 'model_category', 'id', 'creation_epoch_time_millis'])
model_category = scoring_result['metrics'][0]['model']['model_category']
self.assertEqual(scoring_result['metrics'][0]['model']['key'], model_key, "Expected model key: " + model_key + " but got: " + scoring_result['metrics'][0]['model']['key'])
self.assertEqual(scoring_result['metrics'][0]['frame']['key'], frame_key, "Expected frame key: " + frame_key + " but got: " + scoring_result['metrics'][0]['frame']['key'])
if model_category == 'Binomial':
self.validate_binomial_classifier_metrics(scoring_result['metrics'][0], model_key, frame_key)
if model_category == 'Regression':
# self.assertKeysDontExist(scoring_result, 'metrics[0]', ['cm', 'auc']) # TODO: HitRatio
None
print "##############################################"
print "Testing /2/Frames with scoring. . ."
print "##############################################"
print ""
print "##############################################"
test_frames = ["prostate.hex", "airlines_test.hex"]
for frame_key in test_frames:
print "Scoring compatible models for /2/Frames?key=" + frame_key + "&find_compatible_models=true. . ."
frames = node.frames(key=frame_key, find_compatible_models=1)
compatible_models = frames['frames'][frame_key]['compatible_models']
for model_key in compatible_models:
print "Scoring: /2/Frames?key=" + frame_key + "&score_model=" + model_key
scoring_result = node.frames(key=frame_key, score_model=model_key)
self.assertKeysExist(scoring_result, '', ['metrics'])
self.assertKeysExist(scoring_result, 'metrics[0]', ['model_category'])
model_category = scoring_result['metrics'][0]['model_category']
self.assertKeysExist(scoring_result, 'metrics[0]', ['model', 'frame', 'duration_in_ms'])
self.assertEqual(scoring_result['metrics'][0]['model']['key'], model_key, "Expected model key: " + model_key + " but got: " + scoring_result['metrics'][0]['model']['key'])
self.assertEqual(scoring_result['metrics'][0]['frame']['key'], frame_key, "Expected frame key: " + frame_key + " but got: " + scoring_result['metrics'][0]['frame']['key'])
print "the model_category: ", model_category
if model_category == 'Binomial':
self.validate_binomial_classifier_metrics(scoring_result['metrics'][0], model_key, frame_key)
# TODO: look inside the auc and cm elements
if model_category == 'Regression':
# self.assertKeysDontExist(scoring_result, 'metrics[0]', ['cm', 'auc']) # TODO: HitRatio
None
def test_steam(self):
print "----------------------------------------------------------"
print " Testing Steam... "
print "----------------------------------------------------------"
# Go up two dirs and add '/client'.
# Don't know if there's a better way to do this. - Prithvi
client_dir = os.path.join(os.path.split(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])[0], 'client')
node0 = h2o.nodes[0]
os.environ['STEAM_NODE_ADDR'] = node0.http_addr
os.environ['STEAM_NODE_PORT'] = str(node0.port)
# Run `make test -C path_to_h2o/client`
command_string = "make test -C " + client_dir
# However, when `make test` fails, h2o.spawn_wait() fails hard without an exit code.
# Further, if this is trapped in a try/except, the failed tests are not routed to stdout.
(ps, outpath, errpath) = h2o.spawn_cmd('steam_tests', command_string.split())
h2o.spawn_wait(ps, outpath, errpath, timeout=1000)
print "----------------------------------------------------------"
print " Steam tests completed successfully! "
print "----------------------------------------------------------"
if __name__ == '__main__':
h2o.unit_main()
|
rowhit/h2o-2
|
py/testdir_multi_jvm/test_model_management.py
|
Python
|
apache-2.0
| 67,229
|
[
"Gaussian"
] |
a42633ba1a3320d5aad1204da0f3b4732190aec90e3992f62ca4c04a66571bcd
|
# encoding: utf-8
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import datetime
import numpy as np
storm_num = 1
# Landfall for storm chosen
if storm_num == 1:
# Storm 1
landfall = datetime.datetime(1997, 11, 15, 3) - datetime.datetime(1997, 1, 1, 0)
elif storm_num == 2:
# Storm 2 - 2008 12 08 18 to 2008 12 20 02
landfall = datetime.datetime(2008, 12, 17, 1) - datetime.datetime(2008, 1, 1, 0)
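# `landfall` is a datetime.timedelta measured from 00:00 on Jan 1 of the storm
# year; only its .days and .seconds fields are used below (for storm 1 it
# works out to 318 days + 10800 s, i.e. 03:00 on Nov 15, 1997).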
# Conversions between days and seconds: days * (s/hour) * (hours/day)
days2seconds = lambda days: days * 60.0**2 * 24.0
seconds2days = lambda seconds: seconds / (60.0**2 * 24.0)
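# Quick sanity checks for the conversions above:
#   days2seconds(1.5)   == 129600.0   # 1.5 days
#   seconds2days(86400) == 1.0        # one day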
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
#probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
# (or to amr2ez.data for AMR)
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
# (47 W, 100 E, 31 N, -10 S) - bathy
clawdata.lower[0] = 47 # west longitude
clawdata.upper[0] = 100 # east longitude
clawdata.lower[1] = -10 # south latitude
clawdata.upper[1] = 31 # north latitude
# Number of grid cells:
degree_factor = 4 # (0.25º,0.25º) ~ (25237.5 m, 27693.2 m) resolution
clawdata.num_cells[0] = int(clawdata.upper[0] - clawdata.lower[0]) * degree_factor
clawdata.num_cells[1] = int(clawdata.upper[1] - clawdata.lower[1]) * degree_factor
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
# First three are from shallow GeoClaw, fourth is friction and last 3 are
# storm fields
clawdata.num_aux = 3 + 1 + 3
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
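    # (With coordinate_system = 2 set below in setgeo, aux(2) carries the
    #  latitude-dependent cell-area "capacity" factor, hence capa_index = 2.)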
# -------------
# Initial time:
# -------------
clawdata.t0 = days2seconds(landfall.days - 3) + landfall.seconds
# clawdata.t0 = days2seconds(landfall.days - 12) + landfall.seconds
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output nout frames at equally spaced times up to tfinal:
# clawdata.tfinal = days2seconds(date2days('2008091400'))
clawdata.tfinal = days2seconds(landfall.days + 1.0) + landfall.seconds
# clawdata.tfinal = days2seconds(landfall.days) + landfall.seconds
recurrence = 24
clawdata.num_output_times = int((clawdata.tfinal - clawdata.t0)
* recurrence / (60**2 * 24))
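        # recurrence = 24 output frames per simulated day, i.e. hourly frames;
        # with the 4 day window from t0 to tfinal this requests 96 frames.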
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = [0.5, 1.0]
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 1
clawdata.total_steps = 1
clawdata.output_t0 = True
clawdata.output_format = 'binary' # 'ascii' or 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'all'
clawdata.output_aux_onlyonce = False # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 1
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 0.016
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.75
clawdata.cfl_max = 1.0
# clawdata.cfl_desired = 0.25
# clawdata.cfl_max = 0.5
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 1
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 1
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# clawdata.source_split = 'strang'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'extrap'
clawdata.bc_upper[1] = 'extrap'
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = [0.1,0.15]
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 5
# List of refinement ratios at each level (length at least mxnest-1)
# amrdata.refinement_ratios_x = [2,2,3,4,16]
# amrdata.refinement_ratios_y = [2,2,3,4,16]
# amrdata.refinement_ratios_t = [2,2,3,4,16]
# amrdata.refinement_ratios_x = [2,2,2,6,16]
# amrdata.refinement_ratios_y = [2,2,2,6,16]
# amrdata.refinement_ratios_t = [2,2,2,6,16]
amrdata.refinement_ratios_x = [2,2,2,6,4,4]
amrdata.refinement_ratios_y = [2,2,2,6,4,4]
amrdata.refinement_ratios_t = [2,2,2,6,4,4]
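    # With amr_levels_max = 5 only the first four ratios are used, so the
    # finest grids are 2*2*2*6 = 48x finer than the 0.25 degree base grid,
    # i.e. roughly 0.005 degrees (~600 m) per cell.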
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft', 'center', 'center',
                        'center', 'center']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
# More AMR parameters can be set -- see the defaults in pyclaw/data.py
# == setregions.data values ==
regions = rundata.regiondata.regions
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
# Mumbai Region
regions.append([2, 5, rundata.clawdata.t0, rundata.clawdata.tfinal,
70, 75, 17, 22])
# Mumbai
regions.append([4, 7, days2seconds(landfall.days - 1.0) + landfall.seconds,
rundata.clawdata.tfinal,
72.6, 73, 18.80, 19.15])
# == setgauges.data values ==
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
rundata.gaugedata.gauges.append([1, 72.811790, 18.936508, rundata.clawdata.t0, rundata.clawdata.tfinal])
rundata.gaugedata.gauges.append([2, 72.972316, 18.997762, rundata.clawdata.t0, rundata.clawdata.tfinal])
rundata.gaugedata.gauges.append([3, 72.819311, 18.818044, rundata.clawdata.t0, rundata.clawdata.tfinal])
#------------------------------------------------------------------
# GeoClaw specific parameters:
#------------------------------------------------------------------
rundata = setgeo(rundata)
return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
"""
Set GeoClaw specific runtime parameters.
For documentation see ....
"""
try:
geo_data = rundata.geo_data
except:
print "*** Error, this rundata has no geo_data attribute"
raise AttributeError("Missing geo_data attribute")
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367.5e3
# == Forcing Options
geo_data.coriolis_forcing = True
geo_data.friction_forcing = True
geo_data.manning_coefficient = 0.025 # Overridden below
geo_data.friction_depth = 1e10
# == Algorithm and Initial Conditions ==
geo_data.sea_level = 2.0 # Due to seasonal swelling of gulf
geo_data.dry_tolerance = 1.e-2
# Refinement Criteria
refine_data = rundata.refinement_data
refine_data.wave_tolerance = 1.0
# refine_data.wave_tolerance = 0.5
# refine_data.speed_tolerance = [0.25,0.5,1.0,2.0,3.0,4.0]
# refine_data.speed_tolerance = [0.5,1.0,1.5,2.0,2.5,3.0]
refine_data.speed_tolerance = [1.0,2.0,3.0,4.0]
refine_data.deep_depth = 300.0
refine_data.max_level_deep = 4
refine_data.variable_dt_refinement_ratios = True
# == settopo.data values ==
topo_data = rundata.topo_data
topo_data.topofiles = []
# for topography, append lines of the form
# [topotype, minlevel, maxlevel, t1, t2, fname]
# See regions for control over these regions, need better bathy data for the
# smaller domains
# if os.environ.has_key("DATA_PATH"):
# topo_path = os.path.join(os.environ["DATA_PATH"], "topography", "indian")
# else:
topo_path = os.path.join('..', 'bathy')
indian_ocean = os.path.join(topo_path, "indian_ocean.nc")
mumbai_topo = os.path.join(topo_path, "mumbai.tt3")
topo_data.topofiles.append([4, 1, 5, rundata.clawdata.t0,
rundata.clawdata.tfinal,
indian_ocean])
topo_data.topofiles.append([3, 1, 7, rundata.clawdata.t0,
rundata.clawdata.tfinal,
mumbai_topo])
# == setdtopo.data values ==
dtopo_data = rundata.dtopo_data
dtopo_data.dtopofiles = []
# for moving topography, append lines of the form : (<= 1 allowed for now!)
# [topotype, minlevel,maxlevel,fname]
# == setqinit.data values ==
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [minlev, maxlev, fname]
# == setfixedgrids.data values ==
rundata.fixed_grid_data.fixedgrids = []
# for fixed grids append lines of the form
# [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
# ioutarrivaltimes,ioutsurfacemax]
return rundata
# end of function setgeo
# ----------------------
def set_storm(rundata):
data = rundata.surge_data
# Physics parameters
data.rho_air = 1.15
data.ambient_pressure = 101.3e3 # Nominal atmos pressure
# Source term controls - These are currently not respected
data.wind_forcing = True
data.drag_law = 1
data.pressure_forcing = True
# Source term algorithm parameters
# data.wind_tolerance = 1e-4
# data.pressure_tolerance = 1e-4 # Pressure source term tolerance
# AMR parameters
data.wind_refine = [20.0,40.0,60.0] # m/s
data.R_refine = [60.0e3,40e3,20e3] # m
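    # (Assumed interpretation: each threshold crossed -- wind speed above
    #  20/40/60 m/s or distance to the storm center under 60/40/20 km --
    #  permits refinement by one additional AMR level.)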
# Storm parameters
data.storm_type = 1 # Type of storm
data.landfall = days2seconds(landfall.days) + landfall.seconds
data.display_landfall_time = True
# Storm type 1 - Idealized storm track
if storm_num == 1:
data.storm_file = os.path.expandvars(os.path.join(os.getcwd(), 'mumbai_1.storm'))
elif storm_num == 2:
data.storm_file = os.path.expandvars(os.path.join(os.getcwd(), 'mumbai_2.storm'))
return rundata
def set_friction(rundata):
data = rundata.friction_data
# Variable friction
data.variable_friction = True
# Region based friction
# Entire domain
data.friction_regions.append([rundata.clawdata.lower,
rundata.clawdata.upper,
[np.infty,0.0,-np.infty],
[0.030, 0.022]])
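    # Assumed reading of the region spec above: the depth breakpoints
    # [inf, 0, -inf] split the domain at z = 0, giving Manning n = 0.030
    # onshore (above sea level) and n = 0.022 offshore.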
return rundata
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
if len(sys.argv) == 2:
rundata = setrun(sys.argv[1])
else:
rundata = setrun()
rundata = set_storm(rundata)
rundata = set_friction(rundata)
rundata.write()
|
mandli/surge-examples
|
mumbai/setrun.py
|
Python
|
mit
| 17,576
|
[
"NetCDF"
] |
aa656d1e88eac56d346b8c1795d44ca3277a1334e0c94373b91011ff7e112060
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database of Ed and Rob's favorite linear acene dimers.
| Geometries from nowhere special, and reference energies undefined.
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
  - ``'FIRST3'`` benzene, naphthalene, and anthracene dimers
- ``'FIRST5'`` benzene - pentacene dimers
- ``'FIRST10'`` benzene - decacene dimers
"""
import re
import qcdb
# <<< ACENES Database Module >>>
dbse = 'ACENES'
# <<< Database Members >>>
HRXN = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20']
HRXN_SM = ['1', '2', '3', '4', '5']
HRXN_LG = ['11', '12', '13', '14', '15', '16', '17', '18', '19', '20']
FIRST3 = ['1', '2', '3']
FIRST5 = ['1', '2', '3', '4', '5']
FIRST10 = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
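# For each reaction N the interaction energy is assembled from the RXNM
# weights as E_int = E(dimer) - E(monoA) - E(monoB); the *-CP monomers are
# evaluated in the full dimer basis (counterpoise corrected), while the
# *-unCP monomers are evaluated in their own monomer basis.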
# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, '1' )] = 0.000
BIND['%s-%s' % (dbse, '10' )] = 0.000
BIND['%s-%s' % (dbse, '11' )] = 0.000
BIND['%s-%s' % (dbse, '12' )] = 0.000
BIND['%s-%s' % (dbse, '13' )] = 0.000
BIND['%s-%s' % (dbse, '14' )] = 0.000
BIND['%s-%s' % (dbse, '15' )] = 0.000
BIND['%s-%s' % (dbse, '16' )] = 0.000
BIND['%s-%s' % (dbse, '17' )] = 0.000
BIND['%s-%s' % (dbse, '18' )] = 0.000
BIND['%s-%s' % (dbse, '19' )] = 0.000
BIND['%s-%s' % (dbse, '2' )] = 0.000
BIND['%s-%s' % (dbse, '20' )] = 0.000
BIND['%s-%s' % (dbse, '3' )] = 0.000
BIND['%s-%s' % (dbse, '4' )] = 0.000
BIND['%s-%s' % (dbse, '5' )] = 0.000
BIND['%s-%s' % (dbse, '6' )] = 0.000
BIND['%s-%s' % (dbse, '7' )] = 0.000
BIND['%s-%s' % (dbse, '8' )] = 0.000
BIND['%s-%s' % (dbse, '9' )] = 0.000
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, '1' )] = ''
TAGL['%s-%s-dimer' % (dbse, '1' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '1' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '1' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '1' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '1' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '10' )] = ''
TAGL['%s-%s-dimer' % (dbse, '10' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '10' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '10' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '10' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '10' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '11' )] = ''
TAGL['%s-%s-dimer' % (dbse, '11' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '11' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '11' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '11' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '11' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '12' )] = ''
TAGL['%s-%s-dimer' % (dbse, '12' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '12' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '12' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '12' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '12' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '13' )] = ''
TAGL['%s-%s-dimer' % (dbse, '13' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '13' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '13' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '13' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '13' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '14' )] = ''
TAGL['%s-%s-dimer' % (dbse, '14' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '14' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '14' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '14' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '14' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '15' )] = ''
TAGL['%s-%s-dimer' % (dbse, '15' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '15' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '15' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '15' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '15' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '16' )] = ''
TAGL['%s-%s-dimer' % (dbse, '16' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '16' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '16' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '16' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '16' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '17' )] = ''
TAGL['%s-%s-dimer' % (dbse, '17' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '17' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '17' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '17' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '17' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '18' )] = ''
TAGL['%s-%s-dimer' % (dbse, '18' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '18' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '18' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '18' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '18' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '19' )] = ''
TAGL['%s-%s-dimer' % (dbse, '19' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '19' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '19' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '19' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '19' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '2' )] = ''
TAGL['%s-%s-dimer' % (dbse, '2' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '2' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '2' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '2' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '2' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '20' )] = ''
TAGL['%s-%s-dimer' % (dbse, '20' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '20' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '20' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '20' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '20' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '3' )] = ''
TAGL['%s-%s-dimer' % (dbse, '3' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '3' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '3' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '3' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '3' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '4' )] = ''
TAGL['%s-%s-dimer' % (dbse, '4' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '4' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '4' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '4' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '4' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '5' )] = ''
TAGL['%s-%s-dimer' % (dbse, '5' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '5' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '5' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '5' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '5' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '6' )] = ''
TAGL['%s-%s-dimer' % (dbse, '6' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '6' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '6' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '6' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '6' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '7' )] = ''
TAGL['%s-%s-dimer' % (dbse, '7' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '7' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '7' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '7' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '7' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '8' )] = ''
TAGL['%s-%s-dimer' % (dbse, '8' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '8' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '8' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '8' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '8' )] = 'Monomer B from '
TAGL['%s-%s' % (dbse, '9' )] = ''
TAGL['%s-%s-dimer' % (dbse, '9' )] = 'Dimer from '
TAGL['%s-%s-monoA-CP' % (dbse, '9' )] = 'Monomer A from '
TAGL['%s-%s-monoB-CP' % (dbse, '9' )] = 'Monomer B from '
TAGL['%s-%s-monoA-unCP' % (dbse, '9' )] = 'Monomer A from '
TAGL['%s-%s-monoB-unCP' % (dbse, '9' )] = 'Monomer B from '
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H -1.23575000 2.14038200 0.00000000
H 1.23575000 2.14038200 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 0.46425000 2.14038200 3.50000000
H 2.93575000 2.14038200 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H -1.23575000 23.83171400 0.00000000
H 1.23575000 23.83171400 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 0.46425000 23.83171400 3.50000000
H 2.93575000 23.83171400 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H -1.23575000 26.24186200 0.00000000
H 1.23575000 26.24186200 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 0.46425000 26.24186200 3.50000000
H 2.93575000 26.24186200 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H -1.23575000 28.65201000 0.00000000
H 1.23575000 28.65201000 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 0.46425000 28.65201000 3.50000000
H 2.93575000 28.65201000 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H -1.23575000 31.06215800 0.00000000
H 1.23575000 31.06215800 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 0.46425000 31.06215800 3.50000000
H 2.93575000 31.06215800 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
C 1.39150000 31.33192400 0.00000000
C -1.39150000 31.33192400 0.00000000
C -0.69575000 32.53699800 0.00000000
C 0.69575000 32.53699800 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H 2.47150000 31.33192400 0.00000000
H -2.47150000 31.33192400 0.00000000
H -1.23575000 33.47230600 0.00000000
H 1.23575000 33.47230600 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
C 3.09150000 31.33192400 3.50000000
C 0.30850000 31.33192400 3.50000000
C 1.00425000 32.53699800 3.50000000
C 2.39575000 32.53699800 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 4.17150000 31.33192400 3.50000000
H -0.77150000 31.33192400 3.50000000
H 0.46425000 33.47230600 3.50000000
H 2.93575000 33.47230600 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
C 1.39150000 31.33192400 0.00000000
C -1.39150000 31.33192400 0.00000000
C -0.69575000 32.53699800 0.00000000
C 0.69575000 32.53699800 0.00000000
C 1.39150000 33.74207200 0.00000000
C -1.39150000 33.74207200 0.00000000
C -0.69575000 34.94714600 0.00000000
C 0.69575000 34.94714600 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H 2.47150000 31.33192400 0.00000000
H -2.47150000 31.33192400 0.00000000
H 2.47150000 33.74207200 0.00000000
H -2.47150000 33.74207200 0.00000000
H -1.23575000 35.88245400 0.00000000
H 1.23575000 35.88245400 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
C 3.09150000 31.33192400 3.50000000
C 0.30850000 31.33192400 3.50000000
C 1.00425000 32.53699800 3.50000000
C 2.39575000 32.53699800 3.50000000
C 3.09150000 33.74207200 3.50000000
C 0.30850000 33.74207200 3.50000000
C 1.00425000 34.94714600 3.50000000
C 2.39575000 34.94714600 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 4.17150000 31.33192400 3.50000000
H -0.77150000 31.33192400 3.50000000
H 4.17150000 33.74207200 3.50000000
H -0.77150000 33.74207200 3.50000000
H 0.46425000 35.88245400 3.50000000
H 2.93575000 35.88245400 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
C 1.39150000 31.33192400 0.00000000
C -1.39150000 31.33192400 0.00000000
C -0.69575000 32.53699800 0.00000000
C 0.69575000 32.53699800 0.00000000
C 1.39150000 33.74207200 0.00000000
C -1.39150000 33.74207200 0.00000000
C -0.69575000 34.94714600 0.00000000
C 0.69575000 34.94714600 0.00000000
C 1.39150000 36.15222000 0.00000000
C -1.39150000 36.15222000 0.00000000
C -0.69575000 37.35729400 0.00000000
C 0.69575000 37.35729400 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H 2.47150000 31.33192400 0.00000000
H -2.47150000 31.33192400 0.00000000
H 2.47150000 33.74207200 0.00000000
H -2.47150000 33.74207200 0.00000000
H 2.47150000 36.15222000 0.00000000
H -2.47150000 36.15222000 0.00000000
H -1.23575000 38.29260200 0.00000000
H 1.23575000 38.29260200 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
C 3.09150000 31.33192400 3.50000000
C 0.30850000 31.33192400 3.50000000
C 1.00425000 32.53699800 3.50000000
C 2.39575000 32.53699800 3.50000000
C 3.09150000 33.74207200 3.50000000
C 0.30850000 33.74207200 3.50000000
C 1.00425000 34.94714600 3.50000000
C 2.39575000 34.94714600 3.50000000
C 3.09150000 36.15222000 3.50000000
C 0.30850000 36.15222000 3.50000000
C 1.00425000 37.35729400 3.50000000
C 2.39575000 37.35729400 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 4.17150000 31.33192400 3.50000000
H -0.77150000 31.33192400 3.50000000
H 4.17150000 33.74207200 3.50000000
H -0.77150000 33.74207200 3.50000000
H 4.17150000 36.15222000 3.50000000
H -0.77150000 36.15222000 3.50000000
H 0.46425000 38.29260200 3.50000000
H 2.93575000 38.29260200 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
C 1.39150000 31.33192400 0.00000000
C -1.39150000 31.33192400 0.00000000
C -0.69575000 32.53699800 0.00000000
C 0.69575000 32.53699800 0.00000000
C 1.39150000 33.74207200 0.00000000
C -1.39150000 33.74207200 0.00000000
C -0.69575000 34.94714600 0.00000000
C 0.69575000 34.94714600 0.00000000
C 1.39150000 36.15222000 0.00000000
C -1.39150000 36.15222000 0.00000000
C -0.69575000 37.35729400 0.00000000
C 0.69575000 37.35729400 0.00000000
C 1.39150000 38.56236800 0.00000000
C -1.39150000 38.56236800 0.00000000
C -0.69575000 39.76744200 0.00000000
C 0.69575000 39.76744200 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H 2.47150000 31.33192400 0.00000000
H -2.47150000 31.33192400 0.00000000
H 2.47150000 33.74207200 0.00000000
H -2.47150000 33.74207200 0.00000000
H 2.47150000 36.15222000 0.00000000
H -2.47150000 36.15222000 0.00000000
H 2.47150000 38.56236800 0.00000000
H -2.47150000 38.56236800 0.00000000
H -1.23575000 40.70275000 0.00000000
H 1.23575000 40.70275000 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
C 3.09150000 31.33192400 3.50000000
C 0.30850000 31.33192400 3.50000000
C 1.00425000 32.53699800 3.50000000
C 2.39575000 32.53699800 3.50000000
C 3.09150000 33.74207200 3.50000000
C 0.30850000 33.74207200 3.50000000
C 1.00425000 34.94714600 3.50000000
C 2.39575000 34.94714600 3.50000000
C 3.09150000 36.15222000 3.50000000
C 0.30850000 36.15222000 3.50000000
C 1.00425000 37.35729400 3.50000000
C 2.39575000 37.35729400 3.50000000
C 3.09150000 38.56236800 3.50000000
C 0.30850000 38.56236800 3.50000000
C 1.00425000 39.76744200 3.50000000
C 2.39575000 39.76744200 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 4.17150000 31.33192400 3.50000000
H -0.77150000 31.33192400 3.50000000
H 4.17150000 33.74207200 3.50000000
H -0.77150000 33.74207200 3.50000000
H 4.17150000 36.15222000 3.50000000
H -0.77150000 36.15222000 3.50000000
H 4.17150000 38.56236800 3.50000000
H -0.77150000 38.56236800 3.50000000
H 0.46425000 40.70275000 3.50000000
H 2.93575000 40.70275000 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
C 1.39150000 31.33192400 0.00000000
C -1.39150000 31.33192400 0.00000000
C -0.69575000 32.53699800 0.00000000
C 0.69575000 32.53699800 0.00000000
C 1.39150000 33.74207200 0.00000000
C -1.39150000 33.74207200 0.00000000
C -0.69575000 34.94714600 0.00000000
C 0.69575000 34.94714600 0.00000000
C 1.39150000 36.15222000 0.00000000
C -1.39150000 36.15222000 0.00000000
C -0.69575000 37.35729400 0.00000000
C 0.69575000 37.35729400 0.00000000
C 1.39150000 38.56236800 0.00000000
C -1.39150000 38.56236800 0.00000000
C -0.69575000 39.76744200 0.00000000
C 0.69575000 39.76744200 0.00000000
C 1.39150000 40.97251600 0.00000000
C -1.39150000 40.97251600 0.00000000
C -0.69575000 42.17759000 0.00000000
C 0.69575000 42.17759000 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H 2.47150000 31.33192400 0.00000000
H -2.47150000 31.33192400 0.00000000
H 2.47150000 33.74207200 0.00000000
H -2.47150000 33.74207200 0.00000000
H 2.47150000 36.15222000 0.00000000
H -2.47150000 36.15222000 0.00000000
H 2.47150000 38.56236800 0.00000000
H -2.47150000 38.56236800 0.00000000
H 2.47150000 40.97251600 0.00000000
H -2.47150000 40.97251600 0.00000000
H -1.23575000 43.11289800 0.00000000
H 1.23575000 43.11289800 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
C 3.09150000 31.33192400 3.50000000
C 0.30850000 31.33192400 3.50000000
C 1.00425000 32.53699800 3.50000000
C 2.39575000 32.53699800 3.50000000
C 3.09150000 33.74207200 3.50000000
C 0.30850000 33.74207200 3.50000000
C 1.00425000 34.94714600 3.50000000
C 2.39575000 34.94714600 3.50000000
C 3.09150000 36.15222000 3.50000000
C 0.30850000 36.15222000 3.50000000
C 1.00425000 37.35729400 3.50000000
C 2.39575000 37.35729400 3.50000000
C 3.09150000 38.56236800 3.50000000
C 0.30850000 38.56236800 3.50000000
C 1.00425000 39.76744200 3.50000000
C 2.39575000 39.76744200 3.50000000
C 3.09150000 40.97251600 3.50000000
C 0.30850000 40.97251600 3.50000000
C 1.00425000 42.17759000 3.50000000
C 2.39575000 42.17759000 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 4.17150000 31.33192400 3.50000000
H -0.77150000 31.33192400 3.50000000
H 4.17150000 33.74207200 3.50000000
H -0.77150000 33.74207200 3.50000000
H 4.17150000 36.15222000 3.50000000
H -0.77150000 36.15222000 3.50000000
H 4.17150000 38.56236800 3.50000000
H -0.77150000 38.56236800 3.50000000
H 4.17150000 40.97251600 3.50000000
H -0.77150000 40.97251600 3.50000000
H 0.46425000 43.11289800 3.50000000
H 2.93575000 43.11289800 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
C 1.39150000 31.33192400 0.00000000
C -1.39150000 31.33192400 0.00000000
C -0.69575000 32.53699800 0.00000000
C 0.69575000 32.53699800 0.00000000
C 1.39150000 33.74207200 0.00000000
C -1.39150000 33.74207200 0.00000000
C -0.69575000 34.94714600 0.00000000
C 0.69575000 34.94714600 0.00000000
C 1.39150000 36.15222000 0.00000000
C -1.39150000 36.15222000 0.00000000
C -0.69575000 37.35729400 0.00000000
C 0.69575000 37.35729400 0.00000000
C 1.39150000 38.56236800 0.00000000
C -1.39150000 38.56236800 0.00000000
C -0.69575000 39.76744200 0.00000000
C 0.69575000 39.76744200 0.00000000
C 1.39150000 40.97251600 0.00000000
C -1.39150000 40.97251600 0.00000000
C -0.69575000 42.17759000 0.00000000
C 0.69575000 42.17759000 0.00000000
C 1.39150000 43.38266400 0.00000000
C -1.39150000 43.38266400 0.00000000
C -0.69575000 44.58773800 0.00000000
C 0.69575000 44.58773800 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H 2.47150000 31.33192400 0.00000000
H -2.47150000 31.33192400 0.00000000
H 2.47150000 33.74207200 0.00000000
H -2.47150000 33.74207200 0.00000000
H 2.47150000 36.15222000 0.00000000
H -2.47150000 36.15222000 0.00000000
H 2.47150000 38.56236800 0.00000000
H -2.47150000 38.56236800 0.00000000
H 2.47150000 40.97251600 0.00000000
H -2.47150000 40.97251600 0.00000000
H 2.47150000 43.38266400 0.00000000
H -2.47150000 43.38266400 0.00000000
H -1.23575000 45.52304600 0.00000000
H 1.23575000 45.52304600 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
C 3.09150000 31.33192400 3.50000000
C 0.30850000 31.33192400 3.50000000
C 1.00425000 32.53699800 3.50000000
C 2.39575000 32.53699800 3.50000000
C 3.09150000 33.74207200 3.50000000
C 0.30850000 33.74207200 3.50000000
C 1.00425000 34.94714600 3.50000000
C 2.39575000 34.94714600 3.50000000
C 3.09150000 36.15222000 3.50000000
C 0.30850000 36.15222000 3.50000000
C 1.00425000 37.35729400 3.50000000
C 2.39575000 37.35729400 3.50000000
C 3.09150000 38.56236800 3.50000000
C 0.30850000 38.56236800 3.50000000
C 1.00425000 39.76744200 3.50000000
C 2.39575000 39.76744200 3.50000000
C 3.09150000 40.97251600 3.50000000
C 0.30850000 40.97251600 3.50000000
C 1.00425000 42.17759000 3.50000000
C 2.39575000 42.17759000 3.50000000
C 3.09150000 43.38266400 3.50000000
C 0.30850000 43.38266400 3.50000000
C 1.00425000 44.58773800 3.50000000
C 2.39575000 44.58773800 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 4.17150000 31.33192400 3.50000000
H -0.77150000 31.33192400 3.50000000
H 4.17150000 33.74207200 3.50000000
H -0.77150000 33.74207200 3.50000000
H 4.17150000 36.15222000 3.50000000
H -0.77150000 36.15222000 3.50000000
H 4.17150000 38.56236800 3.50000000
H -0.77150000 38.56236800 3.50000000
H 4.17150000 40.97251600 3.50000000
H -0.77150000 40.97251600 3.50000000
H 4.17150000 43.38266400 3.50000000
H -0.77150000 43.38266400 3.50000000
H 0.46425000 45.52304600 3.50000000
H 2.93575000 45.52304600 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H -1.23575000 4.55053000 0.00000000
H 1.23575000 4.55053000 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 0.46425000 4.55053000 3.50000000
H 2.93575000 4.55053000 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
C 1.39150000 21.69133200 0.00000000
C -1.39150000 21.69133200 0.00000000
C -0.69575000 22.89640600 0.00000000
C 0.69575000 22.89640600 0.00000000
C 1.39150000 24.10148000 0.00000000
C -1.39150000 24.10148000 0.00000000
C -0.69575000 25.30655400 0.00000000
C 0.69575000 25.30655400 0.00000000
C 1.39150000 26.51162800 0.00000000
C -1.39150000 26.51162800 0.00000000
C -0.69575000 27.71670200 0.00000000
C 0.69575000 27.71670200 0.00000000
C 1.39150000 28.92177600 0.00000000
C -1.39150000 28.92177600 0.00000000
C -0.69575000 30.12685000 0.00000000
C 0.69575000 30.12685000 0.00000000
C 1.39150000 31.33192400 0.00000000
C -1.39150000 31.33192400 0.00000000
C -0.69575000 32.53699800 0.00000000
C 0.69575000 32.53699800 0.00000000
C 1.39150000 33.74207200 0.00000000
C -1.39150000 33.74207200 0.00000000
C -0.69575000 34.94714600 0.00000000
C 0.69575000 34.94714600 0.00000000
C 1.39150000 36.15222000 0.00000000
C -1.39150000 36.15222000 0.00000000
C -0.69575000 37.35729400 0.00000000
C 0.69575000 37.35729400 0.00000000
C 1.39150000 38.56236800 0.00000000
C -1.39150000 38.56236800 0.00000000
C -0.69575000 39.76744200 0.00000000
C 0.69575000 39.76744200 0.00000000
C 1.39150000 40.97251600 0.00000000
C -1.39150000 40.97251600 0.00000000
C -0.69575000 42.17759000 0.00000000
C 0.69575000 42.17759000 0.00000000
C 1.39150000 43.38266400 0.00000000
C -1.39150000 43.38266400 0.00000000
C -0.69575000 44.58773800 0.00000000
C 0.69575000 44.58773800 0.00000000
C 1.39150000 45.79281200 0.00000000
C -1.39150000 45.79281200 0.00000000
C -0.69575000 46.99788600 0.00000000
C 0.69575000 46.99788600 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H 2.47150000 21.69133200 0.00000000
H -2.47150000 21.69133200 0.00000000
H 2.47150000 24.10148000 0.00000000
H -2.47150000 24.10148000 0.00000000
H 2.47150000 26.51162800 0.00000000
H -2.47150000 26.51162800 0.00000000
H 2.47150000 28.92177600 0.00000000
H -2.47150000 28.92177600 0.00000000
H 2.47150000 31.33192400 0.00000000
H -2.47150000 31.33192400 0.00000000
H 2.47150000 33.74207200 0.00000000
H -2.47150000 33.74207200 0.00000000
H 2.47150000 36.15222000 0.00000000
H -2.47150000 36.15222000 0.00000000
H 2.47150000 38.56236800 0.00000000
H -2.47150000 38.56236800 0.00000000
H 2.47150000 40.97251600 0.00000000
H -2.47150000 40.97251600 0.00000000
H 2.47150000 43.38266400 0.00000000
H -2.47150000 43.38266400 0.00000000
H 2.47150000 45.79281200 0.00000000
H -2.47150000 45.79281200 0.00000000
H -1.23575000 47.93319400 0.00000000
H 1.23575000 47.93319400 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
C 3.09150000 21.69133200 3.50000000
C 0.30850000 21.69133200 3.50000000
C 1.00425000 22.89640600 3.50000000
C 2.39575000 22.89640600 3.50000000
C 3.09150000 24.10148000 3.50000000
C 0.30850000 24.10148000 3.50000000
C 1.00425000 25.30655400 3.50000000
C 2.39575000 25.30655400 3.50000000
C 3.09150000 26.51162800 3.50000000
C 0.30850000 26.51162800 3.50000000
C 1.00425000 27.71670200 3.50000000
C 2.39575000 27.71670200 3.50000000
C 3.09150000 28.92177600 3.50000000
C 0.30850000 28.92177600 3.50000000
C 1.00425000 30.12685000 3.50000000
C 2.39575000 30.12685000 3.50000000
C 3.09150000 31.33192400 3.50000000
C 0.30850000 31.33192400 3.50000000
C 1.00425000 32.53699800 3.50000000
C 2.39575000 32.53699800 3.50000000
C 3.09150000 33.74207200 3.50000000
C 0.30850000 33.74207200 3.50000000
C 1.00425000 34.94714600 3.50000000
C 2.39575000 34.94714600 3.50000000
C 3.09150000 36.15222000 3.50000000
C 0.30850000 36.15222000 3.50000000
C 1.00425000 37.35729400 3.50000000
C 2.39575000 37.35729400 3.50000000
C 3.09150000 38.56236800 3.50000000
C 0.30850000 38.56236800 3.50000000
C 1.00425000 39.76744200 3.50000000
C 2.39575000 39.76744200 3.50000000
C 3.09150000 40.97251600 3.50000000
C 0.30850000 40.97251600 3.50000000
C 1.00425000 42.17759000 3.50000000
C 2.39575000 42.17759000 3.50000000
C 3.09150000 43.38266400 3.50000000
C 0.30850000 43.38266400 3.50000000
C 1.00425000 44.58773800 3.50000000
C 2.39575000 44.58773800 3.50000000
C 3.09150000 45.79281200 3.50000000
C 0.30850000 45.79281200 3.50000000
C 1.00425000 46.99788600 3.50000000
C 2.39575000 46.99788600 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 4.17150000 21.69133200 3.50000000
H -0.77150000 21.69133200 3.50000000
H 4.17150000 24.10148000 3.50000000
H -0.77150000 24.10148000 3.50000000
H 4.17150000 26.51162800 3.50000000
H -0.77150000 26.51162800 3.50000000
H 4.17150000 28.92177600 3.50000000
H -0.77150000 28.92177600 3.50000000
H 4.17150000 31.33192400 3.50000000
H -0.77150000 31.33192400 3.50000000
H 4.17150000 33.74207200 3.50000000
H -0.77150000 33.74207200 3.50000000
H 4.17150000 36.15222000 3.50000000
H -0.77150000 36.15222000 3.50000000
H 4.17150000 38.56236800 3.50000000
H -0.77150000 38.56236800 3.50000000
H 4.17150000 40.97251600 3.50000000
H -0.77150000 40.97251600 3.50000000
H 4.17150000 43.38266400 3.50000000
H -0.77150000 43.38266400 3.50000000
H 4.17150000 45.79281200 3.50000000
H -0.77150000 45.79281200 3.50000000
H 0.46425000 47.93319400 3.50000000
H 2.93575000 47.93319400 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H -1.23575000 6.96067800 0.00000000
H 1.23575000 6.96067800 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 0.46425000 6.96067800 3.50000000
H 2.93575000 6.96067800 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H -1.23575000 9.37082600 0.00000000
H 1.23575000 9.37082600 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 0.46425000 9.37082600 3.50000000
H 2.93575000 9.37082600 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H -1.23575000 11.78097400 0.00000000
H 1.23575000 11.78097400 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 0.46425000 11.78097400 3.50000000
H 2.93575000 11.78097400 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H -1.23575000 14.19112200 0.00000000
H 1.23575000 14.19112200 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 0.46425000 14.19112200 3.50000000
H 2.93575000 14.19112200 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H -1.23575000 16.60127000 0.00000000
H 1.23575000 16.60127000 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 0.46425000 16.60127000 3.50000000
H 2.93575000 16.60127000 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H -1.23575000 19.01141800 0.00000000
H 1.23575000 19.01141800 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 0.46425000 19.01141800 3.50000000
H 2.93575000 19.01141800 3.50000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
C 0.69575000 -1.20507400 0.00000000
C -0.69575000 -1.20507400 0.00000000
C 1.39150000 0.00000000 0.00000000
C -1.39150000 0.00000000 0.00000000
C -0.69575000 1.20507400 0.00000000
C 0.69575000 1.20507400 0.00000000
C 1.39150000 2.41014800 0.00000000
C -1.39150000 2.41014800 0.00000000
C -0.69575000 3.61522200 0.00000000
C 0.69575000 3.61522200 0.00000000
C 1.39150000 4.82029600 0.00000000
C -1.39150000 4.82029600 0.00000000
C -0.69575000 6.02537000 0.00000000
C 0.69575000 6.02537000 0.00000000
C 1.39150000 7.23044400 0.00000000
C -1.39150000 7.23044400 0.00000000
C -0.69575000 8.43551800 0.00000000
C 0.69575000 8.43551800 0.00000000
C 1.39150000 9.64059200 0.00000000
C -1.39150000 9.64059200 0.00000000
C -0.69575000 10.84566600 0.00000000
C 0.69575000 10.84566600 0.00000000
C 1.39150000 12.05074000 0.00000000
C -1.39150000 12.05074000 0.00000000
C -0.69575000 13.25581400 0.00000000
C 0.69575000 13.25581400 0.00000000
C 1.39150000 14.46088800 0.00000000
C -1.39150000 14.46088800 0.00000000
C -0.69575000 15.66596200 0.00000000
C 0.69575000 15.66596200 0.00000000
C 1.39150000 16.87103600 0.00000000
C -1.39150000 16.87103600 0.00000000
C -0.69575000 18.07611000 0.00000000
C 0.69575000 18.07611000 0.00000000
C 1.39150000 19.28118400 0.00000000
C -1.39150000 19.28118400 0.00000000
C -0.69575000 20.48625800 0.00000000
C 0.69575000 20.48625800 0.00000000
H 1.23575000 -2.14038200 0.00000000
H -1.23575000 -2.14038200 0.00000000
H 2.47150000 0.00000000 0.00000000
H -2.47150000 0.00000000 0.00000000
H 2.47150000 2.41014800 0.00000000
H -2.47150000 2.41014800 0.00000000
H 2.47150000 4.82029600 0.00000000
H -2.47150000 4.82029600 0.00000000
H 2.47150000 7.23044400 0.00000000
H -2.47150000 7.23044400 0.00000000
H 2.47150000 9.64059200 0.00000000
H -2.47150000 9.64059200 0.00000000
H 2.47150000 12.05074000 0.00000000
H -2.47150000 12.05074000 0.00000000
H 2.47150000 14.46088800 0.00000000
H -2.47150000 14.46088800 0.00000000
H 2.47150000 16.87103600 0.00000000
H -2.47150000 16.87103600 0.00000000
H 2.47150000 19.28118400 0.00000000
H -2.47150000 19.28118400 0.00000000
H -1.23575000 21.42156600 0.00000000
H 1.23575000 21.42156600 0.00000000
--
0 1
C 2.39575000 -1.20507400 3.50000000
C 1.00425000 -1.20507400 3.50000000
C 3.09150000 0.00000000 3.50000000
C 0.30850000 0.00000000 3.50000000
C 1.00425000 1.20507400 3.50000000
C 2.39575000 1.20507400 3.50000000
C 3.09150000 2.41014800 3.50000000
C 0.30850000 2.41014800 3.50000000
C 1.00425000 3.61522200 3.50000000
C 2.39575000 3.61522200 3.50000000
C 3.09150000 4.82029600 3.50000000
C 0.30850000 4.82029600 3.50000000
C 1.00425000 6.02537000 3.50000000
C 2.39575000 6.02537000 3.50000000
C 3.09150000 7.23044400 3.50000000
C 0.30850000 7.23044400 3.50000000
C 1.00425000 8.43551800 3.50000000
C 2.39575000 8.43551800 3.50000000
C 3.09150000 9.64059200 3.50000000
C 0.30850000 9.64059200 3.50000000
C 1.00425000 10.84566600 3.50000000
C 2.39575000 10.84566600 3.50000000
C 3.09150000 12.05074000 3.50000000
C 0.30850000 12.05074000 3.50000000
C 1.00425000 13.25581400 3.50000000
C 2.39575000 13.25581400 3.50000000
C 3.09150000 14.46088800 3.50000000
C 0.30850000 14.46088800 3.50000000
C 1.00425000 15.66596200 3.50000000
C 2.39575000 15.66596200 3.50000000
C 3.09150000 16.87103600 3.50000000
C 0.30850000 16.87103600 3.50000000
C 1.00425000 18.07611000 3.50000000
C 2.39575000 18.07611000 3.50000000
C 3.09150000 19.28118400 3.50000000
C 0.30850000 19.28118400 3.50000000
C 1.00425000 20.48625800 3.50000000
C 2.39575000 20.48625800 3.50000000
H 2.93575000 -2.14038200 3.50000000
H 0.46425000 -2.14038200 3.50000000
H 4.17150000 0.00000000 3.50000000
H -0.77150000 0.00000000 3.50000000
H 4.17150000 2.41014800 3.50000000
H -0.77150000 2.41014800 3.50000000
H 4.17150000 4.82029600 3.50000000
H -0.77150000 4.82029600 3.50000000
H 4.17150000 7.23044400 3.50000000
H -0.77150000 7.23044400 3.50000000
H 4.17150000 9.64059200 3.50000000
H -0.77150000 9.64059200 3.50000000
H 4.17150000 12.05074000 3.50000000
H -0.77150000 12.05074000 3.50000000
H 4.17150000 14.46088800 3.50000000
H -0.77150000 14.46088800 3.50000000
H 4.17150000 16.87103600 3.50000000
H -0.77150000 16.87103600 3.50000000
H 4.17150000 19.28118400 3.50000000
H -0.77150000 19.28118400 3.50000000
H 0.46425000 21.42156600 3.50000000
H 2.93575000 21.42156600 3.50000000
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
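# extract_fragments(1) / extract_fragments(2) pull out the bare monomers (unCP), while
# extract_fragments(1, 2) presumably keeps fragment 1 as real atoms and fragment 2 as
# ghost atoms (and vice versa for (2, 1)), giving the counterpoise-corrected (CP)
# monomer geometries used to estimate basis set superposition error.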
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
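# Nuclear repulsion energies (presumably in hartree) for every reagent; reference
# values like these let the driver sanity-check that the geometries above are
# reconstructed consistently before any calculations are run.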
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['ACENES-1-dimer' ] = 623.45166278
DATA['NUCLEAR REPULSION ENERGY']['ACENES-1-monoA-unCP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['ACENES-1-monoB-unCP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['ACENES-2-dimer' ] = 1447.97918063
DATA['NUCLEAR REPULSION ENERGY']['ACENES-2-monoA-unCP' ] = 462.51351405
DATA['NUCLEAR REPULSION ENERGY']['ACENES-2-monoB-unCP' ] = 462.51351405
DATA['NUCLEAR REPULSION ENERGY']['ACENES-3-dimer' ] = 2478.53381614
DATA['NUCLEAR REPULSION ENERGY']['ACENES-3-monoA-unCP' ] = 776.53779489
DATA['NUCLEAR REPULSION ENERGY']['ACENES-3-monoB-unCP' ] = 776.53779489
DATA['NUCLEAR REPULSION ENERGY']['ACENES-4-dimer' ] = 3664.54622337
DATA['NUCLEAR REPULSION ENERGY']['ACENES-4-monoA-unCP' ] = 1131.16297168
DATA['NUCLEAR REPULSION ENERGY']['ACENES-4-monoB-unCP' ] = 1131.16297168
DATA['NUCLEAR REPULSION ENERGY']['ACENES-5-dimer' ] = 4974.79907551
DATA['NUCLEAR REPULSION ENERGY']['ACENES-5-monoA-unCP' ] = 1517.73569587
DATA['NUCLEAR REPULSION ENERGY']['ACENES-5-monoB-unCP' ] = 1517.73569587
DATA['NUCLEAR REPULSION ENERGY']['ACENES-6-dimer' ] = 6388.28553373
DATA['NUCLEAR REPULSION ENERGY']['ACENES-6-monoA-unCP' ] = 1930.62605849
DATA['NUCLEAR REPULSION ENERGY']['ACENES-6-monoB-unCP' ] = 1930.62605849
DATA['NUCLEAR REPULSION ENERGY']['ACENES-7-dimer' ] = 7889.97351320
DATA['NUCLEAR REPULSION ENERGY']['ACENES-7-monoA-unCP' ] = 2365.88438821
DATA['NUCLEAR REPULSION ENERGY']['ACENES-7-monoB-unCP' ] = 2365.88438821
DATA['NUCLEAR REPULSION ENERGY']['ACENES-8-dimer' ] = 9468.60272003
DATA['NUCLEAR REPULSION ENERGY']['ACENES-8-monoA-unCP' ] = 2820.58881529
DATA['NUCLEAR REPULSION ENERGY']['ACENES-8-monoB-unCP' ] = 2820.58881529
DATA['NUCLEAR REPULSION ENERGY']['ACENES-9-dimer' ] = 11115.43628142
DATA['NUCLEAR REPULSION ENERGY']['ACENES-9-monoA-unCP' ] = 3292.49113688
DATA['NUCLEAR REPULSION ENERGY']['ACENES-9-monoB-unCP' ] = 3292.49113688
DATA['NUCLEAR REPULSION ENERGY']['ACENES-10-dimer' ] = 12823.50435945
DATA['NUCLEAR REPULSION ENERGY']['ACENES-10-monoA-unCP' ] = 3779.80831432
DATA['NUCLEAR REPULSION ENERGY']['ACENES-10-monoB-unCP' ] = 3779.80831432
DATA['NUCLEAR REPULSION ENERGY']['ACENES-11-dimer' ] = 14587.12067915
DATA['NUCLEAR REPULSION ENERGY']['ACENES-11-monoA-unCP' ] = 4281.09183900
DATA['NUCLEAR REPULSION ENERGY']['ACENES-11-monoB-unCP' ] = 4281.09183900
DATA['NUCLEAR REPULSION ENERGY']['ACENES-12-dimer' ] = 16401.55973922
DATA['NUCLEAR REPULSION ENERGY']['ACENES-12-monoA-unCP' ] = 4795.14176671
DATA['NUCLEAR REPULSION ENERGY']['ACENES-12-monoB-unCP' ] = 4795.14176671
DATA['NUCLEAR REPULSION ENERGY']['ACENES-13-dimer' ] = 18262.83338543
DATA['NUCLEAR REPULSION ENERGY']['ACENES-13-monoA-unCP' ] = 5320.94785288
DATA['NUCLEAR REPULSION ENERGY']['ACENES-13-monoB-unCP' ] = 5320.94785288
DATA['NUCLEAR REPULSION ENERGY']['ACENES-14-dimer' ] = 20167.53140066
DATA['NUCLEAR REPULSION ENERGY']['ACENES-14-monoA-unCP' ] = 5857.64789898
DATA['NUCLEAR REPULSION ENERGY']['ACENES-14-monoB-unCP' ] = 5857.64789898
DATA['NUCLEAR REPULSION ENERGY']['ACENES-15-dimer' ] = 22112.70479751
DATA['NUCLEAR REPULSION ENERGY']['ACENES-15-monoA-unCP' ] = 6404.49745327
DATA['NUCLEAR REPULSION ENERGY']['ACENES-15-monoB-unCP' ] = 6404.49745327
DATA['NUCLEAR REPULSION ENERGY']['ACENES-16-dimer' ] = 24095.77845643
DATA['NUCLEAR REPULSION ENERGY']['ACENES-16-monoA-unCP' ] = 6960.84724639
DATA['NUCLEAR REPULSION ENERGY']['ACENES-16-monoB-unCP' ] = 6960.84724639
DATA['NUCLEAR REPULSION ENERGY']['ACENES-17-dimer' ] = 26114.48445685
DATA['NUCLEAR REPULSION ENERGY']['ACENES-17-monoA-unCP' ] = 7526.12604311
DATA['NUCLEAR REPULSION ENERGY']['ACENES-17-monoB-unCP' ] = 7526.12604311
DATA['NUCLEAR REPULSION ENERGY']['ACENES-18-dimer' ] = 28166.81033192
DATA['NUCLEAR REPULSION ENERGY']['ACENES-18-monoA-unCP' ] = 8099.82737807
DATA['NUCLEAR REPULSION ENERGY']['ACENES-18-monoB-unCP' ] = 8099.82737807
DATA['NUCLEAR REPULSION ENERGY']['ACENES-19-dimer' ] = 30250.95830169
DATA['NUCLEAR REPULSION ENERGY']['ACENES-19-monoA-unCP' ] = 8681.49913522
DATA['NUCLEAR REPULSION ENERGY']['ACENES-19-monoB-unCP' ] = 8681.49913522
DATA['NUCLEAR REPULSION ENERGY']['ACENES-20-dimer' ] = 32365.31272557
DATA['NUCLEAR REPULSION ENERGY']['ACENES-20-monoA-unCP' ] = 9270.73524802
DATA['NUCLEAR REPULSION ENERGY']['ACENES-20-monoB-unCP' ] = 9270.73524802
DATA['NUCLEAR REPULSION ENERGY']['ACENES-1-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['ACENES-1-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['ACENES-2-monoA-CP' ] = 462.51351405
DATA['NUCLEAR REPULSION ENERGY']['ACENES-2-monoB-CP' ] = 462.51351405
DATA['NUCLEAR REPULSION ENERGY']['ACENES-3-monoA-CP' ] = 776.53779489
DATA['NUCLEAR REPULSION ENERGY']['ACENES-3-monoB-CP' ] = 776.53779489
DATA['NUCLEAR REPULSION ENERGY']['ACENES-4-monoA-CP' ] = 1131.16297168
DATA['NUCLEAR REPULSION ENERGY']['ACENES-4-monoB-CP' ] = 1131.16297168
DATA['NUCLEAR REPULSION ENERGY']['ACENES-5-monoA-CP' ] = 1517.73569587
DATA['NUCLEAR REPULSION ENERGY']['ACENES-5-monoB-CP' ] = 1517.73569587
DATA['NUCLEAR REPULSION ENERGY']['ACENES-6-monoA-CP' ] = 1930.62605849
DATA['NUCLEAR REPULSION ENERGY']['ACENES-6-monoB-CP' ] = 1930.62605849
DATA['NUCLEAR REPULSION ENERGY']['ACENES-7-monoA-CP' ] = 2365.88438821
DATA['NUCLEAR REPULSION ENERGY']['ACENES-7-monoB-CP' ] = 2365.88438821
DATA['NUCLEAR REPULSION ENERGY']['ACENES-8-monoA-CP' ] = 2820.58881529
DATA['NUCLEAR REPULSION ENERGY']['ACENES-8-monoB-CP' ] = 2820.58881529
DATA['NUCLEAR REPULSION ENERGY']['ACENES-9-monoA-CP' ] = 3292.49113688
DATA['NUCLEAR REPULSION ENERGY']['ACENES-9-monoB-CP' ] = 3292.49113688
DATA['NUCLEAR REPULSION ENERGY']['ACENES-10-monoA-CP' ] = 3779.80831432
DATA['NUCLEAR REPULSION ENERGY']['ACENES-10-monoB-CP' ] = 3779.80831432
DATA['NUCLEAR REPULSION ENERGY']['ACENES-11-monoA-CP' ] = 4281.09183900
DATA['NUCLEAR REPULSION ENERGY']['ACENES-11-monoB-CP' ] = 4281.09183900
DATA['NUCLEAR REPULSION ENERGY']['ACENES-12-monoA-CP' ] = 4795.14176671
DATA['NUCLEAR REPULSION ENERGY']['ACENES-12-monoB-CP' ] = 4795.14176671
DATA['NUCLEAR REPULSION ENERGY']['ACENES-13-monoA-CP' ] = 5320.94785288
DATA['NUCLEAR REPULSION ENERGY']['ACENES-13-monoB-CP' ] = 5320.94785288
DATA['NUCLEAR REPULSION ENERGY']['ACENES-14-monoA-CP' ] = 5857.64789898
DATA['NUCLEAR REPULSION ENERGY']['ACENES-14-monoB-CP' ] = 5857.64789898
DATA['NUCLEAR REPULSION ENERGY']['ACENES-15-monoA-CP' ] = 6404.49745327
DATA['NUCLEAR REPULSION ENERGY']['ACENES-15-monoB-CP' ] = 6404.49745327
DATA['NUCLEAR REPULSION ENERGY']['ACENES-16-monoA-CP' ] = 6960.84724639
DATA['NUCLEAR REPULSION ENERGY']['ACENES-16-monoB-CP' ] = 6960.84724639
DATA['NUCLEAR REPULSION ENERGY']['ACENES-17-monoA-CP' ] = 7526.12604311
DATA['NUCLEAR REPULSION ENERGY']['ACENES-17-monoB-CP' ] = 7526.12604311
DATA['NUCLEAR REPULSION ENERGY']['ACENES-18-monoA-CP' ] = 8099.82737807
DATA['NUCLEAR REPULSION ENERGY']['ACENES-18-monoB-CP' ] = 8099.82737807
DATA['NUCLEAR REPULSION ENERGY']['ACENES-19-monoA-CP' ] = 8681.49913522
DATA['NUCLEAR REPULSION ENERGY']['ACENES-19-monoB-CP' ] = 8681.49913522
DATA['NUCLEAR REPULSION ENERGY']['ACENES-20-monoA-CP' ] = 9270.73524802
DATA['NUCLEAR REPULSION ENERGY']['ACENES-20-monoB-CP' ] = 9270.73524802
|
jH0ward/psi4
|
psi4/share/psi4/databases/ACENES.py
|
Python
|
lgpl-3.0
| 162,818
|
[
"Psi4"
] |
5b7269bd93e8fa764c93598fc2fa10caf99f159b6491fe78819c4a861fc7ab14
|
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
def display_reader(reader, renderView) :
# show data in view
Display = Show(reader, renderView)
# trace defaults for the display properties.
Display.AmbientColor = [0.0, 0.0, 0.0]
Display.ColorArrayName = [None, '']
Display.DiffuseColor = [0.6666666666666666, 0.6666666666666666, 1.0]
Display.BackfaceDiffuseColor = [0.6666666666666666, 0.6666666666666666, 1.0]
Display.OSPRayScaleFunction = 'PiecewiseFunction'
Display.SelectOrientationVectors = 'None'
Display.ScaleFactor = 0.29113320297760004
Display.SelectScaleArray = 'None'
Display.GlyphType = 'Arrow'
Display.GaussianRadius = 0.14556660148880002
Display.SetScaleArray = [None, '']
Display.ScaleTransferFunction = 'PiecewiseFunction'
Display.OpacityArray = [None, '']
Display.OpacityTransferFunction = 'PiecewiseFunction'
return Display
def display_plan(fname, renderView) :
plan_1vtk = LegacyVTKReader(FileNames=[fname])
plan_1vtkDisplay = display_reader(plan_1vtk, renderView)
plan_1vtkDisplay.SetRepresentationType('Wireframe')
plan_1vtkDisplay.AmbientColor = [0.61, 0.8, 1.0]
def display_target(fname, renderView) :
targetvtk = LegacyVTKReader(FileNames=[fname])
targetvtkDisplay = display_reader(targetvtk, renderView)
targetvtkDisplay.SetRepresentationType('Wireframe')
targetvtkDisplay.LineWidth = 8.0
targetvtkDisplay.AmbientColor = [0.76, 0.29, 1.0]
def display_model(fname, renderView) :
model_1vtk = LegacyVTKReader(FileNames=[fname])
generateIds1 = GenerateIds(Input=model_1vtk)
idsLUT = GetColorTransferFunction('Ids')
# show data in view
generateIds1Display = Show(generateIds1, renderView)
# trace defaults for the display properties.
generateIds1Display.AmbientColor = [0.0, 0.0, 0.0]
generateIds1Display.ColorArrayName = ['POINTS', 'Ids']
generateIds1Display.DiffuseColor = [0.6666666666666666, 0.6666666666666666, 1.0]
generateIds1Display.LookupTable = idsLUT
generateIds1Display.BackfaceDiffuseColor = [0.6666666666666666, 0.6666666666666666, 1.0]
generateIds1Display.OSPRayScaleArray = 'Ids'
generateIds1Display.OSPRayScaleFunction = 'PiecewiseFunction'
generateIds1Display.SelectOrientationVectors = 'Ids'
generateIds1Display.ScaleFactor = 0.29750560522100006
generateIds1Display.SelectScaleArray = 'Ids'
generateIds1Display.GlyphType = 'Arrow'
generateIds1Display.GaussianRadius = 0.14875280261050003
generateIds1Display.SetScaleArray = ['POINTS', 'Ids']
generateIds1Display.ScaleTransferFunction = 'PiecewiseFunction'
generateIds1Display.OpacityArray = ['POINTS', 'Ids']
generateIds1Display.OpacityTransferFunction = 'PiecewiseFunction'
generateIds1Display.SetScalarBarVisibility(renderView, False)
idsPWF = GetOpacityTransferFunction('Ids')
generateIds1Display.SetRepresentationType('Wireframe')
generateIds1Display.LineWidth = 8.0
idsLUT.ApplyPreset('Rainbow Desaturated', True)
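# Colouring the model by generated point Ids with a desaturated rainbow LUT makes the
# ordering of points along the curve visible, which presumably helps to track point
# correspondences between the Model and the Target in the rendered images.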
def get_view() :
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [2100, 1700]
renderView1.OrientationAxesVisibility = 0
return renderView1
def screenshot(renderView1, fname) :
# reset view to fit data
#renderView1.ResetCamera()
#renderView1.InteractionMode = '2D'
renderView1.CameraViewAngle = 45
renderView1.CameraPosition = [0.0, 0.0, 1.]
renderView1.CameraFocalPoint = [0.0, 0.0, 0.0]
#renderView1.CameraParallelScale = 1.7482104584020597
# save screenshot
SaveScreenshot(fname, magnification=1, quality=100, view=renderView1)
def display_final_matching(folder, ruler = None) :
targetfname = 'results/vtk_files/' + folder + '/Target.vtk'
modelfname = 'results/vtk_files/' + folder + '/Model.vtk'
outfname = 'results/images/matching_' + folder + '.png'
renderView1 = get_view()
#display_plan(planfname, renderView1)
display_target(targetfname, renderView1)
display_model(modelfname, renderView1)
if ruler is not None :
display_ruler(ruler[0], ruler[1], renderView1)
screenshot(renderView1, outfname)
def display_first_plan(folder, ruler = None) :
targetfname = 'results/vtk_files/' + folder + '/Target.vtk'
modelfname = 'results/vtk_files/' + folder + '/Descent/Models/Model_1.vtk'
planfname = 'results/vtk_files/' + folder + '/Descent/Plans/Plan_1.vtk'
outfname = 'results/images/firstplan_' + folder + '.png'
renderView1 = get_view()
display_target(targetfname, renderView1)
display_plan(planfname, renderView1)
display_model(modelfname, renderView1)
if ruler is not None :
display_ruler(ruler[0], ruler[1], renderView1)
screenshot(renderView1, outfname)
def display_descent(folder, iterations) :
targetfname = 'results/vtk_files/' + folder + '/Target.vtk'
for it in iterations :
modelfname = 'results/vtk_files/' + folder + '/Descent/Models/Model_' + str(it) + '.vtk'
planfname = 'results/vtk_files/' + folder + '/Descent/Plans/Plan_' + str(it) + '.vtk'
outfname = 'results/images/descent_' + folder + '_it-' + str(it) + '.png'
renderView1 = get_view()
display_target(targetfname, renderView1)
display_plan(planfname, renderView1)
display_model(modelfname, renderView1)
screenshot(renderView1, outfname)
def display_ruler(name, length, view) :
# create a new 'Ruler'
ruler1 = Ruler()
# Properties modified on ruler1
ruler1.Point1 = [-0.2-length/2, 0.27, 0.0]
ruler1.Point2 = [-0.2+length/2, 0.27, 0.0]
# show data in view
ruler1Display = Show(ruler1, view)
# trace defaults for the display properties.
ruler1Display.Color = [0.0, 0.0, 0.0]
# Properties modified on ruler1Display
ruler1Display.LabelFormat = name + ' = %6.3g'
# Properties modified on ruler1Display
ruler1Display.AxisLineWidth = 8.0
# Properties modified on ruler1Display
ruler1Display.AxisColor = [0.2, 0.6, 1.0]
# Properties modified on ruler1Display
ruler1Display.FontSize = 11
# Properties modified on ruler1Display
ruler1Display.FontFamily = 'Courier'
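# The ruler is drawn as a horizontal segment of the requested length with a formatted
# label (e.g. '$\sigma$ = 0.2'), so each screenshot carries its own scale bar for the
# parameter being illustrated ($\sigma$, $\sqrt{\epsilon}$ or $\sqrt{\rho}$ in the
# calls below).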
def display_dataset() :
folder = 'kernel_big'
targetfname = 'results/vtk_files/' + folder + '/Target.vtk'
modelfname = 'results/vtk_files/' + folder + '/Template.vtk'
gridfname = 'results/vtk_files/' + folder + '/Grid/grid_0.vtk'
outfname = 'results/images/dataset.png'
renderView = get_view()
grid = LegacyVTKReader(FileNames=[gridfname])
gridDisplay = display_reader(grid, renderView)
gridDisplay.SetRepresentationType('Wireframe')
gridDisplay.LineWidth = 1.0
gridDisplay.AmbientColor = [0.75, 0.75, .75]
display_target(targetfname, renderView)
display_model(modelfname, renderView)
screenshot(renderView, outfname)
display_dataset()
display_final_matching('kernel_big', ruler = ('$\\sigma$', .2))
display_final_matching('kernel_small', ruler = ('$\\sigma$', .05))
display_final_matching('sinkhorn_eps-l_rho-l', ruler = ('$\\sqrt{\\epsilon}$', .1))
display_final_matching('sinkhorn_eps-m_rho-l', ruler = ('$\\sqrt{\\epsilon}$', .03))
display_final_matching('sinkhorn_eps-s_rho-l', ruler = ('$\\sqrt{\\epsilon}$', .015))
display_first_plan('sinkhorn_eps-m_rho-s', ruler = ('$\\sqrt{\\rho}$', .1))
display_first_plan('sinkhorn_eps-m_rho-m', ruler = ('$\\sqrt{\\rho}$', .15))
display_first_plan('sinkhorn_eps-m_rho-l', ruler = ('$\\sqrt{\\rho}$', .5))
display_descent('sinkhorn_eps-s_rho-l', [1, 5, 10, 20, 40])
|
jeanfeydy/lddmm-ot
|
LDDMM_Python/demo/Notebooks/Feb2017_paper/render.py
|
Python
|
mit
| 7,282
|
[
"ParaView",
"VTK"
] |
8d67e846a0b39234cfae0421715fb856bbabe7fef04a8376972093af9aa99a16
|
# coding: utf-8
# # Test out standardized ADCIRC, SELFE and FVCOM datasets with pyugrid, IRIS and Cartopy
# The datasets being accessed here are NetCDF files from ADCIRC, SELFE and FVCOM, with attributes added or modified virtually using NcML to meet the [UGRID conventions standard for unstructured grid models](https://github.com/ugrid-conventions/ugrid-conventions/blob/v0.9.0/ugrid-conventions.md).
#
# This example was developed for the Integrated Ocean Observing System (IOOS) Coastal and Ocean Modeling Testbed.
#
# You can quickly and easily [set up the IOOS Anaconda python environment that can run this notebook](https://github.com/ioos/conda-recipes/wiki).
# In[1]:
get_ipython().magic(u'matplotlib inline')
from __future__ import (absolute_import, division, print_function)
import numpy as np
import matplotlib.tri as tri
import datetime as dt
import matplotlib.pyplot as plt
# In[2]:
import cartopy.crs as ccrs
import iris
iris.FUTURE.netcdf_promote = True
import pyugrid
# In[3]:
# specify UGRID compliant OPeNDAP Data URL
#ADCIRC
url = 'http://comt.sura.org/thredds/dodsC/data/comt_1_archive/inundation_tropical/UND_ADCIRC/Hurricane_Rita_2D_final_run_without_waves'
#FVCOM
#url = 'http://comt.sura.org/thredds/dodsC/data/comt_1_archive/inundation_tropical/USF_FVCOM/Hurricane_Rita_2D_final_run_without_waves'
#SELFE
#url = 'http://comt.sura.org/thredds/dodsC/data/comt_1_archive/inundation_tropical/VIMS_SELFE/Hurricane_Rita_2D_final_run_without_waves'
# set parameters
bbox = [-95, -85, 27, 32] # set the bounding box [lon_min, lon_max, lat_min, lat_max]
var = 'sea_surface_height_above_geoid' # standard_name (or long_name, if no standard_name)
levs = np.arange(-1,5.0,.2) # set the contour levels
start = dt.datetime(2005, 9, 24, 5, 0, 0) # time in UTC
#start = dt.datetime.utcnow() + dt.timedelta(hours=6)
# In[4]:
cube = iris.load_cube(url,var)
# In[5]:
print(cube)
# In[ ]:
ug = pyugrid.UGrid.from_ncfile(url)
# What's in there?
#print("There are %i nodes"%ug.nodes.shape[0])
#print("There are %i edges"%ug.edges.shape[0])
#print("There are %i faces"%ug.faces.shape[0])
# In[ ]:
cube.mesh = ug
cube.mesh_dimension = 1 # (0:time,1:node)
# In[ ]:
lon = cube.mesh.nodes[:,0]
lat = cube.mesh.nodes[:,1]
nv = cube.mesh.faces
# In[ ]:
triang = tri.Triangulation(lon,lat,triangles=nv)
# In[ ]:
tvar = cube.coord('time')
itime = tvar.nearest_neighbour_index(tvar.units.date2num(start))
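# date2num converts the requested datetime into the model's time units, and
# nearest_neighbour_index then picks the closest available time step to plot.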
# In[ ]:
zcube = cube[itime]
# In[ ]:
plt.figure(figsize=(16,6))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(bbox)
ax.coastlines()
plt.tricontourf(triang, zcube.data, levels=levs)
plt.colorbar(fraction=0.046, pad=0.04)
plt.tricontour(triang, zcube.data, colors='k',levels=levs)
tstr = tvar.units.num2date(tvar.points[itime])
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
plt.title('%s: %s: %s' % (var,tstr,zcube.attributes['title']));
# In[ ]:
|
rsignell-usgs/notebook
|
pyugrid_test.py
|
Python
|
mit
| 2,987
|
[
"NetCDF"
] |
553f53830daa78019a1ebe88816409962eae662b98dd2a897f3c89aa3f8eb74a
|
from __future__ import division, print_function
import numpy as np
import models as m
import sql_utils as sql
import pandas as pd
import multiprocessing as mp
import gus_utils as gu
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.integrate import fixed_quad
from functools import partial
from scipy.special import gamma
def construct_interpolator(data,tracer):
"""
Construct an interpolator for p(r) of the data
Arguments
---------
data: DataFrame
pandas dataframe with relevant info for the tracer
tracer: string
name of tracer
Returns
-------
endpoints: list
the centres of the extremal bins
spline: InterpolatedUnivariateSpline
spline object for p(r)
"""
r = m.compute_galactocentric_radii(data,tracer,append_dataframe=False)
if tracer=="kgiant" or tracer=="bhb":
r = r[r<50.]
elif tracer=="main_sequence":
r = r[r<20.]
pdf,bins = np.histogram(r,10,normed=True)
r_nodes = np.array([.5*(bins[i]+bins[i+1]) for i in np.arange(10)])
return ([np.min(r_nodes), np.max(r_nodes)],InterpolatedUnivariateSpline(r_nodes,pdf))
def vLOS_probability(v,vmin,k,spline,limits,params,model):
"""Calculate the probability density at a line of sight velocity given a model
with a particular set of parameters, and a selection function p(r).
Arguments
---------
v: float
line of sight velocity at which to evaluate the probability
vmin: float
the minimum line of sight velocity in the sample
k: float
the power law index of the speed distribution
spline: InterpolatedUnivariateSpline
a spline object for p(r)
limits: list
the upper and lower limits of p(r)
params: array_like
model parameters
model: string
the name of the model
Returns
-------
pdf: float
the probability density at v
"""
if v<vmin: return 0.
def numerator_integrand(r):
out = np.zeros_like(r)
vesc = m.vesc_model(r,0.,0.,params,model)
out[vesc<=v] = 0.
out[vesc>v] = (m.vesc_model(r[vesc>v],0.,0.,params,model) - v)**(k+1.) * spline(r[vesc>v])
return out
numerator = fixed_quad(numerator_integrand,limits[0],limits[1],n=12)[0]
def denominator_integrand(r):
return spline(r)*(m.vesc_model(r,0.,0.,params,model) - vmin)**(k+2.) / (k+2.)
denominator = fixed_quad(denominator_integrand,limits[0],limits[1],n=12)[0]
return numerator/denominator
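# In equation form, the function above evaluates
#   p(v | params) = int p(r) * (v_esc(r) - v)^(k+1) dr / int p(r) * (v_esc(r) - v_min)^(k+2) / (k+2) dr ,
# i.e. a power-law speed distribution truncated at the local escape speed, averaged
# over the tracer's radial selection function p(r), with both integrals evaluated by
# fixed-order Gauss-Legendre quadrature.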
def draw_samples(N,vmin,k,spline,limits,params,model):
"""
Given a model, draw a sample of size N from p(vLOS).
Arguments
---------
N: int
the number of points to draw
vmin: float
the minimum speed considered
k: float
the power law index of the speed distribution
spline: InterpolatedUnivariateSpline
spline of p(r) for this tracer
limits: list
the upper and lower limits of the spline
params: array_like
model parameters
model: string
name of model
Returns
-------
v: array_like
list of velocities sampled from p(vLOS)
"""
v = np.linspace(vmin,600.,100)
pdf = np.array([vLOS_probability(vi,vmin,k,spline,limits,params,model) for vi in v])
v_spline = InterpolatedUnivariateSpline(v,pdf)
cdf = np.array([fixed_quad(v_spline,vmin,vi)[0] for vi in v])
try:
idx = np.where(np.diff(cdf)<0.)[0][0]+1
except IndexError:
idx = None
v,cdf = v[:idx],cdf[:idx]
inv_cdf = InterpolatedUnivariateSpline(cdf,v)
u = np.random.uniform(size=N)
return inv_cdf(u)
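# draw_samples is inverse-transform sampling: tabulate p(vLOS) on a grid, integrate the
# spline to obtain the CDF, spline the inverse CDF, and push uniform random numbers
# through it. The diff(cdf) < 0 check trims any non-monotonic tail caused by spline or
# quadrature noise so that the inverse spline remains well defined.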
def posterior_predictive_check(chain,tracer,model,vmin,nbins=20,thin_by=1,burnin=200):
"""
For every set of parameters in an MCMC chain, generate a mock data set of the same
size as the data.
Arguments
---------
chain: array_like [nsamples,ndim]
mcmc chain
tracer: string
type of tracer
model: string
the name of the model
vmin: float
the minimum speed considered
nbins: int (=20)
the number of bins in vLOS to use
thin_by: int(=1)
thin the chains by this factor
burnin: int(=200)
number of steps per walker to burn in
Returns
-------
bin_centres: array_like
centres of bins in vLOS
counts: array_like
the number counts of the data in each of the above bins
model_counts: array_like[nsamples,nstars]
the counts generated in each of the above bins in each mock sample
"""
n = m.get_numparams(model)
c = gu.reshape_chain(chain)[:,burnin::thin_by,:]
c = np.reshape(c, (c.shape[0]*c.shape[1],c.shape[2]))
samples = c[:,-n:]
if tracer == "main_sequence":
k = c[:,2]
data = pd.read_csv("/data/aamw3/SDSS/main_sequence.csv")
data = data[data.vgsr!=np.max(data.vgsr)].reset_index(drop=True) #remove the one outlier
elif tracer == "kgiant":
k = c[:,1]
data = pd.read_csv("/data/aamw3/SDSS/kgiant.csv")
else:
k = c[:,0]
data = pd.read_csv("/data/aamw3/SDSS/bhb.csv")
lims,spline = construct_interpolator(data,tracer)
data = data[np.abs(data.vgsr)>vmin].reset_index(drop=True)
N = len(data)
counts,bin_edges = np.histogram(np.abs(data.vgsr.values),nbins)
bin_centres = np.array([.5*(bin_edges[i] + bin_edges[i+1]) for i in np.arange(nbins)])
model_counts = np.zeros((len(k), nbins))
for i,theta in enumerate(samples):
v = draw_samples(N,vmin,k[i],spline,lims,theta,model)
model_counts[i,:],_ = np.histogram(v,bin_edges)
return bin_centres,counts,model_counts
def ppc_alltracers(fname,chain,model,vmin,nbins=[20,20,10],thin_by=1,burnin=200):
"""
generate mock samples for all of our tracer groups. Save all of the information
to file.
Arguments
---------
fname: string
name of file to write dictionaries to
chain: array_like [nsamples,ndim]
mcmc chain
model: string
the name of the model
vmin: float
the minimum speed considered
nbins: list(=[20,20,10])
the number of bins to use for each of the three tracers
thin_by: int(=1)
thin the chains by this factor
burnin: int(=200)
number of steps per walker to burn in
"""
tracers = ["main_sequence","kgiant","bhb"]
for i,tracer in enumerate(tracers):
bin_centres,data_counts,model_counts = posterior_predictive_check(chain,tracer,\
model,vmin,nbins=nbins[i],thin_by=thin_by,burnin=burnin)
summary = {'bin_centres': bin_centres, 'data_counts': data_counts, 'model_counts': model_counts}
np.save(fname+"_"+tracer,summary)
return None
def posterior_predictive(v,vmin,k_samples,spline,limits,param_samples,model):
"""
Compute the posterior predictive distribution at v given samples from the posterior
from an MCMC.
Arguments
---------
v: float
the line-of-sight velocity at which to compute the posterior predictive distribution
vmin: float
cut off speed
k_samples: array_like
MCMC samples of the slope of the speed distribution
spline: InterpolatedUnivariateSpline
a spline object for p(r)
limits: list
[rmin,rmax] for the spline
param_samples: array_like [n_params, n_samples]
samples of the potential parameters
model: string
name of model
"""
return np.mean(np.array([ vLOS_probability(v,vmin,k_samples[i],spline,limits,param_samples[i],model) \
for i in np.arange(len(k_samples))]))
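# The posterior predictive density is approximated by a Monte Carlo average over the
# retained MCMC samples:  p(v | data) ~ (1/N) * sum_i p(v | k_i, theta_i).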
def posterior_predictive_grid(v_grid,vmin,chain,model,tracer,burnin=200,pool_size=8):
"""
Compute the posterior predictive distribution given an MCMC chain and a model. Parallelise
over a given number of threads to speed up computation.
Arguments
---------
v_grid: array_like
an array of speeds at which to evaluate the posterior predictive distribution
vmin: float
the minimum speed considered
chain: array_like [nsamples,ndim]
an MCMC chain of model parameters
model: string
the name of the model
tracer: string
the type of tracer
burnin: int (=200)
the number of steps per walker to disregard as burn-in
pool_size: int (=8)
the size of the multiprocessing pool over which to distribute computation
Returns
-------
ppd: array_like
array of the same shape as v_grid, containing the posterior predictive probabilities
at each speed in v_grid
"""
# reshape the chain according to which model we're looking at
n = m.get_numparams(model)
c = gu.reshape_chain(chain)[:,burnin:,:]
c = np.reshape(c, (c.shape[0]*c.shape[1],c.shape[2]))
samples = c[:,-n:]
if tracer == "main_sequence":
k = c[:,2]
data = pd.read_csv("/data/aamw3/SDSS/main_sequence.csv")
lims,spline = construct_interpolator(data,"main_sequence")
elif tracer == "kgiant":
k = c[:,1]
data = pd.read_csv("/data/aamw3/SDSS/kgiant.csv")
lims,spline = construct_interpolator(data,"kgiant")
elif tracer == "bhb":
k = c[:,0]
data = pd.read_csv("/data/aamw3/SDSS/bhb.csv")
lims,spline = construct_interpolator(data,"bhb")
parfun = partial(posterior_predictive,vmin=vmin,k_samples=k,spline=spline,limits=lims\
,param_samples=samples,model=model)
pool = mp.Pool(pool_size)
output = pool.map(parfun,v_grid)
pool.close()
return output
def outlier_probabilities(params, data, vmin, model):
"""
Compute the posterior probabilities that stars are outliers under the given model.
Arguments
---------
params: array_like
the model parameters
data: list
the output of sample_distances_multiple_tracers
vmin: float
the minimum radial velocity considered
Returns
-------
outlier_probabilities: list
the probability, for each star in each tracer sample, that it belongs to the
outlier component of the mixture model.
"""
kbhb,kkgiant,kms,f = params[:4]
pot_params = params[4:]
outlier_probabilities = [None,None,None]
k = [kbhb,kkgiant,kms]
outlier_normalisation = ( .5*m.erfc( vmin / (np.sqrt(2.)*1000.) ) )**-1.
for i,tracer in enumerate(data):
l,b,v,s = tracer
x,y,z = gu.galactic2cartesian(s,b,l)
vesc = m.vesc_model(x,y,z,pot_params,model)
out = np.zeros_like(v)
with m.warnings.catch_warnings():
# deliberately generating NaNs here, so stop python from warning us about them
m.warnings.simplefilter("ignore",category=RuntimeWarning)
out = (1.-f)*(k[i]+2)*(vesc - np.abs(v))**(k[i]+1.) / (vesc - vmin)**(k[i]+2.) + \
f*outlier_normalisation*m.Gaussian(np.abs(v),0.,1000.)
out[np.isnan(out)] = f*outlier_normalisation*m.Gaussian(np.abs(v[np.isnan(out)]),0.,1000.)
outlier = f*outlier_normalisation*m.Gaussian(np.abs(v),0.,1000.)
outlier_probabilities[i] = np.mean(outlier,axis=1) / np.mean(out, axis=1)
return outlier_probabilities
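# Each tracer's line-of-sight velocities are modelled as a two-component mixture: with
# weight (1 - f) a power law truncated at the local escape speed, and with weight f a
# broad Gaussian outlier component (sigma = 1000, presumably km/s, renormalised for the
# |v| > vmin cut). The returned arrays are the probabilities that each star belongs to
# the outlier component, averaged over the sampled distances.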
def check_outliers(chain,vmin,model,burnin=200):
"""
Compute the probabilities that stars are outliers using our MCMC chains. We are
being lazy and not marginalising over the posterior because this is a quick check.
"""
res = gu.ChainResults(chain,burnin=burnin)[:,0]
n = m.get_numparams(model)
data = m.sample_distances_multiple_tracers(n_samples=200,vmin=vmin)
return outlier_probabilities(res,data,vmin,model)
def gaia_crossmatch():
"""
Cross-match our MS targets to TGAS and check that they have small tangential motions
"""
ms = pd.read_csv("/data/aamw3/SDSS/main_sequence.csv")
query_str = "select ss.pmra_new,ss.pmdec_new from mytable as t\
left join lateral (select g.pmra_new,g.pmdec_new \
from gaia_dr1_aux.gaia_source_sdssdr9_xm_new as g \
where g.objid=t.objid order by g.dist \
asc limit 1) as ss on true"
pmra,pmdec = sql.local_join(query_str,'mytable',(ms.objid.values,),('objid',))
ms.loc[:,'pmra'] = pd.Series(pmra,index=ms.index)
ms.loc[:,'pmdec'] = pd.Series(pmdec,index=ms.index)
return ms
def main():
fname = "/data/aamw3/SDSS/model_comparison"
chain = np.genfromtxt("/data/aamw3/mcmc/escape_chains/spherical_powerlaw.dat")
ppc_alltracers(fname,chain,"spherical_powerlaw",200.,nbins=[20,20,10],thin_by=1,burnin=200)
if __name__ == "__main__":
main()
|
anguswilliams91/OnTheRun
|
code/fits.py
|
Python
|
mit
| 12,759
|
[
"Gaussian"
] |
d3142d2ae5c9d641428f094de59b17a834db5267a6765f9885f66dd418fdae0b
|
import os
import pytest
from numpy.testing import (assert_almost_equal,
assert_array_equal,
assert_array_almost_equal)
import numpy as np
import oddt
from oddt.spatial import (angle,
dihedral,
rmsd,
distance,
rotate)
from .utils import shuffle_mol
test_data_dir = os.path.dirname(os.path.abspath(__file__))
ASPIRIN_SDF = """
RDKit 3D
13 13 0 0 0 0 0 0 0 0999 V2000
3.3558 -0.4356 -1.0951 C 0 0 0 0 0 0 0 0 0 0 0 0
2.0868 -0.6330 -0.3319 C 0 0 0 0 0 0 0 0 0 0 0 0
2.0284 -0.9314 0.8534 O 0 0 0 0 0 0 0 0 0 0 0 0
1.0157 -0.4307 -1.1906 O 0 0 0 0 0 0 0 0 0 0 0 0
-0.2079 -0.5332 -0.5260 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.9020 -1.7350 -0.6775 C 0 0 0 0 0 0 0 0 0 0 0 0
-2.1373 -1.8996 -0.0586 C 0 0 0 0 0 0 0 0 0 0 0 0
-2.6805 -0.8641 0.6975 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.9933 0.3419 0.8273 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7523 0.5244 0.2125 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.0600 1.8264 0.3368 C 0 0 0 0 0 0 0 0 0 0 0 0
0.9397 2.1527 -0.2811 O 0 0 0 0 0 0 0 0 0 0 0 0
-0.6931 2.6171 1.2333 O 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
2 4 1 0
4 5 1 0
5 6 2 0
6 7 1 0
7 8 2 0
8 9 1 0
9 10 2 0
10 11 1 0
11 12 2 0
11 13 1 0
10 5 1 0
M END
"""
def test_angles():
"""Test spatial computations - angles"""
# Angles
assert_array_almost_equal(angle(np.array((1, 0, 0)),
np.array((0, 0, 0)),
np.array((0, 1, 0))), 90)
assert_array_almost_equal(angle(np.array((1, 0, 0)),
np.array((0, 0, 0)),
np.array((1, 1, 0))), 45)
# Check benzene ring angle
mol = oddt.toolkit.readstring('smi', 'c1ccccc1')
mol.make3D()
assert_array_almost_equal(angle(mol.coords[0],
mol.coords[1],
mol.coords[2]), 120, decimal=1)
def test_dihedral():
"""Test dihedrals"""
# Dihedrals
assert_array_almost_equal(dihedral(np.array((1, 0, 0)),
np.array((0, 0, 0)),
np.array((0, 1, 0)),
np.array((1, 1, 0))), 0)
assert_array_almost_equal(dihedral(np.array((1, 0, 0)),
np.array((0, 0, 0)),
np.array((0, 1, 0)),
np.array((1, 1, 1))), -45)
# Check benzene ring dihedral
mol = oddt.toolkit.readstring('smi', 'c1ccccc1')
mol.make3D()
assert abs(dihedral(*mol.coords[:4])) < 2.
def test_distance():
mol1 = oddt.toolkit.readstring('sdf', ASPIRIN_SDF)
d = distance(mol1.coords, mol1.coords)
n_atoms = len(mol1.coords)
assert d.shape == (n_atoms, n_atoms)
assert_array_equal(d[np.eye(len(mol1.coords), dtype=bool)], np.zeros(n_atoms))
d = distance(mol1.coords, mol1.coords.mean(axis=0).reshape(1, 3))
assert d.shape == (n_atoms, 1)
ref_dist = [[3.556736951371501], [2.2058040428631056], [2.3896002745745415],
[1.6231668718498249], [0.7772981740050453], [2.0694947503940004],
[2.8600587871157184], [2.9014207091233857], [2.1850791695403564],
[0.9413368403116871], [1.8581710293650173], [2.365629642108773],
[2.975007440512798]]
assert_array_almost_equal(d, ref_dist)
def test_spatial():
"""Test spatial misc computations"""
mol = oddt.toolkit.readstring('smi', 'c1ccccc1')
mol.make3D()
mol2 = mol.clone
# Test rotation
assert_almost_equal(mol2.coords, rotate(mol2.coords, np.pi, np.pi, np.pi))
# Rotate perpendicular to ring
mol2.coords = rotate(mol2.coords, 0, 0, np.pi)
# RMSD
assert_almost_equal(rmsd(mol, mol2, method=None), 2.77, decimal=1)
# Hungarian must be close to zero (RDKit is 0.3)
assert_almost_equal(rmsd(mol, mol2, method='hungarian'), 0, decimal=0)
# Minimized by symmetry must be close to zero
assert_almost_equal(rmsd(mol, mol2, method='min_symmetry'), 0, decimal=0)
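# The three RMSD modes exercised here: method=None compares atoms in input order,
# 'hungarian' finds an optimal element-wise atom assignment, and 'min_symmetry'
# minimises over symmetry-equivalent orderings, so the latter two should recover
# roughly zero for a benzene ring rotated onto itself.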
def test_rmsd():
# pick one molecule from docked poses
mols = list(oddt.toolkit.readfile('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf')))
mols = list(filter(lambda x: x.title == '312335', mols))
res = {
'method=None':
[4.7536, 2.5015, 2.7942, 1.1282, 0.7444, 1.6257, 4.7625,
2.7168, 2.5504, 1.9304, 2.6201, 3.1742, 3.2254, 4.7785,
4.8035, 7.8963, 2.2385, 4.8625, 3.2037],
'method=hungarian':
[0.9013, 1.0730, 1.0531, 1.0286, 0.7353, 1.4094, 0.5391,
1.3297, 1.0881, 1.7796, 2.6064, 3.1577, 3.2135, 0.8126,
1.2909, 2.5217, 2.0836, 1.8325, 3.1874],
'method=min_symmetry':
[0.9013, 1.0732, 1.0797, 1.0492, 0.7444, 1.6257, 0.5391,
1.5884, 1.0935, 1.9304, 2.6201, 3.1742, 3.2254, 1.1513,
1.5206, 2.5361, 2.2385, 1.971, 3.2037],
}
kwargs_grid = [{'method': None},
{'method': 'hungarian'},
{'method': 'min_symmetry'}]
for kwargs in kwargs_grid:
res_key = '_'.join('%s=%s' % (k, v)
for k, v in sorted(kwargs.items()))
assert_array_almost_equal([rmsd(mols[0], mol, **kwargs)
for mol in mols[1:]],
res[res_key], decimal=4)
# test shuffled rmsd
for _ in range(5):
for kwargs in kwargs_grid:
# don't use method=None in shuffled tests
if kwargs['method'] is None:
continue
res_key = '_'.join('%s=%s' % (k, v)
for k, v in sorted(kwargs.items()))
assert_array_almost_equal([rmsd(mols[0],
shuffle_mol(mol),
**kwargs)
for mol in mols[1:]],
res[res_key], decimal=4)
def test_rmsd_errors():
mol = oddt.toolkit.readstring('smi', 'c1ccccc1')
mol.make3D()
mol.addh()
mol2 = next(oddt.toolkit.readfile('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf')))
for method in [None, 'hungarian', 'min_symmetry']:
with pytest.raises(ValueError, match='Unequal number of atoms'):
rmsd(mol, mol2, method=method)
for _ in range(5):
with pytest.raises(ValueError, match='Unequal number of atoms'):
rmsd(shuffle_mol(mol), shuffle_mol(mol2), method=method)
|
oddt/oddt
|
tests/test_spatial.py
|
Python
|
bsd-3-clause
| 7,062
|
[
"RDKit"
] |
3f3a0dd70cf841a0dd4ffaea640450399a1eb20e2925afc9e377162a2cf34cb8
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import os
import re
import json
import warnings
import numpy as np
from io import open
from enum import Enum
from pymatgen.core.units import Mass, Length, unitized, FloatWithUnit, Unit, \
SUPPORTED_UNIT_NAMES
from pymatgen.util.string import formula_double_format
from monty.json import MSONable
from itertools import product, combinations
from collections import Counter
"""
Module contains classes representing Element and Specie (Element + oxidation
state) and PeriodicTable.
"""
__author__ = "Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
# Loads element data from json file
with open(os.path.join(os.path.dirname(__file__),
"periodic_table.json"), "rt") as f:
_pt_data = json.load(f)
_pt_row_sizes = (2, 8, 8, 18, 18, 32, 32)
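# Minimal usage sketch (the Enum members are defined below; values come from
# periodic_table.json):
#
#   fe = Element("Fe")        # same singleton object as Element.Fe
#   fe.Z                      # 26, the "Atomic no" entry
#   fe.atomic_mass            # Mass in "amu"
#   fe.X                      # Pauling electronegativity, or inf with a warning if missing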
class Element(Enum):
"""
Basic immutable element object with all relevant properties.
Only one instance of Element for each symbol is stored after creation,
ensuring that a particular element behaves like a singleton. For all
attributes, missing data (i.e., data which is not available) is
represented by a None unless otherwise stated.
Args:
symbol (str): Element symbol, e.g., "H", "Fe"
.. attribute:: Z
Atomic number
.. attribute:: symbol
Element symbol
.. attribute:: X
Pauling electronegativity. Elements without an electronegativity
number are assigned a value of infinity (with a warning) by default.
.. attribute:: number
Alternative attribute for atomic number
.. attribute:: max_oxidation_state
Maximum oxidation state for element
.. attribute:: min_oxidation_state
Minimum oxidation state for element
.. attribute:: oxidation_states
Tuple of all known oxidation states
.. attribute:: common_oxidation_states
Tuple of all common oxidation states
.. attribute:: full_electronic_structure
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
.. attribute:: row
Returns the periodic table row of the element.
.. attribute:: group
Returns the periodic table group of the element.
.. attribute:: block
Return the block character "s,p,d,f"
.. attribute:: is_noble_gas
True if element is noble gas.
.. attribute:: is_transition_metal
True if element is a transition metal.
.. attribute:: is_rare_earth_metal
True if element is a rare earth metal.
.. attribute:: is_metalloid
True if element is a metalloid.
.. attribute:: is_alkali
True if element is an alkali metal.
.. attribute:: is_alkaline
True if element is an alkaline earth metal (group II).
.. attribute:: is_halogen
True if element is a halogen.
.. attribute:: is_lanthanoid
True if element is a lanthanoid.
.. attribute:: is_actinoid
True if element is an actinoid.
.. attribute:: long_name
Long name for element. E.g., "Hydrogen".
.. attribute:: atomic_mass
Atomic mass for the element.
.. attribute:: atomic_radius
Atomic radius for the element. This is the empirical value. Data is
obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: atomic_radius_calculated
Calculated atomic radius for the element. This is the theoretical value.
Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: van_der_waals_radius
Van der Waals radius for the element. This is the empirical
value. Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: mendeleev_no
Mendeleev number
.. attribute:: electrical_resistivity
Electrical resistivity
.. attribute:: velocity_of_sound
Velocity of sound
.. attribute:: reflectivity
Reflectivity
.. attribute:: refractive_index
Refractive index
.. attribute:: poissons_ratio
Poisson's ratio
.. attribute:: molar_volume
Molar volume
.. attribute:: electronic_structure
Electronic structure. Simplified form with HTML formatting.
E.g., The electronic structure for Fe is represented as
[Ar].3d<sup>6</sup>.4s<sup>2</sup>
.. attribute:: atomic_orbitals
Atomic Orbitals. Energy of the atomic orbitals as a dict.
E.g., the orbital energies in eV are represented as
{'1s': -1.0, '2s': -0.1}
Data is obtained from
https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
The LDA values for neutral atoms are used
.. attribute:: thermal_conductivity
Thermal conductivity
.. attribute:: boiling_point
Boiling point
.. attribute:: melting_point
Melting point
.. attribute:: critical_temperature
Critical temperature
.. attribute:: superconduction_temperature
Superconduction temperature
.. attribute:: liquid_range
Liquid range
.. attribute:: bulk_modulus
Bulk modulus
.. attribute:: youngs_modulus
Young's modulus
.. attribute:: brinell_hardness
Brinell hardness
.. attribute:: rigidity_modulus
Rigidity modulus
.. attribute:: mineral_hardness
Mineral hardness
.. attribute:: vickers_hardness
Vickers hardness
.. attribute:: density_of_solid
Density of solid phase
.. attribute:: coefficient_of_linear_thermal_expansion
Coefficient of linear thermal expansion
.. attribute:: average_ionic_radius
Average ionic radius for element in ang. The average is taken over all
oxidation states of the element for which data is present.
.. attribute:: ionic_radii
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
# This name = value convention is redundant and dumb, but unfortunately is
# necessary to preserve backwards compatibility with a time when Element was
# a regular object constructed with Element(symbol).
H = "H"
He = "He"
Li = "Li"
Be = "Be"
B = "B"
C = "C"
N = "N"
O = "O"
F = "F"
Ne = "Ne"
Na = "Na"
Mg = "Mg"
Al = "Al"
Si = "Si"
P = "P"
S = "S"
Cl = "Cl"
Ar = "Ar"
K = "K"
Ca = "Ca"
Sc = "Sc"
Ti = "Ti"
V = "V"
Cr = "Cr"
Mn = "Mn"
Fe = "Fe"
Co = "Co"
Ni = "Ni"
Cu = "Cu"
Zn = "Zn"
Ga = "Ga"
Ge = "Ge"
As = "As"
Se = "Se"
Br = "Br"
Kr = "Kr"
Rb = "Rb"
Sr = "Sr"
Y = "Y"
Zr = "Zr"
Nb = "Nb"
Mo = "Mo"
Tc = "Tc"
Ru = "Ru"
Rh = "Rh"
Pd = "Pd"
Ag = "Ag"
Cd = "Cd"
In = "In"
Sn = "Sn"
Sb = "Sb"
Te = "Te"
I = "I"
Xe = "Xe"
Cs = "Cs"
Ba = "Ba"
La = "La"
Ce = "Ce"
Pr = "Pr"
Nd = "Nd"
Pm = "Pm"
Sm = "Sm"
Eu = "Eu"
Gd = "Gd"
Tb = "Tb"
Dy = "Dy"
Ho = "Ho"
Er = "Er"
Tm = "Tm"
Yb = "Yb"
Lu = "Lu"
Hf = "Hf"
Ta = "Ta"
W = "W"
Re = "Re"
Os = "Os"
Ir = "Ir"
Pt = "Pt"
Au = "Au"
Hg = "Hg"
Tl = "Tl"
Pb = "Pb"
Bi = "Bi"
Po = "Po"
At = "At"
Rn = "Rn"
Fr = "Fr"
Ra = "Ra"
Ac = "Ac"
Th = "Th"
Pa = "Pa"
U = "U"
Np = "Np"
Pu = "Pu"
Am = "Am"
Cm = "Cm"
Bk = "Bk"
Cf = "Cf"
Es = "Es"
Fm = "Fm"
Md = "Md"
No = "No"
Lr = "Lr"
def __init__(self, symbol):
self.symbol = "%s" % symbol
d = _pt_data[symbol]
# Store key variables for quick access
self.Z = d["Atomic no"]
at_r = d.get("Atomic radius", "no data")
if str(at_r).startswith("no data"):
self.atomic_radius = None
else:
self.atomic_radius = Length(at_r, "ang")
self.atomic_mass = Mass(d["Atomic mass"], "amu")
self.long_name = d["Name"]
self._data = d
@property
def X(self):
if "X" in self._data:
return self._data["X"]
else:
warnings.warn("No electronegativity for %s. Setting to infinity. "
"This has no physical meaning, and is mainly done to "
"avoid errors caused by the code expecting a float."
% self.symbol)
return float("inf")
def __getattr__(self, item):
if item in ["mendeleev_no", "electrical_resistivity",
"velocity_of_sound", "reflectivity",
"refractive_index", "poissons_ratio", "molar_volume",
"electronic_structure", "thermal_conductivity",
"boiling_point", "melting_point",
"critical_temperature", "superconduction_temperature",
"liquid_range", "bulk_modulus", "youngs_modulus",
"brinell_hardness", "rigidity_modulus",
"mineral_hardness", "vickers_hardness",
"density_of_solid", "atomic_radius_calculated",
"van_der_waals_radius", "atomic_orbitals",
"coefficient_of_linear_thermal_expansion",
"ground_state_term_symbol", "valence"]:
kstr = item.capitalize().replace("_", " ")
val = self._data.get(kstr, None)
if str(val).startswith("no data"):
val = None
elif type(val) == dict:
pass
else:
try:
val = float(val)
except ValueError:
nobracket = re.sub(r'\(.*\)', "", val)
toks = nobracket.replace("about", "").strip().split(" ", 1)
if len(toks) == 2:
try:
if "10<sup>" in toks[1]:
base_power = re.findall(r'([+-]?\d+)', toks[1])
factor = "e" + base_power[1]
if toks[0] in [">", "high"]:
toks[0] = "1" # return the border value
toks[0] += factor
if item == "electrical_resistivity":
unit = "ohm m"
elif (
item ==
"coefficient_of_linear_thermal_expansion"
):
unit = "K^-1"
else:
unit = toks[1]
val = FloatWithUnit(toks[0], unit)
else:
unit = toks[1].replace("<sup>", "^").replace(
"</sup>", "").replace("Ω",
"ohm")
units = Unit(unit)
if set(units.keys()).issubset(
SUPPORTED_UNIT_NAMES):
val = FloatWithUnit(toks[0], unit)
except ValueError as ex:
# Ignore error. val will just remain a string.
pass
return val
raise AttributeError
@property
def data(self):
"""
Returns dict of data for element.
"""
return self._data.copy()
@property
@unitized("ang")
def average_ionic_radius(self):
"""
Average ionic radius for element (with units). The average is taken
over all oxidation states of the element for which data is present.
"""
if "Ionic radii" in self._data:
radii = self._data["Ionic radii"]
return sum(radii.values()) / len(radii)
else:
return 0
@property
@unitized("ang")
def ionic_radii(self):
"""
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
if "Ionic radii" in self._data:
return {int(k): v for k, v in self._data["Ionic radii"].items()}
else:
return {}
@property
def number(self):
"""Alternative attribute for atomic number"""
return self.Z
@property
def max_oxidation_state(self):
"""Maximum oxidation state for element"""
if "Oxidation states" in self._data:
return max(self._data["Oxidation states"])
return 0
@property
def min_oxidation_state(self):
"""Minimum oxidation state for element"""
if "Oxidation states" in self._data:
return min(self._data["Oxidation states"])
return 0
@property
def oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Oxidation states", list()))
@property
def common_oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Common oxidation states", list()))
@property
def icsd_oxidation_states(self):
"""Tuple of all oxidation states with at least 10 instances in
ICSD database AND at least 1% of entries for that element"""
return tuple(self._data.get("ICSD oxidation states", list()))
@property
def full_electronic_structure(self):
"""
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
"""
estr = self._data["Electronic structure"]
def parse_orbital(orbstr):
m = re.match(r"(\d+)([spdfg]+)<sup>(\d+)</sup>", orbstr)
if m:
return int(m.group(1)), m.group(2), int(m.group(3))
return orbstr
data = [parse_orbital(s) for s in estr.split(".")]
if data[0][0] == "[":
sym = data[0].replace("[", "").replace("]", "")
data = Element(sym).full_electronic_structure + data[1:]
return data
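# A hedged usage sketch (not part of the original module): the property above
# parses the "Electronic structure" string into (n, subshell, occupancy) tuples,
# expanding noble-gas cores recursively. For Fe it reproduces the tuple quoted
# in the class docstring:
#
#     >>> Element("Fe").full_electronic_structure
#     [(1, 's', 2), (2, 's', 2), (2, 'p', 6), (3, 's', 2), (3, 'p', 6),
#      (3, 'd', 6), (4, 's', 2)]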
@property
def valence(self):
"""
From the full electron configuration, obtain the valence subshell
angular momentum (L) and the number of valence electrons (v_e).
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
valence = []
full_electron_config = self.full_electronic_structure
for _, l_symbol, ne in full_electron_config[::-1]:
l = L_symbols.lower().index(l_symbol)
if ne < (2 * l + 1) * 2:
valence.append((l, ne))
if len(valence) > 1:
raise ValueError("Ambiguous valence")
return valence[0]
@property
def term_symbols(self):
"""
All possible Russell-Saunders term symbols of the Element,
e.g. L = 1, n_e = 2 (p2)
returns
[['1D2'], ['3P0', '3P1', '3P2'], ['1S0']]
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
L, v_e = self.valence
# for one electron in subshell L
ml = list(range(-L, L + 1))
ms = [1 / 2, -1 / 2]
# all possible configurations of ml,ms for one e in subshell L
ml_ms = list(product(ml, ms))
# Number of possible configurations for r electrons in subshell L.
n = (2 * L + 1) * 2
# the combination of n_e electrons configurations
# C^{n}_{n_e}
e_config_combs = list(combinations(range(n), v_e))
# Total ML = sum(ml1, ml2), Total MS = sum(ms1, ms2)
TL = [sum([ml_ms[comb[e]][0] for e in range(v_e)])
for comb in e_config_combs]
TS = [sum([ml_ms[comb[e]][1] for e in range(v_e)])
for comb in e_config_combs]
comb_counter = Counter([r for r in zip(TL, TS)])
term_symbols = []
while sum(comb_counter.values()) > 0:
# Start from the lowest freq combination,
# which corresponds to largest abs(L) and smallest abs(S)
L, S = min(comb_counter)
J = list(np.arange(abs(L - S), abs(L) + abs(S) + 1))
term_symbols.append([str(int(2 * (abs(S)) + 1))
+ L_symbols[abs(L)]
+ str(j) for j in J])
# Without J
# term_symbols.append(str(int(2 * (abs(S)) + 1)) \
# + L_symbols[abs(L)])
# Delete all configurations included in this term
for ML in range(-L, L - 1, -1):
for MS in np.arange(S, -S + 1, 1):
if (ML, MS) in comb_counter:
comb_counter[(ML, MS)] -= 1
if comb_counter[(ML, MS)] == 0:
del comb_counter[(ML, MS)]
return term_symbols
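# Hedged example of the enumeration above (assuming carbon's valence subshell
# is 2p^2, i.e. L = 1, v_e = 2); the output matches the p2 case quoted in the
# docstring:
#
#     >>> Element("C").term_symbols
#     [['1D2'], ['3P0', '3P1', '3P2'], ['1S0']]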
@property
def ground_state_term_symbol(self):
"""
Ground state term symbol
Selected based on Hund's Rule
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
term_symbols = self.term_symbols
term_symbol_flat = {term: {"multiplicity": int(term[0]),
"L": L_symbols.index(term[1]),
"J": float(term[2:])}
for term in sum(term_symbols, [])}
multi = [int(item['multiplicity'])
for terms, item in term_symbol_flat.items()]
max_multi_terms = {symbol: item
for symbol, item in term_symbol_flat.items()
if item['multiplicity'] == max(multi)}
Ls = [item['L'] for terms, item in max_multi_terms.items()]
max_L_terms = {symbol: item
for symbol, item in term_symbol_flat.items()
if item['L'] == max(Ls)}
J_sorted_terms = sorted(max_L_terms.items(),
key=lambda k: k[1]['J'])
L, v_e = self.valence
if v_e <= (2 * L + 1):
return J_sorted_terms[0][0]
else:
return J_sorted_terms[-1][0]
def __eq__(self, other):
return isinstance(other, Element) and self.Z == other.Z
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.Z
def __repr__(self):
return "Element " + self.symbol
def __str__(self):
return self.symbol
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity. Very
useful for getting correct formulas. For example, FeO4PLi is
automatically sorted into LiFePO4.
"""
if self.X != other.X:
return self.X < other.X
else:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
@staticmethod
def from_Z(z):
"""
Get an element from an atomic number.
Args:
z (int): Atomic number
Returns:
Element with atomic number z.
"""
for sym, data in _pt_data.items():
if data["Atomic no"] == z:
return Element(sym)
raise ValueError("No element with this atomic number %s" % z)
@staticmethod
def from_row_and_group(row, group):
"""
Returns an element from a row and group number.
Args:
row (int): Row number
group (int): Group number
.. note::
The 18 group number system is used, i.e., Noble gases are group 18.
"""
for sym in _pt_data.keys():
el = Element(sym)
if el.row == row and el.group == group:
return el
raise ValueError("No element with this row and group!")
@staticmethod
def is_valid_symbol(symbol):
"""
Returns true if symbol is a valid element symbol.
Args:
symbol (str): Element symbol
Returns:
True if symbol is a valid element (e.g., "H"). False otherwise
(e.g., "Zebra").
"""
try:
Element(symbol)
return True
except:
return False
@property
def row(self):
"""
Returns the periodic table row of the element.
"""
z = self.Z
total = 0
if 57 <= z <= 71:
return 8
elif 89 <= z <= 103:
return 9
for i in range(len(_pt_row_sizes)):
total += _pt_row_sizes[i]
if total >= z:
return i + 1
return 8
@property
def group(self):
"""
Returns the periodic table group of the element.
"""
z = self.Z
if z == 1:
return 1
if z == 2:
return 18
if 3 <= z <= 18:
if (z - 2) % 8 == 0:
return 18
elif (z - 2) % 8 <= 2:
return (z - 2) % 8
else:
return 10 + (z - 2) % 8
if 19 <= z <= 54:
if (z - 18) % 18 == 0:
return 18
else:
return (z - 18) % 18
if (z - 54) % 32 == 0:
return 18
elif (z - 54) % 32 >= 18:
return (z - 54) % 32 - 14
else:
return (z - 54) % 32
@property
def block(self):
"""
Return the block character "s,p,d,f"
"""
block = ""
if (self.is_actinoid or self.is_lanthanoid) and \
self.Z not in [71, 103]:
block = "f"
elif self.is_actinoid or self.is_lanthanoid:
block = "d"
elif self.group in [1, 2]:
block = "s"
elif self.group in range(13, 19):
block = "p"
elif self.group in range(3, 13):
block = "d"
else:
raise ValueError("unable to determine block")
return block
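# Hedged sketch of how row, group and block combine for a transition metal
# (the row value assumes the usual period lengths stored in _pt_row_sizes):
#
#     >>> fe = Element("Fe")       # Z = 26
#     >>> fe.row, fe.group, fe.block
#     (4, 8, 'd')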
@property
def is_noble_gas(self):
"""
True if element is noble gas.
"""
return self.Z in (2, 10, 18, 36, 54, 86, 118)
@property
def is_transition_metal(self):
"""
True if element is a transition metal.
"""
ns = list(range(21, 31))
ns.extend(list(range(39, 49)))
ns.append(57)
ns.extend(list(range(72, 81)))
ns.append(89)
ns.extend(list(range(104, 113)))
return self.Z in ns
@property
def is_rare_earth_metal(self):
"""
True if element is a rare earth metal.
"""
return self.is_lanthanoid or self.is_actinoid
@property
def is_metalloid(self):
"""
True if element is a metalloid.
"""
return self.symbol in ("B", "Si", "Ge", "As", "Sb", "Te", "Po")
@property
def is_alkali(self):
"""
True if element is an alkali metal.
"""
return self.Z in (3, 11, 19, 37, 55, 87)
@property
def is_alkaline(self):
"""
True if element is an alkaline earth metal (group II).
"""
return self.Z in (4, 12, 20, 38, 56, 88)
@property
def is_halogen(self):
"""
True if element is a halogen.
"""
return self.Z in (9, 17, 35, 53, 85)
@property
def is_chalcogen(self):
"""
True if element is a chalcogen.
"""
return self.Z in (8, 16, 34, 52, 84)
@property
def is_lanthanoid(self):
"""
True if element is a lanthanoid.
"""
return 56 < self.Z < 72
@property
def is_actinoid(self):
"""
True if element is an actinoid.
"""
return 88 < self.Z < 104
@property
def is_quadrupolar(self):
"""
Checks if this element can be quadrupolar
"""
return len(self.data.get("NMR Quadrupole Moment",{})) > 0
@property
def nmr_quadrupole_moment(self):
"""
Get a dictionary of the nuclear electric quadrupole moments in units of
e*millibarns for the various isotopes.
"""
return {k: FloatWithUnit(v,"mbarn") for k,v in self.data.get("NMR Quadrupole Moment",{}).items()}
def __deepcopy__(self, memo):
return Element(self.symbol)
@staticmethod
def from_dict(d):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return Element(d["element"])
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol}
@staticmethod
def print_periodic_table(filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some
filter_function.
Args:
filter_function: A filtering function taking an Element as input
and returning a boolean. For example, setting
filter_function = lambda el: el.X > 2 will print a periodic
table containing only elements with electronegativity > 2.
"""
for row in range(1, 10):
rowstr = []
for group in range(1, 19):
try:
el = Element.from_row_and_group(row, group)
except ValueError:
el = None
if el and ((not filter_function) or filter_function(el)):
rowstr.append("{:3s}".format(el.symbol))
else:
rowstr.append(" ")
print(" ".join(rowstr))
class Specie(MSONable):
"""
An extension of Element with an oxidation state and other optional
properties. Properties associated with Specie should be "idealized"
values, not calculated values. For example, high-spin Fe2+ may be
assigned an idealized spin of +5, but an actual Fe2+ site may be
calculated to have a magmom of +4.5. Calculated properties should be
assigned to Site objects, and not Specie.
Args:
symbol (str): Element symbol, e.g., Fe
oxidation_state (float): Oxidation state of element, e.g., 2 or -2
properties: Properties associated with the Specie, e.g.,
{"spin": 5}. Defaults to None. Properties must be one of the
Specie supported_properties.
.. attribute:: oxi_state
Oxidation state associated with Specie
.. attribute:: ionic_radius
Ionic radius of Specie (with specific oxidation state).
.. versionchanged:: 2.6.7
Properties are now checked when comparing two Species for equality.
"""
supported_properties = ("spin",)
def __init__(self, symbol, oxidation_state=None, properties=None):
self._el = Element(symbol)
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
try:
return getattr(self._el, a)
except:
raise AttributeError(a)
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
return isinstance(other, Specie) and self.symbol == other.symbol \
and self.oxi_state == other.oxi_state \
and self._properties == other._properties
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Equal Specie should have the same str representation, hence
should hash equally. Unequal Specie will have different str
representations.
"""
return self.__str__().__hash__()
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state, followed by spin.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
elif self.oxi_state:
other_oxi = 0 if (isinstance(other, Element)
or other.oxi_state is None) else other.oxi_state
return self.oxi_state < other_oxi
elif getattr(self, "spin", False):
other_spin = getattr(other, "spin", 0)
return self.spin < other_spin
else:
return False
@property
def element(self):
"""
Underlying element object
"""
return self._el
@property
def ionic_radius(self):
"""
Ionic radius of specie. Returns None if data is not present.
"""
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
elif oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None
@property
def oxi_state(self):
"""
Oxidation state of Specie.
"""
return self._oxi_state
@staticmethod
def from_string(species_string):
"""
Returns a Specie from a string representation.
Args:
species_string (str): A typical string representation of a
species, e.g., "Mn2+", "Fe3+", "O2-".
Returns:
A Specie object.
Raises:
ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", species_string)
if m:
sym = m.group(1)
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).replace(",", "").split("=")
properties = {toks[0]: float(toks[1])}
return Specie(sym, oxi, properties)
else:
raise ValueError("Invalid Species String")
def __repr__(self):
return "Specie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_nmr_quadrupole_moment(self,isotope=None):
"""
Gets the nuclear electric quadrupole moment in units of
e*millibarns
Args:
isotope (str): the isotope to get the quadrupole moment for
default is None, which gets the lowest mass isotope
"""
quad_mom = self._el.nmr_quadrupole_moment
if len(quad_mom) == 0:
return 0.0
if isotope is None:
isotopes = list(quad_mom.keys())
isotopes.sort(key=lambda x: int(x.split("-")[1]), reverse=False)
return quad_mom.get(isotopes[0],0.0)
else:
if isotope not in quad_mom:
raise ValueError("No quadrupole moment for isotope {}".format(isotope))
return quad_mom.get(isotope,0.0)
def get_shannon_radius(self, cn, spin="", radius_type="ionic"):
"""
Get the local environment specific ionic radius for species.
Args:
cn (str): Coordination using roman letters. Supported values are
I-IX, as well as IIIPY, IVPY and IVSQ.
spin (str): Some species have different radii for different
spins. You can get specific values using "High Spin" or
"Low Spin". Leave it as "" if not available. If only one spin
data is available, it is returned and this spin parameter is
ignored.
radius_type (str): Either "crystal" or "ionic" (default).
Returns:
Shannon radius for specie in the specified environment.
"""
radii = self._el.data["Shannon radii"]
# if cn == 1:
# cn_str = "I"
# elif cn == 2:
# cn_str = "II"
# elif cn == 3:
# cn_str = "III"
# elif cn == 4:
# cn_str = "IV"
# elif cn == 5:
# cn_str = "V"
# elif cn == 6:
# cn_str = "VI"
# elif cn == 7:
# cn_str = "VII"
# elif cn == 8:
# cn_str = "VIII"
# elif cn == 9:
# cn_str = "IX"
# else:
# raise ValueError("Invalid coordination number")
if len(radii[str(int(self._oxi_state))][cn]) == 1:
k, data = list(radii[str(int(self._oxi_state))][cn].items())[0]
if k != spin:
warnings.warn(
"Specified spin state of %s not consistent with database "
"spin of %s. Because there is only one spin data available, "
"that value is returned." % (spin, k)
)
else:
data = radii[str(int(self._oxi_state))][cn][spin]
return data["%s_radius" % radius_type]
def get_crystal_field_spin(self, coordination="oct", spin_config="high"):
"""
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
"""
if coordination not in ("oct", "tet") or \
spin_config not in ("high", "low"):
raise ValueError("Invalid coordination or spin config.")
elec = self.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(
"Invalid element {} for crystal field calculation.".format(
self.symbol))
nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(
"Invalid oxidation state {} for element {}"
.format(self.oxi_state, self.symbol))
if spin_config == "high":
return nelectrons if nelectrons <= 5 else 10 - nelectrons
elif spin_config == "low":
if coordination == "oct":
if nelectrons <= 3:
return nelectrons
elif nelectrons <= 6:
return 6 - nelectrons
elif nelectrons <= 8:
return nelectrons - 6
else:
return 10 - nelectrons
elif coordination == "tet":
if nelectrons <= 2:
return nelectrons
elif nelectrons <= 4:
return 4 - nelectrons
elif nelectrons <= 7:
return nelectrons - 4
else:
return 10 - nelectrons
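# Worked example of the branching above: Fe2+ has nelectrons = 2 + 6 - 2 = 6
# (4s2 3d6 minus the oxidation state), so the octahedral high-spin case returns
# 10 - 6 = 4 unpaired electrons.
#
#     >>> Specie("Fe", 2).get_crystal_field_spin("oct", "high")
#     4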
def __deepcopy__(self, memo):
return Specie(self.symbol, self.oxi_state, self._properties)
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
class DummySpecie(Specie):
"""
A special specie for representing non-traditional elements or species. For
example, representation of vacancies (charged or otherwise), or special
sites, etc.
Args:
symbol (str): An assigned symbol for the dummy specie. Strict
rules are applied to the choice of the symbol. The dummy
symbol's first one or two letters cannot form a valid Element
symbol; otherwise, a composition may be parsed incorrectly.
E.g., "X" is fine, but "Vac" is not
because Vac contains V, a valid Element.
oxidation_state (float): Oxidation state for dummy specie.
Defaults to zero.
.. attribute:: symbol
Symbol for the DummySpecie.
.. attribute:: oxi_state
Oxidation state associated with Specie.
.. attribute:: Z
DummySpecie is always assigned an atomic number equal to the hash
number of the symbol. Obviously, it makes no sense whatsoever to use
the atomic number of a Dummy specie for anything scientific. The purpose
of this is to ensure that for most use cases, a DummySpecie behaves no
differently from an Element or Specie.
.. attribute:: X
DummySpecie is always assigned an electronegativity of 0.
"""
def __init__(self, symbol="X", oxidation_state=0, properties=None):
for i in range(1, min(2, len(symbol)) + 1):
if Element.is_valid_symbol(symbol[:i]):
raise ValueError("{} contains {}, which is a valid element "
"symbol.".format(symbol, symbol[:i]))
# Set required attributes for DummySpecie to function like a Specie in
# most instances.
self._symbol = symbol
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
raise AttributeError(a)
def __hash__(self):
return self.symbol.__hash__()
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
if not isinstance(other, DummySpecie):
return False
return isinstance(other, Specie) and self.symbol == other.symbol \
and self.oxi_state == other.oxi_state \
and self._properties == other._properties
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
else:
other_oxi = 0 if isinstance(other, Element) else other.oxi_state
return self.oxi_state < other_oxi
@property
def Z(self):
"""
DummySpecie is always assigned an atomic number equal to the hash of
the symbol. The expectation is that someone would be an actual dummy
to use atomic numbers for a Dummy specie.
"""
return self.symbol.__hash__()
@property
def oxi_state(self):
"""
Oxidation state associated with DummySpecie
"""
return self._oxi_state
@property
def X(self):
"""
DummySpecie is always assigned an electronegativity of 0. The effect of
this is that DummySpecie are always sorted in front of actual Specie.
"""
return 0
@property
def symbol(self):
return self._symbol
def __deepcopy__(self, memo):
return DummySpecie(self.symbol, self._oxi_state)
@staticmethod
def from_string(species_string):
"""
Returns a Dummy from a string representation.
Args:
species_string (str): A string representation of a dummy
species, e.g., "X2+", "X3+".
Returns:
A DummySpecie object.
Raises:
ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-]*)(.*)", species_string)
if m:
sym = m.group(1)
if m.group(2) == "" and m.group(3) == "":
oxi = 0
else:
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).split("=")
properties = {toks[0]: float(toks[1])}
return DummySpecie(sym, oxi, properties)
raise ValueError("Invalid DummySpecies String")
@classmethod
def safe_from_composition(cls, comp, oxidation_state=0):
"""
Returns a DummySpecie object that can be safely used
with (i.e. not present in) a given composition
"""
# We don't want to add a DummySpecie with the same
# symbol as anything in the composition, even if the
# oxidation state is different
els = comp.element_composition.elements
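# note: 'e' is deliberately absent from the candidate letters below, because
# 'Xe' is a valid element symbol and would be rejected by DummySpecie.__init__.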
for c in 'abcdfghijklmnopqrstuvwxyz':
if DummySpecie('X' + c) not in els:
return DummySpecie('X' + c, oxidation_state)
raise ValueError("All attempted DummySpecies already "
"present in {}".format(comp))
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
def __repr__(self):
return "DummySpecie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummyElement parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
except:
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj))
|
gpetretto/pymatgen
|
pymatgen/core/periodic_table.py
|
Python
|
mit
| 46,496
|
[
"CRYSTAL",
"pymatgen"
] |
06ddbb0ac8bf8c9e887b5bcba670e729018d58da137dd2ec1bb30c2ad884ca36
|
# -*- coding: utf-8 -*-
#
#Created on Fri Apr 14 13:37:08 2017
#
#author: Elina Thibeau-Sutre
#
from .initializations import initialize_log_assignements,initialize_mcw
from .base import _log_B,_log_C
from .base import BaseMixture
from .base import _log_normal_matrix
from .base import _full_covariance_matrices,_spherical_covariance_matrices
import numpy as np
import scipy.special
from scipy.misc import logsumexp
class VariationalGaussianMixture(BaseMixture):
"""
Variational Bayesian Estimation of a Gaussian Mixture
This class infers an approximate posterior distribution over the
parameters of a Gaussian mixture distribution.
The weights distribution is a Dirichlet distribution with parameter alpha
(see Bishop's book p474-486)
Parameters
----------
n_components : int, defaults to 1.
Number of clusters used.
init : str, defaults to 'kmeans'.
Method used in order to perform the initialization,
must be in ['random', 'plus', 'AF_KMC', 'kmeans', 'GMM'].
reg_covar : float, defaults to 1e-6
In order to avoid null covariances this float is added to the diagonal
of covariance matrices.
type_init : str, defaults to 'resp'.
The algorithm is initialized using this data (responsibilities if 'resp'
or means, covariances and weights if 'mcw').
Other parameters
----------------
alpha_0 : float, Optional | defaults to None.
The prior parameter on the weight distribution (Dirichlet).
A high value of alpha_0 will lead to equal weights, while a low value
will allow some clusters to shrink and disappear. Must be greater than 0.
If None, the value is set to 1/n_components
beta_0 : float, Optional | defaults to None.
The precision prior on the mean distribution (Gaussian).
Must be greater than 0.
If None, the value is set to 1.0
nu_0 : float, Optional | defaults to None.
The prior of the number of degrees of freedom on the covariance
distributions (Wishart). Must be greater or equal to dim.
If None, the value is set to dim
means_prior : array (dim,), Optional | defaults to None
The prior value to compute the value of the means.
If None, the value is set to the mean of points_data
cov_wishart_prior : type depends on covariance_type, Optional | defaults to None
If covariance_type is 'full' type must be array (dim,dim)
If covariance_type is 'spherical' type must be float
The prior value to compute the value of the precisions.
If None, the value is set to the covariance of points_data
Attributes
----------
name : str
The name of the method : 'VBGMM'
alpha : array of floats (n_components,)
Contains the parameters of the weight distribution (Dirichlet)
beta : array of floats (n_components,)
Contains coefficients which are multiplied with the precision matrices
to form the precision matrix on the Gaussian distribution of the means.
nu : array of floats (n_components,)
Contains the number of degrees of freedom on the distribution of
covariance matrices.
_inv_prec : array of floats (n_components,dim,dim)
Contains the equivalent of the matrix W described in Bishop's book. It
is proportional to cov.
_log_det_inv_prec : array of floats (n_components,)
Contains the logarithm of the determinant of W matrices.
cov : array of floats (n_components,dim,dim)
Contains the computed covariance matrices of the mixture.
means : array of floats (n_components,dim)
Contains the computed means of the mixture.
log_weights : array of floats (n_components,)
Contains the logarithm of weights of each cluster.
iter : int
The number of iterations computed with the method fit()
convergence_criterion_data : array of floats (iter,)
Stores the value of the convergence criterion computed with data
on which the model is fitted.
convergence_criterion_test : array of floats (iter,) | if _early_stopping only
Stores the value of the convergence criterion computed with test data
if it exists.
_is_initialized : bool
Ensures that the method _initialize() has been used before using other
methods such as score() or predict_log_assignements().
Raises
------
ValueError : if the parameters are inconsistent, for example if the cluster number is negative, init_type is not in ['resp','mcw']...
References
----------
'Pattern Recognition and Machine Learning', Bishop
"""
def __init__(self, n_components=1,init="kmeans",alpha_0=None,beta_0=None,
nu_0=None,means_prior=None,cov_wishart_prior=None,
reg_covar=1e-6,type_init='resp',n_jobs=1,
boost=None):
super(VariationalGaussianMixture, self).__init__()
self.name = 'VBGMM'
self.n_components = n_components
self.covariance_type = "full"
self.init = init
self.type_init = type_init
self.reg_covar = reg_covar
self.boost = boost
self.alpha_0 = alpha_0
self.beta_0 = beta_0
self.nu_0 = nu_0
self._means_prior = means_prior
self._inv_prec_prior = cov_wishart_prior
self.n_jobs = n_jobs
self._is_initialized = False
self.iter = 0
self.convergence_criterion_data = []
self.convergence_criterion_test = []
self._check_common_parameters()
self._check_parameters()
def _check_parameters(self):
"""
Check the value of the init parameter
"""
if self.init not in ['random', 'random_sk', 'plus', 'kmeans', 'AF_KMC', 'GMM']:
raise ValueError("Invalid value for 'init': %s "
"'init' should be in "
"['random', 'plus', 'kmeans','AF_KMC','GMM']"
% self.init)
if self.boost is not None :
if self.boost < 0:
raise ValueError("Invalid value for 'boost': %s "
"'boost' should be positive"
% self.boost)
if self.init == 'random_sk' and self.type_init=='mcw':
raise ValueError("random_sk is only compatible with"
"type_init = resp")
def _initialize(self,points_data,points_test=None):
"""
This method initializes the Variational Gaussian Mixture by setting the values
of the means, the covariances and other specific parameters (alpha, beta, nu)
Parameters
----------
points_data : an array (n_points,dim)
Data on which the model is fitted.
points_test: an array (n_points,dim) | Optional
Data used to do early stopping (avoid overfitting)
"""
n_points,dim = points_data.shape
self._check_prior_parameters(points_data)
if self.type_init=='resp':
log_assignements = initialize_log_assignements(self.init,self.n_components,points_data,
points_test)
self._inv_prec = np.empty((self.n_components,dim,dim))
self._log_det_inv_prec = np.empty(self.n_components)
self.cov = np.empty((self.n_components,dim,dim))
self._step_M(points_data,log_assignements)
# Boosting covariance matrices
if self.boost is not None:
self.cov *= self.boost
self._inv_prec *= self.boost
self._log_det_inv_prec += dim * np.log(self.boost)
elif self.type_init=='mcw':
# Means, covariances and weights
means,cov,log_weights = initialize_mcw(self.init,self.n_components,points_data,
points_test)
self.cov = cov
self.means = means
self.log_weights = log_weights
# Hyperparameters
N = np.exp(log_weights) * n_points
self.alpha = self.alpha_0 + N
self.beta = self.beta_0 + N
self.nu = self.nu_0 + N
# Matrix W
self._inv_prec = cov * self.nu[:,np.newaxis,np.newaxis]
self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec))
elif self.init=='user':
if self.type_init=='kmeans':
self._initialize_cov(points_data)
# Hyperparameters
N = np.exp(self.log_weights) * n_points
self.alpha = self.alpha_0 + N
self.beta = self.beta_0 + N
self.nu = self.nu_0 + N
# Matrix W
self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis]
self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec))
self._is_initialized = True
def _step_E(self, points):
"""
In this step the algorithm evaluates the responsibilities of each points in each cluster
Parameters
----------
points : an array (n_points,dim)
Returns
-------
log_resp: an array (n_points,n_components)
an array containing the logarithm of the responsibilities.
log_prob_norm : an array (n_points,)
logarithm of the probability of each sample in points
"""
n_points,dim = points.shape
log_gaussian = _log_normal_matrix(points,self.means,self.cov,self.covariance_type,self.n_jobs)
log_gaussian -= 0.5 * dim * np.log(self.nu)
digamma_sum = np.sum(scipy.special.psi(.5 * (self.nu - np.arange(0, dim)[:,np.newaxis])),0)
log_lambda = digamma_sum + dim * np.log(2)
log_prob = self.log_weights + log_gaussian + 0.5 * (log_lambda - dim/self.beta)
log_prob_norm = logsumexp(log_prob, axis=1)
log_resp = log_prob - log_prob_norm[:,np.newaxis]
return log_prob_norm,log_resp
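# Hedged standalone sketch of the normalization used above: responsibilities
# are formed in log space by subtracting the per-point log normalizer, so each
# row of exp(log_resp) sums to one.
#
#     log_prob = np.array([[-1.0, -2.0], [-0.5, -3.0]])
#     log_prob_norm = logsumexp(log_prob, axis=1)
#     log_resp = log_prob - log_prob_norm[:, np.newaxis]
#     np.allclose(np.exp(log_resp).sum(axis=1), 1.0)   # -> True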
def _estimate_wishart_full(self,N,X_barre,S):
"""
This method computes the new value of _inv_prec with the given parameters
(in the case of full covariances)
Parameters
----------
N : an array (n_components,)
the empirical weights
X_barre: an array (n_components,dim)
the empirical means
S: an array (n_components,dim,dim)
the empirical covariances
"""
for i in range(self.n_components):
diff = X_barre[i] - self._means_prior
product = self.beta_0 * N[i]/self.beta[i] * np.outer(diff,diff)
self._inv_prec[i] = self._inv_prec_prior + N[i] * S[i] + product
def _estimate_wishart_spherical(self,N,X_barre,S):
"""
This method computes the new value of _inv_prec with the given parameters
(in the case of spherical covariances)
Parameters
----------
N : an array (n_components,)
the empirical weights
X_barre: an array (n_components,dim)
the empirical means
S: an array (n_components,dim,dim)
the empirical covariances
"""
for i in range(self.n_components):
diff = X_barre[i] - self._means_prior
product = self.beta_0 * N[i] / self.beta[i] * np.mean(np.square(diff), 1)
self._inv_prec[i] = self._inv_prec_prior + N[i] * S[i] + product
# To test
def _step_M(self,points,log_resp):
"""
In this step the algorithm updates the values of the parameters (means, covariances,
alpha, beta, nu).
Parameters
----------
points : an array (n_points,dim)
log_resp: an array (n_points,n_components)
an array containing the logarithm of the responsibilities.
"""
n_points,dim = points.shape
resp = np.exp(log_resp)
# Convenient statistics
N = np.sum(resp,axis=0) + 10 * np.finfo(resp.dtype).eps #Array (n_components,)
X_barre = np.dot(resp.T,points) / N[:,np.newaxis] #Array (n_components,dim)
if self.covariance_type=='full':
S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs) #Array (n_components,dim,dim)
elif self.covariance_type=='spherical':
S = _spherical_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs) #Array (n_components,)
#Parameters update
self.alpha = self.alpha_0 + N
self.beta = self.beta_0 + N
self.nu = self.nu_0 + N
# Weights update
self.log_weights = scipy.special.psi(self.alpha) - scipy.special.psi(np.sum(self.alpha))
# Means update
self.means = (self.beta_0 * self._means_prior + N[:, np.newaxis] * X_barre) / self.beta[:, np.newaxis]
# Covariance update
if self.covariance_type=="full":
self._estimate_wishart_full(N,X_barre,S)
det_inv_prec = np.linalg.det(self._inv_prec)
self._log_det_inv_prec = np.log(det_inv_prec)
self.cov = self._inv_prec / self.nu[:,np.newaxis,np.newaxis]
elif self.covariance_type=="spherical":
self._estimate_wishart_spherical(N,X_barre,S)
det_inv_prec = self._inv_prec**dim
self._log_det_inv_prec = np.log(det_inv_prec)
self.cov = self._inv_prec / self.nu
def _convergence_criterion_simplified(self,points,log_resp,log_prob_norm):
"""
Compute the lower bound of the likelihood using the simplified Bishop's
book formula. Can only be used with data which fits the model.
Parameters
----------
points : an array (n_points,dim)
log_resp: an array (n_points,n_components)
an array containing the logarithm of the responsibilities.
log_prob_norm : an array (n_points,)
logarithm of the probability of each sample in points
Returns
-------
result : float
the lower bound of the likelihood
"""
resp = np.exp(log_resp)
n_points,dim = points.shape
prec = np.linalg.inv(self._inv_prec)
prec_prior = np.linalg.inv(self._inv_prec_prior)
lower_bound = np.zeros(self.n_components)
for i in range(self.n_components):
lower_bound[i] = _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i])
resp_i = resp[:,i:i+1]
log_resp_i = log_resp[:,i:i+1]
lower_bound[i] -= np.sum(resp_i*log_resp_i)
lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i]))
result = np.sum(lower_bound)
result += _log_C(self.alpha_0 * np.ones(self.n_components)) - _log_C(self.alpha)
result -= n_points * dim * 0.5 * np.log(2*np.pi)
return result
def _convergence_criterion(self,points,log_resp,log_prob_norm):
"""
Compute the lower bound of the likelihood using the full formula from
Bishop's book. Unlike in scikit-learn, the formula is not simplified here
because it is also used to compute the lower bound on test points, for
which the simplification does not hold.
Parameters
----------
points : an array (n_points,dim)
log_resp: an array (n_points,n_components)
an array containing the logarithm of the responsibilities.
log_prob_norm : an array (n_points,)
logarithm of the probability of each sample in points
Returns
-------
result : float
the lower bound of the likelihood
"""
resp = np.exp(log_resp)
n_points,dim = points.shape
# Convenient statistics
N = np.exp(logsumexp(log_resp,axis=0)) + 10*np.finfo(resp.dtype).eps #Array (n_components,)
X_barre = np.tile(1/N, (dim,1)).T * np.dot(resp.T,points) #Array (n_components,dim)
S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs)
prec = np.linalg.inv(self._inv_prec)
prec_prior = np.linalg.inv(self._inv_prec_prior)
lower_bound = np.zeros(self.n_components)
for i in range(self.n_components):
digamma_sum = np.sum(scipy.special.psi(.5 * (self.nu[i] - np.arange(0, dim)[:,np.newaxis])),0)
log_det_prec_i = digamma_sum + dim * np.log(2) - self._log_det_inv_prec[i] #/!\ Inverse
#First line
lower_bound[i] = log_det_prec_i - dim/self.beta[i] - self.nu[i]*np.trace(np.dot(S[i],prec[i]))
diff = X_barre[i] - self.means[i]
lower_bound[i] += -self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T))
lower_bound[i] *= 0.5 * N[i]
#Second line
lower_bound[i] += (self.alpha_0 - self.alpha[i]) * self.log_weights[i]
lower_bound[i] += _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i])
resp_i = resp[:,i:i+1]
log_resp_i = log_resp[:,i:i+1]
lower_bound[i] += np.sum(resp_i) * self.log_weights[i] - np.sum(resp_i*log_resp_i)
lower_bound[i] += 0.5 * (self.nu_0 - self.nu[i]) * log_det_prec_i
lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i]))
lower_bound[i] += dim*0.5*(1 - self.beta_0/self.beta[i] + self.nu[i])
#Third line without the last term which is not summed
diff = self.means[i] - self._means_prior
lower_bound[i] += -0.5*self.beta_0*self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T))
lower_bound[i] += -0.5*self.nu[i]*np.trace(np.dot(self._inv_prec_prior,prec[i]))
result = np.sum(lower_bound)
result += _log_C(self.alpha_0 * np.ones(self.n_components))- _log_C(self.alpha)
result -= n_points * dim * 0.5 * np.log(2*np.pi)
return result
def _get_parameters(self):
return (self.log_weights, self.means, self.cov,
self.alpha, self.beta, self.nu)
def _set_parameters(self, params,verbose=True):
(self.log_weights, self.means, self.cov,
self.alpha, self.beta, self.nu )= params
# Matrix W
self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis]
self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec))
if self.n_components != len(self.means) and verbose:
print('The number of components changed')
self.n_components = len(self.means)
def _limiting_model(self,points):
n_points,dim = points.shape
log_resp = self.predict_log_resp(points)
_,n_components = log_resp.shape
exist = np.zeros(n_components)
for i in range(n_points):
for j in range(n_components):
if np.argmax(log_resp[i])==j:
exist[j] = 1
idx_existing = np.where(exist==1)
log_weights = self.log_weights[idx_existing]
means = self.means[idx_existing]
cov = self.cov[idx_existing]
alpha = self.alpha[idx_existing]
beta = self.beta[idx_existing]
nu = self.nu[idx_existing]
params = (log_weights, means, cov,
alpha, beta, nu)
return params
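# A minimal usage sketch (assumption: fit() and predict_log_resp() are
# inherited from BaseMixture, as suggested by the attributes documented above;
# the random data below is purely illustrative):
#
#     import numpy as np
#     points = np.random.randn(500, 2)
#     model = VariationalGaussianMixture(n_components=3, init='kmeans')
#     model.fit(points)
#     log_resp = model.predict_log_resp(points)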
|
14thibea/megamix
|
megamix/batch/VBGMM.py
|
Python
|
apache-2.0
| 20,615
|
[
"Gaussian"
] |
b5403282a3130e0fb8901f6147fa2669b25d40adf6bd2d1b3750d52dc4cb861b
|
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
N = 6
scale = 100.0
ppr = np.array([[0.389, 0.486531845513, 0.563139309706, 0.525082089958, 0.956529670642, 0.584492],
[0.0888, 0.201719007817, 0.0650382409885, 0.28306446735, 0.893289636035, 0.337310]])
ppr = np.divide(ppr[0] - ppr[1], ppr[0]) * scale
orca = np.array([[0.5610000000000000542, 0.610615939137, 0.555998, 0.665191365668, 0.746871, 0.619335],
[0.471600, 0.559310569063, 0.302385, 0.626107818416, 0.671885, 0.551048]]) * scale
orca = np.divide(orca[0] - orca[1], orca[0]) * scale
sf = np.array([[0.2606, 0.409060995479, 0.496210651886, 0.451420123905, 0.870840975302, 0.497626641698],
[0.044000, 0.197546, 0.294940, 0.233776, 0.777800, 0.318901]])
sf = np.divide((sf[0] - sf[1]), sf[0] )*scale
# menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ppr, width, color='r')
# womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, orca, width, color='b')
rects3 = ax.bar(ind+(width*2.0), sf, width, color='g')
# add some text for labels, title and axis ticks
# ax.set_ylabel('Scores')
# ax.set_title('Scores by group and gender')
ax.set_xticks(ind+(width*1.5))
# ax.set_xticklabels( ('d', 'q^d', 'q^t', 'q^e', 'e', 'u') )
ax.set_xticklabels( ('', '', '', '', '', '') )
ax.legend( (rects1[0], rects2[0], rects3[0]), ('PPR', 'ORCA', 'SF') )
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
# autolabel(rects1)
# autolabel(rects2)
# autolabel(rects3)
plt.show()
|
CG-F16-24-Rutgers/steersuite-rutgers
|
steerstats/tools/plotting/plotMetricDataBar.py
|
Python
|
gpl-3.0
| 1,790
|
[
"ORCA"
] |
1745246b6083ab1b86162e831a6f79674afa35e1d0b26fd5b40345ac76493878
|
__all__ = ["run_tli"]
import numpy as np
from scipy.signal import fftconvolve as convolve
import utils
def pad_image_and_weight(image, weight, final_shape, offset=None):
final_image = np.zeros(final_shape, dtype=float)
final_weight = np.zeros(final_shape, dtype=float)
shape = image.shape
rng = (0.5 * (np.atleast_1d(final_shape) - np.atleast_1d(shape))) \
.astype(int)
if offset is not None:
rng -= offset
final_image[rng[0]:rng[0] + shape[0], rng[1]:rng[1] + shape[1]] = \
image
final_weight[rng[0]:rng[0] + shape[0], rng[1]:rng[1] + shape[1]] = \
weight
return final_image, final_weight
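# Hedged example of the centering arithmetic above: a 2x2 image dropped into a
# 6x6 frame lands at rows/cols 2:4 when offset is None, and is shifted by the
# negative of `offset` otherwise.
#
#     img = np.ones((2, 2))
#     wgt = np.ones((2, 2))
#     padded_img, padded_wgt = pad_image_and_weight(img, wgt, (6, 6),
#                                                   offset=np.array([1, 0]))
#     # the image now occupies rows 1:3 and columns 2:4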
def run_tli(image_list, top=None, top_percent=None, shift=True,
mask_list=None, invert=False, square=False, scene=None,
hdu=0):
"""
Run traditional lucky imaging on a stream of data.
## Arguments
* `image_list` (list): The list of filenames for the images which
will be ranked and combined using TLI.
## Keyword Arguments
* `top` (int or list): How many images should be co-added? This can be
a list of `int`s so that multiple sets can be co-added simultaneously.
* `top_percent` (float): An alternative notation for `top` instead
specified by a percentage.
* `shift` (bool): Should the images be shifted before co-adding? This
defaults to `True`.
* `hdu` (int): The HDU number for the data.
## Returns
* `fns` (list): The filenames ordered from best to worst as ranked
by TLI.
* `masks` (list): The corresponding mask filenames, in the same order as
`fns` (entries are `None` when `mask_list` is not given).
* `ranks` (list): The value of the ranking scalar corresponding to
the images listed in `fns`.
* `centers` (list): The coordinates of the centers of each image as
determined by centroiding.
* `coadd` (numpy.ndarray): The resulting co-added images. The shape will
be `(len(top) + 1, N, M)` where the `-1` entry is the full co-add and
`N` and `M` are decided based on the size of result needed based on the
offsets.
"""
# Build the scene to convolve with. It's a small, pixelated Gaussian.
if scene is None:
dim = 10
x = np.linspace(-0.5 * dim, 0.5 * dim, dim) ** 2
r = np.sqrt(x[:, None] + x[None, :])
scene = 0.5 * np.exp(-0.5 * r) / np.pi
s_dim = (np.array(scene.shape) - 1) / 2
# Initialized on the first pass through the data.
final_shape = None
# Calculate the centers and ranks of the images.
centers = {}
offsets = {}
ranks = {}
images = {}
weights = {}
for n, fn in enumerate(image_list):
img = utils.load_image(fn, hdu=hdu)
if mask_list is not None:
weight = utils.load_image(mask_list[n])
if invert:
inds = np.isnan(weight) + np.isinf(weight)
weight[~inds] = 1.0 / weight[~inds]
weight[inds] = 0.0
if square:
weight *= weight
else:
weight = np.ones_like(img)
weight[np.isnan(img) + np.isinf(img)] = 0.0
# Discard the image if no pixels are included.
if np.sum(weight):
# This is a sky subtraction hack.
img -= np.median(img[weight > 0])
# Set those same pixels to the median value. This is a hack to
# make the centroiding work.
img[weight == 0.0] = 0.0
# Do the centroiding and find the rank.
convolved = convolve(img, scene, mode="valid")
ind_max = convolved.argmax()
center = np.unravel_index(ind_max, convolved.shape)
rank = convolved.flat[ind_max]
# Because of the "valid" in the convolve, we need to offset
# based on the size of the "scene".
center = np.array(center) + s_dim
offset = (center - 0.5 * np.array(img.shape)).astype(int)
# Keep track of how the largest offset affects the final shape
# of the image.
shape = np.array(img.shape)
if final_shape is None:
final_shape = shape
if shift:
final_shape = np.max(np.vstack(
[final_shape, shape + 2 * np.abs(offset)]), axis=0)
# Save the image, weight and metadata.
centers[fn] = center
offsets[fn] = offset
images[fn] = img
weights[fn] = weight
ranks[fn] = (n, rank)
# Sort by brightest centroided pixel.
ranked = sorted(ranks, reverse=True, key=lambda k: ranks[k][1])
ordered_fns, ordered_masks, ordered_ranks, ordered_centers = [], [], [], []
for k in ranked:
ordered_fns.append(k)
if mask_list is not None:
ordered_masks.append(mask_list[ranks[k][0]])
else:
ordered_masks.append(None)
ordered_ranks.append(ranks[k][-1])
ordered_centers.append(list(centers[k]))
ordered_ranks = np.array(ordered_ranks)
ordered_centers = np.array(ordered_centers)
# Pad the images to the right size.
for k in ordered_fns:
if shift:
offset = offsets[k]
else:
offset = np.zeros(2)
images[k], weights[k] = \
pad_image_and_weight(images[k], weights[k], final_shape,
offset=offset)
# Figure out the number of images that should be co-added.
if top is None and top_percent is None:
top = len(ordered_fns)
elif top_percent is not None:
top = [max(1, int(top_percent * 0.01 * len(ranked))), len(ordered_fns)]
else:
top = np.append(np.atleast_1d(top), len(ordered_fns))
top = np.atleast_1d(top)
# Allocate the memory for the final image.
final_image = np.zeros([len(top)] + list(final_shape))
final_weight = np.zeros([len(top)] + list(final_shape))
# Do the co-add.
for j, t in enumerate(top):
for i, k in enumerate(ordered_fns[:t]):
final_image[j] += images[k] * weights[k]
final_weight[j] += weights[k]
m = final_weight > 0
final_image[m] /= final_weight[m]
final_image[~m] = np.nan
return ordered_fns, ordered_masks, ordered_ranks, ordered_centers, \
final_image
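# Hedged usage sketch (file names are placeholders, not part of the original
# module): rank a directory of frames, co-add the best 10 per cent, and also
# get the full co-add.
#
#     import glob
#     fns, masks, ranks, centers, coadd = run_tli(sorted(glob.glob("*.fits")),
#                                                 top_percent=10.0, shift=True)
#     best_frame = fns[0]   # highest-ranked image
#     coadd.shape           # (2, N, M): top-10% co-add and full co-add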
|
davidwhogg/TheThresher
|
thresher/tli.py
|
Python
|
gpl-2.0
| 6,238
|
[
"Gaussian"
] |
b32913175337717e5a803e5afc22e51fbf1ea6b3dfb5b49620cbb1b21694555a
|
""" Module for ginga routines. Mainly for debugging
"""
try:
basestring
except NameError:
basestring = str
import os
import numpy as np
import time
# A note from ejeschke on how to use the canvas add command in ginga: https://github.com/ejeschke/ginga/issues/720
#
# The add() command can add any of the shape types that are defined under ginga.canvas.types. The good part is that if you
# go to that directory in the ginga source tree (ginga/canvas/types) and browse the source, you will find a parameter
# description table at the beginning of each type definition, describing each parameter in the type and what it is for.
# Most of the standard geometric types are in basic.py and there are specialized ones in utils.py, astro.py and layer.py. Looking at
# the classes will also tell you which parameters are positional and which are keyword.
# CANNOT LOAD DEBUGGER AS THIS MODULE IS CALLED BY ARDEBUG
#from pypeit import ardebug as debugger
#import pdb as debugger
#from pypeit import scienceimage
from ginga.util import grc
from astropy.io import fits
from pypeit import msgs
def connect_to_ginga(host='localhost', port=9000, raise_err=False):
""" Connect to an active RC Ginga
Args:
host (str, optional):
port (int, optional): Probably should remain at 9000
raise_err (bool, optional): Raise an error if no connection is made,
otherwise just raise a warning and continue
Returns:
RemoteClient: connection to ginga viewer
"""
# Start
viewer = grc.RemoteClient(host, port)
# Test
ginga = viewer.shell()
try:
tmp = ginga.get_current_workspace()
except:
if raise_err:
raise ValueError
else:
msgs.warn("Problem connecting to Ginga. Launch an RC Ginga viewer: ginga --modules=RC then continue.")
# Return
return viewer
def show_image(inp, chname='Image', waveimg=None, bitmask=None, mask=None, exten=0, cuts=None,
clear=False, wcs_match=False):
"""
Display an image using Ginga.
.. todo::
- implement instrument specific reading
- use the `mask` as a boolean mask if `bitmask` is not provided.
Args:
inp (:obj:`str`, numpy.ndarray):
The image to view. If a string is provided, it must be the
name of a fits image that can be read by `astropy.io.fits`.
chname (:obj:`str`, optional):
The name of the ginga channel to use.
waveimg (:obj:`str`, optional):
The name of a FITS image with the relevant WCS coordinates
in its header, mainly for wavelength array. If None, no WCS
is used.
bitmask (:class:`pypeit.bitmask.BitMask`, optional):
The object used to unpack the mask values. If this is
provided, mask must also be provided and the expectation is
that an extraction image is being shown.
mask (numpy.ndarray, optional):
A boolean or bitmask array that designates a pixel as being
masked. Currently this is only used when displaying the
spectral extraction result.
exten (:obj:`int`, optional):
The extension of the fits file with the image to show. This
is only used if the input is a file name.
cuts (array-like, optional):
Initial cut levels to apply when displaying the image. This
object must have a length of 2 with the lower and upper
levels, respectively.
clear (:obj:`bool`, optional):
Clear any existing ginga viewer and its channels.
wcs_match(:obj:`bool`, optional):
Use this as a reference image for the WCS and match all
images in other channels to it.
Returns:
ginga.util.grc.RemoteClient, ginga.util.grc._channel_proxy: The
ginga remote client and the channel with the displayed image.
Raises:
ValueError:
Raised if `cuts` is provided and does not have two elements
or if bitmask is provided but mask is not.
"""
# Input checks
if cuts is not None and len(cuts) != 2:
raise ValueError('Input cuts must only have two elements, the lower and upper cut.')
if bitmask is not None and mask is None:
raise ValueError('If providing a bitmask, must also provide the mask values.')
# Read or set the image data. This will fail if the input is a
# string and astropy.io.fits cannot read the image.
img = fits.open(inp)[exten].data if isinstance(inp, basestring) else inp
# Instantiate viewer
viewer = connect_to_ginga()
if clear:
# Clear existing channels
shell = viewer.shell()
chnames = shell.get_channel_names()
for ch in chnames:
shell.delete_channel(ch)
ch = viewer.channel(chname)
# Header
header = {}
header['NAXIS1'] = img.shape[1]
header['NAXIS2'] = img.shape[0]
if waveimg is not None:
header['WCS-XIMG'] = waveimg
# Giddy up
ch.load_np(chname, img, 'fits', header)
canvas = viewer.canvas(ch._chname)
# These commands set up the viewer. They can be found at
# ginga/ginga/ImageView.py
out = canvas.clear()
if cuts is not None:
out = ch.cut_levels(cuts[0], cuts[1])
out = ch.set_color_map('ramp')
out = ch.set_intensity_map('ramp')
out = ch.set_color_algorithm('linear')
out = ch.restore_contrast()
out = ch.restore_cmap()
# WCS Match this to other images with this as the reference image?
if wcs_match:
        # After displaying all the images, sync up the images with WCSMatch
shell = viewer.shell()
out = shell.start_global_plugin('WCSMatch')
out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel', [chname], {})
# TODO: I would prefer to change the color map to indicate these
# pixels rather than overplot points. Because for large numbers of
# masked pixels, this is super slow. Need to ask ginga folks how to
# do that.
# If bitmask was passed in, assume this is an extraction qa image
# and use the mask to identify why each pixel was masked
if bitmask is not None:
# Unpack the bitmask
bpm, crmask, satmask, minmask, offslitmask, nanmask, ivar0mask, ivarnanmask, extractmask \
= bitmask.unpack(mask)
# These are the pixels that were masked by the bpm
spec_bpm, spat_bpm = np.where(bpm & ~offslitmask)
nbpm = len(spec_bpm)
# note: must cast numpy floats to regular python floats to pass the remote interface
points_bpm = [dict(type='point', args=(float(spat_bpm[i]), float(spec_bpm[i]), 2),
kwargs=dict(style='plus', color='magenta')) for i in range(nbpm)]
# These are the pixels that were masked by LACOSMICS
spec_cr, spat_cr = np.where(crmask & ~offslitmask)
ncr = len(spec_cr)
# note: must cast numpy floats to regular python floats to pass the remote interface
points_cr = [dict(type='point', args=(float(spat_cr[i]), float(spec_cr[i]), 2),
kwargs=dict(style='plus', color='cyan')) for i in range(ncr)]
# These are the pixels that were masked by the extraction
spec_ext, spat_ext = np.where(extractmask & ~offslitmask)
        n_ext = len(spec_ext)
        # note: must cast numpy floats to regular python floats to pass the remote interface
        points_ext = [dict(type='point', args=(float(spat_ext[i]), float(spec_ext[i]), 2),
                           kwargs=dict(style='plus', color='red')) for i in range(n_ext)]
# These are the pixels that were masked for any other reason
        spec_oth, spat_oth = np.where((satmask | minmask | nanmask | ivar0mask | ivarnanmask)
                                      & ~offslitmask)
noth = len(spec_oth)
# note: must cast numpy floats to regular python floats to pass
# the remote interface
points_oth = [dict(type='point', args=(float(spat_oth[i]), float(spec_oth[i]), 2),
kwargs=dict(style='plus', color='yellow')) for i in range(noth)]
nspat = img.shape[1]
nspec = img.shape[0]
# Labels for the points
text_bpm = [dict(type='text', args=(nspat / 2 -40, nspec / 2, 'BPM'),
kwargs=dict(color='magenta', fontsize=20))]
text_cr = [dict(type='text', args=(nspat / 2 -40, nspec / 2 - 30, 'CR'),
kwargs=dict(color='cyan', fontsize=20))]
text_ext = [dict(type='text', args=(nspat / 2 -40, nspec / 2 - 60, 'EXTRACT'),
kwargs=dict(color='red', fontsize=20))]
text_oth = [dict(type='text', args=(nspat / 2 -40, nspec / 2 - 90, 'OTHER'),
kwargs=dict(color='yellow', fontsize=20))]
canvas_list = points_bpm + points_cr + points_ext + points_oth + text_bpm + text_cr \
+ text_ext + text_oth
canvas.add('constructedcanvas', canvas_list)
return viewer, ch
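# Illustrative usage sketch for show_image() (not part of the original module;
# the file name 'spec2d.fits' and the cut levels are hypothetical):
#
#   viewer, ch = show_image('spec2d.fits', chname='Science', exten=1,
#                           cuts=(0., 1000.), clear=True)
#
# An already-loaded numpy array can be passed in place of the file name.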
def show_slits(viewer, ch, lord_in, rord_in, slit_ids=None, rotate=False, pstep=50, clear=False):
""" Overplot slits on the image in Ginga in the given channel
Args:
viewer (ginga.util.grc.RemoteClient): Ginga RC viewer
ch (ginga.util.grc._channel_proxy): Ginga channel
        lord_in (ndarray): 1D or 2D array of left slit edges
        rord_in (ndarray): 1D or 2D array of right slit edges
slit_ids (list, optional): List of slit IDs (int)
rotate (bool, optional):
Rotate the image?
pstep (int, optional):
Show every pstep point of the edges as opposed to *every* point, recommended for speed
clear (bool, optional):
Clear the canvas?
"""
    # This allows the input lord and rord to be either (nspec, nslit) arrays or
    # single vectors of size (nspec)
if lord_in.ndim == 2:
nslit = lord_in.shape[1]
lordloc = lord_in
rordloc = rord_in
else:
nslit = 1
lordloc = lord_in.reshape(lord_in.size,1)
rordloc = rord_in.reshape(rord_in.size,1)
if slit_ids is None:
slit_ids = [str(slit) for slit in np.arange(nslit)]
# Deal with case that slit_ids is input as a scalar
    if not hasattr(slit_ids, "__len__"):
slit_ids = [slit_ids]
# Canvas
canvas = viewer.canvas(ch._chname)
if clear:
canvas.clear()
# y-axis
y = (np.arange(lordloc.shape[0])).tolist()
#ohf = lordloc.shape[0] // 2
tthrd = int(2*lordloc.shape[0]/3.)
# Loop on slits
for slit in range(lordloc.shape[1]):
# Edges
for kk,item in enumerate([lordloc, rordloc]):
if kk == 0:
clr = str('green')
else:
clr = str('red')
if rotate:
points = list(zip(y[::pstep],item[::pstep,slit].tolist()))
else:
points = list(zip(item[::pstep,slit].tolist(),y[::pstep]))
canvas.add(str('path'), points, color=clr)
# Text -- Should use the 'real' name
if rotate:
xt, yt = float(y[tthrd]), float(lordloc[tthrd,slit])
else:
xt, yt = float(lordloc[tthrd,slit]), float(y[tthrd])
canvas.add(str('text'), xt, yt, str('S{:}'.format(slit_ids[slit])), color=str('red'),
fontsize=20.)
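# Illustrative usage sketch for show_slits() (not part of the original module;
# `lord` and `rord` are hypothetical (nspec, nslit) arrays of left/right edge
# positions from the slit tracing step):
#
#   viewer, ch = show_image(trace_image, chname='Slits')
#   show_slits(viewer, ch, lord, rord, slit_ids=[0, 1, 2], pstep=50)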
def show_trace(viewer, ch, trace, trc_name='Trace', color='blue', clear=False,
rotate=False, pstep=50, yval=None):
"""
Args:
viewer (ginga.util.grc.RemoteClient):
Ginga RC viewer
ch (ginga.util.grc._channel_proxy):
Ginga channel
trace (np.ndarray):
x-positions on the detector
trc_name (str, optional):
Trace name
color (str, optional):
Color for the trace
clear (bool, optional):
Clear the canvas?
rotate (bool, optional):
Rotate the image?
pstep (int, optional):
Show every pstep point of the edges as opposed to *every* point, recommended for speed
yval (np.ndarray, optional):
If not provided, it is assumed the input x values track y=0,1,2,3,etc.
"""
# Canvas
canvas = viewer.canvas(ch._chname)
if clear:
canvas.clear()
# Show
if yval is None:
y = (np.arange(trace.size)[::pstep]).tolist()
else:
y = yval[::pstep].tolist()
xy = [trace[::pstep].tolist(), y]
if rotate:
xy[0], xy[1] = xy[1], xy[0]
points = list(zip(xy[0], xy[1]))
canvas.add(str('path'), points, color=str(color))
# Text
ohf = trace.size // (2*pstep)
xyt = [float(trace[ohf]), float(y[ohf])]
if rotate:
xyt[0], xyt[1] = xyt[1], xyt[0]
# Do it
canvas.add(str('text'), xyt[0], xyt[1], trc_name, rot_deg=90., color=str(color), fontsize=17.)
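# Illustrative usage sketch for show_trace() (not part of the original module;
# `objtrace` is a hypothetical 1D array of spatial positions, one per spectral
# row):
#
#   show_trace(viewer, ch, objtrace, trc_name='obj0', color='orange', pstep=10)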
def clear_canvas(cname):
"""
Clear the ginga canvas
Args:
cname (str): Channel name
"""
viewer = connect_to_ginga()
ch = viewer.channel(cname)
canvas = viewer.canvas(ch._chname)
canvas.clear()
def clear_all():
"""
Clear all of the ginga canvasses
"""
viewer = connect_to_ginga()
shell = viewer.shell()
chnames = shell.get_channel_names()
for ch in chnames:
shell.delete_channel(ch)
def show_tilts(viewer, ch, trc_tilt_dict, sedges=None, yoff=0., xoff=0., pstep=1,
points=True, clear_canvas=False):
"""
Show the arc tilts on the input channel
Not sure this is actually working correctly...
Args:
viewer (ginga.util.grc.RemoteClient):
Ginga RC viewer
ch (ginga.util.grc._channel_proxy):
Ginga channel
trc_tilt_dict (dict):
Contains tilts info
sedges (tuple, optional):
Contains slit edges; passed to show_slits()
yoff (float, optional):
Offset tilts by this amount
xoff (float, optional):
Offset tilts by this amount
pstep (int, optional):
Show every pstep point of the edges as opposed to *every* point, recommended for speed
points (bool, optional):
Plot the Gaussian-weighted tilt centers
clear_canvas (bool, optional):
Clear the canvas first?
"""
canvas = viewer.canvas(ch._chname)
if clear_canvas:
canvas.clear()
if sedges is not None:
show_slits(viewer, ch, sedges[0], sedges[1])
tilts = trc_tilt_dict['tilts']
    # If the crutch is set, plot the crutch instead of the tilt itself
tilts_fit = trc_tilt_dict['tilts_fit']
tilts_spat = trc_tilt_dict['tilts_spat']
tilts_mask = trc_tilt_dict['tilts_mask']
tilts_err = trc_tilt_dict['tilts_err']
use_tilt = trc_tilt_dict['use_tilt']
# Show a trace
nspat = trc_tilt_dict['nspat']
nspec = trc_tilt_dict['nspec']
nlines = tilts.shape[1]
for iline in range(nlines):
x = tilts_spat[:,iline] + xoff # FOR IMAGING (Ginga offsets this value by 1 internally)
this_mask = tilts_mask[:,iline]
this_err = (tilts_err[:,iline] > 900)
if np.sum(this_mask) > 0:
if points: # Plot the gaussian weighted tilt centers
y = tilts[:, iline] + yoff
# Plot the actual flux weighted centroids of the arc lines that were traced
                goodpix = this_mask & ~this_err
ngood = np.sum(goodpix)
if ngood > 0:
xgood = x[goodpix]
ygood = y[goodpix]
# note: must cast numpy floats to regular python floats to pass the remote interface
points_good = [dict(type='squarebox',
args=(float(xgood[i]), float(ygood[i]), 0.7),
kwargs=dict(color='cyan',fill=True, fillalpha=0.5)) for i in range(ngood)]
canvas.add('constructedcanvas', points_good)
                badpix = this_mask & this_err
nbad = np.sum(badpix)
if nbad > 0:
xbad = x[badpix]
ybad = y[badpix]
# Now show stuff that had larger errors
# note: must cast numpy floats to regular python floats to pass the remote interface
points_bad = [dict(type='squarebox',
args=(float(xbad[i]), float(ybad[i]), 0.7),
kwargs=dict(color='red', fill=True,fillalpha=0.5)) for i in range(nbad)]
canvas.add('constructedcanvas', points_bad)
            # Now plot the polynomial fits to the Gaussian-weighted centroids
y = tilts_fit[:, iline] + yoff
points = list(zip(x[this_mask][::pstep].tolist(),y[this_mask][::pstep].tolist()))
if use_tilt[iline]:
clr = 'blue' # Good line
else:
clr = 'yellow' # Bad line
canvas.add('path', points, color=clr, linewidth=3)
canvas.add(str('text'), nspat//2 - 40, nspec//2, 'good tilt fit', color=str('blue'),fontsize=20.)
canvas.add(str('text'), nspat//2 - 40, nspec//2 - 30, 'bad tilt fit', color=str('yellow'),fontsize=20.)
canvas.add(str('text'), nspat//2 - 40, nspec//2 - 60, 'trace good', color=str('cyan'),fontsize=20.)
canvas.add(str('text'), nspat//2 - 40, nspec//2 - 90, 'trace masked', color=str('red'),fontsize=20.)
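# Illustrative usage sketch for show_tilts() (not part of the original module;
# `arcimg` and `trc_tilt_dict` are assumed to come from the arc-tilt tracing
# step and `lord`/`rord` from slit tracing):
#
#   viewer, ch = show_image(arcimg, chname='Tilts')
#   show_tilts(viewer, ch, trc_tilt_dict, sedges=(lord, rord), points=True,
#              clear_canvas=True)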
|
PYPIT/PYPIT
|
pypeit/ginga.py
|
Python
|
gpl-3.0
| 17725
|
[
"Gaussian"
] |
4b821138de2e9cfda25e0072e00c7695ca677d6f3477709fed6ee97924d94284
|
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import time
import string
import rpm
import sys
try:
# python 2
import xmlrpclib
except ImportError:
# python3
import xmlrpc.client as xmlrpclib
from spacewalk.common.usix import IntType, ListType, DictType
# common module
from spacewalk.common.usix import raise_with_tb
from spacewalk.common import rhnCache, rhnFlags, rhn_rpm
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common.rhnTranslate import _
# local module
import rhnUser
import rhnSQL
import rhnLib
class NoBaseChannelError(Exception):
pass
class InvalidServerArchError(Exception):
pass
class BaseChannelDeniedError(Exception):
pass
class ChannelException(Exception):
def __init__(self, channel_id=None, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.channel_id = channel_id
self.channel = None
class ModifiedError(ChannelException):
pass
class IncompatibilityError(Exception):
pass
class InvalidDataError(Exception):
pass
class ChannelNotFoundError(Exception):
pass
class NoToolsChannel(Exception):
pass
class NoChildChannels(Exception):
pass
class InvalidChannel(Exception):
pass
class BaseDatabaseObject:
def __init__(self):
self._row = None
def __getattr__(self, name):
if name.startswith('get_'):
return rhnLib.CallableObj(name[4:], self._get)
if name.startswith('set_'):
return rhnLib.CallableObj(name[4:], self._set)
raise AttributeError(name)
def _set(self, name, val):
self._new_row()
self._row[name] = val
def _get(self, name):
return self._row[name]
def _new_row(self):
raise NotImplementedError()
def save(self, with_updates=1):
try:
return self._save(with_updates=with_updates)
except:
rhnSQL.rollback()
raise
def _save(self, with_updates=1):
try:
self._row.save(with_updates=with_updates)
except rhnSQL.ModifiedRowError:
raise_with_tb(ModifiedError(self._row['id']), sys.exc_info()[2])
class BaseChannelObject(BaseDatabaseObject):
_table_name = None
_sequence_name = None
_generic_fields = []
def load_by_label(self, label):
self.__init__()
self._row = rhnSQL.Row(self._table_name, 'label')
self._row.load(label)
return self
def load_by_id(self, obj_id):
self.__init__()
self._row = rhnSQL.Row(self._table_name, 'id')
self._row.load(obj_id)
return self
def load_from_dict(self, dict):
# Re-init
self.__init__()
for f in self._generic_fields:
method = getattr(self, 'set_' + f)
method(dict.get(f))
self._load_rest(dict)
return self
def _load_rest(self, dict):
pass
def exists(self):
if not self._row:
return 0
return self._row.real
def get_org_id(self):
org_id = self._row['org_id']
if org_id is None:
return None
row = self._lookup_org_id(org_id)
if row.real:
return row['login']
return org_id
def set_org_id(self, val):
self._new_row()
if val is None or isinstance(val, IntType):
self._row['org_id'] = val
return
row = self._lookup_org_by_login(val)
if not row.real:
raise InvalidDataError("No such org", val)
self._row['org_id'] = row['org_id']
def _lookup_org_id(self, org_id):
row = rhnSQL.Row('web_contact', 'org_id')
row.load(org_id)
return row
def _lookup_org_by_login(self, login):
row = rhnSQL.Row('web_contact', 'login')
row.load(login)
return row
def _lookup_channel_family_by_id(self, channel_family_id):
row = rhnSQL.Row('rhnChannelFamily', 'id')
row.load(channel_family_id)
return row
def _lookup_channel_family_by_label(self, channel_family):
row = rhnSQL.Row('rhnChannelFamily', 'label')
row.load(channel_family)
return row
def _new_row(self):
if self._row is None:
self._row = rhnSQL.Row(self._table_name, 'id')
channel_id = rhnSQL.Sequence(self._sequence_name).next()
self._row.create(channel_id)
def as_dict(self):
ret = {}
for f in self._generic_fields:
method = getattr(self, 'get_' + f)
val = method()
ret[f] = val
return ret
# Channel creation
class Channel(BaseChannelObject):
_table_name = 'rhnChannel'
_sequence_name = 'rhn_channel_id_seq'
_generic_fields = ['label', 'name', 'summary', 'description', 'basedir',
'org_id', 'gpg_key_url', 'gpg_key_id', 'gpg_key_fp', 'end_of_life',
'channel_families', 'channel_arch', ]
def __init__(self):
BaseChannelObject.__init__(self)
self._channel_families = []
self._dists = {}
self._parent_channel_arch = None
def load_by_label(self, label):
BaseChannelObject.load_by_label(self, label)
self._load_channel_families()
self._load_dists()
return self
def load_by_id(self, label):
BaseChannelObject.load_by_id(self, label)
self._load_channel_families()
self._load_dists()
return self
def _load_rest(self, dict):
dists = dict.get('dists')
if not dists:
return
for dist in dists:
release = dist.get('release')
os = dist.get('os')
self._dists[release] = os
_query_get_db_channel_families = rhnSQL.Statement("""
select channel_family_id
from rhnChannelFamilyMembers
where channel_id = :channel_id
""")
def _get_db_channel_families(self, channel_id):
if channel_id is None:
return []
h = rhnSQL.prepare(self._query_get_db_channel_families)
h.execute(channel_id=channel_id)
return [x['channel_family_id'] for x in h.fetchall_dict() or []]
def _load_channel_families(self):
channel_id = self._row.get('id')
self._channel_families = self._get_db_channel_families(channel_id)
return 1
def _load_dists(self):
channel_id = self._row.get('id')
dists = self._get_db_dists(channel_id)
self.set_dists(dists)
_query_get_db_dists = rhnSQL.Statement("""
select os, release
from rhnDistChannelMap
where channel_id = :channel_id
and org_id is null
""")
def _get_db_dists(self, channel_id):
if channel_id is None:
return []
h = rhnSQL.prepare(self._query_get_db_dists)
h.execute(channel_id=channel_id)
return h.fetchall_dict() or []
# Setters
def set_channel_arch(self, val):
self._new_row()
arch = self._sanitize_arch(val)
row = self._lookup_channel_arch(arch)
if not row.real:
raise InvalidDataError("No such architecture", arch)
self._row['channel_arch_id'] = row['id']
def _sanitize_arch(self, arch):
if arch == 'i386':
return 'channel-ia32'
p = 'channel-'
if arch[:len(p)] != p:
return p + arch
return arch
def set_parent_channel(self, val):
self._new_row()
if val is None:
self._row['parent_channel'] = None
return
row = self._lookup_channel_by_label(val)
if not row.real:
raise InvalidDataError("Invalid parent channel", val)
self._row['parent_channel'] = row['id']
self._parent_channel_arch = row['channel_arch_id']
def set_channel_families(self, val):
self._new_row()
self._channel_families = []
for cf_label in val:
self.add_channel_family(cf_label)
def set_end_of_life(self, val):
self._new_row()
if val is None:
self._row['end_of_life'] = None
return
t = time.strptime(val, "%Y-%m-%d")
seconds = time.mktime(t)
t = rhnSQL.TimestampFromTicks(seconds)
self._row['end_of_life'] = t
def add_channel_family(self, name):
self._new_row()
cf = self._lookup_channel_family_by_label(name)
if not cf.real:
raise InvalidDataError("Invalid channel family", name)
self._channel_families.append(cf['id'])
def add_dist(self, release, os=None):
if os is None:
os = 'Red Hat Linux'
self._dists[release] = os
def set_dists(self, val):
self._dists.clear()
for h in val:
release = h['release']
os = h['os']
self.add_dist(release, os)
# Getters
def get_parent_channel(self):
pc_id = self._row['parent_channel']
if pc_id is None:
return None
return self._lookup_channel_by_id(pc_id)['label']
def get_channel_families(self):
cf_labels = []
for cf_id in self._channel_families:
row = self._lookup_channel_family_by_id(cf_id)
if row.real:
cf_labels.append(row['label'])
return cf_labels
def get_channel_arch(self):
channel_arch_id = self._row['channel_arch_id']
row = self._lookup_channel_arch_by_id(channel_arch_id)
assert row.real
return row['label']
def get_end_of_life(self):
date_obj = self._row['end_of_life']
if date_obj is None:
return None
return "%s-%02d-%02d %02d:%02d:%02d" % (
date_obj.year, date_obj.month, date_obj.day,
date_obj.hour, date_obj.minute, date_obj.second)
def get_dists(self):
ret = []
for release, os in self._dists.items():
ret.append({'release': release, 'os': os})
return ret
def _lookup_channel_by_id(self, channel_id):
row = rhnSQL.Row('rhnChannel', 'id')
row.load(channel_id)
return row
def _lookup_channel_by_label(self, channel):
row = rhnSQL.Row('rhnChannel', 'label')
row.load(channel)
return row
def _lookup_channel_arch(self, channel_arch):
row = rhnSQL.Row('rhnChannelArch', 'label')
row.load(channel_arch)
return row
def _lookup_channel_arch_by_id(self, channel_arch_id):
row = rhnSQL.Row('rhnChannelArch', 'id')
row.load(channel_arch_id)
return row
def _save(self, with_updates=1):
if self._parent_channel_arch:
if not self._compatible_channel_arches(self._parent_channel_arch,
self._row['channel_arch_id']):
raise IncompatibilityError("Incompatible channel arches")
BaseChannelObject._save(self, with_updates=with_updates)
# Save channel families now
self._save_channel_families()
self._save_dists()
_query_remove_channel_families = rhnSQL.Statement("""
delete from rhnChannelFamilyMembers
where channel_id = :channel_id
and channel_family_id = :channel_family_id
""")
_query_add_channel_families = rhnSQL.Statement("""
insert into rhnChannelFamilyMembers (channel_id, channel_family_id)
values (:channel_id, :channel_family_id)
""")
def _save_channel_families(self):
channel_id = self._row['id']
db_cfids = self._get_db_channel_families(channel_id)
h = {}
for db_cfid in db_cfids:
h[db_cfid] = None
to_add = []
for cfid in self._channel_families:
if cfid in h:
del h[cfid]
continue
to_add.append(cfid)
to_delete = list(h.keys())
if to_delete:
h = rhnSQL.prepare(self._query_remove_channel_families)
cids = [channel_id] * len(to_delete)
h.executemany(channel_id=cids, channel_family_id=to_delete)
if to_add:
h = rhnSQL.prepare(self._query_add_channel_families)
cids = [channel_id] * len(to_add)
h.executemany(channel_id=cids, channel_family_id=to_add)
def _save_dists(self):
channel_id = self._row['id']
db_dists = self._get_db_dists(channel_id)
d = self._dists.copy()
to_add = [[], []]
to_remove = []
to_update = [[], []]
for h in db_dists:
release = h['release']
os = h['os']
if release not in d:
to_remove.append(release)
continue
# Need to update?
m_os = d[release]
if m_os == os:
# Nothing to do
del d[release]
continue
to_update[0].append(release)
to_update[1].append(os)
# Everything else should be added
for release, os in list(d.items()):
to_add[0].append(release)
to_add[1].append(os)
self._remove_dists(to_remove)
self._update_dists(to_update[0], to_update[1])
self._add_dists(to_add[0], to_add[1])
_query_add_dists = rhnSQL.Statement("""
insert into rhnDistChannelMap
(channel_id, channel_arch_id, release, os, org_id)
values (:channel_id, :channel_arch_id, :release, :os, null)
""")
def _add_dists(self, releases, oses):
self._modify_dists(self._query_add_dists, releases, oses)
def _modify_dists(self, query, releases, oses):
if not releases:
return
count = len(releases)
channel_ids = [self._row['id']] * count
query_args = {'channel_id': channel_ids, 'release': releases}
if oses:
channel_arch_ids = [self._row['channel_arch_id']] * count
query_args.update({'channel_arch_id': channel_arch_ids,
'os': oses})
h = rhnSQL.prepare(query)
h.executemany(**query_args)
_query_update_dists = rhnSQL.Statement("""
update rhnDistChannelMap
set channel_arch_id = :channel_arch_id,
os = :os
where channel_id = :channel_id
and release = :release
and org_id is null
""")
def _update_dists(self, releases, oses):
self._modify_dists(self._query_update_dists, releases, oses)
_query_remove_dists = rhnSQL.Statement("""
delete from rhnDistChannelMap
where channel_id = :channel_id
and release = :release
and org_id is null
""")
def _remove_dists(self, releases):
self._modify_dists(self._query_remove_dists, releases, None)
def _compatible_channel_arches(self, parent_channel_arch, channel_arch):
# This could get more complicated later
return (parent_channel_arch == channel_arch)
def as_dict(self):
ret = BaseChannelObject.as_dict(self)
ret['dists'] = self.get_dists()
return ret
class ChannelFamily(BaseChannelObject):
_table_name = 'rhnChannelFamily'
_sequence_name = 'rhn_channel_family_id_seq'
_generic_fields = ['label', 'name', 'product_url']
def _load_by_id(query, item_object, pattern=None):
qargs = {}
if pattern:
query += "and label like :pattern"
qargs['pattern'] = pattern
h = rhnSQL.prepare(query)
h.execute(**qargs)
ret = []
while 1:
row = h.fetchone_dict()
if not row:
break
c = item_object.load_by_id(row['id'])
ret.append(c.as_dict())
return ret
def list_channel_families(pattern=None):
query = """
select id
from rhnChannelFamily
where org_id is null
"""
return _load_by_id(query, ChannelFamily(), pattern)
def list_channels(pattern=None):
query = """
select id
from rhnChannel
where 1=1
"""
return _load_by_id(query, Channel(), pattern)
# makes sure there are no None values in dictionaries, etc.
def __stringify(object):
if object is None:
return ''
if type(object) == type([]):
return list(map(__stringify, object))
# We need to know __stringify converts immutable types into immutable
# types
if type(object) == type(()):
return tuple(map(__stringify, object))
if type(object) == type({}):
ret = {}
for k, v in object.items():
ret[__stringify(k)] = __stringify(v)
return ret
# by default, we just str() it
return str(object)
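# Illustrative example of what __stringify() produces (not part of the original
# module): it recursively turns None into '' and everything else into str,
# preserving list/tuple/dict structure, e.g.
#
#   __stringify({'label': None, 'id': 42, 'tags': ['a', None]})
#   -> {'label': '', 'id': '42', 'tags': ['a', '']}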
# return the channel information
def channel_info(channel):
log_debug(3, channel)
# get the channel information
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from
rhnChannel c,
rhnChannelArch ca
where
c.channel_arch_id = ca.id
and c.label = :channel
""")
h.execute(channel=str(channel))
ret = h.fetchone_dict()
return __stringify(ret)
# return information about a base channel for a server_id
def get_base_channel(server_id, none_ok=0):
log_debug(3, server_id)
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c, rhnChannelArch ca, rhnServerChannel sc
where sc.server_id = :server_id
and sc.channel_id = c.id
and c.channel_arch_id = ca.id
and c.parent_channel is NULL
""")
h.execute(server_id=str(server_id))
ret = h.fetchone_dict()
if not ret:
if not none_ok:
log_error("Server not subscribed to a base channel!", server_id)
return None
return __stringify(ret)
def channels_for_server(server_id):
"""channel info list for all channels accessible by this server.
list channels a server_id is subscribed to
We DO NOT want to cache this one because we depend on getting
accurate information and the caching would only introduce more
overhead on an otherwise very fast query
"""
log_debug(3, server_id)
try:
server_id = int(server_id)
except:
raise_with_tb(rhnFault(8, server_id), sys.exc_info()[2]) # Invalid rhnServer.id
    # XXX: need to return unsubscribed channels and a way to indicate
    # they aren't already subscribed
# list all the channels this server is subscribed to. We also want
# to know if any of those channels has local packages in it... A
# local package has a org_id set.
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
c.gpg_key_url,
case s.org_id when c.org_id then 1 else 0 end local_channel,
TO_CHAR(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from
rhnChannelArch ca,
rhnChannel c,
rhnServerChannel sc,
rhnServer s
where
c.id = sc.channel_id
and sc.server_id = :server_id
and s.id = :server_id
and ca.id = c.channel_arch_id
order by c.parent_channel nulls first
""")
h.execute(server_id=str(server_id))
channels = h.fetchall_dict()
if not channels:
log_error("Server not subscribed to any channels", server_id)
channels = []
return __stringify(channels)
def getSubscribedChannels(server_id):
"""
Format the response from channels_for_server in the way that the
handlers expect.
"""
channelList = channels_for_server(server_id)
channels = []
for each in channelList:
if 'last_modified' not in each:
# No last_modified attribute
# Probably an empty channel, so ignore
continue
channel = [each['label'], each['last_modified']]
# isBaseChannel
if each['parent_channel']:
flag = "0"
else:
flag = "1"
channel.append(flag)
# isLocalChannel
if each['local_channel']:
flag = "1"
else:
flag = "0"
channel.append(flag)
channels.append(channel)
return channels
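# Illustrative example of the structure returned by getSubscribedChannels()
# (not part of the original module; labels and timestamps are hypothetical):
#
#   [['rhel-x86_64-server-7', '20160101120000', '1', '0'],
#    ['rhel-x86_64-server-optional-7', '20160101120000', '0', '0']]
#
# i.e. [label, last_modified, isBaseChannel flag, isLocalChannel flag] for
# each subscribed channel.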
def isCustomChannel(channel_id):
"""
Input: channel_id (from DB Table rhnChannel.id)
Returns: True if this is a custom channel
False if this is not a custom channel
"""
log_debug(3, channel_id)
h = rhnSQL.prepare("""
select
rcf.label
from
rhnChannelFamily rcf,
rhnChannelFamilyMembers rcfm
where
rcfm.channel_id = :channel_id
and rcfm.channel_family_id = rcf.id
and rcf.org_id is not null
""")
h.execute(channel_id=str(channel_id))
label = h.fetchone()
if label:
if label[0].startswith("private-channel-family"):
log_debug(3, channel_id, "is a custom channel")
return True
return False
# Fetch base channel for a given release and arch
def base_channel_for_rel_arch(release, server_arch, org_id=-1,
user_id=None):
log_debug(4, release, server_arch, org_id, user_id)
query = """
select ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnChannelArch ca
where c.channel_arch_id = ca.id
and c.id = rhn_channel.base_channel_for_release_arch(
:release, :server_arch, :org_id, :user_id)
"""
rhnSQL.transaction("base_channel_for_rel_arch")
h = rhnSQL.prepare(query)
try:
h.execute(release=str(release), server_arch=str(server_arch),
org_id=org_id, user_id=user_id)
except rhnSQL.SQLSchemaError:
e = sys.exc_info()[1]
rhnSQL.rollback("base_channel_for_rel_arch")
if e.errno == 20263:
# Insufficient permissions for subscription
log_debug(4, 'BaseChannelDeniedError')
raise_with_tb(BaseChannelDeniedError(), sys.exc_info()[2])
if e.errno == 20244:
# Server architecture could not be found
log_debug(4, 'InvalidServerArchError')
raise_with_tb(InvalidServerArchError(str(server_arch)), sys.exc_info()[2])
        # Re-raise unknown exceptions
        log_debug(4, 'unknown exception')
raise
log_debug(4, 'got past exceptions')
return h.fetchone_dict()
def base_eus_channel_for_ver_rel_arch(version, release, server_arch,
org_id=-1, user_id=None):
"""
given a redhat-release version, release, and server arch, return a list
of dicts containing the details of the channel z streams either match the
version/release pair, or are greater.
"""
log_debug(4, version, release, server_arch, org_id, user_id)
eus_channels_query = """
select c.id,
c.label,
c.name,
rcm.release,
c.receiving_updates
from
rhnChannelPermissions cp,
rhnChannel c,
rhnServerArch sa,
rhnServerChannelArchCompat scac,
rhnReleaseChannelMap rcm
where
rcm.version = :version
and scac.server_arch_id = sa.id
and sa.label = :server_arch
and scac.channel_arch_id = rcm.channel_arch_id
and rcm.channel_id = c.id
and cp.channel_id = c.id
and cp.org_id = :org_id
and rhn_channel.loose_user_role_check(c.id, :user_id,
'subscribe') = 1
"""
eus_channels_prepared = rhnSQL.prepare(eus_channels_query)
eus_channels_prepared.execute(version=version,
server_arch=server_arch,
user_id=user_id,
org_id=org_id)
channels = []
while True:
channel = eus_channels_prepared.fetchone_dict()
if channel is None:
break
# the release part of redhat-release for rhel 4 is like
# 6.1 or 7; we just look at the first digit.
# for rhel 5 and up it's the full release number of rhel, followed by
# the true release number of the rpm, like 5.0.0.9 (for the 9th
# version of the redhat-release rpm, for RHEL GA)
db_release = channel['release']
if version in ['4AS', '4ES']:
parts = 1
else:
parts = 3
server_rel = '.'.join(release.split('.')[:parts])
channel_rel = '.'.join(db_release.split('.')[:parts])
# XXX we're no longer using the is_default column from the db
if rpm.labelCompare(('0', server_rel, '0'),
('0', channel_rel, '0')) == 0:
channel['is_default'] = 'Y'
channels.append(channel)
if rpm.labelCompare(('0', server_rel, '0'),
('0', channel_rel, '0')) < 0:
channel['is_default'] = 'N'
channels.append(channel)
return channels
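# Worked example of the release comparison above (not part of the original
# module; the values are hypothetical): for version '5Server' and server
# release '5.3.0.3', parts == 3, so the server release truncates to '5.3.0'.
# A channel row whose release is '5.3.0.3' also truncates to '5.3.0',
# rpm.labelCompare() returns 0, and that channel is flagged is_default = 'Y'.
# A channel row with release '5.4.0.1' truncates to '5.4.0', compares greater
# than the server's '5.3.0', and is added with is_default = 'N'.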
def get_channel_for_release_arch(release, server_arch, org_id=None):
log_debug(3, release, server_arch)
server_arch = rhnLib.normalize_server_arch(str(server_arch))
log_debug(3, 'normalized arch as %s' % server_arch)
if org_id is None:
query = """
select distinct
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnDistChannelMap dcm,
rhnChannel c,
rhnChannelArch ca,
rhnServerChannelArchCompat scac,
rhnServerArch sa
where scac.server_arch_id = sa.id
and sa.label = :server_arch
and scac.channel_arch_id = dcm.channel_arch_id
and dcm.release = :release
and dcm.channel_id = c.id
and dcm.channel_arch_id = c.channel_arch_id
and dcm.org_id is null
and c.parent_channel is null
and c.org_id is null
and c.channel_arch_id = ca.id
"""
else:
query = """
select distinct
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnOrgDistChannelMap odcm,
rhnChannel c,
rhnChannelArch ca,
rhnServerChannelArchCompat scac,
rhnServerArch sa
where scac.server_arch_id = sa.id
and sa.label = :server_arch
and scac.channel_arch_id = odcm.channel_arch_id
and odcm.release = :release
and odcm.channel_id = c.id
and odcm.channel_arch_id = c.channel_arch_id
and odcm.org_id = :org_id
and c.parent_channel is null
and c.org_id is null
and c.channel_arch_id = ca.id
"""
h = rhnSQL.prepare(query)
h.execute(release=str(release), server_arch=server_arch, org_id=org_id)
row = h.fetchone_dict()
if not row:
        # No channels for this guy
        log_debug(3, 'No channels for this guy')
return None
log_debug(3, 'row is %s' % str(row))
return row
def applet_channels_for_uuid(uuid):
log_debug(3, uuid)
query = """
select distinct
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
to_char(s.channels_changed, 'YYYYMMDDHH24MISS') server_channels_changed
from rhnChannelArch ca,
rhnChannel c,
rhnServerChannel sc,
rhnServer s,
rhnServerUuid su
where su.uuid = :uuid
and su.server_id = s.id
and su.server_id = sc.server_id
and sc.channel_id = c.id
and c.channel_arch_id = ca.id
"""
h = rhnSQL.prepare(query)
h.execute(uuid=uuid)
rows = h.fetchall_dict() or []
return rows
# retrieve a list of public channels for a given release and architecture
# we cannot cache this if it involves an org_id
# If a user_id is passed to this function, and all the available base channels
# for this server_arch/release combination are denied by the org admin, this
# function raises BaseChannelDeniedError
def channels_for_release_arch(release, server_arch, org_id=-1, user_id=None):
if not org_id:
org_id = -1
    org_id = str(org_id).strip()
log_debug(3, release, server_arch, org_id)
# Can raise BaseChannelDeniedError or InvalidServerArchError
base_channel = base_channel_for_rel_arch(release, server_arch,
org_id=org_id, user_id=user_id)
if not base_channel:
raise NoBaseChannelError()
# At this point, base_channel is not null
# We assume here that subchannels are compatible with the base channels,
# so there would be no need to check for arch compatibility from this
# point
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
-- If user_id is null, then the channel is subscribable
rhn_channel.loose_user_role_check(c.id, :user_id, 'subscribe')
subscribable
from
rhnChannelPermissions cp,
rhnOrgDistChannelMap odcm,
rhnChannel c,
rhnChannelArch ca
where
c.id = odcm.channel_id
and odcm.os in (
'Powertools'
)
and odcm.for_org_id = :org_id
and c.channel_arch_id = ca.id
and cp.channel_id = c.id
and cp.org_id = :org_id
and c.parent_channel = :parent_channel
""")
h.execute(org_id=org_id,
parent_channel=base_channel['id'], user_id=user_id)
channels = [base_channel]
while 1:
row = h.fetchone_dict()
if not row:
break
subscribable = row['subscribable']
del row['subscribable']
if not subscribable:
# Not allowed to subscribe to this channel
continue
channels.append(row)
return __stringify(channels)
_query_get_source_packages_from_ids = rhnSQL.Statement("""
select srpm.name
from rhnChannelPackage cp,
rhnPackage p,
rhnSourceRPM srpm
where cp.channel_id = :channel_id
and cp.package_id = p.id
and p.source_rpm_id = srpm.id
""")
def list_packages_source(channel_id):
ret = []
h = rhnSQL.prepare(_query_get_source_packages_from_ids)
h.execute(channel_id=channel_id)
results = h.fetchall_dict()
if results:
for r in results:
r = r['name']
            if r.find(".rpm") != -1:
                r = r.replace(".rpm", "")
new_evr = rhnLib.make_evr(r, source=1)
new_evr_list = [new_evr['name'], new_evr['version'], new_evr['release'], new_evr['epoch']]
ret.append(new_evr_list)
return ret
# the latest packages from the specified channel
_query_all_packages_from_channel_checksum = """
select
p.id,
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
p.package_size,
ct.label as checksum_type,
c.checksum
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageName pn,
rhnPackageEVR pevr,
rhnPackageArch pa,
rhnChecksumType ct,
rhnChecksum c
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pevr.id
and p.package_arch_id = pa.id
and p.checksum_id = c.id
and c.checksum_type_id = ct.id
order by pn.name, pevr.evr desc, pa.label
"""
# This function executes the SQL call for listing packages with checksum info
def list_all_packages_checksum_sql(channel_id):
log_debug(3, channel_id)
h = rhnSQL.prepare(_query_all_packages_from_channel_checksum)
h.execute(channel_id=str(channel_id))
ret = h.fetchall_dict()
if not ret:
return []
# process the results
ret = [(a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"], a['checksum_type'],
a['checksum']) for a in __stringify(ret)]
return ret
# This function executes the SQL call for listing latest packages with
# checksum info
def list_packages_checksum_sql(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
query = """
select
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
full_channel.package_size,
full_channel.checksum_type,
full_channel.checksum
from
rhnPackageArch pa,
( select
p.name_id,
max(pe.evr) evr
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageEVR pe
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.evr_id = pe.id
group by p.name_id
) listall,
( select distinct
p.package_size,
p.name_id,
p.evr_id,
p.package_arch_id,
ct.label as checksum_type,
c.checksum
from
rhnChannelPackage cp,
rhnPackage p,
rhnChecksumType ct,
rhnChecksum c
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.checksum_id = c.id
and c.checksum_type_id = ct.id
) full_channel,
-- Rank the package's arch
( select
package_arch_id,
count(*) rank
from
rhnServerPackageArchCompat
group by package_arch_id
) arch_rank,
rhnPackageName pn,
rhnPackageEVR pevr
where
pn.id = listall.name_id
-- link back to the specific package
and full_channel.name_id = listall.name_id
and full_channel.evr_id = pevr.id
and pevr.evr = listall.evr
and pa.id = full_channel.package_arch_id
and pa.id = arch_rank.package_arch_id
order by pn.name, arch_rank.rank desc
"""
h = rhnSQL.prepare(query)
h.execute(channel_id=str(channel_id))
ret = h.fetchall_dict()
if not ret:
return []
# process the results
ret = [(a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"], a['checksum_type'],
a['checksum']) for a in __stringify(ret)]
return ret
# This function executes the SQL call for listing packages
def _list_packages_sql(query, channel_id):
h = rhnSQL.prepare(query)
h.execute(channel_id=str(channel_id))
ret = h.fetchall_dict()
if not ret:
return []
# process the results
ret = [(a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"]) for a in __stringify(ret)]
return ret
def list_packages_sql(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
query = """
select
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
full_channel.package_size
from
rhnPackageArch pa,
( select
p.name_id,
max(pe.evr) evr
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageEVR pe
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.evr_id = pe.id
group by p.name_id
) listall,
( select distinct
p.package_size,
p.name_id,
p.evr_id,
p.package_arch_id
from
rhnChannelPackage cp,
rhnPackage p
where
cp.channel_id = :channel_id
and cp.package_id = p.id
) full_channel,
-- Rank the package's arch
( select
package_arch_id,
count(*) rank
from
rhnServerPackageArchCompat
group by package_arch_id
) arch_rank,
rhnPackageName pn,
rhnPackageEVR pevr
where
pn.id = listall.name_id
-- link back to the specific package
and full_channel.name_id = listall.name_id
and full_channel.evr_id = pevr.id
and pevr.evr = listall.evr
and pa.id = full_channel.package_arch_id
and pa.id = arch_rank.package_arch_id
order by pn.name, arch_rank.rank desc
"""
return _list_packages_sql(query, channel_id)
# the latest packages from the specified channel
_query_latest_packages_from_channel = """
select
p.id,
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
p.package_size
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageName pn,
rhnPackageEVR pevr,
rhnPackageArch pa
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pevr.id
and p.package_arch_id = pa.id
order by pn.name, pevr.evr desc, pa.label
"""
# This function executes the SQL call for listing packages
def list_all_packages_sql(channel_id):
log_debug(3, channel_id)
return _list_packages_sql(_query_latest_packages_from_channel, channel_id)
# This function executes the SQL call for listing packages with all the
# dep information for each package also
def list_all_packages_complete_sql(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
h = rhnSQL.prepare(_query_latest_packages_from_channel)
# This gathers the provides, requires, conflicts, obsoletes info
g = rhnSQL.prepare("""
select
pp.package_id,
'provides' as capability_type,
pp.capability_id,
pp.sense,
pc.name,
pc.version
from
rhnPackageProvides pp,
rhnPackageCapability pc
where
pp.package_id = :package_id
and pp.capability_id = pc.id
union all
select
pr.package_id,
'requires' as capability_type,
pr.capability_id,
pr.sense,
pc.name,
pc.version
from
rhnPackageRequires pr,
rhnPackageCapability pc
where
pr.package_id = :package_id
and pr.capability_id = pc.id
union all
select
prec.package_id,
'recommends' as capability_type,
prec.capability_id,
prec.sense,
pc.name,
pc.version
from
rhnPackageRecommends prec,
rhnPackageCapability pc
where
prec.package_id = :package_id
and prec.capability_id = pc.id
union all
select
sugg.package_id,
'suggests' as capability_type,
sugg.capability_id,
sugg.sense,
pc.name,
pc.version
from
rhnPackageSuggests sugg,
rhnPackageCapability pc
where
sugg.package_id = :package_id
and sugg.capability_id = pc.id
union all
select
supp.package_id,
'supplements' as capability_type,
supp.capability_id,
supp.sense,
pc.name,
pc.version
from
rhnPackageSupplements supp,
rhnPackageCapability pc
where
supp.package_id = :package_id
and supp.capability_id = pc.id
union all
select
enh.package_id,
'enhances' as capability_type,
enh.capability_id,
enh.sense,
pc.name,
pc.version
from
rhnPackageEnhances enh,
rhnPackageCapability pc
where
enh.package_id = :package_id
and enh.capability_id = pc.id
union all
select
pcon.package_id,
'conflicts' as capability_type,
pcon.capability_id,
pcon.sense,
pc.name,
pc.version
from
rhnPackageConflicts pcon,
rhnPackageCapability pc
where
pcon.package_id = :package_id
and pcon.capability_id = pc.id
union all
select
po.package_id,
'obsoletes' as capability_type,
po.capability_id,
po.sense,
pc.name,
pc.version
from
rhnPackageObsoletes po,
rhnPackageCapability pc
where
po.package_id = :package_id
and po.capability_id = pc.id
union all
select
brks.package_id,
'breaks' as capability_type,
brks.capability_id,
brks.sense,
pc.name,
pc.version
from
rhnPackageBreaks brks,
rhnPackageCapability pc
where
brks.package_id = :package_id
and brks.capability_id = pc.id
union all
select
pdep.package_id,
'predepends' as capability_type,
pdep.capability_id,
pdep.sense,
pc.name,
pc.version
from
rhnPackagePredepends pdep,
rhnPackageCapability pc
where
pdep.package_id = :package_id
and pdep.capability_id = pc.id
""")
h.execute(channel_id=str(channel_id))
# XXX This query has to order the architectures somehow; the 7.2 up2date
# client was broken and was selecting the wrong architecture if athlons
# are passed first. The rank ordering here should make sure that i386
# kernels appear before athlons.
ret = h.fetchall_dict()
if not ret:
return []
for pkgi in ret:
pkgi['provides'] = []
pkgi['requires'] = []
pkgi['conflicts'] = []
pkgi['obsoletes'] = []
pkgi['recommends'] = []
pkgi['suggests'] = []
pkgi['supplements'] = []
pkgi['enhances'] = []
pkgi['breaks'] = []
pkgi['predepends'] = []
g.execute(package_id=pkgi["id"])
deps = g.fetchall_dict() or []
for item in deps:
version = item['version'] or ""
relation = ""
if version:
sense = item['sense'] or 0
if sense & 2:
relation = relation + "<"
if sense & 4:
relation = relation + ">"
if sense & 8:
relation = relation + "="
if relation:
relation = " " + relation
if version:
version = " " + version
dep = item['name'] + relation + version
pkgi[item['capability_type']].append(dep)
# process the results
ret = [(a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"], a['provides'],
a['requires'], a['conflicts'], a['obsoletes'], a['recommends'], a['suggests'], a['supplements'], a['enhances'], a['breaks'], a['predepends']) for a in __stringify(ret)]
return ret
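# Worked example of the dependency "sense" decoding above (not part of the
# original module): sense is an RPM flags bitmask where 2 means "<", 4 means
# ">" and 8 means "=".  A requires row with name 'glibc', version '2.17' and
# sense 12 (i.e. 4 | 8) therefore renders as the string 'glibc >= 2.17'.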
def list_packages_path(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
h = rhnSQL.prepare("""
select
p.path
from
rhnPackage p,
rhnChannelPackage cp
where
cp.channel_id = :channel_id
and cp.package_id = p.id
""")
h.execute(channel_id=str(channel_id))
ret = h.fetchall()
if not ret:
return []
# process the results
# ret = map(lambda a: (a["path"]),
# __stringify(ret))
return ret
# list the latest packages for a channel
def list_packages(channel):
return _list_packages(channel, cache_prefix="list_packages",
function=list_packages_sql)
# list _all_ the packages for a channel
def list_all_packages(channel):
return _list_packages(channel, cache_prefix="list_all_packages",
function=list_all_packages_sql)
# list _all_ the packages for a channel, including checksum info
def list_all_packages_checksum(channel):
return _list_packages(channel, cache_prefix="list_all_packages_checksum",
function=list_all_packages_checksum_sql)
# list _all_ the packages for a channel
def list_all_packages_complete(channel):
return _list_packages(channel, cache_prefix="list_all_packages_complete",
function=list_all_packages_complete_sql)
# Common part of list_packages and list_all_packages*
# cache_prefix is the prefix for the file name we're caching this request as
# function is the generator function
def _list_packages(channel, cache_prefix, function):
log_debug(3, channel, cache_prefix)
# try the caching thing first
c_info = channel_info(channel)
if not c_info: # unknown channel
raise rhnFault(40, "could not find any data on channel '%s'" % channel)
cache_entry = "%s-%s" % (cache_prefix, channel)
ret = rhnCache.get(cache_entry, c_info["last_modified"])
if ret: # we scored a cache hit
log_debug(4, "Scored cache hit", channel)
# Mark the response as being already XMLRPC-encoded
rhnFlags.set("XMLRPC-Encoded-Response", 1)
return ret
ret = function(c_info["id"])
if not ret:
# we assume that channels with no packages are very fast to list,
# so we don't bother caching...
log_error("No packages found in channel",
c_info["id"], c_info["label"])
return []
# we need to append the channel label to the list
ret = list(map(lambda a, c=channel: a + (c,), ret))
ret = xmlrpclib.dumps((ret, ), methodresponse=1)
# Mark the response as being already XMLRPC-encoded
rhnFlags.set("XMLRPC-Encoded-Response", 1)
# set the cache
rhnCache.set(cache_entry, ret, c_info["last_modified"])
return ret
def getChannelInfoForKickstart(kickstart):
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt
where c.id = kt.channel_id
and kt.label = :kickstart_label
"""
h = rhnSQL.prepare(query)
h.execute(kickstart_label=str(kickstart))
return h.fetchone_dict()
def getChannelInfoForKickstartOrg(kickstart, org_id):
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt
where c.id = kt.channel_id
and kt.label = :kickstart_label
and kt.org_id = :org_id
"""
h = rhnSQL.prepare(query)
h.execute(kickstart_label=str(kickstart), org_id=int(org_id))
return h.fetchone_dict()
def getChannelInfoForKickstartSession(session):
# decode the session string
try:
session_id = int(session.split('x')[0].split(':')[0])
except Exception:
return None, None
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt,
rhnKickstartSession ks
where c.id = kt.channel_id
and kt.id = ks.kstree_id
and ks.id = :session_id
"""
h = rhnSQL.prepare(query)
h.execute(session_id=session_id)
return h.fetchone_dict()
def getChildChannelInfoForKickstart(kickstart, child):
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt,
rhnKickstartSession ks,
rhnChannel c2
where c2.id = kt.channel_id
and kt.label = :kickstart_label
and c.label = :child_label
and c.parent_channel = c2.id
"""
h = rhnSQL.prepare(query)
h.execute(kickstart_label=str(kickstart), child_label=str(child))
return h.fetchone_dict()
def getChannelInfoForTinyUrl(tinyurl):
query = """
select tu.url
from rhnTinyUrl tu
where tu.enabled = 'Y'
and tu.token = :tinyurl
"""
h = rhnSQL.prepare(query)
h.execute(tinyurl=str(tinyurl))
return h.fetchone_dict()
# list the obsoletes for a channel
def list_obsoletes(channel):
log_debug(3, channel)
# try the caching thing first
c_info = channel_info(channel)
if not c_info: # unknown channel
raise rhnFault(40, "could not find any data on channel '%s'" % channel)
cache_entry = "list_obsoletes-%s" % channel
ret = rhnCache.get(cache_entry, c_info["last_modified"])
if ret: # we scored a cache hit
log_debug(4, "Scored cache hit", channel)
return ret
# Get the obsoleted packages
h = rhnSQL.prepare("""
select distinct
pn.name,
pe.version, pe.release, pe.epoch,
pa.label arch,
pc.name obsolete_name,
pc.version obsolete_version,
p_info.sense
from rhnPackageCapability pc,
rhnPackageArch pa,
rhnPackageEVR pe,
rhnPackageName pn,
rhnPackage p,
( select cp.channel_id,
po.package_id, po.capability_id, po.sense
from rhnPackageObsoletes po,
rhnChannelPackage cp,
rhnChannel c
where 1=1
and c.label = :channel
and c.id = cp.channel_id
and cp.package_id = po.package_id
) p_info
where 1=1
and p_info.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pe.id
and p.package_arch_id = pa.id
and p_info.capability_id = pc.id
""")
h.execute(channel=str(channel))
# Store stuff in a dictionary to makes things simpler
hash = {}
while 1:
row = h.fetchone_dict()
if not row:
break
row = __stringify(row)
key = (row['name'], row['version'], row['release'],
row["epoch"], row['arch'])
value = key + (row['obsolete_name'], row['obsolete_version'],
row['sense'])
if key not in hash:
hash[key] = []
hash[key].append(value)
# Now grab a listall and match it against what we got
pkglist = list_packages_sql(c_info["id"])
result = []
for pkg in pkglist:
key = tuple(pkg[:5])
if key in hash:
for p in hash[key]:
result.append(p)
# we can cache this now
rhnCache.set(cache_entry, result, c_info["last_modified"])
return result
def __auth_user(server_id, username, password):
""" Auth if user can add/remove channel from given server """
log_debug(3, server_id, username)
# check the username and password for compliance
user = rhnUser.auth_username_password(username, password)
# The user's password checks, verify that they have perms on that
# server.
h = rhnSQL.prepare("""
select count(*)
from rhnUserServerPerms usp
where usp.user_id = :user_id
and usp.server_id = :server_id
""")
h.execute(user_id=str(user.getid()), server_id=str(server_id))
res = h.fetchone_dict()
if not res:
# Not allowed to perform administrative tasks on this server
raise rhnFault(37)
return 1
# small wrapper around a PL/SQL function
def subscribe_sql(server_id, channel_id, commit=1):
log_debug(3, server_id, channel_id, commit)
subscribe_channel = rhnSQL.Procedure("rhn_channel.subscribe_server")
try:
# don't run the EC yet
subscribe_channel(server_id, channel_id, 0)
except rhnSQL.SQLSchemaError:
e = sys.exc_info()[1]
if e.errno == 20102: # channel_server_one_base
log_error("Channel subscribe failed, "
"%s already subscribed to %s (?)" % (server_id, channel_id))
raise_with_tb(rhnFault(38, "Server already subscribed to %s" % channel_id), sys.exc_info()[2])
# If we got here, it's an unknown error; ISE (for now)
log_error("SQLSchemaError", e)
raise_with_tb(rhnException(e), sys.exc_info()[2])
except rhnSQL.SQLError:
e = sys.exc_info()[1]
# If we got here, it's an unknown error; ISE (for now)
log_error("SQLError", e)
raise_with_tb(rhnException(e), sys.exc_info()[2])
if commit:
rhnSQL.commit()
return 1
_query_parent_channel_subscribed = rhnSQL.Statement("""
select 1
from rhnChannel c
join rhnServerChannel sc on c.parent_channel = sc.channel_id
where sc.server_id = :sid
and c.label = :channel
""")
_query_can_subscribe = rhnSQL.Statement("""
select rhn_channel.user_role_check(:cid, wc.id, 'subscribe') as can_subscribe
from web_contact wc
where wc.login_uc = upper(:username)
""")
# subscribe a server to a channel with authentication
def subscribe_channel(server_id, channel, username, password):
log_debug(3, server_id, channel, username)
# If auth doesn't blow up we're fine
__auth_user(server_id, username, password)
# get the channel_id
h = rhnSQL.prepare("select id from rhnChannel where label = :channel")
h.execute(channel=str(channel))
ret = h.fetchone_dict()
if not ret:
log_error("Channel %s does not exist?" % channel)
raise rhnFault(40, "Channel %s does not exist?" % channel)
channel_id = ret['id']
# check if server is subscribed to the parent of the given channel
h = rhnSQL.prepare(_query_parent_channel_subscribed)
h.execute(sid=server_id, channel=str(channel))
ret = h.fetchone_dict()
if not ret:
log_error("Parent of channel %s is not subscribed to server" % channel)
raise rhnFault(32, "Parent of channel %s is not subscribed to server" % channel)
# check specific channel subscription permissions
h = rhnSQL.prepare(_query_can_subscribe)
h.execute(cid=channel_id, username=username)
ret = h.fetchone_dict()
if ret and ret['can_subscribe']:
subscribe_sql(server_id, channel_id)
return 1
raise rhnFault(71)
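# Illustrative usage sketch for subscribe_channel() (not part of the original
# module; the server id, channel label and credentials are hypothetical):
#
#   subscribe_channel(1000010000, 'rhel-x86_64-server-optional-7',
#                     'admin', 'secret')
#
# The call authenticates the user against the server, checks that the parent
# channel is already subscribed and that the user holds the 'subscribe' role,
# then runs subscribe_sql(); it raises rhnFault(71) if subscription is denied.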
# This class is only a convenient encapsulation of a server's attributes:
# server_id, org_id, release, arch, user_id. Sometimes we only pass the
# server_id, and later down the road we have to message "no channel for
# release foo, arch bar", but we don't know the release and arch anymore
class LiteServer:
_attributes = ['id', 'org_id', 'release', 'arch']
def __init__(self, **kwargs):
# Initialize attributes from **kwargs (set to None if value is not
# present)
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr))
def init_from_server(self, server):
self.id = server.getid()
self.org_id = server.server['org_id']
self.release = server.server['release']
self.arch = server.archname
return self
def __repr__(self):
dict = {}
for attr in self._attributes:
dict[attr] = getattr(self, attr)
return "<%s instance at %s: attributes=%s>" % (
self.__class__.__name__, id(self), dict)
# If raise_exceptions is set, BaseChannelDeniedError, NoBaseChannelError are
# raised
def guess_channels_for_server(server, user_id=None, none_ok=0,
raise_exceptions=0):
log_debug(3, server)
if not isinstance(server, LiteServer):
raise rhnException("Server object is not a LiteServer")
if None in (server.org_id, server.release, server.arch):
# need to obtain the release and/or arch and/or org_id
h = rhnSQL.prepare("""
select s.org_id, s.release, sa.label arch
from rhnServer s, rhnServerArch sa
where s.id = :server_id and s.server_arch_id = sa.id
""")
h.execute(server_id=server.id)
ret = h.fetchone_dict()
if not ret:
log_error("Could not get the release/arch "
"for server %s" % server.id)
raise rhnFault(8, "Could not find the release/arch "
"for server %s" % server.id)
if server.org_id is None:
server.org_id = ret["org_id"]
if server.release is None:
server.release = ret["release"]
if server.arch is None:
server.arch = ret["arch"]
if raise_exceptions and not none_ok:
# Let exceptions pass through
return channels_for_release_arch(server.release, server.arch,
server.org_id, user_id=user_id)
try:
return channels_for_release_arch(server.release, server.arch,
server.org_id, user_id=user_id)
except NoBaseChannelError:
if none_ok:
return []
log_error("No available channels for (server, org)",
(server.id, server.org_id), server.release, server.arch)
msg = _("Your account does not have access to any channels matching "
"(release='%(release)s', arch='%(arch)s')%(www_activation)s")
error_strings = {
'release': server.release,
'arch': server.arch,
'www_activation': ''
}
if CFG.REFER_TO_WWW:
error_strings['www_activation'] = _("\nIf you have a "
"registration number, please register with it first at "
"http://www.redhat.com/apps/activate/ and then try again.\n\n")
raise_with_tb(rhnFault(19, msg % error_strings), sys.exc_info()[2])
except BaseChannelDeniedError:
if none_ok:
return []
        raise_with_tb(rhnFault(71,
            _("Insufficient subscription permissions for release (%s, %s)")
            % (server.release, server.arch)), sys.exc_info()[2])
# Subscribes the server to channels
# can raise BaseChannelDeniedError, NoBaseChannelError
# Only used for new server registrations
def subscribe_server_channels(server, user_id=None, none_ok=0):
s = LiteServer().init_from_server(server)
# bretm 02/19/2007 -- have to leave none_ok in here for now due to how
# the code is setup for reg token crap; it'd be very nice to clean up that
# path to eliminate any chance for a server to be registered and not have base
# channels, excluding expiration of channel entitlements
channels = guess_channels_for_server(s, user_id=user_id, none_ok=none_ok,
raise_exceptions=1)
rhnSQL.transaction('subscribe_server_channels')
for c in channels:
subscribe_sql(s.id, c["id"], 0)
return channels
# small wrapper around a PL/SQL function
def unsubscribe_sql(server_id, channel_id, commit=1):
log_debug(3, server_id, channel_id, commit)
unsubscribe_channel = rhnSQL.Procedure("rhn_channel.unsubscribe_server")
try:
# don't run the EC yet
unsubscribe_channel(server_id, channel_id, 0)
except rhnSQL.SQLError:
log_error("Channel unsubscribe from %s failed for %s" % (
channel_id, server_id))
return 0
if commit:
rhnSQL.commit()
return 1
# unsubscribe a server from a channel
def unsubscribe_channel(server_id, channel, username, password):
log_debug(3, server_id, channel, username)
# If auth doesn't blow up we're fine
__auth_user(server_id, username, password)
# now get the id of the channel
h = rhnSQL.prepare("""
select id, parent_channel from rhnChannel where label = :channel
""")
h.execute(channel=channel)
ret = h.fetchone_dict()
if not ret:
log_error("Asked to unsubscribe server %s from non-existent channel %s" % (
server_id, channel))
raise rhnFault(40, "The specified channel '%s' does not exist." % channel)
if not ret["parent_channel"]:
log_error("Cannot unsubscribe %s from base channel %s" % (
server_id, channel))
raise rhnFault(72, "You can not unsubscribe %s from base channel %s." % (
server_id, channel))
# check specific channel subscription permissions
channel_id = ret['id']
h = rhnSQL.prepare(_query_can_subscribe)
h.execute(cid=channel_id, username=username)
ret = h.fetchone_dict()
if ret and ret['can_subscribe']:
return unsubscribe_sql(server_id, channel_id)
raise rhnFault(71)
# unsubscribe from all channels
def unsubscribe_all_channels(server_id):
log_debug(3, server_id)
# We need to unsubscribe the children channels before the base ones.
rhnSQL.transaction("unsub_all_channels")
h = rhnSQL.prepare("""
select
sc.channel_id id
from
rhnChannel c,
rhnServerChannel sc
where
sc.server_id = :server_id
and sc.channel_id = c.id
order by c.parent_channel nulls last
""")
h.execute(server_id=str(server_id))
while 1:
c = h.fetchone_dict()
if not c:
break
ret = unsubscribe_sql(server_id, c["id"], 0)
if not ret:
rhnSQL.rollback("unsub_all_channels")
raise rhnFault(36, "Could not unsubscribe server %s "
"from existing channels" % (server_id,))
# finished unsubscribing
return 1
# Unsubscribe the server from the channels in the list
# A channel is a hash containing at least the keys:
# [id, label, parent_channel]
def unsubscribe_channels(server_id, channels):
log_debug(4, server_id, channels)
if not channels:
# Nothing to do
return 1
# We need to unsubscribe the children channels before the base ones.
rhnSQL.transaction("unsub_channels")
base_channels = [x for x in channels if not x['parent_channel']]
child_channels = [x for x in channels if x['parent_channel']]
for channel in child_channels + base_channels:
ret = unsubscribe_sql(server_id, channel["id"], 0)
if not ret:
rhnSQL.rollback("unsub_channels")
raise rhnFault(36, "Could not unsubscribe server %s "
"from channel %s" % (server_id, channel["label"]))
# finished unsubscribing
return 1
# Subscribe the server to the channels in the list
# A channel is a hash containing at least the keys:
# [id, label, parent_channel]
def subscribe_channels(server_id, channels):
log_debug(4, server_id, channels)
if not channels:
# Nothing to do
return 1
# We need to subscribe the base channel before the child ones.
base_channels = [x for x in channels if not x['parent_channel']]
child_channels = [x for x in channels if x['parent_channel']]
for channel in base_channels + child_channels:
subscribe_sql(server_id, channel["id"], 0)
# finished subscribing
return 1
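# A minimal sketch of the channel dicts expected by the two helpers above
# (ids and labels are hypothetical); base channels carry a null parent_channel
# and are handled first on subscribe and last on unsubscribe.
def _channel_list_example(server_id):
    channels = [
        {'id': 101, 'label': 'rhel-i386-server-5', 'parent_channel': None},
        {'id': 202, 'label': 'rhn-tools-rhel-i386-server-5',
         'parent_channel': 101},
    ]
    subscribe_channels(server_id, channels)
    unsubscribe_channels(server_id, channels)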
# check if a server is subscribed to a channel
def is_subscribed(server_id, channel):
log_debug(3, server_id, channel)
h = rhnSQL.prepare("""
select 1 subscribed
from rhnServerChannel sc, rhnChannel c
where
sc.channel_id = c.id
and c.label = :channel
and sc.server_id = :server_id
""")
h.execute(server_id=str(server_id), channel=str(channel))
ret = h.fetchone_dict()
if not ret:
# System not subscribed to channel
return 0
return 1
# Returns 0, "", "" if system does not need any message, or
# (error_code, message_title, message) otherwise
def system_reg_message(server):
server_id = server.server['id']
# Is this system subscribed to a channel?
h = rhnSQL.prepare("""
select sc.channel_id
from rhnServerChannel sc
where sc.server_id = :server_id
""")
h.execute(server_id=server_id)
ret = h.fetchone_dict()
if not ret:
# System not subscribed to any channel
#
return (-1, s_invalid_channel_title,
s_invalid_channel_message %
(server.server["release"], server.archname))
# System does have a base channel; check entitlements
from rhnServer import server_lib # having this on top, cause TB due circular imports
entitlements = server_lib.check_entitlement(server_id)
if not entitlements:
# No entitlement
# We don't have an autoentitle preference for now, so display just one
# message
templates = rhnFlags.get('templateOverrides')
if templates and 'hostname' in templates:
hostname = templates['hostname']
else:
# Default to www
hostname = "rhn.redhat.com"
params = {
'entitlement_url': "https://%s"
"/rhn/systems/details/Edit.do?sid=%s" %
(hostname, server_id)
}
return -1, no_entitlement_title, no_entitlement_message % params
return 0, "", ""
def subscribe_to_tools_channel(server_id):
"""
Subscribes server_id to the RHN Tools channel associated with its base channel, if one exists.
"""
base_channel_dict = get_base_channel(server_id, none_ok=1)
if base_channel_dict is None:
raise NoBaseChannelError("Server %s has no base channel." %
str(server_id))
lookup_child_channels = rhnSQL.Statement("""
select id, label, parent_channel
from rhnChannel
where parent_channel = :id
""")
child_channel_data = rhnSQL.prepare(lookup_child_channels)
child_channel_data.execute(id=base_channel_dict['id'])
child_channels = child_channel_data.fetchall_dict()
if child_channels is None:
raise NoChildChannels("Base channel id %s has no child channels associated with it." %
base_channel_dict['id'])
tools_channel = None
for channel in child_channels:
if 'label' in channel:
if 'rhn-tools' in channel['label']:
tools_channel = channel
if tools_channel is None:
raise NoToolsChannel("Base channel id %s does not have a RHN Tools channel as a child channel." %
base_channel_dict['id'])
else:
if 'id' not in tools_channel:
raise InvalidChannel("RHN Tools channel has no id.")
if 'label' not in tools_channel:
raise InvalidChannel("RHN Tools channel has no label.")
if 'parent_channel' not in tools_channel:
raise InvalidChannel("RHN Tools channel has no parent_channel.")
subscribe_channels(server_id, [tools_channel])
# Various messages that can be reused
#
# bretm 02/07/2007 -- when we have better old-client documentation, probably
# will be safe to get rid of all this crap
h_invalid_channel_title = _("System Registered but Inactive")
h_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it is not subscribed to a channel. If you have not yet
activated your product for service, please visit our website at:
http://www.redhat.com/apps/activate/
...to activate your product.""")
s_invalid_channel_title = _("System Registered but Inactive")
s_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it could not be subscribed to a base channel.
Please contact your organization administrator for assistance.
""")
no_autoentitlement_message = _("""
This system has been successfully registered, but is not yet entitled
to service. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
no_entitlement_title = _("System Registered but Inactive")
no_entitlement_message = _("""
This system has been successfully registered, but no service entitlements
were available. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
|
wraiden/spacewalk
|
backend/server/rhnChannel.py
|
Python
|
gpl-2.0
| 70,207
|
[
"VisIt"
] |
0882c0edfbe25bd2982c0ded0b96954da88495bf21ef552a1c70827cba0c6ed8
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 17:40:07 2015
@author: casimp
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import h5py
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from pyxe.command_parsing import analysis_check
from pyxe.fitting_functions import strain_transformation, shear_transformation
from pyxe.plotting_tools import plot_complex, meshgrid_res, line_extract, \
az90, pawley_plot
from pyxe.command_parsing import complex_check, text_cleaning, name_convert
from pyxe.data_io import data_extract, detector_extract
from pyxe.fitting_functions import plane_stress, plane_strain
class DataViz(object):
def __init__(self, fpath):
""" Visualisation and data extraction from pyxe data object/hdf5 file.
Builds on the PeakAnalysis class, allowing for the 1d/2d vizualisation
of 1d/2d/3d data acquisition arrays. Provides functionality for
array re-alignment (flipping, swapping axes, re-centering). Also
allows for data to be saved to a text file.
Args:
fpath (str): Path to pyxe hdf5 file.
"""
self.fpath = fpath
with h5py.File(fpath, 'r') as f:
self.ndim, self.d1, self.d2, self.d3 = data_extract(f, 'dims')
self.q, self.I, self.phi = data_extract(f, 'raw')
self.peaks, self.peaks_err = data_extract(f, 'peaks')
self.fwhm, self.fwhm_err = data_extract(f, 'fwhm')
self.strain, self.strain_err = data_extract(f, 'strain')
self.strain_tensor = data_extract(f, 'tensor')[0]
self.E, self.v, self.G = data_extract(f, 'material')
self.stress_state, self.analysis_state = data_extract(f, 'state')
self.detector = detector_extract(f)
if self.stress_state is None:
self.stress_eqn = None
else:
p_strain = self.stress_state == 'plane strain'
self.stress_eqn = plane_strain if p_strain else plane_stress
def flipaxis(self, axis):
""" Flip axis (positive to negative). """
axes = {0: self.d1, 1: self.d2, 2: self.d3}
axes[axis] *= -1
def swapaxes(self, axis1, axis2):
""" Swap two axes of an array. Effectively rotates the data."""
if axis1 in [0, 1] and axis2 in [0, 1]:
self.d1, self.d2 = self.d2, self.d1
elif axis1 in [0, 2] and axis2 in [0, 2]:
self.d1, self.d3 = self.d3, self.d1
elif axis1 in [1, 2] and axis2 in [1, 2]:
self.d2, self.d3 = self.d3, self.d2
def centre(self, co_ord):
""" Centre array on point (tuple)."""
axes = {0: self.d1, 1: self.d2, 2: self.d3}
for idx, i in enumerate(co_ord):
axes[idx] -= i
def plot_intensity(self, pnt=None, az_idx=0, figsize=(9, 6), pawley=False,
q_lim=None, func='gaussian'):
""" Plots q against intensity (optionally including Pawley fit).
Args:
pnt (tuple): Define data point (index) else point (0, ) x ndim.
az_idx (int): Azimuthal slice index
figsize (tuple): Figure size
pawley (bool): Compute and overlay Pawley type fit to data
"""
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
pnt = (0,) * len(self.I[..., 0, 0].shape) if pnt is None else pnt
q, I = self.q[az_idx], self.I[pnt][az_idx]
if pawley:
pawley_plot(q, I, self.detector, az_idx, ax, q_lim, func)
else:
ax.plot(q, I, 'k-', linewidth=0.5)
ax.set_xlabel('q (A$^{-1}$)')
ax.set_ylabel('Intensity')
return ax
@analysis_check('strain fit')
def plot_strain_fit(self, pnt=None, figsize=(11, 5)):
""" Plots fitted in-plane strain field and tensor for given data point.
Plots strain wrt. phi and associated tensor fit. The tensor fit is
represented as a Mohr's circle, with e_xx, e_yy (for that point) being
highlighted on both plots.
Args:
pnt (tuple): Define data point (index) else point (0, ) x ndim.
figsize (tuple): Figure size
"""
pnt = (0,) * (self.strain.ndim - 1) if pnt is None else pnt
fig, (ax_1, ax_2) = plt.subplots(1, 2, figsize=figsize)
p = self.strain_tensor[pnt]
ax_1.plot(self.phi, self.strain[pnt], 'k*')
phi_2 = np.linspace(self.phi[0], self.phi[-1], 1000)
ax_1.plot(phi_2, strain_transformation(phi_2, *p), 'k-', linewidth=0.5)
ax_1.set_xlabel(r'$\phi$ (rad)', size=14)
ax_1.set_ylabel(r'$\epsilon$', size=14)
ax_1.ticklabel_format(axis='both', style='sci', scilimits=(-3, 3))
ax_2.ticklabel_format(axis='both', style='sci', scilimits=(-3, 3))
ax_2.set_xlabel(r'$\epsilon$', size=14)
        ax_2.set_ylabel(r'$\gamma$', size=14)
e_xx, e_yy, e_xy = self.strain_tensor[pnt]
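        # Mohr's circle for the fitted in-plane strain state: centred on the
        # mean normal strain with radius sqrt(e_xy**2 + ((e_xx - e_yy)/2)**2);
        # e_1 and e_2 below are the principal (max/min) strains.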
mean = (e_xx + e_yy) / 2
e_1 = mean + (e_xy**2 + ((e_xx - e_yy) / 2)**2)**0.5
e_2 = e_xx + e_yy - e_1
radius = (e_1 - e_2) / 2
for x, text in zip([self.phi[0], self.phi[0] + np.pi/2],
[r'$\epsilon_{xx}$', r'$\epsilon_{yy}$']):
ax_1.axvline(x, ymax=0.93, linewidth=0.5, ls='--', color='k')
y = ax_1.get_ylim()[1] * 0.96
ax_1.text(x, y, text, ha='center', va='bottom')
circ = plt.Circle((mean, 0), radius=radius, color='k', fill=False)
ax_2.add_patch(circ)
for x, y, text in zip([e_1, e_2, e_xx, e_yy],
[0, 0, e_xy, -e_xy],
[r'$\epsilon_{1}$', r'$\epsilon_{2}$',
r'$(\epsilon_{xx}$, $\epsilon_{xy})$',
r'$(\epsilon_{yy}$, $\epsilon_{yx})$']):
ax_2.plot(x, y, 'k.')
ax_2.annotate(' %s' % text, xy=(x, y), xytext=(x, y), ha='left')
ax_2.plot([e_xx, e_yy], [e_xy, -e_xy], 'k--', linewidth=0.5)
fig.tight_layout()
def extract_line(self, data='strain', phi=None, az_idx=None, pnt=None,
theta=0, z_idx=None, res=0.1):
""" Extracts line from data slice wrt. az slice index or angle (phi).
The line is defined by a point and an angle. The extracted data is
defined using the data variable (str), which must be one of the
following: peaks, peaks error, fwhm, fwhm error, strain, strain error,
shear strain, stress, shear stress.
Certain combinations of data type and azimuthal index/phi will not
work (e.g. can't extract peaks wrt. phi only wrt. az. index).
Note: must define EITHER phi or az_idx
Args:
data (str): Data type to extract (see above)
phi (float): Azimuthal angle in rad
az_idx (int): Azimuthal slice index
pnt (tuple): Define data point (index) else point (0, ) x ndim.
theta (float): Angle (rad) though 2D array
z_idx (int): Index of slice height in 3D array
            res (float): Point spacing of interpolated data
"""
data = self.extract_slice(data, phi, az_idx, z_idx)
if self.ndim == 1:
return self.d1, data
else:
x, y, d = line_extract(self.d1, self.d2, pnt, theta, res)
co_ords = (self.d1.flatten(), self.d2.flatten())
line = griddata(co_ords, data.flatten(), (x, y))
return x, y, d, line
def extract_slice(self, data='strain', phi=None, az_idx=None, z_idx=None):
""" Extract 2D data slice wrt. azimuthal slice index or angle (phi).
The extracted data is defined using the data variable (str), which
must be one of the following: peaks, peaks error, fwhm, fwhm error,
strain, strain error, shear strain, stress, shear stress.
Certain combinations of data type and azimuthal index/phi will not
work (e.g. can't extract peaks wrt. phi only wrt. az. index).
Note: must define EITHER phi or az_idx
Args:
data (str): Data type to extract (see above)
phi (float): Azimuthal angle in rad
az_idx (int): Azimuthal slice index
z_idx (int): Index of slice height in 3D array
"""
complex_check(data, self.analysis_state, phi, az_idx)
command = text_cleaning(data)
az_command = 'phi' if phi is not None else 'az_idx'
if az_command == 'az_idx':
az_idx = int(az_idx)
if 'stress' not in command:
data_command = {'peaks': self.peaks,
'peaks error': self.peaks_err,
'fwhm': self.fwhm,
'fwhm error': self.fwhm_err,
'strain': self.strain,
'strain error': self.strain_err}
data = data_command[command][..., az_idx]
else:
d = self.strain if 'err' not in command else self.strain_err
e_xx, e_yy = d[..., az_idx], d[..., az90(self.phi, az_idx)]
data = self.stress_eqn(e_xx, e_yy, self.E, self.v)
else:
tensor = self.strain_tensor
tensor = tensor[..., 0], tensor[..., 1], tensor[..., 2]
shear = True if 'shear' in command else False
stress = True if 'stress' in command else False
if shear:
e_xy = shear_transformation(phi, *tensor)
data = self.G * e_xy if stress else e_xy
elif stress:
e_xx = strain_transformation(phi, *tensor)
e_yy = strain_transformation(phi + np.pi / 2, *tensor)
data = self.stress_eqn(e_xx, e_yy, self.E, self.v)
else:
data = strain_transformation(phi, *tensor)
data = data[z_idx] if z_idx is not None else data
return data
def plot_line(self, data='strain', phi=None, az_idx=None, z_idx=None,
pnt=(0, 0), theta=0, res=0.1, pos='d', ax=False):
""" Plots line through data slice wrt. az slice index or angle (phi).
The line is defined by a point and an angle. The extracted data is
defined using the data variable (str), which must be one of the
following: peaks, peaks error, fwhm, fwhm error, strain, strain error,
shear strain, stress, shear stress.
Certain combinations of data type and azimuthal index/phi will not
work (e.g. can't extract peaks wrt. phi only wrt. az. index).
Note: must define EITHER phi or az_idx
Args:
data (str): Data type to extract (see above)
phi (float): Azimuthal angle in rad
az_idx (int): Azimuthal slice index
pnt (tuple): Define data point (index) else point (0, ) x ndim.
theta (float): Angle (rad) though 2D array
z_idx (int): Index of slice height in 3D array
            res (float): Point spacing of interpolated data
pos (str): Plot data against 'x', 'y' or line length 'd'
ax: Supply axis to plot on or (False) create new plot
"""
x, y, d, line = self.extract_line(data, phi, az_idx, pnt,
theta, z_idx, res)
position = {'x': x, 'y': y, 'd': d}
if not ax:
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot(1, 1, 1)
ax.plot(position[pos], line, 'k-')
ax.set_xlabel('Position ({})'.format('mm'))
ax.set_ylabel(data)
return ax
def plot_slice(self, data='strain', phi=None, az_idx=None, z_idx=None,
res=0.051, plot_func=None, **kwargs):
""" Plot 2D data slice wrt. azimuthal slice index or angle (phi).
The extracted data is defined using the data variable (str), which
must be one of the following: peaks, peaks error, fwhm, fwhm error,
strain, strain error, shear strain, stress, shear stress.
Certain combinations of data type and azimuthal index/phi will not
work (e.g. can't plot peaks wrt. phi only wrt. az. index):
Note: must define EITHER phi or az_idx
Args:
data (str): Data type to extract (see above)
phi (float): Azimuthal angle in rad
az_idx (int): Azimuthal slice index
z_idx (int): Index of slice height in 3D array
res (float): Resolution of re-gridded/plotted data points in mm
plot_func (func): User defined plotting function
kwargs (dict): Dict of params to be passed to plot_func.
"""
data = self.extract_slice(data, phi, az_idx, z_idx)
finite = np.isfinite(data)
        # If x and y are vectors, not 2D arrays
if self.d1.shape != self.peaks.shape[:-1]:
d1, d2 = np.meshgrid(self.d1, self.d2)
else:
d1, d2 = self.d1, self.d2
d1, d2 = meshgrid_res(d1, d2, spatial_resolution=res)
co_ords = (self.d1[finite], self.d2[finite])
z = griddata(co_ords, data[finite], (d1, d2))
plot_func = plot_complex if plot_func is None else plot_func
ax = plot_func(self.d1, self.d2, d1, d2, z, **kwargs)
return ax
def save_to_txt(self, fname, data, phi=None, az_idx=None, perp=True):
""" Save flattened, extracted data to text (csv) file.
Args:
fname (str): Fname/path to save extracted text to
data (list): List of data types to extract (see above)
phi (float): Azimuthal angle in rad
az_idx (int): Azimuthal slice index
perp (bool): For defined angle/idx, save data perp to this.
"""
n_lst = [d for d in ['d1', 'd2', 'd3'] if getattr(self, d) is not None]
d_lst = [getattr(self, d) for d in n_lst]
for d in data:
print(d)
name = name_convert(d, phi, az_idx)
d_lst.append(self.extract_slice(d, phi=phi, az_idx=az_idx))
n_lst.append(name)
if perp:
a90 = az90(self.phi, az_idx) if az_idx is not None else az_idx
p90 = phi + np.pi/2 if phi is not None else phi
name = name_convert(d, p90, a90, perp)
d_lst.append(self.extract_slice(d, phi=p90, az_idx=a90))
n_lst.append(name)
data = np.hstack([d.reshape(d.size, 1) for d in d_lst])
headers = ','.join(n_lst)
np.savetxt(fname, data, delimiter=',', header=headers)
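# A minimal usage sketch (the hdf5 path and options are hypothetical), assuming
# an analysed pyxe file produced by the peak-fitting step already exists.
def _dataviz_usage_example():
    data = DataViz('analysed_scan.h5')
    data.plot_intensity(pnt=(0, 0), az_idx=0)      # raw q vs. intensity
    ax = data.plot_slice('strain', phi=0)          # 2D strain map at phi = 0
    data.save_to_txt('strain.csv', ['strain'], phi=0)
    return ax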
|
Simclass/EDXD_Analysis
|
pyxe/plotting.py
|
Python
|
mit
| 14,755
|
[
"Gaussian"
] |
270019aff841d35c1b7002c6e7eb619e986e9c3f9385e43c922cb9f8e93f495a
|
# search.py
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s,s,w,s,w,w,s,w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first [p 85].
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm [Fig. 3.7].
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** YOUR CODE HERE ***"
    # fringe: LIFO stack of nodes still to expand; each node is a list of
    # [parent_state, state, action, step_cost]
    # explr: states already expanded (graph search, so never re-expanded)
    # visit: nodes in expansion order, used to rebuild the path afterwards
    soln = []
    explr = []
    visit = []
    fringe = util.Stack()
    node = [None, problem.getStartState(), '', 0]
    fringe.push(node)
    while not fringe.isEmpty():
        node = parent, state, dirctn, cost = fringe.pop()
        if problem.isGoalState(state):
            visit.append(node)
            soln.append(node[2])
            break
        if not (state in explr):
            for successor in problem.getSuccessors(state):
                fringe.push([state, successor[0], successor[1], successor[2]])
            visit.append(node)
            explr.append(state)
    # Walk back from the goal node, following parent links through the visited
    # nodes and collecting the actions in reverse order.
    parentNode = visit.pop()
    while len(visit) != 1:
        curNode = visit.pop()
        while curNode[1] != parentNode[0]:
            curNode = visit.pop()
            if curNode[0] is None:
                break
        parentNode = curNode
        soln.append(curNode[2])
    return soln[::-1]
    util.raiseNotDefined()
def breadthFirstSearch(problem):
"Search the shallowest nodes in the search tree first. [p 81]"
"*** YOUR CODE HERE ***"
soln = []
explr = []
visit = []
fringe = util.Queue()
node = [None, problem.getStartState(), '', 0]
fringe.push(node)
while not fringe.isEmpty():
node = parent, state, dirctn, cost = fringe.pop()
if problem.isGoalState(state):
visit.append(node)
soln.append(node[2])
break
if not (state in explr):
for successor in problem.getSuccessors(state):
fringe.push([state, successor[0], successor[1], successor[2]])
visit.append(node)
explr.append(state)
parentNode = visit.pop()
while len(visit) != 1:
curNode = visit.pop()
while curNode[1] != parentNode[0]:
curNode = visit.pop()
if curNode[0] is None:
break
parentNode = curNode
soln.append(curNode[2])
return soln[::-1]
util.raiseNotDefined()
def uniformCostSearch(problem):
"Search the node of least total cost first. "
"*** YOUR CODE HERE ***"
soln = []
explr = []
visit = []
fringe = util.PriorityQueue()
node = [None, problem.getStartState(), '', 0]
fringe.push(node, 0)
while not fringe.isEmpty():
flag = True
node = parent, state, dirctn, cost = fringe.pop()
#print '-------------------------'
#print node
#print explr
if problem.isGoalState(state):
visit.append(node)
soln.append(node[2])
break
for vState, vCost in explr:
if state == vState and cost >= vCost:
#print str(vState) + ' $$ ' + str(vCost)
flag = False
if flag:
for successor in problem.getSuccessors(state):
#print cost + successor[2]
fringe.push([state, successor[0], successor[1], cost+successor[2]], cost+successor[2])
visit.append(node)
explr.append((state, cost))
parentNode = visit.pop()
while len(visit) != 1:
curNode = visit.pop()
while curNode[1] != parentNode[0]:
curNode = visit.pop()
if curNode[0] is None:
break
parentNode = curNode
soln.append(curNode[2])
return soln[::-1]
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
"*** YOUR CODE HERE ***"
soln = []
explr = []
visit = []
fringe = util.PriorityQueue()
node = [None, problem.getStartState(), '', 0]
fringe.push(node, heuristic(node[1], problem))
while not fringe.isEmpty():
flag = True
node = parent, state, dirctn, cost = fringe.pop()
if problem.isGoalState(state):
visit.append(node)
soln.append(node[2])
break
for vState, vCost in explr:
if state == vState and cost >= vCost:
flag = False
if flag:
for successor in problem.getSuccessors(state):
fringe.push([state, successor[0], successor[1], cost + successor[2]], \
cost + successor[2] + heuristic(state, problem))
visit.append(node)
explr.append((state, cost))
parentNode = visit.pop()
while len(visit) != 1:
curNode = visit.pop()
while curNode[1] != parentNode[0]:
curNode = visit.pop()
if curNode[0] is None:
break
parentNode = curNode
soln.append(curNode[2])
return soln[::-1]
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
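# A small self-contained sanity check of the generic search functions above,
# using a toy two-state SearchProblem; it assumes the Berkeley Pacman 'util'
# module imported at the top of this file is available.
class _ToySearchProblem(SearchProblem):
    def getStartState(self):
        return 'A'
    def isGoalState(self, state):
        return state == 'B'
    def getSuccessors(self, state):
        # One legal move: go East from the start 'A' to the goal 'B'.
        return [('B', 'East', 1)] if state == 'A' else []
    def getCostOfActions(self, actions):
        return len(actions)
def _toy_search_example():
    # Each strategy should return the single-action plan ['East'].
    return bfs(_ToySearchProblem()), astar(_ToySearchProblem())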
|
evilzone/Artificial-Intelligence
|
search.py
|
Python
|
gpl-2.0
| 7,805
|
[
"VisIt"
] |
ee7d7207666b8c85928aed7847fb76c6003a5e2c508af684a0e76c2ba9f523c4
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla-specific Buildbot steps.
#
# The Initial Developer of the Original Code is
# Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Brian Warner <warner@lothar.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
from zope.interface import implements
from twisted.python import log
from twisted.internet import defer
from twisted.application import service
from buildbot import interfaces, util
class ChangeManager(service.MultiService):
"""This is the master-side service which receives file change
notifications from a VCS. It keeps a log of these changes, enough to
provide for the HTML waterfall display, and to tell
temporarily-disconnected bots what they missed while they were
offline.
Change notifications come from two different kinds of sources. The first
is a PB service (servicename='changemaster', perspectivename='change'),
which provides a remote method called 'addChange', which should be
called with a dict that has keys 'filename' and 'comments'.
The second is a list of objects derived from the
L{buildbot.changes.base.ChangeSource} class. These are added with
.addSource(), which also sets the .changemaster attribute in the source
to point at the ChangeMaster. When the application begins, these will
be started with .start() . At shutdown time, they will be terminated
with .stop() . They must be persistable. They are expected to call
self.changemaster.addChange() with Change objects.
There are several different variants of the second type of source:
- L{buildbot.changes.mail.MaildirSource} watches a maildir for CVS
commit mail. It uses DNotify if available, or polls every 10
seconds if not. It parses incoming mail to determine what files
were changed.
- L{buildbot.changes.freshcvs.FreshCVSSource} makes a PB
connection to the CVSToys 'freshcvs' daemon and relays any
changes it announces.
"""
implements(interfaces.IEventSource)
changeHorizon = 0
name = "changemanager"
def __init__(self):
service.MultiService.__init__(self)
self._cache = util.LRUCache()
def addSource(self, source):
assert interfaces.IChangeSource.providedBy(source)
assert service.IService.providedBy(source)
source.setServiceParent(self)
def removeSource(self, source):
assert source in self
return defer.maybeDeferred(source.disownServiceParent)
def addChange(self, change):
"""Deliver a file change event. The event should be a Change object.
This method will timestamp the object as it is received."""
log.msg("adding change, who %s, %d files, rev=%s, branch=%s, repository=%s, "
"comments %s, category %s" % (change.who, len(change.files),
change.revision, change.branch, change.repository,
change.comments, change.category))
#self.pruneChanges() # use self.changeHorizon
# for now, add these in the background, without waiting for it. TODO:
# return a Deferred.
#self.queue.add(db.runInteraction, self.addChangeToDatabase, change)
# this sets change.number, if it wasn't already set (by the
# migration-from-pickle code). It also fires a notification which
# wakes up the Schedulers.
self.parent.addChange(change)
# IEventSource methods
def eventGenerator(self, branches=[], categories=[], committers=[], minTime=0):
return self.parent.db.changeEventGenerator(branches, categories,
committers, minTime)
def getChangeNumberedNow(self, changeid, t=None):
return self.parent.db.getChangeNumberedNow(changeid, t)
def getChangeByNumber(self, changeid):
return self.parent.db.getChangeByNumber(changeid)
def getChangesGreaterThan(self, last_changeid, t=None):
return self.parent.db.getChangesGreaterThan(last_changeid, t)
def getChangesByNumber(self, changeids):
return self.parent.db.getChangesByNumber(changeids)
def getLatestChangeNumberNow(self, t=None):
return self.parent.db.getLatestChangeNumberNow(t)
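# A minimal usage sketch (names are illustrative): the change manager is made
# a child of the build master, sources are attached, and Change objects are
# delivered via addChange(); 'master', 'source' and 'change' are assumed to be
# a BuildMaster service, an IChangeSource/IService provider and a Change.
def _change_manager_usage_example(master, source, change):
    manager = ChangeManager()
    manager.setServiceParent(master)  # the parent supplies addChange()/db
    manager.addSource(source)
    manager.addChange(change)  # logged, then handed off to the master
    return manager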
|
centrumholdings/buildbot
|
buildbot/changes/manager.py
|
Python
|
gpl-2.0
| 5,753
|
[
"Brian"
] |
b2af8e5b962bd0dec77102af9614f9d4ab6ec44ae92e66f0211995eeb6496bad
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python collection that can contain further collections as well as other objects
called atoms. Note that numpy arrays are considered atoms.
nest recognizes the following types of collections:
1. tuple
2. namedtuple
3. dict
4. OrderedDict
5. MutableMapping
6. attr.s
attr.s decorated classes (http://www.attrs.org) are also supported, in the
same way as `namedtuple`.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
import wrapt as _wrapt
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import _pywrap_nest
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util.compat import collections_abc as _collections_abc
from tensorflow.python.util.tf_export import tf_export
_SHALLOW_TREE_HAS_INVALID_KEYS = (
"The shallow_tree's keys are not a subset of the input_tree's keys. The "
"shallow_tree has the following keys that are not in the input_tree: {}.")
_STRUCTURES_HAVE_MISMATCHING_TYPES = (
"The two structures don't have the same sequence type. Input structure has "
"type {input_type}, while shallow structure has type {shallow_type}.")
_STRUCTURES_HAVE_MISMATCHING_LENGTHS = (
"The two structures don't have the same sequence length. Input "
"structure has length {input_length}, while shallow structure has length "
"{shallow_length}."
)
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = (
"The input_tree has fewer elements than the shallow_tree. Input structure "
"has length {input_size}, while shallow structure has length "
"{shallow_size}.")
_IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ = (
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: {}.")
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs, sorted by attr_name.
"""
attrs = getattr(obj.__class__, "__attrs_attrs__")
attr_names = (a.name for a in attrs)
return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(dict_.keys())
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _is_namedtuple(instance, strict=False):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
strict: If True, `instance` is considered to be a `namedtuple` only if
it is a "plain" namedtuple. For instance, a class inheriting
from a `namedtuple` will be considered to be a `namedtuple`
iff `strict=False`.
Returns:
True if `instance` is a `namedtuple`.
"""
return _pywrap_utils.IsNamedtuple(instance, strict)
# See the swig file (util.i) for documentation.
_is_mapping_view = _pywrap_utils.IsMappingView
_is_attrs = _pywrap_utils.IsAttrs
_is_composite_tensor = _pywrap_utils.IsCompositeTensor
_is_type_spec = _pywrap_utils.IsTypeSpec
_is_mutable_mapping = _pywrap_utils.IsMutableMapping
_is_mapping = _pywrap_utils.IsMapping
@tf_export("__internal__.nest.is_attrs", v1=[])
def is_attrs(obj):
"""Returns a true if its input is an instance of an attr.s decorated class."""
return _is_attrs(obj)
@tf_export("__internal__.nest.is_mapping", v1=[])
def is_mapping(obj):
"""Returns a true if its input is a collections.Mapping."""
return _is_mapping(obj)
@tf_export("__internal__.nest.sequence_like", v1=[])
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
`collections.OrderedDict`, or `composite_tensor.Composite_Tensor`
or `type_spec.TypeSpec`.
args: elements to be converted to the `instance` type.
Returns:
`args` with the type of `instance`.
"""
if _is_mutable_mapping(instance):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
if instance_type == _collections.defaultdict:
d = _collections.defaultdict(instance.default_factory)
else:
d = instance_type()
for key in instance:
d[key] = result[key]
return d
elif _is_mapping(instance):
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
tf_logging.log_first_n(
tf_logging.WARN, "Mapping types may not work well with tf.nest. Prefer"
" using MutableMapping for {}".format(instance_type), 1)
try:
return instance_type((key, result[key]) for key in instance)
except TypeError as err:
raise TypeError("Error creating an object of type {} like {}. Note that "
"it must accept a single positional argument "
"representing an iterable of key-value pairs, in "
"addition to self. Cause: {}".format(
type(instance), instance, err))
elif _is_mapping_view(instance):
# We can't directly construct mapping views, so we create a list instead
return list(args)
elif _is_namedtuple(instance) or _is_attrs(instance):
if isinstance(instance, _wrapt.ObjectProxy):
instance_type = type(instance.__wrapped__)
else:
instance_type = type(instance)
return instance_type(*args)
elif _is_composite_tensor(instance):
assert len(args) == 1
spec = instance._type_spec # pylint: disable=protected-access
return spec._from_components(args[0]) # pylint: disable=protected-access
elif _is_type_spec(instance):
# Pack a CompositeTensor's components according to a TypeSpec.
assert len(args) == 1
return instance._from_components(args[0]) # pylint: disable=protected-access
elif isinstance(instance, _six.moves.range):
return _sequence_like(list(instance), args)
elif isinstance(instance, _wrapt.ObjectProxy):
# For object proxies, first create the underlying type and then re-wrap it
# in the proxy type.
return type(instance)(_sequence_like(instance.__wrapped__, args))
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
for _, v in _yield_sorted_items(iterable):
yield v
def _yield_sorted_items(iterable):
"""Yield (key, value) pairs for `iterable` in a deterministic order.
For Sequences, the key will be an int, the array index of a value.
For Mappings, the key will be the dictionary key.
For objects (e.g. namedtuples), the key will be the attribute name.
In all cases, the keys will be iterated in sorted order.
Args:
iterable: an iterable.
Yields:
The iterable's (key, value) pairs, in order of sorted keys.
"""
# Ordered to check common structure types (list, tuple, dict) first.
if isinstance(iterable, list):
for item in enumerate(iterable):
yield item
# namedtuples handled separately to avoid expensive namedtuple check.
elif type(iterable) == tuple: # pylint: disable=unidiomatic-typecheck
for item in enumerate(iterable):
yield item
elif isinstance(iterable, (dict, _collections_abc.Mapping)):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield key, iterable[key]
elif _is_attrs(iterable):
for item in _get_attrs_items(iterable):
yield item
elif _is_namedtuple(iterable):
for field in iterable._fields:
yield field, getattr(iterable, field)
elif _is_composite_tensor(iterable):
type_spec = iterable._type_spec # pylint: disable=protected-access
yield type_spec.value_type.__name__, type_spec._to_components(iterable) # pylint: disable=protected-access
elif _is_type_spec(iterable):
# Note: to allow CompositeTensors and their TypeSpecs to have matching
# structures, we need to use the same key string here.
yield iterable.value_type.__name__, iterable._component_specs # pylint: disable=protected-access
else:
for item in enumerate(iterable):
yield item
# See the swig file (util.i) for documentation.
is_sequence = _pywrap_utils.IsSequence
# See the swig file (util.i) for documentation.
is_sequence_or_composite = _pywrap_utils.IsSequenceOrComposite
@tf_export("nest.is_nested")
def is_nested(seq):
"""Returns true if its input is a collections.abc.Sequence (except strings).
>>> tf.nest.is_nested("1234")
False
>>> tf.nest.is_nested([1, 3, [4, 5]])
True
>>> tf.nest.is_nested(((7, 8), (5, 6)))
True
>>> tf.nest.is_nested([])
True
>>> tf.nest.is_nested({"a": 1, "b": 2})
True
>>> tf.nest.is_nested({"a": 1, "b": 2}.keys())
True
>>> tf.nest.is_nested({"a": 1, "b": 2}.values())
True
>>> tf.nest.is_nested({"a": 1, "b": 2}.items())
True
>>> tf.nest.is_nested(set([1, 2]))
False
>>> ones = tf.ones([2, 3])
>>> tf.nest.is_nested(ones)
False
Args:
seq: an input sequence.
Returns:
True if the sequence is a not a string and is a collections.abc.Sequence
or a dict.
"""
return is_sequence(seq)
@tf_export("nest.flatten")
def flatten(structure, expand_composites=False):
"""Returns a flat list from a given nested structure.
  If nest is not a structure, tuple (or a namedtuple), dict, or an attrs class,
then returns a single-element list:
[nest].
This is the inverse of the `nest.pack_sequence_as` method that takes in a
flattened list and re-packs it into the nested structure.
In the case of dict instances, the sequence consists of the values, sorted by
key to ensure deterministic behavior. This is true also for OrderedDict
instances: their sequence order is ignored, the sorting order of keys is used
instead. The same convention is followed in `nest.pack_sequence_as`. This
correctly repacks dicts and OrderedDicts after they have been flattened, and
also allows flattening an OrderedDict and then repacking it back using a
corresponding plain dict, or vice-versa. Dictionaries with non-sortable keys
cannot be flattened.
Users must not modify any collections used in nest while this function is
running.
Examples:
1. Python dict (ordered by key):
>>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" }
>>> tf.nest.flatten(dict)
['value1', 'value2', 'value3']
2. For a nested python tuple:
>>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0)
>>> tf.nest.flatten(tuple)
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
3. For a nested dictionary of dictionaries:
>>> dict = { "key3": {"c": (1.0, 2.0), "a": (3.0)},
... "key1": {"m": "val1", "g": "val2"} }
>>> tf.nest.flatten(dict)
['val2', 'val1', 3.0, 1.0, 2.0]
4. Numpy array (will not flatten):
>>> array = np.array([[1, 2], [3, 4]])
>>> tf.nest.flatten(array)
[array([[1, 2],
[3, 4]])]
5. `tf.Tensor` (will not flatten):
>>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> tf.nest.flatten(tensor)
[<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=float32)>]
  6. `tf.RaggedTensor`: This is a composite tensor whose representation consists
of a flattened list of 'values' and a list of 'row_splits' which indicate how
to chop up the flattened list into different rows. For more details on
`tf.RaggedTensor`, please visit
https://www.tensorflow.org/api_docs/python/tf/RaggedTensor.
with `expand_composites=False`, we just return the RaggedTensor as is.
>>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]])
>>> tf.nest.flatten(tensor, expand_composites=False)
[<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2]]>]
with `expand_composites=True`, we return the component Tensors that make up
the RaggedTensor representation (the values and row_splits tensors)
>>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]])
>>> tf.nest.flatten(tensor, expand_composites=True)
[<tf.Tensor: shape=(7,), dtype=int32, numpy=array([3, 1, 4, 1, 5, 9, 2],
dtype=int32)>,
<tf.Tensor: shape=(4,), dtype=int64, numpy=array([0, 4, 4, 7])>]
Args:
structure: an arbitrarily nested structure. Note, numpy arrays are
considered atoms and are not flattened.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the flattened version of the input.
Raises:
TypeError: The nest is or contains a dict with non-sortable keys.
"""
if structure is None:
return [None]
expand_composites = bool(expand_composites)
return _pywrap_utils.Flatten(structure, expand_composites)
# See the swig file (util.i) for documentation.
_same_namedtuples = _pywrap_utils.SameNamedtuples
class _DotString(object):
__slots__ = []
def __str__(self):
return "."
def __repr__(self):
return "."
_DOT = _DotString()
@tf_export("nest.assert_same_structure")
def assert_same_structure(nest1, nest2, check_types=True,
expand_composites=False):
"""Asserts that two structures are nested in the same way.
Note the method does not check the types of data inside the structures.
Examples:
* These scalar vs. scalar comparisons will pass:
>>> tf.nest.assert_same_structure(1.5, tf.Variable(1, tf.uint32))
>>> tf.nest.assert_same_structure("abc", np.array([1, 2]))
* These sequence vs. sequence comparisons will pass:
>>> structure1 = (((1, 2), 3), 4, (5, 6))
>>> structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
>>> structure3 = [(("a", "b"), "c"), "d", ["e", "f"]]
>>> tf.nest.assert_same_structure(structure1, structure2)
>>> tf.nest.assert_same_structure(structure1, structure3, check_types=False)
>>> import collections
>>> tf.nest.assert_same_structure(
... collections.namedtuple("bar", "a b")(1, 2),
... collections.namedtuple("foo", "a b")(2, 3),
... check_types=False)
>>> tf.nest.assert_same_structure(
... collections.namedtuple("bar", "a b")(1, 2),
... { "a": 1, "b": 2 },
... check_types=False)
>>> tf.nest.assert_same_structure(
... { "a": 1, "b": 2, "c": 3 },
... { "c": 6, "b": 5, "a": 4 })
>>> ragged_tensor1 = tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8])
>>> ragged_tensor2 = tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4],
... row_splits=[0, 3])
>>> tf.nest.assert_same_structure(
... ragged_tensor1,
... ragged_tensor2,
... expand_composites=True)
* These examples will raise exceptions:
>>> tf.nest.assert_same_structure([0, 1], np.array([0, 1]))
Traceback (most recent call last):
...
ValueError: The two structures don't have the same nested structure
>>> tf.nest.assert_same_structure(
... collections.namedtuple('bar', 'a b')(1, 2),
... collections.namedtuple('foo', 'a b')(2, 3))
Traceback (most recent call last):
...
TypeError: The two structures don't have the same nested structure
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as well,
including the keys of dictionaries. If set to `False`, for example a
list and a tuple of objects will look the same if they have the same
size. Note that namedtuples with identical name and fields are always
considered to have the same shallow structure. Two types will also be
considered the same if they are both list subtypes (which allows "list"
and "_ListWrapper" from trackable dependency tracking to compare
equal).
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
  # Convert to bool explicitly as otherwise pybind will not be able to handle
  # the type mismatch message correctly. See GitHub issue 42329 for details.
check_types = bool(check_types)
expand_composites = bool(expand_composites)
try:
_pywrap_utils.AssertSameStructure(nest1, nest2, check_types,
expand_composites)
except (ValueError, TypeError) as e:
str1 = str(map_structure(lambda _: _DOT, nest1))
str2 = str(map_structure(lambda _: _DOT, nest2))
raise type(e)("%s\n"
"Entire first structure:\n%s\n"
"Entire second structure:\n%s"
% (str(e), str1, str2))
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value do not have the same structure layout, or
if keys are not unique.
"""
return _pywrap_nest.FlattenDictItems(dictionary)
def _packed_nest_with_indices(structure, flat, index, is_seq, sequence_fn=None):
"""Helper function for pack_sequence_as.
Args:
structure: Substructure (list / tuple / dict) to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_seq: Function used to test if a value should be treated as a sequence.
sequence_fn: Function used to generate a new sequence instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
sequence_fn = sequence_fn or _sequence_like
for s in _yield_value(structure):
if is_seq(s):
new_index, child = _packed_nest_with_indices(s, flat, index, is_seq,
sequence_fn)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
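# A small worked example of the packing helper above (illustrative values):
# packing the flat list [1, 2, 3] into the shape of ((_, _), _) consumes all
# three values and rebuilds the nested tuple ((1, 2), 3).
def _packed_nest_with_indices_example():
  structure = ((0, 0), 0)
  new_index, packed = _packed_nest_with_indices(structure, [1, 2, 3], 0,
                                                is_sequence)
  assert new_index == 3
  return _sequence_like(structure, packed)  # ((1, 2), 3)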
def _pack_sequence_as(structure, flat_sequence, expand_composites,
sequence_fn=None):
"""Implements sequence packing, with the option to alter the structure."""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
sequence_fn = sequence_fn or _sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_seq(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a sequence, but found "
"incompatible type `{}` instead."
.format(truncate(flat_sequence, 100), type(flat_sequence)))
if not is_seq(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"structure is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure), truncate(structure, 100), type(flat_sequence),
len(flat_sequence), truncate(flat_sequence, 100)))
return flat_sequence[0]
try:
final_index, packed = _packed_nest_with_indices(structure, flat_sequence,
0, is_seq, sequence_fn)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but "
"flat_sequence had %d elements. Structure: %s, flat_sequence: %s." %
(len(flat_structure), len(flat_sequence), structure, flat_sequence))
return sequence_fn(structure, packed)
@tf_export("nest.pack_sequence_as")
def pack_sequence_as(structure, flat_sequence, expand_composites=False):
"""Returns a given flattened sequence packed into a given structure.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
If `structure` is or contains a dict instance, the keys will be sorted to
pack the flat sequence in deterministic order. This is true also for
`OrderedDict` instances: their sequence order is ignored, the sorting order of
keys is used instead. The same convention is followed in `flatten`.
This correctly repacks dicts and `OrderedDict`s after they have been
flattened, and also allows flattening an `OrderedDict` and then repacking it
back using a corresponding plain dict, or vice-versa.
Dictionaries with non-sortable keys cannot be flattened.
Examples:
1. Python dict:
>>> structure = { "key3": "", "key1": "", "key2": "" }
>>> flat_sequence = ["value1", "value2", "value3"]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
{'key3': 'value3', 'key1': 'value1', 'key2': 'value2'}
2. For a nested python tuple:
>>> structure = (('a','b'), ('c','d','e'), 'f')
>>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
((1.0, 2.0), (3.0, 4.0, 5.0), 6.0)
3. For a nested dictionary of dictionaries:
>>> structure = { "key3": {"c": ('alpha', 'beta'), "a": ('gamma')},
... "key1": {"e": "val1", "d": "val2"} }
>>> flat_sequence = ['val2', 'val1', 3.0, 1.0, 2.0]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
{'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}}
4. Numpy array (considered a scalar):
>>> structure = ['a']
>>> flat_sequence = [np.array([[1, 2], [3, 4]])]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
[array([[1, 2],
[3, 4]])]
5. tf.Tensor (considered a scalar):
>>> structure = ['a']
>>> flat_sequence = [tf.constant([[1., 2., 3.], [4., 5., 6.]])]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
[<tf.Tensor: shape=(2, 3), dtype=float32,
numpy= array([[1., 2., 3.], [4., 5., 6.]], dtype=float32)>]
  6. `tf.RaggedTensor`: This is a composite tensor whose representation consists
of a flattened list of 'values' and a list of 'row_splits' which indicate how
to chop up the flattened list into different rows. For more details on
`tf.RaggedTensor`, please visit
https://www.tensorflow.org/api_docs/python/tf/RaggedTensor.
With `expand_composites=False`, we treat RaggedTensor as a scalar.
>>> structure = { "foo": tf.ragged.constant([[1, 2], [3]]),
... "bar": tf.constant([[5]]) }
>>> flat_sequence = [ "one", "two" ]
>>> tf.nest.pack_sequence_as(structure, flat_sequence,
... expand_composites=False)
{'foo': 'two', 'bar': 'one'}
With `expand_composites=True`, we expect that the flattened input contains
the tensors making up the ragged tensor i.e. the values and row_splits
tensors.
>>> structure = { "foo": tf.ragged.constant([[1., 2.], [3.]]),
... "bar": tf.constant([[5.]]) }
>>> tensors = tf.nest.flatten(structure, expand_composites=True)
>>> print(tensors)
[<tf.Tensor: shape=(1, 1), dtype=float32, numpy=array([[5.]],
dtype=float32)>,
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([1., 2., 3.],
dtype=float32)>,
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([0, 2, 3])>]
>>> verified_tensors = [tf.debugging.check_numerics(t, 'invalid tensor: ')
... if t.dtype==tf.float32 else t
... for t in tensors]
>>> tf.nest.pack_sequence_as(structure, verified_tensors,
... expand_composites=True)
{'foo': <tf.RaggedTensor [[1.0, 2.0], [3.0]]>,
'bar': <tf.Tensor: shape=(1, 1), dtype=float32, numpy=array([[5.]],
dtype=float32)>}
Args:
structure: Nested structure, whose structure is given by nested lists,
tuples, and dicts. Note: numpy arrays and strings are considered
scalars.
flat_sequence: flat sequence to pack.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If `flat_sequence` and `structure` have different
element counts.
TypeError: `structure` is or contains a dict with non-sortable keys.
"""
return _pack_sequence_as(structure, flat_sequence, expand_composites)
@tf_export("nest.map_structure")
def map_structure(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain results with the same structure layout.
Examples:
* A single Python dict:
>>> a = {"hello": 24, "world": 76}
>>> tf.nest.map_structure(lambda p: p * 2, a)
{'hello': 48, 'world': 152}
* Multiple Python dictionaries:
>>> d1 = {"hello": 24, "world": 76}
>>> d2 = {"hello": 36, "world": 14}
>>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2)
{'hello': 60, 'world': 90}
* A single Python list:
>>> a = [24, 76, "ab"]
>>> tf.nest.map_structure(lambda p: p * 2, a)
[48, 152, 'abab']
* Scalars:
>>> tf.nest.map_structure(lambda x, y: x + y, 3, 4)
7
* Empty structures:
>>> tf.nest.map_structure(lambda x: x + 1, ())
()
*. Check the types of iterables:
>>> s1 = (((1, 2), 3), 4, (5, 6))
>>> s1_list = [[[1, 2], 3], 4, [5, 6]]
>>> tf.nest.map_structure(lambda x, y: None, s1, s1_list)
Traceback (most recent call last):
...
TypeError: The two structures don't have the same nested structure
* Type check is set to False:
>>> s1 = (((1, 2), 3), 4, (5, 6))
>>> s1_list = [[[1, 2], 3], 4, [5, 6]]
>>> tf.nest.map_structure(lambda x, y: None, s1, s1_list, check_types=False)
(((None, None), None), None, (None, None))
Args:
func: A callable that accepts as many arguments as there are structures.
*structure: scalar, or tuple or dict or list of constructed scalars and/or
other tuples/lists, or scalars. Note: numpy arrays are considered as
scalars.
**kwargs: Valid keyword args are:
* `check_types`: If set to `True` (default) the types of
iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Note that namedtuples with identical name and fields are always
considered to have the same shallow structure.
* `expand_composites`: If set to `True`, then composite tensors such
as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into
their component tensors. If `False` (the default), then composite
tensors are not expanded.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
if kwargs:
raise ValueError(
"Only valid keyword arguments are `check_types` and "
"`expand_composites`, not: `%s`" % ("`, `".join(kwargs.keys())))
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types,
expand_composites=expand_composites)
flat_structure = (flatten(s, expand_composites) for s in structure)
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries],
expand_composites=expand_composites)
def map_structure_with_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in
`structure[i]` and `path` is the common path to x[i] in the structures. All
structures in `structure` must have the same arity, and the return value will
contain the results with the same structure layout. Special kwarg
`check_types` determines whether the types of iterables within the structure
must be the same-- see **kwargs definition below.
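  Example (an illustrative sketch; the structure below is arbitrary):
  ```python
  structure = {"a": 3, "b": (1, 2)}
  map_structure_with_paths(lambda path, v: (path, v * 10), structure)
  # Output: {'a': ('a', 30), 'b': (('b/0', 10), ('b/1', 20))}
  ```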
Args:
func: A callable with the signature func(path, *values, **kwargs) that is
evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.,
`map_structure(func, [1], (1,))` raises a `TypeError` exception). By
default, the types must match. To allow iteration over structures of
different types (but common arity), set this kwarg to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
def wrapper_func(tuple_path, *inputs, **kwargs):
string_path = "/".join(str(s) for s in tuple_path)
return func(string_path, *inputs, **kwargs)
return map_structure_with_tuple_paths_up_to(structure[0],
wrapper_func,
*structure,
**kwargs)
def map_structure_with_tuple_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(tuple_path, x[0], x[1], ..., **kwargs)` where `x[i]` is an entry
in `structure[i]` and `tuple_path` is a tuple of indices and/or dictionary
keys (as returned by `nest.yield_flat_paths`), which uniquely specifies the
common path to x[i] in the structures. All structures in `structure` must have
the same arity, and the return value will contain the results in the same
structure. Special kwarg `check_types` determines whether the types of
iterables within the structure must be the same-- see **kwargs definition
below.
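  Example (an illustrative sketch; the structure below is arbitrary):
  ```python
  structure = {"a": 3, "b": (1, 2)}
  map_structure_with_tuple_paths(lambda path, v: (path, v), structure)
  # Output: {'a': (('a',), 3), 'b': ((('b', 0), 1), (('b', 1), 2))}
  ```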
Args:
func: A callable with the signature `func(tuple_path, *values, **kwargs)`
that is evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
return map_structure_with_tuple_paths_up_to(structure[0],
func,
*structure,
**kwargs)
def _yield_flat_up_to(shallow_tree, input_tree, is_seq, path=()):
"""Yields (path, value) pairs of input_tree flattened up to shallow_tree.
Args:
shallow_tree: Nested structure. Traverse no further than its leaf nodes.
input_tree: Nested structure. Return the paths and values from this tree.
Must have the same upper structure as shallow_tree.
is_seq: Function used to test if a value should be treated as a sequence.
path: Tuple. Optional argument, only used when recursing. The path from the
root of the original shallow_tree, down to the root of the shallow_tree
arg of this recursive call.
Yields:
Pairs of (path, value), where path the tuple path of a leaf node in
shallow_tree, and value is the value of the corresponding node in
input_tree.
"""
if not is_seq(shallow_tree):
yield (path, input_tree)
else:
input_tree = dict(_yield_sorted_items(input_tree))
for shallow_key, shallow_subtree in _yield_sorted_items(shallow_tree):
subpath = path + (shallow_key,)
input_subtree = input_tree[shallow_key]
for leaf_path, leaf_value in _yield_flat_up_to(shallow_subtree,
input_subtree, is_seq,
path=subpath):
yield (leaf_path, leaf_value)
def assert_shallow_structure(shallow_tree,
input_tree,
check_types=True,
expand_composites=False):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = {"a": "A", "b": "B"}
input_tree = {"a": 1, "c": 2}
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
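  By contrast, a shallow structure whose leaves correspond to deeper subtrees
  of the input will pass, for example:
  ```python
  shallow_tree = ["a", "b"]
  input_tree = [["c", "d"], ["e", "f", "g"]]
  assert_shallow_structure(shallow_tree, input_tree)  # No exception raised.
  ```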
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same. Note that even with check_types==True,
this function will consider two different namedtuple classes with the same
name and _fields attribute to be the same class.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
if is_seq(shallow_tree):
if not is_seq(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if isinstance(shallow_tree, _wrapt.ObjectProxy):
shallow_type = type(shallow_tree.__wrapped__)
else:
shallow_type = type(shallow_tree)
if check_types and not isinstance(input_tree, shallow_type):
# Duck-typing means that nest should be fine with two different
# namedtuples with identical name and fields.
shallow_is_namedtuple = _is_namedtuple(shallow_tree, False)
input_is_namedtuple = _is_namedtuple(input_tree, False)
if shallow_is_namedtuple and input_is_namedtuple:
if not _same_namedtuples(shallow_tree, input_tree):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
elif isinstance(shallow_tree, list) and isinstance(input_tree, list):
# List subclasses are considered the same,
# e.g. python list vs. _ListWrapper.
pass
elif ((_is_composite_tensor(shallow_tree) or
_is_composite_tensor(input_tree)) and
(_is_type_spec(shallow_tree) or _is_type_spec(input_tree))):
pass # Compatibility will be checked below.
elif not (isinstance(shallow_tree, _collections_abc.Mapping) and
isinstance(input_tree, _collections_abc.Mapping)):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree):
if not (
(_is_composite_tensor(input_tree) or _is_type_spec(input_tree)) and
(_is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree))):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
type_spec_1 = (shallow_tree if _is_type_spec(shallow_tree) else
shallow_tree._type_spec) # pylint: disable=protected-access
type_spec_2 = (input_tree if _is_type_spec(input_tree) else
input_tree._type_spec) # pylint: disable=protected-access
try:
_ = type_spec_1.most_specific_compatible_type(type_spec_2)
except (TypeError, ValueError) as e:
raise ValueError(
"Incompatible CompositeTensor TypeSpecs: %s vs. %s -- %s" %
(type_spec_1, type_spec_2, e))
elif _is_type_spec(shallow_tree):
if not _is_type_spec(input_tree):
raise TypeError("If shallow structure is a TypeSpec, input must also "
"be a TypeSpec. Input has type: %s."
% type(input_tree))
else:
if len(input_tree) != len(shallow_tree):
raise ValueError(
_STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree)))
elif len(input_tree) < len(shallow_tree):
raise ValueError(
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
input_size=len(input_tree), shallow_size=len(shallow_tree)))
if isinstance(shallow_tree, _collections_abc.Mapping):
absent_keys = set(shallow_tree) - set(input_tree)
if absent_keys:
raise ValueError(_SHALLOW_TREE_HAS_INVALID_KEYS
.format(sorted(absent_keys)))
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types,
expand_composites=expand_composites)
@tf_export("__internal__.nest.flatten_up_to", v1=[])
def flatten_up_to(shallow_tree, input_tree, check_types=True,
expand_composites=False):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
  partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
# Discard paths returned by _yield_flat_up_to.
return [v for _, v in _yield_flat_up_to(shallow_tree, input_tree, is_seq)]
def flatten_with_tuple_paths_up_to(shallow_tree,
input_tree,
check_types=True,
expand_composites=False):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
  Returns a list of (path, value) pairs, where value is a leaf node in the
flattened tree, and path is the tuple path of that leaf in input_tree.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[((), input_tree)]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_with_tuple_paths_up_to(shallow_tree,
input_tree)
flattened_shallow_tree = flatten_with_tuple_paths_up_to(shallow_tree,
shallow_tree)
# Output is:
# [((0, 0), [2, 2]),
# ((0, 1), [3, 3]),
# ((1, 0), [4, 9]),
# ((1, 1), [5, 5])]
#
# [((0, 0), True),
# ((0, 1), True),
# ((1, 0), False),
# ((1, 1), True)]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [((0, 0), ('a', 1)),
# ((0, 1, 0), ('b', 2)),
# ((0, 1, 1, 0), ('c', 3)),
# ((0, 1, 1, 1), ('d', 4))]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
  flatten_with_tuple_paths_up_to(0, 0)  # Output: [((), 0)]
  flatten_with_tuple_paths_up_to(0, [0, 1, 2])  # Output: [((), [0, 1, 2])]
  flatten_with_tuple_paths_up_to([0, 1, 2], 0)  # Output: TypeError
  flatten_with_tuple_paths_up_to([0, 1, 2], [0, 1, 2])
  # Output: [((0,), 0), ((1,), 1), ((2,), 2)]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
return list(_yield_flat_up_to(shallow_tree, input_tree, is_seq))
@tf_export("__internal__.nest.map_structure_up_to", v1=[])
def map_structure_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure layout as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
shallow_tree = [None, None]
inp_val = [1, 2, 3]
out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val)
# Output is: [2, 4]
```
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with the same structure layout as
`shallow_tree`.
"""
return map_structure_with_tuple_paths_up_to(
shallow_tree,
lambda _, *values: func(*values), # Discards the path arg.
*inputs,
**kwargs)
def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
Like map_structure_up_to(), except that the 'func' argument takes a path
tuple as its first argument, followed by the corresponding values from
*inputs.
Example:
```python
lowercase = {'a': 'a', 'b': ('b0', 'b1')}
uppercase = {'a': 'A', 'b': ('B0', 'B1')}
def print_path_and_values(path, *values):
print("path: {}, values: {}".format(path, values))
shallow_tree = {'a': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase)
path: ('a',), values: ('a', 'A')
path: ('b', 0), values: ('b0', 'B0')
path: ('b', 1), values: ('b1', 'B1')
shallow_tree = {'b': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
  path: ('b',), values: (('b0', 'b1'), ('B0', 'B1'))
shallow_tree = {'a': None, 'b': {1: None}}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
path: ('a',), values: ('a', 'A')
  path: ('b', 1), values: ('b1', 'B1')
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable that takes args (path, inputs_0_value, ... , inputs_N_value),
where path is a tuple path to a leaf node in shallow_tree, and
inputs_i_value is the corresponding value from inputs[i].
*inputs: nested structures that are all structurally compatible with
shallow_tree.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but one of `*inputs` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
Result of repeatedly applying `func`. Has the same structure layout as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for input_tree in inputs:
assert_shallow_structure(
shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
flat_value_gen = (
flatten_up_to( # pylint: disable=g-complex-comprehension
shallow_tree,
input_tree,
check_types,
expand_composites=expand_composites) for input_tree in inputs)
flat_path_gen = (
path for path, _ in _yield_flat_up_to(shallow_tree, inputs[0], is_seq))
results = [
func(*args, **kwargs) for args in zip(flat_path_gen, *flat_value_gen)
]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results,
expand_composites=expand_composites)
@tf_export("__internal__.nest.get_traverse_shallow_structure", v1=[])
def get_traverse_shallow_structure(traverse_fn, structure,
expand_composites=False):
"""Generates a shallow structure from a `traverse_fn` and `structure`.
`traverse_fn` must accept any possible subtree of `structure` and return
a depth=1 structure containing `True` or `False` values, describing which
of the top-level subtrees may be traversed. It may also
return scalar `True` or `False` "traversal is OK / not OK for all subtrees."
Examples are available in the unit tests (nest_test.py).
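  For instance, a `traverse_fn` that declines to traverse dicts might behave
  roughly as follows:
  ```python
  structure = [1, {"a": 2}, (3, 4)]
  get_traverse_shallow_structure(lambda s: not isinstance(s, dict), structure)
  # Output: [True, False, (True, True)]
  ```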
Args:
traverse_fn: Function taking a substructure and returning either a scalar
`bool` (whether to traverse that substructure or not) or a depth=1
shallow structure of the same type, describing which parts of the
substructure to traverse.
structure: The structure to traverse.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A shallow structure containing python bools, which can be passed to
`map_structure_up_to` and `flatten_up_to`.
Raises:
TypeError: if `traverse_fn` returns a sequence for a non-sequence input,
or a structure with depth higher than 1 for a sequence input,
or if any leaf values in the returned structure or scalar are not type
`bool`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
to_traverse = traverse_fn(structure)
if not is_seq(structure):
if not isinstance(to_traverse, bool):
raise TypeError("traverse_fn returned structure: %s for non-structure: %s"
% (to_traverse, structure))
return to_traverse
level_traverse = []
if isinstance(to_traverse, bool):
if not to_traverse:
# Do not traverse this substructure at all. Exit early.
return False
else:
# Traverse the entire substructure.
for branch in _yield_value(structure):
level_traverse.append(
get_traverse_shallow_structure(traverse_fn, branch,
expand_composites=expand_composites))
elif not is_seq(to_traverse):
raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s"
% (to_traverse, structure))
else:
# Traverse some subset of this substructure.
assert_shallow_structure(to_traverse, structure,
expand_composites=expand_composites)
for t, branch in zip(_yield_value(to_traverse),
_yield_value(structure)):
if not isinstance(t, bool):
raise TypeError(
"traverse_fn didn't return a depth=1 structure of bools. saw: %s "
" for structure: %s" % (to_traverse, structure))
if t:
level_traverse.append(
get_traverse_shallow_structure(traverse_fn, branch))
else:
level_traverse.append(False)
return _sequence_like(structure, level_traverse)
@tf_export("__internal__.nest.yield_flat_paths", v1=[])
def yield_flat_paths(nest, expand_composites=False):
"""Yields paths for some nested structure.
Paths are lists of objects which can be str-converted, which may include
integers or other types which are used as indices in a dict.
The flat list will be in the corresponding order as if you called
  `nest.flatten` on the structure. This is handy for naming Tensors such that
the TF scope structure matches the tuple structure.
E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))`
```shell
nest.flatten(value)
[3, 23, 42]
list(nest.yield_flat_paths(value))
[('a',), ('b', 'c'), ('b', 'd')]
```
```shell
list(nest.yield_flat_paths({'a': [3]}))
[('a', 0)]
list(nest.yield_flat_paths({'a': 3}))
[('a',)]
```
Args:
nest: the value to produce a flattened paths list for.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Yields:
Tuples containing index or key values which form the path to a specific
leaf value in the nested structure.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for k, _ in _yield_flat_up_to(nest, nest, is_seq):
yield k
def flatten_with_joined_string_paths(structure, separator="/",
expand_composites=False):
"""Returns a list of (string path, data element) tuples.
The order of tuples produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information.
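  Example (an illustrative sketch):
  ```python
  structure = {"a": 3, "b": (1, 2)}
  flatten_with_joined_string_paths(structure)
  # Output: [('a', 3), ('b/0', 1), ('b/1', 2)]
  ```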
Args:
structure: the nested structure to flatten.
separator: string to separate levels of hierarchy in the results, defaults
to '/'.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A list of (string, data element) tuples.
"""
flat_paths = yield_flat_paths(structure, expand_composites=expand_composites)
def stringify_and_join(path_elements):
return separator.join(str(path_element) for path_element in path_elements)
flat_string_paths = (stringify_and_join(path) for path in flat_paths)
return list(zip(flat_string_paths,
flatten(structure, expand_composites=expand_composites)))
def flatten_with_tuple_paths(structure, expand_composites=False):
"""Returns a list of `(tuple_path, leaf_element)` tuples.
The order of pairs produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information about tuple paths.
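  Example (an illustrative sketch):
  ```python
  structure = {"a": 3, "b": (1, 2)}
  flatten_with_tuple_paths(structure)
  # Output: [(('a',), 3), (('b', 0), 1), (('b', 1), 2)]
  ```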
Args:
structure: the nested structure to flatten.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A list of `(tuple_path, leaf_element)` tuples. Each `tuple_path` is a tuple
of indices and/or dictionary keys that uniquely specify the path to
`leaf_element` within `structure`.
"""
return list(zip(yield_flat_paths(structure,
expand_composites=expand_composites),
flatten(structure, expand_composites=expand_composites)))
@tf_export("__internal__.nest.list_to_tuple", v1=[])
def list_to_tuple(structure):
"""Replace all lists with tuples.
The fork of nest that tf.data uses treats lists as single elements, while
tf.nest treats them as structures to recurse into. Keras has chosen to adopt
the latter convention, and must therefore deeply replace all lists with tuples
before passing structures to Dataset.from_generator.
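  Example (an illustrative sketch):
  ```python
  list_to_tuple([1, [2, 3], {"a": [4]}])
  # Output: (1, (2, 3), {'a': (4,)})
  ```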
Args:
structure: A nested structure to be remapped.
Returns:
structure mapped to replace all lists with tuples.
"""
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return _sequence_like(instance, args)
return _pack_sequence_as(structure, flatten(structure), False,
sequence_fn=sequence_fn)
_pywrap_utils.RegisterType("Mapping", _collections_abc.Mapping)
_pywrap_utils.RegisterType("MutableMapping", _collections_abc.MutableMapping)
_pywrap_utils.RegisterType("Sequence", _collections_abc.Sequence)
_pywrap_utils.RegisterType("MappingView", _collections_abc.MappingView)
_pywrap_utils.RegisterType("ObjectProxy", _wrapt.ObjectProxy)
|
annarev/tensorflow
|
tensorflow/python/util/nest.py
|
Python
|
apache-2.0
| 64,128
|
[
"VisIt"
] |
5be7a352a4f5416f2b932e2ee254aad8b66b26b1c0918965b27555a701da2ec8
|
"""This module contains the main classes for handling geophysical (climate)
variables and dimensions. It also reads and writes NetCDF files.
The :mod:`~geodat.nc.Dimension` and :mod:`~geodat.nc.Variable` classes in this
module act as containers of :py:mod:`~numpy` arrays which can be easily
accessed.
"""
from __future__ import print_function
import os
import sys
import copy
import warnings
import logging
from functools import wraps, partial
import datetime
import inspect
import collections
import numpy
import scipy.io.netcdf as netcdf
from scipy.ndimage.filters import gaussian_filter
import pylab
from dateutil.relativedelta import relativedelta
from . import keepdims
from . import arrays
from . import stat
from . import math
from . import monthly
from .plot import mapplot
from . import grid_func
from . import pyferret_func
from . import units
from . import time_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#-----------------------------------------------------------------
# If netCDF4 is not installed, some functions are not available
# When these functions are called, an Exception will be raised
#-----------------------------------------------------------------
try:
import netCDF4 as _netCDF4
_NETCDF4_IMPORTED = True
except ImportError:
_NETCDF4_IMPORTED = False
def _throw_error(error):
def new_func(*args,**kwargs):
raise error
return new_func
if _NETCDF4_IMPORTED:
_num2date = _netCDF4.num2date
_date2num = _netCDF4.date2num
_netCDF4_Dataset = _netCDF4.Dataset
_netCDF4_datetime = _netCDF4.netcdftime.datetime
else:
logger.warning("Failed to import netCDF4 package. "+\
"Some functions may not work")
_NETCDF4_IMPORT_ERROR = ImportError("The netCDF4 package is "+\
"required but fail to import. "+\
"See https://pypi.python.org/pypi/netCDF4/0.8.2")
_num2date = _date2num = _netCDF4_Dataset = \
_throw_error(_NETCDF4_IMPORT_ERROR)
#---------------------------------
# Finished import setup
#---------------------------------
def getvar(filename, varname, *args, **kwargs):
    ''' Shorthand for retrieving a variable from a netCDF file
Args:
filename (str): Name of input file
varname (str): Name of the variable
Returns:
geodat.nc.Variable
    Optional arguments and keyword arguments are passed to geodat.nc.Variable
Example:
var = getvar("sst.nc","sst")
'''
return Variable(netcdf.netcdf_file(filename), varname, *args, **kwargs)
def dataset(filenames, append_code="s", *args, **kwargs):
''' Extract all variables in one file or more files
Args:
filenames (str or a list of str): Input files
        append_code (str): what to do when variable names clash;
"o" for overwriting previously loaded variables;
"r" for renaming newly loaded variable (will prompt for input)
"s" to skip (default)
Returns:
dict: str and geodat.nc.Variable pairs
Optional arguments accepted by geodat.nc.Variable can be used here
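    Example (file names here are hypothetical):
        variables = dataset(["sst.nc", "uwind.nc"])
        sst = variables["sst"]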
'''
result = {}
if type(filenames) is not list:
filenames = [filenames]
for filename in filenames:
file_handle = netcdf.netcdf_file(filename)
for varname in file_handle.variables.keys():
if varname in file_handle.dimensions:
# Do not add dimensions to the dataset
continue
if varname in result:
                print(varname, "already loaded. ", end="")
if append_code.lower() == 'o':
print("Overwriting.")
result[varname] = Variable(file_handle, varname,
*args, **kwargs)
elif append_code.lower() == 'r':
print("Enter new name: ", end="")
newname = sys.stdin.readline()[:-1]
result[newname] = Variable(file_handle, varname,
*args, **kwargs)
result[newname].varname = newname
elif append_code.lower() == "s":
print("I am skipping the variable:", varname,
"in", filename)
continue
else:
raise ValueError("Invalid choice for append_code")
else:
result[varname] = Variable(file_handle, varname,
*args, **kwargs)
return result
def _genereal_axis(axis):
''' Standardize keyword for axes time/lat/lon
'T': 'time','t','TIME','T'
'X': 'x','X','lon','LON','longitude','LONGITUDE'
'Y': 'y','Y','lat','LAT','latitude','LATITUDE'
    'Z': 'z','Z','dep','DEP','depth','DEPTH','lev','LEV'
Anything not recognized will be returned in upper case
Args:
axis (str)
Returns:
str
Example: _genereal_axis('time') -> 'T'
Example: _genereal_axis('dummy') -> 'DUMMY'
'''
invaxnames = {'tim':'T', 'lon':'X', 'lat':'Y',
'lev':'Z', 'dep':'Z'}
    if len(axis) > 1:
        # Fall back to upper case for unrecognised names, as documented above
        return invaxnames.get(axis[:3].lower(), axis.upper())
    else:
        return axis.upper()
def _general_region(region):
''' Standardize keyword for regional slicing.
    Used by Variable.getRegion()
'T': 'time','t','TIME','T'
'X': 'x','X','lon','LON','longitude','LONGITUDE'
'Y': 'y','Y','lat','LAT','latitude','LATITUDE'
Args:
region (dict)
Returns:
dict
Example:
_general_region({'TIME':(10.,1000.),'LAT':(-5.,5.)})
--> {'T': (10.,1000.),'Y':(-5.,5.)}
'''
results = {}
for iax, value in region.items():
results[_genereal_axis(iax)] = value
return results
class Dimension(object):
"""
A container for handling physical dimensions such as time, latitude.
It can be indexed/sliced the same way as indexing a numpy array
"""
def __init__(self, data, dimname=None, units=None,
attributes=None, parent=None):
"""
Attributes:
data (numpy 1-d array): Array for the physical axis
dimname (str): Name of the dimension, e.g. "time"
units (str): Unit of the dimension, e.g. "days since 1990-01-01"
attributes (dict): Attributes for the dimension
Arguments:
data (numpy 1-d array): Array for the physical axis
dimname (str): Name of the dimension, e.g. "time"
units (str): Unit of the dimension, e.g. "days since 1990-01-01"
attributes (dict): Attributes for the dimension
parent (Dimension): from which dimname,units,attributes are copied
if they are not supplied already in the arguments
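        Example (a minimal sketch):
            >>> lat = Dimension(numpy.arange(-90., 91., 30.), "lat",
            ...                 units="degrees_N")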
"""
self.data = data
self.units = units
self.attributes = {}
self.dimname = 'UNNAMED_DIM'
if parent is not None:
self.units = parent.units
self.attributes.update(parent.attributes)
self.dimname = parent.dimname
if units is not None:
self.units = units
if attributes is not None:
self.attributes.update(attributes)
if dimname is not None:
self.dimname = dimname
if isinstance(data, netcdf.netcdf_variable):
self.data = data.data
self.units = getattr(data, 'units', None)
self.attributes.update(data.__dict__['_attributes'])
# Make sure the dimension data is a numpy array
if numpy.isscalar(self.data):
self.data = numpy.array([self.data,],
dtype=getattr(self.data, "dtype", None))
if attributes is not None:
self.attributes.update(attributes)
self.attributes.update(dict(units=str(self.units)))
def __getitem__(self, sliceobj):
''' Apply the slice object on the data (numpy.ndarray) '''
return Dimension(data=self.data[sliceobj],
dimname=self.dimname,
units=self.units,
attributes=self.attributes)
def __getattr__(self, name):
if name in self.attributes:
return self.attributes[name]
else:
raise AttributeError("{} is not found".format(name))
def info(self, detailed=False, file_out=None):
""" Print brief info about the dimension
        If detailed is True, attributes and length of axis are also printed.
"""
info_str = 'Dim: '+ self.dimname
if numpy.isscalar(self.data):
info_str += ' = '+str(self.data)
else:
info_str += ' = '+ str(self.data[0]) + ':' + str(self.data[-1])
info_str += ' Unit: ' + str(self.units)
print(info_str, file=file_out)
if detailed:
print('Length=', str(len(self.data)), file=file_out)
print('Attributes:', file=file_out)
for attname, val in self.attributes.items():
print(" {dimname}:{attname} = {val}".format(
dimname=self.dimname, attname=attname, val=val),
file=file_out)
def getCAxis(self):
""" Get cartesian axis (T/Z/Y/X) for a dimension instance
        If the dimension has a cartesian_axis attribute, the value of
        the attribute is returned. Otherwise, the unit is used as a clue.
Example:
dim.setattr("cartesian_axis","X")
dim.getCAxis() --> "X"
Example:
dim.units = "months"
dim.getCAxis() --> "T"
Example:
dim.units = "degreeN"
dim.getCAxis() --> "Y"
"""
atts = self.attributes
cax = atts.get('axis', atts.get('cartesian_axis'))
if cax is None:
if self.units is not None:
cax = units.assign_caxis(self.units)
return cax
def is_monotonic(self):
''' Return True if the axis is monotonic, False otherwise
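        A longitude ('X') axis that wraps around its periodic boundary,
        e.g. [100., 180., -100., -20.], still counts as monotonic.
        Example (a minimal sketch):
            >>> Dimension(numpy.array([1., 2., 3.]), "level").is_monotonic()
            True
            >>> Dimension(numpy.array([1., 3., 2.]), "level").is_monotonic()
            False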
'''
def strict_monotonic_func(data):
return (numpy.diff(data) > 0.).all() or \
(numpy.diff(data) < 0.).all()
strict_monotonic = strict_monotonic_func(self.data)
if not strict_monotonic and self.getCAxis() == 'X':
# Make sure it is not because of periodic boundary condition
# of longitude
x_diff = numpy.diff(self.data)
if sum(x_diff > 0.) > sum(x_diff < 0.):
# More often increasing
# Roll backward
return strict_monotonic_func(
numpy.roll(self.data, (numpy.argmin(x_diff)+1)*-1))
else:
# More often decreasing
# Roll forward
return strict_monotonic_func(
numpy.roll(self.data, numpy.argmin(x_diff)+1))
return strict_monotonic
def is_climo(self):
''' Return True if the axis is a climatological time axis '''
if self.getCAxis() != 'T':
return False
return all((x == y for x,y in zip(
sorted(self.getDate("m")), range(1, 13))))
def time2array(self):
''' Given a dimension object, if it is a time axis,
return ndarray of size (N,6) where N is the number of
time point, and the six indices represent:
YEAR,MONTH,DAY,HOUR,MINUTE,SECOND
Same as getDate()
'''
return self.getDate()
def time0(self):
''' Return a datetime.datetime object referring to the t0 of a time axis
'''
if self.getCAxis() != 'T':
raise Exception("This axis is not a time axis")
return time_utils.extract_t0_from_unit(self.units)
def getDate(self, toggle="YmdHMS"):
''' Return the time axis date in an array format of
"Year,Month,Day,Hour,Minute,Second"
Toggle one or many among Y/m/d/H/M/S to select a particular time format
Args:
toggle (iterable of str): each item should be among Y/m/d/H/M/S
Examples:
>>> # return an array of the month of the time axis
>>> var.getDate("m")
array([ 1, 2, 3, 4, 5, 6 ])
>>> # return an array with the first column showing the years,
>>> # second column showing the months, third column
>>> # for days
>>> getDate("Ymd")
array([[ 1990, 1, 15 ], [ 1990, 2, 15 ], [ 1990, 3, 15 ]])
'''
#------------------
# Sanity check
#------------------
if self.getCAxis() != 'T':
raise RuntimeError("Dimension.getDate: not a time axis")
try:
_ = iter(toggle)
except TypeError:
raise TypeError("toggle has to be iterable:\"Y/m/d/H/M/S\"")
if not all((t in "YmdHMS" for t in toggle)):
raise ValueError("toggle has to be one of \"Y/m/d/H/M/S\"")
alltimes = time_utils.num2date(self.data, self.units,
getattr(self, "calendar", "standard"))
# Convert flag to attribute names
flag2attr = dict(Y="year", m="month", d="day", H="hour",
M="minute", S="second")
return numpy.array([[getattr(t, flag2attr[flag])
for flag in toggle ]
for t in alltimes ]).squeeze()
def getMonthly(self):
""" If the dimension is a monthly time axis, return an array containing
the calendar months, corrected using monthly.filter_monthly. If the
dimension is not a monthly time axis, raise a TypeError
Returns: months (numpy 1d array, dtype=numpy.int)
"""
if self.getCAxis() != 'T':
            raise TypeError("Dimension.getMonthly: not a time axis")
time, unit, calendar = (self.data, self.units,
getattr(self, "calendar", "standard"))
if not monthly.is_monthly(time, unit, calendar):
            raise TypeError("Dimension.getMonthly: not a monthly time axis")
return monthly.filter_monthly(time, unit, calendar)
class Variable(object):
"""
A container for handling physical variable together with its dimensions
so that while the variable is manipulated (e.g. averaged along one axis),
the information of the dimensions change accordingly.
It can be indexed/sliced the same way as indexing a numpy array
"""
def __init__(self, reader=None, varname=None, data=None, dims=None,
attributes=None, history=None, parent=None,
ensureMasked=False, **kwargs):
"""
Attributes:
data (numpy.ndarray or numpy.ma.core.MaskedArray): Data array of the
variable
varname (str): Name of the variable
dims (list of Dimension instances): Dimensions of the variable
consistent with the shape of the data array
units (str): Unit of the variable
attributes (dict): Attributes of the variable
Arguments:
reader (netcdf.netcdf_file): if given, the variable is read
from the NetCDF file
varname (str) : variable name
data (numpy.ndarray): variable data
dims (a list of Dimension objects) : dimensions
attributes (dict): attributes of the variable
history (str): to be stored/appended to attributes['history']
parent (Variable): from which varname, dims and attributes are copied;
Copied `varname` and `dims` can be overwritten by assigning values in
the arguments. If `attributes` is copied from `parent`, the
dictionary assigned to the argument `attributes` is used to update the
copied `attributes`. `parent` is left unchanged.
ensureMasked (bool): whether the array is masked using _FillValue
upon initialization. default: False
        Other keyword arguments would be passed to getRegion
Examples:
>>> var = Variable(netcdf.netcdf_file,"temperature")
>>> var = Variable(netcdf.netcdf_file,"temperature",
lat=(-5.,5.),lon=(-170.,-120))
>>> # Copy varname, dims, attributes from var
>>> # If the dimension shape does not match data shape, raise an Error
>>> var2 = Variable(data=numpy.array([1,2,3,4]),parent=var)
>>> var = Variable(data=numpy.array([1,2,3,4]),
dims=[Dimension(data=numpy.array([0.,1.,2.,3.]),)],
varname='name')
"""
# Initialize the most basic properties.
self.data = data
self.dims = dims # a list of Dimension instances
self.varname = varname
self.attributes = {}
self._ncfile = None
if reader is not None and type(reader) is netcdf.netcdf_file:
assert varname is not None
try:
varobj = reader.variables[varname]
except KeyError:
print('Unknown variable name. Available variables: '+\
','.join(reader.variables.keys()))
return None
self.data = getattr(varobj, "data", None)
self.dims = [Dimension(reader.variables[dim], dim)
if dim in reader.variables
else Dimension(reader.dimensions[dim], dim)
for dim in varobj.dimensions]
self.attributes.update(varobj.__dict__['_attributes'])
self.addHistory('From file: '+reader.fp.name)
self._ncfile = reader
elif parent is not None:
self._copy_from_parent_(parent)
else:
# no recognizable reader or parent variable is given;
# data, varname should not be None
if data is None:
raise AttributeError('data is not provided')
if varname is None:
raise AttributeError('varname is not provided')
# If parent is given, these will overwrite the properties
# copied from parent
# If parent is not given, the following initializes the instances
if data is not None:
self.data = data
if dims is not None:
self.dims = dims
if varname is not None:
self.varname = varname
if attributes is not None:
self.attributes.update(attributes)
if self.dims is None:
raise AttributeError("dims (dimensions) is not provided")
if history is not None:
self.addHistory(history)
self.setRegion(**kwargs)
self.masked = False
# This is the one that takes the time while initializing variables
if ensureMasked:
self.ensureMasked()
# Check to make sure the variable data shape matches the dimensions'
if not self.is_shape_matches_dims():
raise ValueError("Dimension mismatch.")
def is_shape_matches_dims(self):
        ''' Check if the shape of the data matches the dimensions
        Return True if they match, False otherwise
'''
var_data_shape = self.data.shape
dim_shape = tuple([dim.data.size for dim in self.dims])
if var_data_shape != dim_shape:
return False
else:
return True
def addHistory(self, string):
""" Append to the history attribute in the variable.
If history doesn't exist, create one
"""
old_history = getattr(self, 'history', None)
if old_history is None:
self.attributes["history"] = string
else:
self.attributes['history'] = "; ".join((old_history, string))
def __repr__(self):
result = "<{0}.{1} ".format(__name__, type(self).__name__) + \
self.varname +\
'(' + ",".join(self.getDimnames()) + '), shape: ' +\
str(self.data.shape) + '>'
return result
def info(self, detailed=False, file_out=None):
""" Print brief info about the variable
"""
# varname, dim, shape
print(self.__repr__(), file=file_out)
# Attributes:
print("Attributes:", file=file_out)
for attname, val in self.attributes.items():
print(" {varname}:{attname} = {val}".format(
varname=self.varname, attname=attname, val=val),
file=file_out)
# Dimension info:
for dim in self.dims:
dim.info(detailed=detailed, file_out=file_out)
def getCAxes(self):
""" get the cartesian axes for all the dimensions.
Return a list of cartesian axes.
if it is undefined, replace with dummy: A,B,C,...(excludes: T/Z/X/Y)
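        Example:
            >>> # var: a hypothetical Variable with recognised time/lat/lon axes
            >>> var.getCAxes()
            ['T', 'Y', 'X']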
"""
dummies = list('ABCDEFGHIJKLMNOPQRSUVW')
caxes = []
for dim in self.dims:
# Using try-catch is clearly not ideal
# Previously the try block was an if-statement that
# getCAxis is called only if dim is an instance of
# geodat.nc.Dimension. However when the module is reload,
# objects created before reloading is no longer an
# instance of the reloaded module
try:
cax = dim.getCAxis()
except AttributeError:
cax = None
if cax is None:
cax = dummies.pop(0)
caxes.append(cax)
return caxes
def getDimnames(self):
"""Return a list of dimension names
"""
return [dim.dimname for dim in self.dims]
def __getattr__(self, att):
''' Return the value of an attribute of the variable
'''
if att in self.attributes:
return self.attributes[att]
else:
raise AttributeError("{} is not found".format(att))
def getAxes(self):
        ''' Return the dimensions of the variable as a list of numpy arrays,
        in the order of the dimensions
'''
axes = []
for idim, dim in enumerate(self.dims):
if dim.data is None:
dim.data = numpy.arange(1, self.data.shape[idim]+1)
axis = dim.data
axes.append(axis)
return axes
def getIAxis(self, axis):
'''Return the integer for the required cartesian axis
Input:
axis (int or str): if it is an integer, do nothing and return axis
if it is a str, look for index of the dimension
which matches the required cartesian axis using
the CAxes function
Returns:
int
See Also:
CAxes, getAxis, getDim
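        Example:
            >>> # var: a hypothetical Variable with dims (time, lat, lon)
            >>> var.getIAxis("lat")
            1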
'''
if isinstance(axis, int):
return axis
if isinstance(axis, str):
caxes = self.getCAxes()
axis = _genereal_axis(axis)
if axis not in caxes:
raise KeyError(self.varname+" has no "+axis+" axis")
else:
return caxes.index(axis)
else:
raise ValueError("axis has to be either an integer or a string")
def getAxis(self, axis):
''' Return a numpy array of an axis of a variable
Input:
axis (int or str): if it is an integer, do nothing and return axis
if it is a str, look for index of the dimension
which matches the required cartesian axis using
the CAxes function
Returns:
numpy array
See Also:
CAxes, getIAxis, getDim
'''
return self.getDim(axis).data
def getDim(self, axis):
        ''' Return the Dimension instance of an axis of a variable
Input:
axis (int or str): if it is an integer, do nothing and return axis
if it is a str, look for index of the dimension
which matches the required cartesian axis using
the CAxes function
Returns:
            geodat.nc.Dimension
See Also:
CAxes, getIAxis, getAxis
'''
return self.dims[self.getIAxis(axis)]
def getDomain(self, axis=None):
''' Return the domain of the variable
        If the axis is a longitude axis, make all negative degrees positive
(only for output; the variable longitude data is unchanged)
Args:
axis (str or int): query the domain of a particular dimension. If
it is not specified, the domains of all dimensions are returned
Returns:
dict
Examples:
>>> # var is a regional variable within (20S-20N, 140E-140W)
>>> var.getDomain()
{"X": (140.,220.), "Y": (-20.,20.)}
>>> var.getDomain("X")
{"X": (140.,220.)}
'''
if axis is None:
axis = self.getCAxes()
domain = {}
for ax_name in axis:
coor = self.getAxis(ax_name)
if ax_name == 'X':
coor = coor.copy()
coor[coor < 0.] += 360.
domain[_genereal_axis(ax_name)] = (min(coor), max(coor))
return domain
def _copy_from_parent_(self, parent):
""" Copy the dimensions, attributes and varname
from a parent variable
Use copy.copy instead of deepcopy
"""
if not isinstance(parent.dims, list):
raise TypeError("parent.dims must be a list")
if any((not isinstance(dim, Dimension) for dim in parent.dims)):
raise TypeError("parent.dims must be a list of Dimension instance")
self.dims = copy.copy(parent.dims)
if not isinstance(parent.attributes, dict):
raise TypeError("parent.attributes must be a dict instance")
self.attributes = copy.copy(parent.attributes)
if not isinstance(parent.varname, str):
raise TypeError("parent.varname must be a string instance")
self.varname = copy.copy(parent.varname)
def _broadcast_dim_(self, other, result):
''' Return a list of dimensions suitable for operations (__add__...)
between self and other
Arg:
other (geodat.nc.Variable or numpy.ndarray attribute)
result (numpy.ndarray) : the result of an operation e.g. __add__
Returns:
a list of geodat.nc.Dimension
Example:
varA <geodat.nc.Variable with shape (12,10)>
varB <geodat.nc.Variable with shape (1,10)>
varC = varA + varB
varC <geodat.nc.Variable with shape (12,10)> inherits dimensions
from varA
But varC = varB + varA
would require inheriting the first dimension of varA and the second
dimension of varB
'''
dims = []
for idim, (size1, size2) in enumerate(zip(self.data.shape,
result.shape)):
if size1 == 1 and size2 > 1:
dims.append(other.dims[idim])
else:
dims.append(self.dims[idim])
return dims
def __sub__(self, other):
var1 = _getdata(self)
var2 = _getdata(other)
history = ""
name1 = getattr(self, 'varname', str(self))
name2 = getattr(other, 'varname', str(other))
data = var1 - var2
history = name1 + '-' + name2
return Variable(data=data, dims=self._broadcast_dim_(other, data),
parent=self, history=history)
def __rsub__(self, other):
var1 = _getdata(self)
var2 = _getdata(other)
history = ""
name1 = getattr(self, 'varname', str(self))
name2 = getattr(other, 'varname', str(other))
data = var2 - var1
history = name2 + '-' + name1
return Variable(data=data, dims=self._broadcast_dim_(other, data),
parent=self, history=history)
def __add__(self, other):
var1 = _getdata(self)
var2 = _getdata(other)
history = ""
name1 = getattr(self, 'varname', str(self))
name2 = getattr(other, 'varname', str(other))
data = var1 + var2
history = name1 + '+' + name2
return Variable(data=data, dims=self._broadcast_dim_(other, data),
parent=self, history=history)
def __radd__(self, other):
return self.__add__(other)
def __div__(self, other):
var1 = _getdata(self)
var2 = _getdata(other)
history = ""
name1 = getattr(self, 'varname', str(self))
name2 = getattr(other, 'varname', str(other))
data = var1 / var2
history = name1 + '/' + name2
return Variable(data=data, dims=self._broadcast_dim_(other, data),
parent=self, history=history)
def __rdiv__(self, other):
var1 = _getdata(self)
var2 = _getdata(other)
history = ""
name1 = getattr(self, 'varname', str(self))
name2 = getattr(other, 'varname', str(other))
data = var2 / var1
history = name2 + '/' + name1
return Variable(data=data, dims=self._broadcast_dim_(other, data),
parent=self, history=history)
def __mul__(self, other):
var1 = _getdata(self)
var2 = _getdata(other)
history = ""
name1 = getattr(self, 'varname', str(self))
name2 = getattr(other, 'varname', str(other))
data = var1 * var2
history = name1 + '*' + name2
return Variable(data=data,
dims=self._broadcast_dim_(other, data),
parent=self, history=history)
def __rmul__(self, other):
return self.__mul__(other)
def __call__(self, **region):
'''Same as self.getRegion'''
return self.getRegion(**region)
def __getitem__(self, sliceobj):
a = Variable(data=self.data, varname=self.varname, parent=self)
sliceobj = numpy.index_exp[sliceobj]
a.slicing(sliceobj)
a.addHistory('__getitem__['+str(sliceobj)+']')
return a
def __setitem__(self, sliceobj, val):
if type(sliceobj) is dict:
sliceobj = self.getSlice(**sliceobj)
self.data[sliceobj] = val
def getRegion(self, **kwargs):
''' Return a new Variable object within the region specified.
Values have to be a length-2 iterable that specifies the range
Keys "time","t","TIME","T" are all considered as "T" for time axis.
Keys "x","X","lon","LON","longitude","LONGITUDE" are all considered as
"X" for the longitude axis or an axis with an attribute of
"cartesian_axis" set to "X"
Keys "y","Y","lat","LAT","latitude","LATITUDE" are all considered as "Y"
for latitude axis or an axis with an attribute of "cartesian_axis"
set to "Y"
Examples:
>>> # Extracts the region where -20. <= latitude <= 20.
>>> # and 100. <= longitude <= 200.
>>> var.getRegion(lat=(-20.,20.), lon=(100.,200.))
'''
a = Variable(data=self.data, parent=self)
a.setRegion(**kwargs)
return a
def getSlice(self, **kwargs):
        ''' Return a tuple of slice objects corresponding to the specified region.
Example: variable.getSlice(lat=(-30.,30.))
'''
region = _general_region(kwargs)
if region:
return self._create_slice(region)
else:
return None
def setRegion(self, **kwargs):
''' Change the region of interest for the variable
This function slices the data.
'''
region = _general_region(kwargs)
if region:
self.slicing(self._create_slice(region))
self.addHistory('setRegion('+str(region)+')')
return self
def setRegion_value(self, value, **kwargs):
''' Set values for a particular region
Example: variable.setRegion_value(0.,lat=(-90.,-30.))
'''
sl = self.getSlice(**kwargs)
self[sl] = value
return self
def _create_slice(self, region=None):
''' Generate a tuple of slice object for the given region
specifications
'''
if region is None or len(region) == 0:
return (slice(None),)*self.data.ndim
sliceobjs = []
# A list of 1d arrays
axes = self.getAxes()
caxes = self.getCAxes()
for axis in caxes:
sliceobj = region.get(axis, slice(None))
# if sliceobj is a single value
# lower bound == upper bound (exact value)
if numpy.isscalar(sliceobj):
sliceobj = (sliceobj, sliceobj)
if not isinstance(sliceobj, (slice, numpy.ndarray)):
iax = caxes.index(axis)
# Set modulo, if unset and axis is longitude, use 360 degree
modulo = getattr(self.dims[iax], 'modulo', None)
if axis == "X" and modulo is None:
modulo = 360.
sliceobj = arrays.getSlice(axes[caxes.index(axis)],
sliceobj[0], sliceobj[1],
modulo=modulo)
sliceobjs.append(sliceobj)
return tuple(sliceobjs)
def slicing(self, sliceobj):
''' Perform the slicing operation on both the data and axes
Args:
            sliceobj (tuple): tuple of slice objects/integers
                (an info message is logged if advanced indexing is invoked)
Returns: None
'''
if not isinstance(sliceobj, tuple):
raise TypeError("slicing expects a tuple as sliceobj")
ndim = self.data.ndim
self.data = self.data[sliceobj]
'''
Check to see if advanced indexing is invoked. If so, notify the user.
Advanced indexing may be required for longitude axis as it wrapped
around 360 degree.
Basic slicing occurs if the sliceobj is a slice object, an integer
or a tuple of slice object/int/Ellipsis/newaxis.
Advanced indexing occurs when sliceobj is a non-tuple sequence object,
an ndarray or a tuple with at least one sequence object or ndarray
There isn't a reliable checking algorithm for finding out if two arrays
actually share memory
'''
for obj in sliceobj:
if isinstance(obj, (collections.Sequence, numpy.ndarray)) and\
all((isinstance(sl, (int, bool, numpy.bool_))
for sl in tuple(obj))):
logger.info("Advanced indexing is invoked for {}".format(
self.varname))
break
num_newaxis = 0
num_ellipsis = 0
for sl in sliceobj:
if sl is None:
num_newaxis += 1
if sl is Ellipsis:
num_ellipsis += 1
# replace Ellipsis with a number of slice(None)
# such that len(sliceobj) = ndim + num_newaxis
# but subsequent Ellipsis should be replaced with one slice(None) only
# This list is only needed if there is any Ellipsis at all
slice_None = [[slice(None)]]*(num_ellipsis-1) + \
[[slice(None)]*\
(ndim+num_newaxis-len(sliceobj)+1)]
new_sliceobj = []
for sl in sliceobj:
if sl is Ellipsis:
new_sliceobj += slice_None.pop()
else:
new_sliceobj.append(sl)
# Slice the Dimensions
for iax, sl in enumerate(new_sliceobj):
if sl is None:
# numpy.newaxis is asked
# create a dummy dimension
self.dims.insert(iax, Dimension(data=numpy.nan))
else:
self.dims[iax] = self.dims[iax][sl]
# If slice is an integer, numpy would squeeze the array
# Add the singlet dimension back
newaxis_list = [numpy.newaxis if isinstance(sl, int)
else slice(None) for sl in new_sliceobj]
if newaxis_list:
self.data = self.data[newaxis_list]
# Make sure the dimensions still match
if not self.is_shape_matches_dims():
raise ValueError("Dimension mismatch.")
def getLatitude(self):
''' Return a numpy array that contains the latitude axis
'''
return self.getAxis('Y')
def getLongitude(self):
''' Return a numpy array that contains the longitude axis
'''
return self.getAxis('X')
def getTime(self):
''' Return a numpy array that contains the time axis
'''
return self.getAxis('T')
@property
def lat(self):
''' Latitude axis (if exists) of the variable '''
return self.getLatitude()
@property
def lon(self):
''' Longitude axis (if exists) of the variable '''
return self.getLongitude()
@property
def time(self):
''' Time axis (if exists) of the variable '''
return self.getTime()
@property
def depth(self):
''' Vertical axis (if exists) of the variable '''
return self.getAxis("Z")
def apply_mask(self, mask):
        ''' Mask the variable's last axes with a mask
        Returns a new Variable; the original variable is left unchanged
        (see the module-level apply_mask function)
'''
return apply_mask(self, mask)
def climatology(self, *args, **kwargs):
''' Compute the climatology
'''
return climatology(self, *args, **kwargs)
def zonal_ave(self):
''' Compute the zonal average
Same as wgt_ave('X')
'''
return wgt_ave(self, 'X')
def time_ave(self):
''' Compute the time average
Same as wgt_ave('T')
'''
return wgt_ave(self, 'T')
def lat_ave(self):
''' Compute meridional average
Same as wgt_ave('Y')
'''
return wgt_ave(self, 'Y')
def area_ave(self):
''' Compute area average
Same as wgt_ave('XY')
'''
return wgt_ave(self, 'XY')
def wgt_ave(self, axis=None):
        ''' Compute a weighted average on one or more axes
Input:
axis - either integer or a string (T/X/Y/Z/...)
See getCAxes
'''
return wgt_ave(self, axis)
def getMissingValue(self):
''' Return "missing_value" if defined in the attributes
Otherwise "_FillValue" will be used as missing value
If both are undefined, the numpy default for the variable
data type is returned
'''
FillValue = getattr(self, '_FillValue', None)
missing_value = getattr(self, 'missing_value', None)
default = numpy.asscalar(numpy.ma.default_fill_value(self.data))
return missing_value or FillValue or default
def ensureMasked(self):
''' If the data in the variable is not a masked array and
missing_value is present
Read the data and convert the numpy ndarray into masked array,
note that this will be slow if the data is large. But this will
only be done once.
Returns: None
'''
if self.masked:
return None
missing_value = self.getMissingValue()
if isinstance(self.data, numpy.ma.core.MaskedArray):
self.data = numpy.ma.array(self.data.filled(missing_value))
self.data = numpy.ma.masked_values(self.data, missing_value)
if not isinstance(self.data, numpy.ma.core.MaskedArray):
raise AssertionError("Missing value: {}".format(missing_value))
self.data.set_fill_value(missing_value)
self.attributes['_FillValue'] = missing_value
self.masked = True
return None
def runave(self, N, axis=0, step=None):
'''Running mean along an axis.
N specifies the size of the window
Args:
N (int or float): size of the window
if axis is int, N is treated as the number of
array elements along the axis
if axis is str, N is treated as the absolute value
of the size of window on the axis
axis (int or str): axis on which running mean is computed
step (int): how many array element is skipped for each sample
Return:
geodat.nc.Variable
Examples:
# Running average for every 5 elements on the first axis
>>> var.runave(5, 0)
# Running average with a window of longitudinal-width of 40-degree
>>> var.runave(40., "X")
# Climatological running average with a window of 3 years
# axis=0 for the time axis
>>> var.runave(3, 0, step=12)
'''
self.ensureMasked()
cartesian_axes = self.getCAxes()
history = 'runave('+str(N)+','+str(axis)+',step='+str(step)+')'
if type(axis) is str:
axis = axis.upper()
axis = cartesian_axes.index(axis)
if self.dims[axis].is_monotonic():
                N = int(round(N/numpy.abs(numpy.diff(self.getAxes()[axis]).mean())))
else:
logger.warning('''{var}'s {dim} is not monotonic.
N is treated as integer'''.format(var=self.varname,
dim=self.dims[axis].dimname))
if not isinstance(N, int):
                    raise Exception('''N is treated as the number of elements
                    along a non-monotonic axis. It has to be an integer.''')
if N % 2 != 1:
N = N + 1
return Variable(data=stat.runave(self.data, N, axis, step),
parent=self, history=history)
def squeeze(self):
''' Remove singlet dimensions
'''
var = Variable(data=self.data, parent=self)
shape = self.data.shape
var.dims = [var.dims[idim]
for idim in range(var.data.ndim)
if shape[idim] > 1]
var.data = var.data.squeeze()
assert var.data.ndim == len(var.dims)
var.addHistory('squeeze()')
return var
def getDate(self, toggle="YmdHMS"):
''' Return the time axis date in an array format of
"Year,Month,Day,Hour,Minute,Second"
Toggle one or many among Y/m/d/H/M/S to select a particular time format
Args:
toggle (iterable of str): each item should be among Y/m/d/H/M/S
Examples:
>>> # return an array of the month of the time axis
>>> var.getDate("m")
array([ 1, 2, 3, 4, 5, 6 ])
>>> # return an array with the first column showing the years,
>>> # second column showing the months, third column
>>> # for days
        >>> var.getDate("Ymd")
array([[ 1990, 1, 15 ], [ 1990, 2, 15 ], [ 1990, 3, 15 ]])
'''
if 'T' not in self.getCAxes():
raise Exception("There is no recognized time axis in Variable:"+\
self.varname)
return self.dims[self.getCAxes().index('T')].getDate(toggle=toggle)
def _getdata(other):
'''
If the input is a geodat.nc.Variable, run the ensureMasked function
    and return its `data` attribute
    Otherwise, return the input unchanged
Used by __sub__,__add__,...
'''
if isinstance(other, Variable):
other.ensureMasked()
return other.data
else:
return other
def apply_mask(var, mask):
''' Mask the last axes of the data with a mask array
example: apply_mask(v,land_mask>0.)
The data of var is copied to a new variable that is being returned
'''
newvar = Variable(data=var.data.copy(), parent=var)
newvar.ensureMasked()
newvar.data[..., mask] = numpy.ma.masked
return newvar
def nc_cal(func):
    ''' A decorator that makes the wrapped function return a
    geodat.nc.Variable; it accepts only functions that operate on
    a numpy array
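    Illustrative usage (hypothetical function): if double(arr) returns arr*2
    for a numpy array, then nc_cal(double)(var) returns a Variable whose data
    is double(var.data)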
'''
@wraps(func)
def newfun(var, *args, **kwargs):
history = "".join([func.__name__, args.__str__(), kwargs.__str__()])
var.ensureMasked()
return Variable(data=func(var.data, *args, **kwargs), parent=var,
history=history)
return newfun
def wgt_ave(var, axis=None, lat_weighted=True):
'''A more general routine for averaging
The method first reads the axes (x/y/z/t) needed for averaging,
    finds the indices corresponding to these axes, then uses the
geodat.stat.wgt_ave to sort the axis and do the weighted average
if the axis is a "Y" axis, weights are computed using the latitude
axis in the variable.
if no axis is given, all axes will be averaged over.
    Args:
        var (Variable)
        axis (int/str/an iterable of int or str):
           the dimension(s) along which the average is computed
        lat_weighted (bool, default True): if an area average is involved,
          whether a latitudinal weight based on the convergence of meridians
          is applied.  The Y axis is assumed to have unit=degree
E.g.
(1) wgt_ave(Variable,'xy') will do the area average
'''
var.ensureMasked()
data = var.data
cartesian_axes = var.getCAxes()
if axis is None:
axis = range(len(cartesian_axes))
# If the input axis is a single integer, convert it into a list
if type(axis) is int:
axis = [axis]
history = 'wgt_ave(axis='+','.join([str(ax) for ax in axis])+')'
if type(axis) is str:
axis = axis.upper()
axis = [cartesian_axes.index(ax) for ax in axis]
# apply varied lat_weights only if 'XY' are included
caxes = [cartesian_axes[ax] for ax in axis]
has_XY = 'X' in caxes and 'Y' in caxes
if has_XY and lat_weighted:
sliceobj = [numpy.newaxis if cax != 'Y' else slice(None)
for cax in cartesian_axes]
if "degree" not in var.getDim("Y").units:
logger.warning("Area mean is weighted by Y axis and Y is assumed"+\
" to have unit=degreeN/degreeE")
lat_weights = stat.lat_weights(var.getLatitude())[sliceobj]
else:
lat_weights = 1.
for iax in axis:
if cartesian_axes[iax] in 'XYZ':
assert var.dims[iax].is_monotonic()
weights = reduce(lambda x, y: x[..., numpy.newaxis]*y,
[numpy.gradient(var.dims[iax].data)
if iax in axis and cartesian_axes[iax] in 'XYZ'
else numpy.ones_like(var.dims[iax].data)
for iax in range(var.data.ndim)])
weights *= lat_weights
weights = numpy.ma.masked_where(data.mask, weights)
data = keepdims.mean(data*weights, axis=axis)/\
keepdims.mean(weights, axis=axis)
dims = [Dimension(numpy.array([1,], dtype='i4'),
var.dims[iax].dimname,
units=var.dims[iax].units)
if iax in axis else var.dims[iax]
for iax in range(var.data.ndim)]
return Variable(data=data.astype(var.data.dtype),
dims=dims, parent=var, history=history)
def wgt_sum(var, axis=None):
'''A more general routine for sum
The method first reads the axes (x/y/z/t) needed,
    finds the indices corresponding to these axes,
if the axis is a "Y" axis, weights are computed using the latitude
axis in the variable.
if no axis is given, all axes will be summed over.
Args:
var (geodat.nc.Variable)
axis (str/int/list of int, optional): along which the array is summed
Examples:
>>> # Area sum
>>> wgt_sum(var,'xy')
>>> # Sum along the first axis
>>> wgt_sum(var,0)
'''
var.ensureMasked()
data = var.data
caxes = var.getCAxes()
dimnames = var.getDimnames()
if axis is None:
axis = range(len(dimnames))
# If the input axis is a single integer, convert it into a list
if type(axis) is int:
axis = [axis]
history = 'wgt_sum(axis='+','.join([str(ax) for ax in axis])+')'
if type(axis) is str:
axis = axis.upper()
axis = [caxes.index(ax) for ax in axis]
has_XY = 'X' in caxes and 'Y' in caxes
if has_XY:
sliceobj = [numpy.newaxis if cax != 'Y' else slice(None)
for cax in caxes]
weights = stat.lat_weights(var.getLatitude())[sliceobj]
else:
weights = 1.
data = data*weights
data = keepdims.sum(data, axis=axis)
dims = [Dimension(numpy.array([1,], dtype='i4'),
var.dims[iax].dimname, units=var.dims[iax].units)
if iax in axis else var.dims[iax]
for iax in range(var.data.ndim)]
return Variable(data=data.astype(var.data.dtype),
dims=dims, parent=var, history=history)
def time_input_to_datetime(time, calendar, units):
''' Return a datetime.datetime object given time as string
Example: time_input_to_datetime("1999-01-01 00:00:00",
"julian","days since 0001-01-01")
'''
if isinstance(time, datetime.datetime):
return time
elif isinstance(time, str):
try:
return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
except ValueError:
return datetime.datetime.strptime(time, '%Y-%m-%d')
else:
return _num2date(time, units=units, calendar=calendar)
def time_array_to_dim(time_array, calendar, units, **kwargs):
''' Return a geodat.nc.Dimension object given a time array
time_array = [ [ year, month, day, hour, minute, second ],...]
calendar = string ("standard","julian",...)
units = string (e.g. "days since 0001-01-01")
'''
times = numpy.array([_date2num(
time_input_to_datetime(
"{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}".format(*time),
calendar=calendar, units=units),
calendar=calendar, units=units)
for time in time_array])
return Dimension(data=times, units=units,
attributes={'calendar':calendar}, **kwargs)
def create_monthly(calendar, units, time0, time_end=None):
''' Return a generator that return a scalar time value with
the specified calendar and unit.
time0 is the starting time.
if time_end is not specified, the generator will not stop iterating.
    units should take the form "UNIT since DATETIME"
example: days since 0001-01-01 00:00:00
Work in progress. Need to be rewritten using relativedelta
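    Illustrative usage (hypothetical values): the following yields roughly one
    value per calendar month of year 1
        times = list(create_monthly("standard", "days since 0001-01-01",
                                    "0001-01-16 00:00:00", "0002-01-01"))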
'''
time0 = time_input_to_datetime(time0, calendar=calendar, units=units)
if time_end is not None:
time_end = time_input_to_datetime(time_end,
calendar=calendar, units=units)
calendar = calendar.lower()
def days_to_next_month(time):
'''Hard coded number of days between calendar months
TODO: Should use relativedelta'''
days = [29.5, 29.5, 30.5, 30.5,
30.5, 30.5, 31.0, 30.5,
30.5, 30.5, 30.5, 31.]
isleap = lambda year: (year % 4) == 0
if isleap(time.year) and (time.month == 1 or time.month == 2) and \
calendar != 'noleap':
return days[time.month-1]+0.5
else:
return days[time.month-1]
def continue_iter(current_time, time_end):
''' Determine if the current time has passed the specified time_end'''
if time_end is None:
return True
else:
return current_time < _date2num(time_end, units=units,
calendar=calendar)
current_time = _date2num(time0, units=units, calendar=calendar)
while continue_iter(current_time, time_end):
yield current_time
current_time += days_to_next_month(_num2date(current_time,
units=units,
calendar=calendar))
def create_climatology_dimension(calendar, units, time0=None, **dim_args):
''' Create a monthly dimension for climatology time axis
Args:
calendar (str) : e.g. "julian"
units (str): e.g. "days since 0001-01-01 00:00:00"
time0 (str): default "0001-01-16 00:00:00", the first value on the time
axis
Returns:
geodat.nc.Dimension
Optional keyword arguments are passed to geodat.nc.Dimension
'''
if time0 is None:
time0 = '0001-01-16 00:00:00'
time_generator = create_monthly(calendar, units, time0)
times = [time_generator.next() for i in range(12)]
return Dimension(data=numpy.array(times),
dimname='time',
units=units,
attributes={'modulo':""}, **dim_args)
def create_monthly_dimension(calendar, units, time0, time_end, **dim_args):
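    ''' Return a time Dimension with monthly values running from time0 up to
    (but not beyond) time_end, generated by create_monthly with the given
    calendar and units
    Additional keyword arguments are passed to geodat.nc.Dimension
    '''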
time_generator = create_monthly(calendar, units, time0, time_end)
times = [t for t in time_generator]
return Dimension(data=numpy.array(times), units=units, **dim_args)
def create_monthly_dimension2(ref_dim=None):
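    ''' Create a 12-value monthly climatology time Dimension
    If ref_dim is given, its time origin, units, calendar and attributes are
    reused; otherwise "days since 0001-1-1" on a standard calendar is assumed
    The values are evenly spaced between day 15 and day 350 of the year and
    the "modulo" attribute is set (conventionally marking the axis as cyclic)
    '''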
if ref_dim is None:
time0 = datetime.datetime(1, 1, 1, 0, 0)
units = 'days since 0001-1-1 0'
calendar = 'standard'
attributes = {'modulo':" "}
else:
time0 = ref_dim.time0()
units = ref_dim.units
calendar = ref_dim.attributes.\
get('calendar', 'standard').lower()
attributes = ref_dim.attributes.copy()
attributes['modulo'] = " "
newaxis = [_date2num(time0+datetime.timedelta(days=int(day)),
units=units,
calendar=calendar)
for day in numpy.linspace(15, 365-15, 12)]
return Dimension(data=numpy.array(newaxis),
dimname='time',
units=units,
attributes=attributes)
def climatology(var, appendname=False, *args, **kwargs):
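    ''' Compute the monthly climatology of a variable along its time axis
    Args:
        var (geodat.nc.Variable): input with a recognized time axis
        appendname (bool): if True, append " climatology" to the long_name
    Extra positional and keyword arguments are passed to monthly.climatology
    Returns:
        geodat.nc.Variable with a monthly climatology time axis whose units
        are forced to "days since 0001-01-01 00:00:00"
    '''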
var.ensureMasked()
data = var.data
try:
months = var.getDim("T").getMonthly()
except TypeError:
months = var.getDate("m")
axis = var.getIAxis("T")
clim_data = monthly.climatology(data=data, months=months, axis=axis,
*args, **kwargs)
history = 'climatology'
long_name = getattr(var, 'long_name', '')
if appendname:
long_name += " climatology"
dims = [dim for dim in var.dims]
# units is forced to be "days since 0001-01-01 00:00:00" instead of
# inheriting the var's time unit
dims[axis] = create_climatology_dimension(
calendar=getattr(var.dims[axis], 'calendar', 'standard').lower(),
units='days since 0001-01-01 00:00:00',
parent=var.dims[axis])
return Variable(data=clim_data, dims=dims, parent=var,
history=history,
attributes=dict(long_name=long_name))
def anomaly(var, appendname=False, clim=None,
no_continuous_duplicate_month=True):
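    ''' Compute monthly anomalies of a variable along its time axis
    Args:
        var (geodat.nc.Variable): input with a recognized time axis
        appendname (bool): if True, append " anomaly" to the long_name
        clim (geodat.nc.Variable, optional): climatology to subtract; if None,
            the climatology is computed from var itself
        no_continuous_duplicate_month (bool): accepted for interface
            compatibility but not referenced in this function body
    Returns:
        geodat.nc.Variable
    '''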
var.ensureMasked()
data = var.data
try:
months = var.getDim("T").getMonthly()
except TypeError:
months = var.getDate("m")
axis = var.getIAxis('T')
if clim is None:
anom_data = monthly.anomaly(data=data,
months=months, axis=axis)[0]
else:
anom_data = monthly.anomaly(data=data, months=months,
axis=axis, clim=clim.data)[0]
history = 'anomaly'
long_name = getattr(var, 'long_name', '')
if appendname:
long_name += " anomaly"
dims = [dim for dim in var.dims]
return Variable(data=anom_data, dims=dims,
parent=var, history=history,
attributes=dict(long_name=long_name))
def running_climatology(var, appendname, runave_window, step, need_anom=True):
''' Calculate the running climatology, with anomaly
Args:
var (geodat.nc.Variable)
appendname (bool): whether to append "_c" to the varname of the output
runave_window (int): size of the running average window
step (int): step for slicing the array
need_anom (bool): whether anomaly is returned
Returns:
climatology (geodat.nc.Variable), anomaly (None or geodat.nc.Variable if
need_anom is True)
Example:
If the time axis is monthly, compute a running climatology
with a 30-year window, with appended name and anomaly returned, like this::
>>> running_climatology(var,True,30,12,True)
'''
climo = var.runave(runave_window, var.getCAxes().index('T'), step)
climo.addHistory("Moving climatology with window:{}".format(runave_window))
if appendname:
climo.varname += '_c'
if need_anom:
anom = var - climo
anom.addHistory("Anomaly on a moving climatology")
if appendname:
anom.varname += '_a'
else:
anom = None
return climo, anom
def clim2long(clim, target):
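    ''' Broadcast a monthly climatology onto the (longer) time axis of target
    The climatology data is repeated to match target's calendar months and
    the returned Variable inherits target's time Dimension
    Args:
        clim (geodat.nc.Variable): monthly climatology
        target (geodat.nc.Variable): variable providing the full time axis
    Returns:
        geodat.nc.Variable
    '''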
# Copy the target time dimension
time_dim = target.dims[target.getCAxes().index("T")]
time_idim = clim.getCAxes().index("T")
new_dim = [time_dim
if idim == time_idim
else dim
for idim, dim in enumerate(clim.dims)]
try:
target_month = target.getDim("T").getMonthly()
except TypeError:
target_month = target.getDate("m")
return Variable(data=monthly.clim2long(
clim.data, 0, target_month),
dims=new_dim,
attributes=clim.attributes,
history="geodat.nc.clim2long({0},{1})".\
format(clim.varname, target.varname),
varname=clim.varname)
def concatenate(variables, axis=0):
''' Concatenate a list of variables similar to numpy.concatenate
Take care of numpy masked array and concatenate dimensions as well
Args:
variables (list of Variable)
axis (int): along which the variables are concatenated
Returns:
geodat.nc.Variable
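    Example (illustrative variable names):
        >>> full = concatenate([var_1990s, var_2000s], axis=0)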
'''
for var in variables:
var.ensureMasked()
data = numpy.ma.concatenate([var.data for var in variables], axis=axis)
# Concatenate dimensions
dim_data = numpy.concatenate([var.dims[axis].data
for var in variables])
dims = [dim for dim in variables[0].dims]
dims[axis] = Dimension(dim_data, parent=dims[axis])
return Variable(data=data, dims=dims, varname=variables[0].varname,
parent=variables[0],
history=getattr(variables[0], 'history', None))
def ensemble(variables, new_axis=None, new_axis_unit=None, **kwargs):
''' Given a list of variables, perform numpy.concatenate()
New axis is added as the left most axis
Optional arguments:
new_axis (numpy array) : for the new axis
new_axis_unit (str): defines the unit of the new axis
Other keyword arguments are parsed to geodat.nc.Variable
'''
for d in variables:
d.ensureMasked()
ensdata = numpy.ma.concatenate([d.data[numpy.newaxis, ...]
for d in variables], axis=0)
if new_axis is None:
new_axis = numpy.arange(1, len(variables)+1)
dims = [Dimension(new_axis, dimname='ensemble', units=new_axis_unit),] + \
variables[0].dims
return Variable(data=ensdata, parent=variables[0], dims=dims, **kwargs)
def div(u, v, varname='div', long_name='divergence', **kwargs):
''' Compute wind divergence by central difference
Args:
u (geodat.nc.Variable) - zonal wind
v (geodat.nc.Variable) - meridional wind
Returns:
geodat.nc.Variable
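    Example (illustrative):
        >>> d = div(u, v)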
'''
# Longitude may be discontinuous at the dateline
lon = numpy.mod(u.getLongitude(), 360.)
lon = numpy.radians(lon)
lat = numpy.radians(u.getLatitude())
xaxis = u.getIAxis('X')
yaxis = u.getIAxis('Y')
assert xaxis == v.getIAxis('X')
assert yaxis == v.getIAxis('Y')
# dx,dy
R = 6371000.
# New axis to match with lon
lat_newaxis_slice = (slice(None),)*xaxis + (numpy.newaxis,)
# New axis to match with lat
lon_newaxis_slice = (slice(None),)*yaxis + (numpy.newaxis,)
# a function of latitude
dx = numpy.cos(lat)[lat_newaxis_slice]*\
numpy.gradient(lon)[lon_newaxis_slice]*R
#dx_slice = (numpy.newaxis,)*yaxis + (slice(None),) \
# + (numpy.newaxis,)*(u.data.ndim-yaxis-1)
#dx = dx[dx_slice]
dy = numpy.gradient(lat) * R
return Variable(data=math.div(u.data, v.data, dx, dy, xaxis, yaxis),
varname=varname, parent=u, history='divergence',
attributes=dict(long_name=long_name), **kwargs)
def gradient(var, axis, mask_boundary=True, **kwargs):
''' Compute the gradient of a variable taking into account the convergence
of meridians
Args:
var (geodat.nc.Variable)
axis (str or int) - the axis along which the gradient is computed
mask_boundary (bool, default=True) - whether boundary values are masked
Additional keyword arguments are parsed to geodat.nc.Variable
Returns:
geodat.nc.Variable
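    Example (illustrative):
        >>> dsst_dx = gradient(sst, "X")  # zonal gradient of a hypothetical sst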
'''
if type(axis) is str:
axis = var.getCAxes().index(axis.upper())
R = 6371000.
if var.getCAxes()[axis] == 'X' and 'Y' in var.getCAxes():
yaxis = var.getCAxes().index('Y')
lon = numpy.radians(var.getLongitude())
lat = numpy.radians(var.getLatitude())
lat_slice = (numpy.newaxis,)*yaxis + (slice(None),) \
+ (numpy.newaxis,)*(var.data.ndim-yaxis-1)
lon_slice = (numpy.newaxis,)*axis + (slice(None),) \
+ (numpy.newaxis,)*(var.data.ndim-axis-1)
# a function of latitude
dx = numpy.cos(lat)[lat_slice] * numpy.gradient(lon)[lon_slice] * R
else:
if var.getCAxes()[axis] == 'X' or var.getCAxes()[axis] == 'Y':
dx = numpy.radians(numpy.gradient(var.getAxes()[axis])) * R
else:
dx = numpy.gradient(var.getAxes()[axis])
return Variable(data=math.gradient(var.data, dx, axis,
mask_boundary=mask_boundary),
parent=var,
history='gradient: '+var.getCAxes()[axis], **kwargs)
def integrate(var, axis, varname='int', versatile=False):
''' Integrate variable along one or more axes
var - geodat.nc.Variable
    axis - an int, a cartesian-axis string (e.g. "Z") or a list of ints
           selecting the dimension(s) to integrate along
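    Example (illustrative): integrate(q, "Z") integrates a hypothetical
    variable q along its vertical axis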
'''
var.ensureMasked()
if type(axis) is str:
axis = axis.upper()
axis = [var.getCAxes().index(ax) for ax in axis]
if type(axis) is not list:
axis = [axis]
# Compute integration
re_data = math.integrate(data=var.data, axes=var.getAxes(), iax=axis)
# History attribute
history = 'Integrated along axis:'+ \
''.join([var.getCAxes()[iax] for iax in axis])
# It may take some time to compute integration, notify the user
if versatile:
print("Integrating along axis...", end="")
# This long name is probably not needed
long_name = var.attributes.get('long_name', '') + \
' integrated on ' + \
''.join([var.getCAxes()[iax] for iax in axis])
result = Variable(data=re_data,
varname=varname, parent=var, history=history,
attributes=dict(long_name=long_name))
# Reduce dimension to the mean of the domain
for ax in axis:
result.dims[ax].data = numpy.array([var.dims[ax].data.mean()],
dtype=var.dims[ax].data.dtype)
if versatile:
print('Done.')
return result
def conform_region(*args):
''' Return a dictionary with the common lat-lon region
Input:
args: a list (length > 1) of dictionary or geodat.nc.Variable
the dictionary resembles the input for geodat.nc.Variable.getRegion()
Return:
        a dictionary {'lat': (min_lat, max_lat), 'lon': (min_lon, max_lon)}
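    Example (illustrative):
        conform_region(var1, var2) returns the overlapping latitude-longitude
        box of the two variables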
'''
if len(args) == 1:
raise Exception("Expect more than one domain in conform_region")
args = list(args)
for iarg, arg in enumerate(args):
# For backward compatibility, get the domains for Variable inputs
try:
argdomain = arg.getDomain()
except AttributeError:
argdomain = arg
# Generalise the form of the dictionary
args[iarg] = _general_region(argdomain)
minlon = max([domain.get('X', (numpy.inf*-1, numpy.inf))[0]
for domain in args])
maxlon = min([domain.get('X', (numpy.inf*-1, numpy.inf))[1]
for domain in args])
minlat = max([domain.get('Y', (numpy.inf*-1, numpy.inf))[0]
for domain in args])
maxlat = min([domain.get('Y', (numpy.inf*-1, numpy.inf))[1]
for domain in args])
return dict(lat=(minlat, maxlat), lon=(minlon, maxlon))
def conform_regrid(*args, **kwargs):
    ''' Given a list of variables,
    conform and regrid them to a common region and grid
    Return a list of variables
    Positional arguments (the variables) are passed to conform_region
Named optional arguments:
ref - specify a reference variable to regrid to
The rest go to geodat.nc.pyferret_regrid
'''
# Conform the region first
region = conform_region(*args)
varstoregrid = [var.getRegion(**region) for var in args]
axes = 'X' if all(('X' in var.getCAxes() for var in varstoregrid)) else ''
axes += 'Y' if all(('Y' in var.getCAxes() for var in varstoregrid)) else ''
if 'ref' in kwargs:
ref = kwargs.pop('ref').getRegion(**region)
regridded = [pyferret_regrid(var, ref)
for var in varstoregrid]
else:
# Reference is not given
# The variable with the finest grid would be the reference
# area = cos(theta) dtheta dphi
def minarea(var, axes):
mindelta = lambda v: numpy.abs(numpy.gradient(v)).min()
if axes == 'XY':
phi = numpy.radians(var.getLongitude())
theta = numpy.radians(var.getLatitude())
area = numpy.cos(theta)[numpy.newaxis, :]*\
numpy.gradient(phi)[:, numpy.newaxis]*\
numpy.gradient(theta)[numpy.newaxis, :]
return numpy.abs(area).min()
elif axes == 'X':
return mindelta(var.getLongitude())
elif axes == 'Y':
return mindelta(var.getLatitude())
ires = numpy.array([minarea(var, axes) for var in args]).argmin()
ref = varstoregrid[ires]
regridded = [pyferret_regrid(varstoregrid[i], ref, axis=axes, **kwargs)
if i != ires
else ref
for i in range(len(varstoregrid))]
return regridded
def fer2var(var):
''' Convert the dictionary returned by pyferret.getdata into a
geodat.nc.Variable
Args:
var (dict): as is returned by pyferret.getdata
Returns:
geodat.nc.Variable
'''
if not pyferret_func.PYFERRET_INSTALLED:
raise ImportError("No pyferret installed")
result = pyferret_func.fer2num(var)
dims = [Dimension(data=result['coords'][i],
units=result['dimunits'][i],
dimname=result['dimnames'][i])
for i in range(len(result['coords']))]
newvar = Variable(data=result['data'], dims=dims,
varname=result['varname'],
history='From Ferret')
return newvar
def var2fer(var, name=None):
''' Given a geodat.nc.Variable, return a dictionary
that resemble the Ferret data variable structure
to be passed to pyferret.putdata
Args:
var (geodat.nc.Variable)
name (str): optional, new variable name (default var.varname)
Returns:
dict: to be used by pyferret.putdata
'''
if not pyferret_func.PYFERRET_INSTALLED:
raise ImportError("No pyferret installed")
num_input = _var2num(var)
if name is not None:
assert isinstance(name, str)
num_input["varname"] = name
return pyferret_func.num2fer(**num_input)
def _var2num(var):
''' Convert a geodat.nc.Variable instance to a dictionary ready to be used
by pyferret_func.num2fer
Arg:
var (geodat.nc.Variable)
Returns:
dict
'''
return dict(data=var.data, missing_value=var.getMissingValue(),
coords=var.getAxes(),
dimunits=[dim.units for dim in var.dims],
varname=var.varname, data_units=getattr(var, 'units', ""),
cartesian_axes=var.getCAxes(),
dimnames=var.getDimnames())
def _num2var(num):
''' Convert pyferret_func.fer2num output to a geodat.nc.Variable instance
Arg:
num (dict)
Returns:
geodat.nc.Variable
'''
var_attrs = dict(missing_value=num.get("missing_value",
numpy.asscalar(
numpy.ma.default_fill_value(
num["data"]))))
if "data_units" in num:
var_attrs["units"] = num["data_units"]
dims = [Dimension(data=dimdata, dimname=dimname, units=dimunit)
for dimdata, dimname, dimunit in zip(num["coords"], num["dimnames"],
num["dimunits"])]
return Variable(data=num["data"], varname=num.get("varname", "UNKNOWN"),
dims=dims, attributes=var_attrs)
def pyferret_regrid(var, ref_var=None, axis='XY', nlon=None, nlat=None,
verbose=False, prerun=None, transform="@lin"):
''' Use pyferret to perform regridding.
Args:
var (geodat.nc.Variable): input data
ref_var (geodat.nc.Variable): provide the target grid
axis (str): which axis needs regridding
        nlon (int): if ref_var is not provided, a cartesian latitude-longitude
            global grid is created as the target grid; nlon is the number of
            longitudes
        nlat (int): number of latitudes, used together with nlon when ref_var
            is None
verbose (bool): default False
prerun (str): Ferret command to be run before the regridding
transform (str): Mode of regridding. "@lin" means linear interpolation
"@ave" means preserving area mean. See `Ferret doc`_
Either ref_var or (nlon and nlat) has to be specified
Returns:
geodat.nc.Variable
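    Example (illustrative; assumes pyferret is installed and var has
    longitude and latitude axes):
        >>> regridded = pyferret_regrid(var, nlon=144, nlat=90)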
.. _Ferret doc: http://ferret.pmel.noaa.gov/Ferret/documentation/users-guide
'''
if not pyferret_func.PYFERRET_INSTALLED:
raise ImportError("No pyferret installed")
if ref_var is None:
# If ref_var is not given, use nlon and nlat instead
if nlon is None or nlat is None:
raise Exception('''reference variable is not given.
nlon and nlat need to be specified''')
if ''.join(sorted(axis.upper())) != 'XY':
raise Exception('''ref_var not given and therefore assumed
regridding in the XY direction.
The axis/axes you chose:'''+str(axis))
# Create latitude and longitude using the sphere_grid and spharm modules
lon, lat = grid_func.grid_degree(NY=nlat, NX=nlon)
lon = Dimension(data=lon, units="degrees_E", dimname="lon")
lat = Dimension(data=lat, units="degrees_N", dimname="lat")
# Create new dimensions
dims = []
for idim, cax in enumerate(var.getCAxes()):
if cax == 'X':
dims.insert(idim, lon)
elif cax == 'Y':
dims.insert(idim, lat)
else:
dims.insert(idim, var.dims[idim])
data_shape = [dim.data.shape[0] for dim in dims]
ref_var = Variable(data=numpy.ones(data_shape, dtype=var.data.dtype),
dims=dims, parent=var)
if axis == 'XY' and transform.lower() != '@ave':
warnings.warn('''Regridding onto XY grid and
transformation: {} is used.'''.format(transform))
# Only a slice of ref_var is needed
# No need to copy the entire variable
# (reduce chance of running out of memory)
ref_var_slice = tuple([slice(0, 1) if cax not in axis.upper()
else slice(None)
for cax in ref_var.getCAxes()])
return _num2var(pyferret_func.regrid_primitive(
_var2num(var),
_var2num(ref_var[ref_var_slice].squeeze()),
axis, verbose=verbose, prerun=prerun, transform=transform))
def regrid(var, nlon, nlat, verbose=False):
''' Use spherical harmonic for regridding
    May produce ripples (ringing artifacts).
Take an instance of geodat.nc.Variable,
Deduce the lat-lon grid on a complete sphere,
Return a regridded data on a spherical grid (nlat,nlon)
Return: a geodat.nc.Variable instance
TODO: grid.regrid now only handle 2D or 3D data,
extend the function to handle rank-3+ data
by flattening the extra dimension into one dimension
'''
ilat = var.getCAxes().index('Y')
ilon = var.getCAxes().index('X')
if var.data.ndim == 3:
if verbose:
print("Perform regridding on 3-D data.")
otherdim = [i for i in range(var.data.ndim)
if i != ilat and i != ilon][0]
# new axis order:
newaxorder = [ilat, ilon, otherdim]
# transformed data
trans_data = numpy.transpose(var.data, newaxorder)
result = grid_func.regrid(var.getLongitude(),
var.getLatitude(),
trans_data, nlon, nlat)
# transform back
newaxorder = sorted(range(var.data.ndim), key=lambda x: newaxorder[x])
regridded = numpy.transpose(result, newaxorder)
elif var.data.ndim > 3:
        raise Exception('Right now the regrid function only takes 2D or 3D data')
else:
regridded = grid_func.regrid(var.getLongitude(),
var.getLatitude(),
var.data, nlon, nlat)
newlon, newlat = grid_func.grid_degree(nlat, nlon)
lon_d = Dimension(data=newlon, units=var.dims[ilon].units, dimname='lon')
lat_d = Dimension(data=newlat, units=var.dims[ilat].units, dimname='lat')
dims = []
for i in range(var.data.ndim):
if i == ilat:
dims.append(lat_d)
elif i == ilon:
dims.append(lon_d)
else:
dims.append(var.dims[i])
return Variable(data=regridded, dims=dims, parent=var, history='Regridded')
def gaus_filter(var, gausize):
''' Filter a variable spatially (i.e. X-Y)
using a gaussian filter of size gausize
Args:
var (geodat.nc.Variable)
gausize (int) - the size of the window for gaussian filtering
Returns:
geodat.nc.Variable
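    Example (illustrative):
        >>> smoothed = gaus_filter(sst, 2)  # sst is a hypothetical Variable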
'''
if var.data.mask.any():
warnings.warn('''There are masked values.
They are assigned zero before filtering''')
# Preserve the mask
mask = var.data.mask.copy()
var[var.data.mask] = 0.
var.data.mask = False
var.data.mask = mask
newvar = Variable(data=gaussian_filter(var.data, gausize),
parent=var,
history="Gaussian filter size:"+str(gausize))
return newvar
def savefile(filename, listofvar, overwrite=False,
recordax=-1, appendall=False):
'''
filename - a string that specifies the filename,
if it is not suffixed with .nc, .nc will be added
list of variable - a list of geodat.nc.Variable objects, can be a single
geodat.nc.Variable
overwrite - a boolean. Overwrite existing file if True.
default=False
recordax - an integer. Specify the axis that will be the
record axis, default = -1 (no record axis)
appendall - a boolean. Append to existing file if True.
default = False
This function, however different from other functions in the module,
uses NetCDF4 Dataset to write data
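    Example (illustrative):
        savefile("output.nc", [var1, var2], overwrite=True)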
'''
# Handle endian
endian_code = {'>':'big', '<':'little'}
# if the file is not suffixed by .nc, add it
if filename[-3:] != '.nc':
filename += '.nc'
# check if the file exists.
# If it does, warn the user if the overwrite flag is not specified
# savedfile = None
if os.path.exists(filename):
if overwrite and appendall:
raise Exception('appendall and overwrite can\'t be both True.')
if not overwrite and not appendall:
print(filename,"exists. Overwrite or Append? [a/o]")
yn = sys.stdin.readline()
if yn[0].lower() == 'o':
overwrite = True
elif yn[0].lower() == 'a':
appendall = True
else:
raise Exception('''File exists. Action must be either
to append or to overwrite''')
if not os.path.exists(filename) or overwrite:
# Create temporary file
ncfile = _netCDF4_Dataset(filename+'.tmp.nc', 'w',
format='NETCDF3_CLASSIC')
else:
# Append existing file
assert appendall
assert os.path.exists(filename)
ncfile = _netCDF4_Dataset(filename, 'a',
format='NETCDF3_CLASSIC')
# Add history to the file
ncfile.history = 'Created from script: '+ inspect.stack()[1][1]
# if listofvar is a single object, convert it into a list
if type(listofvar) is not list:
listofvar = [listofvar]
for var in listofvar:
varname = var.varname if var.varname is not None else 'var'
if var.dims is None and var.data.ndim > 0:
raise Exception("There is/are missing dimension(s) for "+\
var.varname)
if var.dims is not None:
# Save dimension arrays
dimnames = var.getDimnames()
axes = var.getAxes()
for idim, dimname in enumerate(dimnames):
if idim == recordax:
dimsize = None
else:
dimsize = var.data.shape[idim]
# check if the dimension has already been saved
isnewdim = dimname not in ncfile.dimensions
# the dimension name exists already and is not a record axis
if not isnewdim and idim != recordax:
olddim = ncfile.variables[dimname]
# check if the dimensions are in fact the same one,
# if not, it is a new dimension
isnewdim = axes[idim].shape != olddim[:].shape or\
(not numpy.allclose(axes[idim], olddim[:]))
# create new dimension
if isnewdim:
# Rename the dimension if it is unique but has name
# collision within the file
dimsuffix = ''
newDcount = 0
while dimname+dimsuffix in ncfile.dimensions:
newDcount += 1
dimsuffix = str(newDcount)
dimnames[idim] += dimsuffix
ncfile.createDimension(dimnames[idim], dimsize)
## saveddims.append(dimnames[idim])
endian = endian_code.get(axes[idim].dtype.byteorder,
'native')
if not numpy.isscalar(axes[idim]):
dimvar = ncfile.createVariable(
dimnames[idim], numpy.dtype(axes[idim].dtype.name),
(dimnames[idim],), endian=endian)
dimvar[:] = axes[idim]
dimvar.setncatts(var.dims[idim].attributes)
        varappend = varname in ncfile.variables and appendall
if varname in ncfile.variables and not varappend:
# Check again
print("Variable {} exists. ".format(varname),
"Append variable? [y/N]")
append_code = sys.stdin.readline()
if append_code[0].lower() == 'y':
varappend = True
else:
# Likely an unintended collision of variable names
# Change it!
print("Rename variable as :")
varname = sys.stdin.readline()[:-1]
var.varname = varname
if not varappend:
endian = endian_code.get(var.data.dtype.byteorder, 'native')
_ = ncfile.createVariable(varname,
numpy.dtype(var.data.dtype.name),
dimnames, endian=endian,
fill_value=var.getMissingValue())
var.ensureMasked()
data2save = var.data
# print varname
if varappend:
if float(var.getMissingValue()) != \
float(ncfile.variables[varname].getncattr('_FillValue')):
print("Warning: Existing var missing value:",
var.getMissingValue(),
"Appending var's missing value:",
ncfile.variables[varname].getncattr('_FillValue'))
oldvar = ncfile.variables[varname]
olddim = ncfile.variables[var.getDimnames()[recordax]]
oldnrec = ncfile.variables[varname].shape[recordax]
newnrec = var.data.shape[recordax]
s_lice = (slice(None),)*recordax + \
(slice(oldnrec, newnrec+oldnrec),)
print("Appending variable:", varname)
oldvar[s_lice] = data2save
print("Appending dimensions:", var.getDimnames()[recordax])
olddim[oldnrec:newnrec+oldnrec] = axes[recordax]
else:
if data2save.ndim == 0:
ncfile.variables[varname].assignValue(data2save)
else:
slice_obj = (slice(None),)*data2save.ndim
ncfile.variables[varname][slice_obj] = data2save
# Update attributes
atts = {att:val for att, val in var.attributes.items()
if att != '_FillValue'}
ncfile.variables[varname].setncatts(atts)
ncfile.close()
    if overwrite or not os.path.exists(filename):
os.rename(filename+'.tmp.nc', filename)
print("Saved to file:", filename)
elif appendall:
print("Appended to file:", filename)
else:
print("Temporary file created:", filename, ".tmp.nc", sep="")
def TimeSlices(var, lower, upper, toggle, no_continuous_duplicate_month=False):
""" Return a time segment of the variable according to the lower (inclusive)
and upper limits (inclusive)
Args:
var (geodat.nc.Variable)
lower (numeric): lower time limit
upper (numeric): upper time limit
toggle (str): Y/m/d/H/M/S to select a particular time format
no_continuous_duplicate_month (bool): default False; make sure the
difference between calendar months in the time axis is always larger
than or equal to 1; only suitable for dealing with monthly data.
Returns:
geodat.nc.Variable
Examples:
>>> # time segments in Nov, Dec, Jan and Feb
>>> TimeSlices(var,11.,2.,"m")
>>> # time segments from year 1990 to 2000 (inclusive)
>>> TimeSlices(var,1990, 2000,"Y")
"""
if len(toggle) > 1 or toggle not in "YmdHMS":
raise Exception("Accept one and only one of the Y/m/d/H/M/S as toggle")
if no_continuous_duplicate_month and toggle != "m":
raise ValueError("no_continuous_duplicate_month is only applied when "+\
"toggle=='m'")
if no_continuous_duplicate_month:
time = monthly.filter_monthly(var.getTime(), var.getDim("T").units,
getattr(var.getDim("T"), "calendar",
"standard"))
else:
time = var.getDate(toggle)
taxis = var.getCAxes().index('T')
if upper < lower:
slices = (slice(None),)*taxis + \
(numpy.logical_or(time >= lower, time <= upper),) +\
(slice(None),)*(var.data.ndim-taxis-1)
else:
slices = (slice(None),)*taxis + \
(numpy.logical_and(time >= lower, time <= upper),) +\
(slice(None),)*(var.data.ndim-taxis-1)
return var[slices]
def plot_vs_axis(var, axis, *args, **kwargs):
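    ''' Plot the variable against one of its axes using pylab.plot
    axis is the cartesian axis name (e.g. "T", "X", "Y"); extra positional
    and keyword arguments are passed to pylab.plot
    If the time axis is chosen, roughly ten x-tick labels are replaced by
    "Y-m-d" date strings
    Returns the result of pylab.plot
    '''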
axis = axis.upper()
line = pylab.plot(var.getAxis(axis), var.data, *args, **kwargs)
# Use date for the time axis
if axis == 'T':
times = var.getAxis(axis)
iticks = range(0, len(times), len(times)/10)
xticks = [times[i] for i in iticks]
dates = ["{}-{}-{}".format(*var.getDate("Ymd")[i]) for i in iticks]
pylab.gca().set_xticks(xticks)
pylab.gca().set_xticklabels(dates, rotation=20)
return line
def UseMapplot(f_pylab):
""" A decorator for using mapplot functions on an geodat.nc.Variable object
f_pylab is the pylab function for map plotting (e.g. contour, contourf,...)
"""
def plot_func(variable, *args, **kwargs):
''' Use mpl_toolkits.basemap.Basemap to plot
Args:
variable (geodat.nc.Variable): should be 2D (singlet dimension will
be removed when calling in this function)
basemap_kwargs (dict): optional. If provided, it is parsed to
mpl_toolkits.basemap.Basemap while setting up the map
Other arguments and keyword arguments are parsed to f_pylab (the pylab
function f_pylab provided).
Returns:
m, cs (mpl_toolkits.basemap.Basemap, output of f_pylab)
If the dimensions are not recognized as latitudes and longitudes, no map
is made; f_pylab(x,y,data) is called and its output(s) are returned
'''
basemap_kwargs = kwargs.pop("basemap_kwargs", None)
# args needed for quiver
args = list(args)
# Squeeze variable input
for iarg, arg in enumerate(args):
if hasattr(arg, "squeeze"):
args[iarg] = arg.squeeze()
if isinstance(args[iarg], Variable):
                args[iarg] = args[iarg].data
var_squeeze = variable.squeeze()
caxes = var_squeeze.getCAxes()
data = var_squeeze.data
if len(caxes) != 2:
raise Exception('UseMapplot is supposed to be used on 2D data')
if 'X' in caxes and 'Y' in caxes:
# Lat-Lon plot
lons = variable.getLongitude()
lats = variable.getLatitude()
m, cs = mapplot.MapSetup(f_pylab)(
lons, lats, data, basemap_kwargs, *args, **kwargs)
return m, cs
elif caxes[-1] == 'Z':
            # Z axis is preferred as the vertical axis
# and the data needs to be transposed
data = data.T
for iarg, arg in enumerate(args):
if hasattr(arg, "T"):
args[iarg] = arg.T
y, x = var_squeeze.getAxes()
return f_pylab(x, y, data, *args, **kwargs)
return plot_func
contour = UseMapplot(pylab.contour)
contourf = UseMapplot(pylab.contourf)
quiver = UseMapplot(pylab.quiver)
pcolor = UseMapplot(pylab.pcolor)
def spatial_corr(var1, var2):
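    ''' Pearson correlation between the flattened (masked) data arrays of two
    variables, computed with numpy.ma.corrcoef
    '''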
return numpy.ma.corrcoef(var1.data.ravel(), var2.data.ravel())[0, 1]
def regress(var1, var2):
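    ''' Regress two variables using geodat.signal.regress and wrap the
    resulting coefficients in a Variable; the leading dimension of var1
    (typically time) is dropped
    The regression convention follows geodat.signal.regress
    '''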
return Variable(data=geodat.signal.regress(var1.data,
var2.data)[0],
dims=var1.dims[1:],
varname="{}_{}".format(var1.varname,
var2.varname),
history="{} regress to {}".format(var1.varname,
var2.varname))
|
kitchoi/geodat
|
geodat/nc.py
|
Python
|
mit
| 88,536
|
[
"Gaussian",
"NetCDF"
] |
5294cb27ce1ecd7c22f4e55620cd21c4b5680e9b1a28487c15cd6fe5f45f4060
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.utilities.transaction_base import TransactionBase
class MaintenanceVisit(TransactionBase):
def get_feed(self):
return _("To {0}").format(self.customer_name)
def validate_serial_no(self):
for d in self.get('purposes'):
if d.serial_no and not frappe.db.exists("Serial No", d.serial_no):
frappe.throw(_("Serial No {0} does not exist").format(d.serial_no))
def validate(self):
self.validate_serial_no()
def update_customer_issue(self, flag):
for d in self.get('purposes'):
if d.prevdoc_docname and d.prevdoc_doctype == 'Warranty Claim' :
if flag==1:
mntc_date = self.mntc_date
service_person = d.service_person
work_done = d.work_done
status = "Open"
if self.completion_status == 'Fully Completed':
status = 'Closed'
elif self.completion_status == 'Partially Completed':
status = 'Work In Progress'
else:
nm = frappe.db.sql("select t1.name, t1.mntc_date, t2.service_person, t2.work_done from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.completion_status = 'Partially Completed' and t2.prevdoc_docname = %s and t1.name!=%s and t1.docstatus = 1 order by t1.name desc limit 1", (d.prevdoc_docname, self.name))
if nm:
status = 'Work In Progress'
mntc_date = nm and nm[0][1] or ''
service_person = nm and nm[0][2] or ''
work_done = nm and nm[0][3] or ''
else:
status = 'Open'
mntc_date = None
service_person = None
work_done = None
wc_doc = frappe.get_doc('Warranty Claim', d.prevdoc_docname)
wc_doc.update({
'resolution_date': mntc_date,
'resolved_by': service_person,
'resolution_details': work_done,
'status': status
})
wc_doc.db_update()
def check_if_last_visit(self):
"""check if last maintenance visit against same sales order/ Warranty Claim"""
check_for_docname = None
for d in self.get('purposes'):
if d.prevdoc_docname:
check_for_docname = d.prevdoc_docname
#check_for_doctype = d.prevdoc_doctype
if check_for_docname:
check = frappe.db.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.name!=%s and t2.prevdoc_docname=%s and t1.docstatus = 1 and (t1.mntc_date > %s or (t1.mntc_date = %s and t1.mntc_time > %s))", (self.name, check_for_docname, self.mntc_date, self.mntc_date, self.mntc_time))
if check:
check_lst = [x[0] for x in check]
check_lst =','.join(check_lst)
frappe.throw(_("Cancel Material Visits {0} before cancelling this Maintenance Visit").format(check_lst))
raise Exception
else:
self.update_customer_issue(0)
def on_submit(self):
self.update_customer_issue(1)
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.check_if_last_visit()
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
|
gsnbng/erpnext
|
erpnext/maintenance/doctype/maintenance_visit/maintenance_visit.py
|
Python
|
agpl-3.0
| 3,120
|
[
"VisIt"
] |
ac45b1f2c39ac89fab7f260028d9d36f8550049299e835a96f17d0a7773c8f83
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
from django.conf import settings
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import mkt
from mkt.constants.applications import DEVICE_TYPES
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.reviewers.models import EscalationQueue
from mkt.site.fixtures import fixture
from mkt.site.storage_utils import (copy_stored_file, local_storage,
private_storage, public_storage)
from mkt.site.tests import formset, initial, MktPaths, TestCase, user_factory
from mkt.site.tests.test_utils_ import get_image_path
from mkt.submit.decorators import read_dev_agreement_required
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.translations.models import Translation
from mkt.users.models import UserNotification, UserProfile
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AddonDeviceType, AddonUser, AppFeatures, Webapp
class TestSubmit(TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.fi_mock = mock.patch(
'mkt.developers.tasks.fetch_icon').__enter__()
self.user = self.get_user()
self.login(self.user.email)
def tearDown(self):
self.fi_mock.__exit__()
def get_user(self):
return UserProfile.objects.get(email='regular@mozilla.com')
def get_url(self, url):
return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])
def _test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def _test_progress_display(self, completed, current):
"""Test that the correct steps are highlighted."""
r = self.client.get(self.url)
progress = pq(r.content)('#submission-progress')
# Check the completed steps.
completed_found = progress.find('.completed')
for idx, step in enumerate(completed):
li = completed_found.eq(idx)
eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
# Check that we link back to the Developer Agreement.
terms_link = progress.find('.terms a')
if 'terms' in completed:
eq_(terms_link.attr('href'),
reverse('mkt.developers.docs', args=['policies', 'agreement']))
else:
eq_(terms_link.length, 0)
# Check the current step.
eq_(progress.find('.current').text(),
unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
def setUp(self):
super(TestProceed, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def test_is_authenticated(self):
# Redirect user to Terms.
r = self.client.get(self.url)
self.assert3xx(r, reverse('submit.app.terms'))
def test_is_anonymous(self):
# Show user to Terms page but with the login prompt.
self.client.logout()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
        eq_(notes.count(), 1, 'Expected to be subscribed to newsletter')
subscribe_mock.assert_called_with(
self.user.email, 'app-dev', lang='en-US',
country='restofworld', format='H',
source_url='http://testserver/developers/submit')
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
def setUp(self):
super(TestManifest, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
def test_anonymous(self):
r = self.client.get(self.url, follow=True)
eq_(r.context['step'], 'terms')
def test_cannot_skip_prior_step(self):
r = self.client.get(self.url, follow=True)
# And we start back at one...
self.assert3xx(r, reverse('submit.app.terms'))
def test_jump_to_step(self):
# I already read the Terms.
self._step()
# So jump me to the Manifest step.
r = self.client.get(reverse('submit.app'), follow=True)
eq_(r.context['step'], 'manifest')
def test_legacy_redirects(self):
def check():
for before, status in redirects:
r = self.client.get(before, follow=True)
self.assert3xx(r, dest, status)
# I haven't read the dev agreement.
redirects = (
('/developers/submit/', 302),
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
)
dest = '/developers/submit/terms'
check()
# I have read the dev agreement.
self._step()
redirects = (
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
('/developers/submit/manifest', 301),
)
dest = '/developers/submit/'
check()
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#upload-file').length, 0)
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
response = self.client.post(self.url, data, follow=True)
eq_(response.status_code, 200)
if not expect_errors:
# Show any unexpected form errors.
if response.context and 'form' in response.context:
eq_(response.context['form'].errors, {})
return response
class BaseWebAppTest(BaseUploadTest, UploadAddon, TestCase):
fixtures = fixture('user_999', 'user_10482')
def setUp(self):
super(BaseWebAppTest, self).setUp()
self.manifest = self.manifest_path('mozball.webapp')
self.manifest_url = 'http://allizom.org/mozball.webapp'
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
self.upload.update(name=self.manifest_url)
self.url = reverse('submit.app')
self.login('regular@mozilla.com')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 0)
self.post(data=data)
return Webapp.objects.get()
class TestCreateWebApp(BaseWebAppTest):
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_post_app_redirect(self, fi_mock):
r = self.post()
webapp = Webapp.objects.get()
self.assert3xx(r,
reverse('submit.app.details', args=[webapp.app_slug]))
assert fi_mock.delay.called, (
'The fetch_icon task was expected to be called')
def test_no_hint(self):
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url), follow=True)
eq_(r.status_code, 200)
assert 'already submitted' not in r.content, (
'Unexpected helpful error (trap_duplicate)')
assert 'already exists' not in r.content, (
'Unexpected validation error (verify_app_domain)')
def test_no_upload(self):
data = {'free_platforms': ['free-desktop']}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_bad_upload(self, fi_mock):
data = {'free_platforms': ['free-desktop'], 'upload': 'foo'}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
assert not fi_mock.delay.called, (
'The fetch_icon task was not expected to be called')
def test_hint_for_same_manifest(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
assert 'Oops' in data['validation']['messages'][0]['message'], (
'Expected oops')
def test_no_hint_for_same_manifest_different_author(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
# Submit same manifest as different user.
self.login('clouserw@mozilla.com')
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
def test_app_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.is_packaged, False)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'MozillaBall ょ')
eq_(addon.app_slug, u'mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, u'http://allizom.org/mozball.webapp')
eq_(addon.app_domain, u'http://allizom.org')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
eq_(addon.latest_version.manifest,
json.loads(open(self.manifest).read()))
def test_manifest_with_any_extension(self):
self.manifest = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons', 'mozball.owa')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
ok_(addon.id)
def test_version_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.latest_version.version, '1.0')
def test_file_from_uploaded_manifest(self):
addon = self.post_addon()
files = addon.latest_version.files.all()
eq_(len(files), 1)
eq_(files[0].status, mkt.STATUS_PENDING)
def test_free(self):
app = self.post_addon({'free_platforms': ['free-firefoxos']})
self.assertSetEqual(app.device_types, [mkt.DEVICE_GAIA])
eq_(app.premium_type, mkt.ADDON_FREE)
def test_supported_locales(self):
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_short_locale(self):
# This manifest has a locale code of "zh" which is in the
# SHORTER_LANGUAGES setting and should get converted to "zh-CN".
self.manifest = self.manifest_path('short-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'zh-CN')
eq_(addon.versions.latest().supported_locales, 'es')
def test_unsupported_detail_locale(self):
# This manifest has a locale code of "en-CA" which is unsupported, so
# we default to "en-US".
self.manifest = self.manifest_path('unsupported-default-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_appfeatures_creation(self):
addon = self.post_addon(data={
'free_platforms': ['free-desktop'],
'has_contacts': 'on'
})
features = addon.latest_version.features
ok_(isinstance(features, AppFeatures))
field_names = [f.name for f in AppFeaturesForm().all_fields()]
for field in field_names:
expected = field == 'has_contacts'
eq_(getattr(features, field), expected)
class TestCreateWebAppFromManifest(BaseWebAppTest):
def setUp(self):
super(TestCreateWebAppFromManifest, self).setUp()
Webapp.objects.create(app_slug='xxx',
app_domain='http://existing-app.com')
def upload_webapp(self, manifest_url, **post_kw):
self.upload.update(name=manifest_url) # Simulate JS upload.
return self.post(**post_kw)
def post_manifest(self, manifest_url):
rs = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=manifest_url))
if 'json' in rs['content-type']:
rs = json.loads(rs.content)
return rs
def test_duplicate_domain(self):
self.create_switch(name='webapps-unique-by-domain')
rs = self.upload_webapp('http://existing-app.com/my.webapp',
expect_errors=True)
eq_(rs.context['form'].errors,
{'upload':
['An app already exists on this domain; only one '
'app per domain is allowed.']})
def test_allow_duplicate_domains(self):
self.upload_webapp('http://existing-app.com/my.webapp') # No errors.
def test_duplicate_domain_from_js(self):
self.create_switch(name='webapps-unique-by-domain')
data = self.post_manifest('http://existing-app.com/my.webapp')
eq_(data['validation']['errors'], 1)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; '
'only one app per domain is allowed.')
def test_allow_duplicate_domains_from_js(self):
rs = self.post_manifest('http://existing-app.com/my.webapp')
eq_(rs.status_code, 302)
class SetupFilesMixin(MktPaths):
def setup_files(self, filename='mozball.zip'):
# Local source filename must exist.
assert os.path.exists(self.packaged_app_path(filename))
# Remote filename must not be empty.
assert self.file.filename
# Original packaged file.
copy_stored_file(self.packaged_app_path(filename),
self.file.file_path,
src_storage=local_storage,
dst_storage=private_storage)
# Signed packaged file.
copy_stored_file(self.packaged_app_path(filename),
self.file.signed_file_path,
src_storage=local_storage,
dst_storage=public_storage)
class BasePackagedAppTest(SetupFilesMixin, BaseUploadTest, UploadAddon,
TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(BasePackagedAppTest, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True)
self.version = self.app.latest_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
self.upload = self.get_upload(
abspath=self.package,
user=UserProfile.objects.get(email='regular@mozilla.com'))
self.upload.update(name='mozball.zip')
self.url = reverse('submit.app')
self.login('regular@mozilla.com')
@property
def package(self):
return self.packaged_app_path('mozball.zip')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 1)
self.post(data=data)
return Webapp.objects.order_by('-id')[0]
class TestEscalatePrereleaseWebApp(BasePackagedAppTest):
def setUp(self):
super(TestEscalatePrereleaseWebApp, self).setUp()
user_factory(email=settings.NOBODY_EMAIL_ADDRESS)
def post(self):
super(TestEscalatePrereleaseWebApp, self).post(data={
'free_platforms': ['free-firefoxos'],
'packaged': True,
})
def test_prerelease_permissions_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['moz-attention']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 1)
def test_prerelease_permissions_get_escalated_external_app(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['moz-external-app']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 1)
def test_normal_permissions_dont_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['contacts']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 0)
class TestCreatePackagedApp(BasePackagedAppTest):
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_post_app_redirect(self, _mock):
res = self.post()
webapp = Webapp.objects.order_by('-created')[0]
self.assert3xx(res,
reverse('submit.app.details', args=[webapp.app_slug]))
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_app_from_uploaded_package(self, _verify, _mock):
addon = self.post_addon(
data={'packaged': True, 'free_platforms': ['free-firefoxos']})
eq_(addon.latest_version.version, '1.0')
eq_(addon.is_packaged, True)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'Packaged MozillaBall ょ')
eq_(addon.app_slug, u'packaged-mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, None)
eq_(addon.app_domain, 'app://hy.fr')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
assert _verify.called, (
'`verify_app_domain` should be called for packaged apps with '
'origins.')
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_packaged_app_not_unique(self, _mock):
Webapp.objects.create(is_packaged=True, app_domain='app://hy.fr')
res = self.post(
data={'packaged': True, 'free_platforms': ['free-firefoxos']},
expect_errors=True)
eq_(res.context['form'].errors, {
'upload': ['An app already exists on this domain; only one app '
'per domain is allowed.']})
class TestDetails(TestSubmit):
fixtures = fixture('webapp_337141', 'user_999', 'user_10482')
def setUp(self):
super(TestDetails, self).setUp()
self.webapp = self.get_webapp()
self.webapp.update(status=mkt.STATUS_NULL)
self.url = reverse('submit.app.details', args=[self.webapp.app_slug])
self.cat1 = 'books-comics'
def get_webapp(self):
return Webapp.objects.get(id=337141)
def upload_preview(self, image_file=None):
if not image_file:
image_file = get_image_path('preview.jpg')
return self._upload_image(self.webapp.get_dev_url('upload_preview'),
image_file=image_file)
def upload_icon(self, image_file=None):
if not image_file:
image_file = get_image_path('mozilla-sq.png')
return self._upload_image(self.webapp.get_dev_url('upload_icon'),
image_file=image_file)
def _upload_image(self, url, image_file):
with open(image_file, 'rb') as data:
rp = self.client.post(url, {'upload_image': data})
eq_(rp.status_code, 200)
hash_ = json.loads(rp.content)['upload_hash']
assert hash_, 'No hash: %s' % rp.content
return hash_
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
self.cl = AppSubmissionChecklist.objects.create(
addon=self.webapp,
terms=True, manifest=True)
# Associate app with user.
AddonUser.objects.create(addon=self.webapp, user=self.user)
# Associate device type with app.
self.dtype = DEVICE_TYPES.values()[0]
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype.id)
self.device_types = [self.dtype]
# Associate category with app.
self.webapp.update(categories=[self.cat1])
def test_anonymous(self):
self._test_anonymous()
def test_resume_later(self):
self._step()
self.webapp.appsubmissionchecklist.update(details=True)
r = self.client.get(reverse('submit.app.resume',
args=[self.webapp.app_slug]))
self.assert3xx(r, self.webapp.get_dev_url('edit'))
def test_not_owner(self):
self._step()
self.login('clouserw@mozilla.com')
eq_(self.client.get(self.url).status_code, 403)
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#submit-details').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest'], 'details')
def new_preview_formset(self, *args, **kw):
ctx = self.client.get(self.url).context
blank = initial(ctx['form_previews'].forms[-1])
blank.update(**kw)
return blank
def preview_formset(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.new_preview_formset()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def get_dict(self, **kw):
data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'homepage': 'http://www.goodreads.com/user/show/7595895-krupa',
'support_url': 'http://www.goodreads.com/user_challenges/351558',
'support_email': 'krupa+to+the+rescue@goodreads.com',
'categories': [self.cat1],
'publish_type': mkt.PUBLISH_IMMEDIATE,
'notes': 'yes'
}
# Add the required screenshot.
data.update(self.preview_formset({
'upload_hash': '<hash>',
'position': 0
}))
data.update(**kw)
# Remove fields without values.
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
def check_dict(self, data=None, expected=None):
if data is None:
data = self.get_dict()
addon = self.get_webapp()
# Build a dictionary of expected results.
expected_data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'publish_type': mkt.PUBLISH_IMMEDIATE,
}
if expected:
expected_data.update(expected)
self.assertSetEqual(addon.device_types, self.device_types)
for field, expected in expected_data.iteritems():
got = unicode(getattr(addon, field))
expected = unicode(expected)
eq_(got, expected,
'Expected %r for %r. Got %r.' % (expected, field, got))
@mock.patch('mkt.submit.views.record_action')
def test_success(self, record_action):
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, mkt.STATUS_NULL)
assert record_action.called
def test_success_paid(self):
self._step()
self.webapp = self.get_webapp()
self.make_premium(self.webapp)
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, mkt.STATUS_NULL)
eq_(self.webapp.highest_status, mkt.STATUS_PENDING)
def test_success_prefill_device_types_if_empty(self):
"""
The new submission flow asks for device types at step one.
This ensures that existing incomplete apps still have device
compatibility.
"""
self._step()
AddonDeviceType.objects.all().delete()
self.device_types = mkt.DEVICE_TYPES.values()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_success_for_approved(self):
self._step()
data = self.get_dict(publish_type=mkt.PUBLISH_PRIVATE)
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data,
expected={'publish_type': mkt.PUBLISH_PRIVATE})
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_media_types(self):
self._step()
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
def test_screenshot(self):
self._step()
im_hash = self.upload_preview()
data = self.get_dict()
data.update(self.preview_formset({
'upload_hash': im_hash,
'position': 0
}))
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = Webapp.objects.get(pk=self.webapp.pk)
eq_(ad.previews.all().count(), 1)
def test_icon(self):
self._step()
im_hash = self.upload_icon()
data = self.get_dict()
data['icon_upload_hash'] = im_hash
data['icon_type'] = 'image/png'
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = self.get_webapp()
eq_(ad.icon_type, 'image/png')
for size in mkt.CONTENT_ICON_SIZES:
fn = '%s-%s.png' % (ad.id, size)
assert public_storage.exists(
os.path.join(ad.get_icon_dir(), fn)), ('Expected %s in %s' % (
fn, public_storage.listdir(ad.get_icon_dir())[1]))
def test_screenshot_or_video_required(self):
self._step()
data = self.get_dict()
for k in data:
if k.startswith('files') and k.endswith('upload_hash'):
data[k] = ''
rp = self.client.post(self.url, data)
eq_(rp.context['form_previews'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_unsaved_screenshot(self):
self._step()
        # If there are form errors, we should still pass the preview URIs.
preview_type = 'video/webm'
preview_uri = 'moz-filedata:p00p'
data = self.preview_formset({
'position': 1,
'upload_hash': '<hash_one>',
'unsaved_image_type': preview_type,
'unsaved_image_data': preview_uri
})
r = self.client.post(self.url, data)
eq_(r.status_code, 200)
form = pq(r.content)('form')
eq_(form.find('input[name=files-0-unsaved_image_type]').val(),
preview_type)
eq_(form.find('input[name=files-0-unsaved_image_data]').val(),
preview_uri)
def test_unique_allowed(self):
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, mkt.STATUS_NULL)
def test_slug_invalid(self):
self._step()
# Submit an invalid slug.
d = self.get_dict(app_slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(
r, 'form_basic', 'app_slug',
"Enter a valid 'slug' consisting of letters, numbers, underscores "
"or hyphens.")
def test_slug_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(app_slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
'This field is required.')
def test_description_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(description=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'description',
'This field is required.')
def test_privacy_policy_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(privacy_policy=None))
self.assertFormError(r, 'form_basic', 'privacy_policy',
'This field is required.')
def test_clashing_locale(self):
self.webapp.default_locale = 'de'
self.webapp.save()
self._step()
self.client.cookies['current_locale'] = 'en-us'
data = self.get_dict(name=None, name_de='Test name',
privacy_policy=None,
**{'privacy_policy_en-us': 'XXX'})
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
def test_homepage_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage=None))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form_basic', 'homepage', 'Enter a valid URL.')
def test_support_url_optional_if_email_present(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url=None))
self.assertNoFormErrors(r)
def test_support_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url='xxx'))
self.assertFormError(r, 'form_basic', 'support_url',
'Enter a valid URL.')
def test_support_email_optional_if_url_present(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email=None))
self.assertNoFormErrors(r)
def test_support_email_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email='xxx'))
self.assertFormError(r, 'form_basic', 'support_email',
'Enter a valid email address.')
def test_support_need_email_or_url(self):
self._step()
res = self.client.post(self.url, self.get_dict(support_email=None,
support_url=None))
self.assertFormError(
res, 'form_basic', 'support',
'You must provide either a website, an email, or both.')
ok_(pq(res.content)('#support-fields .error #trans-support_url'))
ok_(pq(res.content)('#support-fields .error #trans-support_email'))
# While the inputs will get the error styles, there is no need for an
        # individual error message on each; the hint on the parent is enough.
eq_(pq(res.content)('#support-fields .error .errorlist').text(), '')
def test_categories_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(categories=[]))
eq_(r.context['form_cats'].errors['categories'],
['This field is required.'])
def test_categories_max(self):
self._step()
eq_(mkt.MAX_CATEGORIES, 2)
cat2 = 'games'
cat3 = 'social'
cats = [self.cat1, cat2, cat3]
r = self.client.post(self.url, self.get_dict(categories=cats))
eq_(r.context['form_cats'].errors['categories'],
['You can have only 2 categories.'])
def _post_cats(self, cats):
self.client.post(self.url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories), sorted(cats))
def test_categories_add(self):
self._step()
cat2 = 'games'
self._post_cats([self.cat1, cat2])
def test_categories_add_and_remove(self):
self._step()
cat2 = 'games'
self._post_cats([cat2])
def test_categories_remove(self):
# Add another category here so it gets added to the initial formset.
cat2 = 'games'
self.webapp.update(categories=[self.cat1, cat2])
self._step()
# `cat2` should get removed.
self._post_cats([self.cat1])
class TestDone(TestSubmit):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
super(TestDone, self).setUp()
self.webapp = self.get_webapp()
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def _step(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_anonymous(self):
self._test_anonymous()
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest', 'details'],
'next_steps')
def test_done(self):
self._step()
res = self.client.get(self.url)
eq_(res.status_code, 200)
class TestNextSteps(TestCase):
# TODO: Delete this test suite once we deploy IARC.
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.user = UserProfile.objects.get(email='regular@mozilla.com')
self.login(self.user.email)
self.webapp = Webapp.objects.get(id=337141)
self.webapp.update(status=mkt.STATUS_PENDING)
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def test_200(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
res = self.client.get(self.url)
eq_(res.status_code, 200)
|
jasonthomas/zamboni
|
mkt/submit/tests/test_views.py
|
Python
|
bsd-3-clause
| 38,479
|
[
"exciting"
] |
79ece097434dc55c37486444f3e2b99a88503c6c18c10300179ca320a7a128cb
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_, PY2
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from itertools import zip_longest
from scipy._lib import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
if PY2:
instancemethod = types.MethodType
else:
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
    Non-central moment of order n.
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
    Endpoints of the range that contains alpha percent of the distribution.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
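# Illustrative note (not part of the original module): for n <= 4,
# `_moment_from_stats` rebuilds the non-central moment from the standardized
# stats via mu3 = g1 * mu2**1.5 and mu4 = (g2 + 3) * mu2**2, falling back to
# `moment_func` whenever a required stat is missing.  For example, with the
# standard-normal stats (mu=0, mu2=1, g1=0, g2=0):
#
#     _moment_from_stats(4, 0.0, 1.0, 0.0, 0.0, None, ())  # -> 3.0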
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
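# Illustrative sketch (not part of the original module): a quick numerical
# check that `_skew` and `_kurtosis` match the definitions quoted in their
# docstrings, expressed through the central moments computed by `_moment`.
# The helper name and the default data array below are assumptions made only
# for this example.
def _sample_moment_sanity_check(data=None):
    if data is None:
        data = np.array([1.0, 2.0, 2.0, 3.0, 7.0])
    m2 = _moment(data, 2)
    skew_ok = np.allclose(_skew(data), _moment(data, 3) / np.power(m2, 1.5))
    kurt_ok = np.allclose(_kurtosis(data), _moment(data, 4) / m2**2 - 3)
    return skew_ok and kurt_ok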
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist._get_support(*shapes)
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def support(self):
return self.dist.support(*self.args, **self.kwds)
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
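# Illustrative sketch (not part of the original module): `_ncx2_log_pdf` works
# in log space and folds the exp(-xs*ns) factor into `ive` (the exponentially
# scaled Bessel function) so the result stays finite where a naive
# exp() * iv() formulation would under- or overflow.  The helper below is a
# hypothetical consistency check: the numerical derivative of `_ncx2_cdf`
# should approximate `_ncx2_pdf`.
def _ncx2_consistency_check(x=5.0, df=3.0, nc=2.0):
    slope = derivative(_ncx2_cdf, x, dx=1e-5, args=(df, nc))
    return np.allclose(slope, _ncx2_pdf(x, df, nc), rtol=1e-4)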
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = _getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __getstate__(self):
return self._updated_ctor_param(), self._random_state
def __setstate__(self, state):
ctor_param, r = state
self.__init__(**ctor_param)
self._random_state = r
return self
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getargspec(meth) # NB: does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
try:
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
except TypeError as e:
raise Exception("Unable to construct docstring for distribution \"%s\": %s" % (self.name, repr(e)))
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters.")
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _get_support(self, *args, **kwargs):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
return (a < x) & (x < b)
def _rvs(self, *args):
# This method must handle self._size being a tuple, and it must
# properly broadcast *args and self._size. self._size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
# `size` should just be an argument to _rvs(), but for, um,
# historical reasons, it is made an attribute that is read
# by _rvs().
self._size = size
vals = self._rvs(*args)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
with np.errstate(invalid='ignore'):
mu3 = (-mu*mu - 3*mu2)*mu + mu3p
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
with np.errstate(invalid='ignore'):
mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
with np.errstate(invalid='ignore'):
mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = [default.copy() for _ in moments]
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, scale, *args)
goodscale = goodargs[0]
goodargs = goodargs[1:]
place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
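        Examples
        --------
        A minimal, illustrative sketch (assuming ``norm`` from `scipy.stats`;
        shifting by ``loc`` shifts the mean):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> np.allclose(norm.mean(loc=3.0), 3.0)
        True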
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
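        Examples
        --------
        A minimal, illustrative sketch (assuming ``norm`` from `scipy.stats`;
        scaling by ``scale`` multiplies the variance by ``scale**2``):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> np.allclose(norm.var(scale=2.0), 4.0)
        True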
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
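        Examples
        --------
        A minimal, illustrative sketch (assuming ``norm`` from `scipy.stats`):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> np.allclose(norm.std(scale=2.0), 2.0)
        True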
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
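        Examples
        --------
        A minimal, illustrative sketch (assuming ``norm`` from `scipy.stats`;
        the central 95% interval of the standard normal is roughly +/- 1.96):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> np.allclose(norm.interval(0.95), (-1.96, 1.96), atol=1e-2)
        True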
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
def support(self, *args, **kwargs):
"""
Return the support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : float
end-points of the distribution's support.
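        Examples
        --------
        A minimal, illustrative sketch (assuming ``uniform`` from
        `scipy.stats`; with ``loc=2`` and ``scale=3`` the support is [2, 5]):
        >>> import numpy as np
        >>> from scipy.stats import uniform
        >>> np.allclose(uniform.support(loc=2, scale=3), (2.0, 5.0))
        True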
"""
args, loc, scale = self._parse_args(*args, **kwargs)
_a, _b = self._get_support(*args)
return _a * scale + loc, _b * scale + loc
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
    at most one non-None value in `kwds` associated with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
        The value in result arrays that indicates a value for which some
        argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
if the inverse cdf can expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
    Normally, you must provide shape parameters (and, optionally, location
    and scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
      Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
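    As a quick, illustrative sanity check of the sketch above, the density at
    zero should match the closed form ``1/sqrt(2*pi)``:
    >>> import numpy as np
    >>> np.allclose(gaussian.pdf(0.0), 1.0 / np.sqrt(2.0 * np.pi))
    True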
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
            if name[0] in 'aeiouAEIOU':  # vowel -> "An ..."
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
_a, _b = self._get_support(*args)
if _a > -np.inf:
left = _a
if _b < np.inf:
right = _b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
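        Examples
        --------
        A minimal, illustrative sketch (assuming ``norm`` from `scipy.stats`;
        the standard normal density at zero is ``1/sqrt(2*pi)``):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> np.allclose(norm.pdf(0.0), 1.0 / np.sqrt(2.0 * np.pi))
        True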
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
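        Examples
        --------
        A minimal, illustrative sketch (assuming ``norm`` from `scipy.stats`;
        by symmetry the standard normal cdf at zero is 0.5):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> np.allclose(norm.cdf(0.0), 0.5)
        True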
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= np.asarray(_b)) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
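        Examples
        --------
        A minimal, illustrative sketch (assuming ``expon`` from `scipy.stats`;
        the standard exponential survival function is ``exp(-x)``):
        >>> import numpy as np
        >>> from scipy.stats import expon
        >>> np.allclose(expon.sf(1.0), np.exp(-1.0))
        True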
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
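        Examples
        --------
        A minimal, illustrative sketch (assuming ``norm`` from `scipy.stats`;
        ``ppf`` inverts ``cdf`` up to floating-point error):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> np.allclose(norm.ppf(norm.cdf(1.5)), 1.5)
        True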
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
return loc, scale, args
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
''' Return penalized negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds):
"""
Return the (possibly reduced) function to optimize in order to find MLE
estimates for the .fit method.
"""
# Convert fixed shape parameters to the standard numeric form: e.g. for
# stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
# for `f0`, `fa` or 'fix_a'. The following converts the latter two
# into the first (numeric) form.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
if val is not None:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape (if applicable), location, and scale
parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
          For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
mle_tuple : tuple of floats
MLEs for any shape parameters (if applicable), followed by those
for location and scale. For most random variables, shape statistics
will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE, it
may only be locally optimal, or the optimization may fail altogether.
If the data contain any of np.nan, np.inf, or -np.inf, the fit routine
will throw a RuntimeError.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
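        Examples
        --------
        A small, illustrative sketch (assuming ``norm`` from `scipy.stats`;
        since the standard normal has mean 0 and variance 1, the estimates
        reduce to the sample mean and standard deviation):
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> data = np.array([1.0, 2.0, 3.0, 4.0])
        >>> Lhat, Shat = norm.fit_loc_scale(data)
        >>> np.allclose((Lhat, Shat), (data.mean(), data.std()))
        True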
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, _a, _b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= _a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None, axis=0):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=axis)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=axis)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
axis: int, optional
The axis along which the entropy is calculated. Default is 0.
Returns
-------
S : float
The calculated entropy.
Examples
--------
>>> from scipy.stats import entropy
Bernoulli trial with different p.
The outcome of a fair coin is the most uncertain:
>>> entropy([1/2, 1/2], base=2)
1.0
The outcome of a biased coin is less uncertain:
>>> entropy([9/10, 1/10], base=2)
0.46899559358928117
Relative entropy:
>>> entropy([1/2, 1/2], qk=[9/10, 1/10])
0.5108256237659907
"""
pk = asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if qk.shape != pk.shape:
raise ValueError("qk and pk must have same shape.")
qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)
vec = rel_entr(pk, qk)
S = np.sum(vec, axis=axis)
if base is not None:
S /= log(base)
return S
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
and ``pk`` must have the same shape.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
        The value in result arrays that indicates a value for which some
        argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
support
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive.)
The main differences are:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
self._construct_docstrings(name, longname, extradoc)
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':  # vowel -> "An ..."
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
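        Examples
        --------
        A minimal, illustrative sketch (assuming ``binom`` from `scipy.stats`;
        ``C(5, 2) * 0.5**5 == 0.3125``):
        >>> import numpy as np
        >>> from scipy.stats import binom
        >>> np.allclose(binom.pmf(2, 5, 0.5), 0.3125)
        True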
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
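        Examples
        --------
        A minimal, illustrative sketch (assuming ``binom`` from `scipy.stats`;
        the cdf at ``k`` is the cumulative sum of the pmf up to ``k``):
        >>> import numpy as np
        >>> from scipy.stats import binom
        >>> np.allclose(binom.cdf(2, 5, 0.5), binom.pmf([0, 1, 2], 5, 0.5).sum())
        True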
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
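        Examples
        --------
        A minimal, illustrative sketch (assuming ``binom`` from `scipy.stats`;
        the ppf returns the smallest integer ``k`` with ``cdf(k) >= q``):
        >>> import numpy as np
        >>> from scipy.stats import binom
        >>> np.allclose(binom.ppf(0.6, 5, 0.5), 3.0)
        True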
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _a-1)
place(output, cond2, _b)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _b)
place(output, cond2, _a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution by numerical summation.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of a distributions in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
For heavy-tailed distributions, the expected value may or may not exist,
depending on the function, `func`. If it does exist, but the sum converges
slowly, the accuracy of the result may be rather low. For instance, for
``zipf(4)``, the accuracy of the mean and variance is only about 1e-5.
Increasing `maxcount` and/or `chunksize` may improve the result, but may
also make zipf very slow.
The function is not vectorized.
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
_a, _b = self._get_support(*args)
if lb is None:
lb = _a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = _b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
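# A minimal usage sketch, assuming the public ``scipy.stats`` entry points: for a
# Poisson distribution with rate mu, the chunked summation above should recover
# the analytic mean to within roughly `tolerance`, e.g.
#
#     from scipy import stats
#     m = stats.poisson.expect(lambda k: k, args=(2.0,))   # approximately 2.0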
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
The constructor ignores most of the arguments; only the `values` argument is required.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._construct_docstrings(name, longname, extradoc)
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = self._random_state.random_sample(self._size)
if self._size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
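# A minimal construction sketch: rv_sample is normally reached through the
# public rv_discrete interface, which dispatches here when `values` is given.
#
#     import numpy as np
#     from scipy import stats
#     xk = np.array([1, 2, 3])
#     pk = np.array([0.2, 0.5, 0.3])
#     die = stats.rv_discrete(name='loaded_die', values=(xk, pk))
#     die.pmf(2)   # 0.5
#     die.cdf(2)   # 0.7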
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
bc is a tuple of the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
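# A worked example of the broadcasting decision above: with argshape=(3, 1) and
# size=(2, 3, 4), the dimensions are compared right-to-left, giving
# scalar_shape=(2, 4) and bc=(True, False, True) -- the middle axis follows the
# (broadcast) shape parameters, while the True axes are filled from a single
# _rvs_scalar() call.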
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
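# A small usage sketch with a hypothetical module namespace: if a module defines
# ``norm_gen`` (a subclass of rv_continuous from this file) and an instance
# ``norm = norm_gen()``, then
#
#     pairs = list(vars(module).items())
#     names, gen_names = get_distribution_names(pairs, rv_continuous)
#
# would return names == ['norm'] and gen_names == ['norm_gen'].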
|
jamestwebber/scipy
|
scipy/stats/_distn_infrastructure.py
|
Python
|
bsd-3-clause
| 126,866
|
[
"Gaussian"
] |
8f00a046b4b32660b37f3d4f38d43a6fc2ba59225fce53e2e5919a10ee17d3c0
|
"""som - Self-organising-map"""
from typing import Optional, Tuple
import numpy as np
from sklearn.decomposition import PCA
class SOM:
"""
SOM - Self-Organising-Map.
A 2D neural network that clusters high-dimensional data iteratively.
Parameters
----------
nx : Number of neurons on x-axis.
ny : Number of neurons on y-axis.
ndims : Dimension of input data.
iterations : Total number of iterations to perform.
Should be at least 10 times the number of neurons.
learning_rate : The learning rate specifies the
tradeoff between speed and accuracy of the SOM.
distance : { 'euclidean', 'periodic' }
The distance metric to use.
init : { 'random', 'pca' }
Initialization method. "pca" uses a grid spanned by the first two
eigenvectors of the principal component analysis of the input data.
grid : { 'rect', 'hex' }
Layout of the SOM, can be either rectangular or hexagonal with
equidistant nodes. The latter can provide smoother visualization.
train : { 'seq', 'batch' }
Training algorithm to use. Sequential picks random feature vectors one
at a time, while batch mode trains using all features per iteration.
This can significantly speed up convergence.
neighbour : { 'gaussian', 'bubble', 'epanechnikov' }
Type of neighbourhood decay function to use. "bubble" uses a hard
cutoff, "gaussian" falls off smoothly, and "epanechnikov" starts
smoothly and ends with a hard cutoff.
learning : { 'exponential', 'power', 'linear' }
Type of decay for the learning rate. A linear decay can
improve results in certain cases.
seed : Seed for the random number generator.
Attributes
----------
grid : Grid with all x, y positions of the nodes.
Useful for visualization.
weights : Weight vectors of the SOM in shape = (nx, ny, ndims).
Examples
--------
Here we train a 20 by 30 SOM on some colors:
>>> from plumology.learn import SOM
>>> som = SOM(20, 30, 3, iterations=400, learning_rate=0.2)
>>> colors = np.array(
... [[0., 0., 0.],
... [0., 0., 1.],
... [0., 1., 1.],
... [1., 0., 1.],
... [1., 1., 0.],
... [1., 1., 1.],
... [.33, .33, .33],
... [.5, .5, .5],
... [.66, .66, .66]]
... )
>>> som.fit(colors)
References
----------
Kohonen, T., "Self-Organized Formation of Topologically Correct
Feature Maps". In: Biological Cybernetics 43 (1): 59–69 (1982).
"""
def __init__(
self,
nx: int,
ny: int,
ndims: int,
iterations: int,
learning_rate: float=0.5,
distance: str='euclid',
init: str='random',
grid: str='rect',
train: str='seq',
neighbour: str='gaussian',
learning: str='exp',
seed: Optional[int]=None
) -> None:
self._iterations = iterations
self._init_learning_rate = learning_rate
self._learning_rate = self._init_learning_rate
self._ndims = ndims
self._map_radius = max(nx, ny) / 2
self._dlambda = self._iterations / np.log(self._map_radius)
self._shape = (nx, ny)
self._trained = False
if seed is not None:
np.random.seed(seed)
# Establish training algorithm
if train.startswith('seq'):
self._type = 's'
elif train.startswith('batch'):
self._type = 'b'
else:
e = 'Invalid training type! Valid types: sequential, batch'
raise ValueError(e)
# Init distance type
if distance.startswith('euclid'):
self._dist = self._euclid_dist
elif distance.startswith('per'):
self._dist = self._periodic_dist
else:
e = 'Invalid distance type! Valid types: euclidean, periodic'
raise ValueError(e)
# Init weights
if init.startswith('r'):
self.weights = np.random.rand(nx, ny, ndims)
elif not init.startswith('p'):
e = 'Invalid initialization type! Valid types: random, pca'
raise ValueError(e)
# Init grid
self._X, self._Y = np.meshgrid(np.arange(ny), np.arange(nx))
if grid.startswith('r'):
self._locX = self._X
self._locY = self._Y
elif grid.startswith('h'):
self._locX = np.asarray([
x + 0.5 if i % 2 == 0 else x
for i, x in enumerate(self._X.astype(float))
])
self._locY = self._Y * 0.33333
else:
e = 'Invalid grid type! Valid types: rect, hex'
raise ValueError(e)
# Init neighbourhood function
if neighbour.startswith('gauss'):
self._nb = self._nb_gaussian
elif neighbour.startswith('bub'):
self._nb = self._nb_bubble
elif neighbour.startswith('epa'):
self._nb = self._nb_epanechnikov
else:
e = ('Invalid neighbourhood function! ' +
'Valid types: gaussian, bubble, epanechnikov')
raise ValueError(e)
# Init learning-rate function
if learning.startswith('exp'):
self._lr = self._lr_exp
elif learning.startswith('pow'):
self._final_lr = self._init_learning_rate * np.exp(-1)
self._lr = self._lr_pow
elif learning.startswith('lin'):
self._lr = self._lr_lin
else:
e = ('Invalid learning rate function! ' +
'Valid types: exp, power, linear')
raise ValueError(e)
# Create empty index grid
self.index = np.zeros(self._shape, dtype=np.int32)
# Output grid for easier plotting
self.grid = np.asarray(list(zip(self._locX.flatten(),
self._locY.flatten())))
def _init_weights(self, X: np.ndarray) -> None:
"""Initialize weights from PCA eigenvectors"""
if not hasattr(self, 'weights'):
pca = PCA(n_components=self._ndims)
comp = pca.fit(X).components_[:2]
coeff = X.mean(0) + 5 * X.std(0) / self._shape[0]
# Create grid based on PCA eigenvectors and std dev of features
raw_weights = np.asarray([
(coeff * (comp[0] * (x - 0.5 / self._shape[0]) +
comp[1] * (y - 0.5 / self._shape[1])))
for x, y in zip(np.nditer(self._X.flatten()),
np.nditer(self._Y.flatten()))
]).reshape(self._shape + (self._ndims,))
# Scale to (0, 1)
full_shape = self._shape + (1,)
self.weights = (
(raw_weights - raw_weights.min(2).reshape(full_shape)) /
raw_weights.ptp(2).reshape(full_shape)
)
@staticmethod
def _nb_gaussian(dist: np.ndarray, sigma: float) -> np.ndarray:
return np.exp(-dist ** 2 / (2 * sigma ** 2))
@staticmethod
def _nb_bubble(dist: np.ndarray, sigma: float) -> np.ndarray:
return dist
@staticmethod
def _nb_epanechnikov(dist: np.ndarray, sigma: float) -> np.ndarray:
return np.maximum(np.zeros_like(dist), 1 - dist ** 2)
def _lr_exp(self, t: int) -> float:
return self._init_learning_rate * np.exp(-t / self._iterations)
def _lr_pow(self, t: int) -> float:
return (self._init_learning_rate *
(self._final_lr / self._init_learning_rate) **
(t / self._iterations))
def _lr_lin(self, t: int) -> float:
return (self._init_learning_rate -
(self._init_learning_rate * t * (np.exp(1) - 1) /
(self._iterations * np.exp(1))))
def _euclid_dist(
self,
xmat: np.ndarray,
index: Tuple[int, int]=(),
axis: int=2
) -> np.ndarray:
return np.sqrt(((xmat - self.weights[index]) ** 2).sum(axis=axis))
def _periodic_dist(
self,
xmat: np.ndarray,
index: Tuple[int, int]=(),
axis: int=2
) -> np.ndarray:
pi2 = np.pi * 2
dx = (xmat - self.weights[index]) / pi2
return np.sqrt((((dx - np.round(dx)) * pi2) ** 2).sum(axis=axis))
def _train(self, X: np.ndarray) -> None:
for t in range(self._iterations):
# Update learning rate, reduce radius
lr = self._lr(t)
neigh_radius = self._map_radius * np.exp(-t / self._dlambda)
# Choose random feature vector
f = X[np.random.choice(len(X))]
# Calc euclidean distance
xmat = np.broadcast_to(f, self._shape + (self._ndims,))
index = self._dist(xmat).argmin()
bmu = np.unravel_index(index, self._shape)
# Create distance matrix
distmat = (
(self._locX - self._locX[bmu]) ** 2 +
(self._locY - self._locY[bmu]) ** 2
).reshape(self._shape + (1,))
# Mask out unaffected nodes
mask = (distmat < neigh_radius).astype(int)
theta = self._nb(distmat * mask, neigh_radius)
self.weights += mask * theta * lr * (f - self.weights)
def _batch_train(self, X: np.ndarray) -> None:
for t in range(self._iterations):
# Update learning rate, reduce radius
lr = self._lr(t)
neigh_radius = self._map_radius * np.exp(-t / self._dlambda)
for f in X:
# Calc euclidean distance
xmat = np.broadcast_to(f, self._shape + (self._ndims,))
index = self._dist(xmat).argmin()
bmu = np.unravel_index(index, self._shape)
# Create distance matrix
distmat = (
(self._locX - self._locX[bmu]) ** 2 +
(self._locY - self._locY[bmu]) ** 2
).reshape(self._shape + (1,))
# Mask out unaffected nodes
mask = (distmat < neigh_radius).astype(int)
theta = self._nb(distmat * mask, neigh_radius)
self.weights += mask * theta * lr * (f - self.weights)
def fit(self, X: np.ndarray) -> None:
"""
Run the SOM.
Parameters
----------
X : input data as array of vectors.
"""
self._init_weights(X)
if self._type == 's':
self._train(X)
else:
self._batch_train(X)
self._trained = True
def create_index(self, X: np.ndarray) -> None:
"""
Create an index grid, allowing the coloring of the map with arbitrary
feature data. For instance, one could train the SOM on a subset of the
data, and then create an index using the full dataset. The transform()
method will only need to check the created index grid featuring the
best matching datapoint index per node.
Parameters
----------
X : input data as used to train the SOM, can be significantly larger.
"""
if not self._trained:
raise ValueError('You need to train the SOM first!')
# For each node we calculate the distance to each datapoint
for index in np.ndindex(self._shape):
self.index[index] = self._dist(X, index=index, axis=1).argmin()
def transform(self, X: np.ndarray) -> np.ndarray:
"""
Transform a dataset based on the index grid created by index().
This method will return a subset of the dataset in the shape of
the node matrix.
Parameters
----------
X : input data
Returns
-------
grid : subset of the input data assigned to the best nodes
"""
if not self._trained:
raise ValueError('You need to train the SOM first!')
if not hasattr(self, 'index'):
raise ValueError('You need to index the SOM first!')
grid = np.zeros(self._shape)
for index in np.ndindex(self.index.shape):
grid[index] = X[self.index[index]]
return grid
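# A minimal end-to-end sketch continuing the colour example from the class
# docstring (data and map sizes are illustrative only):
#
#     som = SOM(20, 30, 3, iterations=400, learning_rate=0.2)
#     som.fit(colors)                    # colors: (N, 3) array of RGB values
#     som.create_index(colors)           # best-matching datapoint per node
#     red = som.transform(colors[:, 0])  # (20, 30) grid of red-channel values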
|
tlhr/plumology
|
plumology/learn/som.py
|
Python
|
mit
| 12,304
|
[
"Gaussian"
] |
881a4b09aad02924a63e9705bfbedd20617070e8b0954a8a8f7e2e99a92d304a
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import espressomd.interactions
from espressomd import has_features
# Dict with Drude type infos
drude_dict = {}
# Lists with unique Drude and core types
core_type_list = []
drude_type_list = []
# Get core id from Drude id
core_id_from_drude_id = {}
# Drude IDs
drude_id_list = []
def add_drude_particle_to_core(system, harmonic_bond, thermalized_bond,
p_core, id_drude, type_drude, alpha,
mass_drude, coulomb_prefactor,
thole_damping=2.6, verbose=False):
"""
Adds a Drude particle with specified id, type, and mass to the system.
Checks if different Drude particles have different types.
Collects types/charges/polarizations/Thole factors for intramol. core-Drude short-range exclusion and Thole interaction.
Attributes
----------
system : Instance of :attr:`espressomd.system.System`
harmonic_bond: The harmonic bond that this method adds between the Drude particle and the core
thermalized_bond: The thermalized bond that this method adds between the Drude particle and the core
p_core: The existing core particle
id_drude: :obj:`int`
This method creates the Drude particle and assigns this id.
type_drude: :obj:`int`
The type of the newly created Drude particle
alpha : :obj:`float`
The polarizability in units of inverse volume. Related to the charge of the Drude particle.
mass_drude : :obj:`float`
The mass of the newly created Drude particle
coulomb_prefactor : :obj:`float`
Required to calculate the charge of the Drude particle.
thole_damping : :obj:`float`
Thole damping factor of the Drude pair. Takes effect if the add_all_thole() method is used.
verbose : :obj:`bool`
Turns on verbosity.
"""
k = harmonic_bond.params["k"]
q_drude = -1.0 * pow(k * alpha / coulomb_prefactor, 0.5)
if has_features("PARTICLE_ANISOTROPY"):
gamma_off = [0.0, 0.0, 0.0]
else:
gamma_off = 0.0
system.part.add(id=id_drude, pos=p_core.pos, type=type_drude,
q=q_drude, mass=mass_drude, temp=0, gamma=gamma_off)
if verbose:
print(
"Adding to core", p_core.id, "drude id", id_drude, " pol", alpha,
" core charge", p_core.q, "->", p_core.q - q_drude, " drude charge", q_drude)
p_core.q -= q_drude
p_core.mass -= mass_drude
p_core.add_bond((harmonic_bond, id_drude))
p_core.add_bond((thermalized_bond, id_drude))
p_core.temp = 0.0
p_core.gamma = gamma_off
if type_drude in drude_dict and not (
drude_dict[type_drude]["q"] == q_drude and drude_dict[type_drude]["thole_damping"] == thole_damping):
raise Exception(
"Drude particles with different drude charges have to have different types for THOLE")
core_id_from_drude_id[id_drude] = p_core.id
# Add new Thole nonbonded interaction for D-D, D-C, C-C for all existing
# Drude types if this type is seen for the first time
if type_drude not in drude_dict:
# Bookkeeping of q, alphas and damping parameter
drude_dict[type_drude] = {}
drude_dict[type_drude]["q"] = q_drude
drude_dict[type_drude]["qc"] = p_core.q
drude_dict[type_drude]["alpha"] = alpha
drude_dict[type_drude]["thole_damping"] = thole_damping
drude_dict[type_drude]["core_type"] = p_core.type
# Save same information to get access to the parameters via core types
drude_dict[p_core.type] = {}
drude_dict[p_core.type]["q"] = -q_drude
drude_dict[p_core.type]["qc"] = p_core.q
drude_dict[p_core.type]["alpha"] = alpha
drude_dict[p_core.type]["thole_damping"] = thole_damping
drude_dict[p_core.type]["drude_type"] = type_drude
# Collect unique Drude types
if type_drude not in drude_type_list:
drude_type_list.append(type_drude)
# Collect unique core types
if p_core.type not in core_type_list:
core_type_list.append(p_core.type)
# Collect unique Drude ids
if id_drude not in drude_id_list:
drude_id_list.append(id_drude)
def add_thole_pair_damping(system, t1, t2, verbose=False):
"""
Calculates mixed Thole factors depending on Thole damping and polarization.
Adds non-bonded Thole interactions to the system.
Attributes
----------
system : Instance of :attr:`espressomd.system.System`
t1 : :obj:`int`
Type 1
t2 : :obj:`int`
Type 2
verbose : :obj:`bool`
Turns on verbosity.
"""
qq = drude_dict[t1]["q"] * drude_dict[t2]["q"]
s = 0.5 * (drude_dict[t1]["thole_damping"] + drude_dict[t2]["thole_damping"]
) / (drude_dict[t1]["alpha"] * drude_dict[t2]["alpha"])**(1.0 / 6.0)
system.non_bonded_inter[t1, t2].thole.set_params(scaling_coeff=s, q1q2=qq)
if verbose:
print("Added THOLE non-bonded interaction for types",
t1, "<->", t2, "S", s, "q1q2", qq)
def add_all_thole(system, verbose=False):
"""
Calls add_thole_pair_damping() for all necessary combinations to create the interactions.
Attributes
----------
system : Instance of :attr:`espressomd.system.System`
verbose : :obj:`bool`
Turns on verbosity.
"""
# Drude <-> Drude
for i in range(len(drude_type_list)):
for j in range(i, len(drude_type_list)):
add_thole_pair_damping(
system, drude_type_list[i], drude_type_list[j], verbose)
# core <-> core
for i in range(len(core_type_list)):
for j in range(i, len(core_type_list)):
add_thole_pair_damping(
system, core_type_list[i], core_type_list[j], verbose)
# Drude <-> core
for i in drude_type_list:
for j in core_type_list:
add_thole_pair_damping(system, i, j, verbose)
def setup_and_add_drude_exclusion_bonds(system, verbose=False):
"""
Creates electrostatic short-range exclusion bonds for global exclusion
between Drude particles and core charges and adds the bonds to the cores.
Has to be called once after all Drude particles have been created.
Attributes
----------
system : Instance of :attr:`espressomd.system.System`
verbose: :obj:`bool`
Turns on verbosity.
"""
# All Drude types need...
for td in drude_type_list:
#...exclusions with core
qd = drude_dict[td]["q"] # Drude charge
qc = drude_dict[td]["qc"] # Core charge
subtr_sr_bond = espressomd.interactions.BondedCoulombSRBond(
q1q2=-qd * qc)
system.bonded_inter.add(subtr_sr_bond)
drude_dict[td]["subtr_sr_bonds_drude-core"] = subtr_sr_bond
if verbose:
print("Added drude-core SR exclusion bond ",
subtr_sr_bond, "for drude", qd, "<-> core", qc, "to system")
for drude_id in drude_id_list:
core_id = core_id_from_drude_id[drude_id]
pd = system.part[drude_id]
pc = system.part[core_id]
bond = drude_dict[pd.type]["subtr_sr_bonds_drude-core"]
pc.add_bond((bond, drude_id))
if verbose:
print("Added drude-core SR bond", bond,
"between ids", drude_id, "and", core_id)
def setup_intramol_exclusion_bonds(system, mol_drude_types, mol_core_types,
mol_core_partial_charges, verbose=False):
"""
Creates electrostatic short-range exclusion bonds for intramolecular exclusion
between Drude particles and partial charges of the cores. Has to be called once
after all Drude particles have been created.
Attributes
----------
system : Instance of :attr:`espressomd.system.System`
mol_drude_types : List of types of Drude particles within the molecule
mol_core_types : List of types of core particles within the molecule
mol_core_partial_charges : List of partial charges of core particles within the molecule
verbose : :obj:`bool`
Turns on verbosity.
"""
# All Drude types need...
for td in mol_drude_types:
drude_dict[td]["subtr_sr_bonds_intramol"] = {}
#... sr exclusion bond with other partial core charges...
for tc, qp in zip(mol_core_types, mol_core_partial_charges):
#...excluding the Drude core partner
if drude_dict[td]["core_type"] != tc:
qd = drude_dict[td]["q"] # Drude charge
subtr_sr_bond = espressomd.interactions.BondedCoulombSRBond(
q1q2=-qd * qp)
system.bonded_inter.add(subtr_sr_bond)
drude_dict[td]["subtr_sr_bonds_intramol"][
tc] = subtr_sr_bond
if verbose:
print("Added intramolecular exclusion", subtr_sr_bond,
"for drude", qd, "<-> core", qp, "to system")
def add_intramol_exclusion_bonds(system, drude_ids, core_ids, verbose=False):
"""
Applies electrostatic short-range exclusion bonds for the given ids.
Has to be applied for all molecules.
Attributes
----------
system : Instance of :attr:`espressomd.system.System`
drude_ids : IDs of Drude particles within a molecule.
core_ids : IDs of core particles within a molecule.
verbose : :obj:`bool`
Turns on verbosity.
"""
for drude_id in drude_ids:
for core_id in core_ids:
if core_id_from_drude_id[drude_id] != core_id:
pd = system.part[drude_id]
pc = system.part[core_id]
bond = drude_dict[pd.type][
"subtr_sr_bonds_intramol"][pc.type]
pd.add_bond((bond, core_id))
if verbose:
print("Added subtr_sr bond", bond,
"between ids", drude_id, "and", core_id)
|
psci2195/espresso-ffans
|
src/python/espressomd/drude_helpers.py
|
Python
|
gpl-3.0
| 10,686
|
[
"ESPResSo"
] |
06a459e7ee2f6635416dee33ba034db43c249320da8d899ca96e61c08a884d1f
|
import unittest
import pysal
import numpy as np
from pysal.spreg import error_sp_hom_regimes as SP
from pysal.spreg.error_sp_hom import GM_Error_Hom, GM_Endog_Error_Hom, GM_Combo_Hom
class TestGM_Error_Hom_Regimes(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("CRIME"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("HOVAL"))
X.append(db.by_col("INC"))
self.X = np.array(X).T
X2 = []
X2.append(db.by_col("INC"))
self.X2 = np.array(X2).T
yd = []
yd.append(db.by_col("HOVAL"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
self.r_var = 'NSA'
self.regimes = db.by_col(self.r_var)
#Artificial:
n = 256
self.n2 = n // 2
self.x_a1 = np.random.uniform(-10,10,(n,1))
self.x_a2 = np.random.uniform(1,5,(n,1))
self.q_a = self.x_a2 + np.random.normal(0,1,(n,1))
self.x_a = np.hstack((self.x_a1,self.x_a2))
self.y_a = np.dot(np.hstack((np.ones((n,1)),self.x_a)),np.array([[1],[0.5],[2]])) + np.random.normal(0,1,(n,1))
latt = int(np.sqrt(n))
self.w_a = pysal.lat2W(latt,latt)
self.w_a.transform='r'
self.regi_a = [0]*(n//2) + [1]*(n//2)
self.w_a1 = pysal.lat2W(latt//2,latt)
self.w_a1.transform='r'
def test_model(self):
reg = SP.GM_Error_Hom_Regimes(self.y, self.X, self.regimes, self.w, A1='het')
betas = np.array([[ 62.95986466],
[ -0.15660795],
[ -1.49054832],
[ 60.98577615],
[ -0.3358993 ],
[ -0.82129289],
[ 0.54033921]])
np.testing.assert_array_almost_equal(reg.betas,betas,6)
u = np.array([-2.19031456])
np.testing.assert_array_almost_equal(reg.u[0],u,6)
predy = np.array([ 17.91629456])
np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
n = 49
self.assertAlmostEqual(reg.n,n,6)
k = 6
self.assertAlmostEqual(reg.k,k,6)
y = np.array([ 15.72598])
np.testing.assert_array_almost_equal(reg.y[0],y,6)
x = np.array([[ 0. , 0. , 0. , 1. , 80.467003, 19.531 ]])
np.testing.assert_array_almost_equal(reg.x[0].toarray(),x,6)
e = np.array([ 2.72131648])
np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
my = 35.128823897959187
self.assertAlmostEqual(reg.mean_y,my)
sy = 16.732092091229699
self.assertAlmostEqual(reg.std_y,sy)
vm = np.array([ 49.16245801, -0.12493165, -1.89294614, 5.71968257,
-0.0571525 , 0.05745855, 0. ])
np.testing.assert_array_almost_equal(reg.vm[0],vm,6)
sig2 = 96.96108341267626
self.assertAlmostEqual(reg.sig2,sig2,5)
pr2 = 0.5515791216023577
self.assertAlmostEqual(reg.pr2,pr2)
std_err = np.array([ 7.01159454, 0.20701411, 0.56905515, 7.90537942, 0.10268949,
0.56660879, 0.15659504])
np.testing.assert_array_almost_equal(reg.std_err,std_err,6)
chow_r = np.array([[ 0.03888544, 0.84367579],
[ 0.61613446, 0.43248738],
[ 0.72632441, 0.39407719]])
np.testing.assert_array_almost_equal(reg.chow.regi,chow_r,6)
chow_j = 0.92133276766189676
self.assertAlmostEqual(reg.chow.joint[0],chow_j)
def test_model_regi_error(self):
#Artificial:
model = SP.GM_Error_Hom_Regimes(self.y_a, self.x_a, self.regi_a, w=self.w_a, regime_err_sep=True, A1='het')
model1 = GM_Error_Hom(self.y_a[0:(self.n2)].reshape((self.n2),1), self.x_a[0:(self.n2)], w=self.w_a1, A1='het')
model2 = GM_Error_Hom(self.y_a[(self.n2):].reshape((self.n2),1), self.x_a[(self.n2):], w=self.w_a1, A1='het')
tbetas = np.vstack((model1.betas, model2.betas))
np.testing.assert_array_almost_equal(model.betas,tbetas)
vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
np.testing.assert_array_almost_equal(model.vm.diagonal(), vm, 6)
#Columbus:
reg = SP.GM_Error_Hom_Regimes(self.y, self.X2, self.regimes, self.w, regime_err_sep=True, A1='het')
betas = np.array([[ 60.66668194],
[ -1.72708492],
[ 0.62170311],
[ 61.4526885 ],
[ -1.90700858],
[ 0.1102755 ]])
np.testing.assert_array_almost_equal(reg.betas,betas,6)
vm = np.array([ 45.57956967, -1.65365774, 0. , 0. ,
0. , 0. ])
np.testing.assert_array_almost_equal(reg.vm[0],vm,6)
u = np.array([-8.48092392])
np.testing.assert_array_almost_equal(reg.u[0],u,6)
predy = np.array([ 24.20690392])
np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
e = np.array([-8.33982604])
np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
chow_r = np.array([[ 0.0050892 , 0.94312823],
[ 0.05746619, 0.81054651],
[ 1.65677138, 0.19803981]])
np.testing.assert_array_almost_equal(reg.chow.regi,chow_r,6)
chow_j = 1.7914221673031792
self.assertAlmostEqual(reg.chow.joint[0],chow_j)
def test_model_endog(self):
reg = SP.GM_Endog_Error_Hom_Regimes(self.y, self.X2, self.yd, self.q, self.regimes, self.w, A1='het')
betas = np.array([[ 77.26679984],
[ 4.45992905],
[ 78.59534391],
[ 0.41432319],
[ -3.20196286],
[ -1.13672283],
[ 0.22178164]])
np.testing.assert_array_almost_equal(reg.betas,betas,6)
u = np.array([ 20.50716917])
np.testing.assert_array_almost_equal(reg.u[0],u,6)
e = np.array([ 25.22635318])
np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
predy = np.array([-4.78118917])
np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
n = 49
self.assertAlmostEqual(reg.n,n)
k = 6
self.assertAlmostEqual(reg.k,k)
y = np.array([ 15.72598])
np.testing.assert_array_almost_equal(reg.y[0],y,6)
x = np.array([[ 0. , 0. , 1. , 19.531]])
np.testing.assert_array_almost_equal(reg.x[0].toarray(),x,6)
yend = np.array([[ 0. , 80.467003]])
np.testing.assert_array_almost_equal(reg.yend[0].toarray(),yend,6)
z = np.array([[ 0. , 0. , 1. , 19.531 , 0. ,
80.467003]])
np.testing.assert_array_almost_equal(reg.z[0].toarray(),z,6)
my = 35.128823897959187
self.assertAlmostEqual(reg.mean_y,my)
sy = 16.732092091229699
self.assertAlmostEqual(reg.std_y,sy)
vm = np.array([ 403.76852704, 69.06920553, 19.8388512 , 3.62501395,
-40.30472224, -1.6601927 , -1.64319352])
np.testing.assert_array_almost_equal(reg.vm[0],vm,5)
pr2 = 0.19776512679498906
self.assertAlmostEqual(reg.pr2,pr2)
sig2 = 644.23810259214
self.assertAlmostEqual(reg.sig2,sig2,5)
std_err = np.array([ 20.09399231, 7.03617703, 23.64968032, 2.176846 ,
3.40352278, 0.92377997, 0.24462006])
np.testing.assert_array_almost_equal(reg.std_err,std_err,6)
chow_r = np.array([[ 0.00191145, 0.96512749],
[ 0.31031517, 0.57748685],
[ 0.34994619, 0.55414359]])
np.testing.assert_array_almost_equal(reg.chow.regi,chow_r,6)
chow_j = 1.248410480025556
self.assertAlmostEqual(reg.chow.joint[0],chow_j)
def test_model_endog_regi_error(self):
#Columbus:
reg = SP.GM_Endog_Error_Hom_Regimes(self.y, self.X2, self.yd, self.q, self.regimes, self.w, regime_err_sep=True, A1='het')
betas = np.array([[ 7.92747424e+01],
[ 5.78086230e+00],
[ -3.83173581e+00],
[ 2.14725610e-01],
[ 8.26255251e+01],
[ 5.48294187e-01],
[ -1.28432891e+00],
[ 2.98658172e-02]])
np.testing.assert_array_almost_equal(reg.betas,betas,6)
vm = np.array([ 867.50930457, 161.04430783, -92.35637083, -1.13838767,
0. , 0. , 0. , 0. ])
np.testing.assert_array_almost_equal(reg.vm[0],vm,6)
u = np.array([ 25.73781918])
np.testing.assert_array_almost_equal(reg.u[0],u,6)
predy = np.array([-10.01183918])
np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
e = np.array([26.41176711])
np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
chow_r = np.array([[ 0.00909777, 0.92401124],
[ 0.24034941, 0.62395386],
[ 0.24322564, 0.62188603],
[ 0.32572159, 0.5681893 ]])
np.testing.assert_array_almost_equal(reg.chow.regi,chow_r,6)
chow_j = 1.4485058522307526
self.assertAlmostEqual(reg.chow.joint[0],chow_j)
#Artificial:
model = SP.GM_Endog_Error_Hom_Regimes(self.y_a, self.x_a1, yend=self.x_a2, q=self.q_a, regimes=self.regi_a, w=self.w_a, regime_err_sep=True, A1='het')
model1 = GM_Endog_Error_Hom(self.y_a[0:(self.n2)].reshape((self.n2),1), self.x_a1[0:(self.n2)], yend=self.x_a2[0:(self.n2)], q=self.q_a[0:(self.n2)], w=self.w_a1, A1='het')
model2 = GM_Endog_Error_Hom(self.y_a[(self.n2):].reshape((self.n2),1), self.x_a1[(self.n2):], yend=self.x_a2[(self.n2):], q=self.q_a[(self.n2):], w=self.w_a1, A1='het')
tbetas = np.vstack((model1.betas, model2.betas))
np.testing.assert_array_almost_equal(model.betas,tbetas)
vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
np.testing.assert_array_almost_equal(model.vm.diagonal(), vm, 6)
def test_model_combo(self):
reg = SP.GM_Combo_Hom_Regimes(self.y, self.X2, self.regimes, self.yd, self.q, w=self.w, A1='het')
betas = np.array([[ 36.93726782],
[ -0.829475 ],
[ 30.86675168],
[ -0.72375344],
[ -0.30190094],
[ -0.22132895],
[ 0.64190215],
[ -0.07314671]])
np.testing.assert_array_almost_equal(reg.betas,betas,6)
u = np.array([ 0.94039246])
np.testing.assert_array_almost_equal(reg.u[0],u,6)
e_filtered = np.array([ 0.74211331])
np.testing.assert_array_almost_equal(reg.e_filtered[0],e_filtered,5)
predy_e = np.array([ 18.68732105])
np.testing.assert_array_almost_equal(reg.predy_e[0],predy_e,6)
predy = np.array([ 14.78558754])
np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
n = 49
self.assertAlmostEqual(reg.n,n)
k = 7
self.assertAlmostEqual(reg.k,k)
y = np.array([ 15.72598])
np.testing.assert_array_almost_equal(reg.y[0],y,6)
x = np.array([[ 0. , 0. , 1. , 19.531]])
np.testing.assert_array_almost_equal(reg.x[0].toarray(),x,6)
yend = np.array([[ 0. , 80.467003 , 24.7142675]])
np.testing.assert_array_almost_equal(reg.yend[0].toarray(),yend,6)
z = np.array([[ 0. , 0. , 1. , 19.531 , 0. ,
80.467003 , 24.7142675]])
np.testing.assert_array_almost_equal(reg.z[0].toarray(),z,6)
my = 35.128823897959187
self.assertAlmostEqual(reg.mean_y,my)
sy = 16.732092091229699
self.assertAlmostEqual(reg.std_y,sy)
vm = np.array([ 111.54419614, -0.23476709, 83.37295278, -1.74452409,
-1.60256796, -0.13151396, -1.43857915, 2.19420848])
np.testing.assert_array_almost_equal(reg.vm[0],vm,4)
sig2 = 95.57694234438294
self.assertAlmostEqual(reg.sig2,sig2,4)
pr2 = 0.6504148883591536
self.assertAlmostEqual(reg.pr2,pr2)
pr2_e = 0.5271368969923579
self.assertAlmostEqual(reg.pr2_e,pr2_e)
std_err = np.array([ 10.56144858, 0.93986958, 11.52977369, 0.61000358,
0.44419535, 0.16191882, 0.1630835 , 0.41107528])
np.testing.assert_array_almost_equal(reg.std_err,std_err,5)
chow_r = np.array([[ 0.47406771, 0.49112176],
[ 0.00879838, 0.92526827],
[ 0.02943577, 0.86377672]])
np.testing.assert_array_almost_equal(reg.chow.regi,chow_r,6)
chow_j = 0.59098559257602923
self.assertAlmostEqual(reg.chow.joint[0],chow_j)
def test_model_combo_regi_error(self):
#Columbus:
reg = SP.GM_Combo_Hom_Regimes(self.y, self.X2, self.regimes, self.yd, self.q, w=self.w, regime_lag_sep=True, regime_err_sep=True, A1='het')
betas = np.array([[ 4.20115146e+01],
[ -1.39171512e-01],
[ -6.53001838e-01],
[ 5.47370644e-01],
[ 2.61860326e-01],
[ 3.42156975e+01],
[ -1.52360889e-01],
[ -4.91752171e-01],
[ 6.57331733e-01],
[ -2.68716241e-02]])
np.testing.assert_array_almost_equal(reg.betas,betas,6)
vm = np.array([ 154.23356187, 2.99104716, -3.29036767, -2.473113 ,
1.65247551, 0. , 0. , 0. ,
0. , 0. ])
np.testing.assert_array_almost_equal(reg.vm[0],vm,6)
u = np.array([ 7.81039418])
np.testing.assert_array_almost_equal(reg.u[0],u,6)
predy = np.array([ 7.91558582])
np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
e = np.array([ 7.60819283])
np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
chow_r = np.array([[ 9.59590706e-02, 7.56733881e-01],
[ 6.53130455e-05, 9.93551847e-01],
[ 4.65270134e-02, 8.29220655e-01],
[ 7.68939379e-02, 7.81551631e-01],
[ 5.04560098e-01, 4.77503278e-01]])
np.testing.assert_array_almost_equal(reg.chow.regi,chow_r,6)
chow_j = 0.74134991257940286
self.assertAlmostEqual(reg.chow.joint[0],chow_j)
#Artificial:
model = SP.GM_Combo_Hom_Regimes(self.y_a, self.x_a1, yend=self.x_a2, q=self.q_a, regimes=self.regi_a, w=self.w_a, regime_err_sep=True, regime_lag_sep=True, A1='het')
model1 = GM_Combo_Hom(self.y_a[0:(self.n2)].reshape((self.n2),1), self.x_a1[0:(self.n2)], yend=self.x_a2[0:(self.n2)], q=self.q_a[0:(self.n2)], w=self.w_a1, A1='het')
model2 = GM_Combo_Hom(self.y_a[(self.n2):].reshape((self.n2),1), self.x_a1[(self.n2):], yend=self.x_a2[(self.n2):], q=self.q_a[(self.n2):], w=self.w_a1, A1='het')
tbetas = np.vstack((model1.betas, model2.betas))
np.testing.assert_array_almost_equal(model.betas,tbetas)
vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
if __name__ == '__main__':
unittest.main()
|
darribas/pysal
|
pysal/spreg/tests/test_error_sp_hom_regimes.py
|
Python
|
bsd-3-clause
| 14,805
|
[
"COLUMBUS"
] |
f6d2138055b54f5d05f155053507c970c4e8bf44f4616c5d671fff3414ec894f
|
import numpy as np
from . import _marching_cubes_cy
def marching_cubes(volume, level, spacing=(1., 1., 1.)):
"""
Marching cubes algorithm to find iso-valued surfaces in 3d volumetric data
Parameters
----------
volume : (M, N, P) array of doubles
Input data volume to find isosurfaces. Will be cast to `np.float64`.
level : float
Contour value to search for isosurfaces in `volume`.
spacing : length-3 tuple of floats
Voxel spacing in spatial dimensions corresponding to numpy array
indexing dimensions (M, N, P) as in `volume`.
Returns
-------
verts : (V, 3) array
Spatial coordinates for V unique mesh vertices. Coordinate order
matches input `volume` (M, N, P).
faces : (F, 3) array
Define triangular faces via referencing vertex indices from ``verts``.
This algorithm specifically outputs triangles, so each face has
exactly three indices.
Notes
-----
The marching cubes algorithm is implemented as described in [1]_.
A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There are several known ambiguous cases in the marching cubes algorithm.
Using point labeling as in [1]_, Figure 4, as shown::
v8 ------ v7
/ | / | y
/ | / | ^ z
v4 ------ v3 | | /
| v5 ----|- v6 |/ (note: NOT right handed!)
| / | / ----> x
| / | /
v1 ------ v2
Most notably, if v4, v8, v2, and v6 are all >= `level` (or any
generalization of this case) two parallel planes are generated by this
algorithm, separating v4 and v8 from v2 and v6. An equally valid
interpretation would be a single connected thin surface enclosing all
four points. This is the best known ambiguity, though there are others.
This algorithm does not attempt to resolve such ambiguities; it is a naive
implementation of marching cubes as in [1]_, but may be a good beginning
for work with more recent techniques (Dual Marching Cubes, Extended
Marching Cubes, Cubic Marching Squares, etc.).
Because of interactions between neighboring cubes, the isosurface(s)
generated by this algorithm are NOT guaranteed to be closed, particularly
for complicated contours. Furthermore, this algorithm does not guarantee
a single contour will be returned. Indeed, ALL isosurfaces which cross
`level` will be found, regardless of connectivity.
The output is a triangular mesh consisting of a set of unique vertices and
connecting triangles. The order of these vertices and triangles in the
output list is determined by the position of the smallest ``x,y,z`` (in
lexicographical order) coordinate in the contour. This is a side-effect
of how the input array is traversed, but can be relied upon.
To quantify the area of an isosurface generated by this algorithm, pass
the outputs directly into `skimage.measure.mesh_surface_area`.
Regarding visualization of algorithm output, the ``mayavi`` package
is recommended. To contour a volume named `myvolume` about the level 0.0::
>>> from mayavi import mlab # doctest: +SKIP
>>> verts, tris = marching_cubes(myvolume, 0.0, (1., 1., 2.)) # doctest: +SKIP
>>> mlab.triangular_mesh([vert[0] for vert in verts],
... [vert[1] for vert in verts],
... [vert[2] for vert in verts],
... tris) # doctest: +SKIP
>>> mlab.show() # doctest: +SKIP
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170.
See Also
--------
skimage.measure.mesh_surface_area
"""
# Check inputs and ensure `volume` is C-contiguous for memoryviews
if volume.ndim != 3:
raise ValueError("Input volume must have 3 dimensions.")
if level < volume.min() or level > volume.max():
raise ValueError("Contour level must be within volume data range.")
volume = np.array(volume, dtype=np.float64, order="C")
# Extract raw triangles using marching cubes in Cython
# Returns a list of length-3 lists, each sub-list containing three
# tuples. The tuples hold (x, y, z) coordinates for triangle vertices.
# Note: this algorithm is fast, but returns degenerate "triangles" which
# have repeated vertices - and equivalent vertices are redundantly
# placed in every triangle they connect with.
raw_tris = _marching_cubes_cy.iterate_and_store_3d(volume, float(level),
spacing)
# Find and collect unique vertices, storing triangle verts as indices.
# Returns a true mesh with no degenerate faces.
verts, faces = _marching_cubes_cy.unpack_unique_verts(raw_tris)
return np.asarray(verts), np.asarray(faces)
def mesh_surface_area(verts, tris):
"""
Compute surface area, given vertices & triangular faces
Parameters
----------
verts : (V, 3) array of floats
Array containing (x, y, z) coordinates for V unique mesh vertices.
faces : (F, 3) array of ints
List of length-3 lists of integers, referencing vertex coordinates as
provided in `verts`
Returns
-------
area : float
Surface area of mesh. Units now [coordinate units] ** 2.
Notes
-----
The arguments expected by this function are the exact outputs from
`skimage.measure.marching_cubes`. For unit correct output, ensure correct
`spacing` was passed to `skimage.measure.marching_cubes`.
This algorithm works properly only if the ``faces`` provided are all
triangles.
See Also
--------
skimage.measure.marching_cubes
"""
# Fancy indexing to define two vector arrays from triangle vertices
actual_verts = verts[tris]
a = actual_verts[:, 0, :] - actual_verts[:, 1, :]
b = actual_verts[:, 0, :] - actual_verts[:, 2, :]
del actual_verts
# Area of triangle in 3D = 1/2 * Euclidean norm of cross product
return ((np.cross(a, b) ** 2).sum(axis=1) ** 0.5).sum() / 2.
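# A quick numeric check of the formula above: a single right triangle with unit
# legs along x and y has area 0.5.
#
#     import numpy as np
#     verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#     tris = np.array([[0, 1, 2]])
#     mesh_surface_area(verts, tris)   # 0.5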
|
almarklein/scikit-image
|
skimage/measure/_marching_cubes.py
|
Python
|
bsd-3-clause
| 6,374
|
[
"Mayavi"
] |
2801d604f7ddb23e66941d55bf057b6079852d605efebac8e0bbc40c295b8f0e
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome History database plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import chrome_history
from tests.parsers.sqlite_plugins import test_lib
class GoogleChrome8HistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome 8 history SQLite database plugin."""
def testProcess(self):
"""Tests the Process function on a Chrome History database file."""
plugin = chrome_history.GoogleChrome8HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History'], plugin)
# The History file contains 71 events (69 page visits, 1 file downloads).
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 71)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the first page visited entry.
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'date_time': '2011-04-07 12:03:11.000000',
'page_transition_type': 0,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': 'Ubuntu Start Page',
'typed_count': 0,
'url': 'http://start.ubuntu.com/10.04/Google/',
'visit_source': 3}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the first file downloaded entry.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'date_time': '2011-05-23 08:35:30',
'full_path': '/home/john/Downloads/funcats_scr.exe',
'received_bytes': 1132155,
'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
'total_bytes': 1132155,
'url': 'http://fatloss4idiotsx.com/download/funcats/funcats_scr.exe'}
self.CheckEventValues(storage_writer, events[69], expected_event_values)
class GoogleChrome27HistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome 27 history SQLite database plugin."""
def testProcess57(self):
"""Tests the Process function on a Google Chrome 57 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-57.0.2987.133'], plugin)
# The History file contains 3 events (1 page visit, 2 file downloads).
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 3)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the page visit event.
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'date_time': '2018-01-21 14:09:53.885478',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': '',
'typed_count': 0,
'url': expected_url}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the file downloaded event.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'date_time': '2018-01-21 14:09:53.900399',
'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
'received_bytes': 3080192,
'timestamp_desc': definitions.TIME_DESCRIPTION_START,
'total_bytes': 3080192,
'url': (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
def testProcess58(self):
"""Tests the Process function on a Google Chrome 58 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-58.0.3029.96'], plugin)
# The History file contains 3 events (1 page visit, 2 file downloads).
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 3)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the page visit event.
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'date_time': '2018-01-21 14:09:27.315765',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': '',
'typed_count': 0,
'url': expected_url}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the file downloaded event.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'date_time': '2018-01-21 14:09:27.200398',
'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
'received_bytes': 3080192,
'timestamp_desc': definitions.TIME_DESCRIPTION_START,
'total_bytes': 3080192,
'url': (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
def testProcess59(self):
"""Tests the Process function on a Google Chrome 59 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-59.0.3071.86'], plugin)
# The History file contains 3 events (1 page visit, 2 file downloads).
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 3)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the page visit event.
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'date_time': '2018-01-21 14:08:52.037692',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': '',
'typed_count': 0,
'url': expected_url}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the file downloaded event.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'date_time': '2018-01-21 14:08:51.811123',
'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
'received_bytes': 3080192,
'timestamp_desc': definitions.TIME_DESCRIPTION_START,
'total_bytes': 3080192,
'url': (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
def testProcess59ExtraColumn(self):
"""Tests the Process function on a Google Chrome 59 History database,
manually modified to have an unexpected column.
"""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-59_added-fake-column'], plugin)
# The History file contains 3 events (1 page visit, 2 file downloads).
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 3)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the page visit event.
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'date_time': '2018-01-21 14:08:52.037692',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': '',
'typed_count': 0,
'url': expected_url}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the file downloaded event.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'date_time': '2018-01-21 14:08:51.811123',
'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
'received_bytes': 3080192,
'timestamp_desc': definitions.TIME_DESCRIPTION_START,
'total_bytes': 3080192,
'url': (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
joachimmetz/plaso
|
tests/parsers/sqlite_plugins/chrome_history.py
|
Python
|
apache-2.0
| 10,175
|
[
"VisIt"
] |
fd2dc191267eaa1ddb29730d42dcad28c4dcacf0087410800e5a1038b723ee81
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
#
def comp_dm(ksnac2x, ksn2occ):
"""
Computes the density matrix
Args:
ksnac2x : eigenvectors
ksn2occ : occupations
Returns:
ksabc2dm : density matrix
"""
from numpy import einsum
if ksnac2x.shape[-1]==2 : raise RuntimeError('check, please.')
ksnac2x_occ = einsum('ksnac,ksn->ksnac', ksnac2x, ksn2occ)
ksabc2dm = einsum('ksnac,ksnbc->ksabc', ksnac2x_occ, ksnac2x)
return ksabc2dm
|
gkc1000/pyscf
|
pyscf/nao/m_comp_dm.py
|
Python
|
apache-2.0
| 1,074
|
[
"PySCF"
] |
234d4a5113c323d59be6f7285ef4eef8b2865739ee8be25bcc0083476a3c9381
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:09:57 2015
This script converts output from WrapDbihar code to the format expected by the
legacy Coupled Cells code.
The code reorders cells and produces the required files in TXT format.
VTK files are written out for visual verification.
This code assumes there are three branches (parent and two daughters) in the data.
"""
import os
import vtk
numECsPerCol = 4
numSMCsPerRow = 4
numECsPerRow = numSMCsPerRow * 5
numSMCsPerCol = numECsPerCol * 13
numECsPerQuad = numECsPerRow * numECsPerCol
numSMCsPerQuad = numSMCsPerCol * numSMCsPerRow
numQuadsPerRing = 0
meshSet = []
# VTK files to write.
taskVTKFiles = [
"vtk/parent.vtp",
"vtk/left_daughter.vtp",
"vtk/right_daughter.vtp",
]
ecCentroidVTKFiles = [
"vtk/ec_centeroid_parent.vtp",
"vtk/ec_centeroid_left_daughter.vtp",
"vtk/ec_centeroid_right_daughter.vtp",
]
ecVTKFiles = [
"vtk/ec_mesh_parent.vtp",
"vtk/ec_mesh_left_daughter.vtp",
"vtk/ec_mesh_right_daughter.vtp",
]
smcVTKFiles = [
"vtk/smc_mesh_parent.vtp",
"vtk/smc_mesh_left_daughter.vtp",
"vtk/smc_mesh_right_daughter.vtp"
]
def writeLegacyVTK():
# This is where the data is for testing purposes.
print("Current working directory:", os.getcwd())
if os.path.isdir("vtk") == False:
os.makedirs("vtk")
print("Cretated vtk output directory...")
if os.path.isdir("files") == False:
os.makedirs("files")
print("Created files ouptut directory...")
# Working with the task mesh.
taskMeshReader = vtk.vtkXMLPolyDataReader()
taskMeshReader.SetFileName(meshSet[0])
taskMeshReader.Update()
taskMesh = taskMeshReader.GetOutput()
# Get the range of branch labels.
labelRange = [0, 0]
taskMesh.GetCellData().GetScalars().GetRange(labelRange, 0)
# Convert label range to a list of labels.
labelRange = range(int(labelRange[0]), int(labelRange[1]) + 1)
print("Labels found in task mesh:", labelRange)
# Store the number of rings for each label.
numRingsPerLabel = {}
# For every label in the range of labels we want to extract all cells/quads.
for label in labelRange:
# Use this filter to extract the cells for a given label value.
branchSelector = vtk.vtkThreshold()
branchSelector.SetInputData(taskMesh)
branchSelector.ThresholdBetween(label,label);
branchSelector.Update()
taskMeshBranch = branchSelector.GetOutput()
# New vtkPoints for storing reordered points.
reorderedPoints = vtk.vtkPoints()
        # New vtkCellArray for storing reordered cells.
reorderedCellArray = vtk.vtkCellArray()
numQuadRowsPerBranch = taskMeshBranch.GetNumberOfCells() / numQuadsPerRing;
numRingsPerLabel[label] = numQuadRowsPerBranch
# Working with rows in reverse order: UPSTREAM.
ringIds = range(0, int(numQuadRowsPerBranch))
ringIds = list(ringIds)
ringIds.reverse()
rowBase = 0
# Iterate over the rings in reverse order.
for ringNum in ringIds:
# print("ringNum", ringNum)
# Iterate over the cells in normal order.
for cellNum in range(0, int(numQuadsPerRing)):
# Calculate the 'real' cell id and get the corresponding cell.
cellId = ringNum * numQuadsPerRing + cellNum
cell = taskMeshBranch.GetCell(cellId)
# The ids to be written to the TXT file.
pointIdList = [cell.GetNumberOfPoints()]
# Write the appropriate points to TXT file.
for pPos in range(0, cell.GetNumberOfPoints()):
newPoint = False
if ringNum == ringIds[0]:
if cellNum == 0:
newPoint = True
elif pPos == 1 or pPos == 2:
newPoint = True
else:
if cellNum == 0:
if pPos == 0 or pPos == 1:
newPoint = True
else:
if pPos == 1:
newPoint = True
if newPoint == True:
# Inserting a new point...
point = taskMeshBranch.GetPoint(cell.GetPointId(pPos))
# ... with a new id.
newId = reorderedPoints.InsertNextPoint(point)
pointIdList.append(newId)
                        # To make it easier to remember the number of points inserted in a row.
if cellNum == 0 and pPos == 0:
rowBasePrev = newId
else:
# Perhaps this can be done in a nicer way.
# Calculate the id of a previously inserted point.
if ringNum == ringIds[0]:
if cellNum == 1:
if pPos == 0:
pointIdList.append(1)
elif pPos == 3:
pointIdList.append(2)
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 2))
elif pPos == 3:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 3))
elif ringNum == ringIds[1]:
if cellNum == 0:
if pPos == 2:
pointIdList.append(1)
elif pPos == 3:
pointIdList.append(0)
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 1))
elif pPos == 2:
pointIdList.append(int(cellNum * 2 + 2))
elif pPos == 3:
if cellNum == 1:
pointIdList.append(1)
else:
pointIdList.append(int(cellNum * 2))
else:
if cellNum == 0:
if pPos == 2:
pointIdList.append(int(rowBase + 1))
elif pPos == 3:
pointIdList.append(int(rowBase))
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 1))
elif pPos == 2:
pointIdList.append(int(rowBase + cellNum + 1))
elif pPos == 3:
pointIdList.append(int(rowBase + cellNum))
# print(pointIdList, rowBase)
# Insert the ids into the cell array.
newCell = vtk.vtkQuad()
newCell.GetPointIds().Reset()
for id in pointIdList[1:]:
newCell.GetPointIds().InsertNextId(id)
reorderedCellArray.InsertNextCell(newCell)
rowBase = rowBasePrev
# print('\n')
print("Inserted", reorderedPoints.GetNumberOfPoints(), "task mesh points for label", label, "...")
print("Inserted", reorderedCellArray.GetNumberOfCells(), "task mesh cells for label", label, "...")
# Create new vtkPolyData object for the new reordered mesh.
reorderedTaskMeshBranch = vtk.vtkPolyData()
# Put the reordered points and cells into the reordered mesh.
reorderedTaskMeshBranch.SetPoints(reorderedPoints)
reorderedTaskMeshBranch.SetPolys(reorderedCellArray)
# Write the VTK file.
reorderedMeshWriter = vtk.vtkXMLPolyDataWriter()
reorderedMeshWriter.SetInputData(reorderedTaskMeshBranch)
reorderedMeshWriter.SetFileName(taskVTKFiles[label])
reorderedMeshWriter.Update()
print("Rings per label:", numRingsPerLabel, "...")
ringsPerLabelVals = numRingsPerLabel.values()
# Check all rings per label values are the same.
# assert ringsPerLabelVals[1:] == ringsPerLabelVals[:-1], "All values of rings per label must be identical. Generated output is invalid ..."
print(ringsPerLabelVals)
# Working with EC mesh.
ecMeshReader = vtk.vtkXMLPolyDataReader()
ecMeshReader.SetFileName(meshSet[1])
ecMeshReader.Update()
# Original ECs mesh to work with.
ecMesh = ecMeshReader.GetOutput()
print("There are", ecMesh.GetNumberOfCells(), "ECs in total ...")
# For every label in the range of labels we want to extract all ECs.
for label in labelRange:
# Keep track of how many branches we need to skip.
numECsPerLabel = numQuadsPerRing * numRingsPerLabel[label] * numECsPerQuad
ecCellOffset = label * numECsPerLabel
print("ecCellOffset", ecCellOffset)
# Collect cell ids to select.
selectionIds = vtk.vtkIdTypeArray()
for sId in range(0, int(numECsPerLabel)):
selectionIds.InsertNextValue(int(ecCellOffset) + sId)
        # Create selection node.
selectionNode = vtk.vtkSelectionNode()
selectionNode.SetFieldType(selectionNode.CELL)
selectionNode.SetContentType(selectionNode.INDICES)
selectionNode.SetSelectionList(selectionIds)
# Create selection.
selection = vtk.vtkSelection()
selection.AddNode(selectionNode)
# Use vtkSelection filter.
selectionExtractor = vtk.vtkExtractSelection()
selectionExtractor.SetInputData(0, ecMesh)
selectionExtractor.SetInputData(1, selection)
selectionExtractor.Update()
extractedECs = selectionExtractor.GetOutput()
# Ring ids list for traversal.
ringIds = range(0, int(numRingsPerLabel[label]))
ringIds = list(ringIds)
ringIds.reverse()
# Number of ECs rows is the number of ECs per quad.
rowIds = range(0, numECsPerCol)
rowIds = list(rowIds)
rowIds.reverse()
# The ECs are organised in rings of blocks of cells.
        # New vtkCellArray for storing reordered cells.
reorderedCellArray = vtk.vtkCellArray()
# Iterate over the rings in reverse order.
for ringNum in ringIds:
# Iterate over the 'imaginary' quads of cells in normal order.
for quadNum in range(0, numQuadsPerRing):
# Iterate over the rows of cells in reverse order.
# Calculate the 'real' id for the 'imaginary' quad.
quadId = ringNum * numQuadsPerRing + quadNum
# Iterate over rows of cells in reverse order.
for rowNum in rowIds:
# Iterate over the rows of cells in normal order.
for ecNum in range(0, numECsPerRow):
# Calculate the 'real' ec cell id and get the corresponding cell.
ecId = quadId * numECsPerQuad + rowNum * numECsPerRow + ecNum
ecCell = extractedECs.GetCell(ecId)
reorderedCellArray.InsertNextCell(ecCell)
# Create new vtkPolyData object for the new reordered mesh.
reorderedECMeshBranch = vtk.vtkPolyData()
# Insert our new points.
reorderedECMeshBranch.SetPoints(extractedECs.GetPoints())
# Set the reordered cells to the reordered ECs mesh.
reorderedECMeshBranch.SetPolys(reorderedCellArray)
# New vtkPoints for storing reordered points.
reorderedPoints = vtk.vtkPoints()
        # New vtkCellArray for storing reordered cells.
reorderedCellArray = vtk.vtkCellArray()
rowBase = 0
# Iterate over quads in normal order because they have been reordered.
for quadNum in range(0, int(numRingsPerLabel[label]) * numQuadsPerRing):
# Iterate over rows in normal order because they have been reordered.
for rowNum in range(0, numECsPerCol):
# Iterate over the ECs in the row in normal order.
for ecNum in range(0, numECsPerRow):
# Calculate the 'real' ec cell id and get the corresponding cell.
ecId = quadNum * numECsPerQuad + rowNum * numECsPerRow + ecNum
ecCell = reorderedECMeshBranch.GetCell(ecId)
# The ids to be written to the TXT file.
pointIdList = [ecCell.GetNumberOfPoints()]
# Write the appropriate points to the TXT file.
for pPos in range(0, ecCell.GetNumberOfPoints()):
newPoint = False
if rowNum == 0:
if ecNum == 0:
newPoint = True
elif pPos == 1 or pPos == 2:
newPoint = True
else:
if ecNum == 0:
if pPos == 0 or pPos == 1:
newPoint = True
else:
if pPos == 1:
newPoint = True
if newPoint == True:
# Inserting a new point...
point = reorderedECMeshBranch.GetPoint(ecCell.GetPointId(pPos))
# ... with a new id.
newId = reorderedPoints.InsertNextPoint(point)
pointIdList.append(newId)
if ecNum == 0 and pPos == 0:
rowBasePrev = newId
else:
# Perhaps this can be done in a nicer way.
                            # Calculate the id of a previously inserted point.
if rowNum == 0:
if ecNum == 1:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 3))
elif pPos == 3:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 4))
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 2))
elif pPos == 3:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 3))
elif rowNum == 1:
if ecNum == 0:
if pPos == 2:
pointIdList.append(int(rowBase + 1))
elif pPos == 3:
pointIdList.append(int(rowBase))
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 1))
elif pPos == 2:
pointIdList.append(int(rowBase + ecNum * 2 + 2))
elif pPos == 3:
if ecNum == 1:
pointIdList.append(int(rowBase + 1))
else:
pointIdList.append(int(rowBase + ecNum * 2))
else:
if ecNum == 0:
if pPos == 2:
pointIdList.append(int(rowBase + 1))
elif pPos == 3:
pointIdList.append(int(rowBase))
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 1))
elif pPos == 2:
pointIdList.append(int(rowBase + ecNum + 1))
elif pPos == 3:
pointIdList.append(int(rowBase + ecNum))
# print(pointIdList, rowBase)
# Insert the ids into the cell array.
newCell = vtk.vtkQuad()
newCell.GetPointIds().Reset()
for id in pointIdList[1:]:
newCell.GetPointIds().InsertNextId(id)
reorderedCellArray.InsertNextCell(newCell)
rowBase = rowBasePrev
# print('\n')
print("There are", reorderedPoints.GetNumberOfPoints(), "ECs points for label", label, "...")
print("There are", reorderedCellArray.GetNumberOfCells(), "ECs cells for label", label, "...")
# Create new vtkPolyData object for the new reordered mesh.
reorderedECs = vtk.vtkPolyData()
# Put the reordered points and cells into the mesh.
reorderedECs.SetPoints(reorderedPoints)
reorderedECs.SetPolys(reorderedCellArray)
# Write the VTK EC mesh file.
reorderedMeshWriter = vtk.vtkXMLPolyDataWriter()
reorderedMeshWriter.SetInputData(reorderedECs)
reorderedMeshWriter.SetFileName(ecVTKFiles[label])
reorderedMeshWriter.Update()
# Use VTK centroid filter to get the centroids in the right order
# from the reorderedECMeshBranch.
centroidFilter = vtk.vtkCellCenters()
centroidFilter.SetInputData(reorderedECs)
centroidFilter.Update()
# Create a vertex cell for each point.
pointsToVerticesFilter = vtk.vtkVertexGlyphFilter()
pointsToVerticesFilter.SetInputData(centroidFilter.GetOutput())
pointsToVerticesFilter.Update()
reorderedCentroidBranch = pointsToVerticesFilter.GetOutput()
        # Write the VTK EC centroid file.
centroidWriter = vtk.vtkXMLPolyDataWriter()
centroidWriter.SetInputData(reorderedCentroidBranch)
centroidWriter.SetFileName(ecCentroidVTKFiles[label])
centroidWriter.Update()
# Write the centroids to the TXT points and cells files.
for cId in range(0, reorderedCentroidBranch.GetNumberOfCells()):
centCell = reorderedCentroidBranch.GetCell(cId)
centIds = [centCell.GetNumberOfPoints()]
# Write centroid ids.
ptId = centCell.GetPointId(0)
centIds.append(ptId)
# Write centroid points.
point = reorderedCentroidBranch.GetPoint(ptId)
# Working with SMC mesh.
# Working with SMC mesh.
# Working with SMC mesh.
smcMeshReader = vtk.vtkXMLPolyDataReader()
smcMeshReader.SetFileName(meshSet[2])
smcMeshReader.Update()
# Original SMCs mesh to work with.
smcMesh = smcMeshReader.GetOutput()
print("There are", smcMesh.GetNumberOfCells(), "SMCs in total ...")
# For every label in the range of labels we want to extract all SMCs.
for label in labelRange:
# Keep track of how many branches we need to skip.
numSMCsPerLabel = numQuadsPerRing * numRingsPerLabel[label] * numSMCsPerQuad
smcCellOffset = label * numSMCsPerLabel
print("smcCellOffset", smcCellOffset)
# Collect cell ids to select.
selectionIds = vtk.vtkIdTypeArray()
for sId in range(0, int(numSMCsPerLabel)):
selectionIds.InsertNextValue(int(smcCellOffset) + sId)
        # Create selection node.
selectionNode = vtk.vtkSelectionNode()
selectionNode.SetFieldType(selectionNode.CELL)
selectionNode.SetContentType(selectionNode.INDICES)
selectionNode.SetSelectionList(selectionIds)
# Create selection.
selection = vtk.vtkSelection()
selection.AddNode(selectionNode)
# Use vtkSelection filter.
selectionExtractor = vtk.vtkExtractSelection()
selectionExtractor.SetInputData(0, smcMesh)
selectionExtractor.SetInputData(1, selection)
selectionExtractor.Update()
extractedSMCs = selectionExtractor.GetOutput()
# Ring ids list for traversal.
ringIds = range(0, int(numRingsPerLabel[label]))
ringIds = list(ringIds)
ringIds.reverse()
# Number of SMCs rows is the number of ECs per quad times 13.
rowIds = range(0, numSMCsPerCol)
rowIds = list(rowIds)
rowIds.reverse()
# The SMCs are organised in rings of blocks of cells.
        # New vtkCellArray for storing reordered cells.
reorderedCellArray = vtk.vtkCellArray()
# Iterate over the rings in reverse order.
for ringNum in ringIds:
# Iterate over the 'imaginary' quads of cells in normal order.
for quadNum in range(0, numQuadsPerRing):
# Iterate over the rows of cells in reverse order.
# Calculate the 'real' id for the 'imaginary' quad.
quadId = ringNum * numQuadsPerRing + quadNum
# Iterate over rows of cells in reverse order.
for rowNum in rowIds:
# Iterate over the rows of cells in normal order.
for smcNum in range(0, numSMCsPerRow):
# Calculate the 'real' smc cell id and get the corresponding cell.
smcId = quadId * numSMCsPerQuad + rowNum * numSMCsPerRow + smcNum
smcCell = extractedSMCs.GetCell(smcId)
reorderedCellArray.InsertNextCell(smcCell)
# Create new vtkPolyData object for the new reordered mesh.
reorderedSMCMeshBranch = vtk.vtkPolyData()
# Insert our new points.
reorderedSMCMeshBranch.SetPoints(extractedSMCs.GetPoints())
# Set the reordered cells to the reordered SMCs mesh.
reorderedSMCMeshBranch.SetPolys(reorderedCellArray)
# New vtkPoints for storing reordered points.
reorderedPoints = vtk.vtkPoints()
        # New vtkCellArray for storing reordered cells.
reorderedCellArray = vtk.vtkCellArray()
rowBase = 0
# Iterate over quads in normal order because they have been reordered.
for quadNum in range(0, int(numRingsPerLabel[label]) * numQuadsPerRing):
# Iterate over rows in normal order because they have been reordered.
for rowNum in range(0, numSMCsPerCol):
# Iterate over the SMCs in the row in normal order.
for smcNum in range(0, numSMCsPerRow):
# Calculate the 'real' smc cell id and get the corresponding cell.
smcId = quadNum * numSMCsPerQuad + rowNum * numSMCsPerRow + smcNum
smcCell = reorderedSMCMeshBranch.GetCell(smcId)
# The ids to be written to the TXT file.
pointIdList = [smcCell.GetNumberOfPoints()]
# Write the appropriate points to the TXT file.
for pPos in range(0, smcCell.GetNumberOfPoints()):
newPoint = False
if rowNum == 0:
if smcNum == 0:
newPoint = True
elif pPos == 1 or pPos == 2:
newPoint = True
else:
if smcNum == 0:
if pPos == 0 or pPos == 1:
newPoint = True
else:
if pPos == 1:
newPoint = True
if newPoint == True:
# Inserting a new point...
point = reorderedSMCMeshBranch.GetPoint(smcCell.GetPointId(pPos))
# with a new id.
newId = reorderedPoints.InsertNextPoint(point)
pointIdList.append(newId)
if smcNum == 0 and pPos == 0:
rowBasePrev = newId
else:
# Perhaps this can be done in a nicer way.
                            # Calculate the id of a previously inserted point.
if rowNum == 0:
if smcNum == 1:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 3))
elif pPos == 3:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 4))
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 2))
elif pPos == 3:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 3))
elif rowNum == 1:
if smcNum == 0:
if pPos == 2:
pointIdList.append(int(rowBase + 1))
elif pPos == 3:
pointIdList.append(int(rowBase))
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 1))
elif pPos == 2:
pointIdList.append(int(rowBase + smcNum * 2 + 2))
elif pPos == 3:
if smcNum == 1:
pointIdList.append(int(rowBase + 1))
else:
pointIdList.append(int(rowBase + smcNum * 2))
else:
if smcNum == 0:
if pPos == 2:
pointIdList.append(int(rowBase + 1))
elif pPos == 3:
pointIdList.append(int(rowBase))
else:
if pPos == 0:
pointIdList.append(int(reorderedPoints.GetNumberOfPoints() - 1))
elif pPos == 2:
pointIdList.append(int(rowBase + smcNum + 1))
elif pPos == 3:
pointIdList.append(int(rowBase + smcNum))
# print(pointIdList, rowBase)
# Insert the ids into the cell array.
newCell = vtk.vtkQuad()
newCell.GetPointIds().Reset()
for id in pointIdList[1:]:
newCell.GetPointIds().InsertNextId(id)
reorderedCellArray.InsertNextCell(newCell)
rowBase = rowBasePrev
# print('\n')
print("There are", reorderedPoints.GetNumberOfPoints(), "SMCs points for label", label, "...")
print("There are", reorderedCellArray.GetNumberOfCells(), "SMCs cells for label", label, "...")
# Create new vtkPolyData object for the new reordered mesh.
reorderedSMCs = vtk.vtkPolyData()
        # Put the reordered points and cells into the mesh.
reorderedSMCs.SetPoints(reorderedPoints)
reorderedSMCs.SetPolys(reorderedCellArray)
# Write the VTK SMC mesh file.
reorderedMeshWriter = vtk.vtkXMLPolyDataWriter()
reorderedMeshWriter.SetInputData(reorderedSMCs)
reorderedMeshWriter.SetFileName(smcVTKFiles[label])
reorderedMeshWriter.Update()
print("All done ...")
print("... Except the last configuration_info.txt file ...")
configFile = open("files/configuration_info.txt", 'w')
configFile.write("Processors information\n")
configFile.write("Total number of points per branch (vtk points) = %d\t\tm = %d n = %d\n" \
% ((numQuadsPerRing + 1) * (numRingsPerLabel[0] + 1), (numQuadsPerRing + 1), (numRingsPerLabel[0] + 1)))
configFile.write("Total number of cells per branch (vtk cells) = %d\t\tm = %d n = %d\n" \
% (numQuadsPerRing * numRingsPerLabel[0], numQuadsPerRing, numRingsPerLabel[0]))
configFile.write("Total number of SMC mesh points per processor mesh (vtk points) = %d\t\tm = %d n = %d\n" \
% ((numSMCsPerCol + 1) * (numSMCsPerRow + 1), (numSMCsPerCol + 1), (numSMCsPerRow + 1)))
configFile.write("Total number of SMC mesh cells per processor mesh (vtk cells) = %d\t\tm = %d n = %d\n" \
% (numSMCsPerCol * numSMCsPerRow, numSMCsPerCol, numSMCsPerRow))
configFile.write("Total number of EC mesh points per processor mesh (vtk points) = %d\t\tm = %d n = %d\n" \
% ((numECsPerCol + 1) * (numECsPerRow + 1), (numECsPerCol + 1), (numECsPerRow + 1)))
configFile.write("Total number of EC mesh cells per processor mesh (vtk cells) = %d\t\tm = %d n = %d\n" \
% (numECsPerCol *numECsPerRow, numECsPerCol, numECsPerRow))
configFile.write("Total number of EC mesh centeroid points per processor mesh (vtk points) = %d\t\tm = %d n = %d\n" \
% (numECsPerCol *numECsPerRow, numECsPerCol, numECsPerRow))
configFile.write("Total number of EC mesh centeroid cells per processor mesh (vtk cells) = %d\t\tm = %d n = %d\n" \
% (numECsPerCol *numECsPerRow, numECsPerCol, numECsPerRow))
configFile.close()
print("Now it is all done for real ...")
def main():
print("This script is to be run with global parameters (input, output files, etc.) set in the calling script.")
if __name__ == '__main__':
print("Starting", os.path.basename(__file__))
main()
print("Exiting", os.path.basename(__file__))
|
BlueFern/DBiharMesher
|
util/DumpMeshToLegacyFormat.py
|
Python
|
gpl-2.0
| 30,826
|
[
"VTK"
] |
882210a3b79ce8eb9b6e5e64dffd95e162cf35458279d607ba7bd2e382f306c7
|
import numpy as np
import pytest
from pysisyphus.calculators import XTB
from pysisyphus.calculators.PySCF import PySCF
from pysisyphus.helpers import geom_loader
from pysisyphus.optimizers.hessian_updates import (
bfgs_update,
damped_bfgs_update,
double_damp,
sr1_update,
psb_update,
flowchart_update,
mod_flowchart_update,
bofill_update,
)
from pysisyphus.optimizers.RFOptimizer import RFOptimizer
from pysisyphus.testing import using
from pysisyphus.tsoptimizers.RSIRFOptimizer import RSIRFOptimizer
@pytest.mark.parametrize(
"update_func",
[
bfgs_update,
damped_bfgs_update,
flowchart_update,
mod_flowchart_update,
bofill_update,
],
)
def test_hessian_updates(update_func):
N = 3
dx = np.ones(N)
dg = np.ones(N)
H = np.arange(N * N).reshape(-1, N)
dH = update_func(H, dx, dg)
@using("pyscf")
@pytest.mark.parametrize(
"hessian_update",
[
"bfgs",
"none",
],
)
def test_no_hessian_update(hessian_update):
geom = geom_loader("lib:h2o.xyz")
calc = PySCF(basis="sto3g", pal=2)
geom.set_calculator(calc)
opt = RFOptimizer(geom, thresh="gau", hessian_update=hessian_update)
opt.run()
assert geom.energy == pytest.approx(-74.96590119)
@using("xtb")
@pytest.mark.parametrize(
"hessian_update",
(
"bofill",
"ts_bfgs",
"ts_bfgs_org",
"ts_bfgs_rev",
),
)
def test_ts_hessian_update(this_dir, hessian_update):
geom = geom_loader("lib:tsbfgs_init.xyz", coord_type="redund")
calc = XTB(pal=6)
geom.set_calculator(calc)
opt = RSIRFOptimizer(
geom,
hessian_init=this_dir / "tsbfgs_init_hess.h5",
hessian_update=hessian_update,
)
opt.run()
assert opt.is_converged
assert geom.energy == pytest.approx(-17.81225910)
@using("pyscf")
@pytest.mark.parametrize(
"hessian_update",
(
"ts_bfgs",
"ts_bfgs_org",
"ts_bfgs_rev",
),
)
def test_ts_hessian_update_hcn(hessian_update):
geom = geom_loader("lib:hcn_iso_pm6_near_ts.xyz", coord_type="redund")
calc = PySCF(basis="321g", pal=2)
geom.set_calculator(calc)
opt = RSIRFOptimizer(
geom,
hessian_update=hessian_update,
)
opt.run()
assert opt.is_converged
assert geom.energy == pytest.approx(-92.24603904)
|
eljost/pysisyphus
|
tests/test_hessian_updates/test_hessian_updates.py
|
Python
|
gpl-3.0
| 2,385
|
[
"PySCF",
"xTB"
] |
0b1a1dae2f971f9cc12ed91bfe031a53064be6f0c5f351be587663256dc466cf
|
########################################################################
# This example demonstrates synaptic triggering of a wave of calcium
# release (CICR) from the endoplasmic reticulum. The wave is confined to
# a subset of a dendrite where there is elevated IP3 present.
#
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3.
########################################################################
import moose
import pylab
import rdesigneur as rd
rdes = rd.rdesigneur(
turnOffElec = False,
chemDt = 0.002,
chemPlotDt = 0.02,
diffusionLength = 1e-6,
numWaveFrames = 50,
useGssa = False,
addSomaChemCompt = False,
addEndoChemCompt = True,
# cellProto syntax: ['ballAndStick', 'name', somaDia, somaLength, dendDia, dendLength, numDendSeg]
cellProto = [['ballAndStick', 'soma', 10e-6, 10e-6, 2e-6, 40e-6, 4]],
spineProto = [['makeActiveSpine()', 'spine']],
chemProto = [['./chem/CICRspineDend.g', 'chem']],
spineDistrib = [['spine', '#dend#', '10e-6', '0.1e-6']],
chemDistrib = [['chem', 'dend#,spine#,head#', 'install', '1' ]],
adaptorList = [
[ 'Ca_conc', 'Ca', 'spine/Ca', 'conc', 0.00008, 8 ]
],
stimList = [
['head0', '0.5', 'glu', 'periodicsyn', '1 + 40*(t>5 && t<6)'],
['head0', '0.5', 'NMDA', 'periodicsyn', '1 + 40*(t>5 && t<6)'],
['dend#', 'g>10e-6 && g<=31e-6', 'dend/IP3', 'conc', '0.0006' ],
],
plotList = [
['head#', '1', 'spine/Ca', 'conc', 'Spine Ca conc'],
['dend#', '1', 'dend/Ca', 'conc', 'Dend Ca conc'],
['dend#', '1', 'dend/Ca', 'conc', 'Dend Ca conc', 'wave'],
['dend#', '1', 'dend_endo/CaER', 'conc', 'ER Ca conc', 'wave'],
['soma', '1', '.', 'Vm', 'Memb potl'],
],
)
moose.seed( 1234 )
rdes.buildModel()
moose.reinit()
moose.start( 16 )
rdes.display()
|
BhallaLab/moose
|
moose-examples/tutorials/Rdesigneur/ex8.1_synTrigCICR.py
|
Python
|
gpl-3.0
| 1,889
|
[
"MOOSE"
] |
7d9a7cf15ab3f1ab0d840ee2c42da31a73de9baa6a0ac6f6f9c3401b32d168ed
|
#!/usr/local/sci/bin/python
#***************************************
# 7 March 2018 KMW - v2
# Plots global time series from ASCII
# Can plot against other datasets
# Requires uncertainty values for HadISDH
#************************************************************************
# START
#************************************************************************
# USE python2.7
# python2.7 PlotGlobalTimeSeries_MAR2018.py
#
# REQUIRES
#
#************************************************************************
# Set up python imports
import matplotlib.pyplot as plt
import numpy as np
import sys, os
import scipy.stats
import struct
import os.path
import math
from mpl_toolkits.basemap import Basemap
import datetime as dt
from matplotlib.dates import date2num,num2date
#from netCDF4 import Dataset
from scipy.io import netcdf
from RandomsRanges import LetterRange
# Set up initial run choices
timetype='annual' #'monthly', 'annual'
nparams=7
param=list(['q','e','rh','tw','td','t','dpd']) # tw, q, e, rh, t, td, dpd
param2=list(['q','e','RH','Tw','Td','T','DPD']) # Tw, q, e, RH, T, Td, DPD
unitees=list(['g/kg','hPa','%rh','degrees C','degrees C','degrees C','degrees C'])
homogtype=list(['IDPHA','IDPHA','IDPHA','IDPHA','PHADPD','IDPHA','PHA']) # 'IDPHA','PHA','PHADPD'
sourceslist=list(['ERAINTERIM','CRUTS3_21','CRUTEM4','GHCNM'])
#others=list([['ERAINTERIM'],
# ['ERAINTERIM','CRUTS3_21'],
# ['ERAINTERIM'],
# ['ERAINTERIM'],
# ['ERAINTERIM'],
# ['ERAINTERIM','CRUTEM4','GHCNM'],
# ['ERAINTERIM']])
others=list([[],
[],
[],
[],
[],
[],
[]])
nowmon='MAR'
nowyear='2018'
thenmon='JAN'
thenyear='2018'
version='4.0.0.2017f'
styr=1973
edyr=2017
nyrs=(edyr-styr)+1
nmons=(nyrs)*12
climst=1981
climed=2010
stcl=climst-styr
edcl=climed-styr
# Set up directories and files
PLOTDIR='/data/local/hadkw/HADCRUH2/UPDATE'+str(edyr)+'/IMAGES/TIMESERIES/'
DATADIR='/data/local/hadkw/HADCRUH2/UPDATE'+str(edyr)+'/STATISTICS/TIMESERIES/'
IfType='.dat' #'.nc'
INHFILEST='HadISDH.land'
#INHFILEED='5by5_'+thenmon+thenyear+'_areaTS_19732013'
if timetype == 'monthly':
INHFILEED='.'+version+'_global_ts_monthly_anoms8110_'+thenmon+thenyear+'.dat'
else:
INHFILEED='.'+version+'_global_ts_annual_anoms8110_'+thenmon+thenyear+'.dat'
INOTHFULL='_areaTS_1973'+str(edyr)
INOTHMASK='_HadISDHMASKarea'+str(edyr)
OUTPLOT='PlotNiceTimeSeries.'+version+'_'+timetype+'_anoms8110_'+nowmon+nowyear
# Set up variables
mdi=-1e30
varH=[] # nvars(rows) by mons masked array
uncsHtot=[] # nvars(rows) by mons masked array
uncsHcov=[] # nvars(rows) by mons masked array
uncsHsamp=[] # nvars(rows) by mons masked array
uncsHstat=[] # nvars(rows) by mons masked array
othervarsFULL=[] # nvars(rows) by nothers (max) by mons masked array
othervarsMASK=[] # nvars(rows) by nothers (max) by mons masked array (HadISDH mask too)
#************************************************************************
# Subroutines
#************************************************************************
# READDATA
def ReadData(FileName,typee,delimee,skipee):
''' Use numpy genfromtxt reading to read in all rows from a complex array '''
''' Need to specify format as it is complex '''
''' outputs an array of tuples that in turn need to be subscripted by their names defaults f0...f8 '''
return np.genfromtxt(FileName, dtype=typee,delimiter=delimee,skip_footer=skipee) # ReadData
#************************************************************************
# PlotNiceTimeSeries
def PlotNiceTimeSeries(TheFile,TheHvars,TheHuncsC,TheHuncsSp,TheHuncsSt,
TheLablees,TheUnitees,TheVars,TheMASKVars,
TheMCount,TheYCount,TheTimeType,TheStYr,TheEdYr,TheMDI,TheColls,TheParams):
''' Plot a panel for each element of TheHvars '''
''' Add Coverage, Sampling and Station uncertainty ranges '''
''' Add lines for any extra estimates (TheVars) and HadISDH MASKED versions '''
''' Save as png and eps '''
''' TheHvars is a multi-row array: rows for vars, columns for months '''
''' Ditto TheHuncs C=coverage, Sp=sampling, St=station '''
''' TheLablees is the name list for all other vars '''
''' TheUnitees is the units name list '''
''' TheVars is a multirow array if there is 1+ var available - or [] '''
''' TheMASKVars - ditto above but masked to HadISDH coverage '''
''' TheMCount - number of months, TheStYr/EdYr - start and end years '''
''' TheMDI - missing data indicator for masking '''
''' TheColls - dictionary of colours for each dataset '''
# set up number of panels and number of lines
nplots=len(TheParams[:])
print('PLOT NUMBERS: ',nplots)
nlines=[]
for n in range(nplots):
print(n,TheLablees[n][:])
nlines.append(len(TheLablees[n][:]))
Letteree=[]
Letteree=LetterRange(0,nplots)
# set up x axes
if TheTimeType == 'monthly':
TheMonths=[]
yr=TheStYr
mon=1
for m in range(TheMCount):
TheMonths.append(dt.date(yr,mon,1))
mon=mon+1
if mon == 13:
mon=1
yr=yr+1
TheMonths=np.array(TheMonths)
else:
TheMonths=[]
yr=TheStYr
mon=1
for y in range(TheYCount):
TheMonths.append(dt.date(yr,mon,1))
yr=yr+1
TheMonths=np.array(TheMonths)
xtitlee='Years'
# set up dimensions and plot
xpos=[]
ypos=[]
xfat=[]
ytall=[]
totalyspace=0.90 # start 0.08 end 0.98
totalxspace=0.84 # start 0.12 end 0.98
for n in range(nplots):
xpos.append(0.14)
ypos.append(0.98-((n+1)*(totalyspace/nplots)))
xfat.append(totalxspace)
ytall.append(totalyspace/nplots)
# plt.clf()
# fig = plt.figure(1,figsize=(8,12))
# plt.axes([0.15,0.1,0.8,0.80])
f,axarr=plt.subplots(7,figsize=(6,12),sharex=True) #6,18
for pp in range(nplots):
print('Plot: ',pp,TheParams[pp])
# print(TheHvars[pp,0:10])
print(TheHuncsC[pp,0:10])
print(TheHuncsSp[pp,0:10])
print(TheHuncsSt[pp,0:10])
#axarr[pp].set_size(14)
axarr[pp].set_position([xpos[pp],ypos[pp],xfat[pp],ytall[pp]])
if TheTimeType == 'monthly':
axarr[pp].set_xlim([TheMonths[0],TheMonths[TheMCount-1]])
else:
axarr[pp].set_xlim([TheMonths[0],TheMonths[TheYCount-1]])
axarr[pp].set_ylim([math.floor(min(TheHvars[pp,:]-TheHuncsC[pp,:])),
math.ceil(max(TheHvars[pp,:]+TheHuncsC[pp,:]))])
if len(TheHuncsC[pp,:]) > 0:
axarr[pp].fill_between(TheMonths,TheHvars[pp,:]+TheHuncsC[pp,:],TheHvars[pp,:]-TheHuncsC[pp,:],
facecolor='LightGray',edgecolor='none')
axarr[pp].fill_between(TheMonths,TheHvars[pp,:]+TheHuncsSp[pp,:],TheHvars[pp,:]-TheHuncsSp[pp,:],
facecolor='LightSlateGray',edgecolor='none')
axarr[pp].fill_between(TheMonths,TheHvars[pp,:]+TheHuncsSt[pp,:],TheHvars[pp,:]-TheHuncsSt[pp,:],
facecolor='LightSlateGray',edgecolor='none')
if timetype == 'monthly':
axarr[pp].plot(TheMonths,TheHvars[pp,:],c='black',linewidth=0.5)
else:
axarr[pp].plot(TheMonths,TheHvars[pp,:],c='black',linewidth=2)
axarr[pp].annotate(Letteree[pp]+') '+TheParams[pp],xy=(0.03,0.9),xycoords='axes fraction',size=10)
axarr[pp].annotate('HadISDH',xy=(0.14,0.9),xycoords='axes fraction',color='black',size=10)
for ll in range(nlines[pp]): # no problem if 0
print('Other: ',ll,TheLablees[pp][ll])
# axarr[pp].plot(TheMonths,TheVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linewidth=0.5)
# axarr[pp].plot(TheMonths,TheMASKVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linestyle='dotted',linewidth=0.5)
if timetype == 'monthly':
axarr[pp].plot(TheMonths,TheMASKVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linewidth=0.5)
else:
axarr[pp].plot(TheMonths,TheMASKVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linewidth=2)
axarr[pp].annotate(TheLablees[pp][ll],xy=(0.14,0.82-(ll*0.08)),xycoords='axes fraction',
color=TheColls[TheLablees[pp][ll]],size=10)
axarr[pp].set_ylabel(TheUnitees[pp],fontsize=10)
if TheTimeType == 'monthly':
axarr[pp].hlines(0,TheMonths[0],TheMonths[TheMCount-1],color='black')
else:
axarr[pp].hlines(0,TheMonths[0],TheMonths[TheYCount-1],color='black')
axarr[nplots-1].set_xlabel(xtitlee,fontsize=10)
# Figure Watermark and Labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFile+".eps")
plt.savefig(TheFile+".png")
    return #PlotNiceTimeSeries
#************************************************************************
# MAIN PROGRAM
#************************************************************************
# set up loops to read in all time series
if timetype == 'monthly':
varH=np.zeros((nparams,nmons))
uncsHcov=np.zeros((nparams,nmons))
uncsHsamp=np.zeros((nparams,nmons))
uncsHstat=np.zeros((nparams,nmons))
uncsHtot=np.zeros((nparams,nmons))
othervarsFULL=np.zeros((nparams,3,nmons))
othervarsMASK=np.zeros((nparams,3,nmons))
else:
varH=np.zeros((nparams,nyrs))
uncsHcov=np.zeros((nparams,nyrs))
uncsHsamp=np.zeros((nparams,nyrs))
uncsHstat=np.zeros((nparams,nyrs))
uncsHtot=np.zeros((nparams,nyrs))
othervarsFULL=np.zeros((nparams,3,nyrs))
othervarsMASK=np.zeros((nparams,3,nyrs))
varH[:,:]=mdi
uncsHcov[:,:]=mdi
uncsHsamp[:,:]=mdi
uncsHstat[:,:]=mdi
uncsHtot[:,:]=mdi
othervarsFULL[:,:,:]=mdi
othervarsMASK[:,:,:]=mdi
for nv in range(nparams):
tmpvar=[]
tmpvarUcov=[]
tmpvarUsamp=[]
tmpvarUstat=[]
tmpvarUtot=[]
print('Reading in: ',param2[nv])
# read in HadISDH time series
if IfType == '.nc':
MyNCFile=DATADIR+INHFILEST+param2[nv]+'.'+version+'_FLATgrid'+homogtype[nv]+INHFILEED+'.nc'
f=netcdf.netcdf_file(MyNCFile,'r')
if param[nv]=='q':
var=f.variables['glob_q_anoms']
elif param[nv]=='e':
var=f.variables['glob_e_anoms']
elif param[nv]=='rh':
var=f.variables['glob_RH_anoms']
elif param[nv]=='t':
var=f.variables['glob_T_anoms']
elif param[nv]=='tw':
var=f.variables['glob_Tw_anoms']
elif param[nv]=='td':
var=f.variables['glob_Td_anoms']
elif param[nv]=='dpd':
var=f.variables['glob_DPD_anoms']
tmpvar=np.array(var.data)
f.close()
else: # its a text file
MyDatFile=DATADIR+INHFILEST+param2[nv]+INHFILEED
MyTypes=("|S10","float","float","float","float","float")
MyDelimiters=[10,10,10,10,10,10]
MySkips=1
RawData=ReadData(MyDatFile,MyTypes,MyDelimiters,MySkips)
tmpvar=np.array(RawData['f1'])
tmpvarUcov=np.array(RawData['f3'])
tmpvarUsamp=np.array(RawData['f2'])
tmpvarUstat=np.array(RawData['f4'])
tmpvarUtot=np.array(RawData['f5'])
# rezero HadISDH to non 1981-2010 anomalies if necessary - ASSUME NO MISSING DATA!!!
if (climst != 1981) | (climed != 2010):
print('Renorming...')
if timetype == 'monthly':
tmpvar=np.reshape(tmpvar,(nyrs,12))
for mm in range(12):
subarr=tmpvar[:,mm]
climarr=subarr[stcl:edcl]
subarr[:]=subarr[:]-np.mean(climarr)
tmpvar[:,mm]=subarr[:]
varH[nv,:]=np.reshape(tmpvar,(1,nmons))
else:
climarr=tmpvar[stcl:edcl]
tmpvar[:]=tmpvar[:]-np.mean(climarr)
varH[nv,:]=np.reshape(tmpvar,(1,nyrs))
if len(tmpvarUcov) > 0:
uncsHcov[nv,:]=tmpvarUcov
uncsHsamp[nv,:]=tmpvarUsamp
uncsHstat[nv,:]=tmpvarUstat
uncsHtot[nv,:]=tmpvarUtot
# GOT TO HERE
# read in all time series
for no in range(len(others[nv][:])):
print('Reading in others: ',no,others[nv][no])
MyNCFile=DATADIR+others[nv][no]+'_'+param2[nv]+INOTHFULL+'.nc'
f=netcdf.netcdf_file(MyNCFile,'r')
var=f.variables['glob_anoms']
newvar=np.array(var.data)
f.close()
if timetype == 'annual':
newvar=np.reshape(newvar,(nyrs,12))
for yy in range(nyrs):
if newvar[yy,0] > mdi:
othervarsFULL[nv,no,yy]=np.mean(newvar[yy,:])
else:
othervarsFULL[nv,no,:]=np.reshape(newvar,(1,nmons))
MyNCFile=DATADIR+others[nv][no]+'_'+param2[nv]+INOTHMASK+'.nc'
f=netcdf.netcdf_file(MyNCFile,'r')
var=f.variables['glob_anoms']
newvar=np.array(var.data)
f.close()
if timetype == 'annual':
newvar=np.reshape(newvar,(nyrs,12))
for yy in range(nyrs):
if newvar[yy,0] > mdi:
othervarsMASK[nv,no,yy]=np.mean(newvar[yy,:])
else:
othervarsMASK[nv,no,:]=np.reshape(newvar,(1,nmons))
# convert to masked arrays and mask out missing data
print('Masking')
varH=np.ma.masked_array(varH)
varH[varH <= mdi]=np.ma.masked
uncsHcov=np.ma.masked_array(uncsHcov)
uncsHcov[uncsHcov <= mdi]=np.ma.masked
uncsHsamp=np.ma.masked_array(uncsHsamp)
uncsHsamp[uncsHsamp <= mdi]=np.ma.masked
uncsHstat=np.ma.masked_array(uncsHstat)
uncsHstat[uncsHstat <= mdi]=np.ma.masked
uncsHtot=np.ma.masked_array(uncsHtot)
uncsHtot[uncsHtot <= mdi]=np.ma.masked
othervarsFULL=np.ma.masked_array(othervarsFULL)
othervarsFULL[othervarsFULL <= mdi]=np.ma.masked
othervarsMASK=np.ma.masked_array(othervarsMASK)
othervarsMASK[othervarsMASK <= mdi]=np.ma.masked
# sort out in quadrature quantities for uncs where
# uncsHcov is total combined in quadrature
# uncsHsamp is uncsHstat+uncsHsamp quadrature contributions
# uncsHstat is uncsHstat quadrature contribution
print('Sorting out Uncs...')
for nv in range(nparams):
RatsSamp=[]
RatsStat=[]
RatsSamp=(uncsHsamp[nv,:]**2)/((uncsHcov[nv,:]**2)+(uncsHsamp[nv,:]**2)+(uncsHstat[nv,:]**2))
RatsStat=(uncsHstat[nv,:]**2)/((uncsHcov[nv,:]**2)+(uncsHsamp[nv,:]**2)+(uncsHstat[nv,:]**2))
print(len(RatsSamp),len(RatsStat))
uncsHcov[nv,:]=uncsHtot[nv,:]
uncsHsamp[nv,:]=(uncsHtot[nv,:]*RatsSamp[:])+(uncsHtot[nv,:]*RatsStat[:])
uncsHstat[nv,:]=uncsHtot[nv,:]*RatsStat[:]
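# Worked example with assumed numbers (not taken from any HadISDH file): if for
# one year cov=0.06, samp=0.03 and stat=0.02, the squared fractions are
# RatsSamp = 0.0009/0.0049 ~ 0.18 and RatsStat = 0.0004/0.0049 ~ 0.08, so the
# three plotted bands become uncsHtot, uncsHtot*0.27 (sampling+station share)
# and uncsHtot*0.08 (station share) - the nesting that the fill_between calls
# in PlotNiceTimeSeries rely on.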
# set up colour dictionary - so that each dataset has an associated colour
diccols={}
diccols[sourceslist[0]]='Red'
diccols[sourceslist[1]]='MediumBlue'
diccols[sourceslist[2]]='DarkOrange'
diccols[sourceslist[3]]='MediumSlateBlue'
# call plotter
print('Plotting...')
MyFile=PLOTDIR+OUTPLOT
PlotNiceTimeSeries(MyFile,varH,uncsHcov,uncsHsamp,uncsHstat,
others,unitees,othervarsFULL,othervarsMASK,
nmons,nyrs,timetype,styr,edyr,mdi,
diccols,param2)
# stop()
print("And, we are done!")
|
Kate-Willett/Climate_Explorer
|
PYTHON/PlotGlobalTimeSeries_MAR2018.py
|
Python
|
cc0-1.0
| 14,911
|
[
"NetCDF"
] |
77d5bc2662517d5898630334bce7e35060621d2911c44a0715b78562a59de4f3
|
########################
# File to analyze the data produced by the networks
# Ramon Martinez February / 2014
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from visualization_functions import *
from time import localtime
import h5py
# Analysis functions
def calculate_mean_rate(spikes, N, T, dm, dt):
T_window = T - int(dm / dt)
rate = np.zeros((N,N,T_window))
for i in xrange(T_window):
rate[:,:,i] = np.mean(spikes[:,:,i:(i+dm)],2)
return rate * 1000 / dt #Transform to Hertz
def calculate_average(quantity):
return np.mean(quantity,2)
########################
# Load the data
########################
# Data to analyze
file = '2014-03-18T17-17-14experiment_hetero'
format = '.hdf5'
file = file + format
f = h5py.File('../data/'+file)
# f = h5py.File('experiment')
## Take voltage and spikes
voltage = f['voltage']
spikes = f['spikes']
initial_voltage = f['initial_state']
## Extract the Neuron's parameters
dt = voltage.attrs['dt']
N = np.shape(voltage)[0] # Number of neurons
T = np.shape(voltage)[2] # Total time
## Extract the connectivity patterns
network_data = f.require_group('/network')
alpha = network_data['alpha']
beta = network_data['beta']
r_alpha = network_data['r_alpha']
r_beta = network_data['r_beta']
########################
# Calculate the mean rate
########################
dm = 20 # Time window to the mean rate
rate = calculate_mean_rate(spikes, N, T, dm, dt)
########################
# Calculate the averages
########################
Vavg = calculate_average(voltage)
rate_avg = calculate_average(rate)
########################
# Visualize
########################
# Animation parameters
interval = 1 #Draws a new animation every interval millisecond
frames = int(T / dt) # Shows as many frames as data points
fps = int(20 * 1.0 / dt) # 20 data per second -multiply this if desired-
dpi = 120 # Quality
# Where to save the file
directory = '../data/'
date_stamp = '%4d-%02d-%02dT%02d-%02d-%02d' % localtime()[:6]
filename = directory + date_stamp
# alpha
plt.subplot(2,2,1)
plt.imshow(alpha[...], interpolation='nearest')
plt.colorbar()
plt.title('alpha')
# beta
plt.subplot(2,2,2)
plt.imshow(beta[...], interpolation='nearest')
plt.colorbar()
plt.title('beta')
# r_alpha
plt.subplot(2,2,3)
plt.imshow(r_alpha[...], interpolation='nearest')
plt.colorbar()
plt.title('r_alpha')
# r_alpha
plt.subplot(2,2,4)
plt.imshow(r_beta[...], interpolation='nearest')
plt.colorbar()
plt.title('r_beta')
plt.show()
# create_animation_voltage(voltage,frames,interval,fps,dpi,filename)
# create_animation_rate(rate,frames - int(dm/dt),interval,fps,dpi,filename)
# visualize_network_V(Vavg)
# visualize_network_rate(rate_avg)
# visualize_network_both(Vavg, rate_avg)
|
h-mayorquin/Neural_Pattern_Doursat
|
analyze_01.py
|
Python
|
gpl-2.0
| 2,827
|
[
"NEURON"
] |
1b49456af66f87ab93093c47c5afef74fed439932ad147cba9b2b94bb25bb43a
|
#!/usr/bin/env python
class VTK_XML_Serial_Unstructured:
"""
USAGE:
vtk_writer = VTK_XML_Serial_Unstructured()
vtk_writer.snapshot("filename.vtu", (x,y,z), optional arguments...)
vtk_writer.writePVD("filename.pvd")
"""
def __init__(self):
self.fileNames = []
def snapshot(self, fileName, crd, x_jump=[], y_jump=[], z_jump=[], spins = None,\
radii=[], colors=[]):
"""
ARGUMENTS:
fileName file name and/or path/filename
        crd                     N x 3 array of particle center coordinates (x, y, z per row)
x_jump optional array of x components of particle jump vectors
y_jump optional array of y components of particle jump vectors
z_jump optional array of z components of particle jump vectors
        spins                   optional array of scalar spins, written out as (0, 0, spin) vectors
radii optional array of particle radii
colors optional array of scalars to use to set particle colors
The exact colors will depend on the color map you set up in Paraview.
"""
import xml.dom.minidom
#import xml.dom.ext # python 2.5 and later
# Document and root element
doc = xml.dom.minidom.Document()
root_element = doc.createElementNS("VTK", "VTKFile")
root_element.setAttribute("type", "UnstructuredGrid")
root_element.setAttribute("version", "0.1")
root_element.setAttribute("byte_order", "LittleEndian")
doc.appendChild(root_element)
# Unstructured grid element
unstructuredGrid = doc.createElementNS("VTK", "UnstructuredGrid")
root_element.appendChild(unstructuredGrid)
# Piece 0 (only one)
piece = doc.createElementNS("VTK", "Piece")
piece.setAttribute("NumberOfPoints", str(len(crd)))
piece.setAttribute("NumberOfCells", "0")
unstructuredGrid.appendChild(piece)
### Points ####
points = doc.createElementNS("VTK", "Points")
piece.appendChild(points)
# Point location data
point_coords = doc.createElementNS("VTK", "DataArray")
point_coords.setAttribute("type", "Float32")
point_coords.setAttribute("format", "ascii")
point_coords.setAttribute("NumberOfComponents", "3")
points.appendChild(point_coords)
string = str()
for i in range(len(crd)):
string = string + repr(crd[i,0]) + ' ' + repr(crd[i,1]) \
+ ' ' + repr(crd[i,2]) + ' '
point_coords_data = doc.createTextNode(string)
point_coords.appendChild(point_coords_data)
#### Cells ####
cells = doc.createElementNS("VTK", "Cells")
piece.appendChild(cells)
# Cell locations
cell_connectivity = doc.createElementNS("VTK", "DataArray")
cell_connectivity.setAttribute("type", "Int32")
cell_connectivity.setAttribute("Name", "connectivity")
cell_connectivity.setAttribute("format", "ascii")
cells.appendChild(cell_connectivity)
# Cell location data
connectivity = doc.createTextNode("0")
cell_connectivity.appendChild(connectivity)
cell_offsets = doc.createElementNS("VTK", "DataArray")
cell_offsets.setAttribute("type", "Int32")
cell_offsets.setAttribute("Name", "offsets")
cell_offsets.setAttribute("format", "ascii")
cells.appendChild(cell_offsets)
offsets = doc.createTextNode("0")
cell_offsets.appendChild(offsets)
cell_types = doc.createElementNS("VTK", "DataArray")
cell_types.setAttribute("type", "UInt8")
cell_types.setAttribute("Name", "types")
cell_types.setAttribute("format", "ascii")
cells.appendChild(cell_types)
types = doc.createTextNode("1")
cell_types.appendChild(types)
#### Data at Points ####
point_data = doc.createElementNS("VTK", "PointData")
piece.appendChild(point_data)
# Particle jump vectors
if len(x_jump) > 0:
jumps = doc.createElementNS("VTK", "DataArray")
jumps.setAttribute("Name", "jumps")
jumps.setAttribute("NumberOfComponents", "3")
jumps.setAttribute("type", "Float32")
jumps.setAttribute("format", "ascii")
point_data.appendChild(jumps)
string = str()
for i in range(len(x_jump)):
string = string + repr(x_jump[i]) + ' ' + repr(y_jump[i]) \
+ ' ' + repr(z_jump[i]) + ' '
jumpData = doc.createTextNode(string)
jumps.appendChild(jumpData)
# Force vectors
if spins is not None:
forces = doc.createElementNS("VTK", "DataArray")
forces.setAttribute("Name", "spins")
forces.setAttribute("NumberOfComponents", "3")
forces.setAttribute("type", "Float32")
forces.setAttribute("format", "ascii")
point_data.appendChild(forces)
string = str()
for i in range(len(spins)):
string = string + '0. 0. ' + repr(spins[i]) + ' '
# string = string + repr(x_force[i]) + ' ' + repr(y_force[i]) \
# + ' ' + repr(z_force[i]) + ' '
forceData = doc.createTextNode(string)
forces.appendChild(forceData)
# Particle radii
if len(radii) > 0:
radiiNode = doc.createElementNS("VTK", "DataArray")
radiiNode.setAttribute("Name", "radii")
radiiNode.setAttribute("type", "Float32")
radiiNode.setAttribute("format", "ascii")
point_data.appendChild(radiiNode)
string = str()
for i in range(len(crd)):
string = string + repr(radii[i]) + ' '
radiiData = doc.createTextNode(string)
radiiNode.appendChild(radiiData)
if len(colors) > 0:
# Particle colors
colorNode= doc.createElementNS("VTK", "DataArray")
colorNode.setAttribute("Name", "colors")
colorNode.setAttribute("type", "Float32")
colorNode.setAttribute("format", "ascii")
point_data.appendChild(colorNode)
string = str()
for i in range(len(colors)):
string = string + repr(colors[i]) + ' '
color_Data = doc.createTextNode(string)
colorNode.appendChild(color_Data)
#### Cell data (dummy) ####
cell_data = doc.createElementNS("VTK", "CellData")
piece.appendChild(cell_data)
# Write to file and exit
outFile = open(fileName, 'w')
# xml.dom.ext.PrettyPrint(doc, file)
doc.writexml(outFile, newl='\n')
outFile.close()
self.fileNames.append(fileName)
def writePVD(self, fileName):
outFile = open(fileName, 'w')
import xml.dom.minidom
pvd = xml.dom.minidom.Document()
pvd_root = pvd.createElementNS("VTK", "VTKFile")
pvd_root.setAttribute("type", "Collection")
pvd_root.setAttribute("version", "0.1")
pvd_root.setAttribute("byte_order", "LittleEndian")
pvd.appendChild(pvd_root)
collection = pvd.createElementNS("VTK", "Collection")
pvd_root.appendChild(collection)
for i in range(len(self.fileNames)):
dataSet = pvd.createElementNS("VTK", "DataSet")
dataSet.setAttribute("timestep", str(i))
dataSet.setAttribute("group", "")
dataSet.setAttribute("part", "0")
dataSet.setAttribute("file", str(self.fileNames[i]))
collection.appendChild(dataSet)
outFile = open(fileName, 'w')
pvd.writexml(outFile, newl='\n')
outFile.close()
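# Minimal usage sketch (assumed, not part of the original module): write two
# snapshots of random particle positions plus a .pvd index for ParaView. The
# N x 3 `crd` layout and the optional spins/radii/colors keyword arguments
# follow the snapshot() signature above; the file names are illustrative only.
if __name__ == '__main__':
    import numpy as np
    writer = VTK_XML_Serial_Unstructured()
    for step in range(2):
        crd = np.random.rand(5, 3)                    # 5 particle centers
        writer.snapshot("particles_%02d.vtu" % step, crd,
                        spins=[0.5] * 5,              # written as (0, 0, spin) vectors
                        radii=[0.1] * 5,
                        colors=[float(i) for i in range(5)])
    writer.writePVD("particles.pvd")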
|
ansobolev/shs
|
shs/vtkxml/xml_write.py
|
Python
|
mit
| 8,152
|
[
"ParaView",
"VTK"
] |
cc2bb39cd863c3d2e9316f340330cf80c9fc9853ee646c4cb2d3c2c3730e923e
|
# -*- coding: utf-8 -*-
import numpy as np
import parabem
from parabem.vtk_export import VtkWriter
from parabem import PanelVector2, Vector2, Panel2
from parabem.airfoil import Airfoil
from parabem.pan2d import NeumannSource0Case2 as Case
from parabem.utils import check_path
airfoil = Airfoil.joukowsky(m=-0.1 +0.1j)
airfoil.numpoints = 50
alpha = np.deg2rad(10)
# panel method
case = Case(airfoil.panels)
case.v_inf = Vector2(np.cos(alpha), np.sin(alpha))
case.run()
print(np.array(case.matrix.values))
nx = 300
ny = 300
space_x = np.linspace(-1, 2, nx)
space_y = np.linspace(-0.2, 0.2, ny)
vec = lambda x: parabem.Vector2(x[0], x[1])
vec3 = lambda x: [x[0], x[1], 0]
grid = [parabem.Vector2(x, y) for y in space_y for x in space_x]
velocity = list(map(vec3, map(case.off_body_velocity, grid)))
vel1 = [(i[0]**2 + i[1]**2)**(0.5) for i in velocity]
pot = list(map(case.off_body_potential, grid))
with open(check_path("results/neumann/field.vtk"), "w") as _file:
writer = VtkWriter()
writer.structed_grid(_file, "airfoil", [nx, ny, 1])
writer.points(_file, grid)
writer.data(_file, velocity, name="velocity", _type="VECTORS", data_type="POINT_DATA")
writer.data(_file, pot, name="pot", _type="SCALARS", data_type="POINT_DATA")
writer.data(_file, vel1, name="vel", _type="SCALARS", data_type="POINT_DATA")
with open(check_path("results/neumann/airfoil.vtk"), "w") as _file:
writer = VtkWriter()
writer.unstructed_grid(_file, "airfoil")
writer.points(_file, [list(i.points[0]) + [0] for i in case.panels])
writer.lines(_file, [range(len(case.panels))])
|
booya-at/paraBEM
|
examples/vtk/vtk_airfoil_neumann_source.py
|
Python
|
gpl-3.0
| 1,600
|
[
"VTK"
] |
64b4960d7b7f5d6c1454559b3347029853cc5175e5c9df607f9a0f0d410655b5
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DataFile'
db.create_table(u'profiles_datafile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'profiles', ['DataFile'])
def backwards(self, orm):
# Deleting model 'DataFile'
db.delete_table(u'profiles_datafile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 8, 15, 31, 37, 799907)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 8, 15, 31, 37, 799517)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.DataDomainIndex']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datafile': {
'Meta': {'object_name': 'DataFile'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'table_label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_numerator': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_numerator_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_geo_key': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': "'255'", 'db_index': 'True'}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'numerator': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'numerator_moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'ordering': "['summary_level']", 'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_id_segments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'domain': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'domain_index'", 'symmetrical': 'False', 'through': u"orm['profiles.DataDomainIndex']", 'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['name']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'ordering': "['name']", 'object_name': 'Indicator'},
'data_as_of': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_tasks': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'ind_tasks'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['profiles.IndicatorTask']"}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'next_update_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.indicatortask': {
'Meta': {'object_name': 'IndicatorTask'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']", 'null': 'True', 'blank': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'ordering': "['name']", 'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
216software/Profiles
|
communityprofiles/profiles/oldmigrations/0004_auto__add_datafile.py
|
Python
|
mit
| 27,394
|
[
"MOE"
] |
75a9ecc9f426961b49be6879d10defa5330aa2fc790838fe62fea3a1cb6016d1
|
#!/usr/bin/env python
import os
import sys
import subprocess as sp
import argparse
if sys.version_info.major == 3:
PY3 = True
from urllib.request import urlretrieve
else:
    PY3 = False
from urllib import urlretrieve
usage = """
The easy way to test recipes is with `circleci build`. However, that does not
allow testing recipes locally with mulled-build (due to the technicalities of
running Docker within Docker and the CircleCI client).
This script makes it easy to do mulled-build tests. It works by using the same
code used in the .circleci/setup.sh script to build an isolated Miniconda
environment and a custom `activate` script.
Set up the environment like this:
./bootstrap.py /tmp/miniconda
It creates an activate script at ~/.config/bioconda/activate, which you can then source:
source ~/.config/bioconda/activate
to work in that isolated root environment independently of any other conda
installations you might have.
"""
ap = argparse.ArgumentParser(usage)
ap.add_argument('bootstrap', help='''Location to which a new Miniconda
installation plus bioconda-utils should be installed. This will
be separate from any existing conda installations.''')
ap.add_argument('--no-docker', action='store_true', help='''By default we
expect Docker to be present. Use this arg to disable that
behavior. This will reduce functionality, but is useful if
you're unable to install docker.''')
args = ap.parse_args()
# This is the "common" step in the CircleCI config which gets the versions of
# Miniconda and bioconda-utils that we're using.
urlretrieve(
'https://raw.githubusercontent.com/bioconda/bioconda-common/master/common.sh',
filename='.circleci/common.sh')
local_config_path = os.path.expanduser('~/.config/bioconda/activate')
def _write_custom_activate(install_path):
"""
Once the isolated Miniconda version has been installed, copy its activate
script over to a custom location, and then hard-code the paths and PS1. We
don't need a matching `deactivate` because the activate script properly
keeps track of the new location.
"""
config_dir = os.path.dirname(local_config_path)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
activate = os.path.join(install_path, 'miniconda/bin/activate')
lines = [i.rstrip() for i in open(activate)]
# Exact matches to lines we want to replace in the activate script, leading
# space included.
substitutions = [
(
'_CONDA_DIR=$(dirname "$_SCRIPT_LOCATION")',
'_CONDA_DIR="{0}/miniconda/bin"'.format(install_path)
),
(
' export PS1="(${CONDA_DEFAULT_ENV}) $PS1"',
' export PS1="(BIOCONDA-UTILS) $PS1"',
)
]
for orig, sub in substitutions:
# Be very picky so that we'll know if/when the activate script changes.
try:
pos = lines.index(orig)
except ValueError:
raise ValueError(
"Expecting '{0}' to be in {1} but couldn't find it"
.format(orig, activate)
)
lines[pos] = sub
with open(local_config_path, 'w') as fout:
for line in lines:
fout.write(line + '\n')
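# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The substitutions above rely on exact line matches, so any upstream change to
# the activate script fails loudly instead of silently producing a broken file.
# A minimal, standalone demonstration of that strategy:
def _substitution_sketch(install_path='/tmp/miniconda'):
    """Return a patched copy of a toy activate script, raising ValueError if
    the expected line is missing (mirrors _write_custom_activate above)."""
    lines = ['#!/bin/sh', '_CONDA_DIR=$(dirname "$_SCRIPT_LOCATION")', 'export PATH']
    wanted = '_CONDA_DIR=$(dirname "$_SCRIPT_LOCATION")'
    try:
        pos = lines.index(wanted)
    except ValueError:
        raise ValueError("Expecting {0!r} in the activate script".format(wanted))
    lines[pos] = '_CONDA_DIR="{0}/miniconda/bin"'.format(install_path)
    return lines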
use_docker = "true"
if args.no_docker:
use_docker = "false"
env = {'WORKSPACE': args.bootstrap, 'BOOTSTRAP': "true", 'USE_DOCKER': use_docker}
sp.check_call(['.circleci/setup.sh'], env=env)
_write_custom_activate(args.bootstrap)
print("""
An isolated version of bioconda-utils has been installed to {0}. This is
separate from any other conda installations you might have.
To use it, source this custom activate script:
source ~/.config/bioconda/activate
When done:
source deactivate
""")
|
dmaticzka/bioconda-recipes
|
bootstrap.py
|
Python
|
mit
| 3,844
|
[
"Bioconda"
] |
c891dd359bae1cda382eb521e38da7ef2ee25d9a6d4b6c751e25da38629747da
|
""" core implementation of testing process: init, session, runtest loop. """
import functools
import os
import sys
import _pytest
import _pytest._code
import py
import pytest
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.config import directory_arg
from _pytest.runner import collect_one_node
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
def pytest_addoption(parser):
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'])
parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
type="args", default=[])
#parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
#)
group = parser.getgroup("general", "running and selection options")
group._addoption('-x', '--exitfirst', action="store_const",
dest="maxfail", const=1,
help="exit instantly on first error or failed test."),
group._addoption('--maxfail', metavar="num",
action="store", type=int, dest="maxfail", default=0,
help="exit after first num failures or errors.")
group._addoption('--strict', action="store_true",
help="run pytest in strict mode, warnings become errors.")
group._addoption("-c", metavar="file", type=str, dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
group._addoption("--continue-on-collection-errors", action="store_true",
default=False, dest="continue_on_collection_errors",
help="Force test execution even if collection errors occur.")
group = parser.getgroup("collect", "collection")
group.addoption('--collectonly', '--collect-only', action="store_true",
help="only collect tests, don't execute them."),
group.addoption('--pyargs', action="store_true",
help="try to interpret all arguments as python packages.")
group.addoption("--ignore", action="append", metavar="path",
help="ignore path during collection (multi-allowed).")
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
# needs upgrading as well
group.addoption('--confcutdir', dest="confcutdir", default=None,
metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
help="only load conftest.py's relative to specified dir.")
group.addoption('--noconftest', action="store_true",
dest="noconftest", default=False,
help="Don't load any conftest.py files.")
group.addoption('--keepduplicates', '--keep-duplicates', action="store_true",
dest="keepduplicates", default=False,
help="Keep duplicate tests.")
group = parser.getgroup("debugconfig",
"test session debugging and configuration")
group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
help="base temporary directory for this test run.")
def pytest_namespace():
collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
return dict(collect=collect)
def pytest_configure(config):
    pytest.config = config # compatibility
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except pytest.UsageError:
raise
except KeyboardInterrupt:
excinfo = _pytest._code.ExceptionInfo()
if initstate < 2 and isinstance(
excinfo.value, pytest.exit.Exception):
sys.stderr.write('{0}: {1}\n'.format(
excinfo.typename, excinfo.value.msg))
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = EXIT_INTERRUPTED
except:
excinfo = _pytest._code.ExceptionInfo()
config.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
finally:
excinfo = None # Explicitly break reference cycle.
session.startdir.chdir()
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session,
exitstatus=session.exitstatus)
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return EXIT_TESTSFAILED
elif session.testscollected == 0:
return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if (session.testsfailed and
not session.config.option.continue_on_collection_errors):
raise session.Interrupted(
"%d errors during collection" % session.testsfailed)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i+1] if i+1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def pytest_ignore_collect(path, config):
p = path.dirpath()
ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
if path in ignore_paths:
return True
# Skip duplicate paths.
keepduplicates = config.getoption("keepduplicates")
duplicate_paths = config.pluginmanager._duplicatepaths
if not keepduplicates:
if path in duplicate_paths:
return True
else:
duplicate_paths.add(path)
return False
class FSHookProxy:
def __init__(self, fspath, pm, remove_mods):
self.fspath = fspath
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
def compatproperty(name):
def fget(self):
import warnings
warnings.warn("This usage is deprecated, please use pytest.{0} instead".format(name),
PendingDeprecationWarning, stacklevel=2)
return getattr(pytest, name)
return property(fget)
class NodeKeywords(MappingMixin):
def __init__(self, node):
self.node = node
self.parent = node.parent
self._markers = {node.name: True}
def __getitem__(self, key):
try:
return self._markers[key]
except KeyError:
if self.parent is None:
raise
return self.parent.keywords[key]
def __setitem__(self, key, value):
self._markers[key] = value
def __delitem__(self, key):
raise ValueError("cannot delete key in keywords dict")
def __iter__(self):
seen = set(self._markers)
if self.parent is not None:
seen.update(self.parent.keywords)
return iter(seen)
def __len__(self):
        return len(list(self.__iter__()))
def keys(self):
return list(self)
def __repr__(self):
return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(self, name, parent=None, config=None, session=None):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = getattr(parent, 'fspath', None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set()
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {}
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
Module = compatproperty("Module")
Class = compatproperty("Class")
Instance = compatproperty("Instance")
Function = compatproperty("Function")
File = compatproperty("File")
Item = compatproperty("Item")
def _getcustomclass(self, name):
cls = getattr(self, name)
if cls != getattr(pytest, name):
py.log._apiwarn("2.0", "use of node.%s is deprecated, "
"use pytest_pycollect_makeitem(...) to create custom "
"collection nodes" % name)
return cls
def __repr__(self):
return "<%s %r>" %(self.__class__.__name__,
getattr(self, 'name', None))
def warn(self, code, message):
""" generate a warning with the given code and message for this
item. """
assert isinstance(code, str)
fslocation = getattr(self, "location", None)
if fslocation is None:
fslocation = getattr(self, "fspath", None)
else:
fslocation = "%s:%s" % (fslocation[0], fslocation[1] + 1)
self.ihook.pytest_logwarning.call_historic(kwargs=dict(
code=code, message=message,
nodeid=self.nodeid, fslocation=fslocation))
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
try:
return self._nodeid
except AttributeError:
self._nodeid = x = self._makeid()
return x
def _makeid(self):
return self.parent.nodeid + "::" + self.name
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def _memoizedcall(self, attrname, function):
exattrname = "_ex_" + attrname
failure = getattr(self, exattrname, None)
if failure is not None:
py.builtin._reraise(failure[0], failure[1], failure[2])
if hasattr(self, attrname):
return getattr(self, attrname)
try:
res = function()
except py.builtin._sysex:
raise
except:
failure = sys.exc_info()
setattr(self, exattrname, failure)
raise
setattr(self, attrname, res)
return res
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker):
""" dynamically add a marker object to the node.
``marker`` can be a string or pytest.mark.* instance.
"""
from _pytest.mark import MarkDecorator
if isinstance(marker, py.builtin._basestring):
marker = MarkDecorator(marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
def get_marker(self, name):
""" get a marker object from this node or None if
the node doesn't have a marker with that name. """
val = self.keywords.get(name, None)
if val is not None:
from _pytest.mark import MarkInfo, MarkDecorator
if isinstance(val, (MarkDecorator, MarkInfo)):
return val
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
item = self
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter = True
if self.config.option.fulltrace:
style="long"
else:
tb = _pytest._code.Traceback([excinfo.traceback[-1]])
self._prunetraceback(excinfo)
if len(excinfo.traceback) == 0:
excinfo.traceback = tb
tbfilter = False # prunetraceback already does it
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
try:
os.getcwd()
abspath = False
except OSError:
abspath = True
return excinfo.getrepr(funcargs=True, abspath=abspath,
showlocals=self.config.option.showlocals,
style=style, tbfilter=tbfilter)
repr_failure = _repr_failure_py
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
return self._repr_failure_py(excinfo, style="short")
def _memocollect(self):
""" internal helper method to cache results of calling collect(). """
return self._memoizedcall('_collected', lambda: list(self.collect()))
def _prunetraceback(self, excinfo):
if hasattr(self, 'fspath'):
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, "/")
super(FSCollector, self).__init__(name, parent, config, session)
self.fspath = fspath
def _makeid(self):
relpath = self.fspath.relto(self.config.rootdir)
if os.sep != "/":
relpath = relpath.replace(os.sep, "/")
return relpath
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def __init__(self, name, parent=None, config=None, session=None):
super(Item, self).__init__(name, parent, config, session)
self._report_sections = []
def add_report_section(self, when, key, content):
if content:
self._report_sections.append((when, key, content))
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
# bestrelpath is a quite slow function
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
try:
fspath = cache[location[0]]
except KeyError:
fspath = self.session.fspath.bestrelpath(location[0])
cache[location[0]] = fspath
location = (fspath, location[1], str(location[2]))
self._location = location
return location
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = 'builtins' # for py3
class Session(FSCollector):
Interrupted = Interrupted
def __init__(self, config):
FSCollector.__init__(self, config.rootdir, parent=None,
config=config, session=self)
self.testsfailed = 0
self.testscollected = 0
self.shouldstop = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
self.startdir = py.path.local()
self.config.pluginmanager.register(self, name="session")
def _makeid(self):
return ""
@pytest.hookimpl(tryfirst=True)
def pytest_collectstart(self):
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, 'wasxfail'):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldstop = "stopping after %d failures" % (
self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
# check if we have the common case of running
        # hooks with all conftest.py files
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(fspath)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# one or more conftests are not in use at this fspath
proxy = FSHookProxy(fspath, pm, remove_mods)
else:
            # all plugins are active for this fspath
proxy = self.config.hook
return proxy
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
hook.pytest_collection_modifyitems(session=self,
config=self.config, items=items)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
self._initialpaths = set()
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
self._initialpaths.add(parts[0])
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
errors.append("not found: %s\n%s" % (arg, line))
#XXX: test this
raise pytest.UsageError(*errors)
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for parts in self._initialparts:
arg = "::".join(map(str, parts))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
def _collect(self, arg):
names = self._parsearg(arg)
path = names.pop(0)
if path.check(dir=1):
assert not names, "invalid arg %r" %(arg,)
for path in path.visit(fil=lambda x: x.check(file=1),
rec=self._recurse, bf=True, sort=True):
for x in self._collectfile(path):
yield x
else:
assert path.check(file=1)
for x in self.matchnodes(self._collectfile(path), names):
yield x
def _collectfile(self, path):
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, path):
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return
for pat in self._norecursepatterns:
if path.check(fnmatch=pat):
return False
ihook = self.gethookproxy(path)
ihook.pytest_collect_directory(path=path, parent=self)
return True
def _tryconvertpyarg(self, x):
"""Convert a dotted module name to path.
"""
import pkgutil
try:
loader = pkgutil.find_loader(x)
except ImportError:
return x
if loader is None:
return x
# This method is sometimes invoked when AssertionRewritingHook, which
# does not define a get_filename method, is already in place:
try:
path = loader.get_filename(x)
except AttributeError:
# Retrieve path from AssertionRewritingHook:
path = loader.modules[x][0].co_filename
if loader.is_package(x):
path = os.path.dirname(path)
return path
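    # Illustrative sketch (added for clarity; not part of the pytest API): the
    # pkgutil lookup used above maps a dotted module name to the file that
    # defines it, and a package name to its directory.
    @staticmethod
    def _tryconvertpyarg_sketch():
        import pkgutil
        loader = pkgutil.find_loader('json')        # a stdlib package
        path = loader.get_filename('json')          # .../json/__init__.py
        if loader.is_package('json'):
            path = os.path.dirname(path)            # .../json
        return path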
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
parts = str(arg).split("::")
if self.config.option.pyargs:
parts[0] = self._tryconvertpyarg(parts[0])
relpath = parts[0].replace("/", os.sep)
path = self.config.invocation_dir.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
raise pytest.UsageError("file or package not found: " + arg + " (missing __init__.py?)")
else:
raise pytest.UsageError("file not found: " + arg)
parts[0] = path
return parts
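    # Illustrative sketch (added for clarity; not part of the pytest API):
    # command line arguments split on "::" into a file part plus a chain of
    # node names, which is what _parsearg() and matchnodes() then walk.
    @staticmethod
    def _parsearg_sketch():
        parts = "tests/test_mod.py::TestClass::test_method".split("::")
        assert parts == ["tests/test_mod.py", "TestClass", "test_method"]
        return parts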
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, pytest.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, pytest.Collector)
rep = collect_one_node(node)
if rep.passed:
has_matched = False
for x in rep.result:
# TODO: remove parametrized workaround once collection structure contains parametrization
if x.name == name or x.name.split("[")[0] == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, pytest.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, pytest.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
|
razvanc-r/godot-python
|
tests/bindings/lib/_pytest/main.py
|
Python
|
mit
| 27,330
|
[
"VisIt"
] |
91d243848189dda41cf851fb0d06311c96fac3117759542e31bad8afa5e2d7f8
|
#!/usr/bin/env python
""" Patch utility to apply unified diffs
Brute-force line-by-line non-recursive parsing
Copyright (c) 2008-2012 anatoly techtonik
Available under the terms of MIT license
Project home: http://code.google.com/p/python-patch/
$Id: patch.py 181 2012-11-23 16:03:05Z techtonik $
$HeadURL: https://python-patch.googlecode.com/svn/trunk/patch.py $
This program needs further tweaking for how we use it at Galaxy.
"""
__author__ = "anatoly techtonik <techtonik@gmail.com>"
__version__ = "1.12.11"
import copy
import logging
import re
# cStringIO doesn't support unicode in 2.5
from StringIO import StringIO
import urllib2
from os.path import exists, isfile, abspath
import os
import shutil
#------------------------------------------------
# Logging is controlled by logger named after the
# module name (e.g. 'patch' for patch.py module)
debugmode = False
logger = logging.getLogger(__name__)
debug = logger.debug
info = logger.info
warning = logger.warning
class NullHandler(logging.Handler):
""" Copied from Python 2.7 to avoid getting
`No handlers could be found for logger "patch"`
http://bugs.python.org/issue16539
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
logger.addHandler(NullHandler())
#------------------------------------------------
# Constants for Patch/PatchSet types
DIFF = PLAIN = "plain"
GIT = "git"
HG = MERCURIAL = "mercurial"
SVN = SUBVERSION = "svn"
# mixed type is only used when a PatchSet contains
# Patches of different types
MIXED = "mixed"
#------------------------------------------------
# Helpers (these could come with Python stdlib)
# x...() function are used to work with paths in
# cross-platform manner - all paths use forward
# slashes even on Windows.
def xisabs(filename):
""" Cross-platform version of `os.path.isabs()`
Returns True if `filename` is absolute on
Linux, OS X or Windows.
"""
if filename.startswith('/'): # Linux/Unix
return True
elif filename.startswith('\\'): # Windows
return True
elif re.match(r'\w:[\\/]', filename): # Windows
return True
return False
def xnormpath(path):
""" Cross-platform version of os.path.normpath """
return os.path.normpath(path).replace(os.sep, '/')
def xstrip(filename):
""" Make relative path out of absolute by stripping
prefixes used on Linux, OS X and Windows.
This function is critical for security.
"""
while xisabs(filename):
# strip windows drive with all slashes
if re.match(r'\w:[\\/]', filename):
filename = re.sub(r'^\w+:[\\/]+', '', filename)
# strip all slashes
elif re.match(r'[\\/]', filename):
filename = re.sub(r'^[\\/]+', '', filename)
return filename
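# Illustrative sketch (added for clarity; not part of the original module):
# the x*() helpers accept both Unix and Windows style paths, and xstrip()
# keeps trimming drive letters and leading slashes until the path is
# relative, which is what makes patch targets safe to write out.
def _xpath_sketch():
    assert xisabs('/etc/passwd')
    assert xisabs('\\\\server\\share')
    assert xisabs('c:/temp/x')
    assert not xisabs('relative/path')
    assert xstrip('/etc/passwd') == 'etc/passwd'
    assert xstrip('c:/temp/x') == 'temp/x'
    return xnormpath('a//b/./c')   # -> 'a/b/c'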
#-----------------------------------------------
# Main API functions
def fromfile(filename):
""" Parse patch file. If successful, returns
PatchSet() object. Otherwise returns False.
"""
patchset = PatchSet()
debug("reading %s" % filename)
fp = open(filename, "rb")
res = patchset.parse(fp)
fp.close()
if res == True:
return patchset
return False
def fromstring(s):
""" Parse text string and return PatchSet()
object (or False if parsing fails)
"""
ps = PatchSet( StringIO(s) )
if ps.errors == 0:
return ps
return False
def fromurl(url):
""" Parse patch from an URL, return False
if an error occured. Note that this also
can throw urlopen() exceptions.
"""
ps = PatchSet( urllib2.urlopen(url) )
if ps.errors == 0:
return ps
return False
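# Illustrative sketch (added for clarity; not part of the original module):
# parsing a tiny, made-up unified diff with fromstring(). The filenames and
# hunk below are invented purely for demonstration.
def _fromstring_sketch():
    example = (
        "--- a/hello.txt\n"
        "+++ b/hello.txt\n"
        "@@ -1,2 +1,2 @@\n"
        " hello\n"
        "-wolrd\n"
        "+world\n"
    )
    ps = fromstring(example)
    assert ps is not False and len(ps) == 1
    p = ps.items[0]
    assert p.source == "a/hello.txt" and p.target == "b/hello.txt"
    assert len(p.hunks) == 1
    return ps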
# --- Utility functions ---
# [ ] reuse more universal pathsplit()
def pathstrip(path, n):
""" Strip n leading components from the given path """
pathlist = [path]
while os.path.dirname(pathlist[0]) != '':
pathlist[0:1] = os.path.split(pathlist[0])
return '/'.join(pathlist[n:])
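# Illustrative sketch (added for clarity; not part of the original module):
# pathstrip() drops the first n components of a slash-separated path.
def _pathstrip_sketch():
    assert pathstrip('path/to/file.txt', 1) == 'to/file.txt'
    assert pathstrip('path/to/file.txt', 2) == 'file.txt'
    return pathstrip('a/b/c', 0)   # unchanged: 'a/b/c'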
# --- /Utility function ---
class Hunk(object):
""" Parsed hunk data container (hunk starts with @@ -R +R @@) """
def __init__(self):
self.startsrc=None #: line count starts with 1
self.linessrc=None
self.starttgt=None
self.linestgt=None
self.invalid=False
self.hasplus=False # True if any "+" lines in hunk
self.hasminus=False # True if any "-" lines in hunk
self.text=[]
def originalText(self):
return("@@ -" + str(self.startsrc) +
"," + str(self.linessrc) +
" +" + str(self.starttgt) +
"," + str(self.linestgt) +
"\n" +
self.printableText())
def printableText(self):
"""Reformat text into printable text"""
# yeah, there must be a better way to do this.
printable = ""
for line in self.text:
printable += line
return printable
# def apply(self, estream):
# """ write hunk data into enumerable stream
# return strings one by one until hunk is
# over
#
# enumerable stream are tuples (lineno, line)
# where lineno starts with 0
# """
# pass
class Patch(object):
""" Patch for a single file """
def __init__(self):
self.source = None
self.target = None
self.hunks = []
self.hunkends = []
self.header = []
self.type = None
class PatchSet(object):
def __init__(self, stream=None):
# --- API accessible fields ---
# name of the PatchSet (filename or ...)
self.name = None
# patch set type - one of constants
self.type = None
# list of Patch objects
self.items = []
self.errors = 0 # fatal parsing errors
self.warnings = 0 # non-critical warnings
# --- /API ---
if stream:
self.parse(stream)
def __len__(self):
return len(self.items)
def parse(self, stream):
""" parse unified diff
return True on success
"""
lineends = dict(lf=0, crlf=0, cr=0)
nexthunkno = 0 #: even if index starts with 0 user messages number hunks from 1
p = None
hunk = None
# hunkactual variable is used to calculate hunk lines for comparison
hunkactual = dict(linessrc=None, linestgt=None)
class wrapumerate(enumerate):
"""Enumerate wrapper that uses boolean end of stream status instead of
StopIteration exception, and properties to access line information.
"""
def __init__(self, *args, **kwargs):
# we don't call parent, it is magically created by __new__ method
self._exhausted = False
self._lineno = False # after end of stream equal to the num of lines
self._line = False # will be reset to False after end of stream
def next(self):
"""Try to read the next line and return True if it is available,
False if end of stream is reached."""
if self._exhausted:
return False
try:
self._lineno, self._line = super(wrapumerate, self).next()
except StopIteration:
self._exhausted = True
self._line = False
return False
return True
@property
def is_empty(self):
return self._exhausted
@property
def line(self):
return self._line
@property
def lineno(self):
return self._lineno
# define states (possible file regions) that direct parse flow
headscan = True # start with scanning header
filenames = False # lines starting with --- and +++
hunkhead = False # @@ -R +R @@ sequence
hunkbody = False #
hunkskip = False # skipping invalid hunk mode
hunkparsed = False # state after successfully parsed hunk
# regexp to match start of hunk, used groups - 1,3,4,6
re_hunk_start = re.compile("^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
self.errors = 0
# temp buffers for header and filenames info
header = []
srcname = None
tgtname = None
# start of main cycle
# each parsing block already has line available in fe.line
fe = wrapumerate(stream)
while fe.next():
# -- deciders: these only switch state to decide who should process
# -- line fetched at the start of this cycle
if hunkparsed:
hunkparsed = False
if re_hunk_start.match(fe.line):
hunkhead = True
elif fe.line.startswith("--- "):
filenames = True
else:
headscan = True
# -- ------------------------------------
# read out header
if headscan:
while not fe.is_empty and not fe.line.startswith("--- "):
header.append(fe.line)
fe.next()
if fe.is_empty:
if p == None:
debug("no patch data found") # error is shown later
self.errors += 1
else:
info("%d unparsed bytes left at the end of stream" % len(''.join(header)))
self.warnings += 1
# TODO check for \No new line at the end..
# TODO test for unparsed bytes
# otherwise error += 1
# this is actually a loop exit
continue
headscan = False
# switch to filenames state
filenames = True
line = fe.line
lineno = fe.lineno
# hunkskip and hunkbody code skipped until definition of hunkhead is parsed
if hunkbody:
# [x] treat empty lines inside hunks as containing single space
# (this happens when diff is saved by copy/pasting to editor
# that strips trailing whitespace)
if line.strip("\r\n") == "":
debug("expanding empty line in a middle of hunk body")
self.warnings += 1
line = ' ' + line
# process line first
if re.match(r"^[- \+\\]", line):
# gather stats about line endings
if line.endswith("\r\n"):
p.hunkends["crlf"] += 1
elif line.endswith("\n"):
p.hunkends["lf"] += 1
elif line.endswith("\r"):
p.hunkends["cr"] += 1
if line.startswith("-"):
hunkactual["linessrc"] += 1
hunk.hasminus = True
elif line.startswith("+"):
hunkactual["linestgt"] += 1
hunk.hasplus = True
elif not line.startswith("\\"):
hunkactual["linessrc"] += 1
hunkactual["linestgt"] += 1
hunk.text.append(line)
# todo: handle \ No newline cases
else:
warning("invalid hunk no.%d at %d for target file %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
# check exit conditions
if hunkactual["linessrc"] > hunk.linessrc or hunkactual["linestgt"] > hunk.linestgt:
warning("extra lines for hunk no.%d at %d for target %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
elif hunk.linessrc == hunkactual["linessrc"] and hunk.linestgt == hunkactual["linestgt"]:
# hunk parsed successfully
p.hunks.append(hunk)
# switch to hunkparsed state
hunkbody = False
hunkparsed = True
# detect mixed window/unix line ends
ends = p.hunkends
if ((ends["cr"]!=0) + (ends["crlf"]!=0) + (ends["lf"]!=0)) > 1:
warning("inconsistent line ends in patch hunks for %s" % p.source)
self.warnings += 1
if debugmode:
debuglines = dict(ends)
debuglines.update(file=p.target, hunk=nexthunkno)
debug("crlf: %(crlf)d lf: %(lf)d cr: %(cr)d\t - file: %(file)s hunk: %(hunk)d" % debuglines)
# fetch next line
continue
if hunkskip:
if re_hunk_start.match(line):
# switch to hunkhead state
hunkskip = False
hunkhead = True
elif line.startswith("--- "):
# switch to filenames state
hunkskip = False
filenames = True
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
if filenames:
if line.startswith("--- "):
if srcname != None:
# XXX testcase
warning("skipping false patch for %s" % srcname)
srcname = None
# XXX header += srcname
# double source filename line is encountered
# attempt to restart from this second line
re_filename = "^--- ([^\t]+)"
match = re.match(re_filename, line)
# todo: support spaces in filenames
if match:
srcname = match.group(1).strip()
else:
warning("skipping invalid filename at line %d" % lineno)
self.errors += 1
# XXX p.header += line
# switch back to headscan state
filenames = False
headscan = True
elif not line.startswith("+++ "):
if srcname != None:
warning("skipping invalid patch with no target for %s" % srcname)
self.errors += 1
srcname = None
# XXX header += srcname
# XXX header += line
else:
# this should be unreachable
warning("skipping invalid target patch")
filenames = False
headscan = True
else:
if tgtname != None:
# XXX seems to be a dead branch
warning("skipping invalid patch - double target at line %d" % lineno)
self.errors += 1
srcname = None
tgtname = None
# XXX header += srcname
# XXX header += tgtname
# XXX header += line
# double target filename line is encountered
# switch back to headscan state
filenames = False
headscan = True
else:
re_filename = "^\+\+\+ ([^\t]+)"
match = re.match(re_filename, line)
if not match:
warning("skipping invalid patch - no target filename at line %d" % lineno)
self.errors += 1
srcname = None
# switch back to headscan state
filenames = False
headscan = True
else:
if p: # for the first run p is None
self.items.append(p)
p = Patch()
p.source = srcname
srcname = None
p.target = match.group(1).strip()
p.header = header
header = []
# switch to hunkhead state
filenames = False
hunkhead = True
nexthunkno = 0
p.hunkends = lineends.copy()
continue
if hunkhead:
match = re.match("^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?", line)
if not match:
if not p.hunks:
warning("skipping invalid patch with no hunks for file %s" % p.source)
self.errors += 1
# XXX review switch
# switch to headscan state
hunkhead = False
headscan = True
continue
else:
# TODO review condition case
# switch to headscan state
hunkhead = False
headscan = True
else:
hunk = Hunk()
hunk.startsrc = int(match.group(1))
hunk.linessrc = 1
if match.group(3): hunk.linessrc = int(match.group(3))
hunk.starttgt = int(match.group(4))
hunk.linestgt = 1
if match.group(6): hunk.linestgt = int(match.group(6))
hunk.invalid = False
hunk.text = []
hunkactual["linessrc"] = hunkactual["linestgt"] = 0
# switch to hunkbody state
hunkhead = False
hunkbody = True
nexthunkno += 1
continue
# /while fe.next()
if p:
self.items.append(p)
if not hunkparsed:
if hunkskip:
warning("warning: finished with errors, some hunks may be invalid")
elif headscan:
if len(self.items) == 0:
warning("error: no patch data found!")
return False
else: # extra data at the end of file
pass
else:
warning("error: patch stream is incomplete!")
self.errors += 1
if len(self.items) == 0:
return False
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
# XXX fix total hunks calculation
debug("total files: %d total hunks: %d" % (len(self.items),
sum(len(p.hunks) for p in self.items)))
# ---- detect patch and patchset types ----
for idx, p in enumerate(self.items):
self.items[idx].type = self._detect_type(p)
types = set([p.type for p in self.items])
if len(types) > 1:
self.type = MIXED
else:
self.type = types.pop()
# --------
self._normalize_filenames()
return (self.errors == 0)
def _detect_type(self, p):
""" detect and return type for the specified Patch object
analyzes header and filenames info
NOTE: must be run before filenames are normalized
"""
# check for SVN
# - header starts with Index:
# - next line is ===... delimiter
# - filename is followed by revision number
# TODO add SVN revision
if (len(p.header) > 1 and p.header[-2].startswith("Index: ")
and p.header[-1].startswith("="*67)):
return SVN
# common checks for both HG and GIT
DVCS = ((p.source.startswith('a/') or p.source == '/dev/null')
and (p.target.startswith('b/') or p.target == '/dev/null'))
# GIT type check
# - header[-2] is like "diff --git a/oldname b/newname"
# - header[-1] is like "index <hash>..<hash> <mode>"
# TODO add git rename diffs and add/remove diffs
# add git diff with spaced filename
# TODO http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
# detect the start of diff header - there might be some comments before
if len(p.header) > 1:
for idx in reversed(range(len(p.header))):
if p.header[idx].startswith("diff --git"):
break
if re.match(r'diff --git a/[\w/.]+ b/[\w/.]+', p.header[idx]):
if (idx+1 < len(p.header)
and re.match(r'index \w{7}..\w{7} \d{6}', p.header[idx+1])):
if DVCS:
return GIT
# HG check
#
# - for plain HG format header is like "diff -r b2d9961ff1f5 filename"
# - for Git-style HG patches it is "diff --git a/oldname b/newname"
# - filename starts with a/, b/ or is equal to /dev/null
# - exported changesets also contain the header
# # HG changeset patch
# # User name@example.com
# ...
# TODO add MQ
# TODO add revision info
if len(p.header) > 0:
if DVCS and re.match(r'diff -r \w{12} .*', p.header[-1]):
return HG
if DVCS and p.header[-1].startswith('diff --git a/'):
if len(p.header) == 1: # native Git patch header len is 2
return HG
elif p.header[0].startswith('# HG changeset patch'):
return HG
return PLAIN
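# Illustrative header shapes for each detected type (paths, hashes and
# revisions below are made up, not taken from real patches):
#
#   SVN:   "Index: lib/util.py" followed by a row of '=' characters
#   GIT:   "diff --git a/lib/util.py b/lib/util.py" followed by
#          "index 1234567..89abcde 100644", with a/ and b/ filename prefixes
#   HG:    "diff -r 0123456789ab lib/util.py", or a Git-style header whose
#          first line is "# HG changeset patch"
#   PLAIN: anything that matches none of the patterns above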
def _normalize_filenames(self):
""" sanitize filenames, normalizing paths, i.e.:
1. strip a/ and b/ prefixes from GIT and HG style patches
2. remove all references to parent directories (with warning)
3. translate any absolute paths to relative (with warning)
[x] always use forward slashes to be crossplatform
(diff/patch were born as a unix utility after all)
return None
"""
for i,p in enumerate(self.items):
if p.type in (HG, GIT):
# TODO: figure out how to deal with /dev/null entries
debug("stripping a/ and b/ prefixes")
if p.source != '/dev/null':
if not p.source.startswith("a/"):
warning("invalid source filename")
else:
p.source = p.source[2:]
if p.target != '/dev/null':
if not p.target.startswith("b/"):
warning("invalid target filename")
else:
p.target = p.target[2:]
p.source = xnormpath(p.source)
p.target = xnormpath(p.target)
sep = '/' # sep value can be hardcoded, but it looks nice this way
# references to parent are not allowed
if p.source.startswith(".." + sep):
warning("error: stripping parent path for source file patch no.%d" % (i+1))
self.warnings += 1
while p.source.startswith(".." + sep):
p.source = p.source.partition(sep)[2]
if p.target.startswith(".." + sep):
warning("error: stripping parent path for target file patch no.%d" % (i+1))
self.warnings += 1
while p.target.startswith(".." + sep):
p.target = p.target.partition(sep)[2]
# absolute paths are not allowed
if xisabs(p.source) or xisabs(p.target):
warning("error: absolute paths are not allowed - file no.%d" % (i+1))
self.warnings += 1
if xisabs(p.source):
warning("stripping absolute path from source name '%s'" % p.source)
p.source = xstrip(p.source)
if xisabs(p.target):
warning("stripping absolute path from target name '%s'" % p.target)
p.target = xstrip(p.target)
self.items[i].source = p.source
self.items[i].target = p.target
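# Illustrative example (paths are made up): for a GIT/HG item with source
# 'a/src/util.py' and target 'b/src/util.py' the prefixes are stripped,
# leaving 'src/util.py'; '../../etc/config' loses its parent references and
# '/usr/local/foo.c' its leading slash, each with a warning.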
def diffstat(self):
""" calculate diffstat and return as a string
Notes:
- original diffstat outputs the target filename
- single + or - shouldn't escape histogram
"""
names = []
insert = []
delete = []
namelen = 0
maxdiff = 0 # max number of changes for single file
# (for histogram width calculation)
for patch in self.items:
i,d = 0,0
for hunk in patch.hunks:
for line in hunk.text:
if line.startswith('+'):
i += 1
elif line.startswith('-'):
d += 1
names.append(patch.target)
insert.append(i)
delete.append(d)
namelen = max(namelen, len(patch.target))
maxdiff = max(maxdiff, i+d)
output = ''
statlen = len(str(maxdiff)) # stats column width
for i,n in enumerate(names):
# %-19s | %-4d %s
format = " %-" + str(namelen) + "s | %" + str(statlen) + "s %s\n"
hist = ''
# -- calculating histogram --
width = len(format % ('', '', ''))
histwidth = max(2, 80 - width)
if maxdiff < histwidth:
hist = "+"*insert[i] + "-"*delete[i]
else:
iratio = (float(insert[i]) / maxdiff) * histwidth
dratio = (float(delete[i]) / maxdiff) * histwidth
# make sure every entry gets at least one + or -
iwidth = 1 if 0 < iratio < 1 else int(iratio)
dwidth = 1 if 0 < dratio < 1 else int(dratio)
#print iratio, dratio, iwidth, dwidth, histwidth
hist = "+"*int(iwidth) + "-"*int(dwidth)
# -- /calculating +- histogram --
output += (format % (names[i], insert[i] + delete[i], hist))
output += (" %d files changed, %d insertions(+), %d deletions(-)"
% (len(names), sum(insert), sum(delete)))
return output
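# A diffstat() result looks roughly like this (file names and counts are
# illustrative only):
#
#    README.txt  |  3 ++-
#    src/util.py | 12 +++++++-----
#    2 files changed, 10 insertions(+), 5 deletions(-)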
def apply(self, strip=0):
""" apply parsed patch
return True on success
"""
total = len(self.items)
errors = 0
if strip:
# [ ] test strip level exceeds nesting level
# [ ] test the same only for selected files
# [ ] test if files end up being on the same level
try:
strip = int(strip)
except ValueError:
errors += 1
warning("error: strip parameter '%s' must be an integer" % strip)
strip = 0
#for fileno, filename in enumerate(self.source):
for i,p in enumerate(self.items):
f2patch = p.source
if strip:
debug("stripping %s leading component from '%s'" % (strip, f2patch))
f2patch = pathstrip(f2patch, strip)
if not exists(f2patch):
f2patch = p.target
if strip:
debug("stripping %s leading component from '%s'" % (strip, f2patch))
f2patch = pathstrip(f2patch, strip)
if not exists(f2patch):
warning("source/target file does not exist\n--- %s\n+++ %s" % (p.source, f2patch))
errors += 1
continue
if not isfile(f2patch):
warning("not a file - %s" % f2patch)
errors += 1
continue
filename = f2patch
debug("processing %d/%d:\t %s" % (i+1, total, filename))
# validate before patching
f2fp = open(filename)
hunkno = 0
hunk = p.hunks[hunkno]
hunkfind = []
hunkreplace = []
validhunks = 0
canpatch = False
for lineno, line in enumerate(f2fp):
if lineno+1 < hunk.startsrc:
continue
elif lineno+1 == hunk.startsrc:
hunkfind = [x[1:].rstrip("\r\n") for x in hunk.text if x[0] in " -"]
hunkreplace = [x[1:].rstrip("\r\n") for x in hunk.text if x[0] in " +"]
#pprint(hunkreplace)
hunklineno = 0
# todo \ No newline at end of file
# check hunks in source file
if lineno+1 < hunk.startsrc+len(hunkfind)-1:
if line.rstrip("\r\n") == hunkfind[hunklineno]:
hunklineno+=1
else:
info("file %d/%d:\t %s" % (i+1, total, filename))
info(" hunk no.%d doesn't match source file at line %d" % (hunkno+1, lineno))
info(" expected: %s" % hunkfind[hunklineno])
info(" actual : %s" % line.rstrip("\r\n"))
# not counting this as an error, because the file may already be patched.
# the check whether the file is already patched is done after the number of
# invalid hunks is known
# TODO: check hunks against source/target file in one pass
# API - check(stream, srchunks, tgthunks)
# return tuple (srcerrs, tgterrs)
# continue to check other hunks for completeness
hunkno += 1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
continue
else:
break
# check if processed line is the last line
if lineno+1 == hunk.startsrc+len(hunkfind)-1:
debug(" hunk no.%d for file %s -- is ready to be patched" % (hunkno+1, filename))
hunkno+=1
validhunks+=1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
else:
if validhunks == len(p.hunks):
# patch file
canpatch = True
break
else:
if hunkno < len(p.hunks):
warning("premature end of source file %s at hunk %d" % (filename, hunkno+1))
errors += 1
f2fp.close()
if validhunks < len(p.hunks):
if self._match_file_hunks(filename, p.hunks):
warning("already patched %s" % filename)
else:
warning("source file is different - %s" % filename)
errors += 1
if canpatch:
backupname = filename+".orig"
if exists(backupname):
warning("can't backup original file to %s - aborting" % backupname)
else:
import shutil
shutil.move(filename, backupname)
if self.write_hunks(backupname, filename, p.hunks):
info("successfully patched %d/%d:\t %s" % (i+1, total, filename))
os.unlink(backupname)
else:
errors += 1
warning("error patching file %s" % filename)
shutil.copy(filename, filename+".invalid")
warning("invalid version is saved to %s" % filename+".invalid")
# todo: proper rejects
shutil.move(backupname, filename)
# todo: check for premature eof
return (errors == 0)
def can_patch(self, filename):
""" Check if specified filename can be patched. Returns None if file can
not be found among source filenames. False if patch can not be applied
clearly. True otherwise.
:returns: True, False or None
"""
filename = abspath(filename)
for p in self.items:
if filename == abspath(p.source):
return self._match_file_hunks(filename, p.hunks)
return None
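# Hypothetical usage sketch (file names are illustrative):
#
#   ps = fromfile("fix.diff")
#   if ps and ps.can_patch("src/module.py") is False:
#       warning("src/module.py does not match the patch source")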
def _match_file_hunks(self, filepath, hunks):
matched = True
fp = open(abspath(filepath))
class NoMatch(Exception):
pass
lineno = 1
line = fp.readline()
hno = None
try:
for hno, h in enumerate(hunks):
# skip to first line of the hunk
while lineno < h.starttgt:
if not len(line): # eof
debug("check failed - premature eof before hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
for hline in h.text:
if hline.startswith("-"):
continue
if not len(line):
debug("check failed - premature eof on hunk: %d" % (hno+1))
# todo: \ No newline at the end of file
raise NoMatch
if line.rstrip("\r\n") != hline[1:].rstrip("\r\n"):
debug("file is not patched - failed hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
except NoMatch:
matched = False
# todo: display failed hunk, i.e. expected/found
fp.close()
return matched
def patch_stream(self, instream, hunks):
""" Generator that yields stream patched with hunks iterable
Converts lineends in hunk lines to the best suitable format
autodetected from input
"""
# todo: At the moment substituted lineends may not be the same
# at the start and at the end of patching. Also issue a
# warning/throw about mixed lineends (is it really needed?)
hunks = iter(hunks)
srclineno = 1
lineends = {'\n':0, '\r\n':0, '\r':0}
def get_line():
"""
local utility function - return line from source stream
collecting line end statistics on the way
"""
line = instream.readline()
# 'U' mode works only with text files
if line.endswith("\r\n"):
lineends["\r\n"] += 1
elif line.endswith("\n"):
lineends["\n"] += 1
elif line.endswith("\r"):
lineends["\r"] += 1
return line
for hno, h in enumerate(hunks):
debug("hunk %d" % (hno+1))
if h.hasminus:
warning("Change removes/replaces some text; INVESTIGATE AND APPLY (OR NOT) MANUALLY")
warning("Change:")
changeText = h.originalText()
if len(changeText) > 1000:
changeText = changeText[0:999] + "...\n"
warning(changeText)
else:
# skip to line just before hunk starts
while srclineno < h.startsrc:
yield get_line()
srclineno += 1
for hline in h.text:
# todo: check \ No newline at the end of file
if hline.startswith("-") or hline.startswith("\\"):
get_line()
srclineno += 1
continue
else:
if not hline.startswith("+"):
get_line()
srclineno += 1
line2write = hline[1:]
# detect if line ends are consistent in source file
if sum([bool(lineends[x]) for x in lineends]) == 1:
newline = [x for x in lineends if lineends[x] != 0][0]
yield line2write.rstrip("\r\n")+newline
else: # newlines are mixed
yield line2write
for line in instream:
yield line
def write_hunks(self, srcname, tgtname, hunks):
src = open(srcname, "rb")
tgt = open(tgtname, "wb")
debug("processing target file %s" % tgtname)
tgt.writelines(self.patch_stream(src, hunks))
tgt.close()
src.close()
# [ ] TODO: add test for permission copy
shutil.copymode(srcname, tgtname)
return True
if __name__ == "__main__":
from optparse import OptionParser
from os.path import exists
import sys
opt = OptionParser(usage="1. %prog [options] unified.diff\n"
" 2. %prog [options] http://host/patch\n"
" 3. %prog [options] -- < unified.diff",
version="python-patch %s" % __version__)
opt.add_option("-q", "--quiet", action="store_const", dest="verbosity",
const=0, help="print only warnings and errors", default=1)
opt.add_option("-v", "--verbose", action="store_const", dest="verbosity",
const=2, help="be verbose")
opt.add_option("--debug", action="store_true", dest="debugmode", help="debug mode")
opt.add_option("--diffstat", action="store_true", dest="diffstat",
help="print diffstat and exit")
opt.add_option("-p", "--strip", type="int", metavar='N', default=0,
help="strip N path components from filenames")
(options, args) = opt.parse_args()
if not args and sys.argv[-1:] != ['--']:
opt.print_version()
opt.print_help()
sys.exit()
readstdin = (sys.argv[-1:] == ['--'] and not args)
debugmode = options.debugmode
verbosity_levels = {0:logging.WARNING, 1:logging.INFO, 2:logging.DEBUG}
loglevel = verbosity_levels[options.verbosity]
logformat = "%(message)s"
if debugmode:
loglevel = logging.DEBUG
logformat = "%(levelname)8s %(message)s"
logger.setLevel(loglevel)
loghandler = logging.StreamHandler()
loghandler.setFormatter(logging.Formatter(logformat))
logger.addHandler(loghandler)
if readstdin:
patch = PatchSet(sys.stdin)
else:
patchfile = args[0]
urltest = patchfile.split(':')[0]
if (':' in patchfile and urltest.isalpha()
and len(urltest) > 1): # one char before : is a windows drive letter
patch = fromurl(patchfile)
else:
if not exists(patchfile) or not isfile(patchfile):
sys.exit("patch file does not exist - %s" % patchfile)
patch = fromfile(patchfile)
if options.diffstat:
print patch.diffstat()
sys.exit(0)
#pprint(patch)
patch.apply(options.strip) or sys.exit(-1)
# todo: document and test line ends handling logic - patch.py detects proper line-endings
# for inserted hunks and issues a warning if patched file has inconsistent line ends
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/doc/patch.py
|
Python
|
gpl-3.0
| 34,815
|
[
"Galaxy"
] |
8e0df9aa3a996f0a6ecc8ac56e476da5626385a8571385fbbaaac81181b89cca
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkBlankStructuredGrid(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkBlankStructuredGrid(), 'Processing.',
('vtkStructuredGrid',), ('vtkStructuredGrid',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkBlankStructuredGrid.py
|
Python
|
bsd-3-clause
| 511
|
[
"VTK"
] |
8ffa2166548caebb2e90200eabdfc03a3290d269f5f8dd1f2433e1a6f6c90402
|
import serial
import inspect
import time
import itertools
from util import two_byte_iter_to_str, to_two_bytes
# Message command bytes - straight from Firmata.h
DIGITAL_MESSAGE = 0x90 # send data for a digital pin
ANALOG_MESSAGE = 0xE0 # send data for an analog pin (or PWM)
DIGITAL_PULSE = 0x91 # SysEx command to send a digital pulse
# PULSE_MESSAGE = 0xA0 # proposed pulseIn/Out msg (SysEx)
# SHIFTOUT_MESSAGE = 0xB0 # proposed shiftOut msg (SysEx)
REPORT_ANALOG = 0xC0 # enable analog input by pin #
REPORT_DIGITAL = 0xD0 # enable digital input by port pair
START_SYSEX = 0xF0 # start a MIDI SysEx msg
SET_PIN_MODE = 0xF4 # set a pin to INPUT/OUTPUT/PWM/etc
END_SYSEX = 0xF7 # end a MIDI SysEx msg
REPORT_VERSION = 0xF9 # report firmware version
SYSTEM_RESET = 0xFF # reset from MIDI
QUERY_FIRMWARE = 0x79 # query the firmware name
# extended command set using sysex (0-127/0x00-0x7F)
# 0x00-0x0F reserved for user-defined commands */
SERVO_CONFIG = 0x70 # set max angle, minPulse, maxPulse, freq
STRING_DATA = 0x71 # a string message with 14-bits per char
SHIFT_DATA = 0x75 # a bitstream to/from a shift register
I2C_REQUEST = 0x76 # send an I2C read/write request
I2C_REPLY = 0x77 # a reply to an I2C read request
I2C_CONFIG = 0x78 # config I2C settings such as delay times and power pins
REPORT_FIRMWARE = 0x79 # report name and version of the firmware
SAMPLING_INTERVAL = 0x7A # set the poll rate of the main loop
SYSEX_NON_REALTIME = 0x7E # MIDI Reserved for non-realtime messages
SYSEX_REALTIME = 0x7F # MIDI Reserved for realtime messages
# Pin modes.
# all taken from Firmata.h, except for UNAVAILABLE
UNAVAILABLE = -1
INPUT = 0 # as defined in wiring.h
OUTPUT = 1 # as defined in wiring.h
ANALOG = 2 # analog pin in analogInput mode
PWM = 3 # digital pin in PWM output mode
SERVO = 4 # digital pin in SERVO mode
# Pin types
DIGITAL = OUTPUT # same as OUTPUT above
# ANALOG is already defined above
# Time to wait after initializing serial, used in Board.__init__
BOARD_SETUP_WAIT_TIME = 5
class PinAlreadyTakenError(Exception):
pass
class InvalidPinDefError(Exception):
pass
class NoInputWarning(RuntimeWarning):
pass
class Board(object):
"""The Base class for any board."""
firmata_version = None
firmware = None
firmware_version = None
_command_handlers = {}
_command = None
_stored_data = []
_parsing_sysex = False
def __init__(self, port, layout, baudrate=57600, name=None):
self.sp = serial.Serial(port, baudrate)
# Allow 5 secs for Arduino's auto-reset to happen
# Alas, Firmata blinks its version before printing it to serial
# For 2.3, even 5 seconds might not be enough.
# TODO Find a more reliable way to wait until the board is ready
self.pass_time(BOARD_SETUP_WAIT_TIME)
self.name = name
if not self.name:
self.name = port
self.setup_layout(layout)
# Iterate over the first messages to get firmware data
while self.bytes_available():
self.iterate()
# TODO Test whether we got a firmware name and version, otherwise there
# probably isn't any Firmata installed
def __str__(self):
return "Board %s on %s" % (self.name, self.sp.port)
def __del__(self):
"""
The connection with a board can get messed up when a script is
closed without calling board.exit() (which closes the serial
connection). Therefore also do it here and hope it helps.
"""
self.exit()
def send_as_two_bytes(self, val):
self.sp.write(chr(val % 128) + chr(val >> 7))
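# Worked example (the value is chosen for illustration): val = 1000 sends
# chr(1000 % 128) = chr(104) followed by chr(1000 >> 7) = chr(7); the
# receiver reconstructs 104 + (7 << 7) = 1000 from the two 7-bit bytes.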
def setup_layout(self, board_layout):
"""
Setup the Pin instances based on the given board layout. Maybe it will
be possible to do this automatically in the future, by polling the
board for its type.
"""
# Create pin instances based on board layout
self.analog = []
for i in board_layout['analog']:
self.analog.append(Pin(self, i))
self.digital = []
self.digital_ports = []
for i in xrange(0, len(board_layout['digital']), 8):
num_pins = len(board_layout['digital'][i:i+8])
port_number = i / 8
self.digital_ports.append(Port(self, port_number, num_pins))
# Allow to access the Pin instances directly
for port in self.digital_ports:
self.digital += port.pins
# Setup PWM pins
for i in board_layout['pwm']:
self.digital[i].PWM_CAPABLE = True
# Disable certain ports like Rx/Tx and crystal ports
for i in board_layout['disabled']:
self.digital[i].mode = UNAVAILABLE
# Create a dictionary of 'taken' pins. Used by the get_pin method
self.taken = { 'analog' : dict(map(lambda p: (p.pin_number, False), self.analog)),
'digital' : dict(map(lambda p: (p.pin_number, False), self.digital)) }
# Setup default handlers for standard incoming commands
self.add_cmd_handler(ANALOG_MESSAGE, self._handle_analog_message)
self.add_cmd_handler(DIGITAL_MESSAGE, self._handle_digital_message)
self.add_cmd_handler(REPORT_VERSION, self._handle_report_version)
self.add_cmd_handler(REPORT_FIRMWARE, self._handle_report_firmware)
def add_cmd_handler(self, cmd, func):
"""Adds a command handler for a command."""
len_args = len(inspect.getargspec(func)[0])
def add_meta(f):
def decorator(*args, **kwargs):
f(*args, **kwargs)
decorator.bytes_needed = len_args - 1 # exclude self
decorator.__name__ = f.__name__
return decorator
func = add_meta(func)
self._command_handlers[cmd] = func
def get_pin(self, pin_def):
"""
Returns the activated pin given by the pin definition.
May raise an ``InvalidPinDefError`` or a ``PinAlreadyTakenError``.
:arg pin_def: Pin definition as described below,
but without the arduino name. So for example ``a:1:i``.
'a' analog pin Pin number 'i' for input
'd' digital pin Pin number 'o' for output
'p' for pwm (Pulse-width modulation)
All separated by ``:``.
"""
if type(pin_def) == list:
bits = pin_def
else:
bits = pin_def.split(':')
a_d = bits[0] == 'a' and 'analog' or 'digital'
part = getattr(self, a_d)
pin_nr = int(bits[1])
if pin_nr >= len(part):
raise InvalidPinDefError('Invalid pin definition: %s at position 3 on %s' % (pin_def, self.name))
if getattr(part[pin_nr], 'mode', None) == UNAVAILABLE:
raise InvalidPinDefError('Invalid pin definition: UNAVAILABLE pin %s on %s' % (pin_def, self.name))
if self.taken[a_d][pin_nr]:
raise PinAlreadyTakenError('%s pin %s is already taken on %s' % (a_d, bits[1], self.name))
# ok, should be available
pin = part[pin_nr]
self.taken[a_d][pin_nr] = True
if pin.type is DIGITAL:
if bits[2] == 'p':
pin.mode = PWM
elif bits[2] == 's':
pin.mode = SERVO
elif bits[2] != 'o':
pin.mode = INPUT
else:
pin.enable_reporting()
return pin
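# Hypothetical usage (port name and pin numbers are illustrative; 'layout'
# stands for a board layout dict as described in setup_layout):
#
#   board = Board('/dev/ttyUSB0', layout)
#   a0  = board.get_pin('a:0:i')   # analog pin 0, reporting input values
#   led = board.get_pin('d:13:o')  # digital pin 13 as plain output
#   pwm = board.get_pin('d:9:p')   # digital pin 9 in PWM mode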
def pass_time(self, t):
"""Non-blocking time-out for ``t`` seconds."""
cont = time.time() + t
while time.time() < cont:
time.sleep(0)
def send_sysex(self, sysex_cmd, data=[]):
"""
Sends a SysEx msg.
:arg sysex_cmd: A sysex command byte
:arg data: A list of 7-bit bytes of arbitrary data (bytes may be
already converted to chr's)
"""
self.sp.write(chr(START_SYSEX))
self.sp.write(chr(sysex_cmd))
for byte in data:
try:
byte = chr(byte)
except TypeError:
pass # byte is already a chr
except ValueError:
raise ValueError('Sysex data can be 7-bit bytes only. '
'Consider using utils.to_two_bytes for bigger bytes.')
self.sp.write(byte)
self.sp.write(chr(END_SYSEX))
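# Hypothetical usage (the interval value is illustrative): ask the firmware
# to report analog values every 100 ms, encoding 100 as two 7-bit bytes.
#
#   board.send_sysex(SAMPLING_INTERVAL, to_two_bytes(100))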
def bytes_available(self):
return self.sp.inWaiting()
def iterate(self):
"""
Reads and handles data from the microcontroller over the serial port.
This method should be called in a main loop or in an :class:`Iterator`
instance to keep this board's pin values up to date.
"""
byte = self.sp.read()
if not byte:
return
data = ord(byte)
received_data = []
handler = None
if data < START_SYSEX:
# These commands can have 'channel data' like a pin number appended.
try:
handler = self._command_handlers[data & 0xF0]
except KeyError:
return
received_data.append(data & 0x0F)
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
elif data == START_SYSEX:
data = ord(self.sp.read())
handler = self._command_handlers.get(data)
if not handler:
return
data = ord(self.sp.read())
while data != END_SYSEX:
received_data.append(data)
data = ord(self.sp.read())
else:
try:
handler = self._command_handlers[data]
except KeyError:
return
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
# Handle the data
try:
handler(*received_data)
except ValueError:
pass
def get_firmata_version(self):
"""
Returns a version tuple (major, minor) for the firmata firmware on the
board.
"""
return self.firmata_version
def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):
"""
Configure a pin as servo with min_pulse, max_pulse and first angle.
``min_pulse`` and ``max_pulse`` default to the arduino defaults.
"""
if pin >= len(self.digital) or self.digital[pin].mode == UNAVAILABLE:
raise IOError("Pin %s is not a valid servo pin" % pin)
data = itertools.chain([pin], to_two_bytes(min_pulse),
to_two_bytes(max_pulse))
self.send_sysex(SERVO_CONFIG, data)
# set pin._mode to SERVO so that it sends analog messages
# don't set pin.mode as that calls this method
self.digital[pin]._mode = SERVO
self.digital[pin].write(angle)
def exit(self):
"""Call this to exit cleanly."""
# First detach all servos, otherwise it somehow doesn't want to close...
if hasattr(self, 'digital'):
for pin in self.digital:
if pin.mode == SERVO:
pin.mode = OUTPUT
if hasattr(self, 'sp'):
self.sp.close()
# Command handlers
def _handle_analog_message(self, pin_nr, lsb, msb):
value = round(float((msb << 7) + lsb) / 1023, 4)
# Only set the value if we are actually reporting
try:
if self.analog[pin_nr].reporting:
self.analog[pin_nr].value = value
except IndexError:
raise ValueError
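# Worked example (readings are illustrative): for a full-scale 10-bit ADC
# reading Firmata sends lsb = 127, msb = 7, so (7 << 7) + 127 = 1023 and the
# stored value is 1023 / 1023 = 1.0; a mid-scale reading of 512 arrives as
# lsb = 0, msb = 4 and is stored as roughly 0.5005.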
def _handle_digital_message(self, port_nr, lsb, msb):
"""
Digital messages always go by the whole port. This means we get a
bitmask with which we update the whole port.
"""
mask = (msb << 7) + lsb
try:
self.digital_ports[port_nr]._update(mask)
except IndexError:
raise ValueError
def _handle_report_version(self, major, minor):
self.firmata_version = (major, minor)
def _handle_report_firmware(self, *data):
major = data[0]
minor = data[1]
self.firmware_version = (major, minor)
self.firmware = two_byte_iter_to_str(data[2:])
class Port(object):
"""An 8-bit port on the board."""
def __init__(self, board, port_number, num_pins=8):
self.board = board
self.port_number = port_number
self.reporting = False
self.pins = []
for i in range(num_pins):
pin_nr = i + self.port_number * 8
self.pins.append(Pin(self.board, pin_nr, type=DIGITAL, port=self))
def __str__(self):
return "Digital Port %i on %s" % (self.port_number, self.board)
def enable_reporting(self):
"""Enable reporting of values for the whole port."""
self.reporting = True
msg = chr(REPORT_DIGITAL + self.port_number)
msg += chr(1)
self.board.sp.write(msg)
for pin in self.pins:
if pin.mode == INPUT:
pin.reporting = True # TODO Shouldn't this happen at the pin?
def disable_reporting(self):
"""Disable the reporting of the port."""
self.reporting = False
msg = chr(REPORT_DIGITAL + self.port_number)
msg += chr(0)
self.board.sp.write(msg)
def write(self):
"""Set the output pins of the port to the correct state."""
mask = 0
for pin in self.pins:
if pin.mode == OUTPUT:
if pin.value == 1:
pin_nr = pin.pin_number - self.port_number * 8
mask |= 1 << pin_nr
msg = chr(DIGITAL_MESSAGE + self.port_number)
msg += chr(mask % 128)
msg += chr(mask >> 7)
self.board.sp.write(msg)
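# Worked example (pin numbers are illustrative): on port 1 (pins 8-15), if
# only pin 13 is an OUTPUT set to 1, pin_nr = 13 - 8 = 5 and mask = 1 << 5 = 32,
# so the message sent is chr(DIGITAL_MESSAGE + 1), chr(32), chr(0).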
def _update(self, mask):
"""Update the values for the pins marked as input with the mask."""
if self.reporting:
for pin in self.pins:
if pin.mode is INPUT:
pin_nr = pin.pin_number - self.port_number * 8
pin.value = (mask & (1 << pin_nr)) > 0
class Pin(object):
"""A Pin representation"""
def __init__(self, board, pin_number, type=ANALOG, port=None):
self.board = board
self.pin_number = pin_number
self.type = type
self.port = port
self.PWM_CAPABLE = False
self._mode = (type == DIGITAL and OUTPUT or INPUT)
self.reporting = False
self.value = None
def __str__(self):
type = {ANALOG : 'Analog', DIGITAL : 'Digital'}[self.type]
return "%s pin %d" % (type, self.pin_number)
def _set_mode(self, mode):
if mode is UNAVAILABLE:
self._mode = UNAVAILABLE
return
if self._mode is UNAVAILABLE:
raise IOError("%s can not be used through Firmata." % self)
if mode is PWM and not self.PWM_CAPABLE:
raise IOError("%s does not have PWM capabilities." % self)
if mode == SERVO:
if self.type != DIGITAL:
raise IOError("Only digital pins can drive servos! %s is not"
"digital." % self)
self._mode = SERVO
self.board.servo_config(self.pin_number)
return
# Set mode with SET_PIN_MODE message
self._mode = mode
command = chr(SET_PIN_MODE)
command += chr(self.pin_number)
command += chr(mode)
self.board.sp.write(command)
if mode == INPUT:
self.enable_reporting()
def _get_mode(self):
return self._mode
mode = property(_get_mode, _set_mode)
"""
Mode of operation for the pin. Can be one of the pin modes: INPUT, OUTPUT,
ANALOG, PWM, or SERVO (or UNAVAILABLE).
"""
def enable_reporting(self):
"""Set an input pin to report values."""
if self.mode is not INPUT:
raise IOError, "%s is not an input and can therefore not report" % self
if self.type == ANALOG:
self.reporting = True
msg = chr(REPORT_ANALOG + self.pin_number)
msg += chr(1)
self.board.sp.write(msg)
else:
self.port.enable_reporting() # TODO This is not going to work for non-optimized boards like Mega
def disable_reporting(self):
"""Disable the reporting of an input pin."""
if self.type == ANALOG:
self.reporting = False
msg = chr(REPORT_ANALOG + self.pin_number)
msg += chr(0)
self.board.sp.write(msg)
else:
self.port.disable_reporting() # TODO This is not going to work for non-optimized boards like Mega
def read(self):
"""
Returns the output value of the pin. This value is updated by the
boards :meth:`Board.iterate` method. Value is always in the range from
0.0 to 1.0.
"""
if self.mode == UNAVAILABLE:
raise IOError, "Cannot read pin %s"% self.__str__()
return self.value
def write(self, value):
"""
Output a voltage from the pin
:arg value: Uses value as a boolean if the pin is in output mode, or
expects a float from 0 to 1 if the pin is in PWM mode. If the pin
is in SERVO the value should be in degrees.
"""
if self.mode is UNAVAILABLE:
raise IOError, "%s can not be used through Firmata." % self
if self.mode is INPUT:
raise IOError, "%s is set up as an INPUT and can therefore not be written to" % self
if value is not self.value:
self.value = value
if self.mode is OUTPUT:
if self.port:
self.port.write()
else:
msg = chr(DIGITAL_MESSAGE)
msg += chr(self.pin_number)
msg += chr(value)
self.board.sp.write(msg)
elif self.mode is PWM:
value = int(round(value * 255))
msg = chr(ANALOG_MESSAGE + self.pin_number)
msg += chr(value % 128)
msg += chr(value >> 7)
self.board.sp.write(msg)
elif self.mode is SERVO:
value = int(value)
msg = chr(ANALOG_MESSAGE + self.pin_number)
msg += chr(value % 128)
msg += chr(value >> 7)
self.board.sp.write(msg)
|
nvazquez/Turtlebots
|
plugins/rodi/pyfirmata/pyfirmata.py
|
Python
|
mit
| 18,699
|
[
"CRYSTAL"
] |
1581b7244b16fddfd7c8d34702a5f9bf0d43aa877a9ac6c4cecf4360b5ab5172
|
#!/usr/bin/python
# -*- coding: utf-8
# Copyright (C) 2010 - 2012, A. Murat Eren
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
import os
import sys
import copy
import shutil
import pickle
import logging
import itertools
import math
import Oligotyping as o
from Oligotyping.utils import utils
from Oligotyping.utils import blast
from Oligotyping.utils.random_colors import random_colors
from Oligotyping.utils.random_colors import get_color_shade_dict_for_list_of_values
from Oligotyping.lib import fastalib as u
from Oligotyping.lib.shared import generate_default_figures
from Oligotyping.lib.shared import generate_exclusive_figures
from Oligotyping.visualization.frequency_curve_and_entropy import vis_freq_curve
from Oligotyping.visualization.oligotype_sets_distribution import vis_oligotype_sets_distribution
from Oligotyping.visualization.oligotype_distribution_stack_bar import oligotype_distribution_stack_bar
from Oligotyping.visualization.oligotype_distribution_across_samples import oligotype_distribution_across_samples
from functools import reduce
class Oligotyping:
def __init__(self, args = None):
self.analysis = 'oligotyping'
self.entropy = None
self.alignment = None
self.quals_dict = None
self.min_base_quality = None
self.number_of_auto_components = 5
self.selected_components = None
self.limit_oligotypes_to = None
self.exclude_oligotypes = None
self.min_number_of_samples = 1
self.min_percent_abundance = 0.0
self.min_actual_abundance = 0
self.min_substantive_abundance = 4
self.project = None
self.output_directory = None
self.sample_name_separator = '_'
self.limit_representative_sequences = sys.maxsize
self.quick = False
self.no_figures = False
self.no_display = False
self.keep_tmp = False
self.blast_ref_db = None
self.do_blast_search = False
self.skip_gen_html = False
self.colors_list_file = None
self.generate_sets = False
self.cosine_similarity_threshold = 0.1
self.sample_mapping = None
self.log_file_path = None
self.skip_check_input_file = False
self.skip_basic_analyses = False
self.skip_gexf_network_file = False
self.no_threading = False
self.number_of_threads = None
Absolute = lambda x: os.path.join(os.getcwd(), x) if not x.startswith('/') else x
if args:
self.entropy = Absolute(args.entropy)
self.alignment = Absolute(args.alignment)
self.quals_dict = utils.process_command_line_args_for_quality_files(args, _return = 'quals_dict')
self.min_base_quality = args.min_base_quality
self.number_of_auto_components = args.number_of_auto_components
self.selected_components = args.selected_components
self.limit_oligotypes_to = args.limit_oligotypes_to
self.exclude_oligotypes = args.exclude_oligotypes
self.min_number_of_samples = args.min_number_of_samples
self.min_percent_abundance = args.min_percent_abundance
self.min_actual_abundance = args.min_actual_abundance
self.min_substantive_abundance = args.min_substantive_abundance
self.project = args.project or os.path.basename(args.alignment).split('.')[0]
self.output_directory = args.output_directory
self.sample_name_separator = args.sample_name_separator
self.limit_representative_sequences = args.limit_representative_sequences or sys.maxsize
self.quick = args.quick
self.no_figures = args.no_figures
self.no_display = args.no_display
self.keep_tmp = args.keep_tmp
self.blast_ref_db = Absolute(args.blast_ref_db) if args.blast_ref_db else None
self.do_blast_search = args.do_blast_search
self.skip_gen_html = args.skip_gen_html
self.colors_list_file = args.colors_list_file
self.cosine_similarity_threshold = args.cosine_similarity_threshold
self.generate_sets = args.generate_sets
self.sample_mapping = args.sample_mapping
self.skip_check_input_file = args.skip_check_input_file
self.skip_basic_analyses = args.skip_basic_analyses
self.skip_gexf_network_file = args.skip_gexf_network_file
self.no_threading = args.no_threading
self.number_of_threads = args.number_of_threads
self.run = utils.Run()
self.progress = utils.Progress()
self.samples_dict = {}
self.sample_mapping_dict = {}
self.excluded_read_ids_tracker = {}
self.representative_sequences_per_oligotype = {}
self.across_samples_sum_normalized = {}
self.across_samples_max_normalized = {}
self.unit_counts = None
self.unit_percents = None
self.oligotype_sets = None
self.samples = []
self.abundant_oligos = []
self.final_oligo_counts_dict = {}
self.final_oligo_entropy_distribution_dict = {}
self.final_oligo_unique_distribution_dict = {}
self.final_purity_score_dict = {}
self.total_purity_score_dict = {}
self.colors_dict = None
self.score_color_dict = None
self.figures_directory = None
# be smart, turn the threading on if necessary.
if self.number_of_threads:
self.no_threading = False
def check_apps(self):
try:
blast.LocalBLAST(None, None, None)
except blast.ModuleVersionError:
raise utils.ConfigError(blast.version_error_text)
except blast.ModuleBinaryError:
raise utils.ConfigError(blast.missing_binary_error_text)
# FIXME: check R modules here.
def check_dirs(self):
if self.number_of_auto_components != None and self.selected_components != None:
raise utils.ConfigError("Both 'auto components' (-c) and 'selected components' (-C) were declared; please use only one of them.")
if self.number_of_auto_components == None and self.selected_components == None:
raise utils.ConfigError("You either have to declare 'auto components' (-c) or 'selected components' (-C).")
# check output associated stuff
if not self.output_directory:
self.output_directory = os.path.join(os.getcwd(), '-'.join([self.project.replace(' ', '_'), self.get_prefix()]))
if not os.path.exists(self.output_directory):
try:
os.makedirs(self.output_directory)
except:
raise utils.ConfigError("Output directory does not exist (attempt to create one failed as well): '%s'" % \
(self.output_directory))
if not os.access(self.output_directory, os.W_OK):
raise utils.ConfigError("You do not have write permission for the output directory: '%s'" % self.output_directory)
self.tmp_directory = self.generate_output_destination('TMP', directory = True)
self.figures_directory = self.generate_output_destination('FIGURES', directory = True)
def check_input(self):
if (not os.path.exists(self.alignment)) or (not os.access(self.alignment, os.R_OK)):
raise utils.ConfigError("Alignment file is not accessible: '%s'" % self.alignment)
if (not os.path.exists(self.entropy)) or (not os.access(self.entropy, os.R_OK)):
raise utils.ConfigError("Entropy file is not accessible: '%s'" % self.entropy)
if self.sample_mapping:
if (not os.path.exists(self.sample_mapping)) or (not os.access(self.sample_mapping, os.R_OK)):
raise utils.ConfigError("Sample mapping file is not accessible: '%s'" % self.sample_mapping)
if self.colors_list_file:
if not os.path.exists(self.colors_list_file):
raise utils.ConfigError("Colors list file does not exist: '%s'" % self.colors_list_file)
first_characters = list(set([c.strip()[0] for c in open(self.colors_list_file)]))
if len(first_characters) != 1 or first_characters[0] != '#':
raise utils.ConfigError("Colors list file does not seem to be correctly formatted")
# set the alignment length (it will be necessary to check certain params)
alignment = u.SequenceSource(self.alignment)
next(alignment)
self.alignment_length = len(alignment.seq)
alignment.close()
# now we know that input files are OK, lets check input params before we go any further.
self.check_params()
samples = None
if not self.skip_check_input_file:
self.progress.new('Checking the input FASTA')
samples = utils.check_input_alignment(self.alignment, self.sample_name_separator, self.progress)
if not samples:
raise utils.ConfigError('Exiting.')
self.progress.end()
if self.sample_mapping:
utils.mapping_file_simple_check(self.sample_mapping, samples)
sample_mapping_new_destination = self.generate_output_destination("SAMPLE-MAPPING.txt")
shutil.copy(self.sample_mapping, sample_mapping_new_destination)
self.sample_mapping = sample_mapping_new_destination
def check_params(self):
if self.selected_components:
try:
self.selected_components = [int(c) for c in self.selected_components.split(',')]
except:
raise utils.ConfigError("Selected components should be comma separated integer values (such as '4,8,15,25,47').")
if max(self.selected_components) >= self.alignment_length:
raise utils.ConfigError("There is at least one component ('%d') that is bigger than the alignment length."\
% max(self.selected_components))
if min(self.selected_components) < 0:
raise utils.ConfigError("Selected components can't be smaller than 0")
components_declared_more_than_once = [c[0] for c in itertools.groupby(sorted(self.selected_components))\
if len(list(c[1])) > 1]
N = len(components_declared_more_than_once)
if N:
raise utils.ConfigError("You declared %s component%s (%s) more than once."\
% ('a' if N == 1 else '%s' % str(N),
's' if N > 1 else '',
', '.join([str(c) for c in components_declared_more_than_once])))
if self.min_base_quality:
try:
self.min_base_quality = int(self.min_base_quality)
assert(self.min_base_quality >= 0 and self.min_base_quality <= 40)
except:
raise utils.ConfigError("Minimum base quality must be an integer between 0 and 40.")
if self.limit_oligotypes_to:
self.limit_oligotypes_to = [o.strip().upper() for o in self.limit_oligotypes_to.split(',')]
if len(self.limit_oligotypes_to) == 1:
raise utils.ConfigError("There must be more than one oligotype for --limit-oligotypes parameter.")
if len([n for n in ''.join(self.limit_oligotypes_to) if n not in ['A', 'T', 'C', 'G', '-']]):
raise utils.ConfigError("Oligotypes defined by --limit-oligotypes parameter seems to have ambiguous characters.")
if self.exclude_oligotypes:
self.exclude_oligotypes = [o.strip().upper() for o in self.exclude_oligotypes.split(',')]
if len([n for n in ''.join(self.exclude_oligotypes) if n not in ['A', 'T', 'C', 'G', '-']]):
raise utils.ConfigError("Oligotypes defined by --exclude-oligotypes parameter seems to have ambiguous characters.")
return True
def _init_logger(self, path = None):
self.logger = logging.getLogger('oligotyping')
if path:
self.log_file_path = path
else:
self.log_file_path = self.generate_output_destination('RUNINFO.log')
if os.path.exists(self.log_file_path):
os.remove(self.log_file_path)
hdlr = logging.FileHandler(self.log_file_path)
formatter = logging.Formatter('%(asctime)s\t%(levelname)s\t%(message)s')
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.DEBUG)
def get_prefix(self):
prefix = 's%d-a%.1f-A%d-M%d' % (self.min_number_of_samples,
self.min_percent_abundance,
self.min_actual_abundance,
self.min_substantive_abundance)
if self.selected_components:
# I don't have any desire to solve dependencies of the initialization steps properly, so
# please have a cup of ugly hack:
if type(self.selected_components) == type(''):
num_sc = len(self.selected_components.split(','))
else:
num_sc = len(self.selected_components)
prefix = 'sc%d-%s' % (num_sc, prefix)
else:
prefix = 'c%d-%s' % (self.number_of_auto_components, prefix)
if self.quals_dict:
prefix = '%s-q%d' % (prefix, self.min_base_quality)
return prefix
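# Illustrative example (values are the constructor defaults, not a real run):
# with 5 auto components, -s 1, -a 0.0, -A 0 and -M 4 the prefix becomes
# 'c5-s1-a0.0-A0-M4'; with quality filtering at -q 15 it would be
# 'c5-s1-a0.0-A0-M4-q15'.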
def generate_output_destination(self, postfix, directory = False):
return_path = os.path.join(self.output_directory, postfix)
if directory == True:
if os.path.exists(return_path):
shutil.rmtree(return_path)
os.makedirs(return_path)
return return_path
def run_all(self):
self.check_apps()
self.check_dirs()
# ready to init logging
self._init_logger()
self.info_file_path = self.generate_output_destination('RUNINFO')
self.run.init_info_file_obj(self.info_file_path)
self.check_input()
self.progress.new('Initializing')
self.progress.update('Reading the input FASTA')
self.fasta = u.SequenceSource(self.alignment, lazy_init = False)
self.progress.end()
self.column_entropy = [int(x.strip().split()[0]) for x in open(self.entropy).readlines()]
if self.sample_mapping:
self.sample_mapping_dict = utils.get_sample_mapping_dict(self.sample_mapping)
self.run.info('project', self.project)
self.run.info('run_date', utils.get_date())
self.run.info('version', o.__version__)
self.run.info('multi_threaded', not self.no_threading)
self.run.info('alignment', self.alignment)
self.run.info('entropy', self.entropy)
self.run.info('sample_mapping', self.sample_mapping)
self.run.info('output_directory', self.output_directory)
self.run.info('tmp_directory', self.tmp_directory)
self.run.info('info_file_path', self.info_file_path)
self.run.info('quals_provided', True if self.quals_dict else False)
self.run.info('cmd_line', utils.get_cmd_line(sys.argv))
self.run.info('total_seq', self.fasta.total_seq)
self.run.info('alignment_length', self.alignment_length)
self.run.info('number_of_auto_components', self.number_of_auto_components or 0)
self.run.info('number_of_selected_components', len(self.selected_components) if self.selected_components else 0)
self.run.info('generate_sets', self.generate_sets)
self.run.info('skip_basic_analyses', self.skip_basic_analyses)
if self.generate_sets:
self.run.info('T', self.cosine_similarity_threshold)
self.run.info('s', self.min_number_of_samples)
self.run.info('a', self.min_percent_abundance)
self.run.info('A', self.min_actual_abundance)
self.run.info('M', self.min_substantive_abundance)
if self.quals_dict:
self.run.info('q', self.min_base_quality)
if self.limit_oligotypes_to:
self.run.info('limit_oligotypes_to', self.limit_oligotypes_to)
if self.exclude_oligotypes:
self.run.info('exclude_oligotypes', self.exclude_oligotypes)
if self.number_of_auto_components:
# locations of interest based on the entropy scores
self.bases_of_interest_locs = sorted([self.column_entropy[i] for i in range(0, self.number_of_auto_components)])
self.run.info('bases_of_interest_locs',', '.join([str(x) for x in self.bases_of_interest_locs]))
elif self.selected_components:
self.bases_of_interest_locs = sorted(self.selected_components)
self.run.info('bases_of_interest_locs',', '.join([str(x) for x in self.bases_of_interest_locs]))
if self.blast_ref_db:
self.run.info('blast_ref_db', self.blast_ref_db)
# set number of threads to be used
if not self.number_of_threads:
self.number_of_threads = utils.Multiprocessing(None).num_thread
self._construct_samples_dict()
self._contrive_abundant_oligos()
self._refine_samples_dict()
self._get_unit_counts_and_percents()
self._get_units_across_samples_dicts()
self._generate_random_colors()
self._generate_FASTA_file()
self._generate_NEXUS_file()
self._generate_ENVIRONMENT_file()
self._generate_MATRIX_files()
self._store_read_distribution_table()
if self.generate_sets:
self._generate_MATRIX_files_for_units_across_samples()
self._agglomerate_oligos_based_on_cosine_similarity()
self._generate_MATRIX_files_for_oligotype_sets()
if (not self.no_figures) and self.generate_sets:
self._generate_stack_bar_figure_with_agglomerated_oligos()
self._generate_oligos_across_samples_figure()
self._generate_sets_across_samples_figure()
if not self.quick:
self._generate_representative_sequences()
if self.representative_sequences_per_oligotype:
self._generate_representative_sequences_FASTA_file()
if ((not self.no_figures) and (not self.quick)):
self._generate_default_figures()
if ((not self.no_figures) and (not self.quick)) and self.sample_mapping:
self._generate_exclusive_figures()
if (not self.skip_gexf_network_file) and (not self.quick):
self._generate_gexf_network_file()
# store the final information about oligos
self.run.info('final_oligos', self.abundant_oligos, quiet = True)
self.run.info('final_oligo_counts_dict', self.final_oligo_counts_dict, quiet = True)
self.run.info('final_oligo_entropy_distribution_dict', self.final_oligo_entropy_distribution_dict, quiet = True)
self.run.info('final_oligo_unique_distribution_dict', self.final_oligo_unique_distribution_dict, quiet = True)
self.run.info('final_purity_score_dict', self.final_purity_score_dict, quiet = True)
self.run.info('total_purity_score_dict', self.total_purity_score_dict)
self.run.info('end_of_run', utils.get_date())
info_dict_file_path = self.generate_output_destination("RUNINFO.cPickle")
self.run.store_info_dict(info_dict_file_path)
if (not self.keep_tmp):
shutil.rmtree(self.tmp_directory)
self.run.quit()
if not self.skip_gen_html:
self._generate_html_output()
def _construct_samples_dict(self):
"""This is where oligotypes are being genearted based on bases of each
alignment at the location of interest"""
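# Illustrative example (positions and bases are made up): with
# bases_of_interest_locs = [4, 11, 23], a read whose aligned sequence carries
# 'A' at position 4, 'T' at 11 and 'G' at 23 contributes the oligotype 'ATG'
# to the count dictionary of its sample.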
self.progress.new('Sample Dict Construction')
if self.quals_dict:
num_reads_eliminated_due_to_min_base_quality = 0
self.fasta.reset()
while next(self.fasta):
if self.fasta.pos % 1000 == 0:
self.progress.update('Analyzing: %s' \
% (utils.pretty_print(self.fasta.pos)))
sample = utils.get_sample_name_from_defline(self.fasta.id, self.sample_name_separator)
if sample not in self.samples_dict:
self.samples_dict[sample] = {}
self.samples.append(sample)
if self.quals_dict:
# if qual_dicts is available, each base of interest will be tested
# against --min-base-quality parameter to make sure that it is above
# the expected quality score.
quality_scores = self.quals_dict[self.fasta.id]
quality_scores_of_bases_of_interest = [quality_scores[o] for o in self.bases_of_interest_locs if not quality_scores[o] == None]
min_base_quality = min([base_quality for base_quality in quality_scores_of_bases_of_interest if base_quality] or [0])
if min_base_quality < self.min_base_quality:
# if True, discard the read
# FIXME: Discarded reads should be stored somewhere else for further analysis
num_reads_eliminated_due_to_min_base_quality += 1
continue
else:
oligo = ''.join(self.fasta.seq[o] for o in self.bases_of_interest_locs)
else:
# if quals_dict is not available, oligotypes will be generated without
# checking the base qualities
oligo = ''.join(self.fasta.seq[o] for o in self.bases_of_interest_locs)
if oligo in self.samples_dict[sample]:
self.samples_dict[sample][oligo] += 1
else:
self.samples_dict[sample][oligo] = 1
self.samples.sort()
self.progress.end()
self.run.info('num_samples_in_fasta', len(self.samples_dict))
if self.quals_dict:
self.run.info('num_reads_eliminated_due_to_min_base_quality', num_reads_eliminated_due_to_min_base_quality)
if self.fasta.total_seq == num_reads_eliminated_due_to_min_base_quality:
raise utils.ConfigError("All reads were eliminated due to --min-base-quality (%d) rule" % self.min_base_quality)
def _register_removal(self, oligo, reason = 'unknown'):
if reason not in self.excluded_read_ids_tracker:
self.excluded_read_ids_tracker[reason] = {}
for sample in self.samples:
if oligo in self.samples_dict[sample]:
if sample not in self.excluded_read_ids_tracker[reason]:
self.excluded_read_ids_tracker[reason][sample] = self.samples_dict[sample][oligo]
else:
self.excluded_read_ids_tracker[reason][sample] += self.samples_dict[sample][oligo]
def _contrive_abundant_oligos(self):
# cat oligos | uniq
self.progress.new('Contriving Abundant Oligos')
# a performance optimization workaround in order to lessen the
# number of expensive 'keys()' calls on samples_dict to be made
oligos_in_samples_dict = {}
for sample in self.samples:
oligos_in_samples_dict[sample] = set(self.samples_dict[sample].keys())
oligos_set = []
for sample in self.samples:
self.progress.update('Unique Oligos: ' + utils.P(self.samples.index(sample), len(self.samples)))
for oligo in oligos_in_samples_dict[sample]:
if oligo not in oligos_set:
oligos_set.append(oligo)
self.progress.end()
self.run.info('num_unique_oligos', len(oligos_set))
self.progress.new('Computing Oligo Abundances')
# count oligo abundance
oligo_sample_abundance = []
for i in range(0, len(oligos_set)):
oligo = oligos_set[i]
if i % 100 == 0 or i == len(oligos_set) - 1:
self.progress.update(utils.P(i, len(oligos_set)))
count = 0
for sample in self.samples:
if oligo in oligos_in_samples_dict[sample]:
count += 1
oligo_sample_abundance.append((count, oligo),)
oligo_sample_abundance.sort()
self.progress.end()
# eliminate oligos based on the number of samples they appear
# (any oligo required to appear in at least 'self.min_number_of_samples'
# samples)
self.progress.new('Applying -s parameter')
non_singleton_oligos = []
for i in range(0, len(oligo_sample_abundance)):
if i % 100 == 0 or i == len(oligo_sample_abundance) - 1:
self.progress.update(utils.P(i, len(oligo_sample_abundance)))
tpl = oligo_sample_abundance[i]
if tpl[0] >= self.min_number_of_samples:
non_singleton_oligos.append(tpl[1])
else:
self._register_removal(tpl[1], 'failed_s')
self.progress.end()
self.run.info('num_oligos_after_s_elim', len(non_singleton_oligos))
# sample_sums keeps the actual number of oligos that are present in non_singleton_oligos list,
# for each sample. computing it here once is more optimized.
sample_sums = {}
SUM = lambda sample: sum([self.samples_dict[sample][o] for o in non_singleton_oligos \
if o in self.samples_dict[sample]])
for sample in self.samples:
sample_sums[sample] = SUM(sample)
# eliminate very rare oligos (the percent abundance of every oligo should be
# more than 'self.min_percent_abundance' percent in at least one sample)
self.progress.new('Applying -a parameter')
for i in range(0, len(non_singleton_oligos)):
oligo = non_singleton_oligos[i]
if i % 100 == 0 or i == len(non_singleton_oligos) - 1:
self.progress.update(utils.P(i, len(non_singleton_oligos)))
percent_abundances = []
for sample in self.samples:
if oligo in self.samples_dict[sample]:
percent_abundances.append((self.samples_dict[sample][oligo] * 100.0 / sample_sums[sample],
self.samples_dict[sample][oligo],
sample_sums[sample],
sample))
percent_abundances.sort(reverse = True)
# NOTE: if a sample has fewer than 100 sequences, percent abundance doesn't mean much.
# if the user wants to eliminate oligotypes that don't appear in at least one sample
# with more than 1% abundance, a singleton of that oligotype in a sample with only
# 50 sequences would make that oligotype pass the filter. So when an oligotype passes
# the percent filter, the sample size and the actual count of the oligotype should
# also be considered before accepting it as an abundant oligotype:
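# Illustrative numbers: with -a 1.0, a single read in a sample of only 50
# reads already has 2% "abundance" and would pass on the percent check alone;
# the sample-size / actual-count condition below is what keeps such
# singletons out.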
for abundance_percent, abundance_count, sample_size, sample in percent_abundances:
PercentAbundance_OK = abundance_percent >= self.min_percent_abundance
DatasetSize_OK = sample_size > 100 or abundance_count > self.min_percent_abundance
if PercentAbundance_OK and DatasetSize_OK:
self.abundant_oligos.append((sum([x[1] for x in percent_abundances]), oligo))
break
else:
self._register_removal(oligo, 'failed_a')
self.progress.end()
self.run.info('num_oligos_after_a_elim', len(self.abundant_oligos))
self.abundant_oligos = [x[1] for x in sorted(self.abundant_oligos, reverse = True)]
# eliminate very rare oligos (the ACTUAL ABUNDANCE, which is the sum of the oligotype's
# counts across all samples, should be more than 'self.min_actual_abundance').
self.progress.new('Applying -A parameter')
if self.min_actual_abundance > 0:
oligos_for_removal = []
for i in range(0, len(self.abundant_oligos)):
oligo = self.abundant_oligos[i]
if i % 100 == 0 or i == len(self.abundant_oligos) - 1:
self.progress.update(utils.P(i, len(self.abundant_oligos)))
oligo_actual_abundance = sum([self.samples_dict[sample][oligo] for sample in self.samples_dict\
if oligo in self.samples_dict[sample]])
if self.min_actual_abundance > oligo_actual_abundance:
oligos_for_removal.append(oligo)
for oligo in oligos_for_removal:
self.abundant_oligos.remove(oligo)
self._register_removal(oligo, 'failed_A')
self.progress.end()
self.run.info('num_oligos_after_A_elim', len(self.abundant_oligos))
# eliminate oligos based on -M / --min-substantive-abundance parameter.
#
# Here is a pesky problem. -A parameter eliminates oligotypes based on the number of sequences
# represented by them. But this is not a very reliable way to eliminate noise, and sometimes it
# eliminates more signal than noise. Here is an example: Say Oligotype #1 and Oligotype #2 both
# represent 20 reads. But O#1 has only one unique sequence, so all reads that are being
# represented by O#1 are actually the same. Meanwhile O#2 has 20 unique reads in it. So each
# read differs from each other at bases that are not being used by oligotyping. Simply one could
# argue that O#2 is full of noise, while O#1 is a robust oligotype that probably represents one
# and only one organism. If you set -A to 25, both will be eliminated. But if there would be a
# parameter that eliminates oligotypes based on the number of most abundant unique sequence
# they entail, it could be set to, say '5', and O#1 would have survived that filter while O#2
# the crappy oligotype would be filtered out.
#
# Following function, _get_unique_sequence_distributions_within_abundant_oligos, returns the
# dictionary that can be used to do that.
#
# And here is the ugly part about implementing this: This has to be done before the generation
# of representative sequences. Up to the section where we generate representative sequences,
# we only work with 'abundances' and we don't actually know what is the distribution of unique
# sequences an oligotype conceals. This information is being computed when the representative
# sequences are being computed. But in order to compute representative sequences we need to
# know 'abundant' oligotypes first, and in order to finalize 'abundant' oligotypes
# we need to run this cool filter. Chicken/egg. It is extremely inefficient, and I hate
# to do this but this somewhat redundant step is mandatory and I can't think of any better
# solution... And if you read this comment all the way here you either must be very bored or
# very interested in using this codebase properly. Thanks.
self.progress.new('Applying -M parameter')
if self.min_substantive_abundance:
oligos_for_removal = []
unique_sequence_distributions = self._get_unique_sequence_distributions_within_abundant_oligos()
num_abundant_oligos = len(self.abundant_oligos)
for i in range(0, num_abundant_oligos):
self.progress.update(utils.P(i, num_abundant_oligos))
oligo = self.abundant_oligos[i]
if max(unique_sequence_distributions[oligo]) < self.min_substantive_abundance:
oligos_for_removal.append(oligo)
for oligo in oligos_for_removal:
self._register_removal(oligo, 'failed_M')
self.abundant_oligos.remove(oligo)
self.progress.end()
self.run.info('num_oligos_after_M_elim', len(self.abundant_oligos))
# if 'limit_oligotypes_to' is defined, eliminate all other oligotypes
if self.limit_oligotypes_to:
self.abundant_oligos = [oligo for oligo in self.abundant_oligos if oligo in self.limit_oligotypes_to]
for oligo in [oligo for oligo in self.abundant_oligos if not oligo in self.limit_oligotypes_to]:
self._register_removal(oligo, 'failed_limit')
self.run.info('num_oligos_after_l_elim', len(self.abundant_oligos))
if len(self.abundant_oligos) == 0:
raise utils.ConfigError("\n\n\t--limit-oligotypes parameter eliminated all oligotypes.\
\n\tPlease make sure --limit-oligotypes matches with actual oligotypes.\n\n\tQuitting.\n")
# if 'exclude_oligotypes' is defined, remove them from analysis if they are present
if self.exclude_oligotypes:
self.abundant_oligos = [oligo for oligo in self.abundant_oligos if not oligo in self.exclude_oligotypes]
for oligo in self.exclude_oligotypes:
self._register_removal(oligo, 'excluded')
self.run.info('num_oligos_after_e_elim', len(self.abundant_oligos))
# storing final counts
for oligo in self.abundant_oligos:
self.final_oligo_counts_dict[oligo] = sum([self.samples_dict[sample][oligo] for sample in self.samples_dict\
if oligo in self.samples_dict[sample]])
# in case no oligos left
if not len(self.abundant_oligos):
raise utils.ConfigError("\n\n\tAll oligotypes were discarded during the noise removal step.\
\n\tPlease check your parameters.\n\n\tQuitting.\n")
# if there is only one oligotype left, skip basic analyses
if len(self.abundant_oligos) == 1:
self.skip_basic_analyses = True
self.run.info('skip_basic_analyses', self.skip_basic_analyses)
def _refine_samples_dict(self):
# removing oligos from samples dictionary that didn't pass
# MIN_PERCENT_ABUNDANCE_OF_OLIGOTYPE_IN_AT_LEAST_ONE_SAMPLE and
# MIN_NUMBER_OF_SAMPLES_OLIGOTYPE_APPEARS filters.
self.progress.new('Refining Samples Dict')
self.progress.update('Deep-copying the dictionary .. ')
samples_dict_copy = copy.deepcopy(self.samples_dict)
self.progress.append('done')
samples_to_remove = []
for i in range(0, len(self.samples)):
sample = self.samples[i]
self.progress.update('Analyzing samples: ' + utils.P(i + 1, len(self.samples)))
for oligo in samples_dict_copy[sample]:
if oligo not in self.abundant_oligos:
self.samples_dict[sample].pop(oligo)
if not self.samples_dict[sample]:
samples_to_remove.append(sample)
for sample in samples_to_remove:
self.samples.remove(sample)
self.samples_dict.pop(sample)
self.num_sequences_after_qc = sum([sum(self.samples_dict[sample].values()) for sample in self.samples_dict])
self.progress.end()
self.run.info('num_sequences_after_qc', self.num_sequences_after_qc)
if len(samples_to_remove):
self.run.info('samples_removed_after_qc', samples_to_remove)
if len(self.samples) < 3:
self.skip_basic_analyses = True
self.run.info('skip_basic_analyses', self.skip_basic_analyses)
def _generate_FASTA_file(self):
# store abundant oligos
self.progress.new('FASTA File')
oligos_fasta_file_path = self.generate_output_destination("OLIGOS.fasta")
f = open(oligos_fasta_file_path, 'w')
self.progress.update('Being generated')
for oligo in self.abundant_oligos:
f.write('>' + oligo + '\n')
f.write(oligo + '\n')
f.close()
self.progress.end()
self.run.info('oligos_fasta_file_path', oligos_fasta_file_path)
def _generate_representative_sequences_FASTA_file(self):
# store representative sequences per oligotype if they are computed
self.progress.new('Representative Sequences FASTA File')
representative_seqs_fasta_file_path = self.generate_output_destination("OLIGO-REPRESENTATIVES.fasta")
f = open(representative_seqs_fasta_file_path, 'w')
self.progress.update('Being generated')
for oligo in self.abundant_oligos:
f.write('>' + oligo + '\n')
f.write(self.representative_sequences_per_oligotype[oligo] + '\n')
f.close()
self.progress.end()
self.run.info('representative_seqs_fasta_file_path', representative_seqs_fasta_file_path)
def _generate_NEXUS_file(self):
# generate NEXUS file of oligos
self.progress.new('NEXUS File')
oligos_nexus_file_path = self.generate_output_destination("OLIGOS.nexus")
f = open(oligos_nexus_file_path, 'w')
f.write("""begin data;
dimensions ntax=%d nchar=%d;
format datatype=dna interleave=no gap=-;
matrix\n""" % (len(self.abundant_oligos), len(self.abundant_oligos[0])))
self.progress.update('Being generated')
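        # each abundant oligotype doubles as its own taxon label (truncated to 40
        # characters by the '%.40s' format) and as its sequence in the NEXUS matrix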
for oligo in self.abundant_oligos:
f.write(' %.40s %s\n' % (oligo, oligo))
f.write(' ;\n')
f.write('end;\n')
f.close()
self.progress.end()
self.run.info('oligos_nexus_file_path', oligos_nexus_file_path)
def _get_unit_counts_and_percents(self):
self.progress.new('Unit counts and percents')
self.progress.update('Data is being generated')
self.unit_counts, self.unit_percents = utils.get_unit_counts_and_percents(self.abundant_oligos, self.samples_dict)
self.progress.end()
def _generate_MATRIX_files_for_units_across_samples(self):
self.progress.new('Oligos across samples')
self.progress.update('Matrix files are being generated')
across_samples_MN_file_path = self.generate_output_destination("OLIGOS-ACROSS-DATASETS-MAX-NORM.txt")
across_samples_SN_file_path = self.generate_output_destination("OLIGOS-ACROSS-DATASETS-SUM-NORM.txt")
utils.generate_MATRIX_files_for_units_across_samples(self.abundant_oligos,
self.samples,
across_samples_MN_file_path,
across_samples_SN_file_path,
self.across_samples_max_normalized,
self.across_samples_sum_normalized)
self.progress.end()
self.run.info('across_samples_MN_file_path', across_samples_MN_file_path)
self.run.info('across_samples_SN_file_path', across_samples_SN_file_path)
def _get_units_across_samples_dicts(self):
self.progress.new('Oligos across samples')
self.progress.update('Data is being generated')
self.across_samples_sum_normalized, self.across_samples_max_normalized =\
utils.get_units_across_samples_dicts(self.abundant_oligos, self.samples, self.unit_percents)
self.progress.end()
def _generate_ENVIRONMENT_file(self):
self.progress.new('ENVIRONMENT File')
self.environment_file_path = self.generate_output_destination("ENVIRONMENT.txt")
self.progress.update('Being generated')
utils.generate_ENVIRONMENT_file(self.samples,
self.samples_dict,
self.environment_file_path)
self.progress.end()
self.run.info('environment_file_path', self.environment_file_path)
def _generate_MATRIX_files(self):
self.progress.new('Matrix Files')
self.progress.update('Being generated')
self.matrix_count_file_path = self.generate_output_destination("MATRIX-COUNT.txt")
self.matrix_percent_file_path = self.generate_output_destination("MATRIX-PERCENT.txt")
utils.generate_MATRIX_files(self.abundant_oligos,
self.samples,
self.unit_counts,
self.unit_percents,
self.matrix_count_file_path,
self.matrix_percent_file_path)
self.progress.end()
self.run.info('matrix_count_file_path', self.matrix_count_file_path)
self.run.info('matrix_percent_file_path', self.matrix_percent_file_path)
def _store_read_distribution_table(self):
self.progress.new('Read distribution table')
self.read_distribution_table_path = self.generate_output_destination("READ-DISTRIBUTION.txt")
def get_dict_entry_tmpl():
d = {'represented_reads': 0}
for reason in self.excluded_read_ids_tracker:
d[reason] = 0
return d
read_distribution_dict = {}
self.progress.update('Processing reads that were represented in results')
for sample in self.samples_dict:
if sample not in read_distribution_dict:
read_distribution_dict[sample] = get_dict_entry_tmpl()
read_distribution_dict[sample]['represented_reads'] = sum(self.samples_dict[sample].values())
for reason in self.excluded_read_ids_tracker:
self.progress.update('Processing excluded oligos (%s)' % (reason))
for sample in self.excluded_read_ids_tracker[reason]:
if sample not in read_distribution_dict:
read_distribution_dict[sample] = get_dict_entry_tmpl()
read_distribution_dict[sample][reason] = self.excluded_read_ids_tracker[reason][sample]
self.progress.update('Storing...')
utils.generate_TAB_delim_file_from_dict(read_distribution_dict,
self.read_distribution_table_path,
order = ['represented_reads'] + sorted(self.excluded_read_ids_tracker.keys()))
self.progress.end()
self.run.info('read_distribution_table_path', self.read_distribution_table_path)
def _generate_random_colors(self):
self.colors_file_path = self.generate_output_destination('COLORS')
if self.colors_list_file:
# it means user provided a list of colors to be used for oligotypes
colors = [c.strip() for c in open(self.colors_list_file).readlines()]
if len(colors) < len(self.abundant_oligos):
raise utils.ConfigError("Number of colors defined in colors file (%d),\
is smaller than the number of abundant oligotypes (%d)" % \
(len(colors), len(self.abundant_oligos)))
colors_dict = {}
for i in range(0, len(self.abundant_oligos)):
colors_dict[self.abundant_oligos[i]] = colors[i]
self.colors_dict = colors_dict
# generate COLORS file derived from --colors-list-file
colors_file = open(self.colors_file_path, 'w')
for oligotype in self.abundant_oligos:
colors_file.write('%s\t%s\n' % (oligotype, self.colors_dict[oligotype]))
colors_file.close()
else:
self.colors_dict = random_colors(self.abundant_oligos, self.colors_file_path)
self.run.info('colors_file_path', self.colors_file_path)
def _agglomerate_oligos_based_on_cosine_similarity(self):
from Oligotyping.utils.cosine_similarity import get_oligotype_sets
self.progress.new('Agglomerating Oligotypes into Sets')
oligotype_sets_file_path = self.generate_output_destination("OLIGOTYPE-SETS.txt")
self.progress.update('Computing')
self.oligotype_sets = get_oligotype_sets(self.abundant_oligos,
self.across_samples_sum_normalized,
self.cosine_similarity_threshold,
oligotype_sets_file_path)
self.progress.end()
self.run.info('oligotype_sets_file_path', oligotype_sets_file_path)
self.run.info('oligotype_sets_info', '%d oligotypes agglomerated into %d sets'\
% (len(self.abundant_oligos), len(self.oligotype_sets)))
self.progress.new('Generating data objects for newly generated oligotype sets')
self.progress.update('New Colors')
self.oligotype_set_ids = list(range(0, len(self.oligotype_sets)))
self.colors_dict_for_oligotype_sets = {}
for set_id in self.oligotype_set_ids:
self.colors_dict_for_oligotype_sets[set_id] = self.colors_dict[self.oligotype_sets[set_id][0]]
self.progress.update('New Samples Dict')
self.samples_dict_with_agglomerated_oligos = {}
for sample in self.samples:
self.samples_dict_with_agglomerated_oligos[sample] = {}
for set_id in self.oligotype_set_ids:
oligotype_set = self.oligotype_sets[set_id]
for sample in self.samples:
self.samples_dict_with_agglomerated_oligos[sample][set_id] = 0
for oligo in self.samples_dict[sample]:
if oligo in oligotype_set:
self.samples_dict_with_agglomerated_oligos[sample][set_id] += self.samples_dict[sample][oligo]
self.progress.end()
def _generate_MATRIX_files_for_oligotype_sets(self):
self.progress.new('Matrix Files for Oligotype Sets')
counts_file_path = self.generate_output_destination("MATRIX-COUNT-OLIGO-SETS.txt")
percents_file_path = self.generate_output_destination("MATRIX-PERCENT-OLIGO-SETS.txt")
d = self.samples_dict_with_agglomerated_oligos
oligotype_set_percents = {}
oligotype_set_counts = {}
self.progress.update('Generating the data')
for oligotype_set_id in self.oligotype_set_ids:
counts = []
percents = []
for sample in self.samples:
if oligotype_set_id in d[sample]:
counts.append(d[sample][oligotype_set_id])
percents.append(d[sample][oligotype_set_id] * 100.0 / sum(d[sample].values()))
else:
counts.append(0)
percents.append(0.0)
oligotype_set_percents[oligotype_set_id] = percents
oligotype_set_counts[oligotype_set_id] = counts
self.progress.update('Generating files')
counts_file = open(counts_file_path, 'w')
percents_file = open(percents_file_path, 'w')
counts_file.write('\t'.join([''] + self.samples) + '\n')
percents_file.write('\t'.join([''] + self.samples) + '\n')
for oligotype_set_id in self.oligotype_set_ids:
counts_file.write('\t'.join(['Set_' + str(oligotype_set_id)] + [str(c) for c in oligotype_set_counts[oligotype_set_id]]) + '\n')
percents_file.write('\t'.join(['Set_' + str(oligotype_set_id)] + [str(p) for p in oligotype_set_percents[oligotype_set_id]]) + '\n')
counts_file.close()
percents_file.close()
self.progress.end()
self.run.info('matrix_count_oligo_sets_file_path', counts_file_path)
self.run.info('matrix_percent_oligo_sets_file_path', percents_file_path)
def _get_unique_sequence_distributions_within_abundant_oligos(self):
        # compute and return the distribution of unique sequences per oligo as a
        # dictionary. see the explanation where the function is called. oligos
        # listed in this dictionary MAY NOT be the final oligos once the noise
        # filtering step has ended.
temp_unique_distributions = dict(list(zip(self.abundant_oligos, [{} for x in range(0, len(self.abundant_oligos))])))
self.fasta.reset()
while next(self.fasta):
if self.progress and self.fasta.pos % 1000 == 0:
self.progress.update('Computing sequence distributions: %.2f%%' \
% (self.fasta.pos * 100.0 / self.fasta.total_seq))
oligo = ''.join(self.fasta.seq[o] for o in self.bases_of_interest_locs)
if oligo in self.abundant_oligos:
try:
temp_unique_distributions[oligo][self.fasta.seq] += 1
except KeyError:
temp_unique_distributions[oligo][self.fasta.seq] = 1
for oligo in self.abundant_oligos:
temp_unique_distributions[oligo] = sorted(list(temp_unique_distributions[oligo].values()), reverse = True)
return temp_unique_distributions
def _generate_representative_sequences(self):
# create a fasta file with a representative full length consensus sequence for every oligotype
# this is what is going on here: we go through all oligotypes, gather sequences that are being
# represented by a particular oligotype, unique them and report the top ten unique sequences
# ordered by the frequency.
self.progress.new('Representative Sequences')
output_directory_for_reps = self.generate_output_destination("OLIGO-REPRESENTATIVES", directory = True)
fasta_files_dict = {}
unique_files_dict = {}
for oligo in self.abundant_oligos:
if oligo not in fasta_files_dict:
try:
fasta_file_path = os.path.join(output_directory_for_reps, '%.5d_' % self.abundant_oligos.index(oligo) + oligo)
fasta_files_dict[oligo] = {'file': open(fasta_file_path, 'w'),
'path': fasta_file_path}
unique_files_dict[oligo] = {'file': open(fasta_file_path + '_unique', 'w'),
'path': fasta_file_path + '_unique'}
except IOError:
print('\n\t'.join(['',
'WARNING: Oligotyping process has reached the maximum number of open files',
'limit defined by the operating system. There are "%d" oligotypes to be'\
% len(self.abundant_oligos),
                               'stored. You can learn the actual limit by typing "ulimit -n" in the terminal.',
'',
'You can increase this limit temporarily by typing "ulimit -n NUMBER", and',
'restart the process. It seems using %d as NUMBER might be a good start.'\
% (len(self.abundant_oligos) * 1.1),
'',
'Until this issue is solved, representative sequences are not going to be',
'computed.',
'']))
# clean after yourself. close every file, delete directory, exit.
[[x.close() for x in [g[o]['file'] for o in g]] for g in [fasta_files_dict, unique_files_dict]]
shutil.rmtree(output_directory_for_reps)
sys.exit()
self.fasta.reset()
while next(self.fasta):
if self.fasta.pos % 1000 == 0:
self.progress.update('Generating Individual FASTA Files: %.2f%%' \
% (self.fasta.pos * 100.0 / self.fasta.total_seq))
oligo = ''.join(self.fasta.seq[o] for o in self.bases_of_interest_locs)
if oligo in self.abundant_oligos:
fasta_files_dict[oligo]['file'].write('>%s\n' % (self.fasta.id))
fasta_files_dict[oligo]['file'].write('%s\n' % self.fasta.seq)
self.progress.end()
self.progress.new('Representative Sequences')
for oligo in self.abundant_oligos:
fasta_files_dict[oligo]['file'].close()
self.progress.update('Unique reads for %s (%d of %d)' \
% (oligo,
self.abundant_oligos.index(oligo) + 1,
len(self.abundant_oligos)))
fasta_file_path = fasta_files_dict[oligo]['path']
fasta = u.SequenceSource(fasta_file_path, lazy_init = False, unique = True)
# this dict is going to hold the information of how unique sequences within an oligotype
# is distributed among samples:
distribution_among_samples = {}
next(fasta)
# this is the first read in the unique reads list, which is the most abundant unique sequence
# for the oligotype. so we are going to store it in a dict to generate
# representative sequences FASTA file:
self.representative_sequences_per_oligotype[oligo] = fasta.seq
fasta.reset()
# FIXME: I am going to come back to this and fix it at some point. Storing 'distribution_among_samples'
# information in separate cPickle files per oligo is not the smartest thing to do.
self.final_oligo_unique_distribution_dict[oligo] = []
while next(fasta) and fasta.pos <= self.limit_representative_sequences:
unique_files_dict[oligo]['file'].write('>%s_%d|freq:%d\n'\
% (oligo,
fasta.pos,
len(fasta.ids)))
unique_files_dict[oligo]['file'].write('%s\n' % fasta.seq)
# store only the first 20
if not fasta.pos > 20:
self.final_oligo_unique_distribution_dict[oligo].append(len(fasta.ids))
for sample_id in fasta.ids:
sample_name = utils.get_sample_name_from_defline(sample_id, self.sample_name_separator)
if sample_name not in distribution_among_samples:
distribution_among_samples[sample_name] = {}
d = distribution_among_samples[sample_name]
if fasta.pos not in d:
d[fasta.pos] = 1
else:
d[fasta.pos] += 1
fasta.close()
unique_files_dict[oligo]['file'].close()
unique_fasta_path = unique_files_dict[oligo]['path']
distribution_among_samples_dict_path = unique_fasta_path + '_distribution.cPickle'
pickle.dump(distribution_among_samples, open(distribution_among_samples_dict_path, 'wb'))
self.progress.end()
self._get_purity_score()
self._get_total_purity_score()
self.progress.new('Generating Entropy Figures')
if (not self.quick) and (not self.no_figures):
if self.no_threading:
for oligo in self.abundant_oligos:
self.progress.update('%s (%d of %d)' % (oligo,
self.abundant_oligos.index(oligo) + 1,
len(self.abundant_oligos)))
unique_fasta_path = unique_files_dict[oligo]['path']
self._generate_entropy_figure_for_abundant_oligotype(oligo, unique_fasta_path, self.final_oligo_entropy_distribution_dict)
else:
mp = utils.Multiprocessing(self._generate_entropy_figure_for_abundant_oligotype, self.number_of_threads)
entropy_per_oligo_shared_dict = mp.get_empty_shared_dict()
# arrange processes
processes_to_run = []
for oligo in self.abundant_oligos:
unique_fasta_path = unique_files_dict[oligo]['path']
processes_to_run.append((oligo, unique_fasta_path, entropy_per_oligo_shared_dict),)
# start the main loop to run all processes
mp.run_processes(processes_to_run, self.progress)
self.final_oligo_entropy_distribution_dict = copy.deepcopy(entropy_per_oligo_shared_dict)
self.progress.end()
if (not self.quick) and self.do_blast_search:
self.progress.new('Performing %s BLAST search for representative sequences'\
% ('LOCAL' if self.blast_ref_db else 'REMOTE'))
if self.blast_ref_db:
# if there is a local db to search representative sequences against,
# just perform the blast search in one thread
self._perform_local_BLAST_search_for_oligo_representative(unique_files_dict)
else:
# if the search is going to be on NCBI, parallelize it:
if self.no_threading:
for oligo in self.abundant_oligos:
self.progress.update('%s (%d of %d)' % (oligo,
self.abundant_oligos.index(oligo) + 1,
len(self.abundant_oligos)))
self._perform_remote_BLAST_search_for_oligo_representative(oligo, unique_files_dict)
else:
mp = utils.Multiprocessing(self._perform_remote_BLAST_search_for_oligo_representative, self.number_of_threads)
# arrange processes
processes_to_run = []
for oligo in self.abundant_oligos:
unique_fasta_path = unique_files_dict[oligo]['path']
processes_to_run.append((oligo, unique_files_dict,),)
# start the main loop to run all processes
mp.run_processes(processes_to_run, self.progress)
self.progress.end()
self.run.info('output_directory_for_reps', output_directory_for_reps)
def _get_purity_score(self):
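        # the purity score of an oligotype is computed from the frequencies of its
        # unique sequences (already sorted from most to least abundant): 1 minus the
        # ratio of the second most frequent unique sequence to the most frequent one.
        # e.g. (illustrative numbers) frequencies [120, 6, 1] give 1 - 6/120 = 0.95;
        # an oligotype with a single unique sequence gets a perfect score of 1.00.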
for oligo in self.final_oligo_unique_distribution_dict:
freq_dict = self.final_oligo_unique_distribution_dict[oligo]
if len(self.final_oligo_unique_distribution_dict[oligo]) > 1:
bp = (freq_dict[1] / (freq_dict[0] * 1.0))
self.final_purity_score_dict[oligo] = 1 - bp
else:
self.final_purity_score_dict[oligo] = 1.00
def _get_total_purity_score(self):
sorted_scores = sorted(self.final_purity_score_dict.values())
        last_quarter = sorted_scores[:int(math.ceil(len(sorted_scores)/4.0))] # take the lowest-scoring quarter of the oligotypes
        final_total = reduce(lambda x, y: x + y, last_quarter) / len(last_quarter) # take the average of these scores
        self.total_purity_score_dict = "%.2f" % final_total
def _perform_local_BLAST_search_for_oligo_representative(self, unique_files_dict):
query, target, output = utils.get_temporary_file_names_for_BLAST_search(prefix = "REPS_", directory = self.tmp_directory)
representative_fasta_entries = []
for oligo in self.abundant_oligos:
self.progress.update('Storing representative sequences for "%s" ...' % oligo)
unique_fasta_path = unique_files_dict[oligo]['path']
unique_fasta = u.SequenceSource(unique_fasta_path)
next(unique_fasta)
representative_fasta_entries.append((oligo, unique_fasta.seq),)
unique_fasta.close()
utils.append_reads_to_FASTA(representative_fasta_entries, query)
self.progress.update('Generating a copy of target BLAST db ...')
self.logger.info('copying blastdb from "%s" to %s' % (self.blast_ref_db, target))
shutil.copy(self.blast_ref_db, target)
utils.mask_defline_whitespaces_in_FASTA(target, '<$!$>')
utils.mask_defline_whitespaces_in_FASTA(query, '<$!$>')
params = "-perc_identity 90"
job = 'reps'
s = blast.LocalBLAST(query, target, output, log = self.generate_output_destination('BLAST.log'))
self.logger.info('local blast request for job "%s": (q) %s (t) %s (o) %s (p) %s'\
% (job, query, target, output, params))
self.progress.update('Running makeblastdb ...')
s.make_blast_db()
self.logger.info('makeblastdb for %s: %s' % (job, s.makeblastdb_cmd))
s.params = params
self.progress.update('Performing blastn ...')
s.search()
self.logger.info('blastn for %s: %s' % (job, s.search_cmd))
self.progress.update('Processing BLAST results ...')
fancy_results_dict = s.get_fancy_results_dict(defline_white_space_mask = '<$!$>')
self.progress.update('Storing BLAST results ...')
for oligo in self.abundant_oligos:
unique_fasta_path = unique_files_dict[oligo]['path']
fancy_blast_result_output_path = unique_fasta_path + '_BLAST.cPickle'
if oligo in fancy_results_dict:
                pickle.dump(fancy_results_dict[oligo], open(fancy_blast_result_output_path, 'wb'))
            else:
                pickle.dump([], open(fancy_blast_result_output_path, 'wb'))
def _perform_remote_BLAST_search_for_oligo_representative(self, oligo, unique_files_dict):
# will perform remote BLAST
r = blast.RemoteBLAST()
unique_fasta_path = unique_files_dict[oligo]['path']
unique_fasta = u.SequenceSource(unique_fasta_path)
next(unique_fasta)
blast_output_xml = unique_fasta_path + '_BLAST.xml'
blast_output_dict = unique_fasta_path + '_BLAST.cPickle'
        # FIXME: this value should be parameterized
max_blast_attempt = 3
def blast_search_wrapper(seq, xml_path, pickle_path):
try:
results = r.search(seq, xml_path)
results_list = r.get_fancy_results_list(results)
                pickle.dump(results_list, open(pickle_path, 'wb'))
return True
except:
return False
for blast_attempt in range(0, max_blast_attempt):
self.progress.update('searching for "%s" (%d of %d) (attempt #%d)' % (oligo,
self.abundant_oligos.index(oligo) + 1,
len(self.abundant_oligos), blast_attempt + 1))
if blast_search_wrapper(unique_fasta.seq, blast_output_xml, blast_output_dict):
break
else:
continue
unique_fasta.close()
return True
def _generate_entropy_figure_for_abundant_oligotype(self, oligo, unique_fasta_path, final_oligo_entropy_distribution_dict):
entropy_file_path = unique_fasta_path + '_entropy'
color_per_column_path = unique_fasta_path + '_color_per_column.cPickle'
# generate entropy output at 'entropy_file_path' along with the image
vis_freq_curve(unique_fasta_path, output_file = unique_fasta_path + '.png', entropy_output_file = entropy_file_path)
        # use entropy output to generate a color shade for every column in the
        # alignment for visualization purposes
entropy_values_per_column = [0] * self.alignment_length
for column, entropy in [x.strip().split('\t') for x in open(entropy_file_path)]:
entropy_values_per_column[int(column)] = float(entropy)
final_oligo_entropy_distribution_dict[oligo] = entropy_values_per_column
color_shade_dict = get_color_shade_dict_for_list_of_values(entropy_values_per_column)
color_per_column = [0] * self.alignment_length
for i in range(0, self.alignment_length):
color_per_column[i] = color_shade_dict[entropy_values_per_column[i]]
pickle.dump(color_per_column, open(color_per_column_path, 'wb'))
def _generate_oligos_across_samples_figure(self):
self.progress.new('Oligotypes Across Samples Figure')
oligos_across_samples_file_path = self.generate_output_destination('OLIGOS-ACROSS-DATASETS.png')
self.progress.update('Generating')
oligos = copy.deepcopy(self.abundant_oligos)
oligotype_distribution_across_samples(self.samples_dict, self.colors_dict, oligos_across_samples_file_path,\
oligos = oligos, project_title = self.project, display = False)
self.progress.end()
self.run.info('oligos_across_samples_file_path', oligos_across_samples_file_path)
def _generate_sets_across_samples_figure(self):
self.progress.new('Oligotype Sets Across Samples Figure')
figure_path = self.generate_output_destination('OLIGO-SETS-ACROSS-DATASETS.png')
self.progress.update('Generating')
vis_oligotype_sets_distribution(self.oligotype_sets, self.across_samples_sum_normalized, self.samples,\
display = False, colors_dict = self.colors_dict, output_file = figure_path,\
project_title = 'Oligotype Sets Across Samples for "%s", at Cosine Similarity Threshold of %.4f'\
% (self.project, self.cosine_similarity_threshold), legend = False)
self.progress.end()
self.run.info('oligotype_sets_across_samples_figure_path', figure_path)
def _generate_stack_bar_figure_with_agglomerated_oligos(self):
self.progress.new('Stackbar Figure with Agglomerated Oligos')
stack_bar_file_path = self.generate_output_destination('STACKBAR-AGGLOMERATED-OLIGOS.png')
self.progress.update('Generating')
oligotype_distribution_stack_bar(self.samples_dict_with_agglomerated_oligos, self.colors_dict_for_oligotype_sets,\
stack_bar_file_path, oligos = self.oligotype_set_ids, project_title = self.project,\
display = not self.no_display)
self.progress.end()
self.run.info('stack_bar_with_agglomerated_oligos_file_path', stack_bar_file_path)
def _generate_default_figures(self):
self.progress.new('Figures')
figures_dict = generate_default_figures(self)
figures_dict_file_path = self.generate_output_destination("FIGURES.cPickle")
pickle.dump(figures_dict, open(figures_dict_file_path, 'wb'))
self.progress.end()
self.run.info('figures_dict_file_path', figures_dict_file_path)
def _generate_exclusive_figures(self):
if len(self.samples) < 3:
return None
self.progress.new('Exclusive Figures')
exclusive_figures_dict = generate_exclusive_figures(self)
exclusive_figures_dict_file_path = self.generate_output_destination("EXCLUSIVE-FIGURES.cPickle")
        pickle.dump(exclusive_figures_dict, open(exclusive_figures_dict_file_path, 'wb'))
self.progress.end()
self.run.info('exclusive_figures_dict_file_path', exclusive_figures_dict_file_path)
def _generate_gexf_network_file(self):
self.gexf_network_file_path = self.generate_output_destination("NETWORK.gexf")
self.progress.new('GEXF Network File')
utils.generate_gexf_network_file(self.abundant_oligos,
self.samples_dict,
self.unit_percents,
self.gexf_network_file_path,
sample_mapping_dict = self.sample_mapping_dict,
project = self.project)
self.progress.end()
self.run.info('gexf_network_file_path', self.gexf_network_file_path)
def _generate_html_output(self):
if self.no_figures:
sys.stdout.write('\n\n\t"--no-figures" parameter is given, skipping HTML output...\n\n')
return
if self.quick:
sys.stdout.write('\n\n\t"--quick" parameter is given, skipping HTML output...\n\n')
return
from Oligotyping.utils.html.error import HTMLError
try:
from Oligotyping.utils.html.for_oligotyping import generate_html_output
except HTMLError as e:
sys.stdout.write('\n\n\t%s\n\n' % e)
sys.exit()
self.progress.new('HTML Output')
output_directory_for_html = self.generate_output_destination("HTML-OUTPUT", directory = True)
self.progress.update('Generating')
index_page = generate_html_output(self.run.info_dict, html_output_directory = output_directory_for_html)
self.progress.end()
sys.stdout.write('\n\n\tView results in your browser: "%s"\n\n' % index_page)
if __name__ == '__main__':
pass
|
meren/oligotyping
|
Oligotyping/lib/oligotyping.py
|
Python
|
gpl-2.0
| 70,869
|
[
"BLAST"
] |
cdba9d5a7174880627f3492525ed0c991385ad083a94c85041301d013532c182
|
########################################################################
# $HeadURL $
# File: LcgFileCatalogProxyClient.py
########################################################################
""" File catalog client for LCG File Catalog proxy service """
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client
class LcgFileCatalogProxyClient( Client ):
""" File catalog client for LCG File Catalog proxy service
"""
def __init__( self, url = False, **kwargs ):
""" Constructor of the LCGFileCatalogProxy client class
"""
Client.__init__( self, **kwargs )
self.method = None
self.name = 'LFCProxy'
self.valid = False
self.setServer( 'DataManagement/LcgFileCatalogProxy' )
if url:
self.setServer( url )
self.setTimeout( 120 )
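    # ping the service once at construction time; isOK() reports the outcome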
self.call = 'ping'
self.valid = self.executeRPC()['OK']
def isOK( self ):
""" Is the Catalog available?
"""
return self.valid
def getName( self ):
""" Get the file catalog type name
"""
return self.name
def __getattr__( self, name ):
self.method = name
return self.execute
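  # How the magic dispatch works (an illustrative walk-through): a call such as
  # lfcpc.listDirectory( '/' ) is intercepted by __getattr__ above, which stores
  # 'listDirectory' in self.method and returns self.execute; execute (below) then
  # forwards the method name and its arguments to the proxy service via executeRPC.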
def execute( self, *parms, **kws ):
""" Magic method dispatcher """
self.call = 'callProxyMethod'
return self.executeRPC( self.method, parms, kws )
def testLcgFileCatalogProxyClient():
""" basic test of the module
"""
import os
import sys
import pprint
from DIRAC.Core.Base.Script import parseCommandLine
from DIRAC import gLogger, S_OK
parseCommandLine()
gLogger.setLevel( 'VERBOSE' )
if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
    gLogger.info( 'Unset python optimization "PYTHONOPTIMIZE"' )
sys.exit( 0 )
gLogger.info( 'Testing LcgFileCatalogProxyClient class...' )
try:
result = S_OK()
lfcpc = LcgFileCatalogProxyClient()
gLogger.info( 'LcgFileCatalogProxyClient instantiated' )
server = lfcpc.getServer()
assert server == 'DataManagement/LcgFileCatalogProxy'
gLogger.info( ' Connecting to ', server )
timeout = lfcpc.timeout
assert timeout == 120
result = lfcpc.listDirectory( '/' )
assert result['OK']
gLogger.info( pprint.pformat( result['Value']['Successful'] ) )
gLogger.info( 'Server is alive' )
  except AssertionError as x:
if result['OK']:
gLogger.error( x )
sys.exit( 1 )
else:
gLogger.info( 'Test OK, but could not connect to server' )
gLogger.info( result['Message'] )
if __name__ == '__main__':
testLcgFileCatalogProxyClient()
|
Sbalbp/DIRAC
|
Resources/Catalog/LcgFileCatalogProxyClient.py
|
Python
|
gpl-3.0
| 2,536
|
[
"DIRAC"
] |
8fac93c30a59784a483743a3344fd490bf903d97f4ab1b44f9784634c5a312dd
|
import logging
from typing import List
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.utils.molecule_feature_utils import one_hot_encode
from deepchem.feat.base_classes import MolecularFeaturizer
logger = logging.getLogger(__name__)
ZINC_CHARSET = [
'#', ')', '(', '+', '-', '/', '1', '3', '2', '5', '4', '7', '6', '8', '=',
'@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'S', '[', ']', '\\', 'c', 'l', 'o',
'n', 'p', 's', 'r'
]
class OneHotFeaturizer(MolecularFeaturizer):
"""Encodes SMILES as a one-hot array.
This featurizer encodes SMILES string as a one-hot array.
Notes
-----
This class requires RDKit to be installed.
"""
def __init__(self, charset: List[str] = ZINC_CHARSET, max_length: int = 100):
"""Initialize featurizer.
Parameters
----------
charset: List[str], optional (default ZINC_CHARSET)
A list of strings, where each string is length 1 and unique.
max_length: int, optional (default 100)
The max length for SMILES string. If the length of SMILES string is
shorter than max_length, the SMILES is padded using space.
"""
try:
from rdkit import Chem # noqa
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if len(charset) != len(set(charset)):
raise ValueError("All values in charset must be unique.")
self.charset = charset
self.max_length = max_length
def _featurize(self, mol: RDKitMol) -> np.ndarray:
"""Compute one-hot featurization of this molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
      A one-hot array encoded from the SMILES string.
      The shape is `(max_length, len(charset) + 1)`.
      The index for an unknown character is `len(charset)`.
"""
from rdkit import Chem
smiles = Chem.MolToSmiles(mol)
# validation
if len(smiles) > self.max_length:
      logger.info(
          "The length of %s is longer than `max_length`. So we return an empty array.",
          smiles)
return np.array([])
smiles = self.pad_smile(smiles)
return np.array([
one_hot_encode(val, self.charset, include_unknown_set=True)
for val in smiles
])
def pad_smile(self, smiles: str) -> str:
"""Pad SMILES string to `self.pad_length`
Parameters
----------
smiles: str
The smiles string to be padded.
Returns
-------
str
      SMILES string space padded to self.max_length
"""
return smiles.ljust(self.max_length)
def untransform(self, one_hot_vectors: np.ndarray) -> str:
"""Convert from one hot representation back to SMILES
Parameters
----------
one_hot_vectors: np.ndarray
An array of one hot encoded features.
Returns
-------
str
      SMILES string for a one-hot encoded array.
"""
smiles = ""
for one_hot in one_hot_vectors:
try:
idx = np.argmax(one_hot)
smiles += self.charset[idx]
except IndexError:
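        # np.argmax hit the unknown-character index (len(charset)), which is
        # outside the charset, so unknown or padding positions decode to nothing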
smiles += ""
return smiles
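# A minimal usage sketch (illustrative only; assumes RDKit is installed). In
# normal use the public `featurize` method is called on a list of molecules;
# the private `_featurize` is shown here just to make the encoding shape explicit:
#
#   from rdkit import Chem
#   featurizer = OneHotFeaturizer(max_length=120)
#   one_hot = featurizer._featurize(Chem.MolFromSmiles('CCO'))
#   one_hot.shape                    # (120, len(ZINC_CHARSET) + 1)
#   featurizer.untransform(one_hot)  # 'CCO' (padding decodes to nothing)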
|
lilleswing/deepchem
|
deepchem/feat/molecule_featurizers/one_hot_featurizer.py
|
Python
|
mit
| 3,091
|
[
"RDKit"
] |
f90125e55956a05086304e529475997f360aec07001e0dd1297e8f92480e9977
|
"""MDTraj: A modern, open library for the analysis of molecular dynamics trajectories
MDTraj is a python library that allows users to manipulate molecular dynamics
(MD) trajectories and perform a variety of analyses, including fast RMSD,
solvent accessible surface area, hydrogen bonding, etc. A highlight of MDTraj
is the wide variety of molecular dynamics trajectory file formats which are
supported, including RCSB pdb, GROMACS xtc, tng, and trr, CHARMM / NAMD dcd, AMBER
binpos, AMBER NetCDF, AMBER mdcrd, TINKER arc and MDTraj HDF5.
"""
from __future__ import print_function, absolute_import
DOCLINES = __doc__.split("\n")
import os
import sys
from glob import glob
from setuptools import setup, Extension, find_packages
sys.path.insert(0, '.')
from basesetup import (write_version_py, build_ext,
StaticLibrary, CompilerDetection)
try:
import numpy
import Cython
if Cython.__version__ < '0.19':
raise ImportError
from Cython.Build import cythonize
except ImportError:
print('-'*80, file=sys.stderr)
print('''Error: building mdtraj requires numpy and cython>=0.19
Try running the command ``pip install numpy cython`` or
``conda install numpy cython``.
or see http://docs.scipy.org/doc/numpy/user/install.html and
http://cython.org/ for more information.
If you're feeling lost, we recommend downloading the (free) Anaconda python
distribution https://www.continuum.io/downloads, because it comes with
these components included.''', file=sys.stderr)
print('-'*80, file=sys.stderr)
sys.exit(1)
try:
# add an optional --disable-openmp to disable OpenMP support
sys.argv.remove('--disable-openmp')
disable_openmp = True
except ValueError:
disable_openmp = False
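# e.g. (illustrative invocation): `python setup.py build_ext --inplace --disable-openmp`
# removes the flag from sys.argv above and builds the extensions without OpenMP support.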
##########################
VERSION = "1.8.0.dev0"
ISRELEASED = False
__version__ = VERSION
##########################
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
# Global info about compiler
compiler = CompilerDetection(disable_openmp)
compiler.initialize()
extra_cpp_libraries = []
if sys.platform == 'darwin':
extra_cpp_libraries.append('stdc++')
os.environ['CXX'] = 'clang++'
os.environ['CC'] = 'clang'
if sys.platform == 'win32':
extra_cpp_libraries.append('Ws2_32')
# For determining if a path is relative (for dtr)
extra_cpp_libraries.append('Shlwapi')
################################################################################
# Declaration of the compiled extension modules (cython + c)
################################################################################
def format_extensions():
compiler_args = compiler.compiler_args_warn
xtc = Extension('mdtraj.formats.xtc',
sources=['mdtraj/formats/xtc/src/xdrfile.c',
'mdtraj/formats/xtc/src/xdr_seek.c',
'mdtraj/formats/xtc/src/xdrfile_xtc.c',
'mdtraj/formats/xtc/xtc.pyx',
],
include_dirs=['mdtraj/formats/xtc/include/',
'mdtraj/formats/xtc/'],
extra_compile_args=compiler_args)
trr = Extension('mdtraj.formats.trr',
sources=['mdtraj/formats/xtc/src/xdrfile.c',
'mdtraj/formats/xtc/src/xdr_seek.c',
'mdtraj/formats/xtc/src/xdrfile_trr.c',
'mdtraj/formats/xtc/trr.pyx'],
include_dirs=['mdtraj/formats/xtc/include/',
'mdtraj/formats/xtc/'],
extra_compile_args=compiler_args)
zlib_include_dirs = []
zlib_library_dirs = []
if sys.platform == 'win32':
# Conda puts the zlib headers in ./Library/... on windows
# If you're not using conda, good luck!
# (on linux, zlib is a dependency of python and its headers/libraries
# go in the normal ./include ./lib directories)
zlib_include_dirs += ["{}/Library/include".format(sys.prefix)]
zlib_library_dirs += ["{}/Library/lib".format(sys.prefix)]
tng = Extension('mdtraj.formats.tng',
sources=glob('mdtraj/formats/tng/src/compression/*.c') +
['mdtraj/formats/tng/src/lib/tng_io.c',
'mdtraj/formats/tng/src/lib/md5.c',
'mdtraj/formats/tng/tng.pyx'],
include_dirs=['mdtraj/formats/tng/include']
+ zlib_include_dirs,
define_macros=[('USE_ZLIB', 1)],
library_dirs=zlib_library_dirs,
libraries=['z'],
)
dcd = Extension('mdtraj.formats.dcd',
sources=['mdtraj/formats/dcd/src/dcdplugin.c',
'mdtraj/formats/dcd/dcd.pyx'],
include_dirs=["mdtraj/formats/dcd/include/",
'mdtraj/formats/dcd/'],
extra_compile_args=compiler_args)
binpos = Extension('mdtraj.formats.binpos',
sources=['mdtraj/formats/binpos/src/binposplugin.c',
'mdtraj/formats/binpos/binpos.pyx'],
include_dirs=['mdtraj/formats/binpos/include/',
'mdtraj/formats/binpos/'],
extra_compile_args=compiler_args)
dtr = Extension('mdtraj.formats.dtr',
sources=['mdtraj/formats/dtr/src/dtrplugin.cxx',
'mdtraj/formats/dtr/dtr.pyx'],
include_dirs=['mdtraj/formats/dtr/include/',
'mdtraj/formats/dtr/'],
define_macros=[('DESRES_READ_TIMESTEP2', 1)],
language='c++',
extra_compile_args=compiler_args,
libraries=extra_cpp_libraries)
return [xtc, trr, tng, dcd, binpos, dtr]
def rmsd_extensions():
compiler_args = (compiler.compiler_args_openmp + compiler.compiler_args_sse2 +
compiler.compiler_args_sse3 + compiler.compiler_args_opt +
compiler.compiler_args_warn)
compiler_libraries = compiler.compiler_libraries_openmp
libtheobald = StaticLibrary(
'mdtraj.core.lib.libtheobald',
sources=[
'mdtraj/rmsd/src/theobald_rmsd.c',
'mdtraj/rmsd/src/center.c'],
include_dirs=[
'mdtraj/rmsd/include'],
export_include=['mdtraj/rmsd/include/theobald_rmsd.h',
'mdtraj/rmsd/include/center.h'],
# don't enable OpenMP
extra_compile_args=(compiler.compiler_args_sse2 +
compiler.compiler_args_sse3 +
compiler.compiler_args_opt))
rmsd = Extension('mdtraj._rmsd',
sources=[
'mdtraj/rmsd/src/theobald_rmsd.c',
'mdtraj/rmsd/src/rotation.c',
'mdtraj/rmsd/src/center.c',
'mdtraj/rmsd/_rmsd.pyx'],
include_dirs=['mdtraj/rmsd/include'],
extra_compile_args=compiler_args,
libraries=compiler_libraries)
lprmsd = Extension('mdtraj._lprmsd',
sources=[
'mdtraj/rmsd/src/theobald_rmsd.c',
'mdtraj/rmsd/src/rotation.c',
'mdtraj/rmsd/src/center.c',
'mdtraj/rmsd/src/fancy_index.cpp',
'mdtraj/rmsd/src/Munkres.cpp',
'mdtraj/rmsd/src/euclidean_permutation.cpp',
'mdtraj/rmsd/_lprmsd.pyx'],
language='c++',
include_dirs=['mdtraj/rmsd/include'],
extra_compile_args=compiler_args,
libraries=compiler_libraries + extra_cpp_libraries)
return rmsd, lprmsd, libtheobald
def geometry_extensions():
compiler.initialize()
compiler_args = (compiler.compiler_args_sse2 + compiler.compiler_args_sse3 +
compiler.compiler_args_opt + compiler.compiler_args_warn)
define_macros = None
return [
Extension('mdtraj.geometry._geometry',
sources=['mdtraj/geometry/src/sasa.cpp',
'mdtraj/geometry/src/dssp.cpp',
'mdtraj/geometry/src/geometry.cpp',
'mdtraj/geometry/src/_geometry.pyx',],
include_dirs=['mdtraj/geometry/include',
'mdtraj/geometry/src/kernels'],
define_macros=define_macros,
extra_compile_args=compiler_args,
libraries=extra_cpp_libraries,
language='c++'),
Extension('mdtraj.geometry.drid',
sources=["mdtraj/geometry/drid.pyx",
"mdtraj/geometry/src/dridkernels.cpp",
"mdtraj/geometry/src/moments.cpp"],
include_dirs=["mdtraj/geometry/include"],
define_macros=define_macros,
extra_compile_args=compiler_args,
language='c++'),
Extension('mdtraj.geometry.neighbors',
sources=["mdtraj/geometry/neighbors.pyx",
"mdtraj/geometry/src/neighbors.cpp"],
include_dirs=["mdtraj/geometry/include",],
define_macros=define_macros,
extra_compile_args=compiler_args,
language='c++'),
Extension('mdtraj.geometry.neighborlist',
sources=["mdtraj/geometry/neighborlist.pyx",
"mdtraj/geometry/src/neighborlist.cpp"],
include_dirs=["mdtraj/geometry/include",],
define_macros=define_macros,
extra_compile_args=compiler_args+compiler.compiler_args_openmp,
libraries=compiler.compiler_libraries_openmp,
language='c++'),
]
extensions = format_extensions()
extensions.extend(rmsd_extensions())
extensions.extend(geometry_extensions())
write_version_py(VERSION, ISRELEASED, 'mdtraj/version.py')
setup(name='mdtraj',
author='Robert McGibbon',
author_email='rmcgibbo@gmail.com',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=__version__,
license='LGPLv2.1+',
url='http://mdtraj.org',
download_url = "https://github.com/rmcgibbo/mdtraj/releases/latest",
platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
cmdclass={'build_ext': build_ext},
ext_modules=cythonize(extensions),
      # setup_requires really doesn't work sufficiently well with `pip install`
# to use. See https://github.com/mdtraj/mdtraj/issues/984. Also
# setup_requires=['setuptools>=18', 'cython>=0.22', 'numpy>=1.6'],
# Also, install_requires is no better, especially with numpy.
# See http://article.gmane.org/gmane.comp.python.distutils.devel/24218
# install_requires=['numpy>=1.6'],
package_data={'mdtraj.formats.pdb': ['data/*'],
'mdtraj.testing': ['reference/*',
'reference/ala_dipeptide_trj/*',
'reference/ala_dipeptide_trj/not_hashed/*',
'reference/frame0.dtr/*',
'reference/frame0.dtr/not_hashed/*',],
'mdtraj.html': ['static/*']},
exclude_package_data={'mdtraj.testing': ['reference/ala_dipeptide_trj',
'reference/ala_dipeptide_trj/not_hashed',
'reference/frame0.dtr',
'reference/frame0.dtr/not_hashed',]},
zip_safe=False,
entry_points={'console_scripts':
['mdconvert = mdtraj.scripts.mdconvert:entry_point',
'mdinspect = mdtraj.scripts.mdinspect:entry_point']},
)
|
ctk3b/mdtraj
|
setup.py
|
Python
|
lgpl-2.1
| 12,594
|
[
"Amber",
"CHARMM",
"Gromacs",
"MDTraj",
"NAMD",
"NetCDF",
"TINKER"
] |
e93693a59c5cc7e0e6b38496c23774d5166bff906b61036ef8adda4141362ced
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Sherrill) of interaction energies for dissociation curves of rare-gas biatomic complexes.
| Geometries and reference interaction energies from Tang et al. JCP 118 4976 (2003).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
- ``'equilibrium'``
- ``'HeHe'`` 18-point dissociation curve for helium dimer
- ``'HeNe'`` 18-point dissociation curve for helium-neon complex
- ``'HeAr'`` 18-point dissociation curve for helium-argon complex
- ``'HeKr'`` 18-point dissociation curve for helium-krypton complex
- ``'NeNe'`` 18-point dissociation curve for neon dimer
- ``'NeAr'`` 18-point dissociation curve for neon-argon complex
- ``'NeKr'`` 18-point dissociation curve for neon-krypton complex
- ``'ArAr'`` 18-point dissociation curve for argon dimer
- ``'ArKr'`` 18-point dissociation curve for argon-krypton complex
- ``'KrKr'`` 18-point dissociation curve for krypton dimer
"""
import re
import qcdb
# <<< RGC10 Database Module >>>
dbse = 'RGC1'
# <<< Database Members >>>
HeHe = []
HeNe = []
HeAr = []
HeKr = []
NeNe = []
NeAr = []
NeKr = []
ArAr = []
ArKr = []
KrKr = []
dist = [0.85, 0.9, 0.95, 0.975, 1.0, 1.025, 1.05, 1.1, 1.15,
1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 2.0, 2.2]
for d in dist:
HeHe.append('HeHe-' + str(d))
HeNe.append('HeNe-' + str(d))
HeAr.append('HeAr-' + str(d))
HeKr.append('HeKr-' + str(d))
NeNe.append('NeNe-' + str(d))
NeAr.append('NeAr-' + str(d))
NeKr.append('NeKr-' + str(d))
ArAr.append('ArAr-' + str(d))
ArKr.append('ArKr-' + str(d))
KrKr.append('KrKr-' + str(d))
temp = [HeHe, HeNe, HeAr, HeKr, NeNe, NeAr, NeKr, ArAr, ArKr, KrKr]
HRXN = sum(temp, [])
HRXN_SM = ['NeNe-1.0', 'NeNe-1.1', 'NeAr-0.85']
HRXN_LG = ['KrKr-0.85']
HRXN_EQ = ['HeHe-1.0', 'HeNe-1.0', 'HeAr-1.0', 'HeKr-1.0', 'NeNe-1.0',
'NeAr-1.0', 'NeKr-1.0', 'ArAr-1.0', 'ArKr-1.0', 'KrKr-1.0']
Req = {}
Req['HeHe'] = 2.98
Req['HeNe'] = 3.05
Req['HeAr'] = 3.50
Req['HeKr'] = 3.70
Req['NeNe'] = 3.09
Req['NeAr'] = 3.48
Req['NeKr'] = 3.65
Req['ArAr'] = 3.75
Req['ArKr'] = 3.89
Req['KrKr'] = 4.01
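# Reading the labels (presumably, following the naming convention above): a
# reaction 'XY-d' places the rare-gas pair X-Y at d times its equilibrium
# separation Req['XY'] (in Angstroms), so e.g. 'ArAr-1.1' would be the argon
# dimer stretched to 1.1 * 3.75 A, with d = 1.0 marking the curve minimum.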
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
if rxn in HeHe:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -2,
'%s-He-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-He-mono-unCP' % (dbse) ]
elif rxn in HeNe:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-He-mono-unCP' % (dbse) : -1,
'%s-Ne-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-He-mono-unCP' % (dbse),
'%s-Ne-mono-unCP' % (dbse) ]
elif rxn in HeAr:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-He-mono-unCP' % (dbse) : -1,
'%s-Ar-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-He-mono-unCP' % (dbse),
'%s-Ar-mono-unCP' % (dbse) ]
elif rxn in HeKr:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-He-mono-unCP' % (dbse) : -1,
'%s-Kr-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-He-mono-unCP' % (dbse),
'%s-Kr-mono-unCP' % (dbse) ]
elif rxn in NeNe:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -2,
'%s-Ne-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Ne-mono-unCP' % (dbse) ]
elif rxn in NeAr:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-Ne-mono-unCP' % (dbse) : -1,
'%s-Ar-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Ne-mono-unCP' % (dbse),
'%s-Ar-mono-unCP' % (dbse) ]
elif rxn in NeKr:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-Ne-mono-unCP' % (dbse) : -1,
'%s-Kr-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Ne-mono-unCP' % (dbse),
'%s-Kr-mono-unCP' % (dbse) ]
elif rxn in ArAr:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -2,
'%s-Ar-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Ar-mono-unCP' % (dbse) ]
elif rxn in ArKr:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-Ar-mono-unCP' % (dbse) : -1,
'%s-Kr-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Ar-mono-unCP' % (dbse),
'%s-Kr-mono-unCP' % (dbse) ]
elif rxn in KrKr:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -2,
'%s-Kr-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Kr-mono-unCP' % (dbse) ]
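# A minimal sketch (illustrative only) of how these tables combine: the
# counterpoise-corrected interaction energy of a homodimer such as HeHe-1.0 is
# assembled from the ACTV_CP reagents weighted by their RXNM coefficients, i.e.
#
#   E_int(CP) = (+1) * E['RGC1-HeHe-1.0-dimer'] + (-2) * E['RGC1-HeHe-1.0-monoA-CP']
#
# while the uncorrected scheme (ACTV) swaps the ghosted monomer for the isolated
# one: E_int = E[dimer] - 2 * E['RGC1-He-mono-unCP'].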
# <<< Reference Values >>>
BIND = {}
BIND['%s-HeHe-0.85' % (dbse)] = 0.03759
BIND['%s-HeHe-0.9' % (dbse)] = -0.00449
BIND['%s-HeHe-0.95' % (dbse)] = -0.01905
BIND['%s-HeHe-0.975' % (dbse)] = -0.02135
BIND['%s-HeHe-1.0' % (dbse)] = -0.02188 # HeHe minimum
BIND['%s-HeHe-1.025' % (dbse)] = -0.02133
BIND['%s-HeHe-1.05' % (dbse)] = -0.02017
BIND['%s-HeHe-1.1' % (dbse)] = -0.01708
BIND['%s-HeHe-1.15' % (dbse)] = -0.01392
BIND['%s-HeHe-1.2' % (dbse)] = -0.01113
BIND['%s-HeHe-1.3' % (dbse)] = -0.00702
BIND['%s-HeHe-1.4' % (dbse)] = -0.00447
BIND['%s-HeHe-1.5' % (dbse)] = -0.00291
BIND['%s-HeHe-1.6' % (dbse)] = -0.00195
BIND['%s-HeHe-1.7' % (dbse)] = -0.00133
BIND['%s-HeHe-1.8' % (dbse)] = -0.00093
BIND['%s-HeHe-2.0' % (dbse)] = -0.00049
BIND['%s-HeHe-2.2' % (dbse)] = -0.00027
BIND['%s-HeNe-0.85' % (dbse)] = 0.08105
BIND['%s-HeNe-0.9' % (dbse)] = -0.00535
BIND['%s-HeNe-0.95' % (dbse)] = -0.03530
BIND['%s-HeNe-0.975' % (dbse)] = -0.04012
BIND['%s-HeNe-1.0' % (dbse)] = -0.04136 # HeNe minimum
BIND['%s-HeNe-1.025' % (dbse)] = -0.04043
BIND['%s-HeNe-1.05' % (dbse)] = -0.03825
BIND['%s-HeNe-1.1' % (dbse)] = -0.03236
BIND['%s-HeNe-1.15' % (dbse)] = -0.02629
BIND['%s-HeNe-1.2' % (dbse)] = -0.02097
BIND['%s-HeNe-1.3' % (dbse)] = -0.01315
BIND['%s-HeNe-1.4' % (dbse)] = -0.00832
BIND['%s-HeNe-1.5' % (dbse)] = -0.00540
BIND['%s-HeNe-1.6' % (dbse)] = -0.00359
BIND['%s-HeNe-1.7' % (dbse)] = -0.00246
BIND['%s-HeNe-1.8' % (dbse)] = -0.00172
BIND['%s-HeNe-2.0' % (dbse)] = -0.00089
BIND['%s-HeNe-2.2' % (dbse)] = -0.00049
BIND['%s-HeAr-0.85' % (dbse)] = 0.11196
BIND['%s-HeAr-0.9' % (dbse)] = -0.00862
BIND['%s-HeAr-0.95' % (dbse)] = -0.05048
BIND['%s-HeAr-0.975' % (dbse)] = -0.05720
BIND['%s-HeAr-1.0' % (dbse)] = -0.05889 # HeAr minimum
BIND['%s-HeAr-1.025' % (dbse)] = -0.05752
BIND['%s-HeAr-1.05' % (dbse)] = -0.05440
BIND['%s-HeAr-1.1' % (dbse)] = -0.04600
BIND['%s-HeAr-1.15' % (dbse)] = -0.03735
BIND['%s-HeAr-1.2' % (dbse)] = -0.02977
BIND['%s-HeAr-1.3' % (dbse)] = -0.01862
BIND['%s-HeAr-1.4' % (dbse)] = -0.01176
BIND['%s-HeAr-1.5' % (dbse)] = -0.00760
BIND['%s-HeAr-1.6' % (dbse)] = -0.00505
BIND['%s-HeAr-1.7' % (dbse)] = -0.00344
BIND['%s-HeAr-1.8' % (dbse)] = -0.00240
BIND['%s-HeAr-2.0' % (dbse)] = -0.00124
BIND['%s-HeAr-2.2' % (dbse)] = -0.00069
BIND['%s-HeKr-0.85' % (dbse)] = 0.11043
BIND['%s-HeKr-0.9' % (dbse)] = -0.01063
BIND['%s-HeKr-0.95' % (dbse)] = -0.05251
BIND['%s-HeKr-0.975' % (dbse)] = -0.05914
BIND['%s-HeKr-1.0' % (dbse)] = -0.06071 # HeKr minimum
BIND['%s-HeKr-1.025' % (dbse)] = -0.05919
BIND['%s-HeKr-1.05' % (dbse)] = -0.05592
BIND['%s-HeKr-1.1' % (dbse)] = -0.04721
BIND['%s-HeKr-1.15' % (dbse)] = -0.03830
BIND['%s-HeKr-1.2' % (dbse)] = -0.03050
BIND['%s-HeKr-1.3' % (dbse)] = -0.01904
BIND['%s-HeKr-1.4' % (dbse)] = -0.01201
BIND['%s-HeKr-1.5' % (dbse)] = -0.00775
BIND['%s-HeKr-1.6' % (dbse)] = -0.00514
BIND['%s-HeKr-1.7' % (dbse)] = -0.00350
BIND['%s-HeKr-1.8' % (dbse)] = -0.00244
BIND['%s-HeKr-2.0' % (dbse)] = -0.00126
BIND['%s-HeKr-2.2' % (dbse)] = -0.00070
BIND['%s-NeNe-0.85' % (dbse)] = 0.16931
BIND['%s-NeNe-0.9' % (dbse)] = -0.00949
BIND['%s-NeNe-0.95' % (dbse)] = -0.07154
BIND['%s-NeNe-0.975' % (dbse)] = -0.08158
BIND['%s-NeNe-1.0' % (dbse)] = -0.08420 # NeNe minimum
BIND['%s-NeNe-1.025' % (dbse)] = -0.08233
BIND['%s-NeNe-1.05' % (dbse)] = -0.07789
BIND['%s-NeNe-1.1' % (dbse)] = -0.06582
BIND['%s-NeNe-1.15' % (dbse)] = -0.05340
BIND['%s-NeNe-1.2' % (dbse)] = -0.04251
BIND['%s-NeNe-1.3' % (dbse)] = -0.02653
BIND['%s-NeNe-1.4' % (dbse)] = -0.01673
BIND['%s-NeNe-1.5' % (dbse)] = -0.01081
BIND['%s-NeNe-1.6' % (dbse)] = -0.00718
BIND['%s-NeNe-1.7' % (dbse)] = -0.00489
BIND['%s-NeNe-1.8' % (dbse)] = -0.00341
BIND['%s-NeNe-2.0' % (dbse)] = -0.00176
BIND['%s-NeNe-2.2' % (dbse)] = -0.00098
BIND['%s-NeAr-0.85' % (dbse)] = 0.26334
BIND['%s-NeAr-0.9' % (dbse)] = -0.01713
BIND['%s-NeAr-0.95' % (dbse)] = -0.11367
BIND['%s-NeAr-0.975' % (dbse)] = -0.12900
BIND['%s-NeAr-1.0' % (dbse)] = -0.13273 # NeAr minimum
BIND['%s-NeAr-1.025' % (dbse)] = -0.12947
BIND['%s-NeAr-1.05' % (dbse)] = -0.12224
BIND['%s-NeAr-1.1' % (dbse)] = -0.10295
BIND['%s-NeAr-1.15' % (dbse)] = -0.08326
BIND['%s-NeAr-1.2' % (dbse)] = -0.06610
BIND['%s-NeAr-1.3' % (dbse)] = -0.04105
BIND['%s-NeAr-1.4' % (dbse)] = -0.02577
BIND['%s-NeAr-1.5' % (dbse)] = -0.01659
BIND['%s-NeAr-1.6' % (dbse)] = -0.01098
BIND['%s-NeAr-1.7' % (dbse)] = -0.00746
BIND['%s-NeAr-1.8' % (dbse)] = -0.00519
BIND['%s-NeAr-2.0' % (dbse)] = -0.00267
BIND['%s-NeAr-2.2' % (dbse)] = -0.00148
BIND['%s-NeKr-0.85' % (dbse)] = 0.26707
BIND['%s-NeKr-0.9' % (dbse)] = -0.02063
BIND['%s-NeKr-0.95' % (dbse)] = -0.12057
BIND['%s-NeKr-0.975' % (dbse)] = -0.13659
BIND['%s-NeKr-1.0' % (dbse)] = -0.14056 # NeKr minimum
BIND['%s-NeKr-1.025' % (dbse)] = -0.13722
BIND['%s-NeKr-1.05' % (dbse)] = -0.12969
BIND['%s-NeKr-1.1' % (dbse)] = -0.10946
BIND['%s-NeKr-1.15' % (dbse)] = -0.08868
BIND['%s-NeKr-1.2' % (dbse)] = -0.07049
BIND['%s-NeKr-1.3' % (dbse)] = -0.04382
BIND['%s-NeKr-1.4' % (dbse)] = -0.02751
BIND['%s-NeKr-1.5' % (dbse)] = -0.01769
BIND['%s-NeKr-1.6' % (dbse)] = -0.01169
BIND['%s-NeKr-1.7' % (dbse)] = -0.00793
BIND['%s-NeKr-1.8' % (dbse)] = -0.00551
BIND['%s-NeKr-2.0' % (dbse)] = -0.00284
BIND['%s-NeKr-2.2' % (dbse)] = -0.00156
BIND['%s-ArAr-0.85' % (dbse)] = 0.63637
BIND['%s-ArAr-0.9' % (dbse)] = -0.01138
BIND['%s-ArAr-0.95' % (dbse)] = -0.23729
BIND['%s-ArAr-0.975' % (dbse)] = -0.27458
BIND['%s-ArAr-1.0' % (dbse)] = -0.28517 # ArAr minimum
BIND['%s-ArAr-1.025' % (dbse)] = -0.27957
BIND['%s-ArAr-1.05' % (dbse)] = -0.26471
BIND['%s-ArAr-1.1' % (dbse)] = -0.22350
BIND['%s-ArAr-1.15' % (dbse)] = -0.18080
BIND['%s-ArAr-1.2' % (dbse)] = -0.14343
BIND['%s-ArAr-1.3' % (dbse)] = -0.08883
BIND['%s-ArAr-1.4' % (dbse)] = -0.05560
BIND['%s-ArAr-1.5' % (dbse)] = -0.03568
BIND['%s-ArAr-1.6' % (dbse)] = -0.02355
BIND['%s-ArAr-1.7' % (dbse)] = -0.01596
BIND['%s-ArAr-1.8' % (dbse)] = -0.01109
BIND['%s-ArAr-2.0' % (dbse)] = -0.00570
BIND['%s-ArAr-2.2' % (dbse)] = -0.00314
BIND['%s-ArKr-0.85' % (dbse)] = 0.69499
BIND['%s-ArKr-0.9' % (dbse)] = -0.02873
BIND['%s-ArKr-0.95' % (dbse)] = -0.28040
BIND['%s-ArKr-0.975' % (dbse)] = -0.32132
BIND['%s-ArKr-1.0' % (dbse)] = -0.33222 # ArKr minimum
BIND['%s-ArKr-1.025' % (dbse)] = -0.32494
BIND['%s-ArKr-1.05' % (dbse)] = -0.30726
BIND['%s-ArKr-1.1' % (dbse)] = -0.25907
BIND['%s-ArKr-1.15' % (dbse)] = -0.20945
BIND['%s-ArKr-1.2' % (dbse)] = -0.16607
BIND['%s-ArKr-1.3' % (dbse)] = -0.10275
BIND['%s-ArKr-1.4' % (dbse)] = -0.06422
BIND['%s-ArKr-1.5' % (dbse)] = -0.04115
BIND['%s-ArKr-1.6' % (dbse)] = -0.02711
BIND['%s-ArKr-1.7' % (dbse)] = -0.01836
BIND['%s-ArKr-1.8' % (dbse)] = -0.01274
BIND['%s-ArKr-2.0' % (dbse)] = -0.00653
BIND['%s-ArKr-2.2' % (dbse)] = -0.00359
BIND['%s-KrKr-0.85' % (dbse)] = 0.81252
BIND['%s-KrKr-0.9' % (dbse)] = -0.04237
BIND['%s-KrKr-0.95' % (dbse)] = -0.34024
BIND['%s-KrKr-0.975' % (dbse)] = -0.38858
BIND['%s-KrKr-1.0' % (dbse)] = -0.40124 # KrKr minimum
BIND['%s-KrKr-1.025' % (dbse)] = -0.39225
BIND['%s-KrKr-1.05' % (dbse)] = -0.37085
BIND['%s-KrKr-1.1' % (dbse)] = -0.31271
BIND['%s-KrKr-1.15' % (dbse)] = -0.25283
BIND['%s-KrKr-1.2' % (dbse)] = -0.20046
BIND['%s-KrKr-1.3' % (dbse)] = -0.12394
BIND['%s-KrKr-1.4' % (dbse)] = -0.07736
BIND['%s-KrKr-1.5' % (dbse)] = -0.04949
BIND['%s-KrKr-1.6' % (dbse)] = -0.03256
BIND['%s-KrKr-1.7' % (dbse)] = -0.02201
BIND['%s-KrKr-1.8' % (dbse)] = -0.01525
BIND['%s-KrKr-2.0' % (dbse)] = -0.00781
BIND['%s-KrKr-2.2' % (dbse)] = -0.00429
# <<< Comment Lines >>>
TAGL = {}
rxnpattern = re.compile(r'^(.+)-(.+)$')
for item in HeHe:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Helium Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Helium Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Helium from Helium Dimer at %s Req' % (molname.group(2))
for item in HeNe:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Helium-Neon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Helium-Neon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Helium from Helium-Neon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Neon from Helium-Neon Complex at %s Req' % (molname.group(2))
for item in HeAr:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Helium-Argon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Helium-Argon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Helium from Helium-Argon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Argon from Helium-Argon Complex at %s Req' % (molname.group(2))
for item in HeKr:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Helium-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Helium-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Helium from Helium-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Krypton from Helium-Krypton Complex at %s Req' % (molname.group(2))
for item in NeNe:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Neon Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Neon Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Neon from Neon Dimer at %s Req' % (molname.group(2))
for item in NeAr:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Neon-Argon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Neon-Argon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Neon from Neon-Argon Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Argon from Neon-Argon Complex at %s Req' % (molname.group(2))
for item in NeKr:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Neon-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Neon-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Neon from Neon-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Krypton from Neon-Krypton Complex at %s Req' % (molname.group(2))
for item in ArAr:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Argon Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Argon Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Argon from Argon Dimer at %s Req' % (molname.group(2))
for item in ArKr:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Argon-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Argon-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Argon from Argon-Krypton Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Krypton from Argon-Krypton Complex at %s Req' % (molname.group(2))
for item in KrKr:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Krypton Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Krypton Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Krypton from Krypton Dimer at %s Req' % (molname.group(2))
TAGL['%s-He-mono-unCP' % (dbse)] = 'Helium Atom'
TAGL['%s-Ne-mono-unCP' % (dbse)] = 'Neon Atom'
TAGL['%s-Ar-mono-unCP' % (dbse)] = 'Argon Atom'
TAGL['%s-Kr-mono-unCP' % (dbse)] = 'Krypton Atom'
# <<< Geometry Specification Strings >>>
GEOS = {}
rxnpattern2 = re.compile(r'^(..)(..)-(.+)$')
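# Build dimer geometries: each reaction label (e.g. 'ArKr-1.2') is split into the
# two atomic symbols and a distance scale factor, and the interatomic separation
# is that factor times the pair's equilibrium distance Req[dm].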
for rxn in HRXN:
molname = rxnpattern2.match(rxn)
m1 = molname.group(1)
m2 = molname.group(2)
dm = m1 + m2
Rscal = molname.group(3)
Rval = float(Rscal) * Req[dm]
GEOS['%s-%s-%s-%s' % (dbse, dm, Rscal, 'dimer')] = qcdb.Molecule("""
0 1
%(m1)s 0.0 0.0 0.0
--
0 1
%(m2)s 0.0 0.0 R
R = %(Rval)s
units angstrom
""" % vars())
for item in ['He', 'Ne', 'Ar', 'Kr']:
GEOS['%s-%s-%s' % (dbse, item, 'mono-unCP')] = qcdb.Molecule("""
0 1
%(item)s 0.0 0.0 0.0
units angstrom
""" % vars())
# <<< Derived Geometry Strings >>>
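# Counterpoise monomers are carved from the dimer geometries below; as understood
# here, extract_fragments(1, 2) keeps fragment 1 as real atoms with fragment 2
# ghosted, and extract_fragments(2, 1) gives the complementary monoB geometry.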
for rxn in HRXN:
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.85-dimer' ] = 0.83565292
DATA['NUCLEAR REPULSION ENERGY']['RGC1-He-mono-unCP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.9-dimer' ] = 0.78922775
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.95-dimer' ] = 0.74768945
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.975-dimer' ] = 0.72851793
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.0-dimer' ] = 0.71030498
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.025-dimer' ] = 0.69298047
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.05-dimer' ] = 0.67648093
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.1-dimer' ] = 0.64573180
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.15-dimer' ] = 0.61765650
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.2-dimer' ] = 0.59192081
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.3-dimer' ] = 0.54638844
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.4-dimer' ] = 0.50736070
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.5-dimer' ] = 0.47353665
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.6-dimer' ] = 0.44394061
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.7-dimer' ] = 0.41782646
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.8-dimer' ] = 0.39461388
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-2.0-dimer' ] = 0.35515249
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-2.2-dimer' ] = 0.32286590
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.85-dimer' ] = 4.08236998
DATA['NUCLEAR REPULSION ENERGY']['RGC1-Ne-mono-unCP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.9-dimer' ] = 3.85557165
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.95-dimer' ] = 3.65264682
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.975-dimer' ] = 3.55898921
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.0-dimer' ] = 3.47001448
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.025-dimer' ] = 3.38537998
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.05-dimer' ] = 3.30477570
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.1-dimer' ] = 3.15455862
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.15-dimer' ] = 3.01740390
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.2-dimer' ] = 2.89167874
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.3-dimer' ] = 2.66924191
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.4-dimer' ] = 2.47858177
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.5-dimer' ] = 2.31334299
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.6-dimer' ] = 2.16875905
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.7-dimer' ] = 2.04118499
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.8-dimer' ] = 1.92778582
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.0-dimer' ] = 1.73500724
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.2-dimer' ] = 1.57727931
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.85-dimer' ] = 6.40348891
DATA['NUCLEAR REPULSION ENERGY']['RGC1-Ar-mono-unCP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.9-dimer' ] = 6.04773953
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.95-dimer' ] = 5.72943745
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.975-dimer' ] = 5.58252879
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.0-dimer' ] = 5.44296557
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.025-dimer' ] = 5.31021032
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.05-dimer' ] = 5.18377674
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.1-dimer' ] = 4.94815052
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.15-dimer' ] = 4.73301354
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.2-dimer' ] = 4.53580465
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.3-dimer' ] = 4.18689660
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.4-dimer' ] = 3.88783255
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.5-dimer' ] = 3.62864372
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.6-dimer' ] = 3.40185348
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.7-dimer' ] = 3.20174446
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.8-dimer' ] = 3.02386976
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.0-dimer' ] = 2.72148279
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.2-dimer' ] = 2.47407526
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.85-dimer' ] = 12.11470875
DATA['NUCLEAR REPULSION ENERGY']['RGC1-Kr-mono-unCP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.9-dimer' ] = 11.44166937
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.95-dimer' ] = 10.83947625
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.975-dimer' ] = 10.56154096
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.0-dimer' ] = 10.29750244
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.025-dimer' ] = 10.04634384
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.05-dimer' ] = 9.80714518
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.1-dimer' ] = 9.36136585
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.15-dimer' ] = 8.95434995
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.2-dimer' ] = 8.58125203
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.3-dimer' ] = 7.92115572
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.4-dimer' ] = 7.35535888
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.5-dimer' ] = 6.86500162
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.6-dimer' ] = 6.43593902
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.7-dimer' ] = 6.05735437
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.8-dimer' ] = 5.72083469
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.0-dimer' ] = 5.14875122
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.2-dimer' ] = 4.68068293
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.85-dimer' ] = 20.14761883
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.9-dimer' ] = 19.02830667
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.95-dimer' ] = 18.02681685
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.975-dimer' ] = 17.56459078
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.0-dimer' ] = 17.12547601
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.025-dimer' ] = 16.70778147
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.05-dimer' ] = 16.30997715
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.1-dimer' ] = 15.56861455
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.15-dimer' ] = 14.89171827
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.2-dimer' ] = 14.27123001
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.3-dimer' ] = 13.17344308
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.4-dimer' ] = 12.23248286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.5-dimer' ] = 11.41698400
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.6-dimer' ] = 10.70342250
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.7-dimer' ] = 10.07380942
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.8-dimer' ] = 9.51415334
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-2.0-dimer' ] = 8.56273800
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-2.2-dimer' ] = 7.78430728
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.85-dimer' ] = 32.20145286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.9-dimer' ] = 30.41248325
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.95-dimer' ] = 28.81182624
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.975-dimer' ] = 28.07306146
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.0-dimer' ] = 27.37123493
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.025-dimer' ] = 26.70364383
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.05-dimer' ] = 26.06784279
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.1-dimer' ] = 24.88294084
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.15-dimer' ] = 23.80107385
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.2-dimer' ] = 22.80936244
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.3-dimer' ] = 21.05479610
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.4-dimer' ] = 19.55088209
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.5-dimer' ] = 18.24748995
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.6-dimer' ] = 17.10702183
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.7-dimer' ] = 16.10072643
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.8-dimer' ] = 15.20624163
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.0-dimer' ] = 13.68561746
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.2-dimer' ] = 12.44147042
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.85-dimer' ] = 61.40331832
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.9-dimer' ] = 57.99202286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.95-dimer' ] = 54.93981113
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.975-dimer' ] = 53.53109802
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.0-dimer' ] = 52.19282057
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.025-dimer' ] = 50.91982495
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.05-dimer' ] = 49.70744817
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.1-dimer' ] = 47.44801870
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.15-dimer' ] = 45.38506137
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.2-dimer' ] = 43.49401714
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.3-dimer' ] = 40.14832352
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.4-dimer' ] = 37.28058612
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.5-dimer' ] = 34.79521372
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.6-dimer' ] = 32.62051286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.7-dimer' ] = 30.70165916
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.8-dimer' ] = 28.99601143
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.0-dimer' ] = 26.09641029
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.2-dimer' ] = 23.72400935
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.85-dimer' ] = 53.78930685
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.9-dimer' ] = 50.80101202
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.95-dimer' ] = 48.12727455
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.975-dimer' ] = 46.89324187
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.0-dimer' ] = 45.72091082
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.025-dimer' ] = 44.60576666
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.05-dimer' ] = 43.54372459
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.1-dimer' ] = 41.56446438
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.15-dimer' ] = 39.75731376
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.2-dimer' ] = 38.10075902
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.3-dimer' ] = 35.16993140
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.4-dimer' ] = 32.65779344
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.5-dimer' ] = 30.48060721
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.6-dimer' ] = 28.57556926
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.7-dimer' ] = 26.89465342
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.8-dimer' ] = 25.40050601
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-2.0-dimer' ] = 22.86045541
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-2.2-dimer' ] = 20.78223219
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.85-dimer' ] = 103.70688981
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.9-dimer' ] = 97.94539593
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.95-dimer' ] = 92.79037510
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.975-dimer' ] = 90.41113471
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.0-dimer' ] = 88.15085634
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.025-dimer' ] = 86.00083545
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.05-dimer' ] = 83.95319652
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.1-dimer' ] = 80.13714213
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.15-dimer' ] = 76.65291856
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.2-dimer' ] = 73.45904695
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.3-dimer' ] = 67.80835103
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.4-dimer' ] = 62.96489739
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.5-dimer' ] = 58.76723756
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.6-dimer' ] = 55.09428521
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.7-dimer' ] = 51.85344491
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.8-dimer' ] = 48.97269797
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.0-dimer' ] = 44.07542817
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.2-dimer' ] = 40.06857106
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.85-dimer' ] = 201.20688348
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.9-dimer' ] = 190.02872328
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.95-dimer' ] = 180.02721153
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.975-dimer' ] = 175.41112919
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.0-dimer' ] = 171.02585096
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.025-dimer' ] = 166.85448874
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.05-dimer' ] = 162.88176282
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.1-dimer' ] = 155.47804632
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.15-dimer' ] = 148.71813127
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.2-dimer' ] = 142.52154246
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.3-dimer' ] = 131.55834689
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.4-dimer' ] = 122.16132211
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.5-dimer' ] = 114.01723397
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.6-dimer' ] = 106.89115685
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.7-dimer' ] = 100.60344174
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.8-dimer' ] = 95.01436164
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-2.0-dimer' ] = 85.51292548
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-2.2-dimer' ] = 77.73902316
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.85-dimer' ] = 0.83565292
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.9-dimer' ] = 0.78922775
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.95-dimer' ] = 0.74768945
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.975-dimer' ] = 0.72851793
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.0-dimer' ] = 0.71030498
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.025-dimer' ] = 0.69298047
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.05-dimer' ] = 0.67648093
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.1-dimer' ] = 0.64573180
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.15-dimer' ] = 0.61765650
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.2-dimer' ] = 0.59192081
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.3-dimer' ] = 0.54638844
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.4-dimer' ] = 0.50736070
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.5-dimer' ] = 0.47353665
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.6-dimer' ] = 0.44394061
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.7-dimer' ] = 0.41782646
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.8-dimer' ] = 0.39461388
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-2.0-dimer' ] = 0.35515249
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-2.2-dimer' ] = 0.32286590
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeHe-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.85-dimer' ] = 4.08236998
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.85-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.9-dimer' ] = 3.85557165
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.9-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.95-dimer' ] = 3.65264682
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.95-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.975-dimer' ] = 3.55898921
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-0.975-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.0-dimer' ] = 3.47001448
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.025-dimer' ] = 3.38537998
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.025-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.05-dimer' ] = 3.30477570
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.05-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.1-dimer' ] = 3.15455862
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.1-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.15-dimer' ] = 3.01740390
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.15-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.2-dimer' ] = 2.89167874
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.3-dimer' ] = 2.66924191
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.3-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.4-dimer' ] = 2.47858177
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.4-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.5-dimer' ] = 2.31334299
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.5-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.6-dimer' ] = 2.16875905
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.6-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.7-dimer' ] = 2.04118499
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.7-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.8-dimer' ] = 1.92778582
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-1.8-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.0-dimer' ] = 1.73500724
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.2-dimer' ] = 1.57727931
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeNe-2.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.85-dimer' ] = 6.40348891
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.85-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.9-dimer' ] = 6.04773953
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.9-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.95-dimer' ] = 5.72943745
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.95-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.975-dimer' ] = 5.58252879
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-0.975-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.0-dimer' ] = 5.44296557
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.025-dimer' ] = 5.31021032
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.025-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.05-dimer' ] = 5.18377674
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.05-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.1-dimer' ] = 4.94815052
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.1-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.15-dimer' ] = 4.73301354
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.15-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.2-dimer' ] = 4.53580465
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.3-dimer' ] = 4.18689660
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.3-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.4-dimer' ] = 3.88783255
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.4-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.5-dimer' ] = 3.62864372
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.5-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.6-dimer' ] = 3.40185348
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.6-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.7-dimer' ] = 3.20174446
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.7-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.8-dimer' ] = 3.02386976
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-1.8-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.0-dimer' ] = 2.72148279
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.2-dimer' ] = 2.47407526
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeAr-2.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.85-dimer' ] = 12.11470875
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.85-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.9-dimer' ] = 11.44166937
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.9-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.95-dimer' ] = 10.83947625
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.95-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.975-dimer' ] = 10.56154096
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-0.975-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.0-dimer' ] = 10.29750244
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.025-dimer' ] = 10.04634384
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.025-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.05-dimer' ] = 9.80714518
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.05-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.1-dimer' ] = 9.36136585
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.1-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.15-dimer' ] = 8.95434995
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.15-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.2-dimer' ] = 8.58125203
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.3-dimer' ] = 7.92115572
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.3-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.4-dimer' ] = 7.35535888
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.4-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.5-dimer' ] = 6.86500162
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.5-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.6-dimer' ] = 6.43593902
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.6-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.7-dimer' ] = 6.05735437
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.7-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.8-dimer' ] = 5.72083469
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-1.8-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.0-dimer' ] = 5.14875122
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.2-dimer' ] = 4.68068293
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-HeKr-2.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.85-dimer' ] = 20.14761883
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.9-dimer' ] = 19.02830667
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.95-dimer' ] = 18.02681685
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.975-dimer' ] = 17.56459078
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.0-dimer' ] = 17.12547601
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.025-dimer' ] = 16.70778147
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.05-dimer' ] = 16.30997715
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.1-dimer' ] = 15.56861455
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.15-dimer' ] = 14.89171827
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.2-dimer' ] = 14.27123001
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.3-dimer' ] = 13.17344308
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.4-dimer' ] = 12.23248286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.5-dimer' ] = 11.41698400
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.6-dimer' ] = 10.70342250
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.7-dimer' ] = 10.07380942
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.8-dimer' ] = 9.51415334
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-2.0-dimer' ] = 8.56273800
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-2.2-dimer' ] = 7.78430728
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeNe-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.85-dimer' ] = 32.20145286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.85-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.9-dimer' ] = 30.41248325
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.9-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.95-dimer' ] = 28.81182624
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.95-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.975-dimer' ] = 28.07306146
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-0.975-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.0-dimer' ] = 27.37123493
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.025-dimer' ] = 26.70364383
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.025-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.05-dimer' ] = 26.06784279
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.05-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.1-dimer' ] = 24.88294084
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.1-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.15-dimer' ] = 23.80107385
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.15-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.2-dimer' ] = 22.80936244
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.3-dimer' ] = 21.05479610
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.3-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.4-dimer' ] = 19.55088209
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.4-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.5-dimer' ] = 18.24748995
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.5-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.6-dimer' ] = 17.10702183
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.6-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.7-dimer' ] = 16.10072643
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.7-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.8-dimer' ] = 15.20624163
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-1.8-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.0-dimer' ] = 13.68561746
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.2-dimer' ] = 12.44147042
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeAr-2.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.85-dimer' ] = 61.40331832
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.85-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.9-dimer' ] = 57.99202286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.9-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.95-dimer' ] = 54.93981113
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.95-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.975-dimer' ] = 53.53109802
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-0.975-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.0-dimer' ] = 52.19282057
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.025-dimer' ] = 50.91982495
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.025-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.05-dimer' ] = 49.70744817
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.05-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.1-dimer' ] = 47.44801870
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.1-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.15-dimer' ] = 45.38506137
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.15-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.2-dimer' ] = 43.49401714
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.3-dimer' ] = 40.14832352
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.3-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.4-dimer' ] = 37.28058612
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.4-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.5-dimer' ] = 34.79521372
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.5-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.6-dimer' ] = 32.62051286
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.6-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.7-dimer' ] = 30.70165916
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.7-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.8-dimer' ] = 28.99601143
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-1.8-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.0-dimer' ] = 26.09641029
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.2-dimer' ] = 23.72400935
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-NeKr-2.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.85-dimer' ] = 53.78930685
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.9-dimer' ] = 50.80101202
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.95-dimer' ] = 48.12727455
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.975-dimer' ] = 46.89324187
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.0-dimer' ] = 45.72091082
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.025-dimer' ] = 44.60576666
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.05-dimer' ] = 43.54372459
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.1-dimer' ] = 41.56446438
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.15-dimer' ] = 39.75731376
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.2-dimer' ] = 38.10075902
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.3-dimer' ] = 35.16993140
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.4-dimer' ] = 32.65779344
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.5-dimer' ] = 30.48060721
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.6-dimer' ] = 28.57556926
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.7-dimer' ] = 26.89465342
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.8-dimer' ] = 25.40050601
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-2.0-dimer' ] = 22.86045541
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-2.2-dimer' ] = 20.78223219
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArAr-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.85-dimer' ] = 103.70688981
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.85-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.9-dimer' ] = 97.94539593
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.9-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.95-dimer' ] = 92.79037510
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.95-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.975-dimer' ] = 90.41113471
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-0.975-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.0-dimer' ] = 88.15085634
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.025-dimer' ] = 86.00083545
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.025-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.05-dimer' ] = 83.95319652
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.05-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.1-dimer' ] = 80.13714213
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.1-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.15-dimer' ] = 76.65291856
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.15-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.2-dimer' ] = 73.45904695
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.3-dimer' ] = 67.80835103
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.3-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.4-dimer' ] = 62.96489739
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.4-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.5-dimer' ] = 58.76723756
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.5-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.6-dimer' ] = 55.09428521
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.6-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.7-dimer' ] = 51.85344491
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.7-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.8-dimer' ] = 48.97269797
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-1.8-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.0-dimer' ] = 44.07542817
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.0-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.2-dimer' ] = 40.06857106
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-ArKr-2.2-monoB-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.85-dimer' ] = 201.20688348
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.85-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.9-dimer' ] = 190.02872328
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.9-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.95-dimer' ] = 180.02721153
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.95-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.975-dimer' ] = 175.41112919
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-0.975-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.0-dimer' ] = 171.02585096
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.025-dimer' ] = 166.85448874
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.025-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.05-dimer' ] = 162.88176282
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.05-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.1-dimer' ] = 155.47804632
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.1-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.15-dimer' ] = 148.71813127
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.15-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.2-dimer' ] = 142.52154246
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.2-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.3-dimer' ] = 131.55834689
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.3-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.4-dimer' ] = 122.16132211
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.4-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.5-dimer' ] = 114.01723397
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.5-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.6-dimer' ] = 106.89115685
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.6-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.7-dimer' ] = 100.60344174
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.7-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.8-dimer' ] = 95.01436164
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-1.8-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-2.0-dimer' ] = 85.51292548
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-2.0-monoA-CP' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-2.2-dimer' ] = 77.73902316
DATA['NUCLEAR REPULSION ENERGY']['RGC1-KrKr-2.2-monoA-CP' ] = 0.00000000
|
kratman/psi4public
|
psi4/share/psi4/databases/RGC10.py
|
Python
|
gpl-2.0
| 81,975
|
[
"Psi4"
] |
87919b9911912253c6a65080cb3c5651c68460b6ceda252e4a96c892ea9d6d59
|
# -*- coding: utf-8 -*-
# Tested on Markdown 2.3.1
#
# Copyright (c) 2014, Esteban Castro Borsani
# Copyright (c) 2014, Jesús Espino García
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from django.templatetags.static import static
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
# Grab the emojis (+800) here: https://github.com/arvida/emoji-cheat-sheet.com
# This **crazy long** list was generated by walking through the emojis.png
EMOJIS_PATH = "img/emojis/"
EMOJIS_SET = {
"+1", "-1", "100", "1234", "8ball", "a", "ab", "abc", "abcd", "accept", "aerial_tramway", "airplane",
"alarm_clock", "alien", "ambulance", "anchor", "angel", "anger", "angry", "anguished", "ant", "apple",
"aquarius", "aries", "arrows_clockwise", "arrows_counterclockwise", "arrow_backward", "arrow_double_down",
"arrow_double_up", "arrow_down", "arrow_down_small", "arrow_forward", "arrow_heading_down", "arrow_heading_up",
"arrow_left", "arrow_lower_left", "arrow_lower_right", "arrow_right", "arrow_right_hook", "arrow_up",
"arrow_upper_left", "arrow_upper_right", "arrow_up_down", "arrow_up_small", "art", "articulated_lorry",
"astonished", "atm", "b", "baby", "baby_bottle", "baby_chick", "baby_symbol", "baggage_claim", "balloon",
"ballot_box_with_check", "bamboo", "banana", "bangbang", "bank", "barber", "bar_chart", "baseball", "basketball",
"bath", "bathtub", "battery", "bear", "bee", "beer", "beers", "beetle", "beginner", "bell", "bento", "bicyclist",
"bike", "bikini", "bird", "birthday", "black_circle", "black_joker", "black_nib", "black_square",
"black_square_button", "blossom", "blowfish", "blue_book", "blue_car", "blue_heart", "blush", "boar", "boat",
"bomb", "book", "bookmark", "bookmark_tabs", "books", "boom", "boot", "bouquet", "bow", "bowling", "bowtie",
"boy", "bread", "bride_with_veil", "bridge_at_night", "briefcase", "broken_heart", "bug", "bulb",
"bullettrain_front", "bullettrain_side", "bus", "busstop", "busts_in_silhouette", "bust_in_silhouette",
"cactus", "cake", "calendar", "calling", "camel", "camera", "cancer", "candy", "capital_abcd", "capricorn",
"car", "card_index", "carousel_horse", "cat", "cat2", "cd", "chart", "chart_with_downwards_trend",
"chart_with_upwards_trend", "checkered_flag", "cherries", "cherry_blossom", "chestnut", "chicken",
"children_crossing", "chocolate_bar", "christmas_tree", "church", "cinema", "circus_tent", "city_sunrise",
"city_sunset", "cl", "clap", "clapper", "clipboard", "clock1", "clock10", "clock1030", "clock11", "clock1130",
"clock12", "clock1230", "clock130", "clock2", "clock230", "clock3", "clock330", "clock4", "clock430", "clock5",
"clock530", "clock6", "clock630", "clock7", "clock730", "clock8", "clock830", "clock9", "clock930",
"closed_book", "closed_lock_with_key", "closed_umbrella", "cloud", "clubs", "cn", "cocktail", "coffee",
"cold_sweat", "collision", "computer", "confetti_ball", "confounded", "confused", "congratulations",
"construction", "construction_worker", "convenience_store", "cookie", "cool", "cop", "copyright", "corn",
"couple", "couplekiss", "couple_with_heart", "cow", "cow2", "credit_card", "crocodile", "crossed_flags",
"crown", "cry", "crying_cat_face", "crystal_ball", "cupid", "curly_loop", "currency_exchange", "curry",
"custard", "customs", "cyclone", "dancer", "dancers", "dango", "dart", "dash", "date", "de", "deciduous_tree",
"department_store", "diamonds", "diamond_shape_with_a_dot_inside", "disappointed", "disappointed_relieved",
"dizzy", "dizzy_face", "dog", "dog2", "dollar", "dolls", "dolphin", "donut", "door", "doughnut",
"do_not_litter", "dragon", "dragon_face", "dress", "dromedary_camel", "droplet", "dvd", "e-mail", "ear",
"earth_africa", "earth_americas", "earth_asia", "ear_of_rice", "egg", "eggplant", "eight",
"eight_pointed_black_star", "eight_spoked_asterisk", "electric_plug", "elephant", "email", "end", "envelope",
"es", "euro", "european_castle", "european_post_office", "evergreen_tree", "exclamation", "expressionless",
"eyeglasses", "eyes", "facepunch", "factory", "fallen_leaf", "family", "fast_forward", "fax", "fearful",
"feelsgood", "feet", "ferris_wheel", "file_folder", "finnadie", "fire", "fireworks", "fire_engine",
"first_quarter_moon", "first_quarter_moon_with_face", "fish", "fishing_pole_and_fish", "fish_cake", "fist",
"five", "flags", "flashlight", "floppy_disk", "flower_playing_cards", "flushed", "foggy", "football",
"fork_and_knife", "fountain", "four", "four_leaf_clover", "fr", "free", "fried_shrimp", "fries", "frog",
"frowning", "fu", "fuelpump", "full_moon", "full_moon_with_face", "game_die", "gb", "gem", "gemini", "ghost",
"gift", "gift_heart", "girl", "globe_with_meridians", "goat", "goberserk", "godmode", "golf", "grapes",
"green_apple", "green_book", "green_heart", "grey_exclamation", "grey_question", "grimacing", "grin",
"grinning", "guardsman", "guitar", "gun", "haircut", "hamburger", "hammer", "hamster", "hand", "handbag",
"hankey", "hash", "hatched_chick", "hatching_chick", "headphones", "heart", "heartbeat", "heartpulse",
"hearts", "heart_decoration", "heart_eyes", "heart_eyes_cat", "hear_no_evil", "heavy_check_mark",
"heavy_division_sign", "heavy_dollar_sign", "heavy_exclamation_mark", "heavy_minus_sign",
"heavy_multiplication_x", "heavy_plus_sign", "helicopter", "herb", "hibiscus", "high_brightness",
"high_heel", "hocho", "honeybee", "honey_pot", "horse", "horse_racing", "hospital", "hotel", "hotsprings",
"hourglass", "hourglass_flowing_sand", "house", "house_with_garden", "hurtrealbad", "hushed", "icecream",
"ice_cream", "id", "ideograph_advantage", "imp", "inbox_tray", "incoming_envelope", "information_desk_person",
"information_source", "innocent", "interrobang", "iphone", "it", "izakaya_lantern", "jack_o_lantern", "japan",
"japanese_castle", "japanese_goblin", "japanese_ogre", "jeans", "joy", "joy_cat", "jp", "key", "keycap_ten",
"kimono", "kiss", "kissing", "kissing_cat", "kissing_closed_eyes", "kissing_face", "kissing_heart",
"kissing_smiling_eyes", "koala", "koko", "kr", "large_blue_circle", "large_blue_diamond", "large_orange_diamond",
"last_quarter_moon", "last_quarter_moon_with_face", "laughing", "leaves", "ledger", "leftwards_arrow_with_hook",
"left_luggage", "left_right_arrow", "lemon", "leo", "leopard", "libra", "light_rail", "link", "lips",
"lipstick", "lock", "lock_with_ink_pen", "lollipop", "loop", "loudspeaker", "love_hotel", "love_letter",
"low_brightness", "m", "mag", "mag_right", "mahjong", "mailbox", "mailbox_closed", "mailbox_with_mail",
"mailbox_with_no_mail", "man", "mans_shoe", "man_with_gua_pi_mao", "man_with_turban", "maple_leaf", "mask",
"massage", "meat_on_bone", "mega", "melon", "memo", "mens", "metal", "metro", "microphone", "microscope",
"milky_way", "minibus", "minidisc", "mobile_phone_off", "moneybag", "money_with_wings", "monkey", "monkey_face",
"monorail", "moon", "mortar_board", "mountain_bicyclist", "mountain_cableway", "mountain_railway",
"mount_fuji", "mouse", "mouse2", "movie_camera", "moyai", "muscle", "mushroom", "musical_keyboard",
"musical_note", "musical_score", "mute", "nail_care", "name_badge", "neckbeard", "necktie",
"negative_squared_cross_mark", "neutral_face", "new", "newspaper", "new_moon", "new_moon_with_face",
"ng", "nine", "non-potable_water", "nose", "notebook", "notebook_with_decorative_cover", "notes", "no_bell",
"no_bicycles", "no_entry", "no_entry_sign", "no_good", "no_mobile_phones", "no_mouth", "no_pedestrians",
"no_smoking", "nut_and_bolt", "o", "o2", "ocean", "octocat", "octopus", "oden", "office", "ok", "ok_hand",
"ok_woman", "older_man", "older_woman", "on", "oncoming_automobile", "oncoming_bus", "oncoming_police_car",
"oncoming_taxi", "one", "open_file_folder", "open_hands", "open_mouth", "ophiuchus", "orange_book",
"outbox_tray", "ox", "pager", "page_facing_up", "page_with_curl", "palm_tree", "panda_face", "paperclip",
"parking", "partly_sunny", "part_alternation_mark", "passport_control", "paw_prints", "peach", "pear",
"pencil", "pencil2", "penguin", "pensive", "performing_arts", "persevere", "person_frowning",
"person_with_blond_hair", "person_with_pouting_face", "phone", "pig", "pig2", "pig_nose", "pill",
"pineapple", "pisces", "pizza", "plus1", "point_down", "point_left", "point_right", "point_up",
"point_up_2", "police_car", "poodle", "poop", "postal_horn", "postbox", "post_office", "potable_water",
"pouch", "poultry_leg", "pound", "pouting_cat", "pray", "princess", "punch", "purple_heart", "purse",
"pushpin", "put_litter_in_its_place", "question", "rabbit", "rabbit2", "racehorse", "radio", "radio_button",
"rage", "rage1", "rage2", "rage3", "rage4", "railway_car", "rainbow", "raised_hand", "raised_hands",
"raising_hand", "ram", "ramen", "rat", "recycle", "red_car", "red_circle", "registered", "relaxed",
"relieved", "repeat", "repeat_one", "restroom", "revolving_hearts", "rewind", "ribbon", "rice", "rice_ball",
"rice_cracker", "rice_scene", "ring", "rocket", "roller_coaster", "rooster", "rose", "rotating_light",
"round_pushpin", "rowboat", "ru", "rugby_football", "runner", "running", "running_shirt_with_sash", "sa",
"sagittarius", "sailboat", "sake", "sandal", "santa", "satellite", "satisfied", "saxophone", "school",
"school_satchel", "scissors", "scorpius", "scream", "scream_cat", "scroll", "seat", "secret", "seedling",
"see_no_evil", "seven", "shaved_ice", "sheep", "shell", "ship", "shipit", "shirt", "shit", "shoe", "shower",
"signal_strength", "six", "six_pointed_star", "ski", "skull", "sleeping", "sleepy", "slot_machine",
"small_blue_diamond", "small_orange_diamond", "small_red_triangle", "small_red_triangle_down", "smile",
"smiley", "smiley_cat", "smile_cat", "smiling_imp", "smirk", "smirk_cat", "smoking", "snail", "snake",
"snowboarder", "snowflake", "snowman", "sob", "soccer", "soon", "sos", "sound", "space_invader", "spades",
"spaghetti", "sparkler", "sparkles", "sparkling_heart", "speaker", "speak_no_evil", "speech_balloon",
"speedboat", "squirrel", "star", "star2", "stars", "station", "statue_of_liberty", "steam_locomotive",
"stew", "straight_ruler", "strawberry", "stuck_out_tongue", "stuck_out_tongue_closed_eyes",
"stuck_out_tongue_winking_eye", "sunflower", "sunglasses", "sunny", "sunrise", "sunrise_over_mountains",
"sun_with_face", "surfer", "sushi", "suspect", "suspension_railway", "sweat", "sweat_drops", "sweat_smile",
"sweet_potato", "swimmer", "symbols", "syringe", "tada", "tanabata_tree", "tangerine", "taurus", "taxi",
"tea", "telephone", "telephone_receiver", "telescope", "tennis", "tent", "thought_balloon", "three",
"thumbsdown", "thumbsup", "ticket", "tiger", "tiger2", "tired_face", "tm", "toilet", "tokyo_tower", "tomato",
"tongue", "top", "tophat", "tractor", "traffic_light", "train", "train2", "tram", "triangular_flag_on_post",
"triangular_ruler", "trident", "triumph", "trolleybus", "trollface", "trophy", "tropical_drink",
"tropical_fish", "truck", "trumpet", "tshirt", "tulip", "turtle", "tv", "twisted_rightwards_arrows",
"two", "two_hearts", "two_men_holding_hands", "two_women_holding_hands", "u5272", "u5408", "u55b6",
"u6307", "u6708", "u6709", "u6e80", "u7121", "u7533", "u7981", "u7a7a", "uk", "umbrella", "unamused",
"underage", "unlock", "up", "us", "v", "vertical_traffic_light", "vhs", "vibration_mode", "video_camera",
"video_game", "violin", "virgo", "volcano", "vs", "walking", "waning_crescent_moon", "waning_gibbous_moon",
"warning", "watch", "watermelon", "water_buffalo", "wave", "wavy_dash", "waxing_crescent_moon",
"waxing_gibbous_moon", "wc", "weary", "wedding", "whale", "whale2", "wheelchair", "white_check_mark",
"white_circle", "white_flower", "white_square", "white_square_button", "wind_chime", "wine_glass",
"wink", "wolf", "woman", "womans_clothes", "womans_hat", "womens", "worried", "wrench", "x", "yellow_heart",
"yen", "yum", "zap", "zero", "zzz",
}
class EmojifyExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
md.preprocessors.add('emojify',
EmojifyPreprocessor(md),
'_end')
class EmojifyPreprocessor(Preprocessor):
def run(self, lines):
        pattern = re.compile(r':([a-z0-9\+\-_]+):')
new_lines = []
def emojify(match):
emoji = match.group(1)
if emoji not in EMOJIS_SET:
return match.group(0)
path = "{}{}.png".format(EMOJIS_PATH, emoji)
url = static(path)
            return '<img class="emoji" title="{emoji}" alt="{emoji}" src="{url}">'.format(emoji=emoji, url=url)
for line in lines:
if line.strip():
line = pattern.sub(emojify, line)
new_lines.append(line)
return new_lines
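
# A minimal usage sketch, assuming the markdown 2.x extension API this module
# targets and an illustrative STATIC_URL; it is not part of the original module.
if __name__ == "__main__":
    from django.conf import settings
    if not settings.configured:
        settings.configure(STATIC_URL="/static/")
    import markdown
    rendered = markdown.markdown(
        "Deployed! :rocket: :+1: :not_an_emoji:",
        extensions=[EmojifyExtension()])
    # Known names resolve to emoji image markup under STATIC_URL;
    # unknown names such as :not_an_emoji: are left untouched.
    print(rendered)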
|
xdevelsistemas/taiga-back-community
|
taiga/mdrender/extensions/emojify.py
|
Python
|
agpl-3.0
| 14,160
|
[
"Bowtie",
"Octopus"
] |
e90676b9fed64350889808bbb5729447d5712bafd0b6cc44bc71ce31bdecdfc3
|
'''
Arthur Glowacki
APS ANL
10/17/2014
'''
from Model import Model
import numpy as np
import math, time, sys
import h5py
from PyQt4 import QtCore
from vtk import *
import Optics
class Scanner(QtCore.QThread):
notifyProgress = QtCore.pyqtSignal(int)
notifyFinish = QtCore.pyqtSignal()
def __init__(self):
QtCore.QObject.__init__(self)
self.baseModels = []
self.datasetName = ''
self.dimX = 1000
self.dimY = 1000
self.startRot = 0.0
self.stopRot = 0.0
self.numImages = 180
self.Stop = False
self.dsetLock = None
self.hfile = None
self.bSaveTheta = False
self.bounds = []
self.objectives = []
self.calcFunc = Optics.coherent
self.hdfFiles = []
def initLocator(self):
polys = vtkAppendPolyData()
for m in self.baseModels:
polys.AddInput(m.transformFilter.GetOutput())
polys.Update()
locator = vtkCellLocator()
locator.SetDataSet(polys.GetOutput())
locator.BuildLocator()
return locator, polys.GetOutput().GetBounds()
def exportTomoScanToHDF(self):
print self.datasetName,' exporting tomo scan'
self.obj_dsets = []
self.dsetLock.lock()
self.dset = self.hfile.create_dataset(self.datasetName, (self.numImages, self.dimX, self.dimY), chunks=(1, self.dimX, self.dimY), compression='gzip', compression_opts=6 )
for i in range(len(self.hdfFiles)):
self.obj_dsets += [ self.hdfFiles[i].create_dataset(self.datasetName, (self.numImages, self.dimX, self.dimY), chunks=(1, self.dimX, self.dimY), compression='gzip', compression_opts=6 ) ]
self.dsetLock.unlock()
wdata = np.zeros((self.numImages, self.dimX, self.dimY), dtype=np.float32)
if self.bSaveTheta:
thetaDset = np.zeros((self.numImages), dtype=np.float32)
#create theta rotation dataset
self.dsetLock.lock()
thetaH5 = self.hfile.create_dataset('exchange/theta', (self.numImages,))
self.obj_thetas = [ ]
for i in range(len(self.hdfFiles)):
self.obj_thetas += [ self.hdfFiles[i].create_dataset('exchange/theta', (self.numImages,)) ]
self.dsetLock.unlock()
baseLocator, nbounds = self.initLocator()
xItr = (self.bounds[1] - self.bounds[0]) / float(self.dimX)
yItr = (self.bounds[3] - self.bounds[2]) / float(self.dimY)
#if starting from 0 we want to go backwards
angle = math.radians(self.startRot)
delta = (math.radians(self.stopRot) - math.radians(self.startRot)) / float(self.numImages)
#print 'angle',angle,'delta', delta
cntr = 1
zStart = self.bounds[4] - 100
zEnd = self.bounds[5] + 100
tolerance = 0.00001
tmut = mutable(0)
subId = mutable(0)
for n in range(self.numImages):
startTime = time.time()
if self.bSaveTheta:
thetaDset[n] = angle
print self.datasetName,'Image number',n+1,'of',self.numImages
yStart = self.bounds[2]
for y in range(self.dimY):
#print 'scan line', y
if self.Stop:
print self.datasetName,'Scan Stopped!'
self.notifyFinish.emit()
return
xStart = self.bounds[0]
for x in range(self.dimX):
L0RotX = (zStart * math.sin(angle)) + (xStart * math.cos(angle))
L0RotZ = (zStart * math.cos(angle)) - (xStart * math.sin(angle))
L1RotX = (zEnd * math.sin(angle)) + (xStart * math.cos(angle))
L1RotZ = (zEnd * math.cos(angle)) - (xStart * math.sin(angle))
L0 = [L0RotX, yStart, L0RotZ]
L1 = [L1RotX, yStart, L1RotZ]
p0 = [0.0, 0.0, 0.0]
pcoords = [0.0, 0.0, 0.0]
if baseLocator.IntersectWithLine(L0, L1, tolerance, tmut, p0, pcoords, subId) > 0:
#print 'L0',L0,'L1',L1
for m in self.baseModels:
wdata[n][x][y] += m.intersect_line(L0, L1)
xStart += xItr
yStart += yItr
self.notifyProgress.emit(cntr)
cntr += 1
#perform optics
self.dsetLock.lock()
self.dset[n] = wdata[n]
for i in range(len(self.obj_dsets)):
self.obj_dsets[i][n] = self.calcFunc(wdata[n], self.objectives[i])
self.dsetLock.unlock()
angle += delta
endTime = time.time()
print self.datasetName, ' ',int(endTime - startTime),'seconds per image'
if self.bSaveTheta:
self.dsetLock.lock()
thetaH5[:] = thetaDset[:]
for i in range(len(self.hdfFiles)):
self.obj_thetas[i][:] = thetaDset[:]
self.dsetLock.unlock()
self.notifyFinish.emit()
print self.datasetName,' finished exporting'
def run(self):
self.Stop = False
self.exportTomoScanToHDF()
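
# A small self-contained sketch (no VTK/Qt needed) of the ray rotation used in
# exportTomoScanToHDF: the ray endpoints, not the models, are rotated about the
# vertical (y) axis, so at angle 0 each ray travels along +z through its pixel.
if __name__ == "__main__":
    demoAngle = math.radians(90.0)
    x, z = 1.0, 0.0
    xRot = (z * math.sin(demoAngle)) + (x * math.cos(demoAngle))
    zRot = (z * math.cos(demoAngle)) - (x * math.sin(demoAngle))
    # After a 90 degree rotation the point on +x ends up on -z (roughly 0.0, -1.0).
    print xRot, zRot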
|
aglowacki/ScanSimulator
|
Scanner.py
|
Python
|
gpl-2.0
| 4,299
|
[
"VTK"
] |
72fa1fed3040cb2d0b51533ac4282c456629fca1d87ca3faf8c72d2b09b256df
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss computation utility functions."""
import functools
import tensorflow as tf
import tensorflow_probability as tfp
from poem.core import common
from poem.core import data_utils
from poem.core import distance_utils
from poem.core import keypoint_utils
def create_sample_distance_fn(
pair_type=common.DISTANCE_PAIR_TYPE_ALL_PAIRS,
distance_kernel=common.DISTANCE_KERNEL_SQUARED_L2,
pairwise_reduction=common.DISTANCE_REDUCTION_MEAN,
componentwise_reduction=common.DISTANCE_REDUCTION_MEAN,
**distance_kernel_kwargs):
"""Creates sample distance function.
Args:
pair_type: An enum string (see `common`) for type of pairs to use.
distance_kernel: An enum string (see `common`) or a function handle for
point distance kernel to use.
pairwise_reduction: An enum string (see `common`) or a function handle for
pairwise distance reducer to use. If not a supported enum string, uses it
directly as a function handle.
componentwise_reduction: An enum string (see `common`) or a function handle
for component-wise distance reducer to use. If not a supported enum
string, uses it directly as a function handle.
**distance_kernel_kwargs: A dictionary for additional arguments to be passed
to the distance kernel. The keys are in the format
`${distance_kernel_name}_${argument_name}`.
Returns:
A function handle for computing sample group distances that takes two
tensors of shape [..., num_components, num_embeddings, embedding_dim] as
input.
"""
def get_distance_matrix_fn():
"""Selects point distance matrix function."""
if pair_type == common.DISTANCE_PAIR_TYPE_ALL_PAIRS:
l2_distance_computer = distance_utils.compute_all_pair_l2_distances
elif pair_type == common.DISTANCE_PAIR_TYPE_CORRESPONDING_PAIRS:
l2_distance_computer = (
distance_utils.compute_corresponding_pair_l2_distances)
if distance_kernel == common.DISTANCE_KERNEL_SQUARED_L2:
return functools.partial(l2_distance_computer, squared=True)
if distance_kernel == common.DISTANCE_KERNEL_L2_SIGMOID_MATCHING_PROB:
def compute_l2_sigmoid_matching_distances(lhs, rhs):
"""Computes L2 sigmoid matching probability distances."""
inner_distances = l2_distance_computer(lhs, rhs, squared=False)
return distance_utils.compute_sigmoid_matching_probabilities(
inner_distances,
a=distance_kernel_kwargs.get(distance_kernel + '_a', None),
b=distance_kernel_kwargs.get(distance_kernel + '_b', None))
return compute_l2_sigmoid_matching_distances
if (distance_kernel ==
common.DISTANCE_KERNEL_SQUARED_L2_SIGMOID_MATCHING_PROB):
def compute_squared_l2_sigmoid_matching_distances(lhs, rhs):
"""Computes squared L2 sigmoid matching probability distances."""
inner_distances = l2_distance_computer(lhs, rhs, squared=True)
return distance_utils.compute_sigmoid_matching_probabilities(
inner_distances,
a=distance_kernel_kwargs.get(distance_kernel + '_a', None),
b=distance_kernel_kwargs.get(distance_kernel + '_b', None))
return compute_squared_l2_sigmoid_matching_distances
if distance_kernel == common.DISTANCE_KERNEL_EXPECTED_LIKELIHOOD:
def compute_gaussian_likelihoods(lhs, rhs):
"""Computes sample likelihoods."""
num_lhs_samples = lhs.shape.as_list()[-2] - 2
num_rhs_samples = rhs.shape.as_list()[-2] - 2
lhs_means, lhs_stddevs, lhs_samples = tf.split(
lhs, [1, 1, num_lhs_samples], axis=-2)
rhs_means, rhs_stddevs, rhs_samples = tf.split(
rhs, [1, 1, num_rhs_samples], axis=-2)
rhs_likelihoods = distance_utils.compute_gaussian_likelihoods(
lhs_means,
lhs_stddevs,
rhs_samples,
min_stddev=distance_kernel_kwargs.get(
distance_kernel + '_min_stddev', None),
max_squared_mahalanobis_distance=distance_kernel_kwargs.get(
distance_kernel + '_max_squared_mahalanobis_distance', None),
smoothing=distance_kernel_kwargs.get(distance_kernel + '_smoothing',
None))
lhs_likelihoods = distance_utils.compute_gaussian_likelihoods(
rhs_means,
rhs_stddevs,
lhs_samples,
l2_distance_computer=l2_distance_computer,
min_stddev=distance_kernel_kwargs.get(
distance_kernel + '_min_stddev', None),
max_squared_mahalanobis_distance=distance_kernel_kwargs.get(
distance_kernel + '_max_squared_mahalanobis_distance', None),
smoothing=distance_kernel_kwargs.get(distance_kernel + '_smoothing',
None))
return (rhs_likelihoods + lhs_likelihoods) / 2.0
return compute_gaussian_likelihoods
raise ValueError('Unsupported distance kernel: `%s`.' %
str(distance_kernel))
def get_pairwise_distance_reduction_fn():
"""Selects pairwise distance reduction function."""
if pairwise_reduction == common.DISTANCE_REDUCTION_MEAN:
return functools.partial(tf.math.reduce_mean, axis=[-2, -1])
if pairwise_reduction == common.DISTANCE_REDUCTION_LOWER_HALF_MEAN:
return functools.partial(
data_utils.compute_lower_percentile_means, axis=[-2, -1], q=50)
if pairwise_reduction == common.DISTANCE_REDUCTION_NEG_LOG_MEAN:
return lambda x: -tf.math.log(tf.math.reduce_mean(x, axis=[-2, -1]))
if pairwise_reduction == common.DISTANCE_REDUCTION_LOWER_HALF_NEG_LOG_MEAN:
def compute_lower_half_negative_log_mean(x):
return -tf.math.log(
data_utils.compute_lower_percentile_means(x, axis=[-2, -1], q=50))
return compute_lower_half_negative_log_mean
if pairwise_reduction == common.DISTANCE_REDUCTION_ONE_MINUS_MEAN:
return lambda x: 1.0 - tf.math.reduce_mean(x, axis=[-2, -1])
return pairwise_reduction
def get_componentwise_distance_reduction_fn():
"""Selects component-wise distance reduction function."""
if componentwise_reduction == common.DISTANCE_REDUCTION_MEAN:
return functools.partial(tf.math.reduce_mean, axis=[-1])
return componentwise_reduction
def sample_distance_fn(lhs, rhs):
"""Computes sample distances."""
distances = get_distance_matrix_fn()(lhs, rhs)
distances = get_pairwise_distance_reduction_fn()(distances)
distances = get_componentwise_distance_reduction_fn()(distances)
return distances
return sample_distance_fn
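# A brief usage sketch of the factory above (shapes and values are illustrative,
# not taken from the original code); inputs follow the documented layout
# [batch, num_components, num_embeddings, embedding_dim]:
#
#   distance_fn = create_sample_distance_fn(
#       pair_type=common.DISTANCE_PAIR_TYPE_ALL_PAIRS,
#       distance_kernel=common.DISTANCE_KERNEL_SQUARED_L2,
#       pairwise_reduction=common.DISTANCE_REDUCTION_MEAN,
#       componentwise_reduction=common.DISTANCE_REDUCTION_MEAN)
#   lhs = tf.random.normal([8, 1, 4, 16])
#   rhs = tf.random.normal([8, 1, 4, 16])
#   distances = distance_fn(lhs, rhs)  # all-pair squared L2, reduced to shape [8]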
def compute_negative_indicator_matrix(anchor_points,
match_points,
distance_fn,
min_negative_distance,
anchor_point_masks=None,
match_point_masks=None):
"""Computes all-pair negative match indicator matrix.
Args:
anchor_points: A tensor for anchor points. Shape = [num_anchors, ...,
point_dim].
match_points: A tensor for match points. Shape = [num_matches, ...,
point_dim].
distance_fn: A function handle for computing distance matrix.
min_negative_distance: A float for the minimum negative distance threshold.
anchor_point_masks: A tensor for anchor point masks. Shape = [num_anchors,
...]. Ignored if None.
match_point_masks: A tensor for match point masks. Shape = [num_matches,
...]. Ignored if None.
Returns:
A boolean tensor for negative indicator matrix. Shape = [num_anchors,
num_matches].
"""
distance_matrix = distance_utils.compute_distance_matrix(
anchor_points,
match_points,
distance_fn=distance_fn,
start_point_masks=anchor_point_masks,
end_point_masks=match_point_masks)
return distance_matrix >= min_negative_distance
def compute_hard_negative_distances(anchor_match_distance_matrix,
negative_indicator_matrix,
use_semi_hard=False,
anchor_positive_mining_distances=None,
anchor_match_mining_distance_matrix=None):
"""Computes (semi-)hard negative distances.
Args:
anchor_match_distance_matrix: A tensor for anchor/match distance matrix.
Shape = [num_anchors, num_matches].
negative_indicator_matrix: A tensor for anchor/match negative indicator
matrix. Shape = [num_anchors, num_matches].
use_semi_hard: A boolean for whether to compute semi-hard negative distances
instead of hard negative distances.
anchor_positive_mining_distances: A tensor for positive distances of each
anchor for (semi-)hard negative mining. Only used if `use_semi_hard` is
True. Shape = [num_anchors].
anchor_match_mining_distance_matrix: A tensor for an alternative
anchor/match distance matrix to use for (semi-)hard negative mining. Use
None to ignore and use `anchor_match_distance_matrix` instead. If
specified, must be of the same shape as `anchor_match_distance_matrix`.
Returns:
hard_negative_distances: A tensor for (semi-)hard negative distances. Shape
      = [num_anchors]. If an anchor has no (semi-)hard negative match, its
negative distance will be assigned as the maximum value of
anchor_match_distance_matrix.dtype.
hard_negative_mining_distances: A tensor for (semi-)hard negative mining
      distances. Shape = [num_anchors]. If an anchor has no (semi-)hard negative
match, its negative distance will be assigned as the maximum value of
anchor_match_distance_matrix.dtype.
Raises:
ValueError: If `use_semi_hard` is True, but
`anchor_positive_mining_distances` is not specified.
"""
indicators = negative_indicator_matrix
if anchor_match_mining_distance_matrix is None:
anchor_match_mining_distance_matrix = anchor_match_distance_matrix
if use_semi_hard:
if anchor_positive_mining_distances is None:
raise ValueError('Positive match embeddings must be specified to compute '
'semi-hard distances.')
anchor_positive_mining_distances = tf.expand_dims(
anchor_positive_mining_distances, axis=-1)
indicators &= (
anchor_match_mining_distance_matrix > anchor_positive_mining_distances)
def find_hard_distances(distance_matrix, indicator_matrix):
distance_matrix = tf.where(
tf.stop_gradient(indicator_matrix), distance_matrix,
tf.fill(tf.shape(distance_matrix), distance_matrix.dtype.max))
hard_distances = tf.math.reduce_min(distance_matrix, axis=-1)
return hard_distances
hard_negative_mining_distances = find_hard_distances(
anchor_match_mining_distance_matrix, indicators)
indicators &= tf.math.equal(
anchor_match_mining_distance_matrix,
tf.expand_dims(hard_negative_mining_distances, axis=-1))
hard_negative_distances = find_hard_distances(anchor_match_distance_matrix,
indicators)
return hard_negative_distances, hard_negative_mining_distances
def compute_hard_negative_triplet_loss(
anchor_positive_distances,
anchor_match_distance_matrix,
anchor_match_negative_indicator_matrix,
margin,
use_semi_hard,
anchor_positive_mining_distances=None,
anchor_match_mining_distance_matrix=None):
"""Computes triplet loss with (semi-)hard negative mining.
Args:
anchor_positive_distances: A tensor for anchor/positive distances. Shape =
[num_anchors].
anchor_match_distance_matrix: A tensor for anchor/match distance matrix.
Shape = [num_anchors, num_matches].
anchor_match_negative_indicator_matrix: A tensor for anchor/match negative
indicator matrix. Shape = [num_anchors, num_matches].
margin: A float for triplet loss margin.
use_semi_hard: A boolean for whether to compute semi-hard negative distances
instead of hard negative distances.
anchor_positive_mining_distances: A tensor for positive distances of each
anchor for (semi-)hard negative mining. Only used if `use_semi_hard` is
True. Shape = [num_anchors].
anchor_match_mining_distance_matrix: A tensor for an alternative
anchor/match distance matrix to use for (semi-)hard negative mining. Use
None to ignore and use `anchor_match_distance_matrix` instead. If
specified, must be of the same shape as `anchor_match_distance_matrix`.
Returns:
loss: A tensor for loss. Shape = [].
num_active_triplets: A tensor for number of active triplets. Shape = [].
anchor_negative_distances: A tensor for anchor/negative distances. Shape =
      [num_anchors]. If an anchor has no (semi-)hard negative match, its
negative distance will be assigned as the maximum value of
anchor_match_distance_matrix.dtype.
mining_loss: A tensor for loss based on mining distances. Shape = [].
num_active_mining_triplets: A tensor for number of active triplets based on
mining distances. Shape = [].
anchor_negative_mining_distances: A tensor for anchor/negative mining
      distances. Shape = [num_anchors]. If an anchor has no (semi-)hard negative
match, its negative distance will be assigned as the maximum value of
anchor_match_mining_distance_matrix.dtype.
"""
if anchor_positive_mining_distances is None:
anchor_positive_mining_distances = anchor_positive_distances
if anchor_match_mining_distance_matrix is None:
anchor_match_mining_distance_matrix = anchor_match_distance_matrix
anchor_negative_distances, anchor_negative_mining_distances = (
compute_hard_negative_distances(
anchor_match_distance_matrix,
anchor_match_negative_indicator_matrix,
use_semi_hard=use_semi_hard,
anchor_positive_mining_distances=anchor_positive_mining_distances,
anchor_match_mining_distance_matrix=(
anchor_match_mining_distance_matrix)))
def compute_triplet_loss(positive_distances, negative_distances):
losses = tf.nn.relu(positive_distances + margin - negative_distances)
losses = tf.where(
tf.stop_gradient(losses < losses.dtype.max), losses,
tf.zeros_like(losses))
num_nonzero_losses = tf.math.count_nonzero(losses)
loss = tf.math.reduce_mean(losses)
return loss, num_nonzero_losses
loss, num_active_triplets = compute_triplet_loss(anchor_positive_distances,
anchor_negative_distances)
mining_loss, num_active_mining_triplets = compute_triplet_loss(
anchor_positive_mining_distances, anchor_negative_mining_distances)
return (loss, num_active_triplets, anchor_negative_distances, mining_loss,
num_active_mining_triplets, anchor_negative_mining_distances)
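# In plain terms (restating the function above, not new behavior): for an anchor
# a with positive p and mined negative n, the per-anchor term is
#   max(0, d(a, p) + margin - d(a, n)),
# where n is the closest valid negative (hard) or the closest negative with
# d(a, n) > d(a, p) (semi-hard); anchors with no valid negative contribute zero.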
def compute_keypoint_triplet_losses(
anchor_embeddings,
positive_embeddings,
match_embeddings,
anchor_keypoints,
match_keypoints,
margin,
min_negative_keypoint_distance,
use_semi_hard,
exclude_inactive_triplet_loss,
anchor_keypoint_masks=None,
match_keypoint_masks=None,
embedding_sample_distance_fn=create_sample_distance_fn(),
keypoint_distance_fn=keypoint_utils.compute_procrustes_aligned_mpjpes,
anchor_mining_embeddings=None,
positive_mining_embeddings=None,
match_mining_embeddings=None,
summarize_percentiles=True):
"""Computes triplet losses with both hard and semi-hard negatives.
Args:
anchor_embeddings: A tensor for anchor embeddings. Shape = [num_anchors,
embedding_dim] or [num_anchors, num_samples, embedding_dim].
positive_embeddings: A tensor for positive match embeddings. Shape =
[num_anchors, embedding_dim] or [num_anchors, num_samples, embedding_dim].
match_embeddings: A tensor for candidate negative match embeddings. Shape =
      [num_matches, embedding_dim] or [num_matches, num_samples, embedding_dim].
anchor_keypoints: A tensor for anchor keypoints for computing pair labels.
Shape = [num_anchors, ..., num_keypoints, keypoint_dim].
match_keypoints: A tensor for match keypoints for computing pair labels.
Shape = [num_anchors, ..., num_keypoints, keypoint_dim].
margin: A float for triplet loss margin.
min_negative_keypoint_distance: A float for the minimum negative distance
threshold. If negative, uses all other samples as negative matches. In
this case, `num_anchors` and `num_matches` are assumed to be equal. Note
that this option is for saving negative match computation. To support
      different `num_anchors` and `num_matches`, set this to 0 (forgoing the
      computational saving).
use_semi_hard: A boolean for whether to use semi-hard negative triplet loss
as the final loss.
exclude_inactive_triplet_loss: A boolean for whether to exclude inactive
triplets in the final loss computation.
anchor_keypoint_masks: A tensor for anchor keypoint masks for computing pair
labels. Shape = [num_anchors, ..., num_keypoints]. Ignored if None.
match_keypoint_masks: A tensor for match keypoint masks for computing pair
labels. Shape = [num_anchors, ..., num_keypoints]. Ignored if None.
embedding_sample_distance_fn: A function handle for computing sample
embedding distances, which takes two embedding tensors of shape [...,
num_samples, embedding_dim] and returns a distance tensor of shape [...].
keypoint_distance_fn: A function handle for computing keypoint distance
matrix, which takes two matrix tensors and returns an element-wise
distance matrix tensor.
anchor_mining_embeddings: A tensor for anchor embeddings for triplet mining.
Shape = [num_anchors, embedding_dim] or [num_anchors, num_samples,
embedding_dim]. Use None to ignore and use `anchor_embeddings` instead.
positive_mining_embeddings: A tensor for positive match embeddings for
triplet mining. Shape = [num_anchors, embedding_dim] or [num_anchors,
num_samples, embedding_dim]. Use None to ignore and use
`positive_embeddings` instead.
match_mining_embeddings: A tensor for candidate negative match embeddings
for triplet mining. Shape = [num_anchors, embedding_dim] or [num_matches,
num_samples, embedding_dim]. Use None to ignore and use `match_embeddings`
instead.
summarize_percentiles: A boolean for whether to summarize percentiles of
certain variables, e.g., embedding distances in triplet loss. Consider
turning this off in case tensorflow_probability percentile computation
causes failures at random due to empty tensor.
Returns:
loss: A tensor for triplet loss. Shape = [].
summaries: A dictionary for loss and batch statistics summaries.
"""
def maybe_expand_sample_dim(embeddings):
if len(embeddings.shape.as_list()) == 2:
return tf.expand_dims(embeddings, axis=-2)
return embeddings
anchor_embeddings = maybe_expand_sample_dim(anchor_embeddings)
positive_embeddings = maybe_expand_sample_dim(positive_embeddings)
match_embeddings = maybe_expand_sample_dim(match_embeddings)
if min_negative_keypoint_distance >= 0.0:
anchor_match_negative_indicator_matrix = (
compute_negative_indicator_matrix(
anchor_points=anchor_keypoints,
match_points=match_keypoints,
distance_fn=keypoint_distance_fn,
min_negative_distance=min_negative_keypoint_distance,
anchor_point_masks=anchor_keypoint_masks,
match_point_masks=match_keypoint_masks))
else:
num_anchors = tf.shape(anchor_keypoints)[0]
anchor_match_negative_indicator_matrix = tf.math.logical_not(
tf.eye(num_anchors, dtype=tf.bool))
anchor_positive_distances = embedding_sample_distance_fn(
anchor_embeddings, positive_embeddings)
if anchor_mining_embeddings is None and positive_mining_embeddings is None:
anchor_positive_mining_distances = anchor_positive_distances
else:
anchor_positive_mining_distances = embedding_sample_distance_fn(
anchor_embeddings if anchor_mining_embeddings is None else
maybe_expand_sample_dim(anchor_mining_embeddings),
positive_embeddings if positive_mining_embeddings is None else
maybe_expand_sample_dim(positive_mining_embeddings))
anchor_match_distance_matrix = distance_utils.compute_distance_matrix(
anchor_embeddings,
match_embeddings,
distance_fn=embedding_sample_distance_fn)
if anchor_mining_embeddings is None and match_mining_embeddings is None:
anchor_match_mining_distance_matrix = anchor_match_distance_matrix
else:
anchor_match_mining_distance_matrix = distance_utils.compute_distance_matrix(
anchor_embeddings if anchor_mining_embeddings is None else
maybe_expand_sample_dim(anchor_mining_embeddings),
match_embeddings if match_mining_embeddings is None else
maybe_expand_sample_dim(match_mining_embeddings),
distance_fn=embedding_sample_distance_fn)
num_total_triplets = tf.cast(tf.shape(anchor_embeddings)[0], dtype=tf.float32)
def compute_loss_and_create_summaries(use_semi_hard):
"""Computes loss and creates summaries."""
(loss, num_active_triplets, negative_distances, mining_loss,
num_active_mining_triplets, negative_mining_distances) = (
compute_hard_negative_triplet_loss(
anchor_positive_distances,
anchor_match_distance_matrix,
anchor_match_negative_indicator_matrix,
margin=margin,
use_semi_hard=use_semi_hard,
anchor_positive_mining_distances=anchor_positive_mining_distances,
anchor_match_mining_distance_matrix=(
anchor_match_mining_distance_matrix)))
negative_distances = tf.boolean_mask(
negative_distances,
mask=negative_distances < negative_distances.dtype.max)
negative_mining_distances = tf.boolean_mask(
negative_mining_distances,
mask=negative_distances < negative_distances.dtype.max)
active_triplet_ratio = (
tf.cast(num_active_triplets, dtype=tf.float32) / num_total_triplets)
active_mining_triplet_ratio = (
tf.cast(num_active_mining_triplets, dtype=tf.float32) /
num_total_triplets)
active_loss = (
loss / tf.math.maximum(1e-12, tf.stop_gradient(active_triplet_ratio)))
active_mining_loss = (
mining_loss /
tf.math.maximum(1e-12, tf.stop_gradient(active_mining_triplet_ratio)))
tag = 'SemiHardNegative' if use_semi_hard else 'HardNegative'
summaries = {
# Summaries related to triplet loss computation.
'triplet_loss/Anchor/%s/Distance/Mean' % tag:
tf.math.reduce_mean(negative_distances),
'triplet_loss/%s/Loss/All' % tag:
loss,
'triplet_loss/%s/Loss/Active' % tag:
active_loss,
'triplet_loss/%s/ActiveTripletNum' % tag:
num_active_triplets,
'triplet_loss/%s/ActiveTripletRatio' % tag:
active_triplet_ratio,
# Summaries related to triplet mining.
'triplet_mining/Anchor/%s/Distance/Mean' % tag:
tf.math.reduce_mean(negative_mining_distances),
'triplet_mining/%s/Loss/All' % tag:
mining_loss,
'triplet_mining/%s/Loss/Active' % tag:
active_mining_loss,
'triplet_mining/%s/ActiveTripletNum' % tag:
num_active_mining_triplets,
'triplet_mining/%s/ActiveTripletRatio' % tag:
active_mining_triplet_ratio,
}
if summarize_percentiles:
summaries.update({
'triplet_loss/Anchor/%s/Distance/Median' % tag:
tfp.stats.percentile(negative_distances, q=50),
'triplet_mining/Anchor/%s/Distance/Median' % tag:
tfp.stats.percentile(negative_mining_distances, q=50),
})
return loss, active_loss, summaries
hard_negative_loss, hard_negative_active_loss, hard_negative_summaries = (
compute_loss_and_create_summaries(use_semi_hard=False))
(semi_hard_negative_loss, semi_hard_negative_active_loss,
semi_hard_negative_summaries) = (
compute_loss_and_create_summaries(use_semi_hard=True))
summaries = {
'triplet_loss/Margin':
tf.constant(margin),
'triplet_loss/Anchor/Positive/Distance/Mean':
tf.math.reduce_mean(anchor_positive_distances),
'triplet_mining/Anchor/Positive/Distance/Mean':
tf.math.reduce_mean(anchor_positive_mining_distances),
}
if summarize_percentiles:
summaries.update({
'triplet_loss/Anchor/Positive/Distance/Median':
tfp.stats.percentile(anchor_positive_distances, q=50),
'triplet_mining/Anchor/Positive/Distance/Median':
tfp.stats.percentile(anchor_positive_mining_distances, q=50),
})
summaries.update(hard_negative_summaries)
summaries.update(semi_hard_negative_summaries)
if use_semi_hard:
if exclude_inactive_triplet_loss:
loss = semi_hard_negative_active_loss
else:
loss = semi_hard_negative_loss
else:
if exclude_inactive_triplet_loss:
loss = hard_negative_active_loss
else:
loss = hard_negative_loss
return loss, summaries
def compute_kl_regularization_loss(means,
stddevs,
loss_weight,
prior_mean=0.0,
prior_stddev=1.0):
"""Computes KL divergence regularization loss for multivariate Gaussian.
Args:
means: A tensor for distribution means. Shape = [..., dim].
stddevs: A tensor for distribution standard deviations. Shape = [..., dim].
loss_weight: A float for loss weight.
prior_mean: A float for prior distribution mean.
prior_stddev: A float for prior distribution standard deviation.
Returns:
loss: A tensor for weighted regularization loss. Shape = [].
summaries: A dictionary for loss summaries.
"""
loss = tf.math.reduce_mean(
distance_utils.compute_gaussian_kl_divergence(
means, stddevs, rhs_means=prior_mean, rhs_stddevs=prior_stddev))
weighted_loss = loss_weight * loss
summaries = {
'regularization_loss/KL/PriorMean/Mean':
tf.math.reduce_mean(tf.constant(prior_mean)),
'regularization_loss/KL/PriorVar/Mean':
tf.math.reduce_mean(tf.constant(prior_stddev)**2),
'regularization_loss/KL/Loss/Original':
loss,
'regularization_loss/KL/Loss/Weighted':
weighted_loss,
'regularization_loss/KL/Loss/Weight':
tf.constant(loss_weight),
}
return weighted_loss, summaries
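# For reference, the per-dimension term averaged above is the standard Gaussian
# KL divergence (assuming distance_utils.compute_gaussian_kl_divergence follows
# the usual closed form):
#   KL(N(m, s) || N(m0, s0)) = log(s0 / s) + (s**2 + (m - m0)**2) / (2 * s0**2) - 0.5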
def compute_positive_pairwise_loss(anchor_embeddings,
positive_embeddings,
loss_weight,
distance_fn=functools.partial(
distance_utils.compute_l2_distances,
squared=True)):
"""Computes anchor/positive pairwise (squared L2) loss.
Args:
anchor_embeddings: A tensor for anchor embeddings. Shape = [...,
embedding_dim].
positive_embeddings: A tensor for positive embeddings. Shape = [...,
embedding_dim].
loss_weight: A float for loss weight.
distance_fn: A function handle for computing embedding distances, which
takes two embedding tensors of shape [..., embedding_dim] and returns a
distance tensor of shape [...].
Returns:
loss: A tensor for weighted positive pairwise loss. Shape = [].
summaries: A dictionary for loss summaries.
"""
loss = tf.math.reduce_mean(
distance_fn(anchor_embeddings, positive_embeddings))
weighted_loss = loss_weight * loss
summaries = {
'pairwise_loss/PositivePair/Loss/Original': loss,
'pairwise_loss/PositivePair/Loss/Weighted': weighted_loss,
'pairwise_loss/PositivePair/Loss/Weight': tf.constant(loss_weight),
}
return weighted_loss, summaries
|
google-research/google-research
|
poem/core/loss_utils.py
|
Python
|
apache-2.0
| 28,809
|
[
"Gaussian"
] |
a3187fa6bcabe8b26070453305585c491d732e643d015650bd6fdaff2b32cd18
|
# File I/O class
# A wrapper around various NetCDF libraries, used by
# BOUT++ routines. Creates a consistent interface
# across machines
#
# NOTE: NetCDF includes unlimited dimensions,
# but this library is just for very simple
# I/O operations. Educated guesses are made
# for the dimensions.
#
# Supported libraries:
# -------------------
#
# netCDF4
#
# Scientific.IO.NetCDF
#
# scipy.io.netcdf
# old version (create_dimension, create_variable)
# new version (createDimension, createVariable)
#
try:
import numpy as np
except ImportError:
print "ERROR: NumPy module not available"
raise
library = None # Record which library to use
try:
from netCDF4 import Dataset
library = "netCDF4"
except ImportError:
#print "netcdf4-python module not found"
try:
from Scientific.IO.NetCDF import NetCDFFile as Dataset
from Scientific.N import Int, Float
library = "Scientific"
#print " => Using Scientific.IO.NetCDF instead"
except ImportError:
try:
from scipy.io.netcdf import netcdf_file as Dataset
library = "scipy"
# print "Using scipy.io.netcdf library"
except:
print "DataFile: No supported NetCDF modules available"
raise
import time
def getUserName():
try:
import os, pwd, string
except ImportError:
return 'unknown user'
pwd_entry = pwd.getpwuid(os.getuid())
name = string.strip(string.splitfields(pwd_entry[4], ',')[0])
if name == '':
name = pwd_entry[0]
return name
class DataFile:
handle = None
def open(self, filename, write=False, create=False,
format='NETCDF3_CLASSIC'):
if (not write) and (not create):
self.handle = Dataset(filename, "r")
elif create:
if library == "Scientific":
self.handle = Dataset(filename, "w",
'Created ' + time.ctime(time.time())
+ ' by ' + getUserName())
elif library == "scipy":
self.handle = Dataset(filename, "w")
else:
self.handle = Dataset(filename, "w", format=format)
else:
if library == "scipy":
                raise Exception("scipy.io.netcdf doesn't support appending")
else:
self.handle = Dataset(filename, "a")
# Record if writing
self.writeable = write or create
def close(self):
if self.handle != None:
self.handle.close()
self.handle = None
def __init__(self, filename=None, write=False, create=False,
format='NETCDF3_CLASSIC'):
if filename != None:
self.open(filename, write=write, create=create, format=format)
def __del__(self):
self.close()
def read(self, name, ranges=None):
"""Read a variable from the file."""
if self.handle == None: return None
try:
var = self.handle.variables[name]
except KeyError:
# Not found. Try to find using case-insensitive search
var = None
for n in self.handle.variables.keys():
if n.lower() == name.lower():
print "WARNING: Reading '"+n+"' instead of '"+name+"'"
var = self.handle.variables[n]
if var == None:
return None
ndims = len(var.dimensions)
if ndims == 0:
data = var.getValue()
return data #[0]
else:
if ranges != None:
if len(ranges) != 2*ndims:
print "Incorrect number of elements in ranges argument"
return None
if library == "Scientific":
# Passing ranges to var[] doesn't seem to work
data = var[:]
if ndims == 1:
data = data[ranges[0]:ranges[1]]
elif ndims == 2:
data = data[ranges[0]:ranges[1],
ranges[2]:ranges[3]]
elif ndims == 3:
data = data[ranges[0]:ranges[1],
ranges[2]:ranges[3],
ranges[4]:ranges[5]]
elif ndims == 4:
data = data[(ranges[0]):(ranges[1]),
(ranges[2]):(ranges[3]),
(ranges[4]):(ranges[5]),
(ranges[6]):(ranges[7])]
else:
if ndims == 1:
data = var[ranges[0]:ranges[1]]
elif ndims == 2:
data = var[ranges[0]:ranges[1],
ranges[2]:ranges[3]]
elif ndims == 3:
data = var[ranges[0]:ranges[1],
ranges[2]:ranges[3],
ranges[4]:ranges[5]]
elif ndims == 4:
print "Ranges = ", ranges
data = var[(ranges[0]):(ranges[1]),
(ranges[2]):(ranges[3]),
(ranges[4]):(ranges[5]),
(ranges[6]):(ranges[7])]
return data
else:
return var[:]
def list(self):
"""List all variables in the file."""
if self.handle == None: return []
return self.handle.variables.keys()
def dimensions(self, varname):
"""Array of dimension names"""
if self.handle == None: return None
try:
var = self.handle.variables[varname]
except KeyError:
return None
return var.dimensions
def ndims(self, varname):
"""Number of dimensions for a variable."""
if self.handle == None: return None
try:
var = self.handle.variables[varname]
except KeyError:
return None
return len(var.dimensions)
def size(self, varname):
"""List of dimension sizes for a variable."""
if self.handle == None: return []
try:
var = self.handle.variables[varname]
except KeyError:
return []
def dimlen(d):
dim = self.handle.dimensions[d]
if dim != None:
t = type(dim).__name__
if t == 'int':
return dim
return len(dim)
return 0
return map(lambda d: dimlen(d), var.dimensions)
def write(self, name, data):
"""Writes a variable to file, making guesses for the dimensions"""
if not self.writeable:
raise Exception("File not writeable. Open with write=True keyword")
s = np.shape(data)
# Get the variable type
t = type(data).__name__
if t == 'NoneType':
print "DataFile: None passed as data to write. Ignoring"
return
if t == 'ndarray':
# Numpy type. Get the data type
t = data.dtype.str
if t == 'list':
# List -> convert to numpy array
data = np.array(data)
t = data.dtype.str
if (t == 'int') or (t == '<i8'):
# NetCDF 3 does not support type int64
data = np.int32(data)
t = data.dtype.str
try:
# See if the variable already exists
var = self.handle.variables[name]
# Check the shape of the variable
if var.shape != s:
print "Datafile: Variable already exists with different size: "+ name
raise
except:
# Not found, so add.
# Get dimensions
defdims = [(),
('x',),
('x','y'),
('x','y','z'),
('t','x','y','z')]
def find_dim(dim):
# Find a dimension with given name and size
size, name = dim
# See if it exists already
try:
d = self.handle.dimensions[name]
# Check if it's the correct size
if type(d).__name__ == 'int':
if d == size:
return name;
else:
if len(d) == size:
return name
# Find another with the correct size
for dn, d in self.handle.dimensions.iteritems():
# Some implementations need len(d) here, some just d
if type(d).__name__ == 'int':
if d == size:
return dn
else:
if len(d) == size:
return dn
# None found, so create a new one
i = 2
while True:
dn = name + str(i)
try:
d = self.handle.dimensions[dn]
# Already exists, so keep going
except KeyError:
# Not found. Create
print "Defining dimension "+ dn + " of size %d" % size
try:
self.handle.createDimension(dn, size)
except AttributeError:
# Try the old-style function
self.handle.create_dimension(dn, size)
return dn
i = i + 1
except KeyError:
# Doesn't exist, so add
print "Defining dimension "+ name + " of size %d" % size
try:
self.handle.createDimension(name, size)
except AttributeError:
self.handle.create_dimension(name, size)
return name
# List of (size, 'name') tuples
dlist = zip(s, defdims[len(s)])
# Get new list of variables, and turn into a tuple
dims = tuple( map(find_dim, dlist) )
# Create the variable
if library == "Scientific":
if t == 'int':
tc = Int
else:
tc = Float
var = self.handle.createVariable(name, tc, dims)
elif library == "scipy":
try:
# New style functions
var = self.handle.createVariable(name, t, dims)
except AttributeError:
# Old style functions
var = self.handle.create_variable(name, t, dims)
else:
var = self.handle.createVariable(name, t, dims)
if var == None:
raise Exception("Couldn't create variable")
# Write the data
try:
# Some libraries allow this for arrays
var.assignValue(data)
except:
# And some others only this
var[:] = data
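
# A short usage sketch; the file name, variable name and shapes below are
# illustrative only and depend on whichever NetCDF backend is available.
if __name__ == "__main__":
    f = DataFile("example.nc", create=True)
    f.write("temperature", np.zeros((4, 8)))  # dimensions guessed as ('x', 'y')
    f.close()
    f = DataFile("example.nc")
    print f.list()                # variable names, e.g. ['temperature']
    print f.size("temperature")   # dimension sizes, e.g. [4, 8]
    data = f.read("temperature")
    f.close()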
|
boutproject/BOUT-2.0
|
tools/pylib/boututils/datafile.py
|
Python
|
gpl-3.0
| 11,686
|
[
"NetCDF"
] |
fe83268cef1797743c60048e2a839d238b10523c257aac29815e2ec0631a8f44
|
# coding=utf-8
from pkg_resources import resource_filename
from compliance_checker.suite import CheckSuite
from compliance_checker.base import Result, BaseCheck, GenericFile
import numpy as np
import unittest
import os
static_files = {
'2dim' : resource_filename('compliance_checker', 'tests/data/2dim-grid.nc'),
'bad_region' : resource_filename('compliance_checker', 'tests/data/bad_region.nc'),
'bad_data_type': resource_filename('compliance_checker', 'tests/data/bad_data_type.nc'),
'test_cdl' : resource_filename('compliance_checker', 'tests/data/test_cdl.cdl'),
'test_cdl_nc' : resource_filename('compliance_checker', 'tests/data/test_cdl_nc_file.nc'),
'empty' : resource_filename('compliance_checker', 'tests/data/non-comp/empty.file'),
'ru07' : resource_filename('compliance_checker', 'tests/data/ru07-20130824T170228_rt0.nc'),
'netCDF4' : resource_filename('compliance_checker', 'tests/data/test_cdl_nc4_file.cdl')
}
class TestSuite(unittest.TestCase):
# @see
# http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/
def setUp(self):
self.cs = CheckSuite()
self.cs.load_all_available_checkers()
def shortDescription(self):
return None
# override __str__ and __repr__ behavior to show a copy-pastable nosetest name for ion tests
# ion.module:TestClassName.test_function_name
def __repr__(self):
name = self.id()
name = name.split('.')
if name[0] not in ["ion", "pyon"]:
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
else:
return "%s ( %s )" % (name[-1], '.'.join(name[:-2]) + ":" +
'.'.join(name[-2:]))
__str__ = __repr__
def test_suite(self):
# BWA: what's the purpose of this test? Just to see if the suite
# runs without errors?
ds = self.cs.load_dataset(static_files['2dim'])
self.cs.run(ds, 'acdd')
def test_unicode_formatting(self):
ds = self.cs.load_dataset(static_files['bad_region'])
score_groups = self.cs.run(ds, 'cf')
limit = 2
for checker, rpair in score_groups.items():
groups, errors = rpair
score_list, points, out_of = self.cs.standard_output(ds.filepath(),
limit, checker,
groups)
# This asserts that print is able to generate all of the unicode output
self.cs.standard_output_generation(groups, limit, points, out_of, checker)
def test_generate_dataset_netCDF4(self):
"""
        Tests that suite.generate_dataset works with a CDL file that uses
        netCDF4 features.
"""
# create netCDF4 file
ds_name = self.cs.generate_dataset(static_files['netCDF4'])
# check if correct name is return
assert ds_name == static_files['netCDF4'].replace('.cdl', '.nc')
# check if netCDF4 file was created
assert os.path.isfile(static_files['netCDF4'].replace('.cdl', '.nc'))
def test_skip_checks(self):
"""Tests that checks are properly skipped when specified"""
ds = self.cs.load_dataset(static_files['2dim'])
# exclude title from the check attributes
score_groups = self.cs.run(ds, ['check_high'], 'acdd')
assert all(sg.name not in {'Conventions', 'title', 'keywords',
'summary'} for sg in score_groups['acdd'][0])
def test_skip_check_level(self):
"""Checks level limited skip checks"""
ds = self.cs.load_dataset(static_files['ru07'])
score_groups = self.cs.run(ds, ['check_flags:A',
'check_convention_possibly_var_attrs:M',
'check_standard_name:L'], 'cf')
name_set = {sg.name for sg in score_groups['cf'][0]}
# flattened set of messages
msg_set = {msg for sg in score_groups['cf'][0] for msg in sg.msgs}
expected_excluded_names = {u'§3.5 flag_meanings for lat',
u'§3.5 flag_meanings for lon',
u'§3.5 lat is a valid flags variable',
u'§3.5 lat is a valid flags variable',
u'§3.5 lon is a valid flags variable'}
self.assertTrue(len(expected_excluded_names & name_set) == 0)
# should skip references
ref_msg = u'references global attribute should be a non-empty string'
self.assertTrue(ref_msg not in msg_set)
# check_standard_name is high priority, but we requested only low,
# so the standard_name check should still exist
standard_name_hdr = u'§3.3 Standard Name'
self.assertTrue(standard_name_hdr in name_set)
def test_group_func(self):
# This is checking for issue #183, where group_func results in
# IndexError: list index out of range
ds = self.cs.load_dataset(static_files['bad_data_type'])
score_groups = self.cs.run(ds, 'cf')
limit = 2
for checker, rpair in score_groups.items():
groups, errors = rpair
score_list, points, out_of = self.cs.standard_output(ds.filepath(),
limit, checker,
groups)
# This asserts that print is able to generate all of the unicode output
self.cs.standard_output_generation(groups, limit, points, out_of, checker)
def test_score_grouping(self):
# Testing the grouping of results for output, which can fail
# if some assumptions are not met, e.g. if a Result object has
# a value attribute of unexpected type
res = [
Result(BaseCheck.MEDIUM, True, 'one'),
Result(BaseCheck.MEDIUM, (1, 3), 'one'),
Result(BaseCheck.MEDIUM, None, 'one'),
Result(BaseCheck.MEDIUM, True, 'two'),
Result(BaseCheck.MEDIUM, np.isnan(1), 'two') # value is type numpy.bool_
]
score = self.cs.scores(res)
self.assertEqual(score[0].name, 'one')
self.assertEqual(score[0].value, (2, 4))
self.assertEqual(score[1].name, 'two')
self.assertEqual(score[1].value, (1, 2))
def test_cdl_file(self):
# Testing whether you can run compliance checker on a .cdl file
# Load the cdl file
ds = self.cs.load_dataset(static_files['test_cdl'])
vals = self.cs.run(ds, 'cf')
limit = 2
for checker, rpair in vals.items():
groups, errors = rpair
score_list, cdl_points, cdl_out_of = self.cs.standard_output(ds.filepath(),
limit,
checker,
groups)
# This asserts that print is able to generate all of the unicode output
self.cs.standard_output_generation(groups, limit, cdl_points, cdl_out_of, checker)
ds.close()
# Ok now load the nc file that it came from
ds = self.cs.load_dataset(static_files['test_cdl_nc'])
vals = self.cs.run(ds, 'cf')
limit = 2
for checker, rpair in vals.items():
groups, errors = rpair
score_list, nc_points, nc_out_of = self.cs.standard_output(ds.filepath(),
limit,
checker,
groups)
# This asserts that print is able to generate all of the unicode output
self.cs.standard_output_generation(groups, limit, nc_points, nc_out_of, checker)
ds.close()
nc_file_path = static_files['test_cdl'].replace('.cdl', '.nc')
self.addCleanup(os.remove, nc_file_path)
# Ok the scores should be equal!
self.assertEqual(nc_points, cdl_points)
self.assertEqual(nc_out_of, cdl_out_of)
def test_load_local_dataset_GenericFile(self):
resp = self.cs.load_local_dataset(static_files['empty'])
assert isinstance(resp, GenericFile) == True
def test_standard_output_score_header(self):
"""
        Check that the output score header only counts the number of
        potential issues, rather than the weighted score
"""
ds = self.cs.load_dataset(static_files['bad_region'])
score_groups = self.cs.run(ds, [], 'cf')
limit = 2
groups, errors = score_groups['cf']
score_list, all_passed, out_of = self.cs.standard_output(
ds.filepath(),
limit, 'cf',
groups)
assert all_passed < out_of
def test_netCDF4_features(self):
"""
Check if a proper netCDF4 file with netCDF4-datatypes is created.
"""
# create and open dataset
ds = self.cs.load_dataset(static_files['netCDF4'])
# check if netCDF type of global attributes is correct
assert isinstance(ds.global_att_of_type_int, np.int32)
# check if netCDF4 type of global attributes is correct
assert isinstance(ds.global_att_of_type_int64, np.int64)
# check if netCDF type of variable is correct
assert ds['tas'].dtype is np.dtype('float32')
# check if netCDF4 type of variable is correct
assert ds['mask'].dtype is np.dtype('int64')
|
lukecampbell/compliance-checker
|
compliance_checker/tests/test_suite.py
|
Python
|
apache-2.0
| 9,873
|
[
"BWA",
"NetCDF"
] |
4254152a7df83faefdefd326873828de1dab7ffd66b14ecb9e392e0e4e660a07
|
"""cdbifunc.py
Developer: Noelle Todd
Last Updated: August 30, 2014
This module holds all functions that will be called directly by the user
interface. This module uses several functions in cdbfunctions.py; the two
modules have been split to make designing the user interface as simple as
possible.
"""
import sqlalchemy
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, func
from sqlalchemy import desc
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta
from cdbtabledef import Household, Person, Volunteer, Visit
from cdbfunctions import *
engine = create_engine('sqlite:///test2_db.sqlite')
session = sessionmaker()
session.configure(bind=engine)
base = declarative_base()
s = session()
####Closing, cancelling, and resetting functions####
def quit_session():
"""This function will close the session.
"""
s.close()
def cancel_changes():
"""This function will rollback transactions.
"""
s.rollback()
def reset(I_ID):
    """This function re-reads and returns the client's stored data, discarding
    any unsaved edits.
    """
info = select_client(I_ID)
return info
####Functions for listing####
def list_people():
"""This function takes no arguments and returns a list of tuples.
    Each tuple contains a string for a person's full name, the person's
    date of birth, and an integer for the person's unique id.
Note: this only returns people that are members of a household.
"""
people = []
    #create a list of tuples, where each tuple contains a string holding a
    #person's full name, the person's date of birth, and an integer holding
    #the person's unique id. The names are added in alphabetic (A-Z) order.
#
for instance in s.query(Person).order_by(Person.last_name):
try:
h = s.query(Household).filter(Household.id == instance.HH_ID).one()
fullname = instance.first_name + " " + instance.last_name
people.append((fullname, instance.DOB, instance.id))
except NoResultFound:
pass
return people
def list_historical_members():
"""This function lists all people who are no longer associated with a
household.
"""
people = []
for instance in s.query(Person).order_by(Person.last_name):
if instance.HH_ID == None:
fullname = instance.first_name + " " + instance.last_name
people.append(fullname)
else: pass
return people
def list_active_volunteers():
"""This function takes no arguments and returns a list of tuples.
    Each tuple contains a string for a volunteer's full name and an integer
    for their unique id; only active volunteers are included.
"""
volunteers = []
for instance in s.query(Volunteer).order_by(Volunteer.last_name):
if instance.active == True:
fullname = instance.first_name + " " + instance.last_name
volunteers.append((fullname, instance.id))
else: pass
return volunteers
def list_all_volunteers():
"""This function takes no arguments and returns a list of tuples.
This lists all volunteers, whether active or not, and their activity
status.
"""
volunteers = []
for instance in s.query(Volunteer).order_by(Volunteer.last_name):
fullname = instance.first_name + " " + instance.last_name
volunteers.append((fullname, instance.id))
return volunteers
def list_households():
"""This function simply lists all households.
"""
houses = []
for instance in s.query(Household).order_by(Household.city):
houses.append((instance.street_address, instance.city, instance.id))
return houses
def list_vis():
"""This function simply lists all visits.
"""
visits = []
for instance in s.query(Visit).order_by(Visit.date):
visits.append((instance.HH_ID, instance.id, instance.I_ID))
return visits
def select_volunteer(Vol_ID):
"""This returns all volunteer information.
"""
vol = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
volreturn = volunteerData(firstname=vol.first_name, lastname=vol.last_name,
phone=vol.phone, active=vol.active,
color=vol.color)
return volreturn
def select_client(I_ID):
"""This a dictionary of objects containing all data for a selected
client.
The return will include an oldClientData object for the visitor,
a houseData object for the household, a list of visitDataReturn objects,
a list of oldClientData objects for family members, and a dictionary of
agegroups.
"""
#find person and associated household
pers = s.query(Person).filter(Person.id == I_ID).one()
house = s.query(Household).filter(Household.id == pers.HH_ID).one()
#create new object to hold visitor's data
visitor = oldClientData(id=pers.id, firstname=pers.first_name,
lastname=pers.last_name, dob=pers.DOB,
phone=pers.phone, dateJoined=pers.date_joined)
#create new object to hold household data
household = houseData(street=house.street_address, city=house.city,
state=house.state, zip=house.zip,
dateVerified=house.date_verified, apt=house.apt)
#list to hold member-data objects
members = []
#create new objects to hold data for each additional household member
for member in house.members:
        if member.id == pers.id: pass
else:
mem = oldClientData(id=member.id, firstname=member.first_name,
lastname=member.last_name, dob=member.DOB,
phone=member.phone,
dateJoined=member.date_joined)
members.append(mem)
#get list of information about past 3 visits
visits = list_visits(s, I_ID)
#call to function to get dictionary of ages
agegroups = get_age_breakdown(house.members)
house.seniors = agegroups["seniors"]
house.adults = agegroups["adults"]
house.children = agegroups["children"]
house.infants = agegroups["infants"]
house.total = agegroups["total"]
#create dictionary of all objects to be returned
info = {"visitor":visitor, "household":household, "member_list":members,
"visit_list":visits, "agegroup_dict":agegroups}
return info
####Functions for creating new records####
def new_volunteer(firstname, lastname, phone=None, active=True):
"""This function creates a new record for an active volunteer.
"""
insert_volunteer(s, firstname, lastname, phonenum=phone, active=active)
def new_visit(I_ID, visitInfo):
"""This function records a visit for a household.
"""
pers = s.query(Person).filter(Person.id == I_ID).one()
house = s.query(Household).filter(Household.id == pers.HH_ID).one()
#create a new visit
insert_visit(s, visitInfo.Vol_ID, pers.id, house.id, visitInfo.visitDate,
visitInfo.notes)
def new_household(houseInfo, visitInfo, newClientInfo_list):
"""This function takes an object for house info, an object for
visit info, and a list of objects for client info (one object per
client).
This function creates a new record for each new person, a new record
    for the household, and a new record for the visit.
"""
#create new household
newhouse = insert_household(s, houseInfo.street, houseInfo.dateVerified,
houseInfo.apt, houseInfo.city,
houseInfo.state, houseInfo.zip)
#create new person for every household member
data = newClientInfo_list #variable renamed for simplicity
for i in range(0, len(data)):
fname = data[i].firstname
lname = data[i].lastname
dob = data[i].dob
phone = data[i].phone
dateJoined = data[i].dateJoined
pers = insert_person(s, data[i].firstname, data[i].lastname,
data[i].dob, newhouse.id, data[i].dateJoined,
data[i].phone)
#the first person is the actual visitor; save for insert_visit
if i == 0:
newpers = pers
age_dict = get_age_breakdown(newhouse.members)
newhouse.seniors = age_dict["seniors"]
newhouse.adults = age_dict["adults"]
newhouse.children = age_dict["children"]
newhouse.infants = age_dict["infants"]
newhouse.total = age_dict["total"]
#create new visit for household
insert_visit(s, visitInfo.Vol_ID, newpers.id, newhouse.id,
visitInfo.visitDate, visitInfo.notes)
return newpers.id
####Functions for updating records####
def update_all(I_ID, houseInfo, oldClientInfo_list,
newClientInfo_list=None):
pers = s.query(Person).filter(Person.id == I_ID).one()
house = s.query(Household).filter(Household.id == pers.HH_ID).one()
#update household
update_household(s, house.id, houseInfo.street, houseInfo.city,
houseInfo.state, houseInfo.zip, houseInfo.apt,
houseInfo.dateVerified)
#add new clients (if they exist)
data = newClientInfo_list #renamed for simplicity
if data == None: pass
else:
for i in range(0, len(data)):
newpers = insert_person(s, data[i].firstname, data[i].lastname,
data[i].dob, house.id,
phonenum=data[i].phone)
#update old clients
old = oldClientInfo_list #renamed for simplicity
for i in range(0, len(old)):
update_person(s, old[i].id, old[i].firstname, old[i].lastname,
old[i].dob, old[i].phone)
def update_vol(vol_id, firstname, lastname, phonenum, active_state):
"""This function will update a volunteer's records.
"""
update_volunteer(s, vol_id, firstname, lastname, phonenum, active_state)
def update_vis(vis_id, date, notes=None):
"""This function will update a visit.
"""
update_visit(s, vis_id, date, notes)
def reactivate_volunteer(Vol_ID):
"""This function reactivates a volunteer. The volunteer will now
reappear in lists and such.
"""
vol = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
vol.active = True
s.commit()
####Functions for deleting/deactivating records####
def remove_client(I_ID):
"""This function will only delete a single client if the client
has never participated in a visit. If the client has visited, then
their household is set to "None" and they are placed in a "historical
members" list, but they remain in the database.
"""
pers = s.query(Person).filter(Person.id == I_ID).one()
vis = s.query(Visit).filter(Visit.I_ID == pers.id).all()
#create new household with dummy address
house = insert_household(s, street="None", dateverified=None, Apt=None,
City='None', State='None', Zip='00000')
pers.HH_ID = house.id
#pers.HH_ID = None
s.commit()
def remove_volunteer(Vol_ID):
"""This function will delete a volunteer if the volunteer has
not participated in a visit. Else, it will "deactivate" the
volunteer. The volunteer will remain in the database and can be
reactivated, but will not appear in the "active_volunteers" list.
"""
vol = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
vis = s.query(Visit).filter(Visit.Vol_ID == Vol_ID).all()
#if volunteer is not associated with a visit, then delete
if len(vis) == 0:
delete_volunteer(s, Vol_ID)
#if volunteer has helped with visits, just deactivate them
else:
vol.active = False
s.commit()
def remove_household(I_ID):
"""This function deletes the entire household, all members of the
household, and all visits associated with the household.
"""
#get household id
pers = s.query(Person).filter(Person.id == I_ID).one()
house = s.query(Household).filter(Household.id == pers.HH_ID).one()
#remove all visits the household has made
visits = s.query(Visit).filter(Visit.HH_ID == house.id).all()
for visit in visits:
delete_visit(s, visit.id)
#remove all members from the household
for member in house.members:
delete_person(s, member.id)
    #remove the household itself
delete_household(s, house.id)
def remove_visit(vis_id):
"""This function deletes a single visit.
"""
delete_visit(s, vis_id)
####Functions for generating monthly/yearly reports####
def generate_monthly_report():
"""This function will generate a csv/excel file that holds
data about households for the past month.
"""
duration = timedelta(days=31)
generate_report(s, duration)
def generate_yearly_report():
"""This function will generate a csv/excel file that holds
data about households for the past year.
"""
duration = timedelta(days=365)
generate_report(s, duration)
def generate_weekly_report():
"""This function will generate a csv/excel file that holds
    data about the households for the past 7 days. This will
include the number of new visitors, and the number of old
visitors.
"""
    duration = timedelta(days=7)
generate_report(s, duration)
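####Illustrative usage sketch (not part of the original module)####
# A rough guide to how a UI layer might call into this module. The VisitInfo
# namedtuple below is a hypothetical stand-in for whatever object the real
# interface passes; any object exposing the Vol_ID, visitDate, and notes
# attributes that new_visit() reads above would work. Ids and values are
# placeholders.
#
# from collections import namedtuple
# VisitInfo = namedtuple('VisitInfo', ['Vol_ID', 'visitDate', 'notes'])
#
# people = list_people()                      # [(fullname, DOB, id), ...]
# client_id = people[0][2]                    # pick a client id from the list
# info = select_client(client_id)             # dict of visitor/household data
# new_visit(client_id, VisitInfo(Vol_ID=1, visitDate=datetime.now(), notes=''))
# quit_session()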
|
ChristinaHammer/Client_Database
|
cdbifunc.py
|
Python
|
mit
| 12,536
|
[
"VisIt"
] |
2c8f7aacdac3b80d0868bff523e25a2dd454bc7d45e5c88e2cbda1411a251cda
|
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_jobs, h2o_gbm, h2o_exec as h2e
DO_BUG = False
DO_HDFS = False
DO_ALL_DIGITS = False
print "Uses numpy to create dataset..I guess we have to deal with jenkins not having it"
print "uses dot product off some coefficients to create output. also correlatation with constant term in cols"
SCIPY_INSTALLED = True
try:
import scipy as sp
import numpy as np
print "Both numpy and scipy are installed. Will do extra checks"
except ImportError:
print "numpy or scipy is not installed. Will only do sort-based checking"
SCIPY_INSTALLED = False
def write_syn_dataset(csvPathname, rowCount=100, colCount=10):
# http://nbviewer.ipython.org/github/fabianp/pytron/blob/master/doc/benchmark_logistic.ipynb
# http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
# The synthetic data used in the benchmarks was generated as described in 2 and
# consists primarily of the design matrix X being Gaussian noise,
# the vector of coefficients is drawn also from a Gaussian distribution
# and the explained variable y is generated as y=sign(Xw).
# We then perturb matrix X by adding Gaussian noise with covariance 0.8.
corr = 1. # 0., 1., 10.
n_samples = rowCount
n_features = colCount
np.random.seed(0)
X = np.random.randn(n_samples, n_features)
w = np.random.randn(n_features)
# np.sign returns sign
y = np.sign(X.dot(w))
X += 0.8 * np.random.randn(n_samples, n_features) # add noise
X+= corr # this makes it correlated by adding a constant term
# X = np.hstack((X, np.ones((X.shape[0], 1)))) # add a column of ones for intercept
print X.shape
print y.shape
# concatenate X and y columns together so we can write a csv
y2 = np.reshape(y, (X.shape[0], 1))
Xy = np.hstack((X, y2))
np.savetxt(csvPathname, Xy, delimiter=',', fmt='%5.4f')
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with it's hdfs namenode
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
# all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_mnist(self):
if not SCIPY_INSTALLED:
pass
else:
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilelist = [
(10000, 500, 'cA', 60),
]
trial = 0
for (rowCount, colCount, hex_key, timeoutSecs) in csvFilelist:
trialStart = time.time()
# PARSE test****************************************
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + "/" + csvFilename
write_syn_dataset(csvPathname, rowCount, colCount)
start = time.time()
parseResult = h2i.import_parse(path=csvPathname, schema='put',
hex_key=hex_key, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
# GLM****************************************
modelKey = 'GLM_model'
y = colCount
kwargs = {
'response': 'C' + str(y+1),
'family': 'binomial',
'lambda': 1e-4,
'alpha': 0,
'max_iter': 15,
'n_folds': 1,
'beta_epsilon': 1.0E-4,
'destination_key': modelKey,
}
# GLM wants the output col to be strictly 0,1 integer
execExpr = "aHack=%s; aHack[,%s] = aHack[,%s]==1" % (hex_key, y+1, y+1)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
aHack = {'destination_key': 'aHack'}
timeoutSecs = 1800
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, pollTimeoutSecs=60, **kwargs)
elapsed = time.time() - start
print "GLM completed in", elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_glm.simpleCheckGLM(self, glm, None, noPrint=True, **kwargs)
modelKey = glm['glm_model']['_key']
# This seems wrong..what's the format of the cm?
lambdaMax = glm['glm_model']['lambda_max']
print "lambdaMax:", lambdaMax
best_threshold= glm['glm_model']['submodels'][0]['validation']['best_threshold']
print "best_threshold", best_threshold
# pick the middle one?
cm = glm['glm_model']['submodels'][0]['validation']['_cms'][5]['_arr']
print "cm:", cm
pctWrong = h2o_gbm.pp_cm_summary(cm);
# self.assertLess(pctWrong, 9,"Should see less than 9% error (class = 4)")
print "\nTrain\n==========\n"
print h2o_gbm.pp_cm(cm)
# Score *******************************
# this messes up if you use case_mode/case_vale above
print "\nPredict\n==========\n"
predictKey = 'Predict.hex'
start = time.time()
predictResult = h2o_cmd.runPredict(
data_key='aHack',
model_key=modelKey,
destination_key=predictKey,
timeoutSecs=timeoutSecs)
predictCMResult = h2o.nodes[0].predict_confusion_matrix(
actual='aHack',
vactual='C' + str(y+1),
predict=predictKey,
vpredict='predict',
)
cm = predictCMResult['cm']
# These will move into the h2o_gbm.py
pctWrong = h2o_gbm.pp_cm_summary(cm);
self.assertLess(pctWrong, 50,"Should see less than 50% error")
print "\nTest\n==========\n"
print h2o_gbm.pp_cm(cm)
if __name__ == '__main__':
h2o.unit_main()
|
janezhango/BigDataMachineLearning
|
py/testdir_single_jvm/test_GLM2_syn_corr.py
|
Python
|
apache-2.0
| 6,766
|
[
"Gaussian"
] |
d55552e05ac5ed291dbb844626d29df514940237ec7a00305fb11a52a44236c4
|
"""Probe image with mesh. Use VTK 5.10 which comes with Anaconda: Created by Arjan Geers, Modified by Noel Conlisk"""
import os
import vtk
def read_image(path):
"""Read VTK ImageData"""
filename, extension = os.path.splitext(path)
if extension == '.vtk':
reader = vtk.vtkStructuredPointsReader()
elif extension == '.vti':
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName(path)
reader.Update()
return reader.GetOutput()
def read_mesh(path):
"""Read VTK-file"""
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(path)
reader.Update()
return reader.GetOutput()
def probe(image, mesh):
"""Sample image with mesh"""
prober = vtk.vtkProbeFilter()
prober.SetInput(mesh)
prober.SetSource(image)
prober.Update()
return prober.GetOutput()
def write_mesh(mesh, path):
"""Write VTK UnstructuredGrid. Depending on the file extension either
legacy VTK format or VTK XML format will be used."""
filename, extension = os.path.splitext(path)
if extension == '.vtk':
writer = vtk.vtkUnstructuredGridWriter()
elif extension == '.vtu':
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetInput(mesh)
writer.SetFileName(path)
writer.Write()
image = read_image('SOF086_pet_data.vtk')
mesh = read_mesh('SOF086_2_3_edited_realigned.vtk')
output = probe(image, mesh)
write_mesh(output, 'SOF086_pet_values.vtu')
|
nconlisk/python
|
VTK/probe_mesh_combined.py
|
Python
|
gpl-3.0
| 1,502
|
[
"VTK"
] |
fa5d5ae68cec61ce5e84783082eb9167a949a93c076fdd733d2250436b2c6ee0
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#======================================================================
#
# ccport.py - cclib port
#
# ³õʼ»¯·þÎñ
#
#======================================================================
import sys, time, struct, os
import ctypes
from ctypes import c_uint, c_int, c_char_p, c_void_p, c_int32, c_uint32
from ctypes import c_int16, c_uint16, c_int8, c_uint8, c_ushort, byref
#----------------------------------------------------------------------
# initialize
#----------------------------------------------------------------------
HOME = os.environ.get('CHOME', os.path.abspath('.'))   # CASUALD directory
HOME = os.path.abspath(HOME)
def loadlib (fn):
try: dl = ctypes.cdll.LoadLibrary(fn)
except: return None
return dl
_unix = sys.platform[:3] != 'win' and True or False
_search = [ HOME, os.path.join(HOME, 'bin'), '.', '../bin' ]
_names = [ 'cclib', 'cclib.cc', 'cclib.win', 'cclib.pyd', 'cclib.so' ]
_dllname = ''
_cclib = None
for root in _search:
path = os.path.abspath(root)
for fn in _names:
nm = os.path.join(path, fn)
dl = loadlib(nm)
if not dl: continue
try: test = dl.cg_attach
except: continue
_cclib = dl
_dllname = nm
break
if _cclib == None:
print 'can not load dynamic library cclib'
sys.exit(1)
#----------------------------------------------------------------------
# port interface
#----------------------------------------------------------------------
_cg_attach = _cclib.cg_attach
_cg_headmode = _cclib.cg_headmode
_cg_peek = _cclib.cg_peek
_cg_exit = _cclib.cg_exit
_cg_read = _cclib.cg_read
_cg_write = _cclib.cg_write
_cg_sendto = _cclib.cg_sendto
_cg_bsendto = _cclib.cg_bsendto
_cg_groupcast = _cclib.cg_groupcast
_cg_close = _cclib.cg_close
_cg_ioflush = _cclib.cg_ioflush
_cg_tag = _cclib.cg_tag
_cg_movec = _cclib.cg_movec
_cg_channel = _cclib.cg_channel
_cg_broadcast = _cclib.cg_broadcast
_cg_settimer = _cclib.cg_settimer
_cg_sysinfo = _cclib.cg_sysinfo
_cg_logmask = _cclib.cg_logmask
_cg_book_add = _cclib.cg_book_add
_cg_book_del = _cclib.cg_book_del
_cg_book_reset = _cclib.cg_book_reset
_cg_head = _cclib.cg_head
_cg_tail = _cclib.cg_tail
_cg_next = _cclib.cg_next
_cg_prev = _cclib.cg_prev
_cg_htag = _cclib.cg_htag
_cg_hidnum = _cclib.cg_hidnum
_cg_getchid = _cclib.cg_getchid
_cg_setseed = _cclib.cg_setseed
_cg_rc4_set_skey = _cclib.cg_rc4_set_skey
_cg_rc4_set_rkey = _cclib.cg_rc4_set_rkey
_cg_flush = _cclib.cg_flush
_cg_bufmod = _cclib.cg_bufmod
_cg_gethd = _cclib.cg_gethd
_cg_sethd = _cclib.cg_sethd
_cg_setmask = _cclib.cg_setmask
#----------------------------------------------------------------------
# prototypes
#----------------------------------------------------------------------
_cg_attach.argtypes = [ c_char_p, c_ushort, c_int ]
_cg_attach.restype = c_int
_cg_headmode.argtypes = [ c_int ]
_cg_headmode.restype = None
_cg_peek.argtypes = [ c_int ]
_cg_peek.restype = c_int
_cg_exit.argtypes = [ ]
_cg_exit.restype = None
_cg_read.argtypes = [ c_void_p, c_void_p, c_void_p, c_void_p, c_int ]
_cg_read.restype = c_int
_cg_write.argtypes = [ c_int, c_int, c_int, c_char_p, c_int, c_int ]
_cg_write.restype = c_int
_cg_sendto.argtypes = [ c_int, c_char_p, c_int, c_int ]
_cg_sendto.restype = c_int
_cg_bsendto.argtypes = [ c_int, c_char_p, c_int, c_int ]
_cg_bsendto.restype = c_int
_cg_groupcast.argtypes = [ c_void_p, c_int, c_char_p, c_int, c_int ]
_cg_groupcast.restype = c_int
_cg_close.argtypes = [ c_int, c_int ]
_cg_close.restype = c_int
_cg_ioflush.argtypes = []
_cg_ioflush.restype = c_int
_cg_tag.argtypes = [ c_int, c_int ]
_cg_tag.restype = c_int
_cg_movec.argtypes = [ c_int, c_int, c_char_p, c_int ]
_cg_movec.restype = c_int
_cg_channel.argtypes = [ c_int, c_char_p, c_int ]
_cg_channel.restype = c_int
_cg_broadcast.argtypes = [ c_void_p, c_int, c_char_p, c_int ]
_cg_broadcast.restype = c_int
_cg_settimer.argtypes = [ c_int ]
_cg_settimer.restype = c_int
_cg_sysinfo.argtypes = [ c_int, c_int ]
_cg_sysinfo.restype = c_int
_cg_logmask.argtypes = [ c_int ]
_cg_logmask.restype = None
_cg_book_add.argtypes = [ c_int ]
_cg_book_add.restype = c_int
_cg_book_del.argtypes = [ c_int ]
_cg_book_del.restype = c_int
_cg_book_reset.argtypes = []
_cg_book_reset.restype = c_int
_cg_head.argtypes = []
_cg_head.restype = c_int
_cg_tail.argtypes = []
_cg_tail.restype = c_int
_cg_next.argtypes = [ c_int ]
_cg_next.restype = c_int
_cg_prev.argtypes = [ c_int ]
_cg_prev.restype = c_int
_cg_htag.argtypes = [ c_int ]
_cg_htag.restype = c_int
_cg_hidnum.argtypes = []
_cg_hidnum.restype = c_int
_cg_getchid.argtypes = []
_cg_getchid.restype = c_int
_cg_setseed.argtypes = [ c_int, c_int ]
_cg_setseed.restype = c_int
_cg_rc4_set_skey.argtypes = [ c_int, c_char_p, c_int ]
_cg_rc4_set_skey.restype = c_int
_cg_rc4_set_rkey.argtypes = [ c_int, c_char_p, c_int ]
_cg_rc4_set_rkey.restype = c_int
_cg_flush.argtypes = []
_cg_flush.restype = c_int
_cg_bufmod.argtypes = [ c_int ]
_cg_bufmod.restype = c_int
_cg_gethd.argtypes = [ c_int ]
_cg_gethd.restype = c_int
_cg_sethd.argtypes = [ c_int, c_int ]
_cg_sethd.restype = c_int
_cg_setmask.argtypes = [ c_int ]
_cg_setmask.restype = c_int
#----------------------------------------------------------------------
# instruction
#----------------------------------------------------------------------
ITMT_NEW = 0        # new external connection: (id,tag) ip/d,port/w <hid>
ITMT_LEAVE = 1      # external connection closed: (id,tag) <hid>
ITMT_DATA = 2       # external data arrived: (id,tag) data... <hid>
ITMT_CHANNEL = 3    # channel communication: (channel,tag) <>
ITMT_CHNEW = 4      # channel opened: (channel,id)
ITMT_CHSTOP = 5     # channel closed: (channel,tag)
ITMT_SYSCD = 6      # system information: (subtype, v) data...
ITMT_TIMER = 7      # system timer: (timesec,timeusec)
ITMT_UNRDAT = 10    # unreliable datagram: (id,tag)
ITMT_NOOP = 80      # no-op instruction: (wparam, lparam)
ITMT_BLOCK = 99     # no instruction available
ITMC_DATA = 0       # send external data: (id,*) data...
ITMC_CLOSE = 1      # close external connection: (id,code)
ITMC_TAG = 2        # set TAG: (id,tag)
ITMC_CHANNEL = 3    # inter-group communication: (channel,*) data...
ITMC_MOVEC = 4      # move external connection: (channel,id) data...
ITMC_SYSCD = 5      # system control message: (subtype, v) data...
ITMC_BROADCAST = 6  # broadcast message
ITMC_UNRDAT = 10    # unreliable datagram: (id,tag)
ITMC_IOCTL = 11     # connection control instruction: (id,flag)
ITMC_NOOP = 80      # no-op instruction: (*,*)
ITMS_CONNC = 0      # request connection counts (st,0) cu/d,cc/d
ITMS_LOGLV = 1      # set log level (st,level)
ITMS_LISTC = 2      # return channel information (st,cn) d[ch,id,tag],w[t,c]
ITMS_RTIME = 3      # system running time (st,wtime)
ITMS_TMVER = 4      # transport module version (st,tmver)
ITMS_REHID = 5      # return the channel's (st,ch)
ITMS_QUITD = 6      # request own shutdown
ITMS_TIMER = 8      # set the channel-zero timer (st,timems)
ITMS_INTERVAL = 9   # set whether interval mode is used (st,isinterval)
ITMS_FASTMODE = 10  # set whether fast mode is enabled
ITMS_NODELAY = 1    # disable the Nagle algorithm
ITMS_NOPUSH = 2     # disable the send interface
#----------------------------------------------------------------------
# port interface
#----------------------------------------------------------------------
def attach (ip, port, channel):
if not _unix:
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock = None
return _cg_attach(ip, port, channel)
def headmode (mode = 0):
_cg_headmode(mode)
def peek (length):
	return _cg_peek(length)
def exit ():
return _cg_exit()
textdata = ctypes.create_string_buffer('\000' * 0x100000)
def read (nowait = 0):
p_event = c_int()
p_wparam = c_int()
p_lparam = c_int()
hr = _cg_read(byref(p_event), byref(p_wparam), byref(p_lparam), \
textdata, nowait)
e = p_event.value
w = p_wparam.value
l = p_lparam.value
if hr < 0:
return -1, w, l, ''
elif hr == 0 and e < 0:
return 99, w, l, ''
d = textdata[:hr]
return e, w, l, d
def write (event, wparam, lparam, data, flush = True):
flush = flush and 1 or 0
return _cg_write(event, wparam, lparam, data, len(data), flush)
def sendto (hid, data, udp = False):
udp = udp and 1 or 0
return _cg_sendto(hid, data, len(data), udp)
def send (hid, data, udp = False):
udp = udp and 1 or 0
return _cg_sendto(hid, data, len(data), udp)
def bsendto (hid, data):
return _cg_bsendto(hid, data, len(data))
def sendmulti (hids, data, datamode = 0, buffermode = 0):
if type(hids) == type(''):
result = []
unpack = struct.unpack
for i in xrange(len(hids) / 4):
hid = unpack('I', hids[i * 4: i * 4 + 4])[0]
result.append(hid)
hids = result
cmd = (datamode) and ITMC_UNRDAT or ITMC_DATA
for hid in hids:
_cg_write(cmd, hid, 0, data, len(data), buffermode)
return 0
def close (hid, code = 0):
return _cg_close(hid, code)
def tag (hid, TAG):
return _cg_tag(hid, TAG)
def movec (channel, hid, data):
return _cg_movec(channel, hid, data, len(data))
def channel (channelid, data):
return _cg_channel(channelid, data, len(data))
def settimer (millisec):
return _cg_settimer(millisec)
def sysinfo (mode, info):
return _cg_sysinfo(mode, info)
def head ():
return _cg_head()
def tail ():
return _cg_tail()
def next (hid):
return _cg_next(hid)
def prev (hid):
return _cg_prev(hid)
def htag (hid):
return _cg_htag(hid)
def hidnum ():
return _cg_hidnum()
def flush ():
return _cg_flush()
def bufmod (mode):
return _cg_bufmod(mode)
def gethd (hid):
return _cg_gethd(hid)
def sethd (hid, hd):
return _cg_sethd(hid, hd)
def logmask (mask):
return _cg_logmask(mask)
def bookadd (category):
return _cg_book_add(category)
def bookdel (category):
return _cg_book_del(category)
def bookrst (category):
return _cg_book_reset()
def getchid ():
return _cg_getchid()
def setseed (hid, seed):
return _cg_setseed(hid, seed)
def groupcast (hids, data, limit = -1):
pack = struct.pack
count = len(hids)
hids = ''.join(pack('I', hid) for hid in hids)
return _cg_groupcast(hids, count, data, len(data), limit)
def rc4_set_rkey (hid, key):
return _cg_rc4_set_rkey(hid, key, len(key))
def rc4_set_skey (hid, key):
return _cg_rc4_set_skey(hid, key, len(key))
def setmask (mask):
return _cg_setmask(mask)
__all__ = [
'attach', 'headmode', 'peek', 'exit', 'read', 'write', 'send', 'sendto',
'bsendto', 'sendmulti', 'close', 'tag', 'movec', 'channel', 'settimer',
'sysinfo', 'head', 'tail', 'next', 'prev', 'htag', 'hidnum', 'flush',
'bufmod', 'gethd', 'sethd', 'bookadd', 'bookdel', 'bookrst', 'getchid',
'setseed', 'groupcast', 'rc4_set_skey', 'rc4_set_rkey', 'setmask',
]
#----------------------------------------------------------------------
# testing case
#----------------------------------------------------------------------
if __name__ == '__main__':
headmode(0)
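	# Illustrative sketch (not part of the original file): a minimal polling
	# loop built only from functions defined above. The endpoint values are
	# placeholders and the exact return conventions of attach()/read() come
	# from the underlying cclib, so treat this purely as a rough guide.
	#
	# attach('127.0.0.1', 3000, 0)
	# while 1:
	# 	event, wparam, lparam, data = read(nowait=0)
	# 	if event == ITMT_DATA:        # external data arrived -> echo it back
	# 		send(wparam, data)
	# 	elif event == ITMT_LEAVE:     # an external connection closed
	# 		print 'hid %d left' % wparam
	# 	elif event < 0:               # transport error, give up
	# 		break
	# exit()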
|
skywind3000/cannon
|
cannon/lib/cport.py
|
Python
|
apache-2.0
| 11,083
|
[
"cclib"
] |
8703f14c3b10ff849fa0c5282513433858fde459e62d061594cae515e49f34b3
|
"""
Class for management of Stomp MQ connections, e.g. RabbitMQ
"""
import json
import random
import os
import socket
import ssl
import time
import stomp
from DIRAC.Resources.MessageQueue.MQConnector import MQConnector
from DIRAC.Core.Security import Locations
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.DErrno import EMQUKN, EMQCONN
LOG = gLogger.getSubLogger(__name__)
class StompMQConnector(MQConnector):
"""
Class for management of message queue connections
Allows to both send and receive messages from a queue
When several IPs are behind an alias, we shuffle the ips, and connect to one.
The others are used as failover by stomp's internals
"""
# Setting for the reconnection handling by stomp interface.
# See e.g. the description of Transport class in
# https://github.com/jasonrbriggs/stomp.py/blob/master/stomp/transport.py
RECONNECT_SLEEP_INITIAL = 1 # [s] Initial delay before reattempting to establish a connection.
RECONNECT_SLEEP_INCREASE = 0.5 # Factor by which sleep delay is increased 0.5 means increase by 50%.
RECONNECT_SLEEP_MAX = 120 # [s] The maximum delay that can be reached independent of increasing procedure.
RECONNECT_SLEEP_JITTER = 0.1 # Random factor to add. 0.1 means a random number from 0 to 10% of the current time.
RECONNECT_ATTEMPTS_MAX = 1e4 # Maximum attempts to reconnect.
PORT = 61613
def __init__(self, parameters=None):
"""Standard constructor"""
super(StompMQConnector, self).__init__(parameters=parameters)
self.connection = None
if "DIRAC_DEBUG_STOMP" in os.environ:
gLogger.enableLogsFromExternalLibs()
def setupConnection(self, parameters=None):
"""
Establishes a new connection to a Stomp server, e.g. RabbitMQ
Args:
parameters(dict): dictionary with additional MQ parameters if any.
Returns:
S_OK/S_ERROR
"""
log = LOG.getSubLogger("setupConnection")
if parameters is not None:
self.parameters.update(parameters)
# Check that the minimum set of parameters is present
if not all(p in parameters for p in ("Host", "VHost")):
return S_ERROR("Input parameters are missing!")
reconnectSleepInitial = self.parameters.get("ReconnectSleepInitial", StompMQConnector.RECONNECT_SLEEP_INITIAL)
reconnectSleepIncrease = self.parameters.get(
"ReconnectSleepIncrease", StompMQConnector.RECONNECT_SLEEP_INCREASE
)
reconnectSleepMax = self.parameters.get("ReconnectSleepMax", StompMQConnector.RECONNECT_SLEEP_MAX)
reconnectSleepJitter = self.parameters.get("ReconnectSleepJitter", StompMQConnector.RECONNECT_SLEEP_JITTER)
reconnectAttemptsMax = self.parameters.get("ReconnectAttemptsMax", StompMQConnector.RECONNECT_ATTEMPTS_MAX)
host = self.parameters.get("Host")
port = self.parameters.get("Port", StompMQConnector.PORT)
vhost = self.parameters.get("VHost")
sslVersion = self.parameters.get("SSLVersion")
hostcert = self.parameters.get("HostCertificate")
hostkey = self.parameters.get("HostKey")
connectionArgs = {
"vhost": vhost,
"keepalive": True,
"reconnect_sleep_initial": reconnectSleepInitial,
"reconnect_sleep_increase": reconnectSleepIncrease,
"reconnect_sleep_max": reconnectSleepMax,
"reconnect_sleep_jitter": reconnectSleepJitter,
"reconnect_attempts_max": reconnectAttemptsMax,
}
# We use ssl credentials and not user-password.
if sslVersion is not None:
if sslVersion == "TLSv1":
sslVersion = ssl.PROTOCOL_TLSv1
# get local key and certificate if not available via configuration
if not (hostcert or hostkey):
paths = Locations.getHostCertificateAndKeyLocation()
if not paths:
return S_ERROR("Could not find a certificate!")
hostcert = paths[0]
hostkey = paths[1]
connectionArgs.update(
{"use_ssl": True, "ssl_version": sslVersion, "ssl_key_file": hostkey, "ssl_cert_file": hostcert}
)
else:
return S_ERROR(EMQCONN, "Invalid SSL version provided: %s" % sslVersion)
try:
# Get IP addresses of brokers
# Start with the IPv6, and randomize it
ipv6_addrInfo = socket.getaddrinfo(host, port, socket.AF_INET6, socket.SOCK_STREAM)
random.shuffle(ipv6_addrInfo)
# Same with IPv4
ipv4_addrInfo = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
random.shuffle(ipv4_addrInfo)
# Create the host_port tuples, keeping the ipv6 in front
host_and_ports = []
for _family, _socktype, _proto, _canonname, sockaddr in ipv6_addrInfo + ipv4_addrInfo:
host_and_ports.append((sockaddr[0], sockaddr[1]))
connectionArgs.update({"host_and_ports": host_and_ports})
log.debug("Connection args: %s" % str(connectionArgs))
self.connection = stomp.Connection(**connectionArgs)
except Exception as e:
log.debug("Failed setting up connection", repr(e))
return S_ERROR(EMQCONN, "Failed to setup connection: %s" % e)
return S_OK("Setup successful")
def reconnect(self):
"""
Callback method when a disconnection happens
:param serverIP: IP of the server disconnected
"""
log = LOG.getSubLogger("reconnect")
log.info("Trigger reconnection for broker")
res = self.connect(self.parameters)
return res
def put(self, message, parameters=None):
"""
Sends a message to the queue
message contains the body of the message
Args:
message(str): string or any json encodable structure.
parameters(dict): parameters with 'destination' key defined.
"""
log = LOG.getSubLogger("put")
destination = parameters.get("destination", "")
try:
try:
self.connection.send(body=json.dumps(message), destination=destination)
except stomp.exception.StompException:
self.connect()
self.connection.send(body=json.dumps(message), destination=destination)
except Exception as e:
log.debug("Failed to send message", repr(e))
return S_ERROR(EMQUKN, "Failed to send message: %s" % repr(e))
return S_OK("Message sent successfully")
def connect(self, parameters=None):
"""Call the ~stomp.Connection.connect method for each endpoint
:param parameters: connection parameter
"""
log = LOG.getSubLogger("connect")
# Since I use a dirty trick to know to what IP I am connected,
# I'd rather not rely too much on it
remoteIP = "unknown"
user = self.parameters.get("User")
password = self.parameters.get("Password")
for _ in range(10):
try:
self.connection.connect(username=user, passcode=password, wait=True)
if self.connection.is_connected():
# Go to the socket of the Stomp to find the remote host
try:
remoteIP = self.connection.transport.socket.getpeername()[0]
except Exception:
pass
log.info("MQ Connected to %s" % remoteIP)
return S_OK("Connected to %s" % remoteIP)
else:
log.warn("Not connected")
except Exception as e:
log.error("Failed to connect: %s" % repr(e))
# Wait a bit before retrying
time.sleep(5)
return S_ERROR(EMQCONN, "Failed to connect")
def disconnect(self, parameters=None):
"""
Disconnects from the message queue server
"""
log = LOG.getSubLogger("disconnect")
try:
# Indicate to the Listener that we want a disconnection
listener = self.connection.get_listener("StompListener")
if listener:
listener.wantsDisconnect = True
self.connection.disconnect()
log.info("Disconnected from broker")
except Exception as e:
log.error("Failed to disconnect from broker", repr(e))
return S_ERROR(EMQUKN, "Failed to disconnect from broker %s" % repr(e))
return S_OK("Successfully disconnected from broker")
def subscribe(self, parameters=None):
log = LOG.getSubLogger("subscribe")
mId = parameters.get("messengerId", "")
callback = parameters.get("callback", None)
dest = parameters.get("destination", "")
headers = {}
if self.parameters.get("Persistent", "").lower() in ["true", "yes", "1"]:
headers = {"persistent": "true"}
ack = "auto"
acknowledgement = False
if self.parameters.get("Acknowledgement", "").lower() in ["true", "yes", "1"]:
acknowledgement = True
ack = "client-individual"
if not callback:
# Chris 26.02.20
# If it is an error, why not returning ?!
log.error("No callback specified!")
try:
listener = StompListener(callback, acknowledgement, self.connection, mId, self.connect)
self.connection.set_listener("StompListener", listener)
self.connection.subscribe(destination=dest, id=mId, ack=ack, headers=headers)
except Exception as e:
log.error("Failed to subscribe: %s" % e)
return S_ERROR(EMQUKN, "Failed to subscribe to broker: %s" % repr(e))
return S_OK("Subscription successful")
def unsubscribe(self, parameters):
log = LOG.getSubLogger("unsubscribe")
dest = parameters.get("destination", "")
mId = parameters.get("messengerId", "")
try:
self.connection.unsubscribe(destination=dest, id=mId)
except Exception as e:
log.error("Failed to unsubscribe", repr(e))
return S_ERROR(EMQUKN, "Failed to unsubscribe: %s" % repr(e))
return S_OK("Successfully unsubscribed from all destinations")
class StompListener(stomp.ConnectionListener):
"""
Internal listener class responsible for handling new messages and errors.
"""
def __init__(self, callback, ack, connection, messengerId, connectCallback):
"""
Initializes the internal listener object
Args:
callback: a defaultCallback compatible function.
ack(bool): if set to true an acknowledgement will be send back to the sender.
messengerId(str): messenger identifier sent with acknowledgement messages.
connectCallback: the connect method to call in case of disconnection
"""
self.log = LOG.getSubLogger("StompListener")
if not callback:
self.log.error("Error initializing StompMQConnector!callback is None")
self.callback = callback
self.ack = ack
self.mId = messengerId
self.connection = connection
self.connectCallback = connectCallback
# This boolean is to know whether we effectively
# want to disconnect or if it is because of a failure
self.wantsDisconnect = False
def on_message(self, headers, body):
"""
Function called upon receiving a message
:param dict headers: message headers
:param json body: message body
"""
result = self.callback(headers, json.loads(body))
if self.ack:
if result["OK"]:
self.connection.ack(headers["message-id"], self.mId)
else:
self.connection.nack(headers["message-id"], self.mId)
def on_error(self, headers, message):
"""Function called when an error happens
Args:
headers(dict): message headers.
body(json): message body.
"""
self.log.error(message)
def on_disconnected(self):
"""Callback function called after disconnecting from broker."""
if not self.wantsDisconnect:
self.log.warn("Disconnected from broker")
try:
res = self.connectCallback()
if res["OK"]:
self.log.info("Reconnection successful to broker")
else:
self.log.error("Error reconnectiong broker", "%s" % res)
except Exception as e:
self.log.error("Unexpected error while calling reconnect callback: %s" % e)
|
DIRACGrid/DIRAC
|
src/DIRAC/Resources/MessageQueue/StompMQConnector.py
|
Python
|
gpl-3.0
| 12,925
|
[
"DIRAC"
] |
387060d2c2cbe6a3d09418b28689755aa7f05bdf637b574019cf9e52f6a03c56
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# __init__ - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""This file is needed to tell python that this dir is a package
so that other modules can call, say, import shared.editing
Please refer to http://www.network-theory.co.uk/docs/pytut/tut_51.html
for details
"""
__dummy = True
# above line is only to make python tidy behave and not
# move module doc string inside header
|
heromod/migrid
|
mig/shared/__init__.py
|
Python
|
gpl-2.0
| 1,270
|
[
"Brian"
] |
a5f89bd6ed360c27a17d3d7d605effff0da338bf55ec91eaa252d6afc57e7c63
|
# -*- coding: utf-8 -*-
"""Define pRF finding parameters here."""
# Part of py_pRF_motion library
# Copyright (C) 2016 Marian Schneider, Ingo Marquardt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Number of x-positions to model:
varNumX = 25
# Number of y-positions to model:
varNumY = 25
# Number of pRF sizes to model:
varNumPrfSizes = 22
# Extend of visual space from centre of the screen (i.e. from the fixation
# point) [degrees of visual angle]:
varExtXmin = -12.00
varExtXmax = 12.00
varExtYmin = -12.00
varExtYmax = 12.00
# Maximum and minimum pRF model size (standard deviation of 2D Gaussian)
# [degrees of visual angle]:
varPrfStdMin = 1.0
varPrfStdMax = 22.0
# Volume TR of input data [s]:
varTr = 3.0
# Number of fMRI volumes and png files to load:
varNumVol = 1032
# Intensity cutoff value for fMRI time series. Voxels with a mean intensity
# lower than the value specified here are not included in the pRF model finding
# (this speeds up the calculation, and, more importatnly, avoids division by
# zero):
varIntCtf = -100.0
# Number of processes to run in parallel:
varPar = 8
aperture = 'mskSquare'
# Parent path to functional data
strPathNiiFunc = '/media/sf_D_DRIVE/MotionLocaliser/Simulation2p0/Apertures/pRF_model_tc/' + aperture + '/simResp_xval_2.npy'
# Output basename:
strPathOut = '/media/sf_D_DRIVE/MotionLocaliser/Simulation2p0/FitResults/' + aperture + '/simResp_xval_2'
# Use cython (i.e. compiled code) for faster performance? (Requires cython to
# be installed.)
lgcCython = False
# Create pRF time course models?
lgcCrteMdl = False
# reduce presented motion direction from 8 to 4?
lgcAoM = True
# length of the runs that were done
vecRunLngth = [172, 172, 172, 172, 172, 172]
# cross validate?
lgcXval = True
# set which set of hrf functions should be used
lgcOldSchoolHrf = True
if lgcOldSchoolHrf: # use legacy hrf function
strBasis = '_oldSch'
# use only canonical hrf function
switchHrfSet = 1
else: # use hrf basis
# decide of how many functions the basis set should consist:
# 1: canonical hrf function
# 2: canonical hrf function and 1st tmp derivative
# 3: canonical hrf function, 1st tmp and spatial derivative
switchHrfSet = 3
strBasis = '_bsSet' + str(switchHrfSet)
if lgcXval:
varNumXval = 6 # set nr of xvalidations, equal to nr of runs
if lgcCrteMdl:
# If we create new pRF time course models, the following parameters have to
# be provided:
# visual stimuli that were used for this run (if everything is well 1,2,3 )
vecVslStim = [1, 2, 3, 4, 5, 6]
# Basename of the filenames that have the presentation orders saved
strPathPresOrd = '/media/sf_D_DRIVE/MotionLocaliser/Simulation2p0/Conditions/Conditions_run0'
# Size of png files (pixel*pixel):
tplPngSize = (128, 128)
# Basename of the 'binary stimulus files'. The files need to be in png
# format and number in the order of their presentation during the
# experiment.
strPathPng = '/media/sf_D_DRIVE/MotionLocaliser/Simulation2p0/Apertures/PNGs/' + aperture + '/Ima_'
# Output path for pRF time course models file (without file extension):
strPathMdl = '/media/sf_D_DRIVE/MotionLocaliser/Simulation2p0/FitResults/pRF_model_mtn_tc' + aperture + strBasis
else:
# provide number of motion directions
varNumMtDrctn = 5 * switchHrfSet
# If we use existing pRF time course models, the path to the respective
# file has to be provided (including file extension, i.e. '*.npy'):
strPathMdl = '/media/sf_D_DRIVE/MotionLocaliser/Simulation2p0/FitResults/pRF_model_mtn_tc' + aperture + strBasis + '.npy'
|
MSchnei/py_pRF_motion
|
pyprf_feature/simulation/configs/pRF_sim_config_square2.py
|
Python
|
gpl-3.0
| 4,233
|
[
"Gaussian"
] |
61e34086108f19738ca9c5bbbfe595b5d71837a2ddea104cb56c37afac0a3076
|
#!/usr/bin/env python
"""
python script/module that uses pyamg to calculate and plot fiedler vectors of a graph
using pyamg, numpy and scipy.
Input:
A sif file or any three column white space delimited file with the first and
third column representing node names and each row representing an edge.
Command line Usage:
python fiedler.py my.sif
Can also be used on rf-ace output files provided the file has a ".out" extension.
Or with xargs as a thread pool to plot many sif files:
ls *.sif | xargs --max-procs=8 -I FILE python fiedler.py FILE
By default generates a number of pngs of different sorts of plots and a .json file containing:
{"f1": the first fiedler vector,
"f2": (if calculated) the second fiedler vector
"d": the node degrees,
"r1": the rank of each node in the first fiedler vector
"r2": the rank of each node in the second fiedler vector
"iByn": the index of the nodes by the string used to represent them in the input file
"nByi": the string used to represent nodes in the input file by their index in the graph
"adj": the adjacency list}
Author/Contact: Ryan Bressler, ryan.bressler@systemsbiology.org
"""
import sys
import json
import math
import random
import itertools
import os
import numpy
import scipy
from scipy.sparse.linalg import lobpcg
from scipy import linalg
from scipy.sparse import coo_matrix
from pyamg import smoothed_aggregation_solver
import matplotlib as mpl
import pylab as pl
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from sklearn import mixture
from sklearn.cluster import DBSCAN
import hypergeom
def file_parse(fo, node1=0, node2=1, filter_col=-1, filter_min=.5, val_col=-1, blacklist=[]):
"""parse a sif like file into an adjascancy list by index in a matrix and node name look up tables.
Takes:
f0: A file like object containing a sif or similar white space deliminated file containing at at least 2
columns of node names that are legal python dictionary keys deliminated by tabs or spaces.
node1=0 : the index of the column containing the first node
node2=2 : the index of the column containing the second node2
Returns a tuple containing:
An Nx2 nested list of ints of the form:
[[node1,node2],
...]
    Representing the adjacency list.
A dictionary containing int ids in the above by the string name in the input file.
An array of strings containing the name in the input by the int id.
"""
out = []
intidsbyname = {}
namesbyintid = []
incintid=0
len_blacklist=len(blacklist)
for line in fo:
if line[1]=="#":
continue
vs = line.rstrip().split()
if len(vs)>node2:
if filter_col!=-1:
if math.fabs(float(vs[filter_col]))<filter_min:
continue
if len_blacklist>0:
skip = False
for black_sheep in blacklist:
for strid in [vs[node1],vs[node2]]:
if strid.find(black_sheep)!=-1:
skip = True
continue
if skip==True:
continue
if skip==True:
continue
for strid in [vs[node1],vs[node2]]:
if not strid in intidsbyname:
intidsbyname[strid]=incintid
namesbyintid.append(strid)
incintid = incintid+1
row =[intidsbyname[vs[node1]],intidsbyname[vs[node2]]]
if val_col!=-1:
row.append(math.fabs(float(vs[val_col])))
out.append(row)
fo.close()
return (out,intidsbyname,namesbyintid)
def adj_mat(adj_list):
"""get the graph laplacian (in coo_matrix sparse matrix form) of an
adjancy list.0
Takes:
An Nx2 nested list of ints of the form:
[[node1,node2],
...]
or an Nx3 list in the form:
[[node1,node2,value],
...]
    Representing the adjacency list.
Returns
    The adjacency matrix in coo_matrix format.
"""
adj=numpy.array(adj_list)
Npts = numpy.max(adj[:,:2])+1
data = numpy.ones(adj.shape[0],dtype=float)
if adj.shape[1]>2:
data=adj[:,2]
A = coo_matrix((data,(adj[:,0],adj[:,1])), shape=(Npts,Npts))
return (A,adj,Npts)
def adj_list(adj_mat,includeValue=True):
am=adj_mat.tocoo()
rv=numpy.column_stack((am.row,am.col,am.data)).tolist()
for row in rv:
row[0]=int(row[0])
row[1]=int(row[1])
return rv
def graph_laplacian(adj_list):
"""get the graph laplacian (in coo_matrix sparse matrix form) of an
    adjacency list.
    Takes:
    An Nx2 nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjacency list.
    Returns
    The graph laplacian in coo_matrix format.
"""
(A,adj,Npts) = adj_mat(adj_list)
A = -1*(A.T + A)/2
A=A.tocsr()
if len(adj_list[0])==2:
A.data = -1*numpy.ones((A.nnz,),dtype=float)
A.setdiag(numpy.zeros((Npts,),dtype=float))
A.setdiag(-1*numpy.array(A.sum(axis=1)).ravel())
return A.tocsr()
def fiedler(adj_list,plot=False,fn="FiedlerPlots",n_fied=2):
"""calculate the first fiedler vector of a graph adjascancy list and optionally write associated plots to file.
Takes:
adj_list:
An Nx2 nested list of ints of the form:
[[node1,node2],
...]
    Representing the adjacency list.
plot=False: make plots or not.
fn="FiedlerPlots": filename to prepend to the plot png file names
n_fied=2: the number of fiedler vectors to calculate (values above 2 will not be output)
Returns a Dictionary of the form:
{"f1": the first fiedler vector,
"f2": (if caclulated) the second fideler vector
"d": the node degrees,
"r1": the rank of each node in the first fiedler vector
"r2": the rank of each node in the second fiedler vector}
"""
A = graph_laplacian(adj_list)
# construct preconditioner
ml = smoothed_aggregation_solver(A, coarse_solver='pinv2',max_coarse=10)
M = ml.aspreconditioner()
# solve for lowest two modes: constant vector and Fiedler vector
X = scipy.rand(A.shape[0], n_fied+1)
(eval,evec,res) = lobpcg(A, X, M=None, tol=1e-12, largest=False, \
verbosityLevel=0, retResidualNormsHistory=True)
if plot:
doPlots(evec[:,1],evec[:,2],A.diagonal(),adj_list,fn)
out = {"f1":list(evec[:,1]),"d":list(A.diagonal()),"r1":[int(i) for i in list(numpy.argsort(numpy.argsort(evec[:,1])))]}
if n_fied > 1:
out["f2"]=list(evec[:,2])
out["r2"]=[int(i) for i in list(numpy.argsort(numpy.argsort(evec[:,2])))]
return out
#Plots are not optimized ...ie they end up sorting the same thing multiple times
def doPlots(f1,f2,degrees,adj_list,fn,widths=[16],heights=False,vsdeg=True,nByi=False,adj_list2=False,directed=False,dbscan_eps=0,dbscan_rank_eps=0,enrichdb="",clust_x=False,clust_y=False,clust_xy=True,dorank=True,doraw=True):
# output first
if vsdeg:
plotFiedvsDeg(f1,degrees,fn)
#if n_fied>1:
for i,width in enumerate(widths):
height=width
if heights!=False:
height=heights[i]
#output fied vs fied:
plotFiedvsFied(f1,f2,fn,adj_list=adj_list,adj_list2=adj_list2,width=width,height=height,nByi=nByi,directed=directed,dbscan_eps=dbscan_eps,dbscan_rank_eps=dbscan_rank_eps,enrichdb=enrichdb,clust_x=clust_x,clust_y=clust_y,clust_xy=clust_xy,dorank=dorank,doraw=doraw)
#output second
if vsdeg:
plotFiedvsDeg(f2,degrees,fn+".second")
def plotEdges(x,y,ax,adj_list,width,height,color="green",directed=False):
#codes=[]
#points=[]
emax = x.max()
for edge in adj_list:
#points[len(points):]=[(x[edge[0]],y[edge[0]]),(x[edge[1]],y[edge[1]])]
points=[(x[edge[0]],y[edge[0]]),(x[edge[1]],y[edge[1]])]
#codes[len(codes):]=[mpath.Path.MOVETO,mpath.Path.LINETO]
codes=[mpath.Path.MOVETO,mpath.Path.LINETO]
alpha=.5
if len(edge)>2:
alpha=0
if float(edge[2])>0:
#alpha=math.sqrt(float(edge[2]))
alpha=float(edge[2])
if directed:
dx=points[1][0]-points[0][0]
dy=points[1][1]-points[0][1]
length = math.sqrt(dx*dx+dy*dy)
head_width=emax*.3*length/(width*math.fabs(dy)+height*math.fabs(dx))
head_length=emax*.4*length/(height*math.fabs(dy)+width*math.fabs(dx))
ax.arrow(points[0][0],points[0][1],dx,dy,width=.2*head_width,head_width=head_width,head_length=head_length,color=color,alpha=alpha,length_includes_head=True)
else:
patch = mpatches.PathPatch(mpath.Path(points,codes), edgecolor=color, lw=.2,alpha=alpha)
ax.add_patch(patch)
def PlotEdgeVvsEdgeV(adj1,adj2,nByi1,nByi2,fn,width=16):
edgevs = {}
nedges = 0
nByis=[nByi1,nByi2]
for i,adj in enumerate([adj1,adj2]):
for edge in adj:
[e0,e1,v]=edge
e0=nByis[i][e0]
e1=nByis[i][e1]
if not e0 in edgevs:
edgevs[e0]={}
if not e1 in edgevs[e0]:
edgevs[e0][e1]={}
nedges+=1
edgevs[e0][e1][i]=float(v)
x = numpy.zeros((nedges,),dtype=float)
y = numpy.zeros((nedges,),dtype=float)
i = 0
for n0 in edgevs:
for n1 in edgevs[n0]:
e = edgevs[n0][n1]
if 0 in e:
x[i]=e[0]
if 1 in e:
y[i]=e[1]
i=i+1
F = plt.figure()
ax = F.add_subplot(111)
ax.scatter(x, y,zorder=2)
i = 0
for n0 in edgevs:
for n1 in edgevs[n0]:
plt.annotate(
"->".join([":".join(n.split(":")[1:3]) for n in [n0,n1]]),
xy = (x[i], y[i]), xytext = (-0, 0),
textcoords = 'offset points', ha = 'right', va = 'bottom',size=8,alpha=.4)
i+=1
ax.grid(True)
F.set_size_inches( (width,width) )
F.savefig(fn+".EdgeVvsEdgeV.width%s.pdf"%(width),bbox_inches='tight')
F.clear()
def doDbScan(plt,ax,fied1,fied2,fn,adj_list,adj_list2,width,height,nByi,directed,gmmcomponents,dbscan_eps,enrichdb,axis="xy"):
"""
add enriched dbscan information to a plot
"""
X=0
minormin = 0
if axis == "x":
print "dbscaning x at %s"%(dbscan_eps)
X=numpy.transpose(numpy.column_stack((fied1)))
minormin = fied2.min()
elif axis == "y":
print "dbscaning y at %s"%(dbscan_eps)
X=numpy.transpose(numpy.column_stack((fied2)))
minormin = fied1.min()
else:
print "dbscaning xy at %s"%(dbscan_eps)
X=numpy.column_stack((fied1,fied2))
db = DBSCAN(eps=dbscan_eps, min_samples=10).fit(X)
core_samples = db.core_sample_indices_
labels = db.labels_
print "Found %s core samples and %s labels"%(len(core_samples),len(labels))
colors=[(random.random(),random.random(),random.random()) for el in labels]
backgroundgenes =[]
enrich=False
enriched = []
if nByi!=False and enrichdb!="":
enrich=True
backgroundgenes = [gene for gene in [nodelabel.split(":")[2] for nodelabel in nByi] if gene!=""]
for k, col in zip(set(labels), colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
elif enrich:
memberins = numpy.argwhere(labels == k)
setgenes = [nByi[i].split(":")[2] for i in memberins]
setgenes = numpy.array([gene for gene in setgenes if gene!=""])
enrichedsets = hypergeom.enrich(setgenes,backgroundgenes,enrichdb,verbose=False)
enriched.append({"indexes":memberins.tolist(),"genes":setgenes.tolist(),"sets":enrichedsets})
text=str(len(enriched))
if len(enrichedsets)>0:
text=":".join([text,enrichedsets[0][0].replace("_"," "),str(enrichedsets[0][2])])
if axis == "x":
labelPoints(plt,[numpy.mean(fied1[memberins])],[minormin],[text],size=24,zorder=4,alpha=.8,color=col,rotation='vertical',ha="left",trim=False)
elif axis == "y":
labelPoints(plt,[minormin],[numpy.mean(fied2[memberins])],[text],size=24,zorder=4,alpha=.8,color=col,ha="left",trim=False)
else:
labelPoints(plt,[numpy.mean(fied1[memberins])],[numpy.mean(fied2[memberins])],[text],size=14,zorder=4,alpha=.6,color=col,ha="center",trim=False)
class_members = [index[0] for index in numpy.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1:
markersize = 6
else:
markersize = 6
if k!=-1:
if axis == "x":
plotCircles(ax,[(x[0],minormin)],dbscan_eps,col,edgecolor=col,alpha=.01,zorder=-1)
elif axis == "y":
plotCircles(ax,[(minormin,x[0])],dbscan_eps,col,edgecolor=col,alpha=.01,zorder=-1)
else:
plotCircles(ax,[(x[0],x[1])],dbscan_eps,col,edgecolor=col,alpha=.01,zorder=-1)
if axis == "xy":
ax.plot(x[0], x[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=markersize,alpha=.4)
if enrich:
fo = open (fn+".clusts.json","w")
json.dump(enriched,fo)
fo.close()
def doSinglePlot(fied1,fied2,fn,adj_list=False,adj_list2=False,width=16,height=False,nByi=False,directed=False,gmmcomponents=0,dbscan_eps=0,enrichdb="",clust_x=0,clust_y=0,clust_xy=True):
""" make scatter plots and rank v rank plots and write to files.
Takes
fied1: the fiedler vector to use as the x axis
fied2: the fiedler vector to use as the y axis
fn: the filename to prepend"""
plt.axis('off')
if height==False:
height=width
F = plt.figure()
ax = F.add_subplot(111)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if gmmcomponents>1:
dpgmm = mixture.DPGMM(gmmcomponents,"full",alpha=.1,thresh=1e-2)
#dpgmm = mixture.GMM(gmmcomponents,"full",thresh=1e-10)
xmax=float(numpy.max(fied1))
ymax=float(numpy.max(fied2))
X=numpy.column_stack((fied1/xmax,fied2/ymax))
dpgmm.fit(X)
Y_ = dpgmm.predict(X)
#color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
colors=((random.random(),random.random(),random.random()) for el in dpgmm.means_)
for i, (mean, covar, color) in enumerate(zip(dpgmm.means_, dpgmm._get_covars(),colors)):
if not numpy.any(Y_ == i):
continue
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
ax.scatter(X[Y_ == i, 0]*xmax, X[Y_ == i, 1]*ymax, 2, color=color,zorder=2)
# Plot an ellipse to show the Gaussian component
angle = numpy.arctan(u[1] / u[0])
angle = 180 * angle / numpy.pi # convert to degrees
print "Ploting elipse: %s"%(", ".join([str(el) for el in [mean, v[0], v[1], 180 + angle, color]]))
ell = mpl.patches.Ellipse([mean[0]*xmax,mean[1]*ymax], v[0]*xmax, v[1]*ymax, 180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
elif dbscan_eps>0 or clust_x > 0 or clust_y > 0:
if clust_xy>0:
print "clust_xy:"+str(clust_xy)
doDbScan(plt,ax,fied1,fied2,fn,adj_list,adj_list2,width,height,nByi,directed,gmmcomponents,dbscan_eps,enrichdb)
if clust_x>0:
doDbScan(plt,ax,fied1,fied2,fn+".x.",adj_list,adj_list2,width,height,nByi,directed,gmmcomponents,clust_x,enrichdb,axis="x")
if clust_y>0:
doDbScan(plt,ax,fied1,fied2,fn+".y.",adj_list,adj_list2,width,height,nByi,directed,gmmcomponents,clust_y,enrichdb,axis="y")
if clust_xy==False or clust_xy == 0:
ax.scatter(fied1, fied2,s=10,alpha=0.4,zorder=2)
else:
ax.scatter(fied1, fied2,s=10,alpha=0.4,zorder=2)
if not adj_list==False:
plotEdges(fied1,fied2,ax,adj_list,width,height,directed=directed)
if not adj_list2==False:
plotEdges(fied1,fied2,ax,adj_list2,width,height,color="red",directed=directed)
if not nByi==False:
labelPoints(plt,fied1,fied2,nByi=nByi)
#ax.grid(True)
min1=numpy.min(fied1)
max1=numpy.max(fied1)
pad1=.05*(max1-min1)
min2=numpy.min(fied2)
max2=numpy.max(fied2)
pad2=.05*(max2-min2)
ax.set_xlim([min1-pad1, max1+pad1])
ax.set_ylim([min2-pad2, max2+pad2])
ax.set_xmargin(0)
ax.set_ymargin(0)
F.set_size_inches( (width,height) )
F.savefig(fn+".png",bbox_inches='tight',pad_inches=0)
#F.savefig(fn+".svg",bbox_inches='tight')
F.clear()
def plotCircles(ax,xy,radius,facecolor,alpha=.5,edgecolor="k",zorder=-1):
for point in xy:
patch = mpatches.Circle(point,radius=radius,facecolor=facecolor,edgecolor=edgecolor,alpha=alpha,linewidth=0,zorder=-1)
ax.add_patch(patch)
def plotFiedvsFied(fied1,fied2,fn,adj_list=False,adj_list2=False,width=16,height=False,nByi=False,directed=False,gmmcomponents=0,dbscan_eps=0,dbscan_rank_eps=0,enrichdb="",clust_x=False,clust_y=False,clust_xy=True,dorank=True,doraw=True):
""" make scatter plots and rank v rank plots and write to files.
Takes
fied1: the fiedler vector to use as the x axis
fied2: the fiedler vector to use as the y axis
fn: the filename to prepend"""
if doraw:
doSinglePlot(fied1,fied2,fn+".fied1vfied2.width%s"%(width),adj_list,adj_list2,width,height,nByi,directed,gmmcomponents,dbscan_eps,enrichdb,clust_x,clust_y,clust_xy)
if dorank:
sortx=numpy.argsort(numpy.argsort(fied1))
sorty=numpy.argsort(numpy.argsort(fied2))
doSinglePlot(sortx,sorty,fn+".fied1rank.v.fied2rank.width%s"%(width),adj_list,adj_list2,width,height,nByi,directed,gmmcomponents,dbscan_rank_eps,enrichdb,clust_x,clust_y,clust_xy)
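# Added illustration (not part of the original module): the rank-vs-rank plot
# above relies on a double argsort, which maps raw values to their 0-based
# ranks. The helper below only demonstrates that identity; the sample values in
# its docstring are made up.
def _rank_demo(values):
    """e.g. _rank_demo([0.3, -1.2, 0.7]) -> array([1, 0, 2])"""
    return numpy.argsort(numpy.argsort(values))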
def labelPoints(plt,x,y,nByi,size=6,zorder=3,alpha=.4,color="k",rotation=0,ha="right",trim=True):
for i,xi in enumerate(x):
text = nByi[i]
if trim:
vs = text.split(":")
newvs = vs[1:3]
newvs.append(vs[-1])
text = ":".join(newvs)
plt.annotate(
text,
xy=(xi, y[i]),
xytext=(-1, 1),
textcoords='offset points', ha = ha, va = 'bottom',size=size,alpha=alpha,zorder=zorder,color=color,rotation=rotation)
def plotFiedvsDeg(fied, degree,fn):
""" make fied vs degree and fiedler rank vs degree plots and write to files.
Takes
fied: the fiedler vector to use as the x axis
degree: the degree of the nodes
fn: the filename to prepend"""
F = plt.figure()
ax = F.add_subplot(111)
ax.scatter(fied, numpy.log2(degree))
ax.grid(True)
F.set_size_inches( (64,8) )
F.savefig(fn+".fiedler.vs.log2.degree.png")
F.clear()
F = plt.figure()
ax = F.add_subplot(111)
order = numpy.argsort(fied)
ax.scatter(numpy.arange(0,fied.size), numpy.log2(degree[order]))
ax.grid(True)
F.set_size_inches( (64,8) )
F.savefig(fn+".fiedler.ranks.vs.log2.degree.png")
F.clear()
def filename_parse(fn, filter_min=.001):
    """Wraps file_parse and infers parameters based on the file extension.
    Takes:
    filename.
    ".out" files will be treated as rf-ace output and filtered by importance;
    ".pwpv" and ".tsv" files will be filtered on their value column;
    all other files will be treated as sif files.
    returns:
    The same tuple as file_parse.
    """
fo = open(fn)
out = ()
if fn[-4:] == ".out":
out = file_parse(fo, node2=1, filter_col=3, filter_min=filter_min, val_col=3)
elif fn[-5:] == ".pwpv":
out = file_parse(fo, node2=1, filter_col=2, filter_min=filter_min, val_col=2, blacklist=["PRDM", "CNVR"])
elif fn[-4:] == ".tsv":
out = file_parse(fo, node2=1, filter_col=2, filter_min=filter_min, val_col=2)
else:
out = file_parse(fo)
fo.close()
return out
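# Added usage sketch (not in the original file; the file names below are
# placeholders): the extension alone selects the parsing/filtering parameters,
# so callers load any supported format the same way, e.g.
#   adj_list, iByn, nByi = filename_parse("edges.out", filter_min=0.01)  # rf-ace output
#   adj_list, iByn, nByi = filename_parse("network.sif")                 # plain sif file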
def main():
fn = sys.argv[1]
filter_min = ""
if len(sys.argv) > 2:
filter_min = float(sys.argv[2])
(adj_list, iByn, nByi) = filename_parse(fn, filter_min)
fn = os.path.basename(fn)
fied = fiedler(adj_list, fn=fn + str(filter_min), plot=False, n_fied=2)
fied["adj"] = adj_list
fied["iByn"] = iByn
fied["nByi"] = nByi
fo = open(fn + str(filter_min) + ".continuous.json", "w")
json.dump(fied, fo)
fo.close()
if __name__ == '__main__':
main()
|
ryanbressler/GraphSpectrometer
|
fiedler.py
|
Python
|
bsd-3-clause
| 20,913
|
[
"Gaussian"
] |
a964eab4799a5ce823f9b558cdb0f863537322ed728715354e6d581ef9167a0f
|
#!/usr/bin/env python
'''
Convert back and forth between the molecule (open boundary) and the 0D PBC
system.
'''
import numpy
from pyscf import gto, scf
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pbcscf
from pyscf.pbc import df
cell = pbcgto.Cell()
cell.atom = 'N 0 0 0; N 0 0 1.2'
cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.a = numpy.eye(3)
cell.dimension = 0
cell.symmetry = True
cell.build()
mf = pbcscf.RHF(cell)
mf.with_df = df.AFTDF(cell)
mf.run()
print('E(HF) with 0D PBC RHF calculation %s' % mf.e_tot)
#
# Convert cell to mol.
#
# Except for the lattice vectors, the mole object inherits all parameters from
# the cell object, such as the geometry, basis sets, and pseudopotential. Using
# the generated mol object with the molecular code should produce the same
# result as the 0D PBC calculation.
#
mol = cell.to_mol()
mf = scf.RHF(mol).run()
print('E(HF) with molecular RHF calculation %s' % mf.e_tot)
# Cell and Mole have almost the same structure. If cell was fed to the
# molecular functions, the code is able to handle the cell without any
# errors. However, due to the different treatments of nuclear repulsion
# energy, a small discrepancy will be found in the total energy.
mf = scf.RHF(cell).run()
print('E(HF) of molecular RHF with cell %s' % mf.e_tot)
#
# Convert mol back to cell.
#
# The mol object above contains all information of the PBC system that was
# initialized at the beginning. Using the "view" method to convert mol back to
# a cell object, all of that information is transferred to the resultant cell
# object. The lattice vectors "a" are not available in the mole object, so they
# need to be specified on the cell.
#
cell_0D = mol.view(pbcgto.Cell)
cell_0D.a = numpy.eye(3)
cell_0D.dimension = 0
mf = pbcscf.RHF(cell_0D).density_fit().run()
print('E(HF) with 0D PBC RHF calculation on the converted cell %s' % mf.e_tot)
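#
# Optional follow-up (not part of the original example, shown only as a sketch):
# all of the calculations above describe the same isolated N2 molecule, so the
# molecular energy and the converted-cell energy should agree closely; the small
# remaining difference reflects the different nuclear-repulsion and integral
# treatments noted above.
#
# e_mol = scf.RHF(mol).run().e_tot
# print('Molecular vs converted-cell difference: %.3e' % (e_mol - mf.e_tot))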
|
sunqm/pyscf
|
examples/pbc/31-pbc_0D_as_mol.py
|
Python
|
apache-2.0
| 1,848
|
[
"PySCF"
] |
2636673c03e1329ebb50616bb2e1d961cf93e4a86e32dbf516f03d4e80306c32
|
#!/usr/bin/env python
"""Create rst files for documentation of DIRAC source code."""
import sys
from diracdoctools.cmd.codeReference import run, CLParser
sys.exit(run(**(CLParser().optionDict())))
|
DIRACGrid/DIRAC
|
docs/diracdoctools/scripts/dirac-docs-build-code.py
|
Python
|
gpl-3.0
| 199
|
[
"DIRAC"
] |
daad506257d57a4e06452992a1d44b360556fa6812a9c4eef5b1a8bd0512dc27
|
"""
The MIT License (MIT)
Copyright (c) 2015 <Satyajit Sarangi>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from src.ir.function import *
from src.ir.constants import Number
from src.ir.base_ir_visitor import IRBaseVisitor
from src.optimizer.pass_support import *
from src.utils.print_utils import draw_header
BINARY_OPERATORS = {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'**': lambda x, y: x ** y,
'/': lambda x, y: x / y,
'//': lambda x, y: x // y,
'<<': lambda x, y: x << y,
'>>': lambda x, y: x >> y,
'%': lambda x, y: x % type(x)(y),
'&': lambda x, y: x & y,
'|': lambda x, y: x | y,
'^': lambda x, y: x ^ y,
}
class ConstPropagationPass(FunctionPass, IRBaseVisitor):
def __init__(self):
FunctionPass.__init__(self)
IRBaseVisitor.__init__(self)
self.insts_to_remove = []
@verify(node=Function)
def run_on_function(self, node):
draw_header("Constant Propagation: %s" % node.name)
func = node
for bb in func.basic_blocks:
self.visit_basicblock(bb)
print(node)
def visit_basicblock(self, node):
for inst in node.instructions:
IRBaseVisitor.visit(self, inst)
for inst in self.insts_to_remove:
inst.erase_from_parent()
self.insts_to_remove.clear()
def const_fold_binary_op(self, lhs, rhs, op):
result = None
if isinstance(lhs, Number) and isinstance(rhs, Number):
result = BINARY_OPERATORS[op](lhs.number, rhs.number)
result = Number(result)
return result
def replace_uses_with_const(self, node, const):
for use in node.uses:
self.replace_use_with_const(node, use, const)
self.insts_to_remove.append(node)
def replace_use_with_const(self, node, use, const):
if hasattr(use, "operands"):
for i, ops in enumerate(use.operands):
if ops == node:
use.operands[i] = const
def visit_returninstruction(self, node):
pass
def visit_addinstruction(self, node):
lhs = node.lhs
rhs = node.rhs
result = self.const_fold_binary_op(lhs, rhs, '+')
if result is not None:
self.replace_uses_with_const(node, result)
def visit_callinstruction(self, node):
func = node.function
def visit_subinstruction(self, node):
lhs = node.lhs
rhs = node.rhs
result = self.const_fold_binary_op(lhs, rhs, '-')
if result is not None:
self.replace_uses_with_const(node, result)
def visit_mulinstruction(self, node):
lhs = node.lhs
rhs = node.rhs
result = self.const_fold_binary_op(lhs, rhs, '*')
if result is not None:
self.replace_uses_with_const(node, result)
def visit_divinstruction(self, node):
lhs = node.lhs
rhs = node.rhs
result = self.const_fold_binary_op(lhs, rhs, '/')
if result is not None:
self.replace_uses_with_const(node, result)
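# Added illustration (not part of the pass): const_fold_binary_op simply looks up
# the operator's lambda in BINARY_OPERATORS and applies it to the wrapped Python
# numbers when both operands are constants.
if __name__ == "__main__":
    lhs, rhs = Number(6), Number(7)
    print(Number(BINARY_OPERATORS['*'](lhs.number, rhs.number)))  # expected to wrap 42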
|
ssarangi/spiderjit
|
src/optimizer/const_propagation.py
|
Python
|
mit
| 4,120
|
[
"VisIt"
] |
2640cbe5ef7006d73b6e98102ca3142c2b28fa5a24b2e5647b42771fa67c3367
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim:set ts=2 sw=2 et:
#
# -----------------------------------------------------------------------
#
# Go2C is based on the c-to-c.py example from pycparser by Eli Bendersky,
# and uses pycparser extensively.
#
# Alexander Rødseth <rodseth@gmail.com>
# License: BSD
#
# -----------------------------------------------------------------------
#
#-----------------------------------------------------------------
# pycparser: c-to-c.py
#
# Example of a C code generator from pycparser AST nodes, serving
# as a simplistic translator from C to AST and back to C.
# Note: at this stage, the example is "alpha release" and considered
# experimental. Please file any bugs you find in the Issues page on pycparser's
# website.
#
# Copyright (C) 2008-2011, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
from __future__ import print_function
#import tempfile
import sys
import os
__version__ = "0.2"
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.insert(0, '..')
import pycparser
from pycparser import c_parser, c_ast, parse_file, plyparser
REPLACEMENT_FUNCTIONS = {
"stat": "syscall.Stat",
"access": "syscall.Access",
"rand": "rand.Float64"
}
REPLACEMENT_TYPES = {
"static struct stat": "syscall.Stat_t",
"struct timeval": "syscall.Timeval",
"char *": "CString",
"char": "byte",
"unsigned char": "byte",
"int *": "*int",
"int": "int",
"unsigned int": "uint",
"unsigned int *": "*uint",
"void": "",
"short": "int16",
"short *": "*int16",
"unsigned short": "uint16",
"unsigned short *": "*uint16",
"float": "float32",
"float *": "*float32",
"double": "float64",
# TODO: check if int is int32 in Go
"long": "int",
# TODO: Needs a better plan for static
"static long": "int",
"static int": "int"
}
REPLACEMENT_MACROS = {
"S_ISDIR" : ["syscall", "(((", ") & syscall.S_IFMT) == syscall.S_IFDIR)"]
}
REPLACEMENT_DEFS = {
"F_OK" : 0,
"X_OK" : 1,
"W_OK" : 2,
"R_OK" : 4
}
CUSTOM_FUNCTIONS = {
"atoi": ["strconv", "func atoi(a string) int {\n\tv, _ := strconv.Atoi(a)\n\treturn v\n}"],
"sleep": ["time", "func sleep(sec int64) {\n\ttime.Sleep(1e9 * sec)\n}"],
"getchar":["fmt", 'func getchar() byte {\n\tvar b byte\n\tfmt.Scanf("%c", &b)\n\treturn b\n}'],
"putchar":["fmt", 'func putchar(b byte) {\n\tfmt.Printf("%c", b)\n}'],
"abs": ["", 'func abs(a int) int {\n\tif a >= 0 {\n\t\treturn a\n\t}\n\treturn -a\n}'],
"strcpy": ["CString", "func strcpy(a *CString, b CString) {\n\t*a = b\n}"],
"strcmp": ["CString", 'func strcmp(acs, bcs CString) int {\n\ta := acs.ToString()\n\tb := bcs.ToString()\n\tif a == b {\n\t\treturn 0\n\t}\n\talen := len(a)\n\tblen := len(b)\n\tminlen := blen\n\tif alen < minlen {\n\t\tminlen = alen\n\t}\n\tfor i := 0; i < minlen; i++ {\n\t\tif a[i] > b[i] {\n\t\t\treturn 1\n\t\t} else if a[i] < b[i] {\n\t\t\treturn -1\n\t\t}\n\t}\n\tif alen > blen {\n\t\treturn 1\n\t}\n\treturn -1\n}'],
"strlen": ["CString", 'func strlen(c CString) int {\n\treturn len(c.ToString())\n}'],
"printf": ["fmt", "func printf(format CString, a ...interface{}) {\n\tfmt.Printf(format.ToString(), a...)\n}"],
"scanf": ["fmt", "func scanf(format CString, a ...interface{}) {\n\tfmt.Scanf(format.ToString(), a...)\n}"],
"b2i": ["", "func b2i(b bool) int {\n\tif b{\n\t\treturn 1\n\t}\n\treturn 0\n}"]
}
SKIP_INCLUDES = ["CString"]
WHOLE_PROGRAM_REPLACEMENTS = {
r'fmt.Printf("\n")': "fmt.Println()",
"func main(argc int, argv *[]CString) int {": "func main() {\n\tflag.Parse()\n\targv := flag.Args()\n\targc := len(argv)+1\n",
"argv[": "argv[-1+",
"for (1)": "for",
"\n\n\n": "\n",
# TODO: Find a sensible way to figure out when a program wants strings and when it wants byte arrays
"*[]byte": "CString",
#"*[128]byte = new([128]byte)": "*string",
#
"'\\0'": "'\\x00'",
"int = 0\n": "int\n"
}
def is_prolly_var(s): #: bool
for letter in s:
if letter not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789[]":
return False
return True
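# Added note (not in the original): is_prolly_var is a heuristic. It treats any
# string built solely from identifier characters and square brackets as
# "probably a variable", e.g. is_prolly_var("argc") and is_prolly_var("argv[0]")
# are True, while is_prolly_var("a + b") is False because of the spaces and '+'.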
class GoGenerator(object):
""" Uses the same visitor pattern as c_ast.NodeVisitor, but modified to
return a value from each visit method, using string accumulation in
generic_visit.
"""
def __init__(self):
self.output = ''
# Statements start with indentation of self.indent_level spaces, using
# the _make_indent method
#
self.indent_level = 0
self.imports = []
# in main()
self.inmain = False
# scopeless declarations
self.vartypes = {}
# function name for the current function
self.current_function_name = ""
# some functions should return bool instead of int
self.should_return_bool_instead_of_int = []
# custom functions that has been used
self.used_custom_functions = []
# variables that has to be renamed
self.renames = {}
def _make_packagename(self):
return "package main\n\n"
def _make_imports(self):
s = "import (\n"
for imp in self.imports:
if imp and (imp not in SKIP_INCLUDES):
s += " \"%s\"\n" % (imp)
s += ")\n\n"
return s
def _make_customfunc(self):
"""Define custom functions"""
s = ""
specials = ["CString"]
for special in specials:
if special in self.imports:
if special == "CString":
s += "type CString []byte\n\nfunc (c CString) ToString() string {\n\ts := \"\"\n\tfor _, e := range c {\n\t\tif e == 0 {\n\t\t\t\tbreak\n\t\t}\n\t\ts += string(e)\n\t}\n\treturn s\n}\n\nfunc cstr(s string) CString {\n\tc := make(CString, len(s)+1)\n\tfor i, e := range s {\n\t\tc[i] = byte(e)\n\t}\n\t// The last byte will already be 0\n\treturn c\n}\n\nfunc cstrN(length int) CString {\n\tc := make(CString, length+1)\n\treturn c\n}\n\n"
del self.imports[self.imports.index(special)]
for fun in self.used_custom_functions:
if fun in CUSTOM_FUNCTIONS:
s += CUSTOM_FUNCTIONS[fun][1] + "\n\n"
return s
def make_header(self):
return self._make_packagename() + self._make_imports() + self._make_customfunc()
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
"""Called recursively, beware"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
#~ print('generic:', type(node))
if node is None:
return ''
else:
try:
return ''.join(self.visit(c) for c in node.children())
except AttributeError:
return '/* C2GO: ? */'
def visit_Constant(self, n):
v = n.value
if v.startswith("\"") and v.endswith("\""):
return "cstr(" + v + ")"
return n.value
def visit_ID(self, n):
name = n.name
if name in self.renames:
return self.renames[name]
return name
def visit_ArrayRef(self, n):
arrref = self._parenthesize_unless_simple(n.name)
contents = self.visit(n.subscript)
if "++" in contents or "--" in contents:
if "++" in contents:
op = "++"
else:
op = "--"
# Insert markers to move the ++/-- expression down or up at a later stage
contents = "$$$" + contents + "$$$"
return arrref + '[' + contents + ']'
def visit_StructRef(self, n):
sref = self._parenthesize_unless_simple(n.name)
return sref + n.type + self.visit(n.field)
def visit_FuncCall(self, n):
s = ""
fref = self._parenthesize_unless_simple(n.name)
# Use the replacement table to convert from C to Go functions
if fref in REPLACEMENT_FUNCTIONS:
gofref = REPLACEMENT_FUNCTIONS[fref]
# ... and import the right package
pkg = ".".join(gofref.split(".")[:-1])
if pkg not in self.imports:
self.imports.append(pkg)
fref = gofref
elif fref in CUSTOM_FUNCTIONS:
if not fref in self.used_custom_functions:
self.used_custom_functions.append(fref)
pkg = CUSTOM_FUNCTIONS[fref][0]
if pkg not in self.imports:
self.imports.append(pkg)
specials = ["CString"]
for special in specials:
if special in CUSTOM_FUNCTIONS[fref][1]:
if special not in self.imports:
self.imports.append(special)
#log("WANTS TO ENABLE CUSTOM FUNCTION: " + fref)
elif fref in REPLACEMENT_MACROS:
pkg, first_part, last_part = REPLACEMENT_MACROS[fref]
# ... and import the right package
if pkg not in self.imports:
self.imports.append(pkg)
s = first_part + self.visit(n.args) + last_part
if not s:
s = fref + '(' + self.visit(n.args) + ')'
for d in REPLACEMENT_DEFS:
if d in s:
s = s.replace(d, str(REPLACEMENT_DEFS[d]) + "/*" + d + "*/")
return s
def visit_UnaryOp(self, n):
operand = self._parenthesize_unless_simple(n.expr)
if n.op == 'p++':
return '%s++' % operand
elif n.op == 'p--':
return '%s--' % operand
elif n.op == 'sizeof':
# Always parenthesize the argument of sizeof since it can be
# a name.
e = self.visit(n.expr)
if e in self.vartypes:
if "]" in self.vartypes[e]:
return 'len(%s)' % e
#else:
# log("NOT IN VARTYPES: " + str(e))
if not "unsafe" in self.imports:
self.imports.append("unsafe")
return 'unsafe.Sizeof(%s)' % e
else:
return '%s%s' % (n.op, operand)
def visit_BinaryOp(self, n):
lval_str = self._parenthesize_if(n.left,
lambda d: not self._is_simple_node(d))
rval_str = self._parenthesize_if(n.right,
lambda d: not self._is_simple_node(d))
return '%s %s %s' % (lval_str, n.op, rval_str)
def visit_Assignment(self, n):
rval_str = self._parenthesize_if(
n.rvalue,
lambda n: isinstance(n, c_ast.Assignment))
op = n.op
lvalue = self.visit(n.lvalue)
if "=" in rval_str:
# There is probably an assignment on the right side, not good
r = rval_str.strip()
if r.startswith("(") and r.endswith(")"):
rval_str = rval_str.split("(", 1)[1].rsplit(")", 1)[0]
# Convert a = (b = 1) to a, b = 1, 1
# TODO: This covers some code, but not everything, make it more general
if (op == "=") and ("==" in rval_str):
op = "="
rval_str += "," + rval_str.split("=")[1]
if rval_str.strip().endswith(","):
rval_str = rval_str.strip()[:-1]
# Most likely, a bool would be in place here
if lvalue in self.vartypes:
if self.vartypes[lvalue] == "int":
rval_str = "b2i(" + rval_str + ")"
if not "b2i" in self.used_custom_functions:
self.used_custom_functions.append("b2i")
elif (op == "=") and ("=" in rval_str):
op = ", "
rval_str += "," + rval_str.split("=")[1]
if "[" in lvalue:
name = lvalue.split("[")[0].strip()
if name in self.vartypes:
type = self.vartypes[name]
log(name + " is " + type)
if type in ["*string", "*[]byte"]:
pos = lvalue.split("[")[1].split("]")[0]
#lvalue = "*" + name
#name = "(*" + name + ")"
# This doesn't work
#rvalue = name + "[:" + pos + "] + string(" + rval_str + ") + " + name + "[" + pos + "+1:]"
#rval_str = rvalue
#if name in self.vartypes:
# log(lvalue + " IS " + self.vartypes[lvalue] + "!!!")
# if self.vartypes[lvalue] == "*string":
# log("STRING! " + lvalue)
return '%s %s %s' % (lvalue, op, rval_str)
def visit_IdentifierType(self, n):
return ' '.join(n.names)
def visit_Decl(self, n, no_type=False):
# no_type is used when a Decl is part of a DeclList, where the type is
        # explicitly only for the first declaration in a list.
#
s = n.name if no_type else self._generate_decl(n)
add_addr_of = False
found = False
#log("from: " + s)
arraytype = ""
basetype = ""
repl_keys = REPLACEMENT_TYPES.keys()
for t in repl_keys:
if s.startswith(t):
twofirstwords = " ".join(s.split(" ", 2)[:2])
if ("*" in twofirstwords) and ("*" not in t):
# don't match "int" if it could be an "int *"
#log("skipping: " + twofirstwords + " (of " + s + ")")
continue
if s.endswith("]"):
arraytype = "[" + s.split("[", 1)[1]
s = s.replace(arraytype, "")
#log("array: " + arraytype)
s = s.replace(t, "", 1)
s += " " + REPLACEMENT_TYPES[t]
if "." in REPLACEMENT_TYPES[t]:
pkg = REPLACEMENT_TYPES[t].split(".")[0]
if pkg not in self.imports:
self.imports.append(pkg)
l = s.rsplit(" ", 1)
arraystar = ""
if ("[" in arraytype) and ("]" in arraytype):
arraystar = "*"
s = l[0] + " " + arraystar + arraytype + l[1]
found = True
basetype = l[1]
break
#log("changed to: " + s + "\n")
if not found:
print("// C2GO: Unconverted declaration: " + s.strip() + "\n")
return s.rstrip() + " // C2GO: ???"
if "(" in s:
# We can guess that this is a function
s = "func " + s
self.inmain = n.name == "main"
self.current_function_name = n.name
if "(void)" in s:
s = s.replace("(void)", "()", 1)
if "main() int" in s:
s = s.replace("main() int", "main()")
#print("in main:", self.inmain, n.name)
if n.bitsize: s += ' : ' + self.visit(n.bitsize)
if n.init:
if isinstance(n.init, c_ast.ExprList):
s += ' = {' + self.visit(n.init) + '}'
else:
s += ' = ' + self.visit(n.init)
if not s.strip().startswith("func"):
# Assume it's a variable declaration
s = "var " + s
s = s.replace(" ", " ")
if "var long" in s:
# There is no long double in Go (float128)
s = s.replace("long", "", 1).replace(" ", " ")
if "var static struct" in s:
s = s.replace("var static struct", "struct", 1)
s = "var " + s.split(" ")[-1] + " " + " ".join(s.split(" ")[:-1])
if ("[" in s) and ("]" in s):
arraynumber = s.split("[", 1)[1].rsplit("]", 1)[0]
if arraynumber:
#log("!!!" + s)
if "=" not in s:
s += " = new(" + arraytype + basetype + ")"
elif ("=" in s) and ("{" in s):
add_addr_of = True
#log("DECLARING: " + s)
if "=" in s:
vartype = s.split("=")[0].strip().split(" ")[-1]
else:
vartype = s.split(" ")[-1]
varname = s.split(" ")[1]
if varname in ["len"]: # is varname a Go keyword?
oldvarname = varname
varname = varname + "_" + varname
self.renames[oldvarname] = varname
#log("RENAME FROM " + oldvarname + " TO " + varname)
s = s.replace(oldvarname, varname)
# Storing the type for later reference (by visit_Ternary, for example)
self.vartypes[varname] = vartype
#log(varname + " is of type: " + vartype)
else:
# Remove "var " from function declaration
s = s.replace("var ", "").replace(" ", " ")
# Remove "= new" from function declaration
removefrom = " = new"
removeto = ")" # including
if removefrom in s:
remove_n = s.count(removefrom)
for n in range(remove_n):
pos = s.find(removefrom)
#log("Remove from =new to ) at pos: " + str(pos))
pos2 = s.find(removeto, pos)
#log("to pos: " + str(pos2))
#log("pre removal: " + s)
s = s[:pos] + s[pos2+len(removeto):]
#log("post removal: " + s)
if arraytype and s.count("=") == 1 and "{" in s and "}" in s:
# We're defining an array on the fly, prepend the type
s = s.replace("{", arraytype + basetype + "{", 1)
# Use pointers to arrays for C arrays, also when declared with assignment
if ("=" in s) and ("{" in s) and add_addr_of:
s = s.replace("= [", "= &[")
return s
def visit_DeclList(self, n):
s = self.visit(n.decls[0])
if len(n.decls) > 1:
s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True)
for decl in n.decls[1:])
return s
def visit_Typedef(self, n):
s = ''
if n.storage: s += ' '.join(n.storage) + ' '
s += self._generate_type(n.type)
return s
def visit_Cast(self, n):
s = '(' + self._generate_type(n.to_type) + ')'
return s + ' ' + self.visit(n.expr)
def visit_ExprList(self, n):
visited_subexprs = []
for expr in n.exprs:
if isinstance(expr, c_ast.ExprList):
visited_subexprs.append('{' + self.visit(expr) + '}')
else:
visited_subexprs.append(self.visit(expr))
return ', '.join(visited_subexprs)
def visit_Enum(self, n):
s = 'enum'
if n.name: s += ' ' + n.name
if n.values:
s += ' {'
for i, enumerator in enumerate(n.values.enumerators):
s += enumerator.name
if enumerator.value:
s += ' = ' + self.visit(enumerator.value)
if i != len(n.values.enumerators) - 1:
s += ', '
s += '}'
return s
def visit_FuncDef(self, n):
decl = self.visit(n.decl)
self.indent_level = 0
# The body is a Compound node
body = self.visit(n.body)
return decl + ' ' + body + '\n'
def visit_FileAST(self, n):
s = ''
for ext in n.ext:
if isinstance(ext, c_ast.FuncDef):
s += self.visit(ext)
else:
s += self.visit(ext) + ';\n'
# This could be just a function prototype
for line in s.split("\n"):
if line.strip().startswith("func") and ("{" not in s):
# Delete this line
#log("Deleting: " + line)
s = s.replace(line, "")
#elif "func" in s:
# log("Strange: " + s)
return s
def visit_Compound(self, n):
s = self._make_indent() + '{\n'
self.indent_level += 2
if n.block_items:
s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items)
self.indent_level -= 2
s += self._make_indent() + '}\n'
return s
def visit_ParamList(self, n):
return ', '.join(self.visit(param) for param in n.params)
def visit_Return(self, n):
s = 'return'
if self.inmain:
# Go can return in main, but not with a value
return s
if n.expr:
s += ' ' + self.visit(n.expr)
if ("==" in s) or (">" in s) or ("<" in s) or (">=" in s) or ("<=" in s) or ("!=" in s):
#log("This function should really return a bool instead! " + self.current_function_name)
if not self.current_function_name in self.should_return_bool_instead_of_int:
self.should_return_bool_instead_of_int.append(self.current_function_name)
return s
def visit_Break(self, n):
return 'break;'
def visit_Continue(self, n):
return 'continue;'
def visit_TernaryOp(self, n):
# Look at the type for the variable in n.conf if it's only one variable, find the type from the hashtable for types
condition = self.visit(n.cond).strip()
ctype = "bool"
if condition in self.vartypes:
ctype = self.vartypes[condition]
#log(condition + " is of type " + ctype)
ntype = n.iftrue.type
if ntype == "string":
ntype = "CString"
if ctype == "bool":
s = "map[bool]" + ntype + "{true: " + self.visit(n.iftrue) + ", false: " + self.visit(n.iffalse) + "}[" + condition + "]"
elif ctype == "int":
s = "map[int]" + ntype + "{1: " + self.visit(n.iftrue) + ", 0: " + self.visit(n.iffalse) + "}[" + condition + "]"
return s
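    # Added note (not in the original): the ternary translation above emits Go of
    # the form map[bool]T{true: A, false: B}[cond] (or map[int]T{1: A, 0: B}[cond]
    # for integer conditions) because Go has no ?: operator. Note that unlike a C
    # ternary, both branch expressions are evaluated when the map literal is built.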
def _remove_curly_blank_lines(self, s):
"""Remove curly brackets at beginning and at end, remove blank lines"""
s = s.replace("{", "", 1)
s = s.rsplit("}", 1)[0]
lines = s.split("\n")
bodylines = []
for line in lines:
if line.strip() != "":
bodylines.append(line)
s = "\n".join(bodylines) + "\n"
return s
def visit_If(self, n):
s = 'if ('
if n.cond:
e = self.visit(n.cond)
if is_prolly_var(e):
# TODO: Find out how to replace all variables that are evaluated
# on their own with > 0, since ints are so often booleans
#
# if it's just a lone variable, assume it is an int used as bool
e = e + " > 0"
else:
# Just handle a few simple and common cases
# TODO: Find a proper fix for integers in boolean expressions
if ("&&" in e) or ("||" in e) or ("!" in e):
if e.count("&&") == 1:
if is_prolly_var(e.split("&&")[0].strip()):
word = e.split("&&")[0].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif is_prolly_var(e.split("&&")[1].strip()):
word = e.split("&&")[1].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif e.count("||") == 1:
if is_prolly_var(e.split("||")[0].strip()):
word = e.split("&&")[0].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif is_prolly_var(e.split("||")[1].strip()):
word = e.split("&&")[1].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif e.strip().startswith("!(") and (e.count(")") == 1):
word = e.split("!(")[1].strip().split(")")[0].strip()
if is_prolly_var(word):
e = "(" + word + " <= 0)"
s += e
s += ') {\n'
if_body_true = self._generate_stmt(n.iftrue, add_indent=True)
if if_body_true.strip().startswith("{"):
if_body_true = self._remove_curly_blank_lines(if_body_true)
s += if_body_true
if n.iffalse:
else_between = self._make_indent() + '} else {\n'
if_body_false = self._generate_stmt(n.iffalse, add_indent=True)
if if_body_false.strip().startswith("{"):
if_body_false = self._remove_curly_blank_lines(if_body_false)
# Only add the "else" part if it is not empty
if if_body_false.strip() != "":
s += else_between + if_body_false
s += self._make_indent() + "}"
return s
def visit_For(self, n):
s = 'for '
if n.init:
inits = self.visit(n.init)
if "," in inits:
# TODO: Fix this so that x=",", y="," as init can work...
s = inits.replace(",", ";") + "\n"
s += self._make_indent() + 'for '
else:
s += inits
s += ';'
if n.cond:
e = self.visit(n.cond)
if is_prolly_var(e):
# TODO: Find out how to replace all variables that are evaluated
# on their own with > 0, since ints are so often booleans
#
# if it's just a lone variable, assume it is an int used as bool
e = e + " > 0"
else:
# Just handle a few simple and common cases
# TODO: Find a proper fix for integers in boolean expressions
if ("&&" in e) or ("||" in e):
if e.count("&&") == 1:
if is_prolly_var(e.split("&&")[0].strip()):
word = e.split("&&")[0].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif is_prolly_var(e.split("&&")[1].strip()):
word = e.split("&&")[1].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif e.count("||") == 1:
if is_prolly_var(e.split("||")[0].strip()):
word = e.split("&&")[0].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif is_prolly_var(e.split("||")[1].strip()):
word = e.split("&&")[1].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
s += e
s += ';'
multiple_nexts = False
if n.next:
nexts = ' ' + self.visit(n.next)
if "," in nexts:
s += ""
multiple_nexts = True
else:
s += nexts
genstmt = self._generate_stmt(n.stmt, add_indent=True)
if not genstmt.strip().startswith("{"):
s += " {\n"
if genstmt.strip() != "":
s += genstmt
if multiple_nexts:
# TODO: Fix this so that x++, y++ can work...
nexts = nexts.replace(",", ";")
s += "\n" + self._make_indent() + nexts.rstrip() + "\n"
if s.count("}") < s.count("{"):
s += self._make_indent() + "}"
return s
def visit_While(self, n):
s = 'for ('
if n.cond:
pexp = ""
cond = self.visit(n.cond)
if "=" in cond:
# If the assignment is surrounded by (), pick it out and put it
# above the for-loop and befor the last } in the for loop.
# Replace the () with the left side of the assignment.
log("assignment in cond!")
apos = cond.find("=")
pos1 = cond.rfind("(", 0, apos)
# find matching ) for the one at pos 1
inside = 1
found = False
for i, c in enumerate(cond):
#log("POS: " + str(i) + " INSIDE: " + str(inside))
if i <= apos:
continue
if c == "(":
inside += 1
elif c == ")":
inside -= 1
if inside == 0:
pos2 = i
found = True
break
if found:
pexp = cond[pos1+1:pos2]
#log("PEXP: " + pexp)
left = pexp.split("=")[0].strip()
cond = cond.replace(pexp, left)
s = pexp + "\n" + self._make_indent() + s
s += cond
s += ') '
body = self._generate_stmt(n.stmt, add_indent=True)
# TODO: Create a function of the curly-bracket fix
if pexp:
body += self._make_indent() + pexp
if not body.strip().startswith("{"):
s += " {\n"
if body.strip() != "":
s += body
if s.count("}") < s.count("{"):
s += self._make_indent() + "}"
return s
def visit_DoWhile(self, n):
s = 'for'
s += self._generate_stmt(n.stmt, add_indent=True)
# Remove the last "}" in s
s = "}".join(s.split("}")[:-1])
if n.cond:
con = self.visit(n.cond)
if con != "1":
s += self._make_indent() + 'if '
e = self.visit(n.cond)
if ("++" in e) or ("--" in e):
log("NOOO")
if is_prolly_var(e):
# TODO: Find out how to replace all variables that are evaluated
# on their own with > 0, since ints are so often booleans
#
# if it's just a lone variable, assume it is an int used as bool
e = e + " > 0"
else:
# Just handle a few simple and common cases
# TODO: Find a proper fix for integers in boolean expressions
if ("&&" in e) or ("||" in e) or ("!" in e):
if e.count("&&") == 1:
if is_prolly_var(e.split("&&")[0].strip()):
word = e.split("&&")[0].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif is_prolly_var(e.split("&&")[1].strip()):
word = e.split("&&")[1].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif e.count("||") == 1:
if is_prolly_var(e.split("||")[0].strip()):
word = e.split("&&")[0].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif is_prolly_var(e.split("||")[1].strip()):
word = e.split("&&")[1].strip()
e = e.replace(word, "(" + word + " > 0)", 1)
elif e.strip().startswith("!(") and (e.count(")") == 1):
word = e.split("!(")[1].strip().split(")")[0].strip()
if is_prolly_var(word):
e = "(" + word + " <= 0)"
s += e
s += ' {break};\n'
s += self._make_indent() + "}"
return s
def visit_Switch(self, n):
s = 'switch (' + self.visit(n.cond) + ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_Case(self, n):
s = 'case ' + self.visit(n.expr) + ':\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_Default(self, n):
return 'default:\n' + self._generate_stmt(n.stmt, add_indent=True)
def visit_Label(self, n):
return n.name + ':\n' + self._generate_stmt(n.stmt)
def visit_Goto(self, n):
return 'goto ' + n.name + ';'
def visit_EllipsisParam(self, n):
return '...'
def visit_Struct(self, n):
return self._generate_struct_union(n, 'struct')
def visit_Typename(self, n):
return self._generate_type(n.type)
def visit_Union(self, n):
return self._generate_struct_union(n, 'union')
def visit_NamedInitializer(self, n):
s = ''
for name in n.name:
if isinstance(name, c_ast.ID):
s += '.' + name.name
elif isinstance(name, c_ast.Constant):
s += '[' + name.value + ']'
s += ' = ' + self.visit(n.expr)
return s
def _generate_struct_union(self, n, name):
""" Generates code for structs and unions. name should be either
            'struct' or 'union'.
"""
s = name + ' ' + (n.name or '')
if n.decls:
s += '\n'
s += self._make_indent()
self.indent_level += 2
s += '{\n'
for decl in n.decls:
s += self._generate_stmt(decl)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def _generate_stmt(self, n, add_indent=False):
""" Generation from a statement node. This method exists as a wrapper
for individual visit_* methods to handle different treatment of
some statements in this context.
"""
typ = type(n)
if add_indent: self.indent_level += 2
indent = self._make_indent()
if add_indent: self.indent_level -= 2
if typ in (
c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,
c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,
c_ast.StructRef):
# These can also appear in an expression context so no semicolon
# is added to them automatically
#
return indent + self.visit(n) + '\n'
elif typ in (c_ast.Compound,):
# No extra indentation required before the opening brace of a
# compound - because it consists of multiple lines it has to
# compute its own indentation.
#
return self.visit(n)
else:
return indent + self.visit(n) + '\n'
def _generate_decl(self, n):
""" Generation from a Decl node.
"""
s = ''
if n.funcspec: s = ' '.join(n.funcspec) + ' '
if n.storage: s += ' '.join(n.storage) + ' '
s += self._generate_type(n.type)
return s
def _generate_type(self, n, modifiers=[]):
""" Recursive generation from a type node. n is the type node.
modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers
encountered on the way down to a TypeDecl, to allow proper
generation from it.
"""
typ = type(n)
#~ print(n, modifiers)
if typ == c_ast.TypeDecl:
s = ''
if n.quals: s += ' '.join(n.quals) + ' '
s += self.visit(n.type)
nstr = n.declname if n.declname else ''
# Resolve modifiers.
# Wrap in parens to distinguish pointer to array and pointer to
# function syntax.
#
for i, modifier in enumerate(modifiers):
if isinstance(modifier, c_ast.ArrayDecl):
if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '[' + self.visit(modifier.dim) + ']'
elif isinstance(modifier, c_ast.FuncDecl):
if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '(' + self.visit(modifier.args) + ')'
elif isinstance(modifier, c_ast.PtrDecl):
nstr = '*' + nstr
if nstr: s += ' ' + nstr
return s
elif typ == c_ast.Decl:
return self._generate_decl(n.type)
elif typ == c_ast.Typename:
return self._generate_type(n.type)
elif typ == c_ast.IdentifierType:
return ' '.join(n.names) + ' '
elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl):
return self._generate_type(n.type, modifiers + [n])
else:
return self.visit(n)
def _parenthesize_if(self, n, condition):
""" Visits 'n' and returns its string representation, parenthesized
if the condition function applied to the node returns True.
"""
s = self.visit(n)
if condition(n):
return '(' + s + ')'
else:
return s
def _parenthesize_unless_simple(self, n):
""" Common use case for _parenthesize_if
"""
return self._parenthesize_if(n, lambda d: not self._is_simple_node(d))
def _is_simple_node(self, n):
""" Returns True for nodes that are "simple" - i.e. nodes that always
have higher precedence than operators.
"""
return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
c_ast.StructRef, c_ast.FuncCall))
def last_minute_replacements(self, s, firstname):
# Insert the filename without extension as argv[0]
if "argv[0]" in s:
s = s.replace("argv[0]", '"' + firstname + '"')
# Make replacements all over
for r in WHOLE_PROGRAM_REPLACEMENTS:
if r in s:
newstring = WHOLE_PROGRAM_REPLACEMENTS[r]
# Special cases
if "flag.Args" in newstring:
if "flag" not in self.imports:
self.imports.append("flag")
s = s.replace(r, newstring)
# Fix ++ and --
while s.count("$$$") > 0:
pos1 = s.find("$$$") + 3
pos2 = s.find("$$$", pos1)
contents = s[pos1:pos2]
fixed_cont = contents.replace("++", "").replace("--", "")
s = s.replace("$$$" + contents + "$$$", fixed_cont)
line = s[s.rfind("\n", 0, pos1)+1:s.find("\n", pos1)]
whitespace = line[:len(line) - len(line.lstrip())]
# Ok, we have removed ++ or -- from the line, let's add it again on the line above or below
contents = contents.strip()
if contents.startswith("++") or contents.startswith("--"):
# insert pos should be the line above
inspos = s.rfind("\n", 0, pos1) + 1
if contents.startswith("++"):
contents = contents.replace("++", "") + "++"
elif contents.startswith("--"):
contents = contents.replace("--", "") + "--"
s = s[:inspos] + whitespace + contents + "\n" + s[inspos:]
elif contents.endswith("++") or contents.endswith("--"):
# insert pos should be the line below
inspos = s.find("\n", pos1) + 1
s = s[:inspos] + whitespace + contents + "\n" + s[inspos:]
#break
for fix in range(s.count("]byte = new([")):
pos = s.find("]byte = new([")
bpos = s.rfind("\n", 0, pos)
eolpos = s.find("\n", pos)
contents = s[bpos:eolpos]
num = "".join([x for x in contents.split("=")[1] if x in "0123456789"])
if num:
newcontents = contents.split("*")[0] + "CString = cstrN(" + str(int(num) - 1) + ")"
else:
newcontents = contents + "CString"
s = s[:bpos] + newcontents + s[eolpos:]
# Use fmt.Println instead of fmt.Printf ... \n
for fix in range(s.count("fmt.Printf(")):
pos = s.find("fmt.Printf(")
eolpos = s.find("\n", pos)
contents = s[pos:eolpos]
if contents.strip().endswith("\\n\")") and contents.count("\"") == 2:
#log("FOUND: " + contents)
newcontents = contents.replace("fmt.Printf(", "fmt.Println(", 1).replace("\\n\")", "\")", 1)
#log("NEWCONTENTS: " + newcontents)
s = s[:pos] + newcontents + s[eolpos:]
# Add & to scanf variables
oldpos = 0
for fix in range(s.count("scanf(")):
pos = s.find("scanf(", oldpos)
oldpos = pos
eolpos = s.find("\n", pos)
contents = s[pos:eolpos]
paramstring = contents.split("(", 1)[1].rsplit(")", 1)[0]
params = paramstring.split(",")
#log("PARAMS: " + str(params))
newparams = []
for param in params:
if not "(" in param:
if not param.strip().startswith("&"):
newparams.append("&" + param.strip())
else:
newparams.append(param.strip())
else:
newparams.append(param.strip())
newps = ", ".join(newparams)
newcontents = contents.replace(paramstring, newps)
#log("NEWCONTENTS: " + str(newcontents))
s = s[:pos] + newcontents + s[eolpos:]
# Add & to first strcpy variable, if it's not there already
oldpos = 0
for fix in range(s.count("strcpy(")):
pos = s.find("strcpy(", oldpos)
oldpos = pos
eolpos = s.find("\n", pos)
contents = s[pos:eolpos]
paramstring = contents.split("(", 1)[1].rsplit(")", 1)[0]
params = paramstring.split(",")
#log("PARAMS: " + str(params))
newparams = []
donefirst = False
for param in params:
if (not donefirst) and (not "(" in param):
if not param.strip().startswith("&"):
newparams.append("&" + param.strip())
else:
newparams.append(param.strip())
donefirst = True
else:
newparams.append(param.strip())
newps = ", ".join(newparams)
newcontents = contents.replace(paramstring, newps)
#log("NEWCONTENTS: " + str(newcontents))
s = s[:pos] + newcontents + s[eolpos:]
return s
def fix_int_to_bool(self, s):
#log("Fix int to bool")
for fn in self.should_return_bool_instead_of_int:
pos = s.find("func " + fn)
#log("Found " + fn + " at " + str(pos))
eolpos = s.find("\n", pos)
#log("Found eol at " + str(eolpos))
intpos = s.rfind(" int ", pos, eolpos)
if intpos < eolpos:
#log("This is an int function, yes! " + str(pos) + " " + str(intpos) + " " + str(eolpos))
#log(s[intpos:eolpos])
s = s[:intpos] + "bool" + s[intpos+4:]
return s
def cleanup(data):
lines = []
for line in data.split("\n"):
if "#include" in line:
continue
if line.strip().startswith("//"):
continue
lines.append(line)
return "\n".join(lines)
def translate_to_go(filename):
firstname = filename
if "." in filename:
firstname = filename.rsplit(".", 1)[0]
clearlog()
f = open(filename)
data = f.read()
f.close()
data = cleanup(data)
#filename = tempfile.mkstemp()[1]
filename2 = "/tmp/jeje"
f = open(filename2, "w")
f.write(data)
f.close()
try:
ast = parse_file(filename2, use_cpp=True)
except plyparser.ParseError as e:
print("Could not parse %s:" % (filename))
print("line " + "".join(str(e).split(":", 1)[1:]))
return
generator = GoGenerator()
s = generator.visit(ast)
s = generator.fix_int_to_bool(s)
s = generator.last_minute_replacements(s, firstname)
s = generator.make_header() + s
print(s)
def log(s):
f = open("c2go.log", "a")
f.write(str(s) + "\n")
f.close()
def clearlog():
f = open("c2go.log", "w")
f.close()
#------------------------------------------------------------------------------
if __name__ == "__main__":
if '-v' in sys.argv:
print("c2go %s (using pycparser %s)" % (__version__, pycparser.__version__))
sys.argv.remove('-v')
if len(sys.argv) > 1:
translate_to_go(sys.argv[1])
else:
sys.exit("usage: c2go.py <filename>")
|
xyproto/c2go
|
c2go.py
|
Python
|
bsd-3-clause
| 43,202
|
[
"VisIt"
] |
12f4da4bf0a3e8eb3e006aaf36c5edb5045b40f57a13d7be701fe3c284cd2c0b
|
'''
The MIT License (MIT)
(c) Juergen Simon 2014 (juergen.simon@uni-bonn.de)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import glob
import os
import pprint
import visit
import utils
##
# Plots a list of tracks from .vtk files produced by
# meanie3D-trackstats --write-center-tracks-as-vtk.
#
# \param:conf Configuration dictionary
#
def run(conf):
pp = pprint.PrettyPrinter()
# pp.pprint(conf)
# Make sure the global configuration is in place
utils.run_global_visit_configuration(conf)
visitConf = utils.getValueForKeyPath(conf,'postprocessing.tracks.visit')
if not visitConf:
print "No configuration for visuals. Nothing to do."
return 0
# Set up background gradient, axis labels etc.
utils.setAnnotations(conf,'postprocessing.tracks.visit.annotationAttributes')
# Set the view straight
utils.setView(conf,'postprocessing.tracks.visit.view')
# Plot the map data
utils.plotMapdata(conf,'postprocessing.tracks.visit.map')
# Plot the tracks
trackPlotConf = utils.getValueForKeyPath(conf,'postprocessing.tracks.visit.track')
# pp.pprint(trackPlotConf)
currentDirectory = os.path.abspath(os.getcwd())
os.chdir(conf['tracks_dir'])
if trackPlotConf:
# Save value of legend flag
legendFlag = trackPlotConf['PseudocolorAttributes']['legendFlag']
# Plot the Tracks
# track_pattern = conf['tracks_dir'] + "/*-track_*.vtk"
track_pattern = "*-track_*.vtk"
list = sorted(glob.glob(track_pattern))
print "Looking with pattern " + track_pattern
print "Found %d track files." % len(list)
count = 0;
for trackFile in list:
# add plot
# trackFile = conf['tracks_dir'] + os.path.sep + fname
# plot the legend for the first one only
if (count == 1) and legendFlag:
trackPlotConf['PseudocolorAttributes']['legendFlag'] = 0
# pp.pprint(trackPlotConf)
# Plot the actual track data
file = conf['tracks_dir'] + os.path.sep + trackFile
print "Adding plot for " + file
utils.addPseudocolorPlot(file,trackPlotConf)
count = count + 1
# in case the script is being debugged, exit the script
# after 10 tracks. This could be configured
# if getValueForKeyPath(conf,'postprocessing.debugVisitScript') and count > 10
# Restore flag value
trackPlotConf['PseudocolorAttributes']['legendFlag'] = legendFlag
# pp.pprint(trackPlotConf)
print "Drawing plots"
visit.DrawPlots()
print "Saving image to %s" % os.getcwd()
utils.saveImage("tracks",0)
os.chdir(currentDirectory)
return
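# Added usage sketch (not part of this module; the path below is a placeholder and
# only the keys that run() actually reads are shown, resolved through
# utils.getValueForKeyPath):
#
#   conf = {
#       'tracks_dir': '/data/scan/tracks',
#       'postprocessing': {'tracks': {'visit': {...}}},
#   }
#   run(conf)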
|
meteo-ubonn/meanie3D
|
python/meanie3D/visualisation/tracks.py
|
Python
|
mit
| 3,744
|
[
"VTK",
"VisIt"
] |
f9a8dfc8865da159a3f596861bc8a9b167aa8c43449993bb06929ab9fee78cc2
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
try:
    from urllib.parse import urlparse  # py3
except ImportError:
    from urlparse import urlparse  # py2
try:
xrange # py2
except NameError:
xrange = range # py3
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client::
class EchoWebSocket(websocket.WebSocketHandler):
def open(self):
print "WebSocket opened"
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print "WebSocket closed"
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement
    the `open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
tornado.web.RequestHandler.__init__(self, application, request,
**kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.set_status(400)
self.finish("Can \"Upgrade\" only to \"WebSocket\".")
return
# Connection header should be upgrade. Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.set_status(400)
self.finish("\"Connection\" must be \"Upgrade\".")
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
self.finish("Cross origin websockets not allowed")
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
self.ws_connection = WebSocketProtocol13(
self, compression_options=self.get_compression_options())
self.ws_connection.accept_connection()
else:
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 8\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the memory and CPU usage of the compression,
but no such options are currently implemented.
.. versionadded:: 4.1
"""
return None
def open(self):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
self.on_close()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def _wrap_method(method):
def _disallow_for_websocket(self, *args, **kwargs):
if self.stream is None:
method(self, *args, **kwargs)
else:
raise RuntimeError("Method not supported for Web Sockets")
return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method,
_wrap_method(getattr(WebSocketHandler, method)))
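# A minimal sketch of the subclassing pattern described in the docstrings
# above, assuming a standard Tornado application; the EchoWebSocket name is
# purely illustrative.
class EchoWebSocket(WebSocketHandler):
    def open(self):
        # Invoked once the opening handshake has completed.
        pass

    def on_message(self, message):
        # Echo every received text message back to the client.
        self.write_message(u"You said: " + message)

    def on_close(self):
        # close_code / close_reason are populated after a clean shutdown.
        pass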
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
On error, aborts the websocket connection and returns False.
"""
try:
callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(-1, zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None):
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received", exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
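# Example (the sample handshake from RFC 6455): for the client key
# "dGhlIHNhbXBsZSBub25jZQ==", compute_accept_value returns
# "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", which the client side verifies in
# _process_server_headers below.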
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected
extension_header = ''
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1])
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
httputil._encode_header(
'permessage-deflate', ext[1]))
break
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s%s"
"\r\n" % (self._challenge_response(),
subprotocol_header, extension_header)))
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
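# For example, with side='server' and agreed_parameters of
# {'server_max_window_bits': '10'}, this yields
# {'persistent': True, 'max_wbits': 10}; adding
# 'server_no_context_takeover' to the parameters would flip persistent
# to False.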
def _create_compressors(self, side, agreed_parameters):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
self.stream.write(frame)
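# Wire layout produced above (RFC 6455): one byte of FIN/RSV/opcode, one
# byte holding the mask bit plus a 7-bit length, an optional 16- or 64-bit
# extended length (for payloads of 126..0xFFFF bytes and larger,
# respectively), the 4-byte mask when masking is enabled, and finally the
# (possibly masked) payload.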
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
try:
self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
self._abort()
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self.stream.read_bytes(self._frame_length, self._on_masked_frame_data)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
self._handle_message(opcode, data)
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
self.close()
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, compression_options=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = WebSocketProtocol13(
self, mask_outgoing=True,
compression_options=self.compression_options)
self.protocol._process_server_headers(self.key, self.headers)
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
compression_options=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options``.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request, compression_options)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
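# A minimal client-side sketch, assuming this runs inside a Tornado
# coroutine and that an echo endpoint is listening at the illustrative URL:
#
#     conn = yield websocket_connect("ws://localhost:8888/echo")
#     conn.write_message("hello")
#     reply = yield conn.read_message()  # resolves to None once the socket closes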
|
0xkag/tornado
|
tornado/websocket.py
|
Python
|
apache-2.0
| 38,343
|
[
"VisIt"
] |
cd5ac097ba18e3d94db443aa581cd561843f92786af9bca3bedeeb0f49f4085c
|
# JN 2016-03-02
"""
show responses in css-gui
"""
from __future__ import print_function, division, absolute_import
import os
import numpy as np
import scipy.signal as signal
from .sort_widgets import MplCanvas
from matplotlib.pyplot import imread
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
HGAP = VGAP = BOTTOM = .02
T_PRE = 1000 # relative to onset
T_POST = 2000 # relative to onset
std = 100
nsamp_window = 10 * std
LW = 1.8 # linewidth of individual lines in raster
fr_bins = np.arange(-T_PRE-500, T_POST+500, 1)
fr_window = signal.get_window(('gaussian', std), nsamp_window)
fr_w = int(nsamp_window/2) - 1 # JN 2016-10-17 add int
def set_raster_properties(p, ylim=8):
resp_ylim = (-1, ylim)
for where in ('left', 'right'):
p.spines[where].set_visible(False)
p.axvline(0, ls='dashed', color='k')
p.axvline(1000, ls='dashed', color='k')
p.set_xlim([-T_PRE, T_POST])
p.set_xticks([-T_PRE, T_POST])
p.set_xticklabels([])
p.set_ylim(resp_ylim)
p.set_yticks([])
def plot_one_cluster_one_stim(plot, rows, ylim, color):
"""
version for lists of cl_times
"""
set_raster_properties(plot, ylim=ylim)
colors = np.array([color] * len(rows))
plot.eventplot(rows, colors=colors, linewidths=LW)
def plot_convolution(plot, rows, lw=1):
"""
good if there is a raster already
"""
all_times = np.hstack(rows)
hist, _ = np.histogram(all_times, fr_bins)
smooth = signal.convolve(hist, fr_window)
smooth *= (len(rows)/smooth.max())
smooth -= .5
plot.plot(fr_bins, smooth[fr_w:-fr_w], 'm', alpha=.5, lw=lw)
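# A small sketch of the smoothing performed above, assuming two illustrative
# raster rows (spike times in ms, relative to stimulus onset):
#
#     rows = [np.array([10., 55., 400.]), np.array([30., 420.])]
#     hist, _ = np.histogram(np.hstack(rows), fr_bins)
#     smooth = signal.convolve(hist, fr_window)  # Gaussian window, std = 100 ms
#     # smooth[fr_w:-fr_w] is rescaled and drawn on top of the raster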
def create_raster_rows(cl_times, onset_times):
"""
prepare for raster plots
"""
rows = []
empty = [-2 * T_PRE]
onset_times /= 1000
do_plot = False
for onset_time in onset_times:
idx = (cl_times >= onset_time - T_PRE) &\
(cl_times <= onset_time + T_POST)
if idx.any():
do_plot = True
rows.append(cl_times[idx] - onset_time)
else:
rows.append(empty)
return rows, do_plot
def plot_one_plot(plot, scale, spike_times, onset_times,
stim_name, image, scrtype, stimnum=None):
"""
plot either image or name screening raster plot into plot
"""
colors = ((0, 0, .8), (.7, 0, 0), (1, 1, 0))
merged_spike_times = np.hstack(spike_times)
merged_spike_times.sort()
for i_sp, sptimes in enumerate(spike_times):
rows, do_plot = create_raster_rows(sptimes,
onset_times.copy())
if do_plot:
plot_one_cluster_one_stim(plot, rows,
ylim=len(rows) + 1,
color=colors[i_sp])
# else:
# print('Not plotting raster!')
rows, do_plot = create_raster_rows(merged_spike_times,
onset_times.copy())
if do_plot:
plot_convolution(plot, rows, min(2.5, 10/scale))
#else:
#print('Not doing convolution!')
# plot.text(0, .96, str(len(onset_times)), ha='left', va='top',
# transform=plot.transAxes, size=SIZE_STIMNAME)
if scrtype == 'nscr':
plot.text(.5, 1.01, stim_name, ha='center', va='bottom',
transform=plot.transAxes, size=20 - scale)
elif scrtype == 'scr':
imar = OffsetImage(image, zoom=1/scale)
artist = AnnotationBbox(imar, (.5, 1.01), pad=0,
box_alignment=(.5, 0),
xycoords='axes fraction',
frameon=False)
plot.add_artist(artist)
if stimnum is not None:
plot.text(0, 1.01, '#{}'.format(stimnum), ha='left',
va='bottom', transform=plot.transAxes,
size=20 - scale)
def get_stim_info(frame, stimulus):
idx = frame['stim_num'] == stimulus
stim_name = frame.loc[idx, 'stim_name'].get_values()[0]
fname_image = frame.loc[idx, 'filename'].get_values()[0]
return stim_name, fname_image
def get_onset_times(frame, stimulus, daytime, paradigm):
idx = (frame['stim_num'] == stimulus)\
& (frame['paradigm'] == paradigm)
if len(daytime):
idx &= frame['daytime'] == daytime
return frame.loc[idx, 'time'].get_values() * 1000
class RasterFigure(MplCanvas):
def __init__(self, parent, width=5, height=5):
super(RasterFigure, self).__init__(parent, width, height)
self.frame = None
self.stimuli = None
self.images = None
self.names = None
def set_paradigm_data(self, frame, image_path):
self.frame = frame
self.stimuli = frame['stim_num'].unique()
self.images = dict()
self.names = dict()
for stimulus in self.stimuli:
stim_name, fname_image = get_stim_info(self.frame, stimulus)
self.names[stimulus] = stim_name
fname_image = os.path.join(image_path, fname_image)
print(fname_image)
self.images[stimulus] = imread(fname_image)
def update_figure(self, spiketimes, daytime, scale=5,
do_numbers=False):
"""
update the plot
"""
figure = self.fig
figure.clf()
stimuli = self.stimuli
n_stim = len(stimuli)
hgap = HGAP
vgap = VGAP
# number of cols is 2 starting from 4 stimuli
if n_stim > 3:
n_cols = 2
plot_width = (1 - 8*hgap)/4
if len(set(self.frame.paradigm))<2:
plot_width *= 2
n_rows = int((n_stim + 1)/2)
else:
n_cols = 1
plot_width = (1 - 4*hgap)/2
n_rows = n_stim
row_height = (1 - 2*vgap)/n_rows
plot_height = row_height * .6
iterator = list(zip((0, plot_width + hgap), ('scr', 'nscr')))
col_shift = 0
for istim, stimulus in enumerate(stimuli):
if (n_cols > 1) and (istim >= n_stim/2):
col_shift = .5
row_bottom = 1 - vgap - (istim % n_rows)*row_height -\
row_height + BOTTOM/2 + .01
for shift, paradigm in iterator:
if paradigm in set(self.frame.paradigm):
pos = (col_shift + hgap + shift, row_bottom,
plot_width, plot_height)
plot = figure.add_axes(pos)
onset_times = get_onset_times(self.frame, stimulus,
daytime, paradigm)
if do_numbers:
number = stimulus + 1
else:
number = None
plot_one_plot(plot, scale, spiketimes, onset_times,
self.names[stimulus], self.images[stimulus],
paradigm, number)
if (istim + 1 == n_rows) or (istim + 1 == n_stim):
plot.set_xticks([0, 1000])
plot.set_xticklabels([0, 1000])
self.draw()
|
jniediek/combinato
|
combinato/guisort/raster_figure.py
|
Python
|
mit
| 7,173
|
[
"Gaussian"
] |
33cb0d766a03e6295045a4017654784096089b5ad07a5796b807322aca44ad2b
|
#!/usr/bin/python
########################################################################
# 2 September 2014
# Patrick Lombard, Centre for Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import os, re, sys
import argparse
import numpy as np
import HTSeq
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm as CM
import pandas as pd
import subprocess
from plot_ngs_data.scripts import homer_heatmaps
import metaseq
import pybedtools
import multiprocessing
import ConfigParser
def plot_matrix(homer_file, label):
data = list()
with open(homer_file) as f:
header = next(f)
head = header.split()
del head[0]
for line in f:
line = line.rstrip()
word = line.split("\t")
del word[0]
data.append(word)
b = np.array(data, dtype="f" )
c = b[b[:,1].argsort()] #Sorts from lowest to highest!
d = np.log2(c+1) #Not sure if correct with +1
plt.locator_params(axis = 'x', nbins = 10) #Took ages to figure this out, do not forget! This sets the number of bins and must match the arange length!
cmap = CM.get_cmap('Reds', 100) #Sets color scheme
plt.imshow(d, interpolation='none', cmap=cmap, aspect="auto")
plt.xticks(np.arange(0, 401, 50), np.arange(-2000, 2001, 500), rotation='vertical') #Key is to make them both exactly the same length!!
plt.colorbar()
plt.savefig(label+"_custom_heatmap.png") #Doesnt work with PDF for some reason, stick to PNG
plt.close()
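# Note on the tick alignment above: np.arange(0, 401, 50) and
# np.arange(-2000, 2001, 500) both contain 9 entries, so each labelled tick
# lands on the matching data column (presumably 10 bp per bin across a
# +/- 2 kb window in the HOMER matrix).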
def pandas_plot_matrix(homer_file, label):
data = list()
index = []
with open(homer_file) as f:
header = next(f)
head = header.split()
del head[0]
#for pandas, this is the columns
for line in f:
line = line.rstrip()
word = line.split("\t")
index.append(word[0])
del word[0]
data.append(word)
b = np.array(data, dtype="f" ) #This is fine
c = b[b[:,1].argsort()] #Sorts from lowest to highest!
d = np.log2(c+1) #Not sure if correct with +1
#Now convert this to pandas dataframe
df = pd.DataFrame(d, index=index, columns=head)
print df
cmap = CM.get_cmap('Reds', 100) #Sets color scheme
plt.imshow(df, interpolation='none', cmap=cmap, aspect="auto")
# plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
#plt.xticks(np.arange(0, len(df.columns), 500), df.columns)
plt.xticks(np.arange(0, 1001, 100), np.arange(-5000, 5001, 1000), rotation='vertical')
plt.colorbar()
plt.savefig(label+".png") #Doesnt work with PDF for some reason, stick to PNG
plt.close()
|
pdl30/pyngsplot
|
pyngsplot/tools/custom_heatmaps.py
|
Python
|
mit
| 2,543
|
[
"HTSeq"
] |
f432ffb79619d2b3a1358c516c6b58049d16f3d54dbf378775c801d8305dc8d5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='jana',
version='0.1.0',
description='RESTful manager of secrets',
long_description=readme + '\n\n' + history,
author='Christopher Petrilli',
author_email='petrilli@amber.org',
url='https://github.com/petrilli/jana',
packages=[
'jana',
],
package_dir={'jana':
'jana'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='jana',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
|
petrilli/jana
|
setup.py
|
Python
|
bsd-3-clause
| 1,410
|
[
"Amber"
] |
3efeef94247d1f25e73bccd61b9cc4680d324d7fdbfa639f29148429c82eebcc
|
import superimport
import pyprobml_utils as pml
import numpy as np
from scipy.stats import uniform, laplace, norm
import matplotlib.pyplot as plt
n = 2000
x = np.arange(-4, 4, 0.01)
y1 = norm.pdf(x, 0, 1)
y2 = uniform.pdf(x, -2, 4)
y3 = laplace.pdf(x, 0, 1)
plt.plot(x, y1, color='blue')
plt.plot(x, y2, color='green')
plt.plot(x, y3, color='red')
pml.savefig('1D.pdf')
plt.savefig('1D.pdf')
plt.show()
x1 = np.random.normal(0, 1, n).reshape(n, 1)
x2 = np.random.normal(0, 1, n).reshape(n, 1)
plt.scatter(x1, x2, marker='.', color='blue')
plt.gca().set_aspect('equal')
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.title("Gaussian")
pml.savefig('Gaussian.pdf')
plt.savefig('Gaussian.pdf')
plt.show()
x1 = np.random.laplace(0, 1, n).reshape(n, 1)
x2 = np.random.laplace(0, 1, n).reshape(n, 1)
plt.scatter(x1, x2, marker='.', color='red')
plt.gca().set_aspect('equal')
plt.xlim(-8,8)
plt.ylim(-8,8)
plt.title("Laplace")
pml.savefig('Laplace.pdf')
plt.savefig('Laplace.pdf')
plt.show()
x1 = np.random.uniform(-2, 2, n).reshape(n, 1)
x2 = np.random.uniform(-2, 2, n).reshape(n, 1)
plt.scatter(x1, x2, marker='.', color='green')
plt.gca().set_aspect('equal')
plt.xlim(-2.5, 2.5)
plt.ylim(-2, 2)
plt.title("Uniform")
pml.savefig('Uniform.pdf')
plt.savefig('Uniform.pdf')
plt.show()
|
probml/pyprobml
|
scripts/sub_super_gauss_plot.py
|
Python
|
mit
| 1,270
|
[
"Gaussian"
] |
63d5705c88dea18e594b420974d5547efe7beaa8adecb89bd1fd54b57c965476
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Asynchronous execution of Fdist and spliting of loads.
FDistAsync Allows for the execution of FDist.
SplitFDist splits a single Fdist execution in several, taking advantage
of multi-core architectures.
"""
import os
import shutil
import threading
from time import sleep
from Bio.PopGen.Async import Local
from Bio.PopGen.FDist.Controller import FDistController
__docformat__ = "restructuredtext en"
class FDistAsync(FDistController):
"""Asynchronous FDist execution.
"""
def __init__(self, fdist_dir="", ext=None):
"""Constructor.
Parameters:
- fdist_dir - Where fdist can be found; if "", then it
should be on the path.
- ext - Extension of binary names (e.g. nothing on Unix,
".exe" on Windows).
"""
FDistController.__init__(self, fdist_dir, ext)
def run_job(self, parameters, input_files):
"""Runs FDist asynchronously.
Gets typical Fdist parameters from a dictionary and
makes a "normal" call. This is run, normally, inside
a separate thread.
"""
npops = parameters['npops']
nsamples = parameters['nsamples']
fst = parameters['fst']
sample_size = parameters['sample_size']
mut = parameters.get('mut', 0)
num_sims = parameters.get('num_sims', 20000)
data_dir = parameters.get('data_dir', '.')
is_dominant = parameters.get('is_dominant', False)
theta = parameters.get('theta', 0.06)
beta = parameters.get('beta', (0.25, 0.25))
max_freq = parameters.get('max_freq', 0.99)
fst = self.run_fdist(npops, nsamples, fst, sample_size,
mut, num_sims, data_dir,
is_dominant, theta, beta,
max_freq)
output_files = {}
output_files['out.dat'] = open(data_dir + os.sep + 'out.dat', 'r')
return fst, output_files
class SplitFDist(object):
"""Splits a FDist run.
The idea is to split a certain number of simulations in smaller
numbers (e.g. 30.000 sims split in 30 packets of 1.000). This
allows to run simulations in parallel, thus taking advantage
of multi-core CPUs.
Each SplitFDist object can only be used to run a single FDist
simulation.
"""
def __init__(self, report_fun=None,
num_thr=2, split_size=1000, fdist_dir='', ext=None):
"""Constructor.
Parameters:
- report_fun - Function that is called when a single packet is
run, it should have a single parameter: Fst.
- num_thr - Number of desired threads, typically the number
of cores.
- split_size - Size that a full simulation will be split in.
- ext - Binary extension name (e.g. nothing on Unix, '.exe' on
Windows).
"""
self.async = Local.Local(num_thr)
self.async.hooks['fdist'] = FDistAsync(fdist_dir, ext)
self.report_fun = report_fun
self.split_size = split_size
# There might be races when reporting...
def monitor(self):
"""Monitors and reports (using report_fun) execution.
Every time a partial simulation ends, calls report_fun.
IMPORTANT: monitor calls can be concurrent with other
events, i.e., a task might end while report_fun is being
called. This means that report_fun should consider that
other events might be happening while it is running (it
can call acquire/release if necessary).
"""
while(True):
sleep(1)
self.async.access_ds.acquire()
keys = list(self.async.done.keys()) # copy it
self.async.access_ds.release()
for done in keys:
self.async.access_ds.acquire()
fst, files = self.async.done[done]
del self.async.done[done]
out_dat = files['out.dat']
f = open(self.data_dir + os.sep + 'out.dat', 'a')
f.writelines(out_dat.readlines())
f.close()
out_dat.close()
self.async.access_ds.release()
for file in os.listdir(self.parts[done]):
os.remove(self.parts[done] + os.sep + file)
os.rmdir(self.parts[done])
if self.report_fun:
self.report_fun(fst)
self.async.access_ds.acquire()
if len(self.async.waiting) == 0 and len(self.async.running) == 0 \
and len(self.async.done) == 0:
break
self.async.access_ds.release()
def acquire(self):
"""Allows the external acquisition of the lock.
"""
self.async.access_ds.acquire()
def release(self):
"""Allows the external release of the lock.
"""
self.async.access_ds.release()
# You can only run a fdist case at a time
def run_fdist(self, npops, nsamples, fst, sample_size,
mut=0, num_sims=20000, data_dir='.',
is_dominant=False, theta=0.06, beta=(0.25, 0.25),
max_freq=0.99):
"""Runs FDist.
Parameters can be seen on FDistController.run_fdist.
It will split a single execution in several parts and
create separated data directories.
"""
num_parts = num_sims // self.split_size
self.parts = {}
self.data_dir = data_dir
for directory in range(num_parts):
full_path = data_dir + os.sep + str(directory)
try:
os.mkdir(full_path)
except OSError:
pass  # It's ok if it is already there
if "ss_file" in os.listdir(data_dir):
shutil.copy(data_dir + os.sep + "ss_file", full_path)
id = self.async.run_program('fdist', {
'npops': npops,
'nsamples': nsamples,
'fst': fst,
'sample_size': sample_size,
'mut': mut,
'num_sims': self.split_size,
'data_dir': full_path,
'is_dominant': is_dominant,
'theta': theta,
'beta': beta,
'max_freq': max_freq
}, {})
self.parts[id] = full_path
threading.Thread(target=self.monitor).run()
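# A minimal usage sketch, assuming the fdist binaries are on the PATH and
# report_cb is a user-supplied callback that receives each partial Fst:
#
#     def report_cb(fst):
#         print(fst)
#
#     splitter = SplitFDist(report_fun=report_cb, num_thr=4, split_size=1000)
#     splitter.run_fdist(npops=15, nsamples=30, fst=0.1, sample_size=50,
#                        num_sims=20000, data_dir='fdist_out')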
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/PopGen/FDist/Async.py
|
Python
|
gpl-2.0
| 6,761
|
[
"Biopython"
] |
9c64de7613f0fea194aefd155e5ccd69f226adc13e6af0b5f9da79559be1e52e
|
from cifsdk.format.cifbind import Bind
import re
def test_format_bind():
data = [
{
'observable': "example.com",
'provider': "me.com",
'tlp': "amber",
'confidence': "85",
'reporttime': '2015-01-01T00:00:00Z',
'otype': 'fqdn'
},
{
'observable': "example2.com",
'provider': "me.com",
'tlp': "amber",
'confidence': "85",
'reporttime': '2015-01-01T00:00:00Z',
'otype': 'fqdn'
},
{
'observable': "example3.com",
'provider': "me.com",
'tlp': "amber",
'confidence': "85",
'reporttime': '2015-01-01T00:00:00Z',
'otype': 'fqdn'
},
]
text = str(Bind(data))
assert re.findall(r'^// generated by: cifsdk.format.cifbind at \S+', text)
assert re.findall(r'\nzone "example.com" {type master; file "\S+";};\n', text)
assert re.findall(r'\nzone "example3.com" {type master; file "\S+";};', text)
if __name__ == '__main__':
test_format_bind()
|
aeppert/py-cifsdk
|
test/test_bind.py
|
Python
|
lgpl-3.0
| 1,121
|
[
"Amber"
] |
792847584aa86e4915b1aca7a337eed2aca22b63dc46d4d92b9bd965c2e6a195
|
"""Test that set_momenta behaves as expected when constraints are
involved."""
import numpy as np
from ase import Atoms, Atom
from ase.constraints import Hookean, FixAtoms
# FixAtoms check
atoms = Atoms([Atom('H', (0., 0., 0.)),
Atom('H', (2., 0., 0.))])
atoms.set_constraint(FixAtoms(indices=[0]))
atoms.set_momenta(np.ones(atoms.get_momenta().shape))
desired = np.ones(atoms.get_momenta().shape)
desired[0] = 0.
actual = atoms.get_momenta()
assert (actual == desired).all()
# Hookean check
atoms = Atoms([Atom('H', (0., 0., 0.)),
Atom('H', (2., 0., 0.))])
atoms.set_constraint(Hookean(0, 1, rt=1., k=10.))
atoms.set_momenta(np.zeros(atoms.get_momenta().shape))
actual = atoms.get_momenta()
desired = np.zeros(atoms.get_momenta().shape)
assert (actual == desired).all()
|
suttond/MODOI
|
ase/test/set_momenta.py
|
Python
|
lgpl-3.0
| 805
|
[
"ASE"
] |
74c5c6209d1628f487a4ad834a134f570e398d19b1cb3e5a5446708d9b17f8c3
|
#! /usr/bin/env python
# coding:utf-8
# Author: bingwang
# Email: toaya.kase@gmail.com
# Copyright 2012-2012 Bing Wang
# LICENCES: GPL v3.0
__docformat__ = "epytext en"
#1. get scer 1:1 seub
#2. get scer 1:1 suva 1:1 anc
#3. map scer:suva:seub use 1:1:1
#4. insert suva specific genes, check seub gene use anc and blast
#5. insert seub specific genes
#6. output
seubONscer_file = "/Users/bingwang/zen/yeast_anno_pipe/seubONscer.txt"
suva_tab_file = "/Users/bingwang/zen/yeast_anno_pipe/YGOB_dataset/Suvarum_genome.tab"
pillar_file = "/Users/bingwang/zen/yeast_anno_pipe/YGOB_dataset/Pillars.tab"
write_file = "/Users/bingwang/zen/yeast_anno_pipe/Pillar_add_seub.txt"
content = []
scer2seub = {}
scer_seq = []
for line in open(seubONscer_file):
record = line.split("\t")
scer2seub[record[0]] = record[5]
scer_seq.append(record[0])
'''
#2. get scer 1:1 suva
scer2suva = {}
suva2scer = {}
scer2suva[""] = ""
for i,line in enumerate(open(suva_tab_file)):
content = line.split("\t")
suva = content[0]
if not "trna" in suva and not "CEN" in suva:
scer = content[8][:content[8].find("(")].strip()
homo_type = content[8][content[8].find("(")+1:content[8].find(")")]
if len(scer) not in [0,7,9,12]:
print len(scer),i,content[8]
if homo_type not in ["REAL","HSP","PSEUDO","REPEAT","NOVEL","HYPO","INTER"]:
print homo_type
suva2scer[suva] = scer
if scer != "":
if scer in scer2suva:
print "conflict:",scer2suva[scer],suva,"share same homo"
else:
scer2suva[scer] = suva
'''
content = []
for line in open(pillar_file):
if 'Scer_' in line or "trna" in line:
continue
genes = line.split("\t")
scer_1,scer_2,anc,suva_1,suva_2 = genes[11],genes[-12],genes[12],genes[8],genes[-9]
dubious = ["YIL170W","YIR044C","YIL171W","YOL153C","YPL276W","YPL275W","YAR061W","YAR073W","YAR075W"]
if scer_1 in dubious or scer_2 in dubious:
continue
seub_1 = "---" if scer_1 == "---" else scer2seub[scer_1]
seub_2 = "---" if scer_2 == "---" else scer2seub[scer_2]
if [scer_1,suva_1,seub_1,anc,scer_2,suva_2,seub_2].count('---') == 7:
continue
content.append("\t".join([scer_1,suva_1,seub_1,anc,scer_2,suva_2,seub_2]))
open(write_file,"w").write("\n".join(content))
|
BingW/yeast_anno_pipe
|
src/make_seub_pillar.py
|
Python
|
gpl-3.0
| 2,349
|
[
"BLAST"
] |
b147eb16518fff8a402476a1fade9644566cb5361cbef35df3a4184c2945e612
|
import time, sys, os
import numpy as np
np.seterr(invalid='ignore')  # errstate() is a context manager; calling it without "with" has no effect
from prospect.models import model_setup
from prospect.io import write_results
from prospect import fitting
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log
from dynesty.dynamicsampler import stopping_function, weight_function, _kld_error
from dynesty.utils import *
# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'param_file': ''}
clargs = model_setup.parse_args(sargv, argdict=argdict)
run_params = model_setup.get_run_params(argv=sargv, **clargs)
# --------------
# Globals
# --------------
# GP instances as global
spec_noise, phot_noise = model_setup.load_gp(**run_params)
# Model as global
global_model = model_setup.load_model(**run_params)
# Obs as global
global_obs = model_setup.load_obs(**run_params)
# SPS Model instance as global
sps = model_setup.load_sps(**run_params)
# -----------------
# LnP function as global
# ------------------
def lnprobfn(theta, model=None, obs=None, verbose=run_params['verbose']):
"""Given a parameter vector and optionally a dictionary of observational
data and a model object, return the ln of the posterior. This requires that
an sps object (and, if using spectra and Gaussian processes, a GP object) be
instantiated.
:param theta:
Input parameter vector, ndarray of shape (ndim,)
:param model:
bsfh.sedmodel model object, with attributes including ``params``, a
dictionary of model parameters. It must also have ``prior_product()``,
and ``mean_model()`` methods defined.
:param obs:
A dictionary of observational data. The keys should be
*``wavelength``
*``spectrum``
*``unc``
*``maggies``
*``maggies_unc``
*``filters``
* and optional spectroscopic ``mask`` and ``phot_mask``.
:returns lnp:
Ln posterior probability.
"""
if model is None:
model = global_model
if obs is None:
obs = global_obs
lnp_prior = model.prior_product(theta, nested=True)
if np.isfinite(lnp_prior):
# Generate mean model
try:
mu, phot, x = model.mean_model(theta, obs, sps=sps)
except(ValueError):
return -np.infty
# Noise modeling
if spec_noise is not None:
spec_noise.update(**model.params)
if phot_noise is not None:
phot_noise.update(**model.params)
vectors = {'spec': mu, 'unc': obs['unc'],
'sed': model._spec, 'cal': model._speccal,
'phot': phot, 'maggies_unc': obs['maggies_unc']}
# Calculate likelihoods
lnp_spec = lnlike_spec(mu, obs=obs, spec_noise=spec_noise, **vectors)
lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise, **vectors)
return lnp_phot + lnp_spec + lnp_prior
else:
return -np.infty
def prior_transform(u, model=None):
if model is None:
model = global_model
return model.prior_transform(u)
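# How the two callables above fit together: dynesty draws a point u uniformly
# from the unit hypercube, maps it into parameter space with
# prior_transform(u), and evaluates lnprobfn on the result. With nested=True
# the prior product is intended to act as a finite/out-of-bounds check rather
# than re-applying the prior density, which is already encoded in the
# transform.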
pool = None
nprocs = 1
def halt(message):
"""Exit, closing pool safely.
"""
print(message)
try:
pool.close()
except:
pass
sys.exit(0)
if __name__ == "__main__":
# --------------
# Setup
# --------------
rp = run_params
rp['sys.argv'] = sys.argv
try:
rp['sps_libraries'] = sps.ssp.libraries
except(AttributeError):
rp['sps_libraries'] = None
# Use the globals
model = global_model
obs = global_obs
if rp.get('debug', False):
halt('stopping for debug')
# Try to set up an HDF5 file and write basic info to it
outroot = "{0}_{1}".format(rp['outfile'], int(time.time()))
odir = os.path.dirname(os.path.abspath(outroot))
if (not os.path.exists(odir)):
badout = 'Target output directory {} does not exist, please make it.'.format(odir)
halt(badout)
# -------
# Sample
# -------
if rp['verbose']:
print('dynesty sampling...')
tstart = time.time() # time it
dynestyout = fitting.run_dynesty_sampler(lnprobfn, prior_transform, model.ndim,
pool=pool, queue_size=nprocs,
stop_function=stopping_function,
wt_function=weight_function,
**rp)
ndur = time.time() - tstart
print('done dynesty in {0}s'.format(ndur))
# -------------------------
# Output HDF5 (and pickles if asked for)
# -------------------------
if rp.get("output_pickles", False):
# Write the dynesty result object as a pickle
import pickle
with open(outroot + '_dns.pkl', 'wb') as f:  # pickle requires a binary-mode file
pickle.dump(dynestyout, f)
# Write the model as a pickle
partext = write_results.paramfile_string(**rp)
write_results.write_model_pickle(outroot + '_model', model, powell=None,
paramfile_text=partext)
# Write HDF5
hfile = outroot + '_mcmc.h5'
write_results.write_hdf5(hfile, rp, model, obs, dynestyout,
None, tsample=ndur)
|
bd-j/bsfh
|
scripts/prospector_dynesty.py
|
Python
|
gpl-2.0
| 5,286
|
[
"Gaussian"
] |
11236bf87ce1c9e67352e0ab3ca9f655a3d4bf2084b77d98a13b3880df245bdc
|
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from os import path
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
from Tools.GetEcmInfo import GetEcmInfo
from Poll import Poll
def addspace(text):
if text:
text += " "
return text
class PliExtraInfo(Poll, Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.type = type
self.poll_interval = 1000
self.poll_enabled = True
self.caid_data = (
("0x1700", "0x17ff", "BetaCrypt", "B", True ),
( "0x600", "0x6ff", "Irdeto", "I", True ),
("0x1800", "0x18ff", "Nagravision", "N", True ),
( "0x100", "0x1ff", "Seca Mediaguard","S", True ),
("0x1000", "0x10FF", "Tandberg", "T", True ),
( "0x500", "0x5ff", "Viaccess", "V", True ),
("0x2600", "0x2600", "Biss", "BI", True ),
("0x4aee", "0x4aee", "BulCrypt", "BU", True ),
("0x5581", "0x5581", "BulCrypt", "BU", False),
( "0xb00", "0xbff", "Conax", "CO", True ),
( "0xd00", "0xdff", "CryptoWorks", "CW", True ),
("0x4ae0", "0x4ae1", "DRE-Crypt", "DC", True ),
( "0x900", "0x9ff", "NDS Videoguard", "ND", True ),
( "0xe00", "0xeff", "PowerVu", "PV", True ),
)
self.ca_table = (
("CryptoCaidBetatAvailable", "B", False),
("CryptoCaidIrdetoAvailable" "I", False),
("CryptoCaidNagraAvailable", "N", False),
("CryptoCaidSecaAvailable", "S", False),
("CryptoCaidTandbergAvailable", "T", False),
("CryptoCaidViaAvailable", "V", False),
("CryptoCaidBissAvailable", "BI", False),
("CryptoCaidBulCrypt1Available","BU", False),
("CryptoCaidBulCrypt2Available","BU", False),
("CryptoCaidConaxAvailable", "CO", False),
("CryptoCaidCryptoWAvailable", "CW", False),
("CryptoCaidDreAvailable", "DC", False),
("CryptoCaidNDSAvailable", "ND", False),
("CryptoCaidPowerVuAvailable", "PV", False),
("CryptoCaidBetaSelected", "B", True ),
("CryptoCaidIrdetoSelected", "I", True ),
("CryptoCaidNagraSelected", "N", True ),
("CryptoCaidSecaSelected", "S", True ),
("CryptoCaidTandbergSelected", "T", True ),
("CryptoCaidViaSelected", "V", True ),
("CryptoCaidBissSelected", "BI", True ),
("CryptoCaidBulCrypt1Selected", "BU", True ),
("CryptoCaidBulCrypt2Selected", "BU", True ),
("CryptoCaidConaxSelected", "CO", True ),
("CryptoCaidCryptoWSelected", "CW", True ),
("CryptoCaidDreSelected", "DC", True ),
("CryptoCaidNDSSelected", "ND", True ),
("CryptoCaidPowerVuSelected", "PV", True ),
)
self.ecmdata = GetEcmInfo()
self.feraw = self.fedata = self.updateFEdata = None
def getCryptoInfo(self, info):
if info.getInfo(iServiceInformation.sIsCrypted) == 1:
data = self.ecmdata.getEcmData()
self.current_source = data[0]
self.current_caid = data[1]
self.current_provid = data[2]
self.current_ecmpid = data[3]
else:
self.current_source = ""
self.current_caid = "0"
self.current_provid = "0"
self.current_ecmpid = "0"
def createCryptoBar(self, info):
res = ""
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
color="\c0000??00"
else:
color = "\c007?7?7?"
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
color="\c00????00"
except:
pass
if color != "\c007?7?7?" or caid_entry[4]:
if res: res += " "
res += color + caid_entry[3]
res += "\c00??????"
return res
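# Reading of the bar built above: every caid_data entry is rendered as its
# short tag; a tag is highlighted when the service's current or available
# CAIDs fall inside that entry's range and stays grey otherwise, and entries
# flagged False in caid_data are hidden unless they are highlighted.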
def createCryptoBeta(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1700', 16) <= int(self.current_caid, 16) <= int('0x17ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x1700', 16) <= caid <= int('0x17ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'B'
res += "\c00??????"
return res
def createCryptoIrdeto(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x600', 16) <= int(self.current_caid, 16) <= int('0x6ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x600', 16) <= caid <= int('0x6ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'I'
res += "\c00??????"
return res
def createCryptoNagra(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1800', 16) <= int(self.current_caid, 16) <= int('0x18ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x1800', 16) <= caid <= int('0x18ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'N'
res += "\c00??????"
return res
def createCryptoSeca(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x100', 16) <= int(self.current_caid, 16) <= int('0x1ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x100', 16) <= caid <= int('0x1ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'S'
res += "\c00??????"
return res
def createCryptoTandberg(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x1010', 16) <= int(self.current_caid, 16) <= int('0x1010', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x1010', 16) <= caid <= int('0x1010', 16):
color="\c00eeee00"
except:
pass
res = color + 'T'
res += "\c00??????"
return res
def createCryptoVia(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x500', 16) <= int(self.current_caid, 16) <= int('0x5ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x500', 16) <= caid <= int('0x5ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'V'
res += "\c00??????"
return res
def createCryptoBiss(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x2600', 16) <= int(self.current_caid, 16) <= int('0x26ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x2600', 16) <= caid <= int('0x26ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'BI'
res += "\c00??????"
return res
def createCryptoBulCrypt1(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x4aee', 16) <= int(self.current_caid, 16) <= int('0x4aee', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x4aee', 16) <= caid <= int('0x4aee', 16):
color="\c00eeee00"
except:
pass
res = color + 'BU'
res += "\c00??????"
return res
def createCryptoBulCrypt2(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x5581', 16) <= int(self.current_caid, 16) <= int('0x5581', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x5581', 16) <= caid <= int('0x5581', 16):
color="\c00eeee00"
except:
pass
res = color + 'BU'
res += "\c00??????"
return res
def createCryptoConax(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xb00', 16) <= int(self.current_caid, 16) <= int('0xbff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0xb00', 16) <= caid <= int('0xbff', 16):
color="\c00eeee00"
except:
pass
res = color + 'CO'
res += "\c00??????"
return res
def createCryptoCryptoW(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xd00', 16) <= int(self.current_caid, 16) <= int('0xdff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0xd00', 16) <= caid <= int('0xdff', 16):
color="\c00eeee00"
except:
pass
res = color + 'CW'
res += "\c00??????"
return res
def createCryptoDre(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x4ae0', 16) <= int(self.current_caid, 16) <= int('0x4ae1', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x4ae0', 16) <= caid <= int('0x4ae1', 16):
color="\c00eeee00"
except:
pass
res = color + 'DC'
res += "\c00??????"
return res
def createCryptoNDS(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0x900', 16) <= int(self.current_caid, 16) <= int('0x9ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0x900', 16) <= caid <= int('0x9ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'NDS'
res += "\c00??????"
return res
def createCryptoPowerVU(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int('0xe00', 16) <= int(self.current_caid, 16) <= int('0xeff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if int('0xe00', 16) <= caid <= int('0xeff', 16):
color="\c00eeee00"
except:
pass
res = color + 'PV'
res += "\c00??????"
return res
def createCryptoSpecial(self, info):
caid_name = "Free to Air"
try:
for caid_entry in self.caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x:%04x:%04x" % (int(self.current_caid,16), int(self.current_provid,16), info.getInfo(iServiceInformation.sSID))
except:
pass
return ""
def createCryptoNameCaid(self, info):
caid_name = "Free to Air"
if int(self.current_caid,16) == 0:
return caid_name
try:
for caid_entry in self.caid_data:
if int(caid_entry[0], 16) <= int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x" % (int(self.current_caid,16))
except:
pass
return ""
def createResolution(self, info):
video_height = 0
video_width = 0
video_pol = " "
video_rate = 0
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
try:
video_height = int(f.read(),16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
try:
video_width = int(f.read(),16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/progressive"):
f = open("/proc/stb/vmpeg/0/progressive", "r")
try:
video_pol = "p" if int(f.read(),16) else "i"
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/framerate"):
f = open("/proc/stb/vmpeg/0/framerate", "r")
try:
video_rate = int(f.read())
except:
pass
f.close()
fps = str((video_rate + 500) / 1000)
return str(video_width) + "x" + str(video_height) + video_pol + fps
def createVideoCodec(self, info):
return ("MPEG2", "MPEG4 H.264", "MPEG1", "MPEG4-VC", "VC1", "VC1-SM", "HEVC H.265", "")[info.getInfo(iServiceInformation.sVideoType)]
def createServiceRef(self, info):
return info.getInfoString(iServiceInformation.sServiceref)
def createPIDInfo(self, info):
vpid = info.getInfo(iServiceInformation.sVideoPID)
apid = info.getInfo(iServiceInformation.sAudioPID)
pcrpid = info.getInfo(iServiceInformation.sPCRPID)
sidpid = info.getInfo(iServiceInformation.sSID)
tsid = info.getInfo(iServiceInformation.sTSID)
onid = info.getInfo(iServiceInformation.sONID)
if vpid < 0 : vpid = 0
if apid < 0 : apid = 0
if pcrpid < 0 : pcrpid = 0
if sidpid < 0 : sidpid = 0
if tsid < 0 : tsid = 0
if onid < 0 : onid = 0
return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid)
def createTransponderInfo(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
tmp = addspace(self.createChannelNumber(fedata, feraw)) + addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))
else:
tmp = addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))
return addspace(self.createTunerSystem(fedata)) + tmp + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) \
+ addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw))
def createFrequency(self, fedata):
frequency = fedata.get("frequency")
if frequency:
return str(frequency)
return ""
def createChannelNumber(self, fedata, feraw):
return "DVB-T" in feraw.get("tuner_type") and fedata.get("channel") or ""
def createSymbolRate(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
bandwidth = fedata.get("bandwidth")
if bandwidth:
return bandwidth
else:
symbolrate = fedata.get("symbol_rate")
if symbolrate:
return str(symbolrate)
return ""
def createPolarization(self, fedata):
return fedata.get("polarization_abbreviation") or ""
def createFEC(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
code_rate_lp = fedata.get("code_rate_lp")
code_rate_hp = fedata.get("code_rate_hp")
if code_rate_lp and code_rate_hp:
return code_rate_lp + "-" + code_rate_hp
else:
fec = fedata.get("fec_inner")
if fec:
return fec
return ""
def createModulation(self, fedata):
if fedata.get("tuner_type") == _("Terrestrial"):
constellation = fedata.get("constellation")
if constellation:
return constellation
else:
modulation = fedata.get("modulation")
if modulation:
return modulation
return ""
def createTunerType(self, feraw):
return feraw.get("tuner_type") or ""
def createTunerSystem(self, fedata):
return fedata.get("system") or ""
def createOrbPos(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W"
elif orbpos > 0:
return str((float(orbpos)) / 10.0) + "\xc2\xb0 E"
return ""
def createOrbPosOrTunerSystem(self, fedata,feraw):
orbpos = self.createOrbPos(feraw)
		if orbpos != "":
return orbpos
return self.createTunerSystem(fedata)
def createTransponderName(self,feraw):
orbpos = feraw.get("orbital_position")
if orbpos is None: # Not satellite
return ""
freq = feraw.get("frequency")
if freq and freq < 10700000: # C-band
if orbpos > 1800:
orbpos += 1
else:
orbpos -= 1
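		# Orbital positions are stored in tenths of a degree; values up to 1800 are east of
		# Greenwich (e.g. 192 -> 19.2E), larger values count back westwards from 360 degrees.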
sat_names = {
30: 'Rascom/Eutelsat 3E',
48: 'SES 5',
70: 'Eutelsat 7E',
90: 'Eutelsat 9E',
100: 'Eutelsat 10E',
130: 'Hot Bird',
160: 'Eutelsat 16E',
192: 'Astra 1KR/1L/1M/1N',
200: 'Arabsat 20E',
216: 'Eutelsat 21.5E',
235: 'Astra 3',
255: 'Eutelsat 25.5E',
260: 'Badr 4/5/6',
282: 'Astra 2E/2F/2G',
305: 'Arabsat 30.5E',
315: 'Astra 5',
330: 'Eutelsat 33E',
360: 'Eutelsat 36E',
380: 'Paksat',
390: 'Hellas Sat',
400: 'Express 40E',
420: 'Turksat',
450: 'Intelsat 45E',
480: 'Afghansat',
490: 'Yamal 49E',
530: 'Express 53E',
570: 'NSS 57E',
600: 'Intelsat 60E',
620: 'Intelsat 62E',
685: 'Intelsat 68.5E',
705: 'Eutelsat 70.5E',
720: 'Intelsat 72E',
750: 'ABS',
765: 'Apstar',
785: 'ThaiCom',
800: 'Express 80E',
830: 'Insat',
851: 'Intelsat/Horizons',
880: 'ST2',
900: 'Yamal 90E',
915: 'Mesat',
950: 'NSS/SES 95E',
1005: 'AsiaSat 100E',
1030: 'Express 103E',
1055: 'Asiasat 105E',
1082: 'NSS/SES 108E',
1100: 'BSat/NSAT',
1105: 'ChinaSat',
1130: 'KoreaSat',
1222: 'AsiaSat 122E',
1380: 'Telstar 18',
1440: 'SuperBird',
2310: 'Ciel',
2390: 'Echostar/Galaxy 121W',
			2410: 'Echostar/DirecTV 119W',
			2500: 'Echostar/DirecTV 110W',
2630: 'Galaxy 97W',
2690: 'NIMIQ 91W',
2780: 'NIMIQ 82W',
2830: 'Echostar/QuetzSat',
2880: 'AMC 72W',
2900: 'Star One',
2985: 'Echostar 61.5W',
2990: 'Amazonas',
3020: 'Intelsat 58W',
3045: 'Intelsat 55.5W',
3070: 'Intelsat 53W',
3100: 'Intelsat 50W',
3150: 'Intelsat 45W',
3169: 'Intelsat 43.1W',
3195: 'SES 40.5W',
3225: 'NSS/Telstar 37W',
3255: 'Intelsat 34.5W',
3285: 'Intelsat 31.5W',
3300: 'Hispasat',
3325: 'Intelsat 27.5W',
3355: 'Intelsat 24.5W',
3380: 'SES 22W',
3400: 'NSS 20W',
3420: 'Intelsat 18W',
3450: 'Telstar 15W',
3460: 'Express 14W',
3475: 'Eutelsat 12.5W',
3490: 'Express 11W',
3520: 'Eutelsat 8W',
3530: 'Nilesat/Eutelsat 7W',
3550: 'Eutelsat 5W',
3560: 'Amos',
3592: 'Thor/Intelsat'
}
if orbpos in sat_names:
return sat_names[orbpos]
elif orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "W"
else:
return str((float(orbpos)) / 10.0) + "E"
def createProviderName(self,info):
return info.getInfoString(iServiceInformation.sProvider)
@cached
def getText(self):
service = self.source.service
if service is None:
return ""
info = service and service.info()
if not info:
return ""
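		# Dispatch on self.type (set from the converter argument); each branch below renders
		# one specific piece of service information as a string.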
if self.type == "CryptoInfo":
self.getCryptoInfo(info)
if int(config.usage.show_cryptoinfo.value) > 0:
return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
else:
return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info)
if self.type == "CryptoBar":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBar(info)
else:
return ""
if self.type == "CryptoBeta":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBeta(info)
else:
return ""
if self.type == "CryptoIrdeto":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoIrdeto(info)
else:
return ""
if self.type == "CryptoNagra":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNagra(info)
else:
return ""
if self.type == "Seca":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoSeca(info)
else:
return ""
if self.type == "CryptoTandberg":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoTandberg(info)
else:
return ""
if self.type == "CryptoVia":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoVia(info)
else:
return ""
if self.type == "CryptoBiss":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBiss(info)
else:
return ""
if self.type == "CryptoBulCrypt":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoBulCrypt(info)
else:
return ""
if self.type == "CryptoConax":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoConax(info)
else:
return ""
if self.type == "CryptoCryptoW":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoCryptoW(info)
else:
return ""
if self.type == "CryptoDre":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoDre(info)
else:
return ""
if self.type == "CryptoNDS":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNDS(info)
else:
return ""
if self.type == "CryptoPowerVu":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoPowerVu(info)
else:
return ""
if self.type == "CryptoSpecial":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoSpecial(info)
else:
return ""
if self.type == "CryptoNameCaid":
if int(config.usage.show_cryptoinfo.value) > 0:
self.getCryptoInfo(info)
return self.createCryptoNameCaid(info)
else:
return ""
if self.type == "ResolutionString":
return self.createResolution(info)
if self.type == "VideoCodec":
return self.createVideoCodec(info)
if self.updateFEdata:
feinfo = service.frontendInfo()
if feinfo:
self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings")
if self.feraw:
self.fedata = ConvertToHumanReadable(self.feraw)
feraw = self.feraw
if not feraw:
feraw = info.getInfoObject(iServiceInformation.sTransponderData)
if not feraw:
return ""
fedata = ConvertToHumanReadable(feraw)
else:
fedata = self.fedata
if self.type == "All":
self.getCryptoInfo(info)
if int(config.usage.show_cryptoinfo.value) > 0:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata,feraw) + addspace(self.createTransponderName(feraw)) + "\n"\
+ addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n"\
+ addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
else:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata,feraw) + addspace(self.createTransponderName(feraw)) + "\n" \
+ addspace(self.createCryptoBar(info)) + self.current_source + "\n" \
+ addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "ServiceInfo":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) \
+ addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) + addspace(self.createTransponderName(feraw))\
+ addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "TransponderInfo2line":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createTransponderName(feraw)) + '\n'\
+ addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))\
+ addspace(self.createSymbolRate(fedata, feraw)) + self.createModulation(fedata) + '-' + addspace(self.createFEC(fedata, feraw))
if self.type == "PIDInfo":
return self.createPIDInfo(info)
if self.type == "ServiceRef":
return self.createServiceRef(info)
if not feraw:
return ""
if self.type == "TransponderInfo":
return self.createTransponderInfo(fedata, feraw)
if self.type == "TransponderFrequency":
return self.createFrequency(feraw)
if self.type == "TransponderSymbolRate":
return self.createSymbolRate(fedata, feraw)
if self.type == "TransponderPolarization":
return self.createPolarization(fedata)
if self.type == "TransponderFEC":
return self.createFEC(fedata, feraw)
if self.type == "TransponderModulation":
return self.createModulation(fedata)
if self.type == "OrbitalPosition":
return self.createOrbPos(feraw)
if self.type == "TunerType":
return self.createTunerType(feraw)
if self.type == "TunerSystem":
return self.createTunerSystem(fedata)
if self.type == "OrbitalPositionOrTunerSystem":
return self.createOrbPosOrTunerSystem(fedata,feraw)
if self.type == "TerrestrialChannelNumber":
return self.createChannelNumber(fedata, feraw)
return _("invalid type")
text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
request_caid = None
for x in self.ca_table:
if x[0] == self.type:
request_caid = x[1]
request_selected = x[2]
break
if request_caid is None:
return False
if info.getInfo(iServiceInformation.sIsCrypted) != 1:
return False
data = self.ecmdata.getEcmData()
if data is None:
return False
current_caid = data[1]
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
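			# request_selected distinguishes "this CA system is currently decoding the service"
			# (compare against the caid from the active ECM) from "this CA system is merely
			# advertised for the service" (compare against any of the available caids).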
for caid_entry in self.caid_data:
if caid_entry[3] == request_caid:
if request_selected:
if int(caid_entry[0], 16) <= int(current_caid, 16) <= int(caid_entry[1], 16):
return True
else: # request available
try:
for caid in available_caids:
if int(caid_entry[0], 16) <= caid <= int(caid_entry[1], 16):
return True
except:
pass
return False
boolean = property(getBool)
def changed(self, what):
if what[0] == self.CHANGED_SPECIFIC:
self.updateFEdata = False
if what[1] == iPlayableService.evNewProgramInfo:
self.updateFEdata = True
if what[1] == iPlayableService.evEnd:
self.feraw = self.fedata = None
Converter.changed(self, what)
elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
self.updateFEdata = False
Converter.changed(self, what)
|
oostende/openblachole
|
lib/python/Components/Converter/PliExtraInfo.py
|
Python
|
gpl-2.0
| 25,721
|
[
"Galaxy"
] |
e7c14b4c26fe52110c505d8a0442ad250d64f7ffbed9acd937ef68fc981c2246
|
#!/usr/bin/env python2.7
import os
import httplib2
import oauth2client
import apiclient
import flask
from uuid import uuid4
from flask import Flask, render_template, request, g
from models import users_model, index_model, teachers_model, students_model, \
courses_model, model
from google.cloud import datastore
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
app.secret_key = str(uuid4())
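# Note: the secret key is regenerated on every start, so existing Flask sessions
# are invalidated whenever the application restarts.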
@app.before_request
def before_request():
pass
@app.before_request
def teacher_session():
if '/teacher/' in request.path:
if 'credentials' not in flask.session:
return flask.redirect(flask.url_for('index'))
elif not flask.session['is_teacher']:
return flask.redirect(flask.url_for('register'))
@app.before_request
def student_session():
if '/student/' in request.path:
if 'credentials' not in flask.session:
return flask.redirect(flask.url_for('index'))
elif not flask.session['is_student']:
return flask.redirect(flask.url_for('register'))
# make sure user is authenticated w/ live session on every request
@app.before_request
def manage_session():
    # we want to go through the OAuth flow for this route specifically,
    # and not get stuck in a redirect loop
if request.path == '/oauth/callback':
return
# allow all users to visit the index page without a session
if request.path == '/' or request.path == '/oauth/logout':
return
# validate that user has valid session
# add the google user info into session
if 'credentials' not in flask.session:
flask.session['redirect'] = request.path
return flask.redirect(flask.url_for('oauth2callback'))
@app.teardown_request
def teardown_request(exception):
pass
@app.route('/switch_type', methods=['POST'])
def switch_type():
im = index_model.Index(flask.session['id'])
if request.form['type'] == 'teacher':
if im.is_teacher():
return flask.redirect(flask.url_for('main_teacher'))
else:
return flask.redirect(flask.url_for('register'))
elif request.form['type'] == 'student':
if im.is_student():
return flask.redirect(flask.url_for('main_student'))
else:
return flask.redirect(flask.url_for('register'))
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('login.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
im = index_model.Index(flask.session['id'])
if im.is_student():
print flask.url_for('main_student')
return flask.redirect(flask.url_for('main_student'))
elif im.is_teacher():
return flask.redirect(flask.url_for('main_teacher'))
else:
return render_template('login.html', not_registered=True)
@app.route('/student/', methods=['GET', 'POST'])
def main_student():
sm = students_model.Students(flask.session['id'])
courses = sm.get_courses()
context = dict(data=courses)
signed_in = True if sm.has_signed_in() else False
if request.method == 'GET':
return render_template(
'main_student.html',
signed_in=signed_in,
**context)
elif request.method == 'POST':
if 'secret_code' in request.form.keys():
provided_secret = request.form['secret_code']
actual_secret, seid = sm.get_secret_and_seid()
if int(provided_secret) == int(actual_secret):
sm.insert_attendance_record(seid)
valid = True
else:
valid = False
return render_template(
'main_student.html',
submitted=True,
valid=valid,
**context)
@app.route('/teacher/', methods=['GET', 'POST'])
def main_teacher():
tm = teachers_model.Teachers(flask.session['id'])
if request.method == 'POST':
cm = courses_model.Courses()
if "close" in request.form.keys():
cid = request.form["close"]
cm.cid = cid
cm.close_session(cm.get_active_session())
elif "open" in request.form.keys():
cid = request.form["open"]
cm.cid = cid
cm.open_session()
courses = tm.get_courses_with_session()
empty = True if len(courses) == 0 else False
context = dict(data=courses)
return render_template('main_teacher.html', empty=empty, **context)
@app.route('/teacher/add_class', methods=['POST', 'GET'])
def add_class():
tm = teachers_model.Teachers(flask.session['id'])
if request.method == 'GET':
return render_template('add_class.html')
elif request.method == 'POST':
# first check that all unis are valid
um = users_model.Users()
for uni in request.form['unis'].split('\n'):
uni = uni.strip('\r')
# always reads at least one empty line from form
if not uni:
continue
if not um.is_valid_uni(uni):
return render_template('add_class.html', invalid_uni=True)
# then create course and add students to course
course_name = request.form['classname']
cid = tm.add_course(course_name)
cm = courses_model.Courses(cid)
for uni in request.form['unis'].split('\n'):
uni = uni.strip('\r')
cm.add_student(uni)
return flask.redirect(flask.url_for('main_teacher'))
@app.route('/teacher/remove_class', methods=['POST', 'GET'])
def remove_class():
tm = teachers_model.Teachers(flask.session['id'])
# show potential courses to remove on get request
if request.method == 'GET':
courses = tm.get_courses()
context = dict(data=courses)
return render_template('remove_class.html', **context)
# remove course by cid
elif request.method == 'POST':
cid = request.form['cid']
tm.remove_course(cid)
return flask.redirect(flask.url_for('main_teacher'))
@app.route('/teacher/view_class', methods=['POST', 'GET'])
def view_class():
    if request.method == 'GET':
        return flask.redirect(flask.url_for('main_teacher'))
elif request.method == 'POST':
cm = courses_model.Courses()
if 'close' in request.form.keys():
cid = request.form['close']
cm.cid = cid
cm.close_session(cm.get_active_session())
elif 'open' in request.form.keys():
cid = request.form['open']
cm.cid = cid
cm.open_session()
else:
cid = request.form['cid']
cm.cid = cid
res = 0
uni = None
if 'add_student' in request.form.keys():
uni = request.form['add_student']
res = cm.add_student(uni)
elif 'remove_student' in request.form.keys():
uni = request.form['remove_student']
res = cm.remove_student(uni)
course_name = cm.get_course_name()
secret = cm.get_secret_code()
num_sessions = cm.get_num_sessions()
students = cm.get_students()
students_with_ar = []
for student in students:
sm = students_model.Students(student['id'])
student_uni = sm.get_uni()
num_ar = sm.get_num_attendance_records(cid)
students_with_ar.append([student, student_uni, num_ar])
context = dict(students=students_with_ar)
return render_template(
'view_class.html',
cid=cid,
secret=secret,
course_name=course_name,
num_sessions=num_sessions,
uni=uni,
res=res,
**context)
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'GET':
return render_template(
'register.html',
name=flask.session['google_user']['name'],
is_student=flask.session['is_student'],
is_teacher=flask.session['is_teacher']
)
elif request.method == 'POST':
m = model.Model()
ds = m.get_client()
if request.form['type'] == 'student':
# check that uni doesn't already exist
# if it doesn't, continue student creation
um = users_model.Users()
if not um.is_valid_uni(request.form['uni']):
key = ds.key('student')
entity = datastore.Entity(
key=key)
entity.update({
'sid': flask.session['id'],
'uni': request.form['uni']
})
ds.put(entity)
flask.session['is_student'] = True
return flask.redirect(flask.url_for('main_student'))
else:
return render_template(
'register.html',
name=flask.session['google_user']['name'],
invalid_uni=True)
else:
try:
key = ds.key('teacher')
entity = datastore.Entity(
key=key)
entity.update({
'tid': flask.session['id']
})
ds.put(entity)
flask.session['is_teacher'] = True
except:
pass
return flask.redirect(flask.url_for('main_teacher'))
@app.route('/oauth/callback')
def oauth2callback():
flow = oauth2client.client.flow_from_clientsecrets(
'client_secrets_oauth.json',
scope=[
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile'],
redirect_uri=flask.url_for('oauth2callback', _external=True))
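    # Two-phase flow: with no 'code' in the query string we are starting the handshake and
    # redirect the user to Google; once Google redirects back with a code we exchange it for
    # credentials and store them in the session.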
if 'code' not in flask.request.args:
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
else:
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
# use token to get user profile from google oauth api
http_auth = credentials.authorize(httplib2.Http())
userinfo_client = apiclient.discovery.build('oauth2', 'v2', http_auth)
user = userinfo_client.userinfo().v2().me().get().execute()
# TODO only allow columbia.edu emails
# if 'columbia.edu' not in user['email']:
# return flask.redirect(flask.url_for('bademail'))
um = users_model.Users()
flask.session['google_user'] = user
flask.session['id'] = um.get_or_create_user(user)
# now add is_student and is_teacher to flask.session
im = index_model.Index(flask.session['id'])
flask.session['is_student'] = True if im.is_student() else False
flask.session['is_teacher'] = True if im.is_teacher() else False
redirect = flask.session['redirect']
flask.session.pop('redirect', None)
return flask.redirect(redirect)
@app.route('/oauth/logout', methods=['POST', 'GET'])
def logout():
flask.session.clear()
return flask.redirect(flask.url_for('index'))
|
keirl/coms4156_jumpstart
|
imhere/imhere.py
|
Python
|
apache-2.0
| 11,312
|
[
"VisIt"
] |
8c181ad0911bb645fb9c33be0eef033132f8e4a45d1e6998b20dcfa2bfb88425
|
#!/usr/bin/env python
# Imports
import sys
def error(msg): sys.exit('Error: ' + msg)  # minimal helper; the import guards below rely on it
try: import argparse
except: error('This version of python is not new enough. python 2.7 or newer is required.')
try: from netCDF4 import Dataset
except: error('Unable to import netCDF4 module. Check your PYTHONPATH.\n'
+'Perhaps try:\n module load python_netcdf4')
try: import numpy
except: error('Unable to import numpy module. Check your PYTHONPATH.\n'
+'Perhaps try:\n module load python_numpy')
def main():
# Command line arguments
parser = argparse.ArgumentParser(description=
'''Extracts the recorded edits in a topogaphy file, if any.
''',
epilog='Written by A.Adcroft, 2015.')
parser.add_argument('topography_file', type=str,
help='netcdf file to read.')
parser.add_argument('-o','--output', type=str,
nargs='?', default=' ',
help='Write list of edits to output file. If no output file is specified, edits are reported to the screen.')
optCmdLineArgs = parser.parse_args()
createGUI(optCmdLineArgs.topography_file, optCmdLineArgs.output)
def createGUI(fileName, outFile):
# Open netcdf file
try: rg = Dataset(fileName, 'r')
except: error('There was a problem opening "'+fileName+'".')
if not ( 'iEdit' in rg.variables and 'jEdit' in rg.variables and 'zEdit' in rg.variables):
print fileName,'does not have any recorded edits'
return
iEdit = rg.variables['iEdit'][:]
jEdit = rg.variables['jEdit'][:]
zEdit = rg.variables['zEdit'][:]
zEdit_units = rg.variables['zEdit'].units
depths = numpy.zeros( zEdit.shape[0] )
(nj,ni) = rg.variables['depth'].shape
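  # Look up the value currently stored in the depth field at each edited (i,j) location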
for n,(i,j) in enumerate(zip(iEdit, jEdit)):
depths[n] = rg.variables['depth'][j,i]
if outFile == ' ':
print 'Edits apply to a dataset of dimensions %i x %i'%(ni,nj)
for n,(i,j,oz,nz) in enumerate(zip(iEdit, jEdit, zEdit, depths)):
print '%5i: i=%4i, j=%4i, old depth=%8.2f, new depth=%8.2f'%(n,i,j,oz,nz)
return
rg.close()
rg = Dataset(outFile, 'w', format='NETCDF3_CLASSIC')
nc_nEdits = rg.createDimension('nEdits', None)
nc_iEdit = rg.createVariable('iEdit', 'i', ('nEdits',))
nc_jEdit = rg.createVariable('jEdit', 'i', ('nEdits',))
nc_zEdit = rg.createVariable('zEdit', 'i', ('nEdits',))
nc_ni = rg.createVariable('ni', 'i')
nc_nj = rg.createVariable('nj', 'i')
nc_iEdit.long_name = 'i-index of edited data'
nc_jEdit.long_name = 'j-index of edited data'
nc_zEdit.long_name = 'New value of data'
nc_zEdit.units = zEdit_units
nc_ni.long_name = 'The size of the i-dimension of the dataset these edits apply to'
nc_nj.long_name = 'The size of the j-dimension of the dataset these edits apply to'
nc_ni[:] = ni
nc_nj[:] = nj
for n,(i,j,z) in enumerate(zip(iEdit, jEdit, depths)):
nc_iEdit[n] = i
nc_jEdit[n] = j
nc_zEdit[n] = z
rg.close()
# Invoke main()
if __name__ == '__main__': main()
|
nicjhan/MOM6-examples
|
ice_ocean_SIS2/OM4_025/preprocessing/extract_edits.py
|
Python
|
gpl-3.0
| 2,926
|
[
"NetCDF"
] |
392f9eff8d5908af27b679935a651c61a9ec6b0ed582638d9507109f733678ed
|
# coding=utf-8
"""Login in the application feature tests."""
import pytest
from django.contrib.auth.models import User
from model_mommy import mommy
from pytest_bdd import (
given,
scenario,
then,
when,
)
from quiz.models import Unity, Lesson
@pytest.mark.django_db
@scenario('features/signin.feature', 'Login fails for invalid user')
def test_login_fails_for_invalid_user():
"""Login fails for invalid user."""
@pytest.mark.django_db
@scenario('features/signin.feature', 'Login fails for unregistred user')
def test_login_fails_for_unregistred_user():
"""Login fails for unregistred user."""
@pytest.mark.django_db
@scenario('features/signin.feature', 'Successful login')
def test_successful_login():
"""Successful login."""
@given('Jack, an user with password 1234')
def jack_an_user_with_password_1234():
"""Jack, an user with password 1234."""
User.objects.create_user(username='jack', password='1234')
@when('Jack logs in with password 1234')
def jack_logs_in_with_password_1234(browser):
"""Jack logs in with password 1234."""
browser.visit('http://localhost:8000')
browser.fill('username', 'jack')
browser.fill('password', '1234')
browser.find_by_id('submit').first.click()
@when('Jack logs in with password errado')
def jack_logs_in_with_password_errado(browser):
"""Jack logs in with password errado."""
browser.visit('http://localhost:8000')
browser.fill('username', 'jack')
browser.fill('password', 'errado')
browser.find_by_id('submit').first.click()
@then('he sees the list of lessons')
def he_sees_the_list_of_lessons(browser):
    """he sees the list of lessons."""
unity = mommy.make(Unity, name='Unity 2', level=2)
mommy.make(Lesson, name='Lesson AND', unity=unity)
browser.reload()
assert browser.is_text_present('Level 2 - Unity 2') is True
@then('login fails')
def login_fails(browser):
"""login fails."""
assert browser.is_text_present('Level 2 - Unity 2') is False
assert 'http://localhost:8000/accounts/login/?next=/' in browser.url
|
jesuejunior/golingo
|
quiz/tests/functional/test_signin.py
|
Python
|
bsd-3-clause
| 2,071
|
[
"VisIt"
] |
e2ea9f9fa823f24534361e2e01888dae702081800b5e1f5480c65990b2b13a74
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'PreviousObstetricHistory.problems'
db.delete_column(u'patient_previousobstetrichistory', 'problems')
# Adding field 'PreviousObstetricHistory.maternal_complication'
db.add_column(u'patient_previousobstetrichistory', 'maternal_complication',
self.gf('django.db.models.fields.CharField')(default='', max_length=2),
keep_default=False)
# Adding field 'PreviousObstetricHistory.perinatal_complication'
db.add_column(u'patient_previousobstetrichistory', 'perinatal_complication',
self.gf('django.db.models.fields.CharField')(default='', max_length=2),
keep_default=False)
def backwards(self, orm):
# Adding field 'PreviousObstetricHistory.problems'
db.add_column(u'patient_previousobstetrichistory', 'problems',
self.gf('django.db.models.fields.CharField')(default='', max_length=2),
keep_default=False)
# Deleting field 'PreviousObstetricHistory.maternal_complication'
db.delete_column(u'patient_previousobstetrichistory', 'maternal_complication')
# Deleting field 'PreviousObstetricHistory.perinatal_complication'
db.delete_column(u'patient_previousobstetrichistory', 'perinatal_complication')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_injection': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'maternal_complication': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'perinatal_complication': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient']
|
aazhbd/medical_info01
|
patient/migrations/0026_auto__del_field_previousobstetrichistory_problems__add_field_previouso.py
|
Python
|
bsd-3-clause
| 30,623
|
[
"VisIt"
] |
48e681658e693b6ed7271e78d251ded95514de58c13e47b9c77c3873728cf7f0
|
# $Id$
#
# Copyright (C) 2001-2008 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" unit testing code for composite models
"""
import unittest
import io
import pickle
from rdkit.ML.Composite import Composite
from rdkit.ML.DecTree.DecTree import DecTreeNode as Node
from rdkit import RDConfig
class TestCase(unittest.TestCase):
def setUp(self):
with open(RDConfig.RDCodeDir + '/ML/Composite/test_data/ferro.pkl', 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
self.examples = pickle.load(pklF)
self.varNames = ['composition', 'max_atomic', 'has3d', 'has4d', 'has5d', 'elconc', 'atvol',
'isferro']
self.qBounds = [[], [1.89, 3.53], [], [], [], [0.55, 0.73], [11.81, 14.52], []]
self.nPoss = [0, 3, 2, 2, 2, 3, 3, 2]
self.attrs = list(range(1, len(self.varNames) - 1))
from rdkit.ML.Data import DataUtils
DataUtils.InitRandomNumbers((23, 43))
def testQuantize(self):
# testing data quantization
qBounds = [[], [1, 2, 3]]
examples = [['foo', 0], ['foo', 1.5], ['foo', 5.5], ['foo', 2.5]]
answers = [['foo', 0], ['foo', 1], ['foo', 3], ['foo', 2]]
nPoss = [0, 4]
composite = Composite.Composite()
composite.SetQuantBounds(qBounds, nPoss)
for i in range(len(examples)):
qEx = composite.QuantizeExample(examples[i])
self.assertEqual(qEx, answers[i])
def testTreeGrow(self):
# testing tree-based composite
with open(RDConfig.RDCodeDir + '/ML/Composite/test_data/composite_base.pkl', 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
self.refCompos = pickle.load(pklF)
composite = Composite.Composite()
composite._varNames = self.varNames
composite.SetQuantBounds(self.qBounds, self.nPoss)
from rdkit.ML.DecTree import CrossValidate
driver = CrossValidate.CrossValidationDriver
pruner = None
composite.Grow(self.examples, self.attrs, [], buildDriver=driver, pruner=pruner, nTries=100,
silent=1)
composite.AverageErrors()
composite.SortModels(sortOnError=False)
self.assertEqual(composite.countList, sorted(composite.countList))
self.assertNotEqual(composite.errList, sorted(composite.errList))
composite.SortModels()
self.assertNotEqual(composite.countList, sorted(composite.countList))
self.assertEqual(composite.errList, sorted(composite.errList))
# with open(RDConfig.RDCodeDir+'/ML/Composite/test_data/composite_base.pkl','wb') as pklF:
# pickle.dump(composite,pklF)
self.treeComposite = composite
self.assertEqual(len(composite), len(self.refCompos))
for i in range(len(composite)):
t1, c1, e1 = composite[i]
t2, c2, e2 = self.refCompos[i]
self.assertEqual(e1, e2)
# we used to check for equality here, but since there are redundant errors,
# that's non-trivial.
# assert t1 == t2, 'tree mismatch'
# assert c1 == c2, 'count mismatch'
s = str(composite)
self.assertIn('Composite', s)
self.assertIn('Model', s)
self.assertIn('error', s)
def testErrorEstimate(self):
# testing out-of-bag error estimates
compos = Composite.Composite()
compos.SetQuantBounds([(0.5, ), (0.5, ), (0.5, ), []], [2, 2, 2, 2])
compos.SetDescriptorNames(('D0', 'D1', 'D2', 'Act'))
compos.SetInputOrder(('ID', 'D0', 'D1', 'D2', 'Act'))
data = [['A', 0, 0, 0, 0], ['B', 1, 0, 0, 1], ['C', 0, 1, 0, 0], ['D', 1, 1, 1, 1]]
#
# Build and validate three simple trees:
#
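    # Each tree is constructed by hand: the root splits on one descriptor, each child node
    # splits on a second descriptor, and the terminal nodes carry the predicted class (0 or 1).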
t1 = Node(None, 'D0', 0)
n = Node(t1, 'D1', 1)
t1.AddChildNode(n)
n.AddChildNode(Node(n, '0', 0, isTerminal=1))
n.AddChildNode(Node(n, '1', 1, isTerminal=1))
n = Node(t1, 'D2', 2)
t1.AddChildNode(n)
n.AddChildNode(Node(n, '1', 1, isTerminal=1))
n.AddChildNode(Node(n, '0', 0, isTerminal=1))
assert t1.ClassifyExample(data[0][1:]) == 0
assert t1.ClassifyExample(data[1][1:]) == 1
assert t1.ClassifyExample(data[2][1:]) == 1
assert t1.ClassifyExample(data[3][1:]) == 0
t1._trainIndices = (0, 1)
compos.AddModel(t1, .5)
t2 = Node(None, 'D1', 1)
n = Node(t2, 'D0', 0)
t2.AddChildNode(n)
n.AddChildNode(Node(n, '0', 0, isTerminal=1))
n.AddChildNode(Node(n, '1', 1, isTerminal=1))
n = Node(t2, 'D2', 2)
t2.AddChildNode(n)
n.AddChildNode(Node(n, '0', 0, isTerminal=1))
n.AddChildNode(Node(n, '1', 1, isTerminal=1))
assert t2.ClassifyExample(data[0][1:]) == 0
assert t2.ClassifyExample(data[1][1:]) == 1
assert t2.ClassifyExample(data[2][1:]) == 0
assert t2.ClassifyExample(data[3][1:]) == 1
t2._trainIndices = (1, 2)
compos.AddModel(t2, 0.0)
t3 = Node(None, 'D0', 0)
n = Node(t3, 'D2', 2)
t3.AddChildNode(n)
n.AddChildNode(Node(n, '0', 0, isTerminal=1))
n.AddChildNode(Node(n, '1', 1, isTerminal=1))
n = Node(t3, 'D1', 1)
t3.AddChildNode(n)
n.AddChildNode(Node(n, '0', 0, isTerminal=1))
n.AddChildNode(Node(n, '1', 1, isTerminal=1))
assert t3.ClassifyExample(data[0][1:]) == 0
assert t3.ClassifyExample(data[1][1:]) == 0
assert t3.ClassifyExample(data[2][1:]) == 0
assert t3.ClassifyExample(data[3][1:]) == 1
t3._trainIndices = (2, 3)
compos.AddModel(t3, 0.25)
#
# validate the composite itself:
#
pred, conf = compos.ClassifyExample(data[0])
assert pred == 0
assert conf == 1.0
pred, conf = compos.ClassifyExample(data[1])
assert pred == 1
assert conf == 2. / 3.
pred, conf = compos.ClassifyExample(data[2])
assert pred == 0
assert conf == 2. / 3.
pred, conf = compos.ClassifyExample(data[3])
assert pred == 1
assert conf == 2. / 3.
self.assertEqual(compos.GetVoteDetails(), [0, 1, 1])
self.assertEqual(compos.GetInputOrder(), [1, 2, 3, 4])
#
# now test the out-of-bag calculation:
#
pred, conf = compos.ClassifyExample(data[0], onlyModels=(1, 2))
assert pred == 0
assert conf == 1.0
pred, conf = compos.ClassifyExample(data[1], onlyModels=(2, ))
assert pred == 0
assert conf == 1.0
pred, conf = compos.ClassifyExample(data[2], onlyModels=(0, ))
assert pred == 1
assert conf == 1.0
pred, conf = compos.ClassifyExample(data[3], onlyModels=(0, 1))
assert pred == 0
assert conf == 0.5
compos.ClearModelExamples()
def test_exceptions(self):
compos = Composite.Composite()
compos.SetQuantBounds([(0.5, ), (0.5, ), (0.5, ), []], [2, 2, 2, 2])
compos.SetDescriptorNames(('ID', 'D0', 'D1', 'D2', 'Act'))
compos.SetInputOrder(('ID', 'D2', 'D1', 'D0', 'Act'))
self.assertEqual(compos._mapOrder, [0, 3, 2, 1, 4])
# Probes caught exception for ID
compos.SetInputOrder(('D2', 'D1', 'D0', 'Act'))
self.assertEqual(compos._mapOrder, [0, 2, 1, 0, 3])
# Probes caught exception for Act
compos.SetInputOrder(('ID', 'D2', 'D1', 'D0'))
self.assertEqual(compos._mapOrder, [0, 3, 2, 1, -1])
self.assertRaises(ValueError, compos.SetInputOrder, ('Unknown', 'D0'))
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
rdkit/ML/Composite/UnitTestComposite.py
|
Python
|
bsd-3-clause
| 7,222
|
[
"RDKit"
] |
3bdfa92a82ce6b490a640ba112adeb8ec086febdd57dcd221d70dc925b38d764
|
########################################################################
# File : JobWrapper.py
# Author : Stuart Paterson
########################################################################
""" The Job Wrapper Class is instantiated with arguments tailored for running
a particular job. The JobWrapper starts a thread for execution of the job
and a Watchdog Agent that can monitor progress.
"""
__RCSID__ = "$Id: $"
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.FailoverTransfer import FailoverTransfer
from DIRAC.Resources.Catalog.PoolXMLFile import getGUID
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.JobWrapper.WatchdogFactory import WatchdogFactory
from DIRAC.AccountingSystem.Client.Types.Job import Job as AccountingJob
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemSection
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Core.Utilities.Subprocess import Subprocess
from DIRAC.Core.Utilities.File import getGlobbedTotalSize, getGlobbedFiles
from DIRAC.Core.Utilities.Version import getCurrentVersion
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities import List, Time
from DIRAC.Core.Utilities import DEncode
from DIRAC import S_OK, S_ERROR, gConfig, gLogger, Time
import DIRAC
import os
import stat
import re
import sys
import time
import shutil
import threading
import tarfile
import glob
import types
import urllib
EXECUTION_RESULT = {}
class JobWrapper( object ):
#############################################################################
def __init__( self, jobID = None, jobReport = None ):
""" Standard constructor
"""
self.initialTiming = os.times()
self.section = os.path.join( getSystemSection( 'WorkloadManagement/JobWrapper' ), 'JobWrapper' )
self.log = gLogger
# Create the accounting report
self.accountingReport = AccountingJob()
# Initialize for accounting
self.wmsMajorStatus = "unknown"
self.wmsMinorStatus = "unknown"
# Set now as start time
self.accountingReport.setStartTime()
if not jobID:
self.jobID = 0
else:
self.jobID = jobID
self.siteName = gConfig.getValue( '/LocalSite/Site', 'Unknown' )
if jobReport:
self.jobReport = jobReport
else:
self.jobReport = JobReport( self.jobID, 'JobWrapper@%s' % self.siteName )
self.failoverTransfer = FailoverTransfer()
# self.root is the path the Wrapper is running at
self.root = os.getcwd()
# self.localSiteRoot is the path where the local DIRAC installation used to run the payload
# is taken from
self.localSiteRoot = gConfig.getValue( '/LocalSite/Root', DIRAC.rootPath )
# FIXME: Why do we need to load any .cfg file here????
self.__loadLocalCFGFiles( self.localSiteRoot )
result = getCurrentVersion()
if result['OK']:
self.diracVersion = result['Value']
else:
self.diracVersion = 'DIRAC version %s' % DIRAC.buildVersion
self.maxPeekLines = gConfig.getValue( self.section + '/MaxJobPeekLines', 20 )
if self.maxPeekLines < 0:
self.maxPeekLines = 0
self.defaultCPUTime = gConfig.getValue( self.section + '/DefaultCPUTime', 600 )
self.defaultOutputFile = gConfig.getValue( self.section + '/DefaultOutputFile', 'std.out' )
self.defaultErrorFile = gConfig.getValue( self.section + '/DefaultErrorFile', 'std.err' )
self.diskSE = gConfig.getValue( self.section + '/DiskSE', ['-disk', '-DST', '-USER'] )
self.tapeSE = gConfig.getValue( self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'] )
self.sandboxSizeLimit = gConfig.getValue( self.section + '/OutputSandboxLimit', 1024 * 1024 * 10 )
self.cleanUpFlag = gConfig.getValue( self.section + '/CleanUpFlag', True )
self.pilotRef = gConfig.getValue( '/LocalSite/PilotReference', 'Unknown' )
self.cpuNormalizationFactor = gConfig.getValue ( "/LocalSite/CPUNormalizationFactor", 0.0 )
self.bufferLimit = gConfig.getValue( self.section + '/BufferLimit', 10485760 )
self.defaultOutputSE = gConfig.getValue( '/Resources/StorageElementGroups/SE-USER', [] )
self.defaultCatalog = gConfig.getValue( self.section + '/DefaultCatalog', [] )
self.masterCatalogOnlyFlag = gConfig.getValue( self.section + '/MasterCatalogOnlyFlag', True )
self.defaultFailoverSE = gConfig.getValue( '/Resources/StorageElementGroups/Tier1-Failover', [] )
self.defaultOutputPath = ''
self.dm = DataManager()
self.fc = FileCatalog()
self.log.verbose( '===========================================================================' )
self.log.verbose( 'SVN version %s' % ( __RCSID__ ) )
self.log.verbose( self.diracVersion )
self.log.verbose( 'Developer tag: 2' )
self.currentPID = os.getpid()
self.log.verbose( 'Job Wrapper started under PID: %s' % self.currentPID )
# Define a new process group for the job wrapper
self.parentPGID = os.getpgid( self.currentPID )
self.log.verbose( 'Job Wrapper parent process group ID: %s' % self.parentPGID )
os.setpgid( self.currentPID, self.currentPID )
self.currentPGID = os.getpgid( self.currentPID )
self.log.verbose( 'Job Wrapper process group ID: %s' % self.currentPGID )
self.log.verbose( '==========================================================================' )
self.log.verbose( 'sys.path is: \n%s' % '\n'.join( sys.path ) )
self.log.verbose( '==========================================================================' )
if 'PYTHONPATH' not in os.environ:
self.log.verbose( 'PYTHONPATH is: null' )
else:
pypath = os.environ['PYTHONPATH']
self.log.verbose( 'PYTHONPATH is: \n%s' % '\n'.join( pypath.split( ':' ) ) )
self.log.verbose( '==========================================================================' )
if 'LD_LIBRARY_PATH_SAVE' in os.environ:
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH'] += ':' + os.environ['LD_LIBRARY_PATH_SAVE']
else:
os.environ['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH_SAVE']
if 'LD_LIBRARY_PATH' not in os.environ:
self.log.verbose( 'LD_LIBRARY_PATH is: null' )
else:
ldpath = os.environ['LD_LIBRARY_PATH']
self.log.verbose( 'LD_LIBRARY_PATH is: \n%s' % '\n'.join( ldpath.split( ':' ) ) )
self.log.verbose( '==========================================================================' )
if not self.cleanUpFlag:
self.log.verbose( 'CleanUp Flag is disabled by configuration' )
# Failure flag
self.failedFlag = True
# Set defaults for some global parameters to be defined for the accounting report
self.owner = 'unknown'
self.jobGroup = 'unknown'
self.jobType = 'unknown'
self.processingType = 'unknown'
self.userGroup = 'unknown'
self.jobClass = 'Single'
self.inputDataFiles = 0
self.outputDataFiles = 0
self.inputDataSize = 0
self.inputSandboxSize = 0
self.outputSandboxSize = 0
self.outputDataSize = 0
self.processedEvents = 0
self.jobAccountingSent = False
self.jobArgs = {}
self.optArgs = {}
self.ceArgs = {}
#############################################################################
def initialize( self, arguments ):
""" Initializes parameters and environment for job.
"""
self.__report( 'Running', 'Job Initialization' )
self.log.info( 'Starting Job Wrapper Initialization for Job %s' % ( self.jobID ) )
self.jobArgs = arguments['Job']
self.log.verbose( self.jobArgs )
self.ceArgs = arguments ['CE']
self.log.verbose( self.ceArgs )
self.__setInitialJobParameters()
self.optArgs = arguments.get( 'Optimizer', {} )
# Fill some parameters for the accounting report
self.owner = self.jobArgs.get( 'Owner', self.owner )
self.jobGroup = self.jobArgs.get( 'JobGroup', self.jobGroup )
self.jobType = self.jobArgs.get( 'JobType', self.jobType )
dataParam = self.jobArgs.get( 'InputData', [] )
if dataParam and not type( dataParam ) == types.ListType:
dataParam = [dataParam]
self.inputDataFiles = len( dataParam )
dataParam = self.jobArgs.get( 'OutputData', [] )
if dataParam and not type( dataParam ) == types.ListType:
dataParam = [dataParam]
self.outputDataFiles = len( dataParam )
self.processingType = self.jobArgs.get( 'ProcessingType', self.processingType )
self.userGroup = self.jobArgs.get( 'OwnerGroup', self.userGroup )
self.jobClass = self.jobArgs.get( 'JobSplitType', self.jobClass )
# Prepare the working directory, cd into it, and copy any extra arguments into it
if self.jobID:
if os.path.exists( str( self.jobID ) ):
shutil.rmtree( str( self.jobID ) )
os.mkdir( str( self.jobID ) )
os.chdir( str( self.jobID ) )
extraOpts = self.jobArgs.get( 'ExtraOptions', '' )
if extraOpts:
if os.path.exists( '%s/%s' % ( self.root, extraOpts ) ):
shutil.copyfile( '%s/%s' % ( self.root, extraOpts ), extraOpts )
else:
self.log.info( 'JobID is not defined, running in current directory' )
infoFile = open( 'job.info', 'w' )
infoFile.write( self.__dictAsInfoString( self.jobArgs, '/Job' ) )
infoFile.close()
#############################################################################
def __setInitialJobParameters( self ):
"""Sets some initial job parameters
"""
parameters = []
if 'LocalSE' in self.ceArgs:
parameters.append( ( 'AgentLocalSE', ','.join( self.ceArgs['LocalSE'] ) ) )
if 'PilotReference' in self.ceArgs:
parameters.append( ( 'Pilot_Reference', self.ceArgs['PilotReference'] ) )
if 'CPUScalingFactor' in self.ceArgs:
parameters.append( ( 'CPUScalingFactor', self.ceArgs['CPUScalingFactor'] ) )
if 'CPUNormalizationFactor' in self.ceArgs:
parameters.append( ( 'CPUNormalizationFactor', self.ceArgs['CPUNormalizationFactor'] ) )
parameters.append( ( 'PilotAgent', self.diracVersion ) )
parameters.append( ( 'JobWrapperPID', self.currentPID ) )
result = self.__setJobParamList( parameters )
return result
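# Illustrative note (added for clarity, not part of the original code): the parameter
# list built above is a list of ( name, value ) tuples handed to __setJobParamList.
# With hypothetical CE arguments it would look roughly like:
#   [ ( 'AgentLocalSE', 'CERN-USER,CERN-RAW' ),
#     ( 'Pilot_Reference', 'https://some.pilot/reference' ),
#     ( 'PilotAgent', 'DIRAC version v6r12' ),
#     ( 'JobWrapperPID', 12345 ) ]
# The SE names, pilot reference and version string here are made-up example values.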
#############################################################################
def __loadLocalCFGFiles( self, localRoot ):
"""Loads any extra CFG files residing in the local DIRAC site root.
"""
files = os.listdir( localRoot )
self.log.debug( 'Checking directory %s for *.cfg files' % localRoot )
for i in files:
if re.search( '.cfg$', i ):
gConfig.loadFile( '%s/%s' % ( localRoot, i ) )
self.log.debug( 'Found local .cfg file %s' % i )
#############################################################################
def __dictAsInfoString( self, dData, infoString = '', currentBase = "" ):
for key in dData:
value = dData[ key ]
if type( value ) == types.DictType:
infoString = self.__dictAsInfoString( value, infoString, "%s/%s" % ( currentBase, key ) )
elif type( value ) in ( types.ListType, types.TupleType ):
if len( value ) and value[0] == '[':
infoString += "%s/%s = %s\n" % ( currentBase, key, " ".join( value ) )
else:
infoString += "%s/%s = %s\n" % ( currentBase, key, ", ".join( value ) )
else:
infoString += "%s/%s = %s\n" % ( currentBase, key, str( value ) )
return infoString
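# Illustrative sketch (added for clarity, values are hypothetical): the recursive
# helper above renders one "path = value" line per leaf key and extends the path for
# nested dictionaries. A call such as
#   self.__dictAsInfoString( { 'Owner': 'jsmith', 'CPUTime': 3600 }, currentBase = '/Job' )
# would be expected to produce:
#   /Job/Owner = jsmith
#   /Job/CPUTime = 3600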
#############################################################################
def execute( self, arguments ):
"""The main execution method of the Job Wrapper
"""
self.log.info( 'Job Wrapper is starting execution phase for job %s' % ( self.jobID ) )
os.environ['DIRACJOBID'] = str( self.jobID )
os.environ['DIRACROOT'] = self.localSiteRoot
self.log.verbose( 'DIRACROOT = %s' % ( self.localSiteRoot ) )
os.environ['DIRACPYTHON'] = sys.executable
self.log.verbose( 'DIRACPYTHON = %s' % ( sys.executable ) )
os.environ['DIRACSITE'] = DIRAC.siteName()
self.log.verbose( 'DIRACSITE = %s' % ( DIRAC.siteName() ) )
errorFile = self.jobArgs.get( 'StdError', self.defaultErrorFile )
outputFile = self.jobArgs.get( 'StdOutput', self.defaultOutputFile )
if 'CPUTime' in self.jobArgs:
jobCPUTime = int( self.jobArgs['CPUTime'] )
else:
self.log.info( 'Job %s has no CPU time limit specified, '
'applying default of %s' % ( self.jobID, self.defaultCPUTime ) )
jobCPUTime = self.defaultCPUTime
jobMemory = 0.
if "Memory" in self.jobArgs:
# Job specifies memory in GB, internally use KB
jobMemory = int( self.jobArgs['Memory'] )*1024.*1024.
if 'Executable' in self.jobArgs:
executable = self.jobArgs['Executable'].strip()
else:
msg = 'Job %s has no specified executable' % ( self.jobID )
self.log.warn( msg )
return S_ERROR( msg )
jobArguments = self.jobArgs.get( 'Arguments', '' )
executable = os.path.expandvars( executable )
exeThread = None
spObject = None
if re.search( 'DIRACROOT', executable ):
executable = executable.replace( '$DIRACROOT', self.localSiteRoot )
self.log.verbose( 'Replaced $DIRACROOT for executable as %s' % ( self.localSiteRoot ) )
# Make the full path since . is not always in the PATH
executable = os.path.abspath( executable )
if not os.access( executable, os.X_OK ):
try:
os.chmod( executable, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH )
except Exception:
self.log.warn( 'Failed to change mode to 775 for the executable', executable )
exeEnv = dict( os.environ )
if 'ExecutionEnvironment' in self.jobArgs:
self.log.verbose( 'Adding variables to execution environment' )
variableList = self.jobArgs['ExecutionEnvironment']
if type( variableList ) == type( " " ):
variableList = [variableList]
for var in variableList:
nameEnv = var.split( '=' )[0]
valEnv = urllib.unquote( var.split( '=' )[1] )
exeEnv[nameEnv] = valEnv
self.log.verbose( '%s = %s' % ( nameEnv, valEnv ) )
if os.path.exists( executable ):
self.__report( 'Running', 'Application', sendFlag = True )
spObject = Subprocess( timeout = False, bufferLimit = int( self.bufferLimit ) )
command = executable
if jobArguments:
command += ' ' + jobArguments
self.log.verbose( 'Execution command: %s' % ( command ) )
maxPeekLines = self.maxPeekLines
exeThread = ExecutionThread( spObject, command, maxPeekLines, outputFile, errorFile, exeEnv )
exeThread.start()
time.sleep( 10 )
payloadPID = spObject.getChildPID()
if not payloadPID:
return S_ERROR( 'Payload process could not start after 10 seconds' )
else:
self.__report( 'Failed', 'Application not found', sendFlag = True )
return S_ERROR( 'Path to executable %s not found' % ( executable ) )
self.__setJobParam( 'PayloadPID', payloadPID )
watchdogFactory = WatchdogFactory()
watchdogInstance = watchdogFactory.getWatchdog( self.currentPID, exeThread, spObject, jobCPUTime, jobMemory )
if not watchdogInstance['OK']:
self.log.warn( watchdogInstance['Message'] )
return S_ERROR( 'Could not create Watchdog instance' )
self.log.verbose( 'WatchdogInstance %s' % ( watchdogInstance ) )
watchdog = watchdogInstance['Value']
self.log.verbose( 'Initializing Watchdog instance' )
watchdog.initialize()
self.log.verbose( 'Calibrating Watchdog instance' )
watchdog.calibrate()
# do not kill Test jobs by CPU time
if self.jobArgs.get( 'JobType', '' ) == 'Test':
watchdog.testCPUConsumed = False
if 'DisableCPUCheck' in self.jobArgs:
watchdog.testCPUConsumed = False
if exeThread.isAlive():
self.log.info( 'Application thread is started in Job Wrapper' )
watchdog.run()
else:
self.log.warn( 'Application thread stopped very quickly...' )
if exeThread.isAlive():
self.log.warn( 'Watchdog exited before completion of execution thread' )
while exeThread.isAlive():
time.sleep( 5 )
outputs = None
if 'Thread' in EXECUTION_RESULT:
threadResult = EXECUTION_RESULT['Thread']
if not threadResult['OK']:
self.log.error( 'Failed to execute the payload', threadResult['Message'] )
self.__report( 'Failed', 'Application thread failed', sendFlag = True )
if 'Value' in threadResult:
outs = threadResult['Value']
if outs:
self.__setJobParam( 'ApplicationError', outs[0], sendFlag = True )
else:
self.__setJobParam( 'ApplicationError', 'None reported', sendFlag = True )
else:
outputs = threadResult['Value']
if 'CPU' in EXECUTION_RESULT:
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in JobWrapper execute', cpuString )
if watchdog.checkError:
# In this case, the Watchdog has killed the Payload and the ExecutionThread can not get the CPU statistics
# os.times only reports for waited children
# Take the CPU from the last value recorded by the Watchdog
self.__report( 'Failed', watchdog.checkError, sendFlag = True )
if 'CPU' in EXECUTION_RESULT:
if 'LastUpdateCPU(s)' in watchdog.currentStats:
EXECUTION_RESULT['CPU'][1] = 0
EXECUTION_RESULT['CPU'][2] = 0
EXECUTION_RESULT['CPU'][3] = 0
EXECUTION_RESULT['CPU'][0] = watchdog.currentStats['LastUpdateCPU(s)']
if watchdog.currentStats:
self.log.info( 'Statistics collected by the Watchdog:\n ',
'\n '.join( ['%s: %s' % items for items in watchdog.currentStats.items() ] ) )
if outputs:
status = threadResult['Value'][0]
# Send final heartbeat of a configurable number of lines here
self.log.verbose( 'Sending final application standard output heartbeat' )
self.__sendFinalStdOut( exeThread )
self.log.verbose( 'Execution thread status = %s' % ( status ) )
if not watchdog.checkError and not status:
self.failedFlag = False
self.__report( 'Completed', 'Application Finished Successfully', sendFlag = True )
elif not watchdog.checkError:
self.__report( 'Completed', 'Application Finished With Errors', sendFlag = True )
else:
return S_ERROR( 'No outputs generated from job execution' )
self.log.info( 'Checking directory contents after execution:' )
res = systemCall( 5, ['ls', '-al'] )
if not res['OK']:
self.log.error( 'Failed to list the current directory', res['Message'] )
elif res['Value'][0]:
self.log.error( 'Failed to list the current directory', res['Value'][2] )
else:
# no timeout and exit code is 0
self.log.info( res['Value'][1] )
return S_OK()
#############################################################################
def __sendFinalStdOut( self, exeThread ):
"""After the Watchdog process has finished, this function sends a final
report to be presented in the StdOut in the web page via the heartbeat
mechanism.
"""
cpuConsumed = self.__getCPU()['Value']
self.log.info( 'Total CPU Consumed is: %s' % cpuConsumed[1] )
self.__setJobParam( 'TotalCPUTime(s)', cpuConsumed[0] )
normCPU = cpuConsumed[0] * self.cpuNormalizationFactor
self.__setJobParam( 'NormCPUTime(s)', normCPU )
if self.cpuNormalizationFactor:
self.log.info( 'Normalized CPU Consumed is:', normCPU )
result = exeThread.getOutput( self.maxPeekLines )
if not result['OK']:
lines = 0
appStdOut = ''
else:
lines = len( result['Value'] )
appStdOut = '\n'.join( result['Value'] )
header = 'Last %s lines of application output from JobWrapper on %s :' % ( lines, Time.toString() )
border = '=' * len( header )
cpuTotal = 'CPU Total: %s (h:m:s)' % cpuConsumed[1]
cpuTotal += " Normalized CPU Total %.1f s @ HEP'06" % normCPU
header = '\n%s\n%s\n%s\n%s\n' % ( border, header, cpuTotal, border )
appStdOut = header + appStdOut
self.log.info( appStdOut )
heartBeatDict = {}
staticParamDict = {'StandardOutput':appStdOut}
if self.jobID:
jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
result = jobReport.sendHeartBeat( self.jobID, heartBeatDict, staticParamDict )
if not result['OK']:
self.log.error( 'Problem sending final heartbeat from JobWrapper', result['Message'] )
return
#############################################################################
def __getCPU( self ):
"""Uses os.times() to get CPU time and returns HH:MM:SS after conversion.
"""
# TODO: normalize CPU consumed via scale factor
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in __getCPU', cpuString )
utime, stime, cutime, cstime, _elapsed = EXECUTION_RESULT['CPU']
cpuTime = utime + stime + cutime + cstime
self.log.verbose( "Total CPU time consumed = %s" % ( cpuTime ) )
result = self.__getCPUHMS( cpuTime )
return result
#############################################################################
def __getCPUHMS( self, cpuTime ):
mins, secs = divmod( cpuTime, 60 )
hours, mins = divmod( mins, 60 )
humanTime = '%02d:%02d:%02d' % ( hours, mins, secs )
self.log.verbose( 'Human readable CPU time is: %s' % humanTime )
return S_OK( ( cpuTime, humanTime ) )
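# Worked example (added for clarity): for a consumed CPU time of 3725.0 seconds the
# conversion above gives divmod( 3725.0, 60 ) -> ( 62.0, 5.0 ) and divmod( 62.0, 60 )
# -> ( 1.0, 2.0 ), so the method returns S_OK( ( 3725.0, '01:02:05' ) ).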
#############################################################################
def resolveInputData( self ):
"""Input data is resolved here using a VO specific plugin module.
"""
self.__report( 'Running', 'Input Data Resolution', sendFlag = True )
if 'LocalSE' in self.ceArgs:
localSEList = self.ceArgs[ 'LocalSE']
else:
localSEList = gConfig.getValue( '/LocalSite/LocalSE', [] )
if not localSEList:
msg = 'Job has input data requirement but no site LocalSE defined'
self.log.warn( msg )
return S_ERROR( msg )
inputData = self.jobArgs['InputData']
self.log.verbose( 'Input Data is: \n%s' % ( inputData ) )
if type( inputData ) in types.StringTypes:
inputData = [inputData]
if type( localSEList ) in types.StringTypes:
localSEList = List.fromChar( localSEList )
msg = 'Job Wrapper cannot resolve local replicas of input data with null '
if not inputData:
msg += 'job input data parameter '
self.log.warn( msg )
return S_ERROR( msg )
if not localSEList:
msg += 'site localSEList list'
self.log.warn( msg )
# return S_ERROR( msg )
if 'InputDataModule' not in self.jobArgs:
msg = 'Job has no input data resolution module specified'
self.log.warn( msg )
# Use the default one
inputDataPolicy = 'DIRAC.WorkloadManagementSystem.Client.InputDataResolution'
else:
inputDataPolicy = self.jobArgs['InputDataModule']
self.log.verbose( 'Job input data requirement is \n%s' % ',\n'.join( inputData ) )
self.log.verbose( 'Job input data resolution policy module is %s' % ( inputDataPolicy ) )
self.log.info( 'Site has the following local SEs: %s' % ', '.join( localSEList ) )
lfns = [ fname.replace( 'LFN:', '' ) for fname in inputData ]
optReplicas = {}
if self.optArgs:
optDict = None
try:
optDict = eval( self.optArgs['InputData'] )
optReplicas = optDict['Value']
self.log.info( 'Found optimizer catalogue result' )
self.log.verbose( optReplicas )
except Exception, x:
optDict = None
self.log.warn( str( x ) )
self.log.warn( 'Optimizer information could not be converted to a dictionary, will call the catalogue directly' )
resolvedData = {}
result = self.__checkFileCatalog( lfns, optReplicas )
if not result['OK']:
self.log.info( 'Could not obtain replica information from Optimizer File Catalog information' )
self.log.warn( result )
result = self.__checkFileCatalog( lfns )
if not result['OK']:
self.log.warn( 'Could not obtain replica information from File Catalog directly' )
self.log.warn( result )
return S_ERROR( result['Message'] )
else:
resolvedData = result
else:
resolvedData = result
# add input data size to accounting report (since resolution successful)
for lfn, mdata in resolvedData['Value']['Successful'].items():
if 'Size' in mdata:
lfnSize = mdata['Size']
if not type( lfnSize ) == type( long( 1 ) ):
try:
lfnSize = long( lfnSize )
except Exception, x:
lfnSize = 0
self.log.info( 'File size for LFN:%s was not a long integer, setting size to 0' % ( lfn ) )
self.inputDataSize += lfnSize
configDict = {'JobID':self.jobID, 'LocalSEList':localSEList, 'DiskSEList':self.diskSE, 'TapeSEList':self.tapeSE}
self.log.info( configDict )
argumentsDict = {'FileCatalog':resolvedData, 'Configuration':configDict, 'InputData':lfns, 'Job':self.jobArgs}
self.log.info( argumentsDict )
moduleFactory = ModuleFactory()
moduleInstance = moduleFactory.getModule( inputDataPolicy, argumentsDict )
if not moduleInstance['OK']:
return moduleInstance
module = moduleInstance['Value']
result = module.execute()
if not result['OK']:
self.log.warn( 'Input data resolution failed' )
return result
return S_OK()
#############################################################################
def __checkFileCatalog( self, lfns, optReplicaInfo = None ):
"""This function returns dictionaries containing all relevant parameters
to allow data access from the relevant file catalogue. Optionally, optimizer
parameters can be supplied here but if these are not sufficient, the file catalogue
is subsequently consulted.
N.B. this will be considerably simplified when the DMS evolves to have a
generic FC interface and a single call for all available information.
"""
replicas = optReplicaInfo
if not replicas:
replicas = self.__getReplicaMetadata( lfns )
if not replicas['OK']:
return replicas
self.log.verbose( replicas )
failedGUIDs = []
for lfn, reps in replicas['Value']['Successful'].items():
if 'GUID' not in reps:
failedGUIDs.append( lfn )
if failedGUIDs:
self.log.info( 'The following file(s) were found not to have a GUID:\n%s' % ',\n'.join( failedGUIDs ) )
if failedGUIDs:
return S_ERROR( 'File metadata is not available' )
else:
return replicas
#############################################################################
def __getReplicaMetadata( self, lfns ):
""" Wrapper function to consult catalog for all necessary file metadata
and check the result.
"""
start = time.time()
repsResult = self.dm.getReplicas( lfns )
timing = time.time() - start
self.log.info( 'Replica Lookup Time: %.2f seconds ' % ( timing ) )
if not repsResult['OK']:
self.log.warn( repsResult['Message'] )
return repsResult
badLFNCount = 0
badLFNs = []
catalogResult = repsResult['Value']
for lfn, cause in catalogResult.get( 'Failed', {} ).items():
badLFNCount += 1
badLFNs.append( 'LFN:%s Problem: %s' % ( lfn, cause ) )
for lfn, replicas in catalogResult.get( 'Successful', {} ).items():
if not replicas:
badLFNCount += 1
badLFNs.append( 'LFN:%s Problem: Null replica value' % ( lfn ) )
if badLFNCount:
self.log.warn( 'Job Wrapper found %s problematic LFN(s) for job %s' % ( badLFNCount, self.jobID ) )
param = '\n'.join( badLFNs )
self.log.info( param )
self.__setJobParam( 'MissingLFNs', param )
return S_ERROR( 'Input Data Not Available' )
# Must retrieve GUIDs from LFC for files
start = time.time()
guidDict = self.fc.getFileMetadata( lfns )
timing = time.time() - start
self.log.info( 'GUID Lookup Time: %.2f seconds ' % ( timing ) )
if not guidDict['OK']:
self.log.warn( 'Failed to retrieve GUIDs from file catalogue' )
self.log.warn( guidDict['Message'] )
return guidDict
failed = guidDict['Value']['Failed']
if failed:
self.log.warn( 'Could not retrieve GUIDs from catalogue for the following files' )
self.log.warn( failed )
return S_ERROR( 'Missing GUIDs' )
for lfn, reps in repsResult['Value']['Successful'].items():
guidDict['Value']['Successful'][lfn].update( reps )
catResult = guidDict
return catResult
#############################################################################
def processJobOutputs( self, arguments ):
"""Outputs for a job may be treated here.
"""
# first iteration of this, no checking of wildcards or oversize sandbox files etc.
outputSandbox = self.jobArgs.get( 'OutputSandbox', [] )
if type( outputSandbox ) == type( '' ):
outputSandbox = [ outputSandbox ]
if outputSandbox:
self.log.verbose( 'OutputSandbox files are: %s' % ', '.join( outputSandbox ) )
outputData = self.jobArgs.get( 'OutputData', '' )
if type( outputData ) == type( '' ):
outputData = outputData.split( ';' )
if outputData:
self.log.verbose( 'OutputData files are: %s' % ', '.join( outputData ) )
# First resolve any wildcards for output files and work out if any files are missing
resolvedSandbox = self.__resolveOutputSandboxFiles( outputSandbox )
if not resolvedSandbox['OK']:
self.log.warn( 'Output sandbox file resolution failed:' )
self.log.warn( resolvedSandbox['Message'] )
self.__report( 'Failed', 'Resolving Output Sandbox' )
fileList = resolvedSandbox['Value']['Files']
missingFiles = resolvedSandbox['Value']['Missing']
if missingFiles:
self.jobReport.setJobParameter( 'OutputSandboxMissingFiles', ', '.join( missingFiles ), sendFlag = False )
if 'Owner' not in self.jobArgs:
msg = 'Job has no owner specified'
self.log.warn( msg )
return S_OK( msg )
# Do not overwrite in case of Error
if not self.failedFlag:
self.__report( 'Completed', 'Uploading Output Sandbox' )
uploadOutputDataInAnyCase = False
if fileList and self.jobID:
self.outputSandboxSize = getGlobbedTotalSize( fileList )
self.log.info( 'Attempting to upload Sandbox with limit:', self.sandboxSizeLimit )
sandboxClient = SandboxStoreClient()
result = sandboxClient.uploadFilesAsSandboxForJob( fileList, self.jobID,
'Output', self.sandboxSizeLimit ) # 1024*1024*10
if not result['OK']:
self.log.error( 'Output sandbox upload failed with message', result['Message'] )
outputSandboxData = result.get( 'SandboxFileName' )
if outputSandboxData:
self.log.info( 'Attempting to upload %s as output data' % ( outputSandboxData ) )
if self.failedFlag:
outputData = [outputSandboxData]
uploadOutputDataInAnyCase = True
else:
outputData.append( outputSandboxData )
self.jobReport.setJobParameter( 'OutputSandbox', 'Sandbox uploaded to grid storage', sendFlag = False )
self.jobReport.setJobParameter( 'OutputSandboxLFN',
self.__getLFNfromOutputFile( outputSandboxData )[0], sendFlag = False )
else:
self.log.info( 'Could not get SandboxFileName to attempt upload to Grid storage' )
return S_ERROR( 'Output sandbox upload failed and no file name supplied for failover to Grid storage' )
else:
# Do not overwrite in case of Error
if not self.failedFlag:
self.__report( 'Completed', 'Output Sandbox Uploaded' )
self.log.info( 'Sandbox uploaded successfully' )
if ( outputData and not self.failedFlag ) or uploadOutputDataInAnyCase:
# Do not upload outputdata if the job has failed.
# The exception is when the outputData is what was the OutputSandbox, which should be uploaded in any case
outputSE = self.jobArgs.get( 'OutputSE', self.defaultOutputSE )
if type( outputSE ) in types.StringTypes:
outputSE = [outputSE]
outputPath = self.jobArgs.get( 'OutputPath', self.defaultOutputPath )
if type( outputPath ) not in types.StringTypes:
outputPath = self.defaultOutputPath
if not outputSE and not self.defaultFailoverSE:
return S_ERROR( 'No output SEs defined in VO configuration' )
result = self.__transferOutputDataFiles( outputData, outputSE, outputPath )
if not result['OK']:
return result
return S_OK( 'Job outputs processed' )
#############################################################################
def __resolveOutputSandboxFiles( self, outputSandbox ):
"""Checks the output sandbox file list and resolves any specified wildcards.
Also tars any specified directories.
"""
missing = []
okFiles = []
for i in outputSandbox:
self.log.verbose( 'Looking at OutputSandbox file/directory/wildcard: %s' % i )
globList = glob.glob( i )
for check in globList:
if os.path.isfile( check ):
self.log.verbose( 'Found locally existing OutputSandbox file: %s' % check )
okFiles.append( check )
if os.path.isdir( check ):
self.log.verbose( 'Found locally existing OutputSandbox directory: %s' % check )
cmd = ['tar', 'cf', '%s.tar' % check, check]
result = systemCall( 60, cmd )
if not result['OK']:
self.log.error( 'Failed to create OutputSandbox tar', result['Message'] )
elif result['Value'][0]:
self.log.error( 'Failed to create OutputSandbox tar', result['Value'][2] )
if os.path.isfile( '%s.tar' % ( check ) ):
self.log.verbose( 'Appending %s.tar to OutputSandbox' % check )
okFiles.append( '%s.tar' % ( check ) )
else:
self.log.warn( 'Could not tar OutputSandbox directory: %s' % check )
missing.append( check )
for i in outputSandbox:
if not i in okFiles:
if not '%s.tar' % i in okFiles:
if not re.search( '\*', i ):
if not i in missing:
missing.append( i )
result = {'Missing':missing, 'Files':okFiles}
return S_OK( result )
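# Illustrative sketch (hypothetical file names): given an OutputSandbox such as
# [ '*.log', 'summary.txt', 'plots' ] where 'plots' is a local directory, the
# resolution above would glob the wildcard, tar the directory into 'plots.tar' and
# return something like
#   S_OK( { 'Files': [ 'job.log', 'summary.txt', 'plots.tar' ], 'Missing': [] } )
# Plain (non-wildcard) entries that do not exist locally end up in the 'Missing' list.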
#############################################################################
def __transferOutputDataFiles( self, outputData, outputSE, outputPath ):
"""Performs the upload and registration in the LFC
"""
self.log.verbose( 'Uploading output data files' )
self.__report( 'Completed', 'Uploading Output Data' )
self.log.info( 'Output data files %s to be uploaded to %s SE' % ( ', '.join( outputData ), outputSE ) )
missing = []
uploaded = []
# Separate outputdata in the form of lfns and local files
lfnList = []
nonlfnList = []
for out in outputData:
if out.lower().find( 'lfn:' ) != -1:
lfnList.append( out )
else:
nonlfnList.append( out )
# Check whether list of outputData has a globbable pattern
globbedOutputList = List.uniqueElements( getGlobbedFiles( nonlfnList ) )
if not globbedOutputList == nonlfnList and globbedOutputList:
self.log.info( 'Found a pattern in the output data file list, files to upload are:',
', '.join( globbedOutputList ) )
nonlfnList = globbedOutputList
outputData = lfnList + nonlfnList
pfnGUID = {}
result = getGUID( outputData )
if not result['OK']:
self.log.warn( 'Failed to determine POOL GUID(s) for output file list (OK if not POOL files)',
result['Message'] )
else:
pfnGUID = result['Value']
for outputFile in outputData:
( lfn, localfile ) = self.__getLFNfromOutputFile( outputFile, outputPath )
if not os.path.exists( localfile ):
self.log.error( 'Missing specified output data file:', outputFile )
continue
# # file size
localfileSize = getGlobbedTotalSize( localfile )
self.outputDataSize += getGlobbedTotalSize( localfile )
outputFilePath = os.path.join( os.getcwd(), localfile )
# # file GUID
fileGUID = pfnGUID[localfile] if localfile in pfnGUID else None
if fileGUID:
self.log.verbose( 'Found GUID for file from POOL XML catalogue %s' % localfile )
# # file checksum
cksm = fileAdler( outputFilePath )
fileMetaDict = { "Size": localfileSize,
"LFN" : lfn,
"ChecksumType" : "Adler32",
"Checksum": cksm,
"GUID" : fileGUID }
outputSEList = self.__getSortedSEList( outputSE )
upload = self.failoverTransfer.transferAndRegisterFile( fileName = localfile,
localPath = outputFilePath,
lfn = lfn,
destinationSEList = outputSEList,
fileMetaDict = fileMetaDict,
fileCatalog = self.defaultCatalog,
masterCatalogOnly = self.masterCatalogOnlyFlag )
if upload['OK']:
self.log.info( '"%s" successfully uploaded to "%s" as "LFN:%s"' % ( localfile,
upload['Value']['uploadedSE'],
lfn ) )
uploaded.append( lfn )
continue
self.log.error( 'Could not putAndRegister file',
'%s with LFN %s to %s with GUID %s trying failover storage' % ( localfile, lfn,
', '.join( outputSEList ),
fileGUID ) )
if not self.defaultFailoverSE:
self.log.info( 'No failover SEs defined for JobWrapper,',
'cannot try to upload output file %s anywhere else.' % outputFile )
missing.append( outputFile )
continue
failoverSEs = self.__getSortedSEList( self.defaultFailoverSE )
targetSE = outputSEList[0]
result = self.failoverTransfer.transferAndRegisterFileFailover( fileName = localfile,
localPath = outputFilePath,
lfn = lfn,
targetSE = targetSE,
failoverSEList = failoverSEs,
fileMetaDict = fileMetaDict,
fileCatalog = self.defaultCatalog,
masterCatalogOnly = self.masterCatalogOnlyFlag )
if not result['OK']:
self.log.error( 'Completely failed to upload file to failover SEs', result['Message'] )
missing.append( outputFile )
else:
self.log.info( 'File %s successfully uploaded to failover storage element' % lfn )
uploaded.append( lfn )
# For files correctly uploaded must report LFNs to job parameters
if uploaded:
report = ', '.join( uploaded )
# In case the VO payload has also uploaded data using the same parameter
# name this should be checked prior to setting.
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring', timeout = 120 )
result = monitoring.getJobParameter( int( self.jobID ), 'UploadedOutputData' )
if result['OK']:
if 'UploadedOutputData' in result['Value']:
report += ', %s' % result['Value']['UploadedOutputData']
self.jobReport.setJobParameter( 'UploadedOutputData', report, sendFlag = False )
# TODO Notify the user of any output data / output sandboxes
if missing:
self.__setJobParam( 'OutputData', 'MissingFiles: %s' % ', '.join( missing ) )
self.__report( 'Failed', 'Uploading Job OutputData' )
return S_ERROR( 'Failed to upload OutputData' )
self.__report( 'Completed', 'Output Data Uploaded' )
return S_OK( 'OutputData uploaded successfully' )
#############################################################################
def __getSortedSEList( self, seList ):
""" Randomize SE, putting first those that are Local/Close to the Site
"""
if not seList:
return seList
localSEs = []
otherSEs = []
siteSEs = []
seMapping = getSEsForSite( DIRAC.siteName() )
if seMapping['OK'] and seMapping['Value']:
siteSEs = seMapping['Value']
for seName in seList:
if seName in siteSEs:
localSEs.append( seName )
else:
otherSEs.append( seName )
return List.randomize( localSEs ) + List.randomize( otherSEs )
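# Illustrative note (hypothetical SE and site names): for seList = [ 'GRIDKA-USER',
# 'CERN-USER' ] evaluated at a site whose local SEs include 'CERN-USER', the method
# above returns the local SE(s) first, e.g. [ 'CERN-USER', 'GRIDKA-USER' ], with each
# of the two groups randomized internally.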
#############################################################################
def __getLFNfromOutputFile( self, outputFile, outputPath = '' ):
"""Provides a generic convention for VO output data
files if no path is specified.
"""
if not re.search( '^LFN:', outputFile ):
localfile = outputFile
initial = self.owner[:1]
vo = getVOForGroup( self.userGroup )
if not vo:
vo = 'dirac'
ops = Operations( vo = vo )
user_prefix = ops.getValue( "LFNUserPrefix", 'user' )
basePath = '/' + vo + '/' + user_prefix + '/' + initial + '/' + self.owner
if outputPath:
# If output path is given, append it to the user path and put output files in this directory
if outputPath.startswith( '/' ):
outputPath = outputPath[1:]
else:
# By default the output path is constructed from the job id
subdir = str( self.jobID / 1000 )
outputPath = subdir + '/' + str( self.jobID )
lfn = os.path.join( basePath, outputPath, os.path.basename( localfile ) )
else:
# if LFN is given, take it as it is
localfile = os.path.basename( outputFile.replace( "LFN:", "" ) )
lfn = outputFile.replace( "LFN:", "" )
return ( lfn, localfile )
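# Worked example (all values hypothetical): for an output file 'histos.root' produced
# by job 123456, owner 'jsmith', VO 'dirac' and the default LFNUserPrefix 'user', the
# convention above yields
#   lfn       = '/dirac/user/j/jsmith/123/123456/histos.root'
#   localfile = 'histos.root'
# If the file is already given as 'LFN:/some/path/file.root', the LFN is taken as is
# and only its basename is used as the local file name.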
#############################################################################
def transferInputSandbox( self, inputSandbox ):
"""Downloads the input sandbox for the job
"""
sandboxFiles = []
registeredISB = []
lfns = []
self.__report( 'Running', 'Downloading InputSandbox' )
if type( inputSandbox ) not in ( types.TupleType, types.ListType ):
inputSandbox = [ inputSandbox ]
for isb in inputSandbox:
if isb.find( "LFN:" ) == 0 or isb.find( "lfn:" ) == 0:
lfns.append( isb )
else:
if isb.find( "SB:" ) == 0:
registeredISB.append( isb )
else:
sandboxFiles.append( os.path.basename( isb ) )
self.log.info( 'Downloading InputSandbox for job %s: %s' % ( self.jobID, ', '.join( sandboxFiles ) ) )
if os.path.exists( '%s/inputsandbox' % ( self.root ) ):
# This is a debugging tool, get the file from local storage to debug Job Wrapper
sandboxFiles.append( 'jobDescription.xml' )
for inputFile in sandboxFiles:
if os.path.exists( '%s/inputsandbox/%s' % ( self.root, inputFile ) ):
self.log.info( 'Getting InputSandbox file %s from local directory for testing' % ( inputFile ) )
shutil.copy( self.root + '/inputsandbox/' + inputFile, inputFile )
result = S_OK( sandboxFiles )
else:
if registeredISB:
for isb in registeredISB:
self.log.info( "Downloading Input SandBox %s" % isb )
result = SandboxStoreClient().downloadSandbox( isb )
if not result[ 'OK' ]:
self.__report( 'Running', 'Failed Downloading InputSandbox' )
return S_ERROR( "Cannot download Input sandbox %s: %s" % ( isb, result[ 'Message' ] ) )
else:
self.inputSandboxSize += result[ 'Value' ]
if lfns:
self.log.info( "Downloading Input SandBox LFNs, number of files to get", len( lfns ) )
self.__report( 'Running', 'Downloading InputSandbox LFN(s)' )
lfns = [fname.replace( 'LFN:', '' ).replace( 'lfn:', '' ) for fname in lfns]
download = self.dm.getFile( lfns )
if not download['OK']:
self.log.warn( download )
self.__report( 'Running', 'Failed Downloading InputSandbox LFN(s)' )
return S_ERROR( download['Message'] )
failed = download['Value']['Failed']
if failed:
self.log.warn( 'Could not download InputSandbox LFN(s)' )
self.log.warn( failed )
return S_ERROR( str( failed ) )
for lfn in lfns:
if os.path.exists( '%s/%s' % ( self.root, os.path.basename( download['Value']['Successful'][lfn] ) ) ):
sandboxFiles.append( os.path.basename( download['Value']['Successful'][lfn] ) )
userFiles = sandboxFiles + [ os.path.basename( lfn ) for lfn in lfns ]
for possibleTarFile in userFiles:
if not os.path.exists( possibleTarFile ) :
continue
try:
if os.path.isfile( possibleTarFile ) and tarfile.is_tarfile( possibleTarFile ):
self.log.info( 'Unpacking input sandbox file %s' % ( possibleTarFile ) )
tarFile = tarfile.open( possibleTarFile, 'r' )
for member in tarFile.getmembers():
tarFile.extract( member, os.getcwd() )
except Exception, x :
return S_ERROR( 'Could not untar %s with exception %s' % ( possibleTarFile, str( x ) ) )
if userFiles:
self.inputSandboxSize = getGlobbedTotalSize( userFiles )
self.log.info( "Total size of input sandbox:",
"%0.2f MiB (%s bytes)" % ( self.inputSandboxSize / 1048576.0, self.inputSandboxSize ) )
return S_OK( 'InputSandbox downloaded' )
#############################################################################
def finalize( self, arguments ):
"""Perform any final actions to clean up after job execution.
"""
self.log.info( 'Running JobWrapper finalization' )
# find if there are pending failover requests
requests = self.__getRequestFiles()
outputDataRequest = self.failoverTransfer.getRequest()
requestFlag = len( requests ) > 0 or not outputDataRequest.isEmpty()
if self.failedFlag and requestFlag:
self.log.info( 'Application finished with errors and there are pending requests for this job.' )
self.__report( 'Failed', 'Pending Requests' )
elif not self.failedFlag and requestFlag:
self.log.info( 'Application finished successfully with pending requests for this job.' )
self.__report( 'Completed', 'Pending Requests' )
elif self.failedFlag and not requestFlag:
self.log.info( 'Application finished with errors with no pending requests.' )
self.__report( 'Failed' )
elif not self.failedFlag and not requestFlag:
self.log.info( 'Application finished successfully with no pending requests for this job.' )
self.__report( 'Done', 'Execution Complete' )
self.sendFailoverRequest()
self.__cleanUp()
if self.failedFlag:
return 1
else:
return 0
#############################################################################
def sendJobAccounting( self, status = '', minorStatus = '' ):
"""Send WMS accounting data.
"""
if self.jobAccountingSent:
return S_OK()
if status:
self.wmsMajorStatus = status
if minorStatus:
self.wmsMinorStatus = minorStatus
self.accountingReport.setEndTime()
# CPUTime and ExecTime
if not 'CPU' in EXECUTION_RESULT:
# If the payload has not started execution (error with input data, SW, SB,...)
# Execution result is not filled, so use self.initialTiming instead
self.log.info( 'EXECUTION_RESULT[CPU] missing in sendJobAccounting' )
finalStat = os.times()
EXECUTION_RESULT['CPU'] = []
for i in range( len( finalStat ) ):
EXECUTION_RESULT['CPU'].append( finalStat[i] - self.initialTiming[i] )
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in sendJobAccounting', cpuString )
utime, stime, cutime, cstime, elapsed = EXECUTION_RESULT['CPU']
cpuTime = utime + stime + cutime + cstime
execTime = elapsed
diskSpaceConsumed = getGlobbedTotalSize( os.path.join( self.root, str( self.jobID ) ) )
# Fill the data
acData = {
'User' : self.owner,
'UserGroup' : self.userGroup,
'JobGroup' : self.jobGroup,
'JobType' : self.jobType,
'JobClass' : self.jobClass,
'ProcessingType' : self.processingType,
'FinalMajorStatus' : self.wmsMajorStatus,
'FinalMinorStatus' : self.wmsMinorStatus,
'CPUTime' : cpuTime,
# Based on the factor to convert raw CPU to Normalized units (based on the CPU Model)
'NormCPUTime' : cpuTime * self.cpuNormalizationFactor,
'ExecTime' : execTime,
'InputDataSize' : self.inputDataSize,
'OutputDataSize' : self.outputDataSize,
'InputDataFiles' : self.inputDataFiles,
'OutputDataFiles' : self.outputDataFiles,
'DiskSpace' : diskSpaceConsumed,
'InputSandBoxSize' : self.inputSandboxSize,
'OutputSandBoxSize' : self.outputSandboxSize,
'ProcessedEvents' : self.processedEvents
}
self.log.verbose( 'Accounting Report is:' )
self.log.verbose( acData )
self.accountingReport.setValuesFromDict( acData )
result = self.accountingReport.commit()
# Even if it fails a failover request will be created
self.jobAccountingSent = True
return result
#############################################################################
def sendFailoverRequest( self, status = '', minorStatus = '' ):
""" Create and send a combined job failover request if any
"""
request = Request()
requestName = 'job_%s' % self.jobID
if 'JobName' in self.jobArgs:
# To make the request names more appealing for users
jobName = self.jobArgs['JobName']
if type( jobName ) == type( ' ' ) and jobName:
jobName = jobName.replace( ' ', '' ).replace( '(', '' ).replace( ')', '' ).replace( '"', '' )
jobName = jobName.replace( '.', '' ).replace( '{', '' ).replace( '}', '' ).replace( ':', '' )
requestName = '%s_%s' % ( jobName, requestName )
request.RequestName = requestName.replace( '"', '' )
request.JobID = self.jobID
request.SourceComponent = "Job_%s" % self.jobID
# JobReport part first
result = self.jobReport.generateForwardDISET()
if result['OK']:
if isinstance( result["Value"], Operation ):
self.log.info( 'Adding a job state update DISET operation to the request' )
request.addOperation( result["Value"] )
else:
self.log.warn( 'JobReportFailure', "Could not generate a forwardDISET operation: %s" % result['Message'] )
self.log.warn( 'JobReportFailure', "The job won't fail, but the jobLogging info might be incomplete" )
# Accounting part
if not self.jobID:
self.log.debug( 'No accounting to be sent since running locally' )
else:
result = self.sendJobAccounting( status, minorStatus )
if not result['OK']:
self.log.warn( 'JobAccountingFailure', "Could not send job accounting with result: \n%s" % result['Message'] )
self.log.warn( 'JobAccountingFailure', "Trying to build a failover request" )
if 'rpcStub' in result:
self.log.verbose( "Adding accounting report to failover request object" )
forwardDISETOp = Operation()
forwardDISETOp.Type = "ForwardDISET"
forwardDISETOp.Arguments = DEncode.encode( result['rpcStub'] )
request.addOperation( forwardDISETOp )
self.log.verbose( "Added accounting report to failover request object" )
else:
self.log.warn( 'JobAccountingFailure', "No rpcStub found to construct failover request for job accounting report" )
self.log.warn( 'JobAccountingFailure', "The job won't fail, but the accounting for this job won't be sent" )
# Failover transfer requests
for storedOperation in self.failoverTransfer.request:
request.addOperation( storedOperation )
# Any other requests in the current directory
rfiles = self.__getRequestFiles()
for rfname in rfiles:
rfile = open( rfname, 'r' )
reqString = rfile.read()
rfile.close()
requestStored = Request( eval( reqString ) )
for storedOperation in requestStored:
request.addOperation( storedOperation )
if len( request ):
# The request is ready, send it now
isValid = RequestValidator().validate( request )
if not isValid["OK"]:
self.log.error( "Failover request is not valid", isValid["Message"] )
else:
# We try several times to put the request before failing the job: it's very important that requests go through,
# or the job will be in an unclear status (workflow ok, but, e.g., the output files won't be registered).
# It's a poor man's solution, but I don't see a better alternative
for counter in range( 10 ):
requestClient = ReqClient()
result = requestClient.putRequest( request )
if result['OK']:
resDigest = request.getDigest()
digest = resDigest['Value']
self.jobReport.setJobParameter( 'PendingRequest', digest )
break
else:
self.log.error( 'Failed to set failover request',
'%d: %s. Re-trying...' % ( counter, result['Message'] ) )
del requestClient
time.sleep( counter ** 3 )
if not result['OK']:
self.__report( 'Failed', 'Failover Request Failed' )
return result
return S_OK()
#############################################################################
def __getRequestFiles( self ):
"""Simple wrapper to return the list of request files.
"""
return glob.glob( '*_request.json' )
#############################################################################
def __cleanUp( self ):
"""Cleans up after job processing. Can be switched off via environment
variable DO_NOT_DO_JOB_CLEANUP or by JobWrapper configuration option.
"""
# Environment variable is a feature for DIRAC (helps local debugging).
if 'DO_NOT_DO_JOB_CLEANUP' in os.environ or not self.cleanUpFlag:
cleanUp = False
else:
cleanUp = True
os.chdir( self.root )
if cleanUp:
self.log.verbose( 'Cleaning up job working directory' )
if os.path.exists( str( self.jobID ) ):
shutil.rmtree( str( self.jobID ) )
#############################################################################
def __report( self, status = '', minorStatus = '', sendFlag = False ):
"""Wraps around setJobStatus of state update client
"""
if status:
self.wmsMajorStatus = status
if minorStatus:
self.wmsMinorStatus = minorStatus
jobStatus = self.jobReport.setJobStatus( status = status, minor = minorStatus, sendFlag = sendFlag )
if not jobStatus['OK']:
self.log.warn( jobStatus['Message'] )
if self.jobID:
self.log.verbose( 'setJobStatus(%s,%s,%s,%s)' % ( self.jobID, status, minorStatus, 'JobWrapper' ) )
return jobStatus
#############################################################################
def __setJobParam( self, name, value, sendFlag = False ):
"""Wraps around setJobParameter of state update client
"""
jobParam = self.jobReport.setJobParameter( str( name ), str( value ), sendFlag )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
if self.jobID:
self.log.verbose( 'setJobParameter(%s,%s,%s)' % ( self.jobID, name, value ) )
return jobParam
#############################################################################
def __setJobParamList( self, value, sendFlag = False ):
"""Wraps around setJobParameters of state update client
"""
jobParam = self.jobReport.setJobParameters( value, sendFlag )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
if self.jobID:
self.log.verbose( 'setJobParameters(%s,%s)' % ( self.jobID, value ) )
return jobParam
###############################################################################
###############################################################################
class ExecutionThread( threading.Thread ):
#############################################################################
def __init__( self, spObject, cmd, maxPeekLines, stdoutFile, stderrFile, exeEnv ):
threading.Thread.__init__( self )
self.cmd = cmd
self.spObject = spObject
self.outputLines = []
self.maxPeekLines = maxPeekLines
self.stdout = stdoutFile
self.stderr = stderrFile
self.exeEnv = exeEnv
#############################################################################
def run( self ):
# FIXME: why are local instances of the object's attributes created here?
cmd = self.cmd
spObject = self.spObject
start = time.time()
initialStat = os.times()
output = spObject.systemCall( cmd, env = self.exeEnv, callbackFunction = self.sendOutput, shell = True )
EXECUTION_RESULT['Thread'] = output
timing = time.time() - start
EXECUTION_RESULT['Timing'] = timing
finalStat = os.times()
EXECUTION_RESULT['CPU'] = []
for i in range( len( finalStat ) ):
EXECUTION_RESULT['CPU'].append( finalStat[i] - initialStat[i] )
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
gLogger.info( 'EXECUTION_RESULT[CPU] after Execution of spObject.systemCall', cpuString )
gLogger.info( 'EXECUTION_RESULT[Thread] after Execution of spObject.systemCall', str( EXECUTION_RESULT['Thread'] ) )
#############################################################################
def getCurrentPID( self ):
return self.spObject.getChildPID()
#############################################################################
def sendOutput( self, stdid, line ):
if stdid == 0 and self.stdout:
outputFile = open( self.stdout, 'a+' )
print >> outputFile, line
outputFile.close()
elif stdid == 1 and self.stderr:
errorFile = open( self.stderr, 'a+' )
print >> errorFile, line
errorFile.close()
self.outputLines.append( line )
size = len( self.outputLines )
if size > self.maxPeekLines:
# reduce max size of output peeking
self.outputLines.pop( 0 )
#############################################################################
def getOutput( self, lines = 0 ):
if self.outputLines:
# restrict to smaller number of lines for regular
# peeking by the watchdog
# FIXME: this is multithread, thus single line would be better
if lines:
size = len( self.outputLines )
cut = size - lines
self.outputLines = self.outputLines[cut:]
return S_OK( self.outputLines )
return S_ERROR( 'No Job output found' )
def rescheduleFailedJob( jobID, message, jobReport = None ):
rescheduleResult = 'Rescheduled'
try:
gLogger.warn( 'Failure during %s' % ( message ) )
# Setting a job parameter does not help since the job will be rescheduled,
# instead set the status with the cause and then another status showing the
# reschedule operation.
if not jobReport:
gLogger.info( 'Creating a new JobReport Object' )
jobReport = JobReport( int( jobID ), 'JobWrapper' )
jobReport.setApplicationStatus( 'Failed %s ' % message, sendFlag = False )
jobReport.setJobStatus( 'Rescheduled', message, sendFlag = False )
# We must send Job States and Parameters before the job gets rescheduled
jobReport.sendStoredStatusInfo()
jobReport.sendStoredJobParameters()
gLogger.info( 'Job will be rescheduled' )
jobManager = RPCClient( 'WorkloadManagement/JobManager' )
result = jobManager.rescheduleJob( int( jobID ) )
if not result['OK']:
gLogger.error( result['Message'] )
if 'Maximum number of reschedulings is reached' in result['Message']:
rescheduleResult = 'Failed'
return rescheduleResult
except Exception:
gLogger.exception( 'JobWrapperTemplate failed to reschedule Job' )
return 'Failed'
# EOF
|
miloszz/DIRAC
|
WorkloadManagementSystem/JobWrapper/JobWrapper.py
|
Python
|
gpl-3.0
| 62,516
|
[
"DIRAC"
] |
cfb840e02aa279a1d63740c82187299f56a339d3a10b3a2861ebbd470f5954ae
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Takes a list of maf filenames on the command line and prints a comma separated
list of the species that occur in all of the mafs.
usage: %prog maf1 maf2 ...
"""
import operator
import sys
import bx.align.maf
files = sys.argv[1:]
sets = []
for file in files:
sys.stderr.write( "." )
s = set()
for block in bx.align.maf.Reader( open( file ) ):
for comp in block.components:
s.add( comp.src.split( '.' )[0] )
sets.append( s )
inter = reduce( operator.and_, sets )
print ",".join( inter )
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/maf_species_in_all_files.py
|
Python
|
bsd-3-clause
| 572
|
[
"Galaxy"
] |
a5e8efb4071366af727f2ac6549dec82e45efd4db5024c68862f3c0346fbddda
|
# -*- coding: utf-8 -*-
import math
import zope.event
from bika.lims.utils import formatDecimalMark
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFCore.WorkflowCore import WorkflowException
from Products.CMFPlone.utils import _createObjectByType
def create_analysis(context, service, keyword, interim_fields):
# Determine if the sampling workflow is enabled
workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
# Create the analysis
analysis = _createObjectByType("Analysis", context, keyword)
analysis.setService(service)
analysis.setInterimFields(interim_fields)
analysis.setMaxTimeAllowed(service.getMaxTimeAllowed())
analysis.unmarkCreationFlag()
analysis.reindexObject()
# Trigger the intitialization event of the new object
zope.event.notify(ObjectInitializedEvent(analysis))
# Perform the appropriate workflow action
try:
workflow_action = 'sampling_workflow' if workflow_enabled \
else 'no_sampling_workflow'
context.portal_workflow.doActionFor(analysis, workflow_action)
except WorkflowException:
# The analysis may have been transitioned already!
# I am leaving this code here though, to prevent regression.
pass
# Return the newly created analysis
return analysis
def get_significant_digits(numeric_value):
"""
Returns the precision for a given floatable value.
If value is None or not floatable, returns None.
    Returns negative values for absolute values below 1, and 0 or positive
    values for absolute values of 1 or above.
:param numeric_value: the value to get the precision from
:return: the numeric_value's precision
"""
try:
numeric_value = float(numeric_value)
    except (TypeError, ValueError):
return None
if numeric_value == 0:
return 0
return int(math.floor(math.log10(abs(numeric_value))))
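# Hedged usage sketch (not part of the original module); the expected values
# below simply follow from floor(log10(abs(x))) as implemented above:
#   >>> get_significant_digits(32092)
#   4
#   >>> get_significant_digits(5.234)
#   0
#   >>> get_significant_digits(0.022)
#   -2
#   >>> get_significant_digits("n/a") is None
#   True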
def format_uncertainty(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted uncertainty according to the analysis, result
and decimal mark specified following these rules:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
    a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the uncertainty will
be formatted in scientific notation. The uncertainty exponential
value used will be the same as the one used for the result. The
uncertainty will be rounded according to the same precision as
the result.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 0.004E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the uncertainty will be
formatted as decimal notation and the uncertainty will be
rounded one position after reaching the last 0 (precision
calculated according to the uncertainty value).
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 0.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above applies, but the
precision used for rounding the uncertainty is not calculated from
the uncertainty neither the result. The fixed length precision is
used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
If the result is not floatable or no uncertainty defined, returns
an empty string.
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result of the analysis. Used to retrieve and/or
calculate the precision and/or uncertainty
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:return: the formatted uncertainty
"""
try:
result = float(result)
    except (TypeError, ValueError):
return ""
service = analysis.getService()
uncertainty = service.getUncertainty(result)
if uncertainty is None:
return ""
# Scientific notation?
# Get the default precision for scientific notation
threshold = service.getExponentialFormatPrecision()
# Current result precision is above the threshold?
sig_digits = get_significant_digits(result)
negative = sig_digits < 0
sign = '-' if negative else ''
sig_digits = abs(sig_digits)
sci = sig_digits >= threshold and sig_digits > 0
formatted = ''
if sci:
# Scientific notation
# 3.2014E+4
        if negative:
res = float(uncertainty)*(10**sig_digits)
else:
res = float(uncertainty)/(10**sig_digits)
res = float(str("%%.%sf" % (sig_digits-1)) % res)
res = int(res) if res.is_integer() else res
if sciformat in [2,3,4,5]:
if sciformat == 2:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"x10^",sign,sig_digits)
elif sciformat == 3:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"x10<sup>",sign,sig_digits,"</sup>")
elif sciformat == 4:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"·10^",sign,sig_digits)
elif sciformat == 5:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"·10<sup>",sign,sig_digits,"</sup>")
else:
# Default format: aE^+b
sig_digits = "%02d" % sig_digits
formatted = "%s%s%s%s" % (res,"e",sign,sig_digits)
#formatted = str("%%.%se" % sig_digits) % uncertainty
else:
# Decimal notation
prec = service.getPrecision(result)
prec = prec if prec else ''
formatted = str("%%.%sf" % prec) % uncertainty
return formatDecimalMark(formatted, decimalmark)
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance to the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
       formatted as decimal notation and the result will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above applies, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:return: the formatted result
"""
try:
result = float(result)
    except (TypeError, ValueError):
return result
service = analysis.getService()
# Scientific notation?
# Get the default precision for scientific notation
threshold = service.getExponentialFormatPrecision()
# Current result precision is above the threshold?
sig_digits = get_significant_digits(result)
negative = sig_digits < 0
sign = '-' if negative else ''
sig_digits = abs(sig_digits)
sci = sig_digits >= threshold
formatted = ''
if sci:
# Scientific notation
if sciformat in [2,3,4,5]:
            if negative:
res = float(result)*(10**sig_digits)
else:
res = float(result)/(10**sig_digits)
res = float(str("%%.%sf" % (sig_digits-1)) % res)
res = int(res) if res.is_integer() else res
if sciformat == 2:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"x10^",sign,sig_digits)
elif sciformat == 3:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"x10<sup>",sign,sig_digits,"</sup>")
elif sciformat == 4:
# ax10^b or ax10^-b
formatted = "%s%s%s%s" % (res,"·10^",sign,sig_digits)
elif sciformat == 5:
# ax10<super>b</super> or ax10<super>-b</super>
formatted = "%s%s%s%s%s" % (res,"·10<sup>",sign,sig_digits,"</sup>")
else:
# Default format: aE^+b
formatted = str("%%.%se" % sig_digits) % result
else:
# Decimal notation
prec = service.getPrecision(result)
prec = prec if prec else ''
formatted = str("%%.%sf" % prec) % result
formatted = str(int(float(formatted))) if float(formatted).is_integer() else formatted
return formatDecimalMark(formatted, decimalmark)
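# Hedged illustration of the behaviour documented above (values are taken from
# the docstring examples; the analysis object is assumed, not constructed here):
#   format_numeric_result(analysis, 32092)   # ExponentialFormatPrecision=4 -> scientific notation, e.g. 3.2092e+04
#   format_numeric_result(analysis, 5.234)   # below the threshold -> decimal notation, e.g. 5.2
#   format_numeric_result(analysis, "N/A")   # non-numeric values are returned unchanged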
|
DeBortoliWines/Bika-LIMS
|
bika/lims/utils/analysis.py
|
Python
|
agpl-3.0
| 11,236
|
[
"VisIt"
] |
c157fda1de9fe1896ec291ecaa050bca54f8b54e9c316adbc5a46a993cde1930
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import os
import sys
import numpy as np
from scipy import ndimage
sys.path.append("../..")
# try to import the PIL Image
try:
from PIL import Image
except ImportError:
import Image
import matplotlib.pyplot as plt
import logging
from builtins import range
from facerec.feature import SpatialHistogram
from facerec.distance import ChiSquareDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.lbp import LPQ, ExtendedLBP
from facerec.validation import SimpleValidation, precision
from facerec.util import shuffle_array
EXPERIMENT_NAME = "LocalPhaseQuantizationExperiment"
# ITER_MAX is the number of experimental runs, as described in the
# original paper. For testing purposes, it was set to 1, but it
# should be set to a higher value to get at least a little confidence
# in the results.
ITER_MAX = 1
class FileNameFilter:
"""
Base class used for filtering files.
"""
def __init__(self, name):
self._name = name
def __call__(self, filename):
return True
def __repr__(self):
return "FileNameFilter (name=%s)" % (self._name)
class YaleBaseFilter(FileNameFilter):
"""
This Filter filters files, based on their filetype ending (.pgm) and
their azimuth and elevation. The higher the angle, the more shadows in
the face. This is useful for experiments with illumination and
preprocessing.
"""
def __init__(self, min_azimuth, max_azimuth, min_elevation, max_elevation):
FileNameFilter.__init__(self, "Filter YaleFDB Subset1")
self._min_azimuth = min_azimuth
self._max_azimuth = max_azimuth
self._min_elevation = min_elevation
self._max_elevation = max_elevation
def __call__(self, filename):
# We only want the PGM files:
filetype = filename[-4:]
if filetype != ".pgm":
return False
# There are "Ambient" PGM files, ignore them:
if "Ambient" in filename:
return False
azimuth = abs(int(filename[12:16]))
elevation = abs(int(filename[17:20]))
# Now filter based on angles:
if azimuth < self._min_azimuth or azimuth > self._max_azimuth:
return False
if elevation < self._min_elevation or elevation > self._max_elevation:
return False
return True
def __repr__(self):
return "Yale FDB Filter (min_azimuth=%s, max_azimuth=%s, min_elevation=%s, max_elevation=%s)" % (self.min_azimuth, self.max_azimuth, self.min_elevation, self.max_elevation)
def read_images(path, fileNameFilter=FileNameFilter("None"), sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
        sz: A tuple with the target size; images are resized to it on the fly if given.
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
if fileNameFilter(filename):
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
im = im.resize(sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError as e:
print("I/O error: {0}".format(e))
raise e
except:
print("Unexpected error: {0}".format(sys.exc_info()[0]))
raise
c = c+1
return [X,y]
def apply_gaussian(X, sigma):
"""A simple function to apply a Gaussian Blur on each image in X.
Args:
X: A list of images.
sigma: sigma to apply
Returns:
Y: The processed images
"""
return np.array([ndimage.gaussian_filter(x, sigma) for x in X])
def results_to_list(validation_results):
return [precision(result.true_positives,result.false_positives) for result in validation_results]
def partition_data(X, y):
"""
Shuffles the input data and splits it into a new set of images. This resembles the experimental setup
used in the paper on the Local Phase Quantization descriptor in:
"Recognition of Blurred Faces Using Local Phase Quantization", Timo Ahonen, Esa Rahtu, Ville Ojansivu, Janne Heikkila
What it does is to build a subset for each class, so it has 1 image for training and the rest for testing.
The original dataset is shuffled for each call, hence you always get a new partitioning.
"""
Xs,ys = shuffle_array(X,y)
# Maps index to class:
mapping = {}
for i in range(len(y)):
yi = ys[i]
try:
mapping[yi].append(i)
except KeyError:
mapping[yi] = [i]
# Get one image for each subject:
Xtrain, ytrain = [], []
Xtest, ytest = [], []
# Finally build partition:
    for key, indices in mapping.items():
# Add images:
Xtrain.extend([ Xs[i] for i in indices[:1] ])
ytrain.extend([ ys[i] for i in indices[:1] ])
Xtest.extend([ Xs[i] for i in indices[1:20]])
ytest.extend([ ys[i] for i in indices[1:20]])
# Return shuffled partitions:
return Xtrain, ytrain, Xtest, ytest
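# Hedged usage sketch (assumes X and y are the parallel lists returned by
# read_images; names are illustrative):
#   Xtrain, ytrain, Xtest, ytest = partition_data(X, y)
#   -> exactly one training image per subject, up to 19 test images per
#      subject, with a fresh shuffle on every call.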
if __name__ == "__main__":
# This is where we write the results to, if an output_dir is given
# in command line:
out_dir = None
# You'll need at least a path to your image data, please see
# the tutorial coming with this source code on how to prepare
# your image data:
if len(sys.argv) < 2:
print("USAGE: lpq_experiment.py </path/to/images>")
sys.exit()
# Define filters for the Dataset:
yale_subset_0_40 = YaleBaseFilter(0, 40, 0, 40)
    # Now read in the image data. Apply filters, scale to 64 x 64 pixel:
[X,y] = read_images(sys.argv[1], yale_subset_0_40, sz=(64,64))
# Set up a handler for logging:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# The models we want to evaluate:
model0 = PredictableModel(feature=SpatialHistogram(lbp_operator=ExtendedLBP()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
model1 = PredictableModel(feature=SpatialHistogram(lbp_operator=LPQ()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
# The sigmas we'll apply for each run:
sigmas = [0]
print("The experiment will be run %s times!" % ITER_MAX)
# Initialize experiments (with empty results):
experiments = {}
experiments['lbp_model'] = { 'model': model0, 'results' : {}, 'color' : 'r', 'linestyle' : '--', 'marker' : '*'}
experiments['lpq_model'] = { 'model': model1, 'results' : {}, 'color' : 'b', 'linestyle' : '--', 'marker' : 's'}
# Loop to acquire the results for each experiment:
for sigma in sigmas:
print("Setting sigma=%s" % sigma)
        for key, value in experiments.items():
print("Running experiment for model=%s" % key)
# Define the validators for the model:
cv0 = SimpleValidation(value['model'])
for iteration in range(ITER_MAX):
print("Repeating experiment %s/%s." % (iteration + 1, ITER_MAX))
# Split dataset according to the papers description:
Xtrain, ytrain, Xtest, ytest = partition_data(X,y)
# Apply a gaussian blur on the images:
Xs = apply_gaussian(Xtest, sigma)
# Run each validator with the given data:
experiment_description = "%s (iteration=%s, sigma=%.2f)" % (EXPERIMENT_NAME, iteration, sigma)
cv0.validate(Xtrain, ytrain, Xs, ytest, experiment_description)
# Get overall results:
true_positives = sum([validation_result.true_positives for validation_result in cv0.validation_results])
false_positives = sum([validation_result.false_positives for validation_result in cv0.validation_results])
# Calculate overall precision:
prec = precision(true_positives,false_positives)
# Store the result:
experiments[key]['results'][sigma] = prec
# Make a nice plot of this textual output:
fig = plt.figure()
# Holds the legend items:
plot_legend = []
# Add the Validation results:
    for experiment_name, experiment_definition in experiments.items():
        print(experiment_name, experiment_definition)
results = experiment_definition['results']
        (xvalues, yvalues) = zip(*[(k,v) for k,v in results.items()])
# Add to the legend:
plot_legend.append(experiment_name)
# Put the results into the plot:
plot_color = experiment_definition['color']
plot_linestyle = experiment_definition['linestyle']
plot_marker = experiment_definition['marker']
plt.plot(sigmas, yvalues, linestyle=plot_linestyle, marker=plot_marker, color=plot_color)
# Put the legend below the plot (TODO):
plt.legend(plot_legend, prop={'size':6}, numpoints=1, loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=True, ncol=1)
# Scale y-axis between 0,1 to see the Precision:
plt.ylim(0,1)
plt.xlim(-0.2, max(sigmas) + 1)
# Finally add the labels:
plt.title(EXPERIMENT_NAME)
plt.ylabel('Precision')
plt.xlabel('Sigma')
fig.subplots_adjust(bottom=0.5)
    # Save the figure and we are out of here!
plt.savefig("lpq_experiment.png", bbox_inches='tight',dpi=100)
|
bytefish/facerec
|
py/apps/scripts/lpq_experiment.py
|
Python
|
bsd-3-clause
| 10,585
|
[
"Gaussian"
] |
96bb617f1d02e49e70b5c633e2a629ec8e22e2c5f579289e847a47cdc1e2499e
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
from six.moves import reduce
from .. import core
from ..layers import utils
from ..layers import nn as F
from .. import dygraph_utils
from . import layers
from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program, _global_flags
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..param_attr import ParamAttr
from ..initializer import Normal, Constant, NumpyArrayInitializer
from .. import unique_name
from .layer_object_helper import LayerObjectHelper
from ..data_feeder import check_variable_and_dtype, check_type
import numpy as np
import numbers
import logging
import os
import paddle.utils.deprecated as deprecated
from paddle import _C_ops
__all__ = [
'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu',
'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm',
'SpectralNorm', 'TreeConv', 'Flatten'
]
class Conv2D(layers.Layer):
r"""
This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW format, where N is batch size, C is the number of
the feature map, H is the height of the feature map, and W is the width of the feature map.
Filter's shape is [MCHW] , where M is the number of output feature map,
C is the number of input feature map, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input feature map divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \\sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of filter. It is as same as the output
feature map.
filter_size (int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
padding (int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups (int, optional): The groups number of the Conv2D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filter of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Raises:
ValueError: if ``use_cudnn`` is not a bool value.
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D
import numpy as np
data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
with fluid.dygraph.guard():
conv2d = Conv2D(3, 2, 3)
data = to_variable(data)
conv = conv2d(data)
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
assert param_attr is not False, "param_attr should not be False here."
super(Conv2D, self).__init__()
if (core.is_compiled_with_cuda() and paddle.fluid.get_flags(
"FLAGS_conv2d_disable_cudnn")["FLAGS_conv2d_disable_cudnn"]):
use_cudnn = False
self._num_channels = num_channels
self._groups = groups
self._stride = utils.convert_to_list(stride, 2, 'stride')
self._padding = utils.convert_to_list(padding, 2, 'padding')
self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
self._act = act
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if (self._num_channels == self._groups and
num_filters % self._num_channels == 0 and
not self._use_cudnn and not self._use_mkldnn):
self._l_type = 'depthwise_conv2d'
else:
self._l_type = 'conv2d'
# NPU only supports depthwise_conv2d when "input_channel = output_channel = groups"
if core.is_compiled_with_npu():
if (self._num_channels == self._groups and
self._num_channels == self._num_filters):
self._l_type = 'depthwise_conv2d'
else:
self._l_type = 'conv2d'
self._num_channels = num_channels
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 2, 'filter_size')
filter_shape = [self._num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[
1] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
if in_dygraph_mode() and (self._l_type == 'conv2d' or
self._l_type == 'depthwise_conv2d'):
attrs = ('strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups
if self._groups else 1, 'use_cudnn', self._use_cudnn,
'use_mkldnn', self._use_mkldnn)
out = _C_ops.conv2d(input, self.weight, *attrs)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias, self.bias, 1, use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
inputs = {
'Input': [input],
'Filter': [self.weight],
}
attrs = {
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': self._use_mkldnn,
}
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], 'Conv2D')
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={
'Input': input,
'Filter': self.weight,
},
outputs={"Output": pre_bias},
attrs=attrs)
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1,
'use_mkldnn': self._use_mkldnn})
else:
pre_act = pre_bias
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_act, act=self._act)
class Conv3D(layers.Layer):
r"""
    **Convolution3D Layer**
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are multidimensional tensors with a shape of
:math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar to Convolution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of filter. It is as same as the output image channel.
filter_size (int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square, filter_size_depth = filter_size_height
= filter_size_width = filter_size.
stride (int|tuple, optional): The stride size. If stride is a tuple, it must
contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
stride_D = stride_H = stride_W = stride. The default value is 1.
padding (int|tuple, optional): The padding size. If padding is a tuple, it must
contain three integers, (padding_D, padding_H, padding_W). Otherwise, the
padding_D = padding_H = padding_W = padding. The default value is 0.
dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups (int, optional): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3d = fluid.dygraph.nn.Conv3D(
num_channels=3, num_filters=2, filter_size=3, act="relu")
ret = conv3d(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
assert param_attr is not False, "param_attr should not be False here."
super(Conv3D, self).__init__()
self._num_channels = num_channels
self._groups = groups
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._act = act
self._use_cudnn = use_cudnn
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 3, 'filter_size')
filter_shape = [self._num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
2] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='conv3d',
inputs={
'Input': input,
'Filter': self.weight,
},
outputs={"Output": pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': False
})
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
class Conv3DTranspose(layers.Layer):
r"""
    **Convolution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
two elements. These two elements represent height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
**Note**:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape,
so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
        If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = \
        H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
size must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of the filter. It is as same as the output
image channel.
filter_size(int|tuple): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
padding(int|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
The default value is 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
The default value is 1.
dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`.
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3dTranspose = fluid.dygraph.nn.Conv3DTranspose(
num_channels=3,
num_filters=12,
filter_size=12,
use_cudnn=False)
ret = conv3dTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
super(Conv3DTranspose, self).__init__()
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._param_attr = param_attr
self._num_channels = num_channels
self._filter_size = filter_size
self._groups = 1 if groups is None else groups
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._bias_attr = bias_attr
self._act = act
self._dtype = dtype
self._filter_size = utils.convert_to_list(
self._filter_size, 3, 'conv3d_transpose.filter_size')
filter_shape = [self._num_channels, self._num_filters // self._groups
] + self._filter_size
self.weight = self.create_parameter(
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="conv3d_transpose",
inputs={'Input': [input],
'Filter': [self.weight]},
outputs={'Output': pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn
})
if self._bias_attr:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(pre_act, act=self._act)
class Pool2D(layers.Layer):
r"""
This interface is used to construct a callable object of the ``Pool2D`` class.
For more details, refer to code examples.
The pooling2d operation calculates the output based on the input, pool_type and pool_size, pool_stride,
pool_padding parameters.Input and output are in NCHW format, where N is batch size, C is the number of feature map,
H is the height of the feature map, and W is the width of the feature map.
Parameters(ksize, strides, paddings) are two elements. These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.
Example:
- Input:
Input shape: :math:`(N, C, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C, H_{out}, W_{out})`
If ``ceil_mode`` = False:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
If ``ceil_mode`` = True:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
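        For instance (an illustrative configuration, not a default): with
        :math:`H_{in} = W_{in} = 32`, ``pool_size=2``, ``pool_stride=1``,
        ``pool_padding=0`` and ``ceil_mode=False``, the formulas above give
        :math:`H_{out} = W_{out} = (32 - 2 + 0) / 1 + 1 = 31`.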
If ``exclusive`` = False:
.. math::
hstart &= i * strides[0] - paddings[0] \\\\
hend &= hstart + ksize[0] \\\\
wstart &= j * strides[1] - paddings[1] \\\\
wend &= wstart + ksize[1] \\\\
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
If ``exclusive`` = True:
.. math::
hstart &= max(0, i * strides[0] - paddings[0])\\\\
hend &= min(H, hstart + ksize[0]) \\\\
wstart &= max(0, j * strides[1] - paddings[1]) \\\\
wend & = min(W, wstart + ksize[1]) \\\\
Output(i ,j) & = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
Parameters:
pool_size (int or list or tuple, optional): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be a square of an int. Default: -1.
pool_type(str, optional) : The pooling type, can be "max" for max-pooling and "avg" for average-pooling.
Default: max.
pool_stride (int or list or tuple, optional): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,
the pool stride size will be a square of an int. Default: 1.
pool_padding (int or list or tuple, optional): The padding size for pooling operation.
If ``pool_padding`` is a tuple,
it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).
Otherwise, the padding size for pooling operation will be a square of an int. Default: 0.
global_pooling (bool, optional): Whether to use the global pooling. If global_pooling = true,
kernel size and paddings will be ignored. Default: False.
use_cudnn (bool, optional): Only used in cudnn kernel, need install cudnn. Default: True.
ceil_mode (bool, optional): Whether to use the ceil function to calculate output height and width.
False is the default. If it is set to False, the floor function will be used. Default: False.
exclusive (bool, optional): Whether to exclude padding points in average pooling mode. Default: True.
data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
``[batch_size, input_channels, input_height, input_width]``. When it is `"NHWC"`, the data is
stored in the order of: ``[batch_size, input_height, input_width, input_channels]``
Returns:
None
Raises:
ValueError: If ``pool_type`` is not "max" nor "avg".
ValueError: If ``global_pooling`` is False and ``pool_size`` is -1.
ValueError: If ``use_cudnn`` is not a bool value.
ValueError: If ``data_format`` is not "NCHW" nor "NHWC".
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
with fluid.dygraph.guard():
            data = np.random.random((3, 32, 32, 5)).astype('float32')
pool2d = fluid.dygraph.Pool2D(pool_size=2,
pool_type='max',
pool_stride=1,
global_pooling=False)
pool2d_res = pool2d(to_variable(data))
"""
def __init__(self,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
exclusive=True,
data_format="NCHW"):
        data_format = data_format.upper()  # support NHWC, nhwc, etc.
        pool_type = pool_type.lower()  # support max, Max, etc.
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When the global_pooling is False, pool_size must be passed "
"and be a valid value. Received pool_size: " + str(pool_size))
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
super(Pool2D, self).__init__()
self._pool_type = pool_type
self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
self._pool_padding = utils.convert_to_list(pool_padding, 2,
'pool_padding')
self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
self._global_pooling = global_pooling
self._use_cudnn = use_cudnn
self._ceil_mode = ceil_mode
self._exclusive = exclusive
self._data_format = data_format
self._l_type = 'pool2d'
def forward(self, input):
if in_dygraph_mode():
attrs = ('pooling_type', self._pool_type, 'ksize', self._pool_size,
'global_pooling', self._global_pooling, 'strides',
self._pool_stride, 'paddings', self._pool_padding,
'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode,
'use_mkldnn', self._use_mkldnn, 'exclusive',
self._exclusive, 'data_format', self._data_format)
return _C_ops.pool2d(input, *attrs)
check_variable_and_dtype(
input, 'input', ['int8', 'uint8', 'float16', 'float32', 'float64'],
'Pool2D')
attrs = {
"pooling_type": self._pool_type,
"ksize": self._pool_size,
"global_pooling": self._global_pooling,
"strides": self._pool_stride,
"paddings": self._pool_padding,
"use_cudnn": self._use_cudnn,
"ceil_mode": self._ceil_mode,
"use_mkldnn": self._use_mkldnn,
"exclusive": self._exclusive,
"data_format": self._data_format,
}
inputs = {"X": [input]}
pool_out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs=attrs)
return pool_out
class Linear(layers.Layer):
"""
Fully-connected linear transformation layer:
.. math::
Out = Act({XW + b})
where :math:`X` is the input Tensor, :math:`W` and :math:`b` are weight and bias respectively.
Linear layer takes only one ``Tensor`` input.
The Linear layer multiplies input tensor with weight matrix and
produces an output Tensor of shape [N, *, `output_dim`],
where N is batch size and `*` means any number of additional dimensions.
If ``bias_attr`` is not None, a bias variable will be created and added to the output.
Finally, if ``act`` is not None, it will be applied to the output as well.
Parameters:
input_dim(int): The number of input units in this layer.
output_dim(int): The number of output units in this layer.
param_attr(ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable
weights(Parameter) of this layer. Default: None.
bias_attr(ParamAttr or list of ParamAttr, optional): The attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
act(str, optional): Activation to be applied to the output of this layer. Default: None.
dtype(str, optional): Dtype used for weight, it can be "float32" or "float64". Default: "float32".
Attributes:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
res = linear(data) # [30, 10, 64]
"""
def __init__(self,
input_dim,
output_dim,
param_attr=None,
bias_attr=None,
act=None,
dtype="float32"):
super(Linear, self).__init__()
self._act = act
self._dtype = dtype
self.weight = self.create_parameter(
shape=[input_dim, output_dim],
attr=param_attr,
dtype=dtype,
is_bias=False)
self.bias = self.create_parameter(
shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True)
self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
def forward(self, input):
if in_dygraph_mode():
pre_bias = _varbase_creator(dtype=input.dtype)
_C_ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
'transpose_Y', False, "alpha", 1, "use_mkldnn",
self._use_mkldnn)
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias,
self.bias,
axis=len(input.shape) - 1,
use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], "Linear")
attrs = {
"transpose_X": False,
"transpose_Y": False,
"alpha": 1,
"use_mkldnn": self._use_mkldnn,
}
inputs = {"X": [input], "Y": [self.weight]}
tmp = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="matmul", inputs=inputs, outputs={"Out": tmp}, attrs=attrs)
if self.bias is not None:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [tmp],
'Y': [self.bias]},
outputs={'Out': [pre_activation]},
attrs={
'axis': len(input.shape) - 1,
'use_mkldnn': self._use_mkldnn
})
else:
pre_activation = tmp
return self._helper.append_activation(pre_activation, act=self._act)
class InstanceNorm(layers.Layer):
r"""
This interface is used to construct a callable object of the ``InstanceNorm`` class.
For more details, refer to code examples.
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
\\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Note:
`H` means height of feature map, `W` means width of feature map.
Parameters:
num_channels(int): Indicate the number of channels of the input ``Tensor``.
epsilon(float, optional): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
one. If it is set to False, will not create param_attr. Default: None.
bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
If it is set to False, will not create bias_attr. Default: None.
dtype(str, optional): Indicate the data type of the input ``Tensor``,
which can be float32 or float64. Default: float32.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
import paddle
# x's shape is [1, 3, 1, 2]
x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
instanceNorm = paddle.nn.InstanceNorm(3)
ret = instanceNorm(x)
# ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995]
print(ret)
"""
def __init__(self,
num_channels,
epsilon=1e-5,
param_attr=None,
bias_attr=None,
dtype='float32'):
super(InstanceNorm, self).__init__()
if param_attr == False or bias_attr == False:
assert bias_attr == param_attr, "param_attr and bias_attr must both be set to False at the same time in InstanceNorm"
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if param_attr != False and bias_attr != False:
self.scale = self.create_parameter(
attr=self._param_attr,
shape=[num_channels],
dtype=self._dtype,
default_initializer=Constant(1.0),
is_bias=False)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[num_channels],
dtype=self._dtype,
default_initializer=Constant(0.0),
is_bias=True)
else:
self.scale = None
self.bias = None
def forward(self, input):
if in_dygraph_mode():
out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
'epsilon', self._epsilon)
return out
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
"InstanceNorm")
attrs = {"epsilon": self._epsilon}
if self.scale and self.bias:
inputs = {"X": [input], "Scale": [self.scale], "Bias": [self.bias]}
else:
inputs = {"X": [input]}
saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
instance_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)
outputs = {
"Y": [instance_norm_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
self._helper.append_op(
type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return instance_norm_out
class BatchNorm(layers.Layer):
r"""
This interface is used to construct a callable object of the ``BatchNorm`` class.
For more details, refer to code examples.
It implements the function of the Batch Normalization Layer and can be used
as a normalizer function for conv2d and fully connected operations.
The data is normalized by the mean and variance of the channel based on the current batch data.
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
When use_global_stats = False, the :math:`\mu_{\beta}`
and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
Calculated as follows:
.. math::
\mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &
//\ mini-batch\ mean \\
\sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \qquad &
//\ mini-batch\ variance \\
- :math:`x` : mini-batch data
- :math:`m` : the size of the mini-batch data
When use_global_stats = True, the :math:`\mu_{\beta}`
and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
They are global or running statistics (moving_mean and moving_variance), which are
usually obtained from a pre-trained model. Calculated as follows:
.. math::
moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\
moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\
The normalization function formula is as follows:
.. math::
\hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift
- :math:`\epsilon` : a small value added to the variance to prevent division by zero
- :math:`\gamma` : trainable scale parameter
- :math:`\beta` : trainable shift parameter
Parameters:
num_channels(int): Indicate the number of channels of the input ``Tensor``.
act(str, optional): Activation to be applied to the output of batch normalization. Default: None.
is_test (bool, optional): A flag indicating whether it is in the test phase or not.
This flag only has an effect in static graph mode. For dygraph mode, please use ``eval()``.
Default: False.
momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
dtype(str, optional): Indicate the data type of the input ``Tensor``,
which can be float32 or float64. Default: float32.
data_layout(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
in_place(bool, optional): Make the input and output of batch norm reuse memory. Default: False.
moving_mean_name(str, optional): The name of moving_mean which stores the global mean. Default: None.
moving_variance_name(str, optional): The name of the moving_variance which stores the global variance. Default: None.
do_model_average_for_mean_and_var(bool, optional): Whether parameter mean and variance should do model
average when model average is enabled. Default: True.
use_global_stats(bool, optional): Whether to use global mean and
variance. In inference or test mode, setting use_global_stats to True
or is_test to True is equivalent.
In train mode, when use_global_stats is set to True, the global mean
and variance are also used during the training period. Default: False.
trainable_statistics(bool, optional): Whether to calculate mean and var in eval mode. In eval mode, when
setting trainable_statistics True, mean and variance will be calculated by current batch statistics.
Default: False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
batch_norm = fluid.BatchNorm(10)
hidden1 = batch_norm(x)
"""
def __init__(self,
num_channels,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
dtype='float32',
data_layout='NCHW',
in_place=False,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
trainable_statistics=False):
super(BatchNorm, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"]
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
if dtype == "float16":
self._dtype = "float32"
else:
self._dtype = dtype
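# Parameters are kept in float32 even when the input dtype is float16, which is
# the usual mixed-precision convention for normalization layers.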
param_shape = [num_channels]
# create parameter
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
self.weight.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
self.bias.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.
self._mean = self.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._mean.stop_gradient = True
self._variance = self.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._variance.stop_gradient = True
self._in_place = in_place
self._data_layout = data_layout
self._momentum = momentum
self._epsilon = epsilon
self._is_test = is_test
self._fuse_with_relu = False
self._use_global_stats = use_global_stats
self._trainable_statistics = trainable_statistics
def forward(self, input):
# create output
# mean and mean_out share the same memory
mean_out = self._mean
# variance and variance out share the same memory
variance_out = self._variance
if in_dygraph_mode():
attrs = ("momentum", self._momentum, "epsilon", self._epsilon,
"is_test", not self.training, "data_layout",
self._data_layout, "use_mkldnn", self._use_mkldnn,
"fuse_with_relu", self._fuse_with_relu, "use_global_stats",
self._use_global_stats, 'trainable_statistics',
self._trainable_statistics)
batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
input, self.weight, self.bias, self._mean, self._variance,
mean_out, variance_out, *attrs)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], 'BatchNorm')
attrs = {
"momentum": self._momentum,
"epsilon": self._epsilon,
"is_test": self._is_test,
"data_layout": self._data_layout,
"use_mkldnn": False,
"fuse_with_relu": self._fuse_with_relu,
"use_global_stats": self._use_global_stats,
"trainable_statistics": self._trainable_statistics,
}
inputs = {
"X": [input],
"Scale": [self.weight],
"Bias": [self.bias],
"Mean": [self._mean],
"Variance": [self._variance]
}
saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
reserve_space = self._helper.create_variable_for_type_inference(
dtype=self._helper.input_dtype(input), stop_gradient=True)
batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference(
self._dtype)
outputs = {
"Y": [batch_norm_out],
"MeanOut": [mean_out],
"VarianceOut": [variance_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
if reserve_space is not None:
outputs["ReserveSpace"] = [reserve_space]
self._helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(batch_norm_out, self._act)
class Dropout(layers.Layer):
"""
This interface is used to construct a callable object of the ``Dropout`` class.
For more details, refer to code examples.
Drop or keep each element of input independently. Dropout is a regularization
technique for reducing overfitting by preventing neuron co-adaption during
training. The dropout operator randomly sets (according to the given dropout
probability) the outputs of some units to zero, while the others remain
unchanged.
The dropout layer can be removed for efficiency reasons.
Parameters:
p (float, optional): Probability of setting units to zero. Default: 0.5
seed (int, optional): A Python integer used to create random seeds. If this
parameter is set to None, a random seed is used.
NOTE: If an integer seed is given, the same output
units will always be dropped. DO NOT use a fixed seed in training. Default: None.
dropout_implementation(string, optional): ['downgrade_in_infer'(default)|'upscale_in_train']
1. downgrade_in_infer(default), downgrade the outcome at inference
- train: out = input * mask
- inference: out = input * (1.0 - p)
(mask is a tensor with the same shape as the input; its values are 0 or 1,
and the ratio of zeros is dropout_prob)
2. upscale_in_train, upscale the outcome at training time
- train: out = input * mask / ( 1.0 - p )
- inference: out = input
(mask is a tensor with the same shape as the input; its values are 0 or 1,
and the ratio of zeros is p)
is_test (bool, optional): A flag indicating whether it is in the test phase or not.
This flag only has an effect in static graph mode. For dygraph mode, please use ``eval()``.
Default: False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
m = fluid.dygraph.Dropout(p=0.5)
droped_train = m(x)
# switch to eval mode
m.eval()
droped_eval = m(x)
"""
def __init__(self,
p=0.5,
seed=None,
dropout_implementation="downgrade_in_infer",
is_test=False):
super(Dropout, self).__init__()
assert isinstance(p, (float, int)), "p argument should be a number"
assert 0 <= p <= 1, "p argument should between 0 and 1"
self._dropout_prob = p
assert seed is None or isinstance(
seed, int), "seed argument should be None or a integer"
self._seed = seed
assert dropout_implementation in (
'downgrade_in_infer', 'upscale_in_train'
), "dropout_implementation argument should be 'downgrade_in_infer' or 'upscale_in_train'"
self._dropout_implementation = dropout_implementation
self._is_test = is_test
def forward(self, input):
# fast return for p == 0
if self._dropout_prob == 0:
return input
prog = default_main_program()
if (self._seed is None or self._seed == 0) and prog.random_seed != 0:
self._seed = prog.random_seed
attrs = {
'dropout_prob': self._dropout_prob,
'is_test': not self.training
if in_dygraph_mode() else self._is_test,
'fix_seed': self._seed is not None,
'seed': self._seed if self._seed is not None else 0,
'dropout_implementation': self._dropout_implementation,
}
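# In dygraph mode the attribute dict is flattened into an interleaved
# (key, value, key, value, ...) tuple, the calling convention expected by _C_ops.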
if in_dygraph_mode():
attrs = sum(attrs.items(), ())
out, mask = _C_ops.dropout(input, *attrs)
return out
out = self._helper.create_variable_for_type_inference(dtype=input.dtype)
mask = self._helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
self._helper.append_op(
type='dropout',
inputs={'X': [input]},
outputs={'Out': [out],
'Mask': [mask]},
attrs=attrs)
return out
class Embedding(layers.Layer):
r"""
:alias_main: paddle.nn.Embedding
:alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding
:old_api: paddle.fluid.dygraph.Embedding
**Embedding Layer**
This interface is used to construct a callable object of the ``Embedding`` class.
For specific usage, refer to code examples. It implements the function of the Embedding Layer.
This layer is used to lookup embeddings vector of ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
The shape of output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.
**Note:** The id in :attr:`input` must satisfy :math:`0 \leq id < size[0]` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
input.data = [[1, 3], [2, 4], [4, 127]]
input.shape = [3, 2]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
It will pad all-zero data when the id is 127.
Parameters:
size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size
of the dictionary of embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backward gradient update. It is recommended to set it to
True because sparse update is faster. But some optimizers do not support sparse update,
such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
:ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
:ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
In these cases, is_sparse must be False. Default: False.
is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
in multi-machine distributed CPU training. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever the lookup
encounters :math:`padding\_idx` in an id. The padding data will not be updated during training.
If set to None, it has no effect on the output. Default: None.
param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
is used to load custom or pre-trained word vectors. See code example 2 for details.
dtype(np.dtype|core.VarDesc.VarType|str): It refers to the data type of output Tensor.
It must be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy as np
# example 1
inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64')
inp_word.shape # [2, 3]
dict_size = 20
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
static_rlt3.shape # [2, 3, 32]
# example 2: load custom or pre-trained word vectors
weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
w_param_attrs = fluid.ParamAttr(
name="emb_weight",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True)
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
size=[128, 100],
param_attr= w_param_attrs,
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
"""
def __init__(self,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
super(Embedding, self).__init__()
self._size = size
self._is_sparse = is_sparse
self._is_distributed = is_distributed
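# Normalize padding_idx: None is mapped to -1, which the lookup op treats as
# "no padding"; a negative index is wrapped to vocab_size + padding_idx,
# matching the documented behavior.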
self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
self._param_attr = param_attr
self._dtype = dtype
self._remote_prefetch = self._is_sparse and (not self._is_distributed)
if self._remote_prefetch:
assert self._is_sparse is True and self._is_distributed is False
self.weight = self.create_parameter(
attr=self._param_attr,
shape=self._size,
dtype=self._dtype,
is_bias=False)
def forward(self, input):
if in_dygraph_mode():
return _C_ops.lookup_table_v2(
self.weight, input, 'is_sparse', self._is_sparse,
'is_distributed', self._is_distributed, 'remote_prefetch',
self._remote_prefetch, 'padding_idx', self._padding_idx)
check_variable_and_dtype(input, 'input',
['uint8', 'int8', 'int16', 'int32', 'int64'],
'Embedding')
attrs = {
'is_sparse': self._is_sparse,
'is_distributed': self._is_distributed,
'remote_prefetch': self._remote_prefetch,
'padding_idx': self._padding_idx
}
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='lookup_table_v2',
inputs={'Ids': input,
'W': self.weight},
outputs={'Out': out},
attrs=attrs)
return out
class LayerNorm(layers.Layer):
r"""
:alias_main: paddle.nn.LayerNorm
:alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm
:old_api: paddle.fluid.dygraph.LayerNorm
This interface is used to construct a callable object of the ``LayerNorm`` class.
For more details, refer to code examples.
It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
The formula is as follows:
.. math::
\mu & = \frac{1}{H}\sum_{i=1}^{H} x_i
\sigma & = \sqrt{\frac{1}{H}\sum_{i=1}^{H}{(x_i - \mu)^2} + \epsilon}
y & = f(\frac{g}{\sigma}(x - \mu) + b)
- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
- :math:`H`: the number of hidden units in a layer
- :math:`\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.
Parameters:
normalized_shape(int or list or tuple): Input shape from an expected input of
size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
If it is a single integer, this module will normalize over the last dimension
which is expected to be of that specific size.
scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
omitted. If :attr:`shift` is True and :attr:`bias_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
act(str, optional): Activation to be applied to the output of layer normalization.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy
x = numpy.random.random((3, 32, 32)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
layerNorm = fluid.LayerNorm([32, 32])
ret = layerNorm(x)
"""
def __init__(self,
normalized_shape,
scale=True,
shift=True,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
dtype='float32'):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = [normalized_shape]
self._normalized_shape = list(normalized_shape)
self._scale = scale
self._shift = shift
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._dtype = dtype
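# The scale and bias are stored as single flattened vectors covering all
# normalized dimensions.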
param_shape = [np.prod(self._normalized_shape)]
if self._scale:
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
else:
if self._param_attr:
logging.warn("param_attr are only available with scale is True")
self.weight = None
if self._shift:
assert self._bias_attr is not False
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
else:
if self._bias_attr:
logging.warn("bias_attr are only available with shift is True")
self.bias = None
def forward(self, input):
input_shape = list(input.shape)
input_ndim = len(input_shape)
normalized_ndim = len(self._normalized_shape)
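# Normalization starts at the first axis not covered by normalized_shape;
# every trailing axis from begin_norm_axis onward is flattened and normalized
# together.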
self._begin_norm_axis = input_ndim - normalized_ndim
if input_ndim < normalized_ndim or input_shape[
self._begin_norm_axis:] != self._normalized_shape:
str_normalized_shape = str(self._normalized_shape)
raise ValueError(
'Given normalized_shape is ' + str_normalized_shape +
', expected input with shape [*, ' + str_normalized_shape[
1:] + ', but got input shape ' + str(input_shape))
if in_dygraph_mode():
pre_act, _, _ = _C_ops.layer_norm(
input, self.weight, self.bias, 'epsilon', self._epsilon,
'begin_norm_axis', self._begin_norm_axis)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'LayerNorm')
inputs = dict()
inputs['X'] = [input]
if self._scale:
inputs['Scale'] = [self.weight]
if self._shift:
inputs['Bias'] = [self.bias]
attrs = {
"epsilon": self._epsilon,
"begin_norm_axis": self._begin_norm_axis
}
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
layer_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs=attrs)
return self._helper.append_activation(layer_norm_out, act=self._act)
class GRUUnit(layers.Layer):
"""
**GRU unit layer**
It creates a callable object from GRUUnit class.
If origin_mode is True, then the equation of a gru step is from paper
`Learning Phrase Representations using RNN Encoder-Decoder for Statistical
Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
If origin_mode is False, then the equation of a gru step is from paper
`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling <https://arxiv.org/pdf/1412.3555.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot((1-u_t), h_{t-1}) + dot(u_t, m_t)
The inputs of the gru unit include :math:`z_t` and :math:`h_{t-1}`. In terms
of the equation above, :math:`z_t` is split into 3 parts -
:math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to
implement a full GRU unit operator for an input, a fully
connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`.
The terms :math:`u_t` and :math:`r_t` represent the update and reset gates
of the GRU cell. Unlike LSTM, GRU has one fewer gate. However, there is
an intermediate candidate hidden output, which is denoted by :math:`m_t`.
This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})`
and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.
Parameters:
size (int): The input dimension value.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
hidden-hidden weight matrix.
**Note**:
1. The shape of the weight matrix is :math:`[D, 3*D]`, where D is the hidden size.
2. All elements in the weight matrix can be divided into two parts. The first
part are weights of the update gate and reset gate with shape :math:`[D, 2*D]`,
and the second part are weights for candidate hidden state with shape :math:`[D, D]`.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default
value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias
of GRU.Note that the bias with :math:`[1, 3*D]` concatenates
the bias in the update gate, reset gate and candidate calculations.
If it is set to False, no bias will be applied to the update gate,
reset gate and candidate calculations. If it is set to None or one
attribute of ParamAttr, gru_unit will create ParamAttr as
bias_attr. If the Initializer of the bias_attr is not set, the bias
is initialized zero. The default value is None.
activation (str): The activation type for cell (actNode).
The default value is 'tanh'.
gate_activation (str): The activation type for gates (actGate).
The default value is 'sigmoid'.
dtype(str): The dtype of the layers. The data type can be set as
'float32', 'float64'. The default value is 'float32'.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
tuple: The hidden value, reset-hidden value and gate values. The hidden value
is a 2-D tensor with shape :math:`[T, D]` . The reset-hidden value is a
2-D tensor with shape :math:`[T, D]` . The gate value is a 2-D tensor with
shape :math:`[T, 3*D]`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy
lod = [[2, 4, 3]]
D = 5
T = sum(lod[0])
input = numpy.random.rand(T, 3 * D).astype('float32')
hidden_input = numpy.random.rand(T, D).astype('float32')
with fluid.dygraph.guard():
gru = fluid.dygraph.GRUUnit(size=D * 3)
dy_ret = gru(
base.to_variable(input), base.to_variable(hidden_input))
"""
def __init__(self,
size,
param_attr=None,
bias_attr=None,
activation='tanh',
gate_activation='sigmoid',
origin_mode=False,
dtype='float32'):
super(GRUUnit, self).__init__()
self._bias_attr = bias_attr
activation_dict = dict(
identity=0,
sigmoid=1,
tanh=2,
relu=3, )
self.activation = activation_dict[activation]
self.gate_activation = activation_dict[gate_activation]
self._dtype = dtype
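# `size` is the width of the projected input (3 * hidden size), so the hidden
# dimension D is size // 3; gate and candidate weights live in one [D, 3*D] matrix.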
size = size // 3
# create weight
self.weight = self.create_parameter(
attr=param_attr, shape=[size, 3 * size], dtype=dtype)
# create bias
bias_size = [1, 3 * size]
self._bias_size = bias_size
self.bias = self.create_parameter(
attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
def forward(self, input, hidden):
if in_dygraph_mode():
gate, reset_hidden_pre, updated_hidden = _C_ops.gru_unit(
input, hidden, self.weight, self.bias, 'activation',
self.activation, 'gate_activation', self.gate_activation)
return updated_hidden, reset_hidden_pre, gate
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'GRUUnit')
check_variable_and_dtype(hidden, 'hidden', ['float32', 'float64'],
'GRUUnit')
inputs = {
'Input': [input],
'HiddenPrev': [hidden],
'Weight': [self.weight]
}
if self.bias is not None:
inputs['Bias'] = [self.bias]
gate = self._helper.create_variable_for_type_inference(self._dtype)
reset_hidden_pre = self._helper.create_variable_for_type_inference(
self._dtype)
updated_hidden = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type='gru_unit',
inputs=inputs,
outputs={
'Gate': gate,
'ResetHiddenPrev': reset_hidden_pre,
'Hidden': updated_hidden,
},
attrs={
'activation': self.activation,
'gate_activation': self.gate_activation,
})
return updated_hidden, reset_hidden_pre, gate
class NCE(layers.Layer):
"""
This interface is used to construct a callable object of the ``NCE`` class.
For more details, refer to code examples.
It implements the function of the ``NCE`` loss function.
By default this function uses a uniform distribution for sampling, and it
computes and returns the noise-contrastive estimation training loss. See
`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`_ .
Parameters:
num_total_classes (int): Total number of classes in all samples.
dim (int): Dimension of input (possibly embedding dim).
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of nce. If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of nce.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
num_neg_samples (int, optional): The number of negative classes. The default value is 10.
sampler (str, optional): The sampler used to sample class from negative classes.
It can be 'uniform', 'log_uniform' or 'custom_dist'.
default: 'uniform'.
custom_dist (float[], optional): A float[] with size=num_total_classes.
It is used when sampler is set to 'custom_dist'.
custom_dist[i] is the probability of i-th class to be sampled.
Default: None.
seed (int, optional): The seed used in sampler. Default: 0.
is_sparse(bool, optional): The flag indicating whether to use sparse update. If is_sparse is True, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
window_size = 5
dict_size = 20
label_word = int(window_size // 2) + 1
inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')
nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
with fluid.dygraph.guard():
words = []
for i in range(window_size):
words.append(fluid.dygraph.base.to_variable(inp_word[i]))
emb = fluid.Embedding(
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
embs3 = []
for i in range(window_size):
if i == label_word:
continue
emb_rlt = emb(words[i])
embs3.append(emb_rlt)
embs3 = fluid.layers.concat(input=embs3, axis=1)
nce = fluid.NCE(
num_total_classes=dict_size,
dim=embs3.shape[1],
num_neg_samples=2,
sampler="custom_dist",
custom_dist=nid_freq_arr.tolist(),
seed=1,
param_attr='nce.w',
bias_attr='nce.b')
wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
nce_loss3 = nce(embs3, wl)
"""
def __init__(self,
num_total_classes,
dim,
sample_weight=None,
param_attr=None,
bias_attr=None,
num_neg_samples=None,
sampler="uniform",
custom_dist=None,
seed=0,
is_sparse=False,
dtype='float32'):
super(NCE, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._num_total_classes = num_total_classes
self._dtype = dtype
self._inputs = dict()
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
if sampler == "uniform":
sampler = 0
elif sampler == "log_uniform":
sampler = 1
elif sampler == "custom_dist":
assert custom_dist is not None
# assert isinstance(custom_dist, Variable)
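# Build Walker's alias tables for O(1) sampling from the custom distribution:
# classes whose scaled probability exceeds 1 go into `bigs`, the rest into
# `littles`, and each little slot is then topped up by a big donor in the loop below.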
custom_dist_len = len(custom_dist)
alias_probs_ = [0] * custom_dist_len
alias_ = [0] * custom_dist_len
bigs = []
littles = []
for i in range(custom_dist_len):
normal_prob = custom_dist[i] * custom_dist_len
if normal_prob - 1.0 > 0:
bigs.append((i, normal_prob))
elif 1.0 - normal_prob > 0:
littles.append((i, normal_prob))
else:
alias_probs_[i] = normal_prob
alias_[i] = -1
while len(bigs) and len(littles):
big = bigs.pop(0)
little = littles.pop(0)
big_idx = big[0]
big_prob = big[1]
alias_probs_[little[0]] = little[1]
alias_[little[0]] = big_idx
big_left = big[1] + little[1] - 1
if big_left - 1.0 > 0:
bigs.append((big_idx, big_left))
elif 1.0 - big_left > 0:
littles.append((big_idx, big_left))
else:
alias_probs_[big_idx] = big_left
alias_[big_idx] = -1
if len(bigs):
big = bigs.pop(0)
alias_probs_[big[0]] = 1.0
alias_[big[0]] = -1
if len(littles):
little = littles.pop(0)
alias_probs_[little[0]] = 1.0
alias_[little[0]] = -1
def _init_by_numpy_array(numpy_array):
ret = self.create_parameter(
attr=ParamAttr(),
shape=numpy_array.shape,
dtype=numpy_array.dtype,
default_initializer=NumpyArrayInitializer(numpy_array))
ret.stop_gradient = True
return ret
self._inputs['CustomDistProbs'] = _init_by_numpy_array(
np.array(custom_dist).astype('float32'))
self._inputs['CustomDistAlias'] = _init_by_numpy_array(
np.array(alias_).astype('int32'))
self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array(
np.array(alias_probs_).astype('float32'))
sampler = 2
else:
raise Exception("Unsupported sampler type.")
if num_neg_samples is None:
num_neg_samples = 10
else:
num_neg_samples = int(num_neg_samples)
self._num_neg_samples = num_neg_samples
remote_prefetch = is_sparse
print(
"With sparse mode, if your models has only small parameter prefetch may cause speed down"
)
self._attrs = {
'num_total_classes': int(num_total_classes),
'num_neg_samples': num_neg_samples,
'seed': seed,
'sampler': sampler,
'is_sparse': is_sparse,
'remote_prefetch': remote_prefetch
}
self.weight = self.create_parameter(
attr=self._param_attr,
shape=[self._num_total_classes, dim],
is_bias=False,
dtype=self._dtype)
if self._bias_attr:
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_total_classes, 1],
is_bias=True,
dtype=self._dtype)
self._inputs['Bias'] = self.bias
self._inputs['Weight'] = self.weight
def forward(self, input, label, sample_weight=None):
if in_dygraph_mode():
attrs = ('num_total_classes', self._attrs['num_total_classes'],
'num_neg_samples', self._attrs['num_neg_samples'], 'seed',
self._attrs['seed'], 'sampler', self._attrs['sampler'],
'is_sparse', self._attrs['is_sparse'], 'remote_prefetch',
self._attrs['remote_prefetch'])
cost, _, _ = _C_ops.nce(
input, label, self.weight, self.bias,
self._inputs['SampleWeight'], self._inputs['CustomDistProbs'],
self._inputs['CustomDistAlias'],
self._inputs['CustomDistAliasProbs'], *attrs)
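# Normalize the cost by the number of sampled classes
# (the positive label plus num_neg_samples negatives).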
return cost / (self._num_neg_samples + 1)
check_variable_and_dtype(input, "input", ['float32', 'float64'], "NCE")
check_variable_and_dtype(label, "label", ['int64'], "NCE")
check_type(sample_weight, 'sample_weight', (Variable, type(None)),
'NCE')
assert isinstance(input, Variable)
assert isinstance(label, Variable)
self._inputs['Input'] = input
self._inputs['Label'] = label
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
cost = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_logits = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_labels = self._helper.create_variable_for_type_inference(
dtype=label.dtype)
self._helper.append_op(
type='nce',
inputs=self._inputs,
outputs={
'Cost': cost,
'SampleLogits': sample_logits,
'SampleLabels': sample_labels
},
attrs=self._attrs)
return cost / (self._num_neg_samples + 1)
class PRelu(layers.Layer):
r"""
This interface is used to construct a callable object of the ``PRelu`` class.
For more details, refer to code examples.
It implements the three weight-sharing modes of the ``PRelu`` activation function.
Equation:
.. math::
y = \max(0, x) + \alpha * \min(0, x)
Parameters:
mode (str): The mode for weight sharing. It supports all, channel
and element. all: all elements share the same weight;
channel: elements in the same channel share the same weight;
element: each element has its own weight.
channel (int, optional): The number of channels.
This argument is required when mode is "channel".
Default: None.
input_shape (list or tuple, optional): The shape of input.
This argument is required when mode is "element".
Default: None.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
weight (alpha). Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
inp_np = np.ones([5, 200, 100, 100]).astype('float32')
with fluid.dygraph.guard():
inp_np = to_variable(inp_np)
prelu0 = fluid.PRelu(
mode='all',
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt0 = prelu0(inp_np)
prelu1 = fluid.PRelu(
mode='channel',
channel=200,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt1 = prelu1(inp_np)
prelu2 = fluid.PRelu(
mode='element',
input_shape=inp_np.shape,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt2 = prelu2(inp_np)
"""
def __init__(self,
mode,
channel=None,
input_shape=None,
param_attr=None,
dtype='float32'):
# need specify name_scope since snake-cased 'PRelu' is 'p_relu'
super(PRelu, self).__init__(name_scope='prelu')
self._mode = mode
self._param_attr = param_attr
self._dtype = dtype
if mode == 'all':
self._alpha_shape = [1]
elif mode == 'channel':
assert isinstance(
channel,
int), "channel argument is required when mode is 'channel'."
# NOTE(zhiqiu): The _alpha_shape should be [1, channel] + [1] * len(input_shape[2:]), not [1, channel, 1, 1].
# However, the trailing 1s in the list are useless, since the tensor is viewed as a one-dimensional array during kernel calculation.
# Also, input_shape is not required when mode is 'channel', so it is simplified.
#NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version.
self._alpha_shape = [1, channel, 1, 1]
elif mode == 'element':
assert isinstance(input_shape, (
list, tuple
)), "input_shape argument is required when mode is 'element'."
self._alpha_shape = [1] + list(input_shape)[1:]
else:
raise ValueError('mode should be one of all, channel, element.')
self.weight = self.create_parameter(
attr=self._param_attr,
shape=self._alpha_shape,
dtype='float32',
is_bias=False,
default_initializer=Constant(1.0))
def forward(self, input):
check_variable_and_dtype(input, 'input', ['float32'], 'PRelu')
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="prelu",
inputs={"X": input,
'Alpha': self.weight},
attrs={"mode": self._mode},
outputs={"Out": out})
return out
class BilinearTensorProduct(layers.Layer):
r"""
**Add Bilinear Tensor Product Layer**
This layer performs bilinear tensor product on two inputs.
For example:
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,output\_dim-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N]
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, output_dim].
- :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Parameters:
input1_dim (int): The dimension of each first input.
input2_dim (int): The dimension of each second input.
output_dim (int): The dimension of output of this layer.
name (str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
act (str, optional): Activation to be applied to the output of this layer. The default value is None.
param_attr (ParamAttr, optional): The parameter attribute for the learnable w, parameters/weights of
this layer. The default value is None.
bias_attr (ParamAttr, optional): The parameter attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. The default value is None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
Tensor: A 2-D Tensor of shape [batch_size, output_dim].
Examples:
.. code-block:: python
import paddle
import numpy
layer1 = numpy.random.random((5, 5)).astype('float32')
layer2 = numpy.random.random((5, 4)).astype('float32')
bilinearTensorProduct = paddle.nn.BilinearTensorProduct(
input1_dim=5, input2_dim=4, output_dim=1000)
ret = bilinearTensorProduct(paddle.to_tensor(layer1),
paddle.to_tensor(layer2))
"""
def __init__(self,
input1_dim,
input2_dim,
output_dim,
name=None,
act=None,
param_attr=None,
bias_attr=None,
dtype='float32'):
super(BilinearTensorProduct, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._name = name
self._input1_dim = input1_dim
self._input2_dim = input2_dim
self._output_dim = output_dim
self._inputs = dict()
self._dtype = dtype
param_shape = [self._output_dim, self._input1_dim, self._input2_dim]
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=False)
bias_size = [1, self._output_dim]
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=bias_size,
dtype=self._dtype,
is_bias=True)
@deprecated(
since="2.0.0",
update_to="paddle.nn.Bilinear",
reason="New name and new args in Bilinear, easier to use.")
def forward(self, x, y):
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'BilinearTensorProduct')
check_variable_and_dtype(y, 'y', ['float32', 'float64'],
'BilinearTensorProduct')
self._inputs = {"X": x, "Y": y, "Weight": self.weight}
if self.bias is not None:
self._inputs["Bias"] = self.bias
if self._name is not None:
out = self._helper.create_variable(
name=".".join([self.full_name(), self._name]),
dtype=self._dtype,
persistable=False)
else:
out = self._helper.create_variable(
dtype=self._dtype, persistable=False)
self._helper.append_op(
type="bilinear_tensor_product",
inputs=self._inputs,
outputs={"Out": out})
# add activation
return self._helper.append_activation(out, act=self._act)
class Conv2DTranspose(layers.Layer):
r"""
This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input and output
are in NCHW format, where N is the batch size, C is the number of feature maps,
H is the height of the feature map, and W is the width of the feature map.
The filter's shape is [MCHW], where M is the number of input feature maps,
C is the number of output feature maps, H is the height of the filter,
and W is the width of the filter. If groups is greater than 1,
C will equal the number of output feature maps divided by groups.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
The details of convolution transpose layer, please refer to the following explanation and references
`conv2dtranspose <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_ .
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of filters. It is the same as the number of
output feature maps.
filter_size(int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
output_size(int or tuple, optional): The output image size. If output size is a
tuple, it must contain two integers, (image_H, image_W). If it is None,
output_size is calculated from filter_size, padding, and stride.
If output_size and filter_size are specified at the same time, they
should follow the formula above. Default: None.
padding(int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
stride(int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
data = np.random.random((3, 32, 32, 5)).astype('float32')
conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(
num_channels=32, num_filters=2, filter_size=3)
ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
output_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
super(Conv2DTranspose, self).__init__()
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._groups = groups
self._num_channels = num_channels
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._padding = padding
self._stride = stride
self._dilation = dilation
self._filter_size = filter_size
self._output_size = output_size
self._dtype = dtype
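# Use the specialized depthwise transpose kernel only when every input channel
# forms its own group and cuDNN is not requested; otherwise fall back to the
# generic conv2d_transpose op.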
if (self._num_channels == self._groups and
self._num_filters == self._num_channels and
not self._use_cudnn):
self._op_type = 'depthwise_conv2d_transpose'
else:
self._op_type = 'conv2d_transpose'
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._stride = utils.convert_to_list(self._stride, 2, 'stride')
self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation')
self._filter_size = utils.convert_to_list(
self._filter_size, 2, 'conv2d_transpose.filter_size')
if self._output_size is None:
self._output_size = []
elif isinstance(self._output_size, list) or isinstance(
self._output_size, int):
self._output_size = utils.convert_to_list(self._output_size, 2,
'output_size')
else:
raise ValueError("output_size should be list or int")
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._groups = 1 if self._groups is None else self._groups
filter_shape = [self._num_channels, self._num_filters // self._groups
] + self._filter_size
self.weight = self.create_parameter(
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
if in_dygraph_mode():
op = getattr(_C_ops, self._op_type)
out = op(input, self.weight, 'output_size', self._output_size,
'strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups,
'use_cudnn', self._use_cudnn)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(pre_bias, self.bias,
1)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'],
"Conv2DTranspose")
inputs = {'Input': [input], 'Filter': [self.weight]}
attrs = {
'output_size': self._output_size,
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups,
'use_cudnn': self._use_cudnn
}
pre_bias = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
self._helper.append_op(
type=self._op_type,
inputs=inputs,
outputs={'Output': pre_bias},
attrs=attrs)
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
out = self._helper.append_activation(pre_act, act=self._act)
return out
class SequenceConv(layers.Layer):
"""
This class creates the op for sequence_conv, using the inputs and
other convolutional configurations for the filters and stride as given
in the input parameters.
Parameters:
name_scope(str): The name of this class.
num_filters (int): number of filters.
filter_size (int): the filter size (H and W). Default: 3.
filter_stride (int): stride of the filter. Default: 1.
padding (bool|None): if True, add paddings. Default: None
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of sequence_conv. If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: output of sequence_conv
"""
def __init__(self,
name_scope,
num_filters,
filter_size=3,
filter_stride=1,
padding=None,
bias_attr=None,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "SequenceConv is not supported by dynamic graph mode yet!"
super(SequenceConv, self).__init__(name_scope)
self._num_filters = num_filters
self._filter_size = filter_size
self._filter_stride = filter_stride
self._padding = padding
self._bias_attr = bias_attr
self._param_attr = param_attr
self._act = act
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._filter_size * input.shape[1], self._num_filters]
self.weight = self.create_parameter(
attr=self._param_attr, shape=filter_shape, dtype=self._dtype)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
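# contextStart = -(filter_size // 2) centers the context window of length
# filter_size on the current time step.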
pre_bias = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='sequence_conv',
inputs={
'X': [input],
'Filter': [self.weight],
},
outputs={"Out": pre_bias},
attrs={
'contextStride': self._filter_stride,
'contextStart': -int(self._filter_size // 2),
'contextLength': self._filter_size
})
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
class RowConv(layers.Layer):
"""
**Row-convolution operator**
The row convolution is called lookahead convolution. This operator was introduced in the following paper for DeepSpeech2:
http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf
The main motivation is that a bidirectional RNN, useful in DeepSpeech-like speech models, learns representations of a sequence by performing a
forward and a backward pass through the entire sequence. However, unlike
unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online
and low-latency setting. The lookahead convolution incorporates information
from future subsequences in a computationally efficient manner to improve
unidirectional recurrent neural networks. The row convolution operator is
different from the 1D sequence convolution, and is computed as follows:
Given an input sequence X of length t and input dimension D, and a filter (W) of size context * D.
More details about row_conv please refer to the design document https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
Parameters:
name_scope(str): The name of this class.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc. Default: None.
act (str): Non-linear activation to be applied to output variable. Default: None.
Attributes:
weight (Parameter): the learnable weights of this layer.
Returns:
the output (Out) is a LoDTensor, which supports variable time-length input sequences.
The underlying tensor in this LoDTensor is a matrix with shape T x D, i.e., the same shape as X.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
x = numpy.random.random((16)).astype('float32')
rowConv = fluid.dygraph.nn.RowConv(
'RowConv', future_context_size=2)
ret = rowConv(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
name_scope,
future_context_size,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "RowConv is not supported by dynamic graph mode yet!"
super(RowConv, self).__init__(name_scope)
self._act = act
self._param_attr = param_attr
self._future_context_size = future_context_size
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._future_context_size + 1, input.shape[1]]
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='row_conv',
inputs={'X': [input],
'Filter': [self.weight]},
outputs={'Out': [out]})
return self._helper.append_activation(out, act=self._act)
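# Illustrative, hedged NumPy sketch (not part of Paddle) of the lookahead (row)
# convolution computed by RowConv above for one sequence: with a filter w of
# shape [future_context_size + 1, D], out[t] = sum_k x[t + k] * w[k] using
# elementwise products. The helper name is hypothetical.
import numpy as np


def _row_conv_ref(x, w):
    # x: [T, D]; w: [context + 1, D]
    T, _ = x.shape
    out = np.zeros_like(x)
    for t in range(T):
        for k in range(w.shape[0]):
            if t + k < T:
                out[t] += x[t + k] * w[k]
    return out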
class GroupNorm(layers.Layer):
"""
:alias_main: paddle.nn.GroupNorm
:alias: paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm
:old_api: paddle.fluid.dygraph.GroupNorm
This interface is used to construct a callable object of the ``GroupNorm`` class.
For more details, refer to code examples.
It implements the function of the Group Normalization Layer.
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
channels(int): The number of channels of input.
groups(int): The number of groups that divided from channels.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
scale :math:`g`. If it is set to False, no scale will be added to the output units.
If it is set to None, the scale is initialized to one. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized to zero. Default: None.
act(str, optional): Activation to be applied to the output of group normalization. Default: None.
data_layout(str, optional): Specify the input data format. Only NCHW is supported. Default: NCHW.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = np.random.random((8, 32, 32)).astype('float32')
groupNorm = fluid.dygraph.nn.GroupNorm(channels=32, groups=4)
ret = groupNorm(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
channels,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW',
dtype='float32'):
super(GroupNorm, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._epsilon = epsilon
self._channels = channels
self._groups = groups
self._act = act
self._dtype = dtype
if data_layout != 'NCHW':
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [self._channels]
self.weight = self.create_parameter(
attr=self._param_attr or False,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
self.bias = self.create_parameter(
attr=self._bias_attr or False,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
def forward(self, input):
inputs = {'X': input}
if self.bias is not None:
inputs['Bias'] = self.bias
if self.weight is not None:
inputs['Scale'] = self.weight
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
group_norm_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": self._epsilon,
"groups": self._groups})
return self._helper.append_activation(group_norm_out, self._act)
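# Illustrative, hedged NumPy sketch (not part of Paddle) of the group
# normalization computed above for NCHW input: channels are split into groups
# and each group is normalized over its (channels_per_group, H, W) slice before
# the per-channel scale and bias are applied. The helper name is hypothetical.
import numpy as np


def _group_norm_ref(x, groups, scale, bias, eps=1e-5):
    # x: [N, C, H, W]; scale, bias: [C]
    N, C, H, W = x.shape
    xg = x.reshape(N, groups, C // groups, H, W)
    mean = xg.mean(axis=(2, 3, 4), keepdims=True)
    var = xg.var(axis=(2, 3, 4), keepdims=True)
    xg = (xg - mean) / np.sqrt(var + eps)
    out = xg.reshape(N, C, H, W)
    return out * scale.reshape(1, C, 1, 1) + bias.reshape(1, C, 1, 1)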
class SpectralNorm(layers.Layer):
r"""
This interface is used to construct a callable object of the ``SpectralNorm`` class.
For more details, refer to code examples. It implements the function of the Spectral Normalization Layer.
This layer calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
Parameters. Calculations are showed as follows.
Step 1:
Generate vector U in shape of [H], and V in shape of [W].
While H is the :attr:`dim` th dimension of the input weights,
and W is the product result of remaining dimensions.
Step 2:
:attr:`power_iters` should be a positive integer; perform the following
calculations with U and V for :attr:`power_iters` rounds.
.. math::
\mathbf{v} := \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}
\mathbf{u} := \frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}
Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.
.. math::
\sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}
\mathbf{W} = \frac{\mathbf{W}}{\sigma(\mathbf{W})}
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
Parameters:
weight_shape(list or tuple): The shape of weight parameter.
dim(int, optional): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
power_iters(int, optional): The number of power iterations to calculate spectral norm. Default: 1.
eps(float, optional): The epsilon for numerical stability in calculating norms. Default: 1e-12.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
None
Examples:
.. code-block:: python
import paddle
x = paddle.rand((2,8,32,32))
spectral_norm = paddle.nn.SpectralNorm(x.shape, dim=1, power_iters=2)
spectral_norm_out = spectral_norm(x)
print(spectral_norm_out.shape) # [2, 8, 32, 32]
"""
def __init__(self,
weight_shape,
dim=0,
power_iters=1,
eps=1e-12,
dtype='float32'):
super(SpectralNorm, self).__init__()
self._power_iters = power_iters
self._eps = eps
self._dim = dim
self._dtype = dtype
self._weight_shape = list(weight_shape)
assert np.prod(self._weight_shape) > 0,\
"Any dimension of `weight_shape` cannot be equal to 0."
assert dim < len(self._weight_shape), \
("The input `dim` should be less than the "
"length of `weight_shape`, but received dim="
"{}".format(dim))
h = self._weight_shape[self._dim]
w = np.prod(self._weight_shape) // h
self.weight_u = self.create_parameter(
attr=ParamAttr(),
shape=[h],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.weight_u.stop_gradient = True
self.weight_v = self.create_parameter(
attr=ParamAttr(),
shape=[w],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.weight_v.stop_gradient = True
def forward(self, weight):
check_variable_and_dtype(weight, "weight", ['float32', 'float64'],
'SpectralNorm')
inputs = {'Weight': weight, 'U': self.weight_u, 'V': self.weight_v}
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="spectral_norm",
inputs=inputs,
outputs={"Out": out, },
attrs={
"dim": self._dim,
"power_iters": self._power_iters,
"eps": self._eps,
})
return out
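# Illustrative, hedged NumPy sketch (not part of Paddle) of the power iteration
# described in the SpectralNorm docstring: sigma approximates the largest
# singular value of the weight viewed as a 2-D matrix whose rows correspond to
# dimension `dim`. The helper name is hypothetical.
import numpy as np


def _spectral_norm_ref(w_mat, u, v, power_iters=1, eps=1e-12):
    # w_mat: [H, W]; u: [H]; v: [W]
    for _ in range(power_iters):
        v = w_mat.T @ u
        v = v / (np.linalg.norm(v) + eps)
        u = w_mat @ v
        u = u / (np.linalg.norm(u) + eps)
    sigma = u @ w_mat @ v
    return w_mat / sigma, u, v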
class TreeConv(layers.Layer):
"""
This interface is used to construct a callable object of the ``TreeConv`` class.
For more details, refer to code examples.
Tree-Based Convolution is a kind of convolution based on tree structure.
It is part of the Tree-Based Convolution Neural Network (TBCNN),
which is used to classify tree structures such as Abstract Syntax Trees.
The TBCNN paper proposed a data structure called the continuous binary tree,
which regards a multiway tree as a binary tree.
The paper of Tree-Based Convolution Operator is here: `tree-based convolution <https://arxiv.org/abs/1409.5718v1/>`_ .
Parameters:
feature_size(int): last dimension of nodes_vector.
output_size(int): output feature width.
num_filters(int, optional): number of filters, Default: 1.
max_depth(int, optional): max depth of filters, Default: 2.
act(str, optional): activation function, Default: tanh.
param_attr(ParamAttr, optional): the parameter attribute for the filters, Default: None.
bias_attr(ParamAttr, optional): the parameter attribute for the bias of this layer, Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')
edge_set = numpy.random.random((1, 9, 2)).astype('int32')
treeConv = fluid.dygraph.nn.TreeConv(
feature_size=5, output_size=6, num_filters=1, max_depth=2)
ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))
"""
def __init__(self,
feature_size,
output_size,
num_filters=1,
max_depth=2,
act='tanh',
param_attr=None,
bias_attr=None,
name=None,
dtype='float32'):
super(TreeConv, self).__init__()
self._name = name
self._feature_size = feature_size
self._output_size = output_size
self._act = act
self._max_depth = max_depth
self._num_filters = num_filters
self._bias_attr = bias_attr
self._param_attr = param_attr
self._dtype = dtype
w_shape = [self._feature_size, 3, self._output_size, self._num_filters]
if self._bias_attr:
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=w_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, nodes_vector, edge_set):
check_type(nodes_vector, 'nodes_vector', (Variable), 'TreeConv')
check_type(edge_set, 'edge_set', (Variable), 'TreeConv')
if self._name:
out = self.create_variable(
name=self._name, dtype=self._dtype, persistable=False)
else:
out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='tree_conv',
inputs={
'NodesVector': nodes_vector,
'EdgeSet': edge_set,
'Filter': self.weight
},
outputs={'Out': out, },
attrs={'max_depth': self._max_depth})
if self._bias_attr:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [out],
'Y': [self.bias]},
outputs={'Out': [pre_activation]},
attrs={'axis': 1})
else:
pre_activation = out
return self._helper.append_activation(pre_activation, act=self._act)
class Flatten(layers.Layer):
"""
This interface is used to construct a callable object of the ``Flatten`` class.
For more details, refer to code examples.
It flattens a contiguous range of dims of the input into a single dimension.
Parameters:
start_axis(int): first dim to flatten (default = 1)
stop_axis(int): last dim to flatten (default = -1).
Returns:
None
Examples:
.. code-block:: python
import paddle
import numpy as np
inp_np = np.ones([5, 2, 3, 4]).astype('float32')
inp_np = paddle.to_tensor(inp_np)
flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
flatten_res = flatten(inp_np)
"""
def __init__(self, start_axis=1, stop_axis=-1):
super(Flatten, self).__init__()
self.start_axis = start_axis
self.stop_axis = stop_axis
def forward(self, input):
out = paddle.tensor.manipulation.flatten(
input, start_axis=self.start_axis, stop_axis=self.stop_axis)
return out
|
PaddlePaddle/Paddle
|
python/paddle/fluid/dygraph/nn.py
|
Python
|
apache-2.0
| 138,442
|
[
"NEURON"
] |
7a8da982dda86889a647db91edadbd55e578aa6fb86b8b9ff3b620c8993d2733
|
#!/usr/bin/env python3
#
# blist2xmlgo.py
#
# Copyright (C) 2013, Jian-Long Huang
# Licensed under The MIT License
# http://opensource.org/licenses/MIT
#
# Author: Jian-Long Huang (jianlong@ntu.edu.tw)
# Version: 0.1.0
# Created: 2013.1.6
#
# Usage: blist2xmlgo.py <blast_list> <map_ids> <output>
#
# Columns:
# 0 aln_rank
# 1 aln_hspno
# 2 aln_method
# 3 query_name
# 4 hit_name
# 5 query_length
# 6 query_hsp_start
# 7 query_hsp_end
# 8 query_strand
# 9 query_frame
# 10 hit_length
# 11 hit_hsp_start
# 12 hit_hsp_end
# 13 hsp_score
# 14 hsp_bits
# 15 hsp_evalue
# 16 hsp_length
# 17 hsp_gaps
# 18 hsp_identities
# 19 hsp_identity_percent
# 20 hsp_positives
# 21 hsp_positive_percent
# 22 query_coverage
# 23 hit_coverage
# 24 hit_description
import sys
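# Hedged, illustrative helper (not part of the original script): map one
# tab-separated blast-list line onto the column names documented in the header
# comment above. Both names below are hypothetical additions.
BLIST_COLUMNS = [
    'aln_rank', 'aln_hspno', 'aln_method', 'query_name', 'hit_name',
    'query_length', 'query_hsp_start', 'query_hsp_end', 'query_strand',
    'query_frame', 'hit_length', 'hit_hsp_start', 'hit_hsp_end', 'hsp_score',
    'hsp_bits', 'hsp_evalue', 'hsp_length', 'hsp_gaps', 'hsp_identities',
    'hsp_identity_percent', 'hsp_positives', 'hsp_positive_percent',
    'query_coverage', 'hit_coverage', 'hit_description',
]


def parse_blist_line(line):
    """Return the fields of one blast-list line keyed by column name."""
    return dict(zip(BLIST_COLUMNS, line.rstrip('\n').split('\t')))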
def main():
with open(sys.argv[1], 'r') as fin, open(sys.argv[2], 'r') as fmap, open(sys.argv[3], 'w') as fo:
idm = {}
for line in fmap:
"""Feed id data"""
idm.update({line.split('\t')[0]: line.split('\t')[1].rstrip()})
query_count = 0
for line in fin:
if line[0] == 'a':
continue
data = line.split('\t')
if data[3] in idm and data[4] in idm[data[3]]:
query_count += 1
fo.write('<?xml version="1.0"?>\n')
fo.write('<!DOCTYPE BlastOutput PUBLIC "-//NCBI//NCBI BlastOutput/EN" "NCBI_BlastOutput.dtd">\n')
fo.write('<BlastOutput><BlastOutput_program>blast</BlastOutput_program>\n')
fo.write('<BlastOutput_version>BLAST 2.2.27+</BlastOutput_version>\n')
fo.write('<BlastOutput_db>db.fa</BlastOutput_db>\n')
fo.flush()
fo.write('<BlastOutput_query-ID>' + 'Query' + str(query_count) + '</BlastOutput_query-ID>\n')
fo.write('<BlastOutput_query-def>' + data[3] + '</BlastOutput_query-def>\n')
fo.write('<BlastOutput_query-len>' + str(data[5]) + '</BlastOutput_query-len>\n')
fo.write('<BlastOutput_param>\n')
fo.write('<Parameters>\n')
fo.write('<Parameters_expect>10</Parameters_expect>\n')
fo.write('<Parameters_filter>L;</Parameters_filter>\n')
fo.write('</Parameters>\n')
fo.write('</BlastOutput_param>\n')
fo.write('<BlastOutput_iterations>\n')
fo.write('<Iteration>\n')
fo.write('<Iteration_iter-num>1</Iteration_iter-num>\n')
fo.write('<Iteration_query-ID>' + str(query_count) + '</Iteration_query-ID>\n')
fo.write('<Iteration_query-def>' + data[3] + '</Iteration_query-def>\n')
fo.write('<Iteration_query-len>' + str(data[5]) + '</Iteration_query-len>\n')
fo.write('<Iteration_hits>\n')
fo.write('<Hit>\n')
fo.write('<Hit_num>1</Hit_num>\n')
fo.write('<Hit_id>' + idm[data[3]] + '</Hit_id>\n')
fo.write('<Hit_def>' + data[24] + '</Hit_def>\n')
fo.write('<Hit_accession>1</Hit_accession>\n')
fo.write('<Hit_len>' + data[10] + '</Hit_len>\n')
fo.write('<Hit_hsps>\n')
fo.write('<Hsp>\n')
fo.write('<Hsp_num>1</Hsp_num>\n')
fo.write('<Hsp_bit-score>' + str(data[14]) + '</Hsp_bit-score>\n')
fo.write('<Hsp_evalue>' + data[15] + '</Hsp_evalue>\n')
fo.write('<Hsp_query-frame>' + str(data[9]) + '</Hsp_query-frame>\n')
fo.write('<Hsp_hit-frame>1</Hsp_hit-frame>\n')
fo.write('<Hsp_positive>' + str(data[20]) + '</Hsp_positive>\n')
fo.write('<Hsp_align-len>' + str(data[16]) + '</Hsp_align-len>\n')
fo.write('</Hsp>\n')
fo.write('</Hit_hsps>\n')
fo.write('</Hit>\n')
fo.write('</Iteration_hits>\n')
fo.write('</Iteration>\n')
fo.write('</BlastOutput_iterations>\n')
fo.write('</BlastOutput>\n\n\n')
fo.flush()
print('%d sequences have been parsed.' % (query_count))
sys.exit()
if __name__ == "__main__":
main()
|
jlhg/bdorpy
|
bdorpy/blist2xmlgo.py
|
Python
|
mit
| 4,234
|
[
"BLAST"
] |
93738fdba39d44899ef65b656fce9fc8dca4d854a6e8b3a15f3eeaee1fcee774
|
#!/usr/bin/env python
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Restriction Digest Enzymes.
Example:
>>> from Rana.fts import fts #
>>> from Rana.Vector import * # Just a way to get a sequence.
>>> from Bio.Seq import Seq # Use your prefered method here.
>>> pbr = fts(pBR322) #
>>> seq = Seq(str(pbr)) #
>>>
>>> from Bio.Restriction import *
>>> a = Analysis(AllEnzymes, seq, linear=False)
>>> b = a.blunt()
>>> a.print_that() # no argument -> print all the results
AasI : 2169, 2582.
AatII : 4289.
Acc16I : 263, 1359, 1457, 3589.
...
More enzymes here.
...
>>> b = a.without_site()
>>> a.print_that(b) # Enzymes which do not cut pBR322
AarI AatI Acc65I AcsI AcvI AdeI AflII AgeI
AhlI AleI AloI ApaI ApoI AscI AsiAI AsiSI
Asp718I AspA2I AsuII AvaIII AvrII AxyI BaeI BbrPI
BbvCI BclI BcuI BfrBI BfrI BglII BlnI BlpI
BmgBI BmgI BplI Bpu1102I Bpu14I BsaXI Bse21I BsePI
BseRI BshTI BsiWI Bsp119I Bsp120I Bsp1407I Bsp1720I Bsp19I
BspT104I BspTI BsrGI BssHI BssHII Bst98I BstAUI BstBI
BstEII BstPI BstSNI BstXI Bsu36I BtrI CciNI CelII
Cfr42I Cfr9I CpoI Csp45I CspAI CspCI CspI DraIII
DrdII Ecl136II Eco105I Eco147I Eco72I Eco81I Eco91I EcoICRI
EcoO65I EcoRI EcoT22I EspI FalI FbaI FseI FunII
HpaI KpnI Ksp22I KspAI KspI MabI MfeI MluI
Mph1103I MspCI MssI MunI NcoI NotI NsiI NspV
OliI PacI PaeR7I PasI PauI PceI Pfl23II PinAI
PmaCI PmeI PmlI Ppu10I PsiI Psp124BI PspAI PspCI
PspEI PspLI PspOMI PspXI PsrI RleAI Rsr2I RsrII
SacI SacII SanDI SauI SbfI SciI SdaI SexAI
SfiI Sfr274I Sfr303I SfuI SgfI SgrBI SlaI SmaI
SmiI SnaBI SpeI SplI SrfI Sse232I Sse8387I Sse8647I
SseBI SspBI SstI StuI SunI SwaI TliI UthSI
Vha464I XapI XbaI XcmI XhoI XmaCI XmaI XmaJI
Zsp2I
"""
from Bio.Restriction.Restriction import *
__docformat__ = "restructuredtext en"
#
# OK can't put the following code in Bio.Restriction.__init__ unless
# I put everything from Restriction in here.
# or at least the RestrictionBatch class.
#
# The reason for that is if I do that, I break the __contains__ method of
# the RestrictionBatch in Restriction, which expect to find the name of
# the enzymes in the locals() dictionary when evaluating string to see if
# it is an enzyme.
#
# This call for some explanations I guess:
# When testing for the presence of a Restriction enzyme in a
# RestrictionBatch, the user can use:
#
# 1) a class of type 'RestrictionType'
# 2) a string of the name of the enzyme (it's repr)
# i.e:
# >>> from Bio.Restriction import RestrictionBatch, EcoRI
# >>> MyBatch = RestrictionBatch(EcoRI)
# >>>
# >>> EcoRI in MyBatch # the class EcoRI.
# True
# >>>
# >>> 'EcoRI' in MyBatch # a string representation
# True
#
# OK, that's how it is suppose to work. And I find it quite useful.
#
# Now if I leave the code here I got:
# >>> from Bio.Restriction import RestrictionBatch, EcoRI
# >>> MyBatch = RestrictionBatch(EcoRI)
# >>> EcoRI in MyBatch # the class EcoRI.
# True
# >>> 'EcoRI' in MyBatch # a string.
# False
# There are 5 ways to change that:
# 1) abandon the evaluation of string representation.
# 2) leave the code like that and hack something in RestrictionBatch.
# 3) Move back the code in Bio.Restriction.Restriction
# 4) Move RestrictionBatch here.
# 5) Remove Restriction.Restriction and move all the code in here
#
# 1) no fun in that.
# 2) there is a simpler way to do it.
# 3) I prefer to keep all the code together.
# 4) and 5) both are OK. Only a matter of preference.
#
# So the following code has been moved back to Bio.Restricion.Restriction
# For the user the results is transparent:
# from Bio.Restriction import * works as before.
#
# ##
# ## The restriction enzyme classes are created dynamically when the module is
# ## imported. Here is the magic which allow the creation of the
# ## restriction-enzyme classes.
# ##
# ## The reason for the two dictionaries in Restriction_Dictionary
# ## one for the types (which will be called pseudo-type as they really
# ## correspond to the values that instances of RestrictionType can take)
# ## and one for the enzymes is efficiency as the bases are evaluated
# ## once per pseudo-type.
# ##
# ## However Restriction is still a very inefficient module at import. But
# ## remember that around 660 classes (which is more or less the size of Rebase)
# ## have to be created dynamically. However, this processing takes place only
# ## once.
# ## This inefficiency is however largely compensated by the use of metaclass
# ## which provide a very efficient layout for the class themselves mostly
# ## alleviating the need of if/else loops in the class methods.
# ##
# ## It is essential to run Restriction with doc string optimisation (-OO switch)
# ## as the doc string of 660 classes take a lot of processing.
# ##
# # CommOnly = RestrictionBatch() # commercial enzymes
# # NonComm = RestrictionBatch() # not available commercially
# # for TYPE, (bases, enzymes) in typedict.items():
# # #
# # # The keys are the pseudo-types TYPE (stored as type1, type2...)
# # # The names are not important and are only present to differentiate
# # # the keys in the dict. All the pseudo-types are in fact RestrictionType.
# # # These names will not be used after and the pseudo-types are not
# # # kept in the locals() dictionary. It is therefore impossible to
# # # import them.
# # # Now, if you have look at the dictionary, you will see that not all the
# # # types are present as those without corresponding enzymes have been
# # # removed by Dictionary_Builder().
# # #
# # # The values are tuples which contain
# # # as first element a tuple of bases (as string) and
# # # as second element the names of the enzymes.
# # #
# # # First eval the bases.
# # #
# # bases = tuple(eval(x) for x in bases)
# # #
# # # now create the particular value of RestrictionType for the classes
# # # in enzymes.
# # #
# # T = type.__new__(RestrictionType, 'RestrictionType', bases, {})
# # for k in enzymes:
# # #
# # # Now, we go through all the enzymes and assign them their type.
# # # enzymedict[k] contains the values of the attributes for this
# # # particular class (self.site, self.ovhg,....).
# # #
# # newenz = T(k, bases, enzymedict[k])
# # #
# # # we add the enzymes to the corresponding batch.
# # #
# # # No need to verify the enzyme is a RestrictionType -> add_nocheck
# # #
# # if newenz.is_comm() : CommOnly.add_nocheck(newenz)
# # else : NonComm.add_nocheck(newenz)
# ##
# ## AllEnzymes is a RestrictionBatch with all the enzymes from Rebase.
# ##
# # AllEnzymes = CommOnly | NonComm
# ##
# ## Now, place the enzymes in locals so they can be imported.
# ##
# # names = [str(x) for x in AllEnzymes]
# # locals().update(dict(map(None, names, AllEnzymes)))
# ##
# ## Limit what can be imported by from Restriction import *
# ## Most of the classes here will never be used outside this module
# ## (Defined,Palindromic...). It is still possible to request them specifically
# ##
# ## also delete the variable that are no longer needed.
# ##
# ##
# # __all__=['Analysis', 'RestrictionBatch','AllEnzymes','CommOnly','NonComm']+names
# # del k, x, enzymes, TYPE, bases, names
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Restriction/__init__.py
|
Python
|
gpl-2.0
| 8,705
|
[
"Biopython"
] |
c7216be886c9882e3ed7b65c95d14341f2031206c203a217b1be67f64b767249
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for calling ClustalW and parsing its output (DEPRECATED).
This module has been superseded by the Bio.AlignIO framework for
alignment parsing, and the ClustalW command line wrapper in
Bio.Align.Applications for calling the tool. These are both described
in the current version of the Biopython Tutorial and Cookbook.
This means Bio.Clustalw is now deprecated and likely to be
removed in future releases of Biopython.
A set of classes to interact with the multiple alignment command
line program clustalw.
Clustalw is the command line version of the graphical Clustalx
alignment program.
This requires clustalw available from:
ftp://ftp-igbmc.u-strasbg.fr/pub/ClustalW/.
functions:
o read
o parse_file
o do_alignment
classes:
o ClustalAlignment
o MultipleAlignCL"""
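# Hedged sketch of the recommended replacement mentioned above; the file names
# are purely illustrative and clustalw must be available on the PATH:
#
#     from Bio.Align.Applications import ClustalwCommandline
#     from Bio import AlignIO
#
#     cline = ClustalwCommandline("clustalw", infile="unaligned.fasta",
#                                 outfile="aligned.aln")
#     stdout, stderr = cline()          # runs clustalw via subprocess
#     alignment = AlignIO.read("aligned.aln", "clustal")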
import Bio
import warnings
warnings.warn("Bio.Clustalw is deprecated. Please use the Bio.AlignIO framework for alignment parsing, and the ClustalW command line wrapper in Bio.Align.Applications for calling the tool. These are both described in the current version of the Biopython Tutorial and Cookbook.", Bio.BiopythonDeprecationWarning)
# standard library
import os
import sys
import subprocess
# biopython
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Align.Generic import Alignment
from Bio.Application import _escape_filename
def parse_file(file_name, alphabet = IUPAC.unambiguous_dna, debug_level = 0):
"""Parse the given file into a clustal aligment object (OBSOLETE).
Arguments:
o file_name - The name of the file to parse.
o alphabet - The type of alphabet to use for the alignment sequences.
This should correspond to the type of information contained in the file.
Defaults to be unambiguous_dna sequence.
There is a deprecated optional argument debug_level which has no effect.
This function is obsolete, and any new code should call Bio.AlignIO
instead. For example using Bio.Clustalw, you might have:
>>> from Bio import Clustalw
>>> from Bio import Alphabet
>>> filename = "Clustalw/protein.aln"
>>> alpha = Alphabet.Gapped(Alphabet.generic_protein)
>>> align = Clustalw.parse_file(filename, alphabet=alpha)
>>> print align.get_alignment_length()
411
>>> clustalw_string = str(align)
This becomes:
>>> from Bio import AlignIO
>>> from Bio import Alphabet
>>> filename = "Clustalw/protein.aln"
>>> alpha = Alphabet.Gapped(Alphabet.generic_protein)
>>> align = AlignIO.read(open(filename), "clustal", alphabet=alpha)
>>> print align.get_alignment_length()
411
>>> assert clustalw_string == align.format("clustal")
"""
import warnings
warnings.warn("This function is obsolete, and any new code should call Bio.AlignIO instead.", PendingDeprecationWarning)
# Avoid code duplication by calling Bio.AlignIO to do this for us.
handle = open(file_name, 'r')
from Bio import AlignIO
generic_alignment = AlignIO.read(handle, "clustal")
handle.close()
#Force this generic alignment into a ClustalAlignment... nasty hack
if isinstance(alphabet, Alphabet.Gapped):
alpha = alphabet
else:
alpha = Alphabet.Gapped(alphabet)
clustal_alignment = ClustalAlignment(alpha)
clustal_alignment._records = generic_alignment._records
for record in clustal_alignment._records:
record.seq.alphabet = alpha
try:
clustal_alignment._version = generic_alignment._version
except AttributeError:
#Missing the version, could be a 3rd party tool's output
pass
try :
clustal_alignment._star_info = generic_alignment._star_info
except AttributeError:
#Missing the consensus, again, this is not always present
pass
return clustal_alignment
def do_alignment(command_line, alphabet=None):
"""Perform an alignment with the given command line (OBSOLETE).
Arguments:
o command_line - A command line object that can give out
the command line we will input into clustalw.
o alphabet - the alphabet to use in the created alignment. If not
specified IUPAC.unambiguous_dna and IUPAC.protein will be used for
dna and protein alignment respectively.
Returns:
o A clustal alignment object corresponding to the created alignment.
If the alignment type was not a clustal object, None is returned.
This function (and the associated command line object) are now obsolete.
Please use the Bio.Align.Applications.ClustalwCommandline wrapper with
the Python subprocess module (and Bio.AlignIO for parsing) as described
in the tutorial.
"""
import warnings
warnings.warn("This function (and the associated command line object) are now obsolete. Please use the Bio.Align.Applications.ClustalwCommandline wrapper with the Python subprocess module (and Bio.AlignIO for parsing) as described in the tutorial.", PendingDeprecationWarning)
#We don't need to supply any piped input, but we setup the
#standard input pipe anyway as a work around for a python
#bug if this is called from a Windows GUI program. For
#details, see http://bugs.python.org/issue1124861
child_process = subprocess.Popen(str(command_line),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32")
)
#Use .communicate as can get deadlocks with .wait(), see Bug 2804
child_process.communicate() #ignore the stdout and strerr data
value = child_process.returncode
# check the return value for errors, as on 1.81 the return value
# from Clustalw is actually helpful for figuring out errors
# TODO - Update this for new error codes using in clustalw 2
# 1 => bad command line option
if value == 1:
raise ValueError("Bad command line option in the command: %s"
% str(command_line))
# 2 => can't open sequence file
elif value == 2:
raise IOError("Cannot open sequence file %s"
% command_line.sequence_file)
# 3 => wrong format in sequence file
elif value == 3:
raise IOError("Sequence file %s has an invalid format."
% command_line.sequence_file)
# 4 => sequence file only has one sequence
elif value == 4:
raise IOError("Sequence file %s has only one sequence present."
% command_line.sequence_file)
# if an output file was specified, we need to grab it
if command_line.output_file:
out_file = command_line.output_file
else:
out_file = os.path.splitext(command_line.sequence_file)[0] + '.aln'
# if we can't deal with the format, just return None
if command_line.output_type and command_line.output_type != 'CLUSTAL':
return None
# otherwise parse it into a ClustalAlignment object
else:
if not alphabet:
alphabet = (IUPAC.unambiguous_dna, IUPAC.protein)[
command_line.type == 'PROTEIN']
# check if the outfile exists before parsing
if not(os.path.exists(out_file)):
raise IOError("Output .aln file %s not produced, commandline: %s"
% (out_file, command_line))
return parse_file(out_file, alphabet)
class ClustalAlignment(Alignment):
"""Work with the clustal aligment format (OBSOLETE).
This format is the default output from clustal -- these files normally
have an extension of .aln.
This obsolete alignment object is a subclass of the more general alignment
object used in Bio.AlignIO. The old practical difference is here str(align)
would give the alignment as a string in clustal format, whereas in general
you must do align.format("clustal"), which supports other formats too.
"""
# the default version to use if one isn't set
import warnings
warnings.warn("This class is obsolete.", PendingDeprecationWarning)
DEFAULT_VERSION = '1.81'
def __init__(self, alphabet = Alphabet.Gapped(IUPAC.ambiguous_dna)):
Alignment.__init__(self, alphabet)
# represent all of those stars in the aln output format
self._star_info = ''
self._version = ''
def __str__(self):
"""Print out the alignment so it looks pretty.
The output produced from this should also be formatted in valid
clustal format.
"""
return self.format("clustal")
class MultipleAlignCL:
"""Represent a clustalw multiple alignment command line (OBSOLETE).
This command line wrapper is considered obsolete. Please use the replacement
Bio.Align.Applications.ClustalwCommandline wrapper instead, which uses the
standardised Bio.Application style interface. This is described in the
tutorial, with examples using ClustalW.
"""
import warnings
warnings.warn("This command line wrapper is considerd obsolete. Please use the replacement Bio.Align.Applications.ClustalwCommandline wrapper instead, which uses the standardised Bio.Application style interface. This is described in the tutorial, with examples using ClustalW.", PendingDeprecationWarning)
# set the valid options for different parameters
OUTPUT_TYPES = ['GCG', 'GDE', 'PHYLIP', 'PIR', 'NEXUS', 'FASTA']
OUTPUT_ORDER = ['INPUT', 'ALIGNED']
OUTPUT_CASE = ['LOWER', 'UPPER']
OUTPUT_SEQNOS = ['OFF', 'ON']
RESIDUE_TYPES = ['PROTEIN', 'DNA']
PROTEIN_MATRIX = ['BLOSUM', 'PAM', 'GONNET', 'ID']
DNA_MATRIX = ['IUB', 'CLUSTALW']
def __init__(self, sequence_file, command = 'clustalw'):
"""Initialize some general parameters that can be set as attributes.
Arguments:
o sequence_file - The file to read the sequences for alignment from.
o command - The command used to run clustalw. This defaults to
just 'clustalw' (ie. assumes you have it on your path somewhere).
General attributes that can be set:
o is_quick - if set as 1, will use a fast algorithm to create
the alignment guide tree.
o allow_negative - allow negative values in the alignment matrix.
Multiple alignment attributes that can be set as attributes:
o gap_open_pen - Gap opening penalty
o gap_ext_pen - Gap extension penalty
o is_no_end_pen - A flag as to whether or not there should be a gap
separation penalty for the ends.
o gap_sep_range - The gap separation penalty range.
o is_no_pgap - A flag to turn off residue specific gaps
o is_no_hgap - A flag to turn off hydrophilic gaps
o h_gap_residues - A list of residues to count as hydrophilic
o max_div - A percent identity to use for delay (? - I don't understand
this!)
o trans_weight - The weight to use for transitions
"""
self.sequence_file = sequence_file
self.command = command
self.is_quick = None
self.allow_negative = None
self.gap_open_pen = None
self.gap_ext_pen = None
self.is_no_end_pen = None
self.gap_sep_range = None
self.is_no_pgap = None
self.is_no_hgap = None
self.h_gap_residues = []
self.max_div = None
self.trans_weight = None
# other attributes that should be set via various functions
# 1. output parameters
self.output_file = None
self.output_type = None
self.output_order = None
self.change_case = None
self.add_seqnos = None
# 2. a guide tree to use
self.guide_tree = None
self.new_tree = None
# 3. matrices
self.protein_matrix = None
self.dna_matrix = None
# 4. type of residues
self.type = None
def __str__(self):
"""Write out the command line as a string."""
#On Linux with clustalw 1.83, you can do:
#clustalw input.faa
#clustalw /full/path/input.faa
#clustalw -INFILE=input.faa
#clustalw -INFILE=/full/path/input.faa
#
#Note these fail (using DOS style slashes):
#
#clustalw /INFILE=input.faa
#clustalw /INFILE=/full/path/input.faa
#
#On Windows XP with clustalw.exe 1.83, these work at
#the command prompt:
#
#clustalw.exe input.faa
#clustalw.exe /INFILE=input.faa
#clustalw.exe /INFILE="input.faa"
#clustalw.exe /INFILE="with space.faa"
#clustalw.exe /INFILE=C:\full\path\input.faa
#clustalw.exe /INFILE="C:\full path\with spaces.faa"
#
#Sadly these fail:
#clustalw.exe "input.faa"
#clustalw.exe "with space.faa"
#clustalw.exe C:\full\path\input.faa
#clustalw.exe "C:\full path\with spaces.faa"
#
#Testing today (using a different binary of clustalw.exe 1.83),
#using -INFILE as follows seems to work. However I had once noted:
#These also fail but a minus/dash does seem to
#work with other options (!):
#clustalw.exe -INFILE=input.faa
#clustalw.exe -INFILE=C:\full\path\input.faa
#
#Also these fail:
#clustalw.exe "/INFILE=input.faa"
#clustalw.exe "/INFILE=C:\full\path\input.faa"
#
#Thanks to Emanuel Hey for flagging this on the mailing list.
#
#In addition, both self.command and self.sequence_file
#may contain spaces, so should be quoted. But clustalw
#is fussy.
cline = _escape_filename(self.command)
cline += ' -INFILE=%s' % _escape_filename(self.sequence_file)
# general options
if self.type:
cline += " -TYPE=%s" % self.type
if self.is_quick == 1:
#Some versions of clustalw are case sensitive,
#and require -quicktree rather than -QUICKTREE
cline += " -quicktree"
if self.allow_negative == 1:
cline += " -NEGATIVE"
# output options
if self.output_file:
cline += " -OUTFILE=%s" % _escape_filename(self.output_file)
if self.output_type:
cline += " -OUTPUT=%s" % self.output_type
if self.output_order:
cline += " -OUTORDER=%s" % self.output_order
if self.change_case:
cline += " -CASE=%s" % self.change_case
if self.add_seqnos:
cline += " -SEQNOS=%s" % self.add_seqnos
if self.new_tree:
# clustal does not work if -align is written -ALIGN
cline += " -NEWTREE=%s -align" % _escape_filename(self.new_tree)
# multiple alignment options
if self.guide_tree:
cline += " -USETREE=%s" % _escape_filename(self.guide_tree)
if self.protein_matrix:
cline += " -MATRIX=%s" % self.protein_matrix
if self.dna_matrix:
cline += " -DNAMATRIX=%s" % self.dna_matrix
if self.gap_open_pen:
cline += " -GAPOPEN=%s" % self.gap_open_pen
if self.gap_ext_pen:
cline += " -GAPEXT=%s" % self.gap_ext_pen
if self.is_no_end_pen == 1:
cline += " -ENDGAPS"
if self.gap_sep_range:
cline += " -GAPDIST=%s" % self.gap_sep_range
if self.is_no_pgap == 1:
cline += " -NOPGAP"
if self.is_no_hgap == 1:
cline += " -NOHGAP"
if len(self.h_gap_residues) != 0:
# stick the list of residues together as one big list o' residues
residue_list = ''
for residue in self.h_gap_residues:
residue_list = residue_list + residue
cline += " -HGAPRESIDUES=%s" % residue_list
if self.max_div:
cline += " -MAXDIV=%s" % self.max_div
if self.trans_weight:
cline += " -TRANSWEIGHT=%s" % self.trans_weight
return cline
def set_output(self, output_file, output_type = None, output_order = None,
change_case = None, add_seqnos = None):
"""Set the output parameters for the command line.
"""
self.output_file = output_file
if output_type:
output_type = output_type.upper()
if output_type not in self.OUTPUT_TYPES:
raise ValueError("Invalid output type %s. Valid choices are %s"
% (output_type, self.OUTPUT_TYPES))
else:
self.output_type = output_type
if output_order:
output_order = output_order.upper()
if output_order not in self.OUTPUT_ORDER:
raise ValueError("Invalid output order %s. Valid choices are %s"
% (output_order, self.OUTPUT_ORDER))
else:
self.output_order = output_order
if change_case:
change_case = change_case.upper()
if output_type != "GDE":
raise ValueError("Change case only valid for GDE output.")
elif change_case not in self.OUTPUT_CASE:
raise ValueError("Invalid change case %s. Valid choices are %s"
% (change_case, self.OUTPUT_CASE))
else:
self.change_case = change_case
if add_seqnos:
add_seqnos = add_seqnos.upper()
if output_type:
raise ValueError("Add SeqNos only valid for CLUSTAL output.")
elif add_seqnos not in self.OUTPUT_SEQNOS:
raise ValueError("Invalid seqnos option %s. Valid choices: %s"
% (add_seqnos, self.OUTPUT_SEQNOS))
else:
self.add_seqnos = add_seqnos
def set_guide_tree(self, tree_file):
"""Provide a file to use as the guide tree for alignment.
Raises:
o IOError - If the tree_file doesn't exist."""
if not(os.path.exists(tree_file)):
raise IOError("Could not find the guide tree file %s." %
tree_file)
else:
self.guide_tree = tree_file
def set_new_guide_tree(self, tree_file):
"""Set the name of the guide tree file generated in the alignment.
"""
self.new_tree = tree_file
def set_protein_matrix(self, protein_matrix):
"""Set the type of protein matrix to use.
Protein matrix can be either one of the defined types (blosum, pam,
gonnet or id) or a file with your own defined matrix.
"""
if protein_matrix.upper() in self.PROTEIN_MATRIX:
self.protein_matrix = protein_matrix.upper()
elif os.path.exists(protein_matrix):
self.protein_matrix = protein_matrix
else:
raise ValueError("Invalid matrix %s. Options are %s or a file." %
(protein_matrix.upper(), self.PROTEIN_MATRIX))
def set_dna_matrix(self, dna_matrix):
"""Set the type of DNA matrix to use.
The dna_matrix can either be one of the defined types (iub or clustalw)
or a file with the matrix to use."""
if dna_matrix.upper() in self.DNA_MATRIX:
self.dna_matrix = dna_matrix.upper()
elif os.path.exists(dna_matrix):
self.dna_matrix = dna_matrix
else:
raise ValueError("Invalid matrix %s. Options are %s or a file." %
(dna_matrix, self.DNA_MATRIX))
def set_type(self, residue_type):
"""Set the type of residues within the file.
Clustal tries to guess whether the info is protein or DNA based on
the number of GATCs, but this can be wrong if you have a messed up
protein or DNA you are working with, so this allows you to set it
explicitly.
"""
residue_type = residue_type.upper()
if residue_type in self.RESIDUE_TYPES:
self.type = residue_type
else:
raise ValueError("Invalid residue type %s. Valid choices are %s"
% (residue_type, self.RESIDUE_TYPES))
def _test():
"""Run the Bio.Clustalw module's doctests (PRIVATE).
This will try and locate the unit tests directory, and run the doctests
from there in order that the relative paths used in the examples work.
"""
import doctest
import os
if os.path.isdir(os.path.join("..","..","Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("..","..","Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
if __name__ == "__main__":
_test()
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Clustalw/__init__.py
|
Python
|
gpl-2.0
| 21,060
|
[
"Biopython"
] |
97b89ef08413148073edd4af4de7a4e6a1d4f7ba0422ce817111f41fd0aacbcd
|