| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (1 class) | license (15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
"""ASEr Setup Script."""
from setuptools import setup, find_packages
from codecs import open
from os import path
from os import listdir
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Generate a list of python scripts
scpts = []
for i in listdir(here + '/bin'):
if i.endswith('.py'):
scpts.append('bin/' + i)
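# A roughly equivalent way to build the same relative 'bin/...' paths (a sketch,
# not part of the original setup script, assuming the same directory layout):
#     scpts = [path.join('bin', f) for f in listdir(path.join(here, 'bin'))
#              if f.endswith('.py')]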
setup(
name='ASEr',
version='0.3.0',
description='Get ASE counts from BAMs or raw fastq data -- repackage of pipeline by Carlo Artieri ',
long_description=long_description,
url='https://github.com/MikeDacre/ASEr',
author='Michael Dacre',
author_email='mike.dacre@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Environment :: Console',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='ASE allele-specific expression RNA-seq fastq BAM SAM SNP',
install_requires=['pybedtools', 'pysam'],
scripts=scpts,
packages=['ASEr']
)
| TheFraserLab/ASEr | setup.py | Python | mit | 1,637 | ["ASE", "pysam"] | 06541891230d98800f9094ce5468b09f3e56a06c1161204af3d6c038901456aa |
# Copyright 2008, 2009 CAMd
# (see accompanying license files for details).
"""Definition of the Atoms class.
This module defines the central object in the ASE package: the Atoms
object.
"""
import warnings
from math import cos, sin
import numpy as np
from ase.atom import Atom
from ase.data import atomic_numbers, chemical_symbols, atomic_masses
import ase.units as units
class Atoms(object):
"""Atoms object.
The Atoms object can represent an isolated molecule, or a
periodically repeated structure. It has a unit cell and
there may be periodic boundary conditions along any of the three
unit cell axes.
Information about the atoms (atomic numbers and position) is
stored in ndarrays. Optionally, there can be information about
tags, momenta, masses, magnetic moments and charges.
In order to calculate energies, forces and stresses, a calculator
object has to be attached to the atoms object.
Parameters:
symbols: str (formula) or list of str
Can be a string formula, a list of symbols or a list of
Atom objects. Examples: 'H2O', 'COPt12', ['H', 'H', 'O'],
[Atom('Ne', (x, y, z)), ...].
positions: list of xyz-positions
Atomic positions. Anything that can be converted to an
ndarray of shape (n, 3) will do: [(x1,y1,z1), (x2,y2,z2),
...].
scaled_positions: list of scaled-positions
Like positions, but given in units of the unit cell.
Can not be set at the same time as positions.
numbers: list of int
Atomic numbers (use only one of symbols/numbers).
tags: list of int
Special purpose tags.
momenta: list of xyz-momenta
Momenta for all atoms.
masses: list of float
Atomic masses in atomic units.
magmoms: list of float or list of xyz-values
Magnetic moments. Can be either a single value for each atom
for collinear calculations or three numbers for each atom for
non-collinear calculations.
charges: list of float
Atomic charges.
cell: 3x3 matrix
Unit cell vectors. Can also be given as just three
numbers for orthorhombic cells. Default value: [1, 1, 1].
celldisp: Vector
Unit cell displacement vector. To visualize a displaced cell
around the center of mass of a system of atoms. Default value
= (0,0,0)
pbc: one or three bool
Periodic boundary conditions flags. Examples: True,
False, 0, 1, (1, 1, 0), (True, False, False). Default
value: False.
constraint: constraint object(s)
Used for applying one or more constraints during structure
optimization.
calculator: calculator object
Used to attach a calculator for calculating energies and atomic
forces.
info: dict of key-value pairs
Dictionary of key-value pairs with additional information
about the system. The following keys may be used by ase:
- spacegroup: Spacegroup instance
- unit_cell: 'conventional' | 'primitive' | int | 3 ints
- adsorbate_info:
Items in the info attribute survive copy and slicing and can
be stored to and retrieved from trajectory files given that the
key is a string, the value is picklable and, if the value is a
user-defined object, its base class is importable. One should
not make any assumptions about the existence of keys.
Examples:
These three are equivalent:
>>> d = 1.104 # N2 bondlength
>>> a = Atoms('N2', [(0, 0, 0), (0, 0, d)])
>>> a = Atoms(numbers=[7, 7], positions=[(0, 0, 0), (0, 0, d)])
>>> a = Atoms([Atom('N', (0, 0, 0)), Atom('N', (0, 0, d))])
FCC gold:
>>> a = 4.05 # Gold lattice constant
>>> b = a / 2
>>> fcc = Atoms('Au',
... cell=[(0, b, b), (b, 0, b), (b, b, 0)],
... pbc=True)
Hydrogen wire:
>>> d = 0.9 # H-H distance
>>> L = 7.0
>>> h = Atoms('H', positions=[(0, L / 2, L / 2)],
... cell=(d, L, L),
... pbc=(1, 0, 0))
"""
def __init__(self, symbols=None,
positions=None, numbers=None,
tags=None, momenta=None, masses=None,
magmoms=None, charges=None,
scaled_positions=None,
cell=None, pbc=None, celldisp=None,
constraint=None,
calculator=None,
info=None):
atoms = None
if hasattr(symbols, 'get_positions'):
atoms = symbols
symbols = None
elif (isinstance(symbols, (list, tuple)) and
len(symbols) > 0 and isinstance(symbols[0], Atom)):
# Get data from a list or tuple of Atom objects:
data = [[atom.get_raw(name) for atom in symbols]
for name in
['position', 'number', 'tag', 'momentum',
'mass', 'magmom', 'charge']]
atoms = self.__class__(None, *data)
symbols = None
if atoms is not None:
# Get data from another Atoms object:
if scaled_positions is not None:
raise NotImplementedError
if symbols is None and numbers is None:
numbers = atoms.get_atomic_numbers()
if positions is None:
positions = atoms.get_positions()
if tags is None and atoms.has('tags'):
tags = atoms.get_tags()
if momenta is None and atoms.has('momenta'):
momenta = atoms.get_momenta()
if magmoms is None and atoms.has('magmoms'):
magmoms = atoms.get_initial_magnetic_moments()
if masses is None and atoms.has('masses'):
masses = atoms.get_masses()
if charges is None and atoms.has('charges'):
charges = atoms.get_initial_charges()
if cell is None:
cell = atoms.get_cell()
if celldisp is None:
celldisp = atoms.get_celldisp()
if pbc is None:
pbc = atoms.get_pbc()
if constraint is None:
constraint = [c.copy() for c in atoms.constraints]
if calculator is None:
calculator = atoms.get_calculator()
self.arrays = {}
if symbols is None:
if numbers is None:
if positions is not None:
natoms = len(positions)
elif scaled_positions is not None:
natoms = len(scaled_positions)
else:
natoms = 0
numbers = np.zeros(natoms, int)
self.new_array('numbers', numbers, int)
else:
if numbers is not None:
raise ValueError(
'Use only one of "symbols" and "numbers".')
else:
self.new_array('numbers', symbols2numbers(symbols), int)
if cell is None:
cell = np.eye(3)
self.set_cell(cell)
if celldisp is None:
celldisp = np.zeros(shape=(3, 1))
self.set_celldisp(celldisp)
if positions is None:
if scaled_positions is None:
positions = np.zeros((len(self.arrays['numbers']), 3))
else:
positions = np.dot(scaled_positions, self._cell)
else:
if scaled_positions is not None:
raise RuntimeError('Both scaled and cartesian positions set!')
self.new_array('positions', positions, float, (3,))
self.set_constraint(constraint)
self.set_tags(default(tags, 0))
self.set_momenta(default(momenta, (0.0, 0.0, 0.0)))
self.set_masses(default(masses, None))
self.set_initial_magnetic_moments(default(magmoms, 0.0))
self.set_initial_charges(default(charges, 0.0))
if pbc is None:
pbc = False
self.set_pbc(pbc)
if info is None:
self.info = {}
else:
self.info = dict(info)
self.adsorbate_info = {}
self.set_calculator(calculator)
def set_calculator(self, calc=None):
"""Attach calculator object."""
if hasattr(calc, '_SetListOfAtoms'):
from ase.old import OldASECalculatorWrapper
calc = OldASECalculatorWrapper(calc, self)
if hasattr(calc, 'set_atoms'):
calc.set_atoms(self)
self._calc = calc
def get_calculator(self):
"""Get currently attached calculator object."""
return self._calc
def _del_calculator(self):
self._calc = None
calc = property(get_calculator, set_calculator, _del_calculator,
doc='Calculator object.')
def set_constraint(self, constraint=None):
"""Apply one or more constraints.
The *constraint* argument must be one constraint object or a
list of constraint objects."""
if constraint is None:
self._constraints = []
else:
if isinstance(constraint, (list, tuple)):
self._constraints = constraint
else:
self._constraints = [constraint]
def _get_constraints(self):
return self._constraints
def _del_constraints(self):
self._constraints = []
constraints = property(_get_constraints, set_constraint, _del_constraints,
'Constraints of the atoms.')
def set_cell(self, cell, scale_atoms=False, fix=None):
"""Set unit cell vectors.
Parameters:
cell :
Unit cell. A 3x3 matrix (the three unit cell vectors) or
just three numbers for an orthorhombic cell.
scale_atoms : bool
Fix atomic positions or move atoms with the unit cell?
Default behavior is to *not* move the atoms (scale_atoms=False).
Examples:
Two equivalent ways to define an orthorhombic cell:
>>> a.set_cell([a, b, c])
>>> a.set_cell([(a, 0, 0), (0, b, 0), (0, 0, c)])
FCC unit cell:
>>> a.set_cell([(0, b, b), (b, 0, b), (b, b, 0)])
"""
if fix is not None:
raise TypeError('Please use scale_atoms=%s' % (not fix))
cell = np.array(cell, float)
if cell.shape == (3,):
cell = np.diag(cell)
elif cell.shape != (3, 3):
raise ValueError('Cell must be length 3 sequence or '
'3x3 matrix!')
if scale_atoms:
M = np.linalg.solve(self._cell, cell)
self.arrays['positions'][:] = np.dot(self.arrays['positions'], M)
self._cell = cell
def set_celldisp(self, celldisp):
celldisp = np.array(celldisp, float)
self._celldisp = celldisp
def get_celldisp(self):
"""Get the unit cell displacement vector."""
return self._celldisp.copy()
def get_cell(self):
"""Get the three unit cell vectors as a 3x3 ndarray."""
return self._cell.copy()
def get_reciprocal_cell(self):
"""Get the three reciprocal lattice vectors as a 3x3 ndarray.
Note that the commonly used factor of 2 pi for Fourier
transforms is not included here."""
rec_unit_cell = np.linalg.inv(self.get_cell()).transpose()
return rec_unit_cell
def set_pbc(self, pbc):
"""Set periodic boundary condition flags."""
if isinstance(pbc, int):
pbc = (pbc,) * 3
self._pbc = np.array(pbc, bool)
def get_pbc(self):
"""Get periodic boundary condition flags."""
return self._pbc.copy()
def new_array(self, name, a, dtype=None, shape=None):
"""Add new array.
If *shape* is not *None*, the shape of *a* will be checked."""
if dtype is not None:
a = np.array(a, dtype)
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""Get an array.
Returns a copy unless the optional argument copy is false.
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""Update array.
If *shape* is not *None*, the shape of *a* will be checked.
If *a* is *None*, then the array is deleted."""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def has(self, name):
"""Check for existence of array.
name must be one of: 'tags', 'momenta', 'masses', 'magmoms',
'charges'."""
return name in self.arrays
def set_atomic_numbers(self, numbers):
"""Set atomic numbers."""
self.set_array('numbers', numbers, int, ())
def get_atomic_numbers(self):
"""Get integer array of atomic numbers."""
return self.arrays['numbers'].copy()
def get_chemical_symbols(self):
"""Get list of chemical symbol strings."""
return [chemical_symbols[Z] for Z in self.arrays['numbers']]
def set_chemical_symbols(self, symbols):
"""Set chemical symbols."""
self.set_array('numbers', symbols2numbers(symbols), int, ())
def get_chemical_formula(self, mode='hill'):
"""Get the chemical formula as a string based on the chemical symbols.
Parameters:
mode: str
There are three different modes available:
'all': The list of chemical symbols is contracted to a string,
e.g. ['C', 'H', 'H', 'H', 'O', 'H'] becomes 'CHHHOH'.
'reduce': The same as 'all' where repeated elements are contracted
to a single symbol and a number, e.g. 'CHHHOCHHH' is reduced to
'CH3OCH3'.
'hill': The list of chemical symbols is contracted to a string
following the Hill notation (alphabetical order with C and H
first), e.g. 'CHHHOCHHH' is reduced to 'C2H6O' and 'SOOHOHO' to
'H2O4S'. This is the default.
"""
if len(self) == 0:
return ''
if mode == 'reduce':
numbers = self.get_atomic_numbers()
n = len(numbers)
changes = np.concatenate(([0], np.arange(1, n)[numbers[1:] !=
numbers[:-1]]))
symbols = [chemical_symbols[e] for e in numbers[changes]]
counts = np.append(changes[1:], n) - changes
elif mode == 'hill':
numbers = self.get_atomic_numbers()
elements = np.unique(numbers)
symbols = np.array([chemical_symbols[e] for e in elements])
counts = np.array([(numbers == e).sum() for e in elements])
ind = symbols.argsort()
symbols = symbols[ind]
counts = counts[ind]
if 'H' in symbols:
i = np.arange(len(symbols))[symbols == 'H']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
if 'C' in symbols:
i = np.arange(len(symbols))[symbols == 'C']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
elif mode == 'all':
numbers = self.get_atomic_numbers()
symbols = [chemical_symbols[n] for n in numbers]
counts = [1] * len(numbers)
else:
raise ValueError("Use mode = 'all', 'reduce' or 'hill'.")
formula = ''
for s, c in zip(symbols, counts):
formula += s
if c > 1:
formula += str(c)
return formula
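# Illustrative doctest-style sketch (not in the original source) of the three modes
# documented above, using a dimethyl-ether-like symbol string:
#     >>> dme = Atoms('CHHHOCHHH')
#     >>> dme.get_chemical_formula('all')
#     'CHHHOCHHH'
#     >>> dme.get_chemical_formula('reduce')
#     'CH3OCH3'
#     >>> dme.get_chemical_formula('hill')
#     'C2H6O'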
def set_tags(self, tags):
"""Set tags for all atoms. If only one tag is supplied, it is
applied to all atoms."""
if type(tags) == int:
tags = [tags] * len(self)
self.set_array('tags', tags, int, ())
def get_tags(self):
"""Get integer array of tags."""
if 'tags' in self.arrays:
return self.arrays['tags'].copy()
else:
return np.zeros(len(self), int)
def set_momenta(self, momenta):
"""Set momenta."""
if len(self.constraints) > 0 and momenta is not None:
momenta = np.array(momenta) # modify a copy
for constraint in self.constraints:
if hasattr(constraint, 'adjust_momenta'):
constraint.adjust_momenta(self.arrays['positions'],
momenta)
self.set_array('momenta', momenta, float, (3,))
def set_velocities(self, velocities):
"""Set the momenta by specifying the velocities."""
self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)
def get_momenta(self):
"""Get array of momenta."""
if 'momenta' in self.arrays:
return self.arrays['momenta'].copy()
else:
return np.zeros((len(self), 3))
def set_masses(self, masses='defaults'):
"""Set atomic masses.
The array masses should contain a list of masses. In case
the masses argument is not given or for those elements of the
masses list that are None, standard values are set."""
if masses == 'defaults':
masses = atomic_masses[self.arrays['numbers']]
elif isinstance(masses, (list, tuple)):
newmasses = []
for m, Z in zip(masses, self.arrays['numbers']):
if m is None:
newmasses.append(atomic_masses[Z])
else:
newmasses.append(m)
masses = newmasses
self.set_array('masses', masses, float, ())
def get_masses(self):
"""Get array of masses."""
if 'masses' in self.arrays:
return self.arrays['masses'].copy()
else:
return atomic_masses[self.arrays['numbers']]
def set_initial_magnetic_moments(self, magmoms=None):
"""Set the initial magnetic moments.
Use either one or three numbers for every atom (collinear
or non-collinear spins)."""
if magmoms is None:
self.set_array('magmoms', None)
else:
magmoms = np.asarray(magmoms)
self.set_array('magmoms', magmoms, float, magmoms.shape[1:])
def get_initial_magnetic_moments(self):
"""Get array of initial magnetic moments."""
if 'magmoms' in self.arrays:
return self.arrays['magmoms'].copy()
else:
return np.zeros(len(self))
def get_magnetic_moments(self):
"""Get calculated local magnetic moments."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_magnetic_moments(self)
def get_magnetic_moment(self):
"""Get calculated total magnetic moment."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_magnetic_moment(self)
def set_initial_charges(self, charges=None):
"""Set the initial charges."""
if charges is None:
self.set_array('charges', None)
else:
self.set_array('charges', charges, float, ())
def get_initial_charges(self):
"""Get array of initial charges."""
if 'charges' in self.arrays:
return self.arrays['charges'].copy()
else:
return np.zeros(len(self))
def get_charges(self):
"""Get calculated charges."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
try:
return self._calc.get_charges(self)
except AttributeError:
raise NotImplementedError
def set_positions(self, newpositions):
"""Set positions, honoring any constraints."""
positions = self.arrays['positions']
if self.constraints:
newpositions = np.array(newpositions, float)
for constraint in self.constraints:
constraint.adjust_positions(positions, newpositions)
self.set_array('positions', newpositions, shape=(3,))
def get_positions(self, wrap=False):
"""Get array of positions. If wrap==True, wraps atoms back
into unit cell.
"""
if wrap:
scaled = self.get_scaled_positions()
return np.dot(scaled, self._cell)
else:
return self.arrays['positions'].copy()
def get_calculation_done(self):
"""Let the calculator calculate its thing,
using the current input.
"""
if self.calc is None:
raise RuntimeError('Atoms object has no calculator.')
self.calc.initialize(self)
self.calc.calculate(self)
def get_potential_energy(self, force_consistent=False,
apply_constraint=True):
"""Calculate potential energy.
Ask the attached calculator to calculate the potential energy and
apply constraints. Use *apply_constraint=False* to get the raw
energy.
When supported by the calculator, either the energy extrapolated
to zero Kelvin or the energy consistent with the forces (the free
energy) can be returned.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
if force_consistent:
energy = self._calc.get_potential_energy(
self, force_consistent=force_consistent)
else:
energy = self._calc.get_potential_energy(self)
if apply_constraint:
constraints = [c for c in self.constraints
if hasattr(c, 'adjust_potential_energy')]
for constraint in constraints:
energy += constraint.adjust_potential_energy(
self.arrays['positions'], energy)
return energy
def get_potential_energies(self):
"""Calculate the potential energies of all the atoms.
Only available with calculators supporting per-atom energies
(e.g. classical potentials).
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_potential_energies(self)
def get_kinetic_energy(self):
"""Get the kinetic energy."""
momenta = self.arrays.get('momenta')
if momenta is None:
return 0.0
return 0.5 * np.vdot(momenta, self.get_velocities())
def get_velocities(self):
"""Get array of velocities."""
momenta = self.arrays.get('momenta')
if momenta is None:
return None
m = self.arrays.get('masses')
if m is None:
m = atomic_masses[self.arrays['numbers']]
return momenta / m.reshape(-1, 1)
def get_total_energy(self):
"""Get the total energy - potential plus kinetic energy."""
return self.get_potential_energy() + self.get_kinetic_energy()
def get_forces(self, apply_constraint=True):
"""Calculate atomic forces.
Ask the attached calculator to calculate the forces and apply
constraints. Use *apply_constraint=False* to get the raw
forces."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
forces = self._calc.get_forces(self)
if apply_constraint:
for constraint in self.constraints:
constraint.adjust_forces(self.arrays['positions'], forces)
return forces
def get_stress(self, voigt=True):
"""Calculate stress tensor.
Returns an array of the six independent components of the
symmetric stress tensor, in the traditional Voigt order
(xx, yy, zz, yz, xz, xy) or as a 3x3 matrix. Default is Voigt
order.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
stress = self._calc.get_stress(self)
shape = stress.shape
if shape == (3, 3):
warnings.warn('Converting 3x3 stress tensor from %s ' %
self._calc.__class__.__name__ +
'calculator to the required Voigt form.')
stress = np.array([stress[0, 0], stress[1, 1], stress[2, 2],
stress[1, 2], stress[0, 2], stress[0, 1]])
else:
assert shape == (6,)
if voigt:
return stress
else:
xx, yy, zz, yz, xz, xy = stress
return np.array([(xx, xy, xz),
(xy, yy, yz),
(xz, yz, zz)])
def get_stresses(self):
"""Calculate the stress-tensor of all the atoms.
Only available with calculators supporting per-atom energies and
stresses (e.g. classical potentials). Even for such calculators
there is a certain arbitrariness in defining per-atom stresses.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_stresses(self)
def get_dipole_moment(self):
"""Calculate the electric dipole moment for the atoms object.
Only available for calculators which have a get_dipole_moment()
method."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_dipole_moment(self)
def copy(self):
"""Return a copy."""
import copy
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a.copy()
atoms.constraints = copy.deepcopy(self.constraints)
atoms.adsorbate_info = copy.deepcopy(self.adsorbate_info)
return atoms
def __len__(self):
return len(self.arrays['positions'])
def get_number_of_atoms(self):
"""Returns the number of atoms.
Equivalent to len(atoms) in the standard ASE Atoms class.
"""
return len(self)
def __repr__(self):
num = self.get_atomic_numbers()
N = len(num)
if N == 0:
symbols = ''
elif N <= 60:
symbols = self.get_chemical_formula('reduce')
else:
symbols = self.get_chemical_formula('hill')
s = "%s(symbols='%s', " % (self.__class__.__name__, symbols)
for name in self.arrays:
if name == 'numbers':
continue
s += '%s=..., ' % name
if (self._cell - np.diag(self._cell.diagonal())).any():
s += 'cell=%s, ' % self._cell.tolist()
else:
s += 'cell=%s, ' % self._cell.diagonal().tolist()
s += 'pbc=%s, ' % self._pbc.tolist()
if len(self.constraints) == 1:
s += 'constraint=%s, ' % repr(self.constraints[0])
if len(self.constraints) > 1:
s += 'constraint=%s, ' % repr(self.constraints)
if self._calc is not None:
s += 'calculator=%s(...), ' % self._calc.__class__.__name__
return s[:-2] + ')'
def __add__(self, other):
atoms = self.copy()
atoms += other
return atoms
def extend(self, other):
"""Extend atoms object by appending atoms from *other*."""
if isinstance(other, Atom):
other = self.__class__([other])
n1 = len(self)
n2 = len(other)
for name, a1 in self.arrays.items():
a = np.zeros((n1 + n2,) + a1.shape[1:], a1.dtype)
a[:n1] = a1
if name == 'masses':
a2 = other.get_masses()
else:
a2 = other.arrays.get(name)
if a2 is not None:
a[n1:] = a2
self.arrays[name] = a
for name, a2 in other.arrays.items():
if name in self.arrays:
continue
a = np.empty((n1 + n2,) + a2.shape[1:], a2.dtype)
a[n1:] = a2
if name == 'masses':
a[:n1] = self.get_masses()[:n1]
else:
a[:n1] = 0
self.set_array(name, a)
return self
__iadd__ = extend
def append(self, atom):
"""Append atom to end."""
self.extend(self.__class__([atom]))
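# Illustrative sketch (not in the original source; positions are arbitrary):
#     >>> m = Atoms('CO', positions=[(0, 0, 0), (0, 0, 1.1)])
#     >>> m += Atoms('H', positions=[(0, 1.0, -0.5)])    # __iadd__ is extend()
#     >>> m.append(Atom('H', (0, -1.0, -0.5)))
#     >>> len(m)
#     4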
def __getitem__(self, i):
"""Return a subset of the atoms.
i -- scalar integer, list of integers, or slice object
describing which atoms to return.
If i is a scalar, return an Atom object. If i is a list or a
slice, return an Atoms object with the same cell, pbc, and
other associated info as the original Atoms object. The
indices of the constraints will be shuffled so that they match
the indexing in the subset returned.
"""
if isinstance(i, int):
natoms = len(self)
if i < -natoms or i >= natoms:
raise IndexError('Index out of range.')
return Atom(atoms=self, index=i)
import copy
from ase.constraints import FixConstraint
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
# TODO: Do we need to shuffle indices in adsorbate_info too?
atoms.adsorbate_info = self.adsorbate_info
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a[i].copy()
# Constraints need to be deepcopied, since we need to shuffle
# the indices
atoms.constraints = copy.deepcopy(self.constraints)
condel = []
for con in atoms.constraints:
if isinstance(con, FixConstraint):
try:
con.index_shuffle(i)
except IndexError:
condel.append(con)
for con in condel:
atoms.constraints.remove(con)
return atoms
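# Illustrative sketch (not in the original source): integer indexing returns an
# Atom view, slicing returns a new Atoms object that keeps cell, pbc and info.
#     >>> w = Atoms('H2O', positions=[(0, 0.76, -0.48), (0, -0.76, -0.48), (0, 0, 0.12)])
#     >>> w[0]      # Atom view of the first hydrogen
#     >>> w[:2]     # new Atoms object containing only the two hydrogens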
def __delitem__(self, i):
from ase.constraints import FixAtoms
check_constraint = np.array([isinstance(c, FixAtoms)
for c in self._constraints])
if (len(self._constraints) > 0 and (not check_constraint.all() or
isinstance(i, list))):
raise RuntimeError('Remove constraint using set_constraint() '
'before deleting atoms.')
mask = np.ones(len(self), bool)
mask[i] = False
for name, a in self.arrays.items():
self.arrays[name] = a[mask]
if len(self._constraints) > 0:
for n in range(len(self._constraints)):
self._constraints[n].delete_atom(range(len(mask))[i])
def pop(self, i=-1):
"""Remove and return atom at index *i* (default last)."""
atom = self[i]
atom.cut_reference_to_atoms()
del self[i]
return atom
def __imul__(self, m):
"""In-place repeat of atoms."""
if isinstance(m, int):
m = (m, m, m)
M = np.product(m)
n = len(self)
for name, a in self.arrays.items():
self.arrays[name] = np.tile(a, (M,) + (1,) * (len(a.shape) - 1))
positions = self.arrays['positions']
i0 = 0
for m0 in range(m[0]):
for m1 in range(m[1]):
for m2 in range(m[2]):
i1 = i0 + n
positions[i0:i1] += np.dot((m0, m1, m2), self._cell)
i0 = i1
if self.constraints is not None:
self.constraints = [c.repeat(m, n) for c in self.constraints]
self._cell = np.array([m[c] * self._cell[c] for c in range(3)])
return self
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
__mul__ = repeat
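# Illustrative sketch (not in the original source): repeat/__mul__ tile the cell
# and everything in it.
#     >>> chain = Atoms('H', positions=[(0, 0, 0)], cell=(0.9, 10, 10), pbc=(1, 0, 0))
#     >>> len(chain.repeat((3, 1, 1)))
#     3
#     >>> len(chain * 3)     # same as repeat((3, 3, 3))
#     27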
def translate(self, displacement):
"""Translate atomic positions.
The displacement argument can be a float, an xyz vector, or an
nx3 array (where n is the number of atoms)."""
self.arrays['positions'] += np.array(displacement)
def center(self, vacuum=None, axis=(0, 1, 2)):
"""Center atoms in unit cell.
Centers the atoms in the unit cell, so there is the same
amount of vacuum on all sides.
vacuum: float (default: None)
If specified, adjust the amount of vacuum when centering.
If vacuum=10.0 there will thus be 10 Angstrom of vacuum
on each side.
axis: int or sequence of ints
Axis or axes to act on. Default: Act on all axes.
"""
# Find the orientations of the faces of the unit cell
c = self.get_cell()
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.sqrt(np.dot(dirs[i], dirs[i])) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if isinstance(axis, int):
axes = (axis,)
else:
axes = axis
p = self.arrays['positions']
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
cosphi = np.dot(c[i], dirs[i]) / np.sqrt(np.dot(c[i], c[i]))
longer[i] = lng / cosphi
shift[i] = shf / cosphi
# Now, do it!
translation = np.zeros(3)
for i in axes:
nowlen = np.sqrt(np.dot(c[i], c[i]))
self._cell[i] *= 1 + longer[i] / nowlen
translation += shift[i] * c[i] / nowlen
self.arrays['positions'] += translation
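# Illustrative sketch (not in the original source): pad a molecule with 5 Angstrom
# of vacuum on every side; the cell grows and the atoms are shifted symmetrically.
#     >>> h2 = Atoms('H2', positions=[(0, 0, 0), (0, 0, 0.74)])
#     >>> h2.center(vacuum=5.0)
#     >>> h2.get_cell().diagonal()     # approximately [10.0, 10.0, 10.74]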
def get_center_of_mass(self, scaled=False):
"""Get the center of mass.
If scaled=True the center of mass in scaled coordinates
is returned."""
m = self.get_masses()
com = np.dot(m, self.arrays['positions']) / m.sum()
if scaled:
return np.linalg.solve(self._cell.T, com)
else:
return com
def get_moments_of_inertia(self, vectors=False):
"""Get the moments of inertia along the principal axes.
The three principal moments of inertia are computed from the
eigenvalues of the symmetric inertial tensor. Periodic boundary
conditions are ignored. Units of the moments of inertia are
amu*angstrom**2.
"""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
masses = self.get_masses()
# Initialize elements of the inertial tensor
I11 = I22 = I33 = I12 = I13 = I23 = 0.0
for i in range(len(self)):
x, y, z = positions[i]
m = masses[i]
I11 += m * (y ** 2 + z ** 2)
I22 += m * (x ** 2 + z ** 2)
I33 += m * (x ** 2 + y ** 2)
I12 += -m * x * y
I13 += -m * x * z
I23 += -m * y * z
I = np.array([[I11, I12, I13],
[I12, I22, I23],
[I13, I23, I33]])
evals, evecs = np.linalg.eigh(I)
if vectors:
return evals, evecs.transpose()
else:
return evals
def get_angular_momentum(self):
"""Get total angular momentum with respect to the center of mass."""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
return np.cross(positions, self.get_momenta()).sum(0)
def rotate(self, v, a=None, center=(0, 0, 0), rotate_cell=False):
"""Rotate atoms based on a vector and an angle, or two vectors.
Parameters:
v:
Vector to rotate the atoms around. Vectors can be given as
strings: 'x', '-x', 'y', ... .
a = None:
Angle that the atoms are rotated around the vector 'v'. If an angle
is not specified, the length of 'v' is used as the angle
(default). The angle can also be a vector and then 'v' is rotated
into 'a'.
center = (0, 0, 0):
The center is kept fixed under the rotation. Use 'COM' to fix
the center of mass, 'COP' to fix the center of positions or
'COU' to fix the center of cell.
rotate_cell = False:
If true the cell is also rotated.
Examples:
Rotate 90 degrees around the z-axis, so that the x-axis is
rotated into the y-axis:
>>> a = pi / 2
>>> atoms.rotate('z', a)
>>> atoms.rotate((0, 0, 1), a)
>>> atoms.rotate('-z', -a)
>>> atoms.rotate((0, 0, a))
>>> atoms.rotate('x', 'y')
"""
norm = np.linalg.norm
v = string2vector(v)
if a is None:
a = norm(v)
if isinstance(a, (float, int)):
v /= norm(v)
c = cos(a)
s = sin(a)
else:
v2 = string2vector(a)
v /= norm(v)
v2 /= norm(v2)
c = np.dot(v, v2)
v = np.cross(v, v2)
s = norm(v)
# In case *v* and *a* are parallel, np.cross(v, v2) vanishes
# and can't be used as a rotation axis. However, in this
# case any rotation axis perpendicular to v2 will do.
eps = 1e-7
if s < eps:
v = np.cross((0, 0, 1), v2)
if norm(v) < eps:
v = np.cross((1, 0, 0), v2)
assert norm(v) >= eps
elif s > 0:
v /= s
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
p = self.arrays['positions'] - center
self.arrays['positions'][:] = (c * p -
np.cross(p, s * v) +
np.outer(np.dot(p, v), (1.0 - c) * v) +
center)
if rotate_cell:
rotcell = self.get_cell()
rotcell[:] = (c * rotcell -
np.cross(rotcell, s * v) +
np.outer(np.dot(rotcell, v), (1.0 - c) * v))
self.set_cell(rotcell)
def rotate_euler(self, center=(0, 0, 0), phi=0.0, theta=0.0, psi=0.0):
"""Rotate atoms via Euler angles.
See e.g. http://mathworld.wolfram.com/EulerAngles.html for an explanation.
Parameters:
center :
The point to rotate about. A sequence of length 3 with the
coordinates, or 'COM' to select the center of mass, 'COP' to
select center of positions or 'COU' to select center of cell.
phi :
The 1st rotation angle around the z axis.
theta :
Rotation around the x axis.
psi :
2nd rotation around the z axis.
"""
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
# First move the molecule to the origin. In contrast to MATLAB,
# numpy broadcasts the smaller array to the larger row-wise,
# so there is no need to play with the Kronecker product.
rcoords = self.positions - center
# First Euler rotation about z in matrix form
D = np.array(((cos(phi), sin(phi), 0.),
(-sin(phi), cos(phi), 0.),
(0., 0., 1.)))
# Second Euler rotation about x:
C = np.array(((1., 0., 0.),
(0., cos(theta), sin(theta)),
(0., -sin(theta), cos(theta))))
# Third Euler rotation, 2nd rotation about z:
B = np.array(((cos(psi), sin(psi), 0.),
(-sin(psi), cos(psi), 0.),
(0., 0., 1.)))
# Total Euler rotation
A = np.dot(B, np.dot(C, D))
# Do the rotation
rcoords = np.dot(A, np.transpose(rcoords))
# Move back to the rotation point
self.positions = np.transpose(rcoords) + center
def get_dihedral(self, list):
"""Calculate dihedral angle.
Calculate dihedral angle between the vectors list[0]->list[1]
and list[2]->list[3], where list contains the atomic indexes
in question.
"""
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = self.positions[list[1]] - self.positions[list[0]]
b = self.positions[list[2]] - self.positions[list[1]]
c = self.positions[list[3]] - self.positions[list[2]]
bxa = np.cross(b, a)
bxa /= np.linalg.norm(bxa)
cxb = np.cross(c, b)
cxb /= np.linalg.norm(cxb)
angle = np.vdot(bxa, cxb)
# check for numerical trouble due to finite precision:
if angle < -1:
angle = -1
if angle > 1:
angle = 1
angle = np.arccos(angle)
if np.vdot(bxa, c) > 0:
angle = 2 * np.pi - angle
return angle
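# Illustrative sketch (not in the original source): four coplanar atoms in a trans
# arrangement give a dihedral angle of pi (180 degrees).
#     >>> t = Atoms('HCCH', positions=[(0, 1, 0), (0, 0, 0), (1, 0, 0), (1, -1, 0)])
#     >>> t.get_dihedral([0, 1, 2, 3])     # approximately 3.14159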
def _masked_rotate(self, center, axis, diff, mask):
# do rotation of subgroup by copying it to temporary atoms object
# and then rotating that
#
# recursive object definition might not be the most elegant thing,
# more generally useful might be a rotation function with a mask?
group = self.__class__()
for i in range(len(self)):
if mask[i]:
group += self[i]
group.translate(-center)
group.rotate(axis, diff)
group.translate(center)
# set positions in original atoms object
j = 0
for i in range(len(self)):
if mask[i]:
self.positions[i] = group[j].position
j += 1
def set_dihedral(self, list, angle, mask=None):
"""
Set the dihedral angle between vectors list[0]->list[1] and
list[2]->list[3] by changing the atom indexed by list[3].
If mask is not None, all the atoms described in mask
(read: the entire subgroup) are moved.
Example: the following defines a very crude
ethane-like molecule and twists one half of it by 30 degrees.
>>> atoms = Atoms('HHCCHH', [[-1, 1, 0], [-1, -1, 0], [0, 0, 0],
[1, 0, 0], [2, 1, 0], [2, -1, 0]])
>>> atoms.set_dihedral([1,2,3,4],7*pi/6,mask=[0,0,0,1,1,1])
"""
# if not provided, set mask to the last atom in the
# dihedral description
if mask is None:
mask = np.zeros(len(self))
mask[list[3]] = 1
# compute the necessary change in dihedral angle, from the current value
current = self.get_dihedral(list)
diff = angle - current
axis = self.positions[list[2]] - self.positions[list[1]]
center = self.positions[list[2]]
self._masked_rotate(center, axis, diff, mask)
def rotate_dihedral(self, list, angle, mask=None):
"""Rotate dihedral angle.
Complementing the two routines above: rotate a group by a
predefined dihedral angle, starting from its current
configuration
"""
start = self.get_dihedral(list)
self.set_dihedral(list, angle + start, mask)
def get_angle(self, list):
"""Get angle formed by three atoms.
Calculate the angle between the vectors list[1]->list[0] and
list[1]->list[2], where list contains the atomic indexes in
question."""
# normalized vector 1->0, 1->2:
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
angle = np.vdot(v10, v12)
angle = np.arccos(angle)
return angle
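# Illustrative sketch (not in the original source): a right-angle arrangement
# gives pi/2.
#     >>> bent = Atoms('HOH', positions=[(1, 0, 0), (0, 0, 0), (0, 1, 0)])
#     >>> bent.get_angle([0, 1, 2])        # approximately 1.5708 (90 degrees)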
def set_angle(self, list, angle, mask=None):
"""Set angle formed by three atoms.
Sets the angle between vectors list[1]->list[0] and
list[1]->list[2].
Same usage as in set_dihedral."""
# If not provided, set mask to the last atom in the angle description
if mask is None:
mask = np.zeros(len(self))
mask[list[2]] = 1
# Compute the necessary change in angle, from the current value
current = self.get_angle(list)
diff = angle - current
# Do rotation of subgroup by copying it to temporary atoms object and
# then rotating that
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
axis = np.cross(v10, v12)
center = self.positions[list[1]]
self._masked_rotate(center, axis, diff, mask)
def rattle(self, stdev=0.001, seed=42):
"""Randomly displace atoms.
This method adds random displacements to the atomic positions,
taking a possible constraint into account. The random numbers are
drawn from a normal distribution of standard deviation stdev.
For a parallel calculation, it is important to use the same
seed on all processors! """
rs = np.random.RandomState(seed)
positions = self.arrays['positions']
self.set_positions(positions +
rs.normal(scale=stdev, size=positions.shape))
def get_distance(self, a0, a1, mic=False, vector=False):
"""Return distance between two atoms.
Use mic=True to use the Minimum Image Convention.
"""
R = self.arrays['positions']
D = R[a1] - R[a0]
if mic:
Dr = np.linalg.solve(self._cell.T, D)
D = np.dot(Dr - np.round(Dr) * self._pbc, self._cell)
if vector:
return D
return np.linalg.norm(D)
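# Illustrative sketch (not in the original source): with periodic boundaries the
# Minimum Image Convention picks the shortest periodic image of the separation.
#     >>> pair = Atoms('H2', positions=[(0.1, 0, 0), (9.9, 0, 0)],
#     ...              cell=(10, 10, 10), pbc=True)
#     >>> pair.get_distance(0, 1)              # 9.8 (direct separation)
#     >>> pair.get_distance(0, 1, mic=True)    # 0.2 (nearest periodic image)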
def get_distances(self, a, indices, mic=False):
"""Return the distances from atom *a* to the atoms given by *indices*.
Use mic=True to use the Minimum Image Convention.
"""
R = self.arrays['positions']
D = R[indices] - R[a]
if mic:
Dr = np.linalg.solve(self._cell, D.T)
D = np.dot(self._cell, Dr - (self._pbc * np.round(Dr).T).T).T
return np.sqrt((D**2).sum(1))
def get_all_distances(self, mic=False):
"""Return the distances of all atoms to all other atoms.
Use mic=True to use the Minimum Image Convention.
"""
L = len(self)
R = self.arrays['positions']
D = []
for i in range(L):
D.append(R - R[i])
D = np.concatenate(D)
if mic:
Dr = np.linalg.solve(self._cell, D.T)
D = np.dot(self._cell, Dr - (self._pbc * np.round(Dr).T).T).T
results = np.sqrt((D**2).sum(1))
results.shape = (L, L)
return results
def set_distance(self, a0, a1, distance, fix=0.5, mic=False):
"""Set the distance between two atoms.
Set the distance between atoms *a0* and *a1* to *distance*.
By default, the center of the two atoms will be fixed. Use
*fix=0* to fix the first atom, *fix=1* to fix the second
atom and *fix=0.5* (default) to fix the center of the bond."""
R = self.arrays['positions']
D = R[a1] - R[a0]
if mic:
Dr = np.linalg.solve(self._cell.T, D)
D = np.dot(Dr - np.round(Dr) * self._pbc, self._cell)
x = 1.0 - distance / np.linalg.norm(D)
R[a0] += (x * fix) * D
R[a1] -= (x * (1.0 - fix)) * D
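# Illustrative sketch (not in the original source): with the default fix=0.5 both
# atoms move and the bond midpoint stays fixed.
#     >>> co = Atoms('CO', positions=[(0, 0, 0), (0, 0, 1.3)])
#     >>> co.set_distance(0, 1, 1.1)
#     >>> co.get_distance(0, 1)                # 1.1, midpoint still at z = 0.65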
def get_scaled_positions(self):
"""Get positions relative to unit cell.
Atoms outside the unit cell will be wrapped into the cell in
those directions with periodic boundary conditions so that the
scaled coordinates are between zero and one."""
scaled = np.linalg.solve(self._cell.T, self.arrays['positions'].T).T
for i in range(3):
if self._pbc[i]:
# Yes, we need to do it twice.
# See the scaled_positions.py test
scaled[:, i] %= 1.0
scaled[:, i] %= 1.0
return scaled
def set_scaled_positions(self, scaled):
"""Set positions relative to unit cell."""
self.arrays['positions'][:] = np.dot(scaled, self._cell)
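# Illustrative sketch (not in the original source): scaled positions are wrapped
# into [0, 1) only along periodic directions.
#     >>> wire = Atoms('H', positions=[(5.5, 0.5, 0.5)], cell=(5, 1, 1), pbc=(1, 0, 0))
#     >>> wire.get_scaled_positions()          # approximately [[0.1, 0.5, 0.5]]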
def get_temperature(self):
"""Get the temperature in Kelvin."""
ekin = self.get_kinetic_energy() / len(self)
return ekin / (1.5 * units.kB)
def __eq__(self, other):
"""Check for identity of two atoms objects.
Identity means: same positions, atomic numbers, unit cell and
periodic boundary conditions."""
try:
a = self.arrays
b = other.arrays
return (len(self) == len(other) and
(a['positions'] == b['positions']).all() and
(a['numbers'] == b['numbers']).all() and
(self._cell == other.cell).all() and
(self._pbc == other.pbc).all())
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return eq
else:
return not eq
__hash__ = None
def get_volume(self):
"""Get volume of unit cell."""
return abs(np.linalg.det(self._cell))
def _get_positions(self):
"""Return reference to positions-array for in-place manipulations."""
return self.arrays['positions']
def _set_positions(self, pos):
"""Set positions directly, bypassing constraints."""
self.arrays['positions'][:] = pos
positions = property(_get_positions, _set_positions,
doc='Attribute for direct ' +
'manipulation of the positions.')
def _get_atomic_numbers(self):
"""Return reference to atomic numbers for in-place
manipulations."""
return self.arrays['numbers']
numbers = property(_get_atomic_numbers, set_atomic_numbers,
doc='Attribute for direct ' +
'manipulation of the atomic numbers.')
def _get_cell(self):
"""Return reference to unit cell for in-place manipulations."""
return self._cell
cell = property(_get_cell, set_cell, doc='Attribute for direct ' +
'manipulation of the unit cell.')
def _get_pbc(self):
"""Return reference to pbc-flags for in-place manipulations."""
return self._pbc
pbc = property(_get_pbc, set_pbc,
doc='Attribute for direct manipulation ' +
'of the periodic boundary condition flags.')
def write(self, filename, format=None, **kwargs):
"""Write yourself to a file."""
from ase.io import write
write(filename, self, format, **kwargs)
def edit(self):
"""Modify atoms interactively through ase-gui viewer.
Conflicts leading to undesirable behaviour might arise
when matplotlib has been pre-imported with certain
incompatible backends and while trying to use the
plot feature inside the interactive ag. To circumvent,
please set matplotlib.use('gtk') before calling this
method.
"""
from ase.gui.images import Images
from ase.gui.gui import GUI
images = Images([self])
gui = GUI(images)
gui.run()
# use atoms returned from gui:
# (1) delete all currently available atoms
self.set_constraint()
for z in range(len(self)):
self.pop()
edited_atoms = gui.images.get_atoms(0)
# (2) extract atoms from edit session
self.extend(edited_atoms)
self.set_constraint(edited_atoms._get_constraints())
self.set_cell(edited_atoms.get_cell())
self.set_initial_magnetic_moments(edited_atoms.get_magnetic_moments())
self.set_tags(edited_atoms.get_tags())
return
def string2symbols(s):
"""Convert string to list of chemical symbols."""
n = len(s)
if n == 0:
return []
c = s[0]
if c.isdigit():
i = 1
while i < n and s[i].isdigit():
i += 1
return int(s[:i]) * string2symbols(s[i:])
if c == '(':
p = 0
for i, c in enumerate(s):
if c == '(':
p += 1
elif c == ')':
p -= 1
if p == 0:
break
j = i + 1
while j < n and s[j].isdigit():
j += 1
if j > i + 1:
m = int(s[i + 1:j])
else:
m = 1
return m * string2symbols(s[1:i]) + string2symbols(s[j:])
if c.isupper():
i = 1
if 1 < n and s[1].islower():
i += 1
j = i
while j < n and s[j].isdigit():
j += 1
if j > i:
m = int(s[i:j])
else:
m = 1
return m * [s[:i]] + string2symbols(s[j:])
else:
raise ValueError
def symbols2numbers(symbols):
if isinstance(symbols, str):
symbols = string2symbols(symbols)
numbers = []
for s in symbols:
if isinstance(s, (str, unicode)):
numbers.append(atomic_numbers[s])
else:
numbers.append(s)
return numbers
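# Illustrative doctest-style sketch (not in the original source) of the two
# module-level helpers defined above:
#     >>> string2symbols('CH3(CH2)2OH')
#     ['C', 'H', 'H', 'H', 'C', 'H', 'H', 'C', 'H', 'H', 'O', 'H']
#     >>> symbols2numbers('H2O')
#     [1, 1, 8]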
def string2vector(v):
if isinstance(v, str):
if v[0] == '-':
return -string2vector(v[1:])
w = np.zeros(3)
w['xyz'.index(v)] = 1.0
return w
return np.array(v, float)
def default(data, dflt):
"""Helper function for setting default values."""
if data is None:
return None
elif isinstance(data, (list, tuple)):
newdata = []
allnone = True
for x in data:
if x is None:
newdata.append(dflt)
else:
newdata.append(x)
allnone = False
if allnone:
return None
return newdata
else:
return data
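# Illustrative sketch (not in the original source) of how default() is used by
# Atoms.__init__ above: None (or an all-None sequence) means "no data", otherwise
# per-atom gaps are filled with the default value.
#     >>> default(None, 0.0)                   # -> None
#     >>> default([1.0, None, 2.0], 0.0)       # -> [1.0, 0.0, 2.0]
#     >>> default((None, None), 0.0)           # -> None (all entries were missing)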
| askhl/ase | ase/atoms.py | Python | gpl-2.0 | 56,652 | ["ASE"] | b90a1950b1738545fc2ead7f34a6811c2dad72ee97185de16ba9c4f93c2aa292 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from xml.sax.saxutils import escape
from urllib.parse import urljoin
from collections import Iterable
from rdflib import RDF, BNode
from pyxb.xmlschema.structures import datatypes
from pyxb.utils import domutils
from pyxb.namespace import XMLSchema_instance as Xsi, NamespaceContext
from pyxb.namespace import XMLNamespaces as Xmlns
from shexypy.shexyparser.parser.ShExDocVisitor import ShExDocVisitor
from shexypy.shexyparser.parser.ShExDocParser import ShExDocParser
from shexypy.schema.ShEx import *
exclude_namespaces = ['xsi', 'shex']
class ShExDocVisitor_impl(ShExDocVisitor):
def __init__(self):
ShExDocVisitor.__init__(self)
self._schema = None # Schema being built (type: Schema)
self.base = "" # Base is context specific and can be changed throughout
self.cur_shape = None # current working shape definition (type: Shape)
self.shape_stack = [] # parent shape definitions
self.cur_tc = None # current working triple constraint definition
self.namespaces = {} # local name to namespace dictionary
dns = pyxb.namespace.CreateAbsentNamespace() # default namespace
self.nsc = pyxb.namespace.NamespaceContext(target_namespace=dns)
Namespace.setPrefix(prefix="shex")
pyxb.defaultNamespace = Namespace
def to_dom(self):
""" Convert into DOM and map the various namespaces
:return:
"""
domutils.BindingDOMSupport.SetDefaultNamespace(Namespace)
exclusions = [ns for ns in exclude_namespaces if ns not in self.namespaces]
if exclusions:
self._schema.exclude_prefixes = ' '.join(exclusions)
schema_dom = self._schema.toDOM()
bs = domutils.BindingDOMSupport()
for ns, url in self.namespaces.items():
if ns:
bs.addXMLNSDeclaration(schema_dom.documentElement,
pyxb.namespace.NamespaceForURI(url, create_if_missing=True),
ns)
schema_dom.documentElement.setAttributeNS(Namespace.uri(),
'xsi:schemaLocation',
'http://www.w3.org/shex/ ../xsd/ShEx.xsd')
schema_dom.documentElement.setAttributeNS(Xmlns.uri, 'xmlns:xsi', Xsi.uri())
return schema_dom
@property
def schema(self):
return self._schema
def visitShExDoc(self, ctx: ShExDocParser.ShExDocContext) -> Schema:
# shExDoc : directive* ((notStartAction | startActions) statement*)? EOF; // leading CODE
self._schema = Schema()
self.visitChildren(ctx)
def visitStatement(self, ctx: ShExDocParser.StatementContext):
return self.visitChildren(ctx)
def visitNotStartAction(self, ctx: ShExDocParser.NotStartActionContext):
# notStartAction : start | shape | valueClassDefinition ;
self.visitChildren(ctx)
def visitDirective(self, ctx: ShExDocParser.DirectiveContext):
# directive : baseDecl | prefixDecl ;
self.visitChildren(ctx)
def visitValueClassDefinition(self, ctx: ShExDocParser.ValueClassDefinitionContext):
# valueClassDefinition : valueClassLabel ('=' valueClassExpr semanticActions | KW_EXTERNAL) ;
vcd = ValueClassDefinition()
if ctx.KW_EXTERNAL():
vcd.external = self.visit(ctx.valueClassLabel())
else:
vcd.definition = self.visit(ctx.valueClassExpr())
vcd.definition.valueClassLabel = self.visit(ctx.valueClassLabel())
acts = self.visit(ctx.semanticActions())
if acts:
vcd.actions = acts
self._schema.valueClass.append(vcd)
def visitValueClassExpr(self, ctx: ShExDocParser.ValueClassExprContext) -> InlineValueClassDefinition:
# valueClassExpr : valueClass ('+' valueClass)*
# Note that there is no visitValueClass -- just the sublabels:
# visitValueClassLiteral
# visitValueClassDatatype
# visitValueClassGroup
# visitValueClassValueSet
# visitValueClassAny
assert self.cur_tc is None, "Recursion not allowed on value class definitions"
ivcd = InlineValueClassDefinition()
for c in ctx.valueClass():
# We fill this out as if it were a triple constraint and then fold it back to a value class
# Note that predicate, inverse, actions and annotations are not included in a value class definition
self.cur_tc = TripleConstraint()
self.cur_tc.reversed = False
self.visit(c)
if self.cur_tc.objectConstraint:
oc = self.cur_tc.objectConstraint
ivcd.facet = oc.facet
ivcd.or_ = oc.or_
ivcd.valueSet = oc.valueSet
if self.cur_tc.object:
vs = ValueSet()
irir = IRIRange(base=self.cur_tc.object)
vs.iriRange = irir
ivcd.valueSet.append(vs)
if self.cur_tc.objectShape:
gsc = GroupShapeConstr()
sr = ShapeRef(ref=self.cur_tc.objectShape)
gsc.disjunct.append(sr)
ivcd.or_.append(gsc)
if self.cur_tc.objectType:
ivcd.nodetype.append(self.cur_tc.objectType)
if self.cur_tc.datatype:
ivcd.datatype.append(self.cur_tc.datatype)
self.cur_tc = None
return ivcd
def visitValueClassLabel(self, ctx: ShExDocParser.ValueClassLabelContext):
# valueClassLabel : '$' iri ;
return self.visit(ctx.iri())
def visitBaseDecl(self, ctx: ShExDocParser.BaseDeclContext):
# baseDecl : KW_BASE IRIREF
self.base = self._iriref(ctx)
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext):
# prefixDecl : KW_PREFIX PNAME_NS IRIREF
ns_txt = self._iriref(ctx)
prefix = ctx.PNAME_NS().getText().split(':')[0]
if prefix:
self.nsc.declareNamespace(pyxb.namespace.Namespace(ns_txt), prefix)
self.namespaces[prefix] = ns_txt
else:
self.nsc.setDefaultNamespace(pyxb.namespace.Namespace(ns_txt))
self._schema.default_namespace = ns_txt
def visitStart(self, ctx: ShExDocParser.StartContext):
# start : KW_START '=' (shapeLabel | shapeDefinition semanticActions)
if ctx.shapeLabel():
self._schema.start = self.visit(ctx.shapeLabel())
else:
shape = self.visit(ctx.shapeDefinition())
acts = self.visit(ctx.semanticActions())
if acts:
shape.actions = acts
shape.label = "_:" + str(BNode())
self._schema.start = shape.label
self._schema.shape.append(shape)
def visitShape(self, ctx: ShExDocParser.ShapeContext):
# shape : KW_VIRTUAL? shapeLabel shapeDefinition semanticActions ;
assert self.cur_shape is None, "Recursion error -- should be outermost shape"
shape = self.visit(ctx.shapeDefinition())
if ctx.KW_VIRTUAL():
shape.virtual = True
shape.label = self.visit(ctx.shapeLabel())
acts = self.visit(ctx.semanticActions())
if acts:
shape.actions = acts
self._schema.shape.append(shape)
def visitShapeDefinition(self, ctx: ShExDocParser.ShapeDefinitionContext) -> Shape:
# shapeDefinition : (includeSet | inclPropertySet | KW_CLOSED)* '{' someOfShape? '}' ;
self._push_shape(Shape())
# This is really screwy, as, on the shape definition level there are multiple sets of includes, while
# on the tripleConstraint level there are just single includes. We lose the "settiness" and make one
# big include set here
if ctx.includeSet():
[self.visit(c) for c in ctx.includeSet()]
[self.visit(ips) for ips in ctx.inclPropertySet()]
if ctx.KW_CLOSED():
self.cur_shape.closed = True
if ctx.someOfShape():
self.visit(ctx.someOfShape())
return self._pop_shape()
def visitIncludeSet(self, ctx: ShExDocParser.IncludeSetContext):
# includeSet : '&' shapeLabel+
[self.cur_shape.import_.append(ShapeRef(ref=self.visit(sl))) for sl in ctx.shapeLabel()]
def visitInclPropertySet(self, ctx: ShExDocParser.InclPropertySetContext):
# inclPropertySet : KW_EXTRA predicate+
self.cur_shape.extra += [IRIRef(ref=self.visit(p)) for p in ctx.predicate()]
def visitSomeOfShape(self, ctx: ShExDocParser.SomeOfShapeContext):
# someOfShape : groupShape | multiElementSomeOf
self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#multiElementSomeOf.
def visitMultiElementSomeOf(self, ctx: ShExDocParser.MultiElementSomeOfContext) -> ShapeConstraint:
# multiElementSomeOf : groupShape ( '|' groupShape)+ ;
self._push_shape(ShapeConstraint())
[self.visit(c) for c in ctx.groupShape()]
rval = self._pop_shape()
self._set_someof(rval)
return rval
# Visit a parse tree produced by ShExDocParser#innerShape.
def visitInnerShape(self, ctx: ShExDocParser.InnerShapeContext):
# innerShape : multiElementGroup | multiElementSomeOf ;
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#groupShape.
def visitGroupShape(self, ctx: ShExDocParser.GroupShapeContext):
# groupShape : singleElementGroup | multiElementGroup ;
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#singleElementGroup.
def visitSingleElementGroup(self, ctx: ShExDocParser.SingleElementGroupContext):
# singleElementGroup : unaryShape ','? ;
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#multiElementGroup.
def visitMultiElementGroup(self, ctx: ShExDocParser.MultiElementGroupContext) -> ShapeConstraint:
# multiElementGroup : unaryShape (',' unaryShape)+ ','? ;
self._push_shape(ShapeConstraint())
[self.visit(c) for c in ctx.unaryShape()]
rval = self._pop_shape()
self._set_group(rval)
return rval
def _proc_card_annot_semacts(self, target, ctx):
""" Process cardinality, annotations and semantic actions for target
:param target:
:param ctx:
:return:
"""
self._cardinality(target, ctx)
acts = self.visit(ctx.semanticActions())
if acts:
target.actions = acts
[target.annotation.append(self.visit(a)) for a in ctx.annotation()]
def visitUnaryShape(self, ctx: ShExDocParser.UnaryShapeContext):
# unaryShape : tripleConstraint | include | encapsulatedShape ;
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#encapsulatedShape.
def visitEncapsulatedShape(self, ctx: ShExDocParser.EncapsulatedShapeContext):
# encapsulatedShape : '(' innerShape ')' cardinality? annotation* semanticActions ;
self._proc_card_annot_semacts(self.visit(ctx.innerShape()), ctx)
def visitInclude(self, ctx: ShExDocParser.IncludeContext):
# include : '&' shapeLabel
self.cur_shape.include.append(ShapeRef(ref=self.visit(ctx.shapeLabel())))
def visitShapeLabel(self, ctx: ShExDocParser.ShapeLabelContext) -> ShapeLabel:
# shapeLabel : iri | blankNode
return ShapeLabel(self.visitChildren(ctx))
def visitTripleConstraint(self, ctx: ShExDocParser.TripleConstraintContext):
# tripleConstraint : senseFlags? predicate valueClassOrRef cardinality? annotation* semanticActions;
# senseFlags : '!' '^'? | '^' '!'? ;
tc = TripleConstraint()
sf = self.visit(ctx.senseFlags()) if ctx.senseFlags() else ''
if '!' in sf:
tc.negated = True
tc.predicate = self.visit(ctx.predicate())
tc.reversed = '^' in sf
if ctx.valueClassOrRef().valueClassLabel():
tc.valueClass = self.visit(ctx.valueClassOrRef().valueClassLabel())
else:
parent_tc = self.cur_tc
self.cur_tc = tc
self.visit(ctx.valueClassOrRef().valueClass())
self.cur_tc = parent_tc
tc.__dict__.pop("reversed", None)
self._proc_card_annot_semacts(tc, ctx)
self._set_tc(tc)
def visitSenseFlags(self, ctx: ShExDocParser.SenseFlagsContext):
# senseFlags : '!' '^'? | '^' '!'? ;
return ctx.getText()
def visitPredicate(self, ctx: ShExDocParser.PredicateContext):
# predicate : iri | rdfType ;
if ctx.rdfType():
return str(RDF.type)
else:
return self.visit(ctx.iri())
def visitValueClassOrRef(self, ctx: ShExDocParser.ValueClassOrRefContext):
# valueClassOrRef : valueClass | valueClassLabel ;
# Note that valueClass isn't visited directly -- it is one of valueClassLiteral, valueClassNonLiteral, etc.
assert False, "Should not be visited"
def visitValueClassLiteral(self, ctx: ShExDocParser.ValueClassLiteralContext):
# valueClass : KW_LITERAL xsFacet*
self._subj_obj_type(NodeType.LITERAL)
if ctx.xsFacet():
vc = TripleConstraintValueClass()
for f in ctx.xsFacet():
vc.facet.append(self.visit(f))
self._subj_obj_constraint(vc)
def visitValueClassNonLiteral(self, ctx: ShExDocParser.ValueClassNonLiteralContext):
# valueClass : (KW_IRI | KW_BNODE | KW_NONLITERAL) groupShapeConstr? stringFacet*
self._subj_obj_type(NodeType.IRI if ctx.KW_IRI() else NodeType.BNODE if ctx.KW_BNODE() else NodeType.NONLITERAL)
vc = None
if ctx.groupShapeConstr():
gsc = self.visit(ctx.groupShapeConstr())
if len(gsc.disjunct) == 1:
if self.cur_tc.reversed:
self.cur_tc.subjectShape = gsc.disjunct[0].ref
else:
self.cur_tc.objectShape = gsc.disjunct[0].ref
else:
vc = TripleConstraintValueClass()
vc.groupShapeConstr = gsc
if ctx.stringFacet():
if vc is None:
vc = TripleConstraintValueClass()
for sf in ctx.stringFacet():
facet = XSFacet()
self._proc_string_facet(facet, sf)
vc.facet.append(facet)
if vc is not None:
self._subj_obj_constraint(vc)
def visitValueClassDatatype(self, ctx: ShExDocParser.ValueClassDatatypeContext):
# valueClass : datatype xsFacet* # valueClassDatatype
self.cur_tc.datatype = self.visit(ctx.datatype())
if ctx.xsFacet():
vc = TripleConstraintValueClass()
for f in ctx.xsFacet():
vc.facet.append(self.visit(f))
self._subj_obj_constraint(vc)
def visitValueClassGroup(self, ctx: ShExDocParser.ValueClassGroupContext):
# valueClass : groupShapeConstr # valueClassGroup
gsc = self.visit(ctx.groupShapeConstr())
if len(gsc.disjunct) == 1:
if self.cur_tc.reversed:
self.cur_tc.subjectShape = gsc.disjunct[0].ref
else:
self.cur_tc.objectShape = gsc.disjunct[0].ref
else:
vc = TripleConstraintValueClass()
vc.or_ = gsc
self._subj_obj_constraint(vc)
def visitValueClassValueSet(self, ctx: ShExDocParser.ValueClassValueSetContext):
# valueClass : valueSet # valueClassValueSet
# valueSet
vs = self.visit(ctx.valueSet())
# A single iri range without a stem maps to subject/object
if vs.iriRange and len(vs.iriRange) == 1 and not vs.iriRange[0].exclusion and not vs.iriRange[0].stem:
if self.cur_tc.reversed:
self.cur_tc.subject = vs.iriRange[0].base
else:
self.cur_tc.object = vs.iriRange[0].base
else:
vc = TripleConstraintValueClass()
vc.valueSet = vs
self._subj_obj_constraint(vc)
def visitValueClassAny(self, ctx: ShExDocParser.ValueClassAnyContext):
# valueClass : '.' # valueClassAny
# Any is indicated by the lack of constraints
if self.cur_tc.reversed:
self.cur_tc.inverse = True
def visitGroupShapeConstr(self, ctx: ShExDocParser.GroupShapeConstrContext) -> GroupShapeConstr:
# groupShapeConstr : shapeOrRef (KW_OR shapeOrRef)*
rval = GroupShapeConstr()
for sor in ctx.shapeOrRef():
rval.disjunct.append(ShapeRef(ref=self.visit(sor)))
return rval
def visitShapeOrRef(self, ctx: ShExDocParser.ShapeOrRefContext) -> ShapeLabel:
# shapeOrRef : ATPNAME_LN | ATPNAME_NS | '@' shapeLabel | shapeDefinition
if ctx.shapeDefinition():
shape = self.visit(ctx.shapeDefinition())
shape.label = ShapeLabel('_:' + str(BNode()))
self._schema.shape.append(shape)
return shape.label
elif ctx.shapeLabel():
return self.visit(ctx.shapeLabel())
elif ctx.ATPNAME_LN():
return ShapeLabel(ctx.ATPNAME_LN().getText()[1:]) # strip the leading @
else:
return ShapeLabel(ctx.ATPNAME_NS().getText()[1:-1]) # strip the @ and trailing :
def visitXsFacet(self, ctx: ShExDocParser.XsFacetContext) -> XSFacet:
# xsFacet : stringFacet | numericFacet
rval = XSFacet()
if ctx.stringFacet():
return self._proc_string_facet(rval, ctx.stringFacet())
else:
self._proc_numeric_facet(rval, ctx.numericFacet())
return rval
def visitStringFacet(self, ctx: ShExDocParser.StringFacetContext):
# stringFacet : KW_PATTERN string | '~' string | stringLength INTEGER |
return self._proc_string_facet(StringFacet(), ctx)
def visitStringLength(self, ctx: ShExDocParser.StringLengthContext):
# stringLength : KW_LENGTH | KW_MINLENGTH | KW_MAXLENGTH;
# Not visited -- handled above
return self.visitChildren(ctx)
def visitNumericFacet(self, ctx: ShExDocParser.NumericFacetContext) -> NumericFacet:
# numericFacet : numericRange INTEGER | numericLength INTEGER
return self._proc_numeric_facet(NumericFacet(), ctx)
def visitNumericRange(self, ctx: ShExDocParser.NumericRangeContext):
# numericRange : KW_MININCLUSIVE | KW_MINEXCLUSIVE | KW_MAXINCLUSIVE | KW_MAXEXCLUSIVE ;
# Not visited -- handled above
return self.visitChildren(ctx)
def visitNumericLength(self, ctx: ShExDocParser.NumericLengthContext):
# numericLength : KW_TOTALDIGITS | KW_FRACTIONDIGITS ;
# not visited - handled above
return self.visitChildren(ctx)
def visitDatatype(self, ctx: ShExDocParser.DatatypeContext):
# datatype : iri ;
return self.visitChildren(ctx)
def visitAnnotation(self, ctx: ShExDocParser.AnnotationContext) -> Annotation:
# annotation : ';' predicate (iri | literal) ;
rval = Annotation()
rval.iri = self.visit(ctx.predicate())
if ctx.literal():
lit = self.visit(ctx.literal())
if not ctx.literal().rdfLiteral():
lit = RDFLiteral(lit)
rval.literal = lit
else:
rval.iriref = IRIRef(ref=self.visit(ctx.iri()))
return rval
def visitCardinality(self, ctx: ShExDocParser.CardinalityContext) -> list:
# cardinality : '*' | '+' | '?' | repeatRange ;
if not ctx:
return [1, 1]
elif ctx.repeatRange():
return self.visitRepeatRange(ctx.repeatRange())
elif ctx.getText() == '*':
return [0, None]
elif ctx.getText() == '+':
return [1, None]
else:
assert ctx.getText() == '?', "Unknown cardinality character"
return [0, 1]
# returns [min_range, max_range or None]
def visitRepeatRange(self, ctx: ShExDocParser.RepeatRangeContext) -> list:
# repeatRange : '{' min_range ( ',' max_range?)? '}' ;
minr = self.visit(ctx.min_range())
maxr = self.visit(ctx.max_range()) if ctx.max_range() else None if ctx.getChild(2).getText() == ',' else minr
return [minr, maxr]
def visitMin_range(self, ctx: ShExDocParser.Min_rangeContext):
# min_range : INTEGER ;
return int(ctx.INTEGER().getText())
def visitMax_range(self, ctx: ShExDocParser.Max_rangeContext):
# max_range : INTEGER | '*' ;
return int(ctx.INTEGER().getText()) if ctx.INTEGER() else None
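# Illustrative mapping (not part of the grammar itself): how the cardinality visitors above
# translate ShEx cardinality text into the [min, max] pairs consumed by _cardinality();
# None means unbounded.
#   '*'    -> [0, None]     '+'    -> [1, None]     '?'     -> [0, 1]
#   '{2}'  -> [2, 2]        '{2,}' -> [2, None]     '{2,5}' -> [2, 5]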
# returns ValueSet or IRIRef
def visitValueSet(self, ctx: ShExDocParser.ValueSetContext):
# value : '(' value* ')' ;
vs = ValueSet()
for v in ctx.value():
val = self.visit(v)
if v.iriRange():
vs.iriRange.append(val)
elif v.literal().rdfLiteral():
vs.rdfLiteral.append(val)
elif v.literal().numericLiteral():
if v.literal().numericLiteral().DECIMAL():
vs.decimal.append(val.decimal)
elif v.literal().numericLiteral().INTEGER():
vs.integer.append(val.integer)
else:
vs.double.append(val.double)
else:
vs.boolean.append(val)
return vs
def visitValue(self, ctx: ShExDocParser.ValueContext):
# value : iriRange | literal
return self.visitChildren(ctx)
def visitIriRange(self, ctx: ShExDocParser.IriRangeContext) -> IRIRange:
# iriRange : iri ('~' exclusion*)? | '.' exclusion+
rval = IRIRange()
if ctx.iri():
rval.base = self.visit(ctx.iri())
if ctx.getChildCount() > 1 and str(ctx.getChild(1)) == '~':
rval.stem = True
for e in ctx.exclusion():
rval.exclusion.append(self.visit(e))
return rval
def visitExclusion(self, ctx: ShExDocParser.ExclusionContext) -> IRIStem:
# exclusion : '-' iri '~'?
rval = IRIStem(base=self.visit(ctx.iri()))
if ctx.getChildCount() > 2:
assert str(ctx.getChild(2)) == '~', "Expecting stem (~) instruction"
rval.stem = True
return rval
def visitLiteral(self, ctx: ShExDocParser.LiteralContext):
# literal : rdfLiteral | numericLiteral | booleanLiteral ;
return self.visitChildren(ctx)
def visitNumericLiteral(self, ctx: ShExDocParser.NumericLiteralContext):
# numericLiteral : INTEGER | DECIMAL | DOUBLE ;
return self._proc_numeric_literal(NumericLiteral(), ctx)
def visitRdfLiteral(self, ctx: ShExDocParser.RdfLiteralContext) -> RDFLiteral:
# rdfLiteral : string (LANGTAG | '^^' datatype)? ;
rval = RDFLiteral(self.visit(ctx.string()))
if ctx.LANGTAG():
rval.langtag = ctx.LANGTAG().getText()[1:]
elif ctx.datatype():
rval.datatype = self.visit(ctx.datatype())
return rval
def visitBooleanLiteral(self, ctx: ShExDocParser.BooleanLiteralContext) -> bool:
# booleanLiteral : KW_TRUE | KW_FALSE
return bool(ctx.KW_TRUE())
def visitString(self, ctx: ShExDocParser.StringContext):
# string : STRING_LITERAL1
# | STRING_LITERAL2
# | STRING_LITERAL_LONG1
# | STRING_LITERAL_LONG2
if ctx.STRING_LITERAL1() or ctx.STRING_LITERAL2():
return ctx.getText()[1:-1]
else:
return ctx.getText()[3:-3]
def visitIri(self, ctx: ShExDocParser.IriContext):
# iri : IRIREF | prefixedName ;
if ctx.IRIREF():
return self._iriref(ctx)
else:
return escape(self.visitChildren(ctx))
def visitPrefixedName(self, ctx: ShExDocParser.PrefixedNameContext) -> str:
# prefixedName : PNAME_LN | PNAME_NS ;
#
# includes: "ns:", ":foo" and "ns:foo"
return ctx.getText()
# Visit a parse tree produced by ShExDocParser#blankNode.
def visitBlankNode(self, ctx: ShExDocParser.BlankNodeContext) -> str:
# blankNode : BLANK_NODE_LABEL
return ctx.getText()
def visitCodeDecl(self, ctx: ShExDocParser.CodeDeclContext) -> SemanticAction:
# codeDecl : '%' (productionName | iri? CODE?) ;
# CODE : '{' (~[%\\] | '\\%')* '%' '}' ;
action = SemanticAction()
if ctx.productionName():
action.productionName = self.visit(ctx.productionName())
else:
action.codeDecl = CodeDecl(ctx.CODE().getText()[1:-2])
if ctx.iri():
NamespaceContext.PushContext(self.nsc)
action.codeDecl.iri = self.visit(ctx.iri())
return action
def _proc_actions(self, ctx) -> SemanticActions:
rval = SemanticActions()
for dcl in ctx.codeDecl():
rval.action.append(self.visit(dcl))
return rval
def visitProductionName(self, ctx: ShExDocParser.ProductionNameContext) -> str:
# productionName : UCASE_LABEL ;
return ProductionName(ref=ctx.UCASE_LABEL().getText())
def visitStartActions(self, ctx: ShExDocParser.StartActionsContext) -> SemanticActions:
# startActions : codeDecl+
self._schema.startActions = self._proc_actions(ctx)
def visitSemanticActions(self, ctx: ShExDocParser.SemanticActionsContext) -> SemanticActions:
# semanticActions : codeDecl*
if ctx.codeDecl():
return self._proc_actions(ctx)
else:
return None
def visitRdfType(self, ctx:ShExDocParser.RdfTypeContext):
return str(RDF.type)
# ---------------------------------------
# Support methods
# ----------------------------------------
def _push_shape(self, new_shape):
# Push the current shape being process and set the current shape to new_shape
self.shape_stack.append(self.cur_shape)
self.cur_shape = new_shape
def _pop_shape(self):
# Return the current shape and replace it with the top of the shape stack
# NOTE: self.cur_shape.foo.append(self._pop_shape()) doesn't work!
rval = self.cur_shape
self.cur_shape = self.shape_stack.pop()
return rval
def _iriref(self, ctx):
""" Parse a uri in the form "<iri>"
:param ctx: container with an IRIREF inside
:return: absolute URI
"""
return urljoin(self.base, escape(ctx.IRIREF().getText()[1:-1]), allow_fragments=False)
def _cardinality(self, container, ctx):
minr, maxr = self.visitCardinality(ctx.cardinality())
if minr != 1:
container.min = minr
if maxr != 1:
container.max = maxr if maxr else "unbounded"
def _subj_obj_type(self, node_type: NodeType):
if self.cur_tc.reversed:
self.cur_tc.subjectType = node_type
else:
self.cur_tc.objectType = node_type
def _subj_obj_constraint(self, vc: TripleConstraintValueClass):
if self.cur_tc.reversed:
self.cur_tc.subjectConstraint = vc
else:
self.cur_tc.objectConstraint = vc
def _proc_numeric_facet(self, target, ctx: ShExDocParser.NumericFacetContext) -> NumericFacet:
if ctx.numericLength():
val = ctx.INTEGER().getText()
if ctx.numericLength().KW_TOTALDIGITS():
target.totalDigits = val
else:
target.fractionDigits = val
else:
val = self._proc_numeric_literal(EndPoint(), ctx.numericLiteral())
if ctx.numericRange().KW_MININCLUSIVE():
target.minValue = val
elif ctx.numericRange().KW_MINEXCLUSIVE():
val.open = True
target.minValue = val
elif ctx.numericRange().KW_MAXINCLUSIVE():
target.maxValue = val
else:
val.open = True
target.maxValue = val
return target
@staticmethod
def _proc_numeric_literal(target: NumericLiteral, ctx: ShExDocParser.NumericLiteralContext) -> NumericLiteral:
if ctx.INTEGER():
target.integer = datatypes.int(ctx.INTEGER().getText())
elif ctx.DECIMAL():
target.decimal = datatypes.decimal(ctx.DECIMAL().getText())
else:
target.double = datatypes.double(ctx.DOUBLE().getText())
return target
def _proc_string_facet(self, target, ctx: ShExDocParser.StringFacetContext) -> NumericFacet:
if ctx.stringLength():
intval = int(ctx.INTEGER().getText())
if ctx.stringLength().KW_LENGTH():
target.length = intval
elif ctx.stringLength().KW_MINLENGTH():
target.minLength = intval
elif ctx.stringLength().KW_MAXLENGTH():
target.maxLength = intval
else:
assert False, "Unrecognized stringlength facet"
else:
if ctx.getChild(0).getText() == '~':
target.not_ = self.visit(ctx.string())
else:
target.pattern = self.visit(ctx.string())
return target
def _set_someof(self, v):
if isinstance(self.cur_shape.someOf, Iterable):
self.cur_shape.someOf.append(v)
else:
self.cur_shape.someOf = v
def _set_group(self, v):
if isinstance(self.cur_shape.group, Iterable):
self.cur_shape.group.append(v)
else:
self.cur_shape.group = v
def _set_tc(self, v):
if isinstance(self.cur_shape.tripleConstraint, Iterable):
self.cur_shape.tripleConstraint.append(v)
else:
self.cur_shape.tripleConstraint = v
|
hsolbrig/shexypy
|
shexypy/shexyparser/parser_impl/ShExDocVisitor_impl.py
|
Python
|
mit
| 31,570
|
[
"VisIt"
] |
72e7b7d9399f752d7e55131d9f51cab3bacc21d459b72f6580565395ce4a1093
|
# Author: Pietro Sormanni
## FUNCTIONS FOR SEQUENCE PROCESSING OF INPUTS ####
import sys
from .structure_processor import NUM_EXTRA_RESIDUES
def get_CDR_simple(sequence ,allow=set(["H", "K", "L"]),scheme='chothia',seqname='' \
,cdr1_scheme={'H':range(26-NUM_EXTRA_RESIDUES,33+NUM_EXTRA_RESIDUES),'L':range(24-NUM_EXTRA_RESIDUES,35+NUM_EXTRA_RESIDUES)} \
,cdr2_scheme={'H':range(52-NUM_EXTRA_RESIDUES,57+NUM_EXTRA_RESIDUES),'L':range(50-NUM_EXTRA_RESIDUES,57+NUM_EXTRA_RESIDUES)} \
,cdr3_scheme={'H':range(95-NUM_EXTRA_RESIDUES,103+NUM_EXTRA_RESIDUES),'L':range(89-NUM_EXTRA_RESIDUES,98+NUM_EXTRA_RESIDUES)}) :
'''
From a VH or VL amino acid sequence, returns the three CDR sequences as determined from the input numbering (scheme) and the given ranges.
Default ranges are Chothia CDRs +/- NUM_EXTRA_RESIDUES residues per side.
requires the python module anarci - Available from http://opig.stats.ox.ac.uk/webapps/sabdab-sabpred/ANARCI.php
For other numbering schemes see also http://www.bioinf.org.uk/abs/#cdrdef
Loop Kabat AbM Chothia1 Contact2
L1 L24--L34 L24--L34 L24--L34 L30--L36
L2 L50--L56 L50--L56 L50--L56 L46--L55
L3 L89--L97 L89--L97 L89--L97 L89--L96
H1 H31--H35B H26--H35B H26--H32..34 H30--H35B
H1 H31--H35 H26--H35 H26--H32 H30--H35
H2 H50--H65 H50--H58 H52--H56 H47--H58
H3 H95--H102 H95--H102 H95--H102 H93--H101
For generic Chothia identification can set auto_detect_chain_type=True and use:
cdr1_scheme={'H':range(26,34),'L':range(24,34)}
cdr2_scheme={'H':range(52,56),'L':range(50,56)}
cdr3_scheme={'H':range(95,102),'L':range(89,97)}
'''
try :
import anarci
except ImportError :
raise Exception("\n**ImportError** function get_CDR_simple() requires the python module anarci\n Available from http://opig.stats.ox.ac.uk/webapps/sabdab-sabpred/ANARCI.php\n\n")
res_num_all=anarci.number(sequence, scheme=scheme, allow=allow)
if not hasattr(res_num_all[0], '__len__') :
sys.stderr.write( "*ERROR* in get_CDR_simple() anarci failed on %s -returned %s chaintype=%s\n" % (seqname,str(res_num_all[0]),str(res_num_all[1])))
return None
cdr1,cdr2,cdr3='','',''
chain_type=res_num_all[1]
sys.stdout.write( '%s chain_type= %s\n'%(seqname,chain_type))
if hasattr(cdr1_scheme, 'keys') : # supports dictionary or OrderedDict as input type - assume all cdr ranges are like this
if chain_type=='K' and chain_type not in cdr1_scheme : chain_type='L' # Kappa light chain to Lambda light chain for this purpose
if chain_type not in cdr1_scheme :
raise Exception("\n chain_type %s not in input cdr1_scheme\n" % (chain_type))
cdr1_scheme=cdr1_scheme[chain_type]
cdr2_scheme=cdr2_scheme[chain_type]
cdr3_scheme=cdr3_scheme[chain_type]
# extract CDR sequences
for num_tuple,res in res_num_all[0] :
if num_tuple[0] in cdr1_scheme: cdr1+=res # num_tuple[1] may be an insertion code, (e.g. 111B)
elif num_tuple[0] in cdr2_scheme: cdr2+=res
elif num_tuple[0] in cdr3_scheme: cdr3+=res
# put in parapred format
cdrs={'CDR1':cdr1,'CDR2':cdr2,'CDR3':cdr3}
return cdrs
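# Hedged usage sketch (the sequence fragment and name below are made-up placeholders;
# anarci must be installed for this to run):
#   cdrs = get_CDR_simple("EVQLVESGGGLVQPGGSLRLSCAAS...", seqname="example_VH")
#   if cdrs is not None:
#       print(cdrs['CDR1'], cdrs['CDR2'], cdrs['CDR3'])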
class FakeSeq :
# mimic of the Biopython SeqRecord object, but in this way there is no need to have biopython installed
def __init__(self,seq='',seq_id='',seq_name='',description='') :
self.seq=seq
self.id=seq_id
self.name=seq_name
self.description=description
def __len__(self) :
return len(self.seq)
def __str__(self):
return self.seq
def __repr__(self):
restr='FakeSeq:%s:' % (self.name)
if len(self.seq)>20 :
restr+=self.seq[:15]+'...'+self.seq[-2:]
else : restr+=self.seq
return restr
def __getslice__(self,i,j):
return self.seq[i:j]
def __getitem__(self,y):
return self.seq[y]
def __add__(self,y):
return FakeSeq(seq=self.seq+str(y),seq_id=self.id,seq_name=self.name,description=self.description)
def uniq(input_el):
#given list/tuple it returns the unique elements (in the order they first appear)
output = []
for x in input_el:
if x not in output:
output.append(x)
return output
amino_list1=['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y', 'X']
def read_fasta(filename, return_as_dictionary=False, description_parser_function=None,use_seq_class=True, check_sequence=amino_list1, name_first_spilt=True):
'''
reads an input file in fasta format - returns the sequences
'''
sequences=[]
ids=[]
names=[]
descriptions=[]
remove=[]
first_residues=[]
nseq=0
for line in open(filename) :
if line[0]=='>' :
line=line[1:].strip()
if description_parser_function!=None :
seqid, name, description= description_parser_function(line)
elif name_first_spilt:
name=line.split()[0]
seqid=name
description=line[len(name)+1:] # could be ''
else :
description=''
name=line
seqid=line
if use_seq_class :
sequences+=[ FakeSeq(seq='',seq_id=seqid,seq_name=name,description=description)]
else :
sequences+=['']
if '|' in name : # for uniprot downloaded regions (e.g. without signal peptides) the last part may be the amino acid range
putative_range=name.split('|')[-1]
if '-' in putative_range :
try :
start,end=map(int, putative_range.split('-'))
first_residues+=[start]
except Exception : first_residues+=[1]
else : first_residues+=[1]
else : first_residues+=[1]
names+=[name]
ids+=[seqid]
descriptions+=[description]
if check_sequence!=None and nseq>0 :
for j,aa in enumerate(sequences[-2]) :
if aa not in check_sequence :
sys.stderr.write("\n**ERROR** residue %d %s in sequence %d %s NOT STANDARD --> can't process\n" % (j+1,aa,nseq,names[-2]) )
sys.stderr.flush()
remove+=[nseq-1]
break
nseq+=1
elif line!='' and line!='\n' :
sequences[-1]+=line.strip()
if check_sequence!=None and nseq>0 :
for j,aa in enumerate(sequences[-1]) :
if aa not in check_sequence :
sys.stderr.write("\n**ERROR** residue %d %s in sequence %d %s NOT STANDARD --> can't process\n" % (j+1,aa,nseq,names[-2]) )
sys.stderr.flush()
remove+=[nseq-1]
break
if remove!=[] : # remove sequences not containing only standard amino acids
remove=uniq(remove)
for j in sorted(remove,reverse=True) :
sys.stderr.write("**** SKIPPING sequence %d %s --> contains NOT STANDARD residues that cannot be processed\n" % (j+1,names[j]) )
sys.stderr.flush()
del sequences[j],ids[j],names[j],descriptions[j]
if len(sequences)==0 :
sys.stderr.write("\n**** WARNING *** NO VALID sequence in %s\n" % (filename))
sys.stderr.flush()
if use_seq_class :
return sequences,first_residues
else :
return sequences,ids,names,descriptions,first_residues
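# Hedged usage sketch (hypothetical file name): with the default use_seq_class=True the
# function returns (sequences, first_residues), e.g.
#   seqs, first_residues = read_fasta("input_sequences.fasta")
# whereas use_seq_class=False returns (sequences, ids, names, descriptions, first_residues).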
|
eliberis/parapred
|
parapred/full_seq_processor.py
|
Python
|
mit
| 7,698
|
[
"Biopython"
] |
bbd6070fe120c40d97c77a51963f76092d27202fd6f377afdb9eac1b8a23afb6
|
# Copyright 2006-2009 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# This module is for reading and writing FASTA format files as SeqRecord
# objects. The code is partly inspired by earlier Biopython modules,
# Bio.Fasta.* and the now deprecated Bio.SeqIO.FASTA
"""Bio.SeqIO support for the "fasta" (aka FastA or Pearson) file format.
You are expected to use this module via the Bio.SeqIO functions."""
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.Interfaces import SequentialSequenceWriter
#This is a generator function!
def FastaIterator(handle, alphabet = single_letter_alphabet, title2ids = None):
"""Generator function to iterate over Fasta records (as SeqRecord objects).
handle - input file
alphabet - optional alphabet
title2ids - A function that, when given the title of the FASTA
file (without the beginning >), will return the id, name and
description (in that order) for the record as a tuple of strings.
If this is not given, then the entire title line will be used
as the description, and the first word as the id and name.
Note that use of title2ids matches that of Bio.Fasta.SequenceParser
but the defaults are slightly different.
"""
#Skip any text before the first record (e.g. blank lines, comments)
while True:
line = handle.readline()
if line == "" : return #Premature end of file, or just empty?
if line[0] == ">":
break
while True:
if line[0]!=">":
raise ValueError("Records in Fasta files should start with '>' character")
if title2ids:
id, name, descr = title2ids(line[1:].rstrip())
else:
descr = line[1:].rstrip()
try:
id = descr.split()[0]
except IndexError:
assert not descr, repr(line)
#Should we use SeqRecord default for no ID?
id = ""
name = id
lines = []
line = handle.readline()
while True:
if not line : break
if line[0] == ">": break
lines.append(line.rstrip())
line = handle.readline()
#Remove trailing whitespace, and any internal spaces
#(and any embedded \r which are possible in mangled files
#when not opened in universal read lines mode)
result = "".join(lines).replace(" ", "").replace("\r", "")
#Return the record and then continue...
yield SeqRecord(Seq(result, alphabet),
id = id, name = name, description = descr)
if not line : return #StopIteration
assert False, "Should not reach this line"
class FastaWriter(SequentialSequenceWriter):
"""Class to write Fasta format files."""
def __init__(self, handle, wrap=60, record2title=None):
"""Create a Fasta writer.
handle - Handle to an output file, e.g. as returned
by open(filename, "w")
wrap - Optional line length used to wrap sequence lines.
Defaults to wrapping the sequence at 60 characters
Use zero (or None) for no wrapping, giving a single
long line for the sequence.
record2title - Optional function to return the text to be
used for the title line of each record. By default
a combination of the record.id and record.description
is used. If the record.description starts with the
record.id, then just the record.description is used.
You can either use:
myWriter = FastaWriter(open(filename,"w"))
myWriter.write_file(myRecords)
Or, follow the sequential file writer system, for example:
myWriter = FastaWriter(open(filename,"w"))
myWriter.write_header() # does nothing for Fasta files
...
Multiple calls to myWriter.write_record() and/or myWriter.write_records()
...
myWriter.write_footer() # does nothing for Fasta files
myWriter.close()
"""
SequentialSequenceWriter.__init__(self, handle)
#self.handle = handle
self.wrap = None
if wrap:
if wrap < 1:
raise ValueError
self.wrap = wrap
self.record2title = record2title
def write_record(self, record):
"""Write a single Fasta record to the file."""
assert self._header_written
assert not self._footer_written
self._record_written = True
if self.record2title:
title=self.clean(self.record2title(record))
else:
id = self.clean(record.id)
description = self.clean(record.description)
#if description[:len(id)]==id:
if description and description.split(None,1)[0]==id:
#The description includes the id at the start
title = description
elif description:
title = "%s %s" % (id, description)
else:
title = id
assert "\n" not in title
assert "\r" not in title
self.handle.write(">%s\n" % title)
data = self._get_seq_string(record) #Catches sequence being None
assert "\n" not in data
assert "\r" not in data
if self.wrap:
for i in range(0, len(data), self.wrap):
self.handle.write(data[i:i+self.wrap] + "\n")
else:
self.handle.write(data + "\n")
if __name__ == "__main__":
print "Running quick self test"
import os
from Bio.Alphabet import generic_protein, generic_nucleotide
#Download the files from here:
#ftp://ftp.ncbi.nlm.nih.gov/genomes/Bacteria/Nanoarchaeum_equitans
fna_filename = "NC_005213.fna"
faa_filename = "NC_005213.faa"
def genbank_name_function(text):
text, descr = text.split(None,1)
id = text.split("|")[3]
name = id.split(".",1)[0]
return id, name, descr
def print_record(record):
#See also bug 2057
#http://bugzilla.open-bio.org/show_bug.cgi?id=2057
print "ID:" + record.id
print "Name:" + record.name
print "Descr:" + record.description
print record.seq
for feature in record.annotations:
print '/%s=%s' % (feature, record.annotations[feature])
if record.dbxrefs:
print "Database cross references:"
for x in record.dbxrefs : print " - %s" % x
if os.path.isfile(fna_filename):
print "--------"
print "FastaIterator (single sequence)"
iterator = FastaIterator(open(fna_filename, "r"), alphabet=generic_nucleotide, title2ids=genbank_name_function)
count=0
for record in iterator:
count=count+1
print_record(record)
assert count == 1
print str(record.__class__)
if os.path.isfile(faa_filename):
print "--------"
print "FastaIterator (multiple sequences)"
iterator = FastaIterator(open(faa_filename, "r"), alphabet=generic_protein, title2ids=genbank_name_function)
count=0
for record in iterator:
count=count+1
print_record(record)
break
assert count>0
print str(record.__class__)
from cStringIO import StringIO
print "--------"
print "FastaIterator (empty input file)"
#Just to make sure no errors happen
iterator = FastaIterator(StringIO(""))
count = 0
for record in iterator:
count = count+1
assert count==0
print "Done"
|
bryback/quickseq
|
genescript/Bio/SeqIO/FastaIO.py
|
Python
|
mit
| 7,812
|
[
"Biopython"
] |
e5178a3f6bdf275e26a063dafb382d6ad9451847ee9aabdc06d53038f57270a8
|
from dark.blast.hsp import printHSP
def printBlastRecord(record):
"""
Print a BLAST record.
@param record: A BioPython C{Bio.Blast.Record.Blast} instance.
"""
for key in sorted(record.__dict__.keys()):
if key not in ['alignments', 'descriptions', 'reference']:
print('%s: %r' % (key, record.__dict__[key]))
print('alignments: (%d in total):' % len(record.alignments))
for i, alignment in enumerate(record.alignments):
print(' description %d:' % (i + 1))
for attr in ['accession', 'bits', 'e', 'num_alignments', 'score']:
print(' %s: %s' % (attr, getattr(record.descriptions[i], attr)))
print(' alignment %d:' % (i + 1))
for attr in 'accession', 'hit_def', 'hit_id', 'length', 'title':
print(' %s: %s' % (attr, getattr(alignment, attr)))
print(' HSPs (%d in total):' % len(alignment.hsps))
for hspIndex, hsp in enumerate(alignment.hsps, start=1):
print(' hsp %d:' % hspIndex)
printHSP(hsp, ' ')
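# Hedged usage sketch (hypothetical file name; assumes Biopython's XML BLAST output parser):
#   from Bio.Blast import NCBIXML
#   for record in NCBIXML.parse(open("results.xml")):
#       printBlastRecord(record)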
|
bamueh/dark-matter
|
dark/blast/records.py
|
Python
|
mit
| 1,064
|
[
"BLAST",
"Biopython"
] |
c1d62d0fb9060896713d6f4d19648ab8f5434db2cebf8b44a1f3e333dd59cd9f
|
# Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 James Buren and Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools import (AudioFile, InvalidFile)
from audiotools.ape import ApeTaggedAudio
from audiotools.bitstream import BitstreamReader, BitstreamWriter
class MPC_Size:
def __init__(self, value, length):
self.__value__ = value
self.__length__ = length
def __repr__(self):
return "MPC_Size({!r}, {!r})".format(self.__value__, self.__length__)
def __int__(self):
return self.__value__
def __len__(self):
return self.__length__
@classmethod
def parse(cls, reader):
cont, value = reader.parse("1u 7u")
length = 1
while cont == 1:
cont, value2 = reader.parse("1u 7u")
value = (value << 7) | value2
length += 1
return cls(value, length)
def build(self, writer):
for i in reversed(range(self.__length__)):
writer.write(1, 1 if (i > 0) else 0)
writer.write(7, (self.__value__ >> (i * 7)) & 0x7F)
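# Worked example (illustrative values chosen for this sketch): 200 needs two 7-bit groups,
# so MPC_Size(200, 2).build(writer) emits the bytes 0x81 0x48 (continuation bit set on the
# first byte, clear on the last), and MPC_Size.parse() of those same two bytes returns an
# MPC_Size whose int() is 200 and whose len() is 2.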
class InvalidMPC(InvalidFile):
"""raised by invalid files during MPC initialization"""
pass
class MPCAudio(ApeTaggedAudio, AudioFile):
"""an MPC audio file"""
SUFFIX = "mpc"
NAME = SUFFIX
DESCRIPTION = u"MusePack"
DEFAULT_COMPRESSION = "5"
# Ranges from 0 to 10. Lower levels mean lower kbps, and therefore
# lower quality.
COMPRESSION_MODES = tuple(map(str, range(0, 11)))
COMPRESSION_DESCRIPTIONS = {"0": u"poor quality (~20 kbps)",
"1": u"poor quality (~30 kbps)",
"2": u"low quality (~60 kbps)",
"3": u"low/medium quality (~90 kbps)",
"4": u"medium quality (~130 kbps)",
"5": u"high quality (~180 kbps)",
"6": u"excellent quality (~210 kbps)",
"7": u"excellent quality (~240 kbps)",
"8": u"excellent quality (~270 kbps)",
"9": u"excellent quality (~300 kbps)",
"10": u"excellent quality (~350 kbps)"}
def __init__(self, filename):
"""filename is a plain string"""
AudioFile.__init__(self, filename)
try:
block = BitstreamReader(self.get_block(b"SH"), False)
crc = block.read(32)
if block.read(8) != 8:
from audiotools.text import ERR_MPC_INVALID_VERSION
raise InvalidMPC(ERR_MPC_INVALID_VERSION)
self.__samples__ = int(MPC_Size.parse(block))
beg_silence = int(MPC_Size.parse(block))
self.__sample_rate__ = \
[44100, 48000, 37800, 32000][block.read(3)]
max_band = block.read(5) + 1
self.__channels__ = block.read(4) + 1
ms = block.read(1)
block_pwr = block.read(3) * 2
except IOError as err:
raise InvalidMPC(str(err))
def blocks(self):
with BitstreamReader(open(self.filename, "rb"), False) as r:
if r.read_bytes(4) != b"MPCK":
from audiotools.text import ERR_MPC_INVALID_ID
raise InvalidMPC(ERR_MPC_INVALID_ID)
key = r.read_bytes(2)
size = MPC_Size.parse(r)
while key != b"SE":
yield key, size, r.read_bytes(int(size) - len(size) - 2)
key = r.read_bytes(2)
size = MPC_Size.parse(r)
yield key, size, r.read_bytes(int(size) - len(size) - 2)
def get_block(self, block_id):
for key, size, block in self.blocks():
if key == block_id:
return block
else:
raise KeyError(block_id)
def bits_per_sample(self):
"""returns an integer number of bits-per-sample this track contains"""
return 16
def channels(self):
"""returns an integer number of channels this track contains"""
return self.__channels__
def lossless(self):
"""returns True if this track's data is stored losslessly"""
return False
def total_frames(self):
"""returns the total PCM frames of the track as an integer"""
return self.__samples__
def sample_rate(self):
"""returns the rate of the track's audio as an integer number of Hz"""
return self.__sample_rate__
@classmethod
def supports_to_pcm(cls):
"""returns True if all necessary components are available
to support the .to_pcm() method"""
try:
from audiotools.decoders import MPCDecoder
return True
except ImportError:
return False
def to_pcm(self):
"""returns a PCMReader object containing the track's PCM data
if an error occurs initializing a decoder, this should
return a PCMReaderError with an appropriate error message"""
from audiotools.decoders import MPCDecoder
try:
return MPCDecoder(self.filename)
except (IOError, ValueError) as err:
from audiotools import PCMReaderError
return PCMReaderError(error_message=str(err),
sample_rate=self.sample_rate(),
channels=self.channels(),
channel_mask=int(self.channel_mask()),
bits_per_sample=self.bits_per_sample())
@classmethod
def supports_from_pcm(cls):
"""returns True if all necessary components are available
to support the .from_pcm() classmethod"""
try:
from audiotools.encoders import encode_mpc
return True
except ImportError:
return False
@classmethod
def from_pcm(cls, filename, pcmreader,
compression=None,
total_pcm_frames=None):
from audiotools import __default_quality__
from audiotools import PCMConverter
from audiotools import ChannelMask
from audiotools.encoders import encode_mpc
if (compression is None) or (compression not in cls.COMPRESSION_MODES):
compression = __default_quality__(cls.NAME)
if pcmreader.bits_per_sample not in {8, 16, 24}:
from audiotools import UnsupportedBitsPerSample
pcmreader.close()
raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)
if pcmreader.sample_rate in (32000, 37800, 44100, 48000):
sample_rate = pcmreader.sample_rate
if total_pcm_frames is not None:
from audiotools import CounterPCMReader
pcmreader = CounterPCMReader(pcmreader)
else:
from bisect import bisect
sample_rate = [32000,
32000,
37800,
44100,
48000][bisect([32000, 37800, 44100, 48000],
pcmreader.sample_rate)]
total_pcm_frames = None
try:
encode_mpc(
filename,
PCMConverter(pcmreader,
sample_rate=sample_rate,
channels=min(pcmreader.channels, 2),
channel_mask=int(ChannelMask.from_channels(
min(pcmreader.channels, 2))),
bits_per_sample=16),
float(compression),
total_pcm_frames if (total_pcm_frames is not None) else 0)
# ensure PCM frames match, if indicated
if ((total_pcm_frames is not None) and
(total_pcm_frames != pcmreader.frames_written)):
from audiotools.text import ERR_TOTAL_PCM_FRAMES_MISMATCH
from audiotools import EncodingError
raise EncodingError(ERR_TOTAL_PCM_FRAMES_MISMATCH)
return MPCAudio(filename)
except (IOError, ValueError) as err:
from audiotools import EncodingError
cls.__unlink__(filename)
raise EncodingError(str(err))
except Exception:
cls.__unlink__(filename)
raise
finally:
pcmreader.close()
@classmethod
def supports_replay_gain(cls):
"""returns True if this class supports ReplayGain"""
return True
def get_replay_gain(self):
"""returns a ReplayGain object of our ReplayGain values
returns None if we have no values
may raise IOError if unable to read the file"""
from audiotools import ReplayGain
try:
rg = BitstreamReader(self.get_block(b"RG"), False)
except KeyError:
return None
version = rg.read(8)
if version != 1:
return None
gain_title = rg.read(16)
peak_title = rg.read(16)
gain_album = rg.read(16)
peak_album = rg.read(16)
if ((gain_title == 0) and (peak_title == 0) and
(gain_album == 0) and (peak_album == 0)):
return None
else:
return ReplayGain(
track_gain=64.82 - float(gain_title) / 256,
track_peak=(10 ** (float(peak_title) / 256 / 20)) / 2 ** 15,
album_gain=64.82 - float(gain_album) / 256,
album_peak=(10 ** (float(peak_album) / 256 / 20)) / 2 ** 15)
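# Worked example (illustrative numbers): a track gain of -0.18 dB is stored as
# int(round((64.82 - (-0.18)) * 256)) = 16640 by set_replay_gain() below, and is read back
# above as 64.82 - 16640 / 256 = -0.18 dB.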
def set_replay_gain(self, replaygain):
"""given a ReplayGain object, sets the track's gain to those values
may raise IOError if unable to modify the file"""
from math import log10
from audiotools import TemporaryFile
gain_title = int(round((64.82 - replaygain.track_gain) * 256))
if replaygain.track_peak > 0.0:
peak_title = int(log10(replaygain.track_peak * 2 ** 15) * 20 * 256)
else:
peak_title = 0
gain_album = int(round((64.82 - replaygain.album_gain) * 256))
if replaygain.album_peak > 0.0:
peak_album = int(log10(replaygain.album_peak * 2 ** 15) * 20 * 256)
else:
peak_album = 0
#FIXME - check for missing "RG" block and add one if not present
metadata = self.get_metadata()
writer = BitstreamWriter(TemporaryFile(self.filename), False)
writer.write_bytes(b"MPCK")
for key, size, block in self.blocks():
if key != b"RG":
writer.write_bytes(key)
size.build(writer)
writer.write_bytes(block)
else:
writer.write_bytes(b"RG")
MPC_Size(2 + 1 + 1 + 2 * 4, 1).build(writer)
writer.write(8, 1)
writer.write(16, gain_title)
writer.write(16, peak_title)
writer.write(16, gain_album)
writer.write(16, peak_album)
if metadata is not None:
writer.set_endianness(True)
metadata.build(writer)
writer.close()
def delete_replay_gain(self):
"""removes ReplayGain values from file, if any
may raise IOError if unable to modify the file"""
from audiotools import TemporaryFile
writer = BitstreamWriter(TemporaryFile(self.filename), False)
writer.write_bytes(b"MPCK")
for key, size, block in self.blocks():
if key != b"RG":
writer.write_bytes(key)
size.build(writer)
writer.write_bytes(block)
else:
writer.write_bytes(b"RG")
MPC_Size(2 + 1 + 1 + 2 * 4, 1).build(writer)
writer.write(8, 1)
writer.write(16, 0)
writer.write(16, 0)
writer.write(16, 0)
writer.write(16, 0)
writer.close()
|
tuffy/python-audio-tools
|
audiotools/mpc.py
|
Python
|
gpl-2.0
| 12,755
|
[
"Brian"
] |
51d8844339acf8c30a433aa203a0d4159b74ae214eaf2dc9b5444bbdd8c7d695
|
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
self._metadata = None
self._install_info = None
self._validate_certs = not galaxy.options.ignore_certs
display.debug('Validate TLS certificates: %s' % self._validate_certs)
self.options = galaxy.options
self.galaxy = galaxy
self.name = name
self.version = version
self.src = src or name
self.scm = scm
if path is not None:
if self.name not in path:
path = os.path.join(path, self.name)
self.path = path
else:
for role_path_dir in galaxy.roles_paths:
role_path = os.path.join(role_path_dir, self.name)
if os.path.exists(role_path):
self.path = role_path
break
else:
# use the first path by default
self.path = os.path.join(galaxy.roles_paths[0], self.name)
# create list of possible paths
self.paths = [x for x in galaxy.roles_paths]
self.paths = [os.path.join(x, self.name) for x in self.paths]
def __eq__(self, other):
return self.name == other.name
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
meta_path = os.path.join(self.path, self.META_MAIN)
if os.path.isfile(meta_path):
try:
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.utcnow().strftime("%c"),
)
if not os.path.exists(os.path.join(self.path, 'meta')):
os.makedirs(os.path.join(self.path, 'meta'))
info_path = os.path.join(self.path, self.META_INSTALL)
with open(info_path, 'w+') as f:
try:
self._install_info = yaml.safe_dump(info, f)
except:
return False
return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except:
pass
return False
def fetch(self, role_data):
"""
Downloads the archived role from github to a temp location
"""
if role_data:
# first grab the file and save it to a temp location
if "github_user" in role_data and "github_repo" in role_data:
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
display.display("- downloading role from %s" % archive_url)
try:
url_file = open_url(archive_url, validate_certs=self._validate_certs)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
temp_file.write(data)
data = url_file.read()
temp_file.close()
return temp_file.name
except Exception as e:
display.error("failed to download the file: %s" % str(e))
return False
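# Illustrative URL shape (user, repo and version here are hypothetical): for
# github_user "someuser", github_repo "some-role" and version "1.0.0" the archive_url
# built above is https://github.com/someuser/some-role/archive/1.0.0.tar.gz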
def install(self):
# the file is a tar, so open it that way and extract it
# to the specified (or default) roles directory
local_file = False
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
# installing a local tar.gz
local_file = True
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
if role_data.get('role_type') == 'CON' and not os.environ.get('ANSIBLE_CONTAINER'):
# Container Enabled, running outside of a container
display.warning("%s is a Container Enabled role and should only be installed using "
"Ansible Container" % self.name)
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role and should only be installed using Ansible "
"Container" % self.name)
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
self.version = str(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the file downloaded was not a tar.gz")
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, "r:gz")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
if self.META_MAIN in member.name:
meta_file = member
break
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
# we strip off the top-level directory for all of the files contained within
# the tar file here, since the default is 'github_repo-target', and change it
# to the specified role's name
installed = False
while not installed:
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not getattr(self.options, "force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path)
else:
os.makedirs(self.path)
# now we do the actual extraction to the path
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop the leading directory, as mentioned above
if member.isreg() or member.issym():
parts = member.name.split(os.sep)[1:]
final_parts = []
for part in parts:
if part != '..' and '~' not in part and '$' not in part:
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
# write out the install info file for later use
self._write_galaxy_install_info()
installed = True
except OSError as e:
error = True
if e[0] == 13 and len(self.paths) > 1:
current = self.paths.index(self.path)
nextidx = current + 1
if len(self.paths) >= current:
self.path = self.paths[nextidx]
error = False
if error:
raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
# return the parsed yaml metadata
display.display("- %s was installed successfully" % self.name)
if not local_file:
try:
os.unlink(tmp_file)
except (OSError,IOError) as e:
display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
return True
return False
@property
def spec(self):
"""
Returns role spec info
{
'scm': 'git',
'src': 'http://git.example.com/repos/repo.git',
'version': 'v1.0',
'name': 'repo'
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
|
kaarolch/ansible
|
lib/ansible/galaxy/role.py
|
Python
|
gpl-3.0
| 14,083
|
[
"Brian",
"Galaxy"
] |
d257b02023314a5772e7b2129486d199555262da122dcae3d2a890e4cf22b42c
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# License: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
|
aabadie/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
Python
|
bsd-3-clause
| 6,813
|
[
"Gaussian"
] |
a8842645515903ed6a5b18458477a8e32d54d359ae69c10f186e8be6e16c4603
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyscf import gto, lib
from pyscf import scf, dft
from pyscf import ci
from pyscf import grad
from pyscf.grad import cisd as cisd_grad
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol)
mf.conv_tol_grad = 1e-8
mf.kernel()
def tearDownModule():
global mol, mf
mol.stdout.close()
del mol, mf
class KnownValues(unittest.TestCase):
def test_cisd_grad(self):
myci = ci.cisd.CISD(mf)
myci.conv_tol = 1e-10
myci.kernel()
g1 = myci.nuc_grad_method().kernel(myci.ci, atmlst=[0,1,2])
self.assertAlmostEqual(lib.finger(g1), -0.032562347119070523, 6)
def test_cisd_grad_finite_diff(self):
mol = gto.M(
verbose = 0,
atom = 'H 0 0 0; H 0 0 1.706',
basis = '631g',
unit='Bohr')
ci_scanner = scf.RHF(mol).set(conv_tol=1e-14).apply(ci.CISD).as_scanner()
e0 = ci_scanner(mol)
e1 = ci_scanner(mol.set_geom_('H 0 0 0; H 0 0 1.704'))
ci_scanner.nroots = 2
ci_scanner(mol.set_geom_('H 0 0 0; H 0 0 1.705'))
g1 = ci_scanner.nuc_grad_method().kernel()
self.assertAlmostEqual(g1[0,2], (e1-e0)*500, 6)
def test_cisd_grad_excited_state(self):
mol = gto.M(
verbose = 0,
atom = 'H 0 0 0; H 0 0 1.706',
basis = '631g',
unit='Bohr')
myci = scf.RHF(mol).set(conv_tol=1e-14).apply(ci.CISD).set(nroots=3)
ci_scanner = myci.as_scanner()
e0 = ci_scanner(mol)
e1 = ci_scanner(mol.set_geom_('H 0 0 0; H 0 0 1.704'))
g_scan = myci.nuc_grad_method().as_scanner(state=2)
g1 = g_scan('H 0 0 0; H 0 0 1.705', atmlst=range(2))[1]
self.assertAlmostEqual(g1[0,2], (e1[2]-e0[2])*500, 6)
def test_frozen(self):
myci = ci.cisd.CISD(mf)
myci.frozen = [0,1,10,11,12]
myci.max_memory = 1
myci.kernel()
g1 = cisd_grad.Gradients(myci).kernel(myci.ci)
self.assertAlmostEqual(lib.finger(g1), 0.10224149952700579, 6)
def test_as_scanner(self):
myci = ci.cisd.CISD(mf)
myci.frozen = [0,1,10,11,12]
gscan = myci.nuc_grad_method().as_scanner().as_scanner()
e, g1 = gscan(mol)
self.assertTrue(gscan.converged)
self.assertAlmostEqual(e, -76.032220245016717, 9)
self.assertAlmostEqual(lib.finger(g1), 0.10224149952700579, 6)
def test_symmetrize(self):
mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', symmetry=True)
g = mol.RHF.run().CISD().run().Gradients().kernel()
self.assertAlmostEqual(lib.finger(g), 0.11924457198332741, 7)
if __name__ == "__main__":
print("Tests for CISD gradients")
unittest.main()
|
gkc1000/pyscf
|
pyscf/grad/test/test_cisd.py
|
Python
|
apache-2.0
| 3,518
|
[
"PySCF"
] |
f51036633a9050755998dfdfd84e1bb157b0b84b78a0d8f3b8bc1c8ffc75fe99
|
#
# Copyright (C) 2001,2003 greg Landrum and Rational Discovery LLC
#
""" unit tests for the QuantTree implementation """
import io
import unittest
from rdkit import RDConfig
from rdkit.ML.DecTree import BuildQuantTree
from rdkit.ML.DecTree.QuantTree import QuantTreeNode
from rdkit.TestRunner import redirect_stdout
from io import StringIO
import pickle
def cmp(t1, t2):
return (t1 < t2) * -1 or (t1 > t2) * 1
class TestCase(unittest.TestCase):
def setUp(self):
self.qTree1Name = RDConfig.RDCodeDir + '/ML/DecTree/test_data/QuantTree1.pkl'
self.qTree2Name = RDConfig.RDCodeDir + '/ML/DecTree/test_data/QuantTree2.pkl'
def _setupTree1(self):
examples1 = [['p1', 0, 1, 0.1, 0], ['p2', 0, 0, 0.1, 1], ['p3', 0, 0, 1.1, 2],
['p4', 0, 1, 1.1, 2], ['p5', 1, 0, 0.1, 2], ['p6', 1, 0, 1.1, 2],
['p7', 1, 1, 0.1, 2], ['p8', 1, 1, 1.1, 0]]
attrs = list(range(1, len(examples1[0]) - 1))
nPossibleVals = [0, 2, 2, 0, 3]
boundsPerVar = [0, 0, 0, 1, 0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar)
self.examples1 = examples1
def _setupTree2(self):
examples1 = [['p1', 0.1, 1, 0.1, 0], ['p2', 0.1, 0, 0.1, 1], ['p3', 0.1, 0, 1.1, 2],
['p4', 0.1, 1, 1.1, 2], ['p5', 1.1, 0, 0.1, 2], ['p6', 1.1, 0, 1.1, 2],
['p7', 1.1, 1, 0.1, 2], ['p8', 1.1, 1, 1.1, 0]]
attrs = list(range(1, len(examples1[0]) - 1))
nPossibleVals = [0, 0, 2, 0, 3]
boundsPerVar = [0, 1, 0, 1, 0]
self.t2 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar)
self.examples2 = examples1
def _setupTree1a(self):
examples1 = [['p1', 0, 1, 0.1, 4.0, 0], ['p2', 0, 0, 0.1, 4.1, 1], ['p3', 0, 0, 1.1, 4.2, 2],
['p4', 0, 1, 1.1, 4.2, 2], ['p5', 1, 0, 0.1, 4.2, 2], ['p6', 1, 0, 1.1, 4.2, 2],
['p7', 1, 1, 0.1, 4.2, 2], ['p8', 1, 1, 1.1, 4.0, 0]]
attrs = list(range(1, len(examples1[0]) - 1))
nPossibleVals = [0, 2, 2, 0, 0, 3]
boundsPerVar = [0, 0, 0, 1, -1, 0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar)
self.examples1 = examples1
def test0Cmp(self):
# " testing tree comparisons "
self._setupTree1()
self._setupTree2()
assert self.t1 == self.t1, 'self equals failed'
assert self.t2 == self.t2, 'self equals failed'
assert self.t1 != self.t2, 'not equals failed'
def test1Tree(self):
# " testing tree1 "
self._setupTree1()
with open(self.qTree1Name, 'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = pickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated. '
self.assertIn('Var: 2 []', str(self.t1))
self.assertEqual(self.t1.GetQuantBounds(), [])
def test2Tree(self):
# " testing tree2 "
self._setupTree2()
with open(self.qTree2Name, 'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = pickle.load(inFile)
assert self.t2 == t2, 'Incorrect tree generated.'
def test3Classify(self):
# " testing classification "
self._setupTree1()
self._setupTree2()
for i in range(len(self.examples1)):
self.assertEqual(
self.t1.ClassifyExample(self.examples1[i]), self.examples1[i][-1],
msg='examples1[%d] misclassified' % i)
for i in range(len(self.examples2)):
self.assertEqual(
self.t2.ClassifyExample(self.examples2[i]), self.examples2[i][-1],
msg='examples2[%d] misclassified' % i)
def test4UnusedVars(self):
# " testing unused variables "
self._setupTree1a()
with open(self.qTree1Name, 'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = pickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
for i in range(len(self.examples1)):
self.assertEqual(
self.t1.ClassifyExample(self.examples1[i]), self.examples1[i][-1],
'examples1[%d] misclassified' % i)
def test5Bug29(self):
# """ a more extensive test of the cmp stuff using hand-built trees """
import copy
t1 = QuantTreeNode(None, 't1')
t1.SetQuantBounds([1.])
c1 = QuantTreeNode(t1, 'c1')
c1.SetQuantBounds([2.])
t1.AddChildNode(c1)
c2 = QuantTreeNode(t1, 'c2')
c2.SetQuantBounds([2.])
t1.AddChildNode(c2)
c11 = QuantTreeNode(c1, 'c11')
c11.SetQuantBounds([3.])
c1.AddChildNode(c11)
c12 = QuantTreeNode(c1, 'c12')
c12.SetQuantBounds([3.])
c1.AddChildNode(c12)
assert not cmp(t1, copy.deepcopy(t1)), 'self equality failed'
t2 = QuantTreeNode(None, 't1')
t2.SetQuantBounds([1.])
c1 = QuantTreeNode(t2, 'c1')
c1.SetQuantBounds([2.])
t2.AddChildNode(c1)
c2 = QuantTreeNode(t2, 'c2')
c2.SetQuantBounds([2.])
t2.AddChildNode(c2)
c11 = QuantTreeNode(c1, 'c11')
c11.SetQuantBounds([3.])
c1.AddChildNode(c11)
c12 = QuantTreeNode(c1, 'c12')
c12.SetQuantBounds([3.00003])
c1.AddChildNode(c12)
assert cmp(t1, t2), 'inequality failed'
def test6Bug29_2(self):
# """ a more extensive test of the cmp stuff using pickled trees"""
import os
with open(os.path.join(RDConfig.RDCodeDir, 'ML', 'DecTree', 'test_data', 'CmpTree1.pkl'),
'r') as t1TFile:
buf = t1TFile.read().replace('\r\n', '\n').encode('utf-8')
t1TFile.close()
with io.BytesIO(buf) as t1File:
t1 = pickle.load(t1File)
with open(os.path.join(RDConfig.RDCodeDir, 'ML', 'DecTree', 'test_data', 'CmpTree2.pkl'),
'r') as t2TFile:
buf = t2TFile.read().replace('\r\n', '\n').encode('utf-8')
t2TFile.close()
with io.BytesIO(buf) as t2File:
t2 = pickle.load(t2File)
assert cmp(t1, t2), 'equality failed'
def test7Recycle(self):
# """ try recycling descriptors """
examples1 = [[3, 0, 0],
[3, 1, 1],
[1, 0, 0],
[0, 0, 1],
[1, 1, 0], ]
attrs = list(range(2))
nPossibleVals = [2, 2, 2]
boundsPerVar = [1, 0, 0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1, attrs, nPossibleVals, boundsPerVar,
recycleVars=1)
assert self.t1.GetLabel() == 0, self.t1.GetLabel()
assert self.t1.GetChildren()[0].GetLabel() == 1
assert self.t1.GetChildren()[1].GetLabel() == 1
assert self.t1.GetChildren()[1].GetChildren()[0].GetLabel() == 0
assert self.t1.GetChildren()[1].GetChildren()[1].GetLabel() == 0
def test8RandomForest(self):
# """ try random forests descriptors """
import random
random.seed(23)
nAttrs = 100
nPts = 10
examples = []
for _ in range(nPts):
descrs = [int(random.random() > 0.5) for _ in range(nAttrs)]
act = sum(descrs) > nAttrs / 2
examples.append(descrs + [act])
attrs = list(range(nAttrs))
nPossibleVals = [2] * (nAttrs + 1)
boundsPerVar = [0] * nAttrs + [0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples, attrs, nPossibleVals, boundsPerVar, maxDepth=1,
recycleVars=1, randomDescriptors=3)
self.assertEqual(self.t1.GetLabel(), 49)
self.assertEqual(self.t1.GetChildren()[0].GetLabel(), 3)
self.assertEqual(self.t1.GetChildren()[1].GetLabel(), 54)
def test_exampleCode(self):
f = StringIO()
with redirect_stdout(f):
BuildQuantTree.TestTree()
self.assertIn('Var: 2', f.getvalue())
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
rdkit/ML/DecTree/UnitTestQuantTree.py
|
Python
|
bsd-3-clause
| 8,450
|
[
"RDKit"
] |
e41051038e702a99e5dee7da999802b334f1888ecaf53720ff78e846a77fa71f
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
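# --- Editor's illustrative sketch (not part of the original module): the
# helper name below is hypothetical; it only demonstrates what ``scale`` does
# with the default arguments (zero mean, unit variance per column).
def _example_scale():
    import numpy as np
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    # every column now has mean 0 and standard deviation 1
    assert np.allclose(X_scaled.mean(axis=0), 0.)
    assert np.allclose(X_scaled.std(axis=0), 1.)
    return X_scaled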
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
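# --- Editor's illustrative sketch (not part of the original module): spells
# out the docstring formulas with plain numpy; ``_example_minmax`` is a
# hypothetical helper, assuming the default feature_range of (0, 1).
def _example_minmax():
    import numpy as np
    X = np.array([[1., 10.], [2., 20.], [3., 60.]])
    scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
    # X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    assert np.allclose(scaler.transform(X), X_std)
    return X_std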
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
    # To allow backward compatibility, we handle here the case of 1D input.
    # From 0.17, 1D input is deprecated in scaler objects, but we still want
    # to allow users to keep calling this function with 1D input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # Even in the case of `with_mean=False`, we update the mean anyway.
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
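# --- Editor's illustrative sketch (not part of the original module):
# ``partial_fit`` over several batches accumulates (numerically) the same
# statistics as a single ``fit`` on the full array; the helper name is
# hypothetical.
def _example_standard_scaler_partial_fit():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    full = StandardScaler().fit(X)
    incremental = StandardScaler()
    for batch in np.array_split(X, 4):
        incremental.partial_fit(batch)
    assert np.allclose(full.mean_, incremental.mean_)
    assert np.allclose(full.scale_, incremental.scale_)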
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
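# --- Editor's illustrative sketch (not part of the original module): each
# column is divided by its maximum absolute value, so signs are preserved;
# the helper name is hypothetical.
def _example_maxabs():
    import numpy as np
    X = np.array([[1., -2.], [-4., 1.]])
    X_scaled = MaxAbsScaler().fit_transform(X)
    assert np.allclose(X_scaled, X / np.abs(X).max(axis=0))
    return X_scaled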
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
    # To allow backward compatibility, we handle here the case of 1D input.
    # From 0.17, 1D input is deprecated in scaler objects, but we still want
    # to allow users to keep calling this function with 1D input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
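# --- Editor's illustrative sketch (not part of the original module): the
# centre is the per-feature median and the scale is the interquartile range,
# so a single extreme value barely affects them; the helper name is
# hypothetical.
def _example_robust():
    import numpy as np
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])  # 1000. is an outlier
    scaler = RobustScaler().fit(X)
    q = np.percentile(X, (25, 75), axis=0)
    assert np.allclose(scaler.center_, np.median(X, axis=0))
    assert np.allclose(scaler.scale_, q[1] - q[0])
    return scaler.transform(X)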
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
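# --- Editor's illustrative sketch (not part of the original module): for a
# two-column row [a, b] and degree 2 the generated columns are
# [1, a, b, a**2, a*b, b**2]; the helper name is hypothetical.
def _example_polynomial_features():
    import numpy as np
    X = np.array([[2., 3.]])
    XP = PolynomialFeatures(degree=2).fit_transform(X)
    assert np.allclose(XP, [[1., 2., 3., 4., 6., 9.]])
    return XP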
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
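# --- Editor's illustrative sketch (not part of the original module): with the
# default l2 norm every non-zero row is rescaled to unit Euclidean length,
# while all-zero rows are left untouched; the helper name is hypothetical.
def _example_normalize():
    import numpy as np
    X = np.array([[3., 4.], [0., 0.]])
    X_normed = normalize(X, norm='l2')
    assert np.allclose(X_normed, [[0.6, 0.8], [0., 0.]])
    return X_normed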
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
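# --- Editor's illustrative sketch (not part of the original module): values
# strictly above the threshold become 1, everything else becomes 0; the
# helper name is hypothetical.
def _example_binarize():
    import numpy as np
    X = np.array([[0.2, -1.0, 2.5]])
    assert np.allclose(binarize(X, threshold=0.5), [[0., 0., 1.]])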
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
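# --- Editor's illustrative sketch (not part of the original module): for a
# linear kernel K = X.dot(X.T), centering K is equivalent to computing the
# kernel of the column-centered data; the helper name is hypothetical.
def _example_kernel_centerer():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    K = X.dot(X.T)
    Xc = X - X.mean(axis=0)
    K_centered = KernelCenterer().fit_transform(K)
    assert np.allclose(K_centered, Xc.dot(Xc.T))
    return K_centered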
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
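# --- Editor's illustrative sketch (not part of the original module): the
# callable is applied to the selected columns only and the remaining columns
# are stacked to the right, unchanged; the helper name is hypothetical.
def _example_transform_selected():
    import numpy as np
    X = np.array([[1., 10.], [2., 20.]])
    out = _transform_selected(X, lambda Z: Z * 100., selected=[0])
    assert np.allclose(out, [[100., 10.], [200., 20.]])
    return out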
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
    Will return a sparse matrix if set True, else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e. those less than n_values_, using a mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
|
sanketloke/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 68,901
|
[
"Gaussian"
] |
2acbd3c3221ddc6968c071461808301025c6da76033f0b810a7b9b2e1cdf869f
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DB Entity and classes to manage the creation and modification of clusters.
"""
__author__ = 'Milagro Teruel (milit@google.com)'
import appengine_config
import collections
import json
import math
import os
import urllib
import zlib
from mapreduce import context
from common import schema_fields
from controllers import utils
from models import courses
from models import jobs
from models import models
from models import progress
from models import transforms
from models import data_sources
from models.entities import BaseEntity
from modules.analytics import student_aggregate
from modules.analytics import student_answers
from modules.dashboard import dto_editor
from google.appengine.ext import db
DIM_TYPE_UNIT = 'u'
DIM_TYPE_LESSON = 'l'
DIM_TYPE_QUESTION = 'q'
DIM_TYPE_UNIT_VISIT = 'uv'
DIM_TYPE_UNIT_PROGRESS = 'up'
DIM_TYPE_LESSON_PROGRESS = 'lp'
# All of the possible fields that can be in a dimension
DIM_TYPE = 'type'
DIM_ID = 'id'
DIM_HIGH = 'high' # The upper bound. Optional
DIM_LOW = 'low' # The lower bound. Optional
DIM_EXTRA_INFO = 'extra-info' # Optional
DIM_VALUE = 'value' # For students vectors. Optional
class ClusterEntity(BaseEntity):
"""Representation of a cluster used for clasification of students.
A cluster is defined by a set of dimensions and a range of numeric values
for each dimension. For dimensions with boolean values, they must be
converted to a numeric representation. The identifier for a dimension is
the type (unit, lesson, question...) plus the id of this type.
The attribute data contains a json dictionary with the following structure:
{
'name': 'string with name of cluster',
'description': 'string with description of the cluster',
'vector': [{dictionary dimension 1}, {dictionary dimension 2}, ... ]
}
The value of 'vector' is a list with one dictionary for each dimension.
Example of dimension:
{
clustering.DIM_TYPE: clustering.DIM_TYPE_UNIT,
clustering.DIM_ID: 1,
clustering.DIM_LOW: 0,
clustering.DIM_HIGH: 50,
clustering.DIM_EXTRA_INFO: ''
}
Dimension-extra-info is a field for any information needed to
calculate the value of the dimension. It is also a json dictionary.
The same question can be used several times by inserting it into different
units or lessons. We distinguish these different uses, and consequently
the id of a question dimension is also constructed with the ids of the
unit and lesson in which the question was found. To get this id, use the
function pack_question_dimid. The inverse function is
unpack_question_dimid.
A question can also appear several times in the same unit and lesson. In
that case, we consider all usages as a single question dimension for
compatibility with the information in StudentAggregateEntity.
"""
# TODO(milit): Add an active/inactive property to exclude the cluster
# from calculations and visualizations without having to delete it.
data = db.TextProperty(indexed=False)
def pack_question_dimid(unit_id, lesson_id, question_id):
"""Constructs the dimension id for a question using unit and lesson id.
Args:
unit_id: a number or string indicating the unit id.
lesson_id: a number, string or None indicating the lesson id.
question_id: a number or string indicating the question id.
Returns:
A string with the dimension id."""
return ':'.join((str(unit_id), str(lesson_id), str(question_id)))
def unpack_question_dimid(dimension_id):
"""Decompose the dimension id into unit, lesson and question id.
Returns:
A tuple unit_id, lesson_id, question_id.
unit_id and question_id are strings. lesson_id can be a string or
None.
"""
unit_id, lesson_id, question_id = dimension_id.split(':')
if lesson_id == 'None':
lesson_id = None
return unit_id, lesson_id, question_id
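# Example sketch (not from the original module; the ids below are made up):
# pack_question_dimid and unpack_question_dimid form a round trip, with the
# string 'None' mapping back to a lesson_id of None.
#
#   >>> pack_question_dimid(4, 12, 7)
#   '4:12:7'
#   >>> unpack_question_dimid('4:None:7')
#   ('4', None, '7')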
class ClusterDTO(object):
"""Data transfer object for ClusterEntity."""
def __init__(self, the_id, the_dict):
self.id = the_id
self.dict = the_dict
@property
def name(self):
return self.dict.get('name', '')
@property
def description(self):
return self.dict.get('description', '')
@property
def vector(self):
return self.dict.get('vector', [])
class ClusterDAO(models.BaseJsonDao):
DTO = ClusterDTO
ENTITY = ClusterEntity
ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeId
class ClusterDataSource(data_sources.SynchronousQuery):
"""Gets the information of the available clusters in the course.
Renders the jinja template clustering.html.
"""
@staticmethod
def any_clusterable_objects_exist(app_context):
course = courses.Course(None, app_context=app_context)
if course.get_units() or models.QuestionDAO.get_all():
return True
return False
@staticmethod
def fill_values(app_context, template_values):
"""Sets values into the dict used to fill out the Jinja template."""
template_values['clusters'] = ClusterDAO.get_all()
edit_urls = []
for cluster in template_values['clusters']:
params = urllib.urlencode({
'action' : 'edit_cluster',
'key': cluster.id})
edit_urls.append('dashboard?{}'.format(params))
template_values['edit_urls'] = edit_urls
if not ClusterDataSource.any_clusterable_objects_exist(app_context):
template_values['no_clusterables'] = (
'No course items exist on which to make clusters. '
'At least one unit, lesson, or question must exist '
'for clustering functionality to be relevant.')
def _has_right_side(dim):
"""Returns True if the value of dim[DIM_HIGH] is not None or ''."""
return dim.get(DIM_HIGH) is not None and dim.get(DIM_HIGH) != ''
def _has_left_side(dim):
"""Returns True if the value of dim[DIM_LOW] is not None or ''."""
return dim.get(DIM_LOW) is not None and dim.get(DIM_LOW) != ''
def _add_unit_visits(unit, result):
new_dim = {
DIM_TYPE: DIM_TYPE_UNIT_VISIT,
DIM_ID: unit.unit_id,
'name': unit.title + ' (visits)'
}
result.append(new_dim)
def _add_unit_and_content(unit, result):
"""Adds the score dimensions for units and its lessons and questions."""
# The content of an assessment is indicated by a lesson_id of None.
# Inside that lesson we can find all the questions added directly
# to the assessment.
unit_dict = {
DIM_TYPE: DIM_TYPE_UNIT, # Unit or assessment
DIM_ID: unit['unit_id'],
'name': unit['title']} # Name won't be saved in ClusterEntity
result.append(unit_dict)
unit_scored_lessons = 0
for item in unit['contents']:
lesson_id = item.get('lesson_id')
# A unit may have a pre or post assessment, in that case the item
# has unit_id, not a lesson_id.
included_assessment_id = item.get('unit_id')
lesson_title = item.get('title')
if lesson_title and lesson_id and item.get('tallied'):
result.append({
DIM_TYPE: DIM_TYPE_LESSON,
DIM_ID: lesson_id,
'name': lesson_title})
unit_scored_lessons += 1
elif included_assessment_id and lesson_title:
result.append({
DIM_TYPE: DIM_TYPE_UNIT,
DIM_ID: included_assessment_id,
'name': lesson_title})
unit_scored_lessons += 1
# If a lesson is not tallied (graded) it is not considered a dimension
for question in item['questions']:
if included_assessment_id:
question_id = pack_question_dimid(
included_assessment_id, None, question['id'])
else:
question_id = pack_question_dimid(
unit['unit_id'], lesson_id, question['id'])
result.append({
DIM_TYPE: DIM_TYPE_QUESTION,
DIM_ID: question_id,
'name': question['description']})
# This should affect the result list as well.
unit_dict[DIM_EXTRA_INFO] = transforms.dumps(
{'unit_scored_lessons': unit_scored_lessons})
def _add_unit_and_lesson_progress(unit, course, result):
"""Adds the dimensions for the progress of units and lessons.
The progress is obtained from the StudentPropertyEntity."""
result.append({
DIM_TYPE: DIM_TYPE_UNIT_PROGRESS,
DIM_ID: unit.unit_id,
'name': unit.title + ' (progress)'
})
# TODO(milit): Add better order or indications of the structure of
# content.
for lesson in course.get_lessons(unit.unit_id):
result.append({
DIM_TYPE: DIM_TYPE_LESSON_PROGRESS,
DIM_ID: lesson.lesson_id,
'name': lesson.title + ' (progress)',
DIM_EXTRA_INFO: transforms.dumps({'unit_id': unit.unit_id})
})
def get_possible_dimensions(app_context):
"""Returns a list of dictionaries with all possible dimensions.
Any scored unit, lesson, assessment or question can be a dimension. If a
question is used in different units and lessons, then a dimension will
be created for each use of the question. However, if the question is used
twice or more in the same unit and lesson, then only one dimension will
be created for this question, unit and lesson.
Additionally, units and assessments have a dimension for the number of
visits to the page.
For more details in the structure of dimensions see ClusterEntity
documentation.
"""
datasource = student_answers.OrderedQuestionsDataSource()
template_values = {}
# This has extra information but it was already implemented.
# Also, the OrderedQuestionsDataSource takes care of the case
# where assessments are used as pre- or post- items in Units, so
# we don't have to code for that case here.
datasource.fill_values(app_context, template_values)
units_with_content = {u['unit_id']: u for u in template_values['units']}
result = []
course = courses.Course(None, app_context)
for unit in course.get_units():
_add_unit_visits(unit, result)
if not unit.is_assessment():
_add_unit_and_lesson_progress(unit, course, result)
if unit.unit_id in units_with_content:
# Adding the lessons and questions of the unit
_add_unit_and_content(units_with_content[unit.unit_id], result)
return result
class ClusterRESTHandler(dto_editor.BaseDatastoreRestHandler):
"""REST Handler for ClusterEntity model."""
URI = '/rest/cluster'
XSRF_TOKEN = 'cluster-edit'
DAO = ClusterDAO
SCHEMA_VERSIONS = ['1.0']
REQUIRED_MODULES = []
EXTRA_JS_FILES = ['cluster_rest.js']
EXTRA_CSS_FILES = []
ADDITIONAL_DIRS = [os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'analytics')]
TYPES_INFO = { # The js script file depend on this dictionary.
DIM_TYPE_QUESTION: 'question',
DIM_TYPE_LESSON: 'lesson',
DIM_TYPE_UNIT: 'unit',
DIM_TYPE_UNIT_VISIT: 'unit_visit',
DIM_TYPE_UNIT_PROGRESS: 'unit_progress',
DIM_TYPE_LESSON_PROGRESS: 'lesson_progress',
}
@staticmethod
def pack_id(dim_id, dim_type):
"""Concatenates the id and type of the dimension"""
return '{}---{}'.format(dim_id, dim_type)
@staticmethod
def unpack_id(packed_id):
"""Unpacks the id and type of the dimension"""
return packed_id.split('---')
@classmethod
def get_schema(cls, app_context=None):
cluster_schema = schema_fields.FieldRegistry(
'Cluster Definition',
description='cluster definition',
extra_schema_dict_values={'className': 'cluster-container'})
cluster_schema.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
cluster_schema.add_property(schema_fields.SchemaField(
'name', 'Name', 'string', optional=False,
extra_schema_dict_values={'className': 'cluster-name'}))
cluster_schema.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=True,
extra_schema_dict_values={'className': 'cluster-description'}))
dimension = schema_fields.FieldRegistry('Dimension',
extra_schema_dict_values={'className': 'cluster-dim'})
to_select = []
dim_types = {}
if app_context:
dimensions = get_possible_dimensions(app_context)
for dim in dimensions:
select_id = cls.pack_id(dim[DIM_ID], dim[DIM_TYPE])
to_select.append((select_id, dim['name']))
dim_types[select_id] = dim[DIM_TYPE]
dimension.add_property(schema_fields.SchemaField(
DIM_ID, 'Dimension Name', 'string', i18n=False,
extra_schema_dict_values={'className': 'dim-name'},
select_data=to_select))
# Only description for the first dimension. All the descriptions
# are in the cluster_rest.js file.
dimension.add_property(schema_fields.SchemaField(
DIM_LOW, 'Minimum number of visits to the page', 'string',
i18n=False, optional=True,
extra_schema_dict_values={'className': 'dim-range-low'}))
dimension.add_property(schema_fields.SchemaField(
DIM_HIGH, 'Maximum number of visits to the page', 'string',
i18n=False, optional=True,
extra_schema_dict_values={'className': 'dim-range-high'}))
dimension_array = schema_fields.FieldArray(
'vector', '', item_type=dimension,
description='Dimensions of the cluster. Add a new dimension '
'for each criterion the student has to satisfy to be '
'included in the cluster',
extra_schema_dict_values={
'className': 'cluster-dim-container',
'listAddLabel': 'Add a dimension',
'listRemoveLabel': 'Delete dimension',
'dim_types': dim_types,
'types_info': cls.TYPES_INFO})
cluster_schema.add_property(dimension_array)
return cluster_schema
def get_default_content(self):
return {
'version': self.SCHEMA_VERSIONS[0],
'name': '',
'description': '',
'vector': []}
def transform_for_editor_hook(self, item_dict):
"""Packs the id and type for the select field in the html."""
for dim in item_dict['vector']:
dim[DIM_ID] = ClusterRESTHandler.pack_id(dim[DIM_ID],
dim[DIM_TYPE])
return item_dict
def validate(self, item_dict, key, schema_version, errors):
"""Validates the user input.
The cluster must:
- Have a name
- Have numeric values for the fields low and high of all
dimensions.
- Have a smaller value in the low field than in the high field.
This function completes the low and high ranges with None values. Also
divides the id from the select into id and type.
"""
if not item_dict['name']:
errors.append('Empty name.')
error_str = ('Non numeric value in dimension '
'range (dimension number {}).')
# Convert to float and complete the missing ranges with None.
for index, dim in enumerate(item_dict['vector']):
if _has_right_side(dim):
try:
dim[DIM_HIGH] = float(dim[DIM_HIGH])
except ValueError:
errors.append(error_str.format(index))
else:
dim[DIM_HIGH] = None
if _has_left_side(dim):
try:
dim[DIM_LOW] = float(dim[DIM_LOW])
except ValueError:
errors.append(error_str.format(index))
else:
dim[DIM_LOW] = None
if (_has_left_side(dim) and _has_right_side(dim)
and dim[DIM_HIGH] < dim[DIM_LOW]):
errors.append('Wrong range interval in dimension '
'number {}'.format(index))
# Unpack the select id.
dim[DIM_ID], dim[DIM_TYPE] = ClusterRESTHandler.unpack_id(
dim[DIM_ID])
def pre_save_hook(self, dto):
"""Filter out dimensions with missing start- and end- range."""
dto.dict['vector'] = [dim for dim in dto.dict['vector']
if _has_left_side(dim) or _has_right_side(dim)]
class StudentVector(BaseEntity):
"""Representation of a single student based on a fixed set of dimensions.
The attribute vector stores the value of the student for each possible
dimension. This value must be a number, and it is generated by the job
StudentVectorGenerator. The information is organized in a dictionary,
for example:
{
DIM_TYPE: clustering.DIM_TYPE_QUESTION,
DIM_ID: 3,
DIM_VALUE: 60
}
"""
vector = db.TextProperty(indexed=False)
# TODO(milit): add a data source type so that all entities of this type
# can be exported via data pump for external analysis.
@classmethod
def safe_key(cls, db_key, transform_fn):
return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
@staticmethod
def get_dimension_value(vector, dim_id, dim_type):
"""Returns the value of the dimension with the given id and type.
Return None if there is no matching dimension.
Args:
vector: A list of dictionaries. Corresponds to the StudentVector
vector attribute unpacked.
"""
candidates = [dim[DIM_VALUE] for dim in vector
if str(dim[DIM_ID]) == str(dim_id) and
dim[DIM_TYPE] == dim_type]
if candidates:
return candidates[0]
class StudentClusters(BaseEntity):
"""Representation of the relation between StudentVector and ClusterEntity.
There is a StudentClusters entity for each StudentVector, created by the
ClusteringGenerator job. The key name corresponds to the key_name of the
StudentVector entity.
The attribute clusters is a dictionary mapping ClusterEntity ids to
distance values for a given distance type (Hamming as default). These
distances are calculated using the job ClusteringGenerator. For example:
{'1': 3, '2': 0, ... }
"""
clusters = db.TextProperty(indexed=False)
@classmethod
def safe_key(cls, db_key, transform_fn):
return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentVectorGenerator(jobs.MapReduceJob):
"""A map reduce job to create StudentVector.
The data comes from StudentAggregateEntity and StudentPropertyEntity.
This job updates the vector field in the associated StudentVector, or
creates a new one if there is none. This vector has a value for each
score dimension type, calculated from the submissions field of
StudentAggregateEntity as follows:
Questions: The last weighted score of the question.
Lessons: The last weighted score of the lesson.
Units or Assessments: The average of all the scored lessons in
the unit or assessment. If no lessons, then the unit has a score
by itself.
The unit visit dimension is the number of 'enter-page' events registered
for the unit in the page_views field of StudentAggregateEntity.
The unit and lesson progress dimensions are the same as the student
progress in StudentPropertyEntity.
NOTE: StudentAggregateEntity is created by the job
StudentAggregateGenerator, so they have to run one after the other.
"""
# This dictionary maps each dimension type to a function that extracts
# its value from a StudentAggregateEntity data field. The function receives
# two arguments, the data relevant to the dimension as list of
# dictionaries and the dimension dictionary. The data is the output of the
# function _inverse_submission_data.
# To define a new dimension type you must define the function and include
# it here. That way we avoid changing the map function.
DIMENSION_FUNCTIONS = {
DIM_TYPE_QUESTION: '_get_question_score',
DIM_TYPE_LESSON: '_get_lesson_score',
DIM_TYPE_UNIT: '_get_unit_score',
DIM_TYPE_UNIT_VISIT: '_get_unit_visits',
DIM_TYPE_UNIT_PROGRESS: '_get_unit_progress',
DIM_TYPE_LESSON_PROGRESS: '_get_lesson_progress',
}
@classmethod
def get_function_for_dimension(cls, dimension_type):
"""Returns the function to calculate the score of a dimension type.
The mapping between dimension types and function names is in the
class attribute DIMENSION_FUNCTIONS."""
return getattr(cls, cls.DIMENSION_FUNCTIONS[dimension_type],
lambda x, y: 0)
@staticmethod
def get_description():
return 'StudentVector generation'
@classmethod
def entity_class(cls):
return student_aggregate.StudentAggregateEntity
def build_additional_mapper_params(self, app_context):
return {'possible_dimensions': get_possible_dimensions(app_context)}
@staticmethod
def map(item):
"""Updates the values in vector.
Creates a new StudentVector using the id of the item, a
StudentAggregateEntity. Calculates the value for every dimension
from the assessment data in item.
"""
mapper_params = context.get().mapreduce_spec.mapper.params
raw_data = transforms.loads(zlib.decompress(item.data))
raw_assessments = raw_data.get('assessments', [])
sub_data = StudentVectorGenerator._inverse_submission_data(
mapper_params['possible_dimensions'], raw_assessments)
raw_page_views = raw_data.get('page_views', [])
view_data = StudentVectorGenerator._inverse_page_view_data(
raw_page_views)
progress_data = None
user_id = item.key().name()
student = models.Student.get_by_user_id(user_id)
if student:
raw_data = models.StudentPropertyEntity.get(
student, progress.UnitLessonCompletionTracker.PROPERTY_KEY)
if hasattr(raw_data, 'value') and raw_data.value:
progress_data = transforms.loads(raw_data.value)
if not (sub_data or view_data or progress_data):
return
vector = []
for dim in mapper_params['possible_dimensions']:
type_ = dim[DIM_TYPE]
if type_ == DIM_TYPE_UNIT_VISIT:
data_for_dimension = view_data[type_, str(dim[DIM_ID])]
elif type_ in [DIM_TYPE_UNIT_PROGRESS, DIM_TYPE_LESSON_PROGRESS]:
data_for_dimension = progress_data
else:
data_for_dimension = sub_data[type_, str(dim[DIM_ID])]
value = StudentVectorGenerator.get_function_for_dimension(
dim[DIM_TYPE])(data_for_dimension, dim)
new_dim = {
DIM_TYPE: dim[DIM_TYPE],
DIM_ID: dim[DIM_ID],
DIM_VALUE: value}
vector.append(new_dim)
StudentVector(key_name=str(item.key().name()),
vector=transforms.dumps(vector)).put()
@staticmethod
def reduce(item_id, values):
"""Empty function, there is nothing to reduce."""
pass
@staticmethod
def _inverse_submission_data(dimensions, raw_data):
"""Build a dictionary with the information from raw_data by dimension.
For each dimension builds an entry in the result. The value is a list
with all the submissions relevant to that dimension. The concept of
relevant is different for each type of dimension. For example, for a
unit the relevant data are the submissions of all lessons for that
unit.
Returns:
An instance of defaultdict with default empty list."""
result = collections.defaultdict(lambda: [])
for activity in raw_data:
activity_lesson = activity.get('lesson_id')
activity_unit = activity.get('unit_id')
# This creates aliasing but it's fine because it is read only.
# It only adds a copy of the timestamp for the questions.
result[DIM_TYPE_UNIT, activity_unit].append(activity)
result[DIM_TYPE_LESSON, activity_lesson].append(activity)
for submission in activity.get('submissions', []):
for answer in submission.get('answers', []):
question_id = answer.get('question_id')
answer['timestamp'] = submission['timestamp']
dim_id = pack_question_dimid(activity_unit,
activity_lesson, question_id)
result[DIM_TYPE_QUESTION, dim_id].append(answer)
return result
@staticmethod
def _inverse_page_view_data(raw_data):
"""Build a dictionary with the information from raw_data by dimension.
For each dimension builds an entry in the result. The value is a list
with all the page views relevant to that dimension. In the case
of DIM_TYPE_UNIT_VISIT the relevant page views are those
with name 'unit' or 'assessment'.
Returns:
An instance of defaultdict with default empty list."""
result = collections.defaultdict(lambda: [])
for page_view in raw_data:
name = page_view.get('name')
if name not in ['unit', 'assessment']:
continue
item_id = page_view.get('item_id')
result[DIM_TYPE_UNIT_VISIT, item_id].append(page_view)
return result
@staticmethod
def _get_question_score(data, unused_dimension):
"""The score of a question is the last weighted score obtained.
If a question is present multiple times in the same submission, then
the score is the average weighted score of the question in that
submission. If there is no submission for the question the score is 0.
Args:
data: a list of dictionaries.
"""
if not data:
return 0
last_scores = []
last_timestamp = 0
for answer in data:
# Could be more than one question with the same timestamp
score = answer.get('weighted_score')
if score and answer['timestamp'] > last_timestamp:
last_scores = [score]
last_timestamp = answer['timestamp']
elif score and answer['timestamp'] == last_timestamp:
last_scores.append(score)
if last_scores:
return math.fsum(last_scores) / len(last_scores)
return 0
@staticmethod
def _get_lesson_score(data, dimension):
"""The score of a lesson is its last score."""
if not data:
return 0
for submission in data:
if ('lesson_id' in submission and 'last_score' in submission
and submission['lesson_id'] == str(dimension[DIM_ID])):
return submission['last_score']
return 0
@staticmethod
def _get_unit_score(data, dimension):
"""The score of a unit is the average score of its scored lessons.
If the unit has no lessons (assessment), the unit will have its
own score.
"""
if not data:
return 0
if DIM_EXTRA_INFO not in dimension:
scored_lessons = 1
else:
extra_info = json.loads(dimension[DIM_EXTRA_INFO])
if 'unit_scored_lessons' not in extra_info:
scored_lessons = 1
else:
scored_lessons = max(extra_info['unit_scored_lessons'], 1)
score = 0
for submission in data:
if ('unit_id' in submission and 'last_score' in submission
and submission['unit_id'] == str(dimension[DIM_ID])):
score += submission['last_score']
return score/float(scored_lessons)
@staticmethod
def _get_unit_visits(data, dimension):
if not data:
return 0
result = 0
for page_view in data:
activities = page_view.get('activities')
if not activities:
continue
for activity in activities:
if activity.get('action') == 'enter-page':
result += 1
return result
@staticmethod
def _get_unit_progress(data, dimension):
"""Reads the progress from the value of StudentPropertyEntity."""
# This value is obtained directly from the JSON dictionary
# in value because we can't pass the UnitLessonCompletionTracker
# object as a parameter of the map reduce to use the proper accessors.
if not data:
return 0
return data.get('u.{}'.format(dimension[DIM_ID]), 0)
@staticmethod
def _get_lesson_progress(data, dimension):
"""Reads the progress from the value of StudentPropertyEntity.
The dimension has to have a field DIM_EXTRA_INFO with the unit id."""
if not data:
return 0
extra_info = dimension.get(DIM_EXTRA_INFO)
if not extra_info:
return 0
extra_info = transforms.loads(extra_info)
if not extra_info:
return 0
unit_id = extra_info.get('unit_id')
if unit_id:
return data.get('u.{}.l.{}'.format(unit_id, dimension[DIM_ID]), 0)
return 0
def hamming_distance(vector, student_vector):
"""Return the hamming distance between a ClusterEntity and a StudentVector.
The hamming distance between a ClusterEntity and a StudentVector is the
number of dimensions in which the student value is not inside the vector
range. If a dimension is not present in the student vector, we assume its
value is 0. If a dimension is not present in the cluster vector, we assume
that every value is included in the range.
Params:
vector: the vector field of a ClusterEntity instance.
student_vector: the vector field of a StudentVector instance.
"""
# TODO(milit): As we are discarding all distances greater than
# ClusteringGenerator.MAX_DISTANCE, add it as a parameter so we stop
# calculating the distance once this limit is reached.
def fits_left_side(dim, value):
"""_has_left_side(dim) -> dim[DIM_LOW] <= value"""
return not _has_left_side(dim) or dim[DIM_LOW] <= value
def fits_right_side(dim, value):
"""_has_right_side(dim) -> dim[DIM_HIGH] >= value"""
return not _has_right_side(dim) or dim[DIM_HIGH] >= value
distance = 0
for dim in vector:
value = StudentVector.get_dimension_value(student_vector,
dim[DIM_ID], dim[DIM_TYPE])
if not value:
value = 0
if not fits_left_side(dim, value) or not fits_right_side(dim, value):
distance += 1
return distance
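# Example sketch (not from the original module; dimension ids and values are
# made up): hamming_distance counts the dimensions whose student value falls
# outside the cluster range, so the student below misses only the second range.
#
#   >>> cluster_vec = [
#   ...     {DIM_TYPE: DIM_TYPE_UNIT, DIM_ID: 1, DIM_LOW: 50, DIM_HIGH: None},
#   ...     {DIM_TYPE: DIM_TYPE_UNIT, DIM_ID: 2, DIM_LOW: 0, DIM_HIGH: 10}]
#   >>> student_vec = [
#   ...     {DIM_TYPE: DIM_TYPE_UNIT, DIM_ID: 1, DIM_VALUE: 80},
#   ...     {DIM_TYPE: DIM_TYPE_UNIT, DIM_ID: 2, DIM_VALUE: 25}]
#   >>> hamming_distance(cluster_vec, student_vec)
#   1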
class ClusteringGenerator(jobs.MapReduceJob):
"""A map reduce job to calculate which students belong to each cluster.
This job calculates the distance between each StudentVector and each
ClusterEntity using the Hamming distance. The value of the distance is
going to be stored in the clusters attribute of a StudentClusters entity. This attribute
is a json dictionary where the keys are the ids (as strings) of the
clusters and the values are the distances. All previous distances are
discarded.
All distances greater than MAX_DISTANCE are ignored and not stored in the
StudentClusters entity.
In the reduce step it calculates two statistics: the number of
students in each cluster and the intersection of pairs of clusters.
"""
MAX_DISTANCE = 2
# TODO(milit): Add settings to disable heavy statistics.
@staticmethod
def get_description():
return 'StudentVector clusterization'
@classmethod
def entity_class(cls):
return models.Student
def build_additional_mapper_params(self, app_context):
clusters = [{'id': cluster.id, 'vector': cluster.vector}
for cluster in ClusterDAO.get_all()]
return {
'clusters': clusters,
'max_distance': getattr(self, 'MAX_DISTANCE', 2)
}
@staticmethod
def map(item):
"""Calculates the distance from the StudentVector to ClusterEntites.
Stores these distances in the clusters attribute of the matching
StudentClusters entity. Ignores distances greater than MAX_DISTANCE.
Yields:
Pairs (key, value). There are three types of keys:
1. A cluster id: the value is a tuple (student_id, distance).
2. A pair of clusters ids: the value is a 3-uple
(student_id, distance1, distance2)
distance1 is the distance from the student vector to the
cluster with the first id of the tuple and distance2 is
the distance to the second cluster in the tuple.
3. A string 'student_count' with value 1.
One result is yielded for every cluster id and pair of clusters
ids. If (cluster1_id, cluster2_id) is yielded, then
(cluster2_id, cluster1_id) won't be yielded.
"""
student = StudentVector.get_by_key_name(item.user_id)
if student:
mapper_params = context.get().mapreduce_spec.mapper.params
max_distance = mapper_params['max_distance']
clusters = {}
item_vector = transforms.loads(student.vector)
for cluster in mapper_params['clusters']:
distance = hamming_distance(cluster['vector'], item_vector)
if distance > max_distance:
continue
for cluster2_id, distance2 in clusters.items():
key = transforms.dumps((cluster2_id, cluster['id']))
value = (item.user_id, distance, distance2)
yield (key, transforms.dumps(value))
clusters[cluster['id']] = distance
to_yield = (item.user_id, distance)
yield(cluster['id'], transforms.dumps(to_yield))
clusters = transforms.dumps(clusters)
StudentClusters(key_name=item.user_id, clusters=clusters).put()
yield ('student_count', 1)
@staticmethod
def combine(key, values, previously_combined_outputs=None):
"""Combiner function called before the reducer.
Params:
key: the value of the key from the map output.
values: the values for that key from the map output.
previously_combined_outputs: a list or a RepeatedScalarContainer
that holds the combined output for other instances for the
same key."""
if key != 'student_count':
for value in values:
yield value
if previously_combined_outputs is not None:
for value in previously_combined_outputs:
yield value
else:
total = sum([int(value) for value in values])
if previously_combined_outputs is not None:
total += sum([int(value) for value in
previously_combined_outputs])
yield total
@staticmethod
def reduce(item_id, values):
"""
This function can take three types of item_id (as a json string).
A number: the values are 2-uples (student_id, distance) and is
used to calculate a count statistic.
A list: the item_id holds the IDs of two clusters and the value
corresponds to 3-uple (student_id, distance1, distance2). The
value is used to calculate an intersection stats.
A string 'student_count': The values are going to be a list of
partial sums of numbers.
Yields:
A json string representing a tuple ('stat_name', (item_id,
distances)). For count stats, the i-th number in the distances
list corresponds to the number of students with distance equal to
i to the vector. For intersection, the i-th number in the distance
list corresponds to the students with distance less or equal than
i to both clusters. item_id is the same item_id received as
a parameter, but converted from the json string.
For the stat student_count the value is a single number
representing the total number of StudentVector
"""
if item_id == 'student_count':
yield (item_id, sum(int(value) for value in values))
else:
item_id = transforms.loads(item_id)
distances = collections.defaultdict(lambda: 0)
if isinstance(item_id, list):
stat_name = 'intersection'
for value in values:
value = transforms.loads(value)
# If a student vector has a distance 1 to cluster A
# and distance 3 to cluster B, then it has a
# distance of 3 (the greater) to the intersection
intersection_distance = max(value[1], value[2])
distances[intersection_distance] += 1
item_id = tuple(item_id)
else:
stat_name = 'count'
for value in values:
value = transforms.loads(value)
distances[value[1]] += 1
distances = dict(distances)
list_distances = [0] * (max([int(k) for k in distances]) + 1)
for distance, count in distances.items():
list_distances[int(distance)] = count
if stat_name == 'intersection':
# Accumulate the distances.
for index in range(1, len(list_distances)):
list_distances[index] += list_distances[index - 1]
yield transforms.dumps((stat_name, (item_id, list_distances)))
class TentpoleStudentVectorDataSource(data_sources.SynchronousQuery):
"""This datasource does not retrieve elements.
This datasource exists to put a button in the Visualization html that
allows the user to run the job StudentVectorGenerator and to create the
StudentVector entities. Also gives information about the state of the
StudentAggregateGenerator job, which is a requisite to run
StudentVectorGenerator.
However, it is NOT expected to retrieve the StudentVector entities for
display.
"""
@staticmethod
def required_generators():
return [StudentVectorGenerator]
@staticmethod
def fill_values(app_context, template_values, unused_gen):
"""Check if the StudentAggregateGenerator has run."""
job = student_aggregate.StudentAggregateGenerator(app_context).load()
if not job:
template_values['message'] = ('The student aggregated job has '
'never run.')
message_str = ('The student aggregated values were '
'last calculated on {}.')
last_update = getattr(job, 'updated_on', None)
if not last_update:
template_values['message'] = ('The student aggregated job has '
'never run.')
else:
template_values['message'] = message_str.format(
job.updated_on.strftime(utils.HUMAN_READABLE_DATETIME_FORMAT))
class ClusterStatisticsDataSource(data_sources.AbstractSmallRestDataSource):
"""Returns the values obtained by ClusteringGenerator."""
@staticmethod
def required_generators():
return [ClusteringGenerator]
@classmethod
def get_name(cls):
return 'cluster_statistics'
@classmethod
def get_title(cls):
return '' # Not used.
@classmethod
def get_schema(cls, unused_app_context, unused_catch_and_log,
unused_source_context):
# Without schema the fetch_values function won't be called.
return 'List with dummy objects'.split()
@staticmethod
def _process_job_result(results):
def add_zeros(iterable, length):
return iterable + [0] * (length - len(iterable))
def process_count(value, count):
if value[0] not in count:
return
count[value[0]][1:] = add_zeros(value[1], max_distance + 1)
def process_intersection(value, count, inter):
cluster1, cluster2 = value[0]
if not (cluster2 in count and cluster1 in count):
return
map1 = id_mapping.index(cluster1)
map2 = id_mapping.index(cluster2)
for dist in range(max_distance + 1): # Include the last one
c1_count = sum(count[cluster1][1:dist + 2])
c2_count = sum(count[cluster2][1:dist + 2])
if dist >= len(value[1]): # Complete missing values
int_count = value[1][-1]
else:
int_count = value[1][dist] # We know is not empty
inter[dist]['count'][map1][map2] = int_count
percentage = round(int_count*100/float(student_count), 2)
inter[dist]['percentage'][map1][map2] = percentage
# P(c2 | c1) = count(c1 and c2) / count(c1)
probability = 0
if c1_count:
probability = round(int_count/float(c1_count), 2)
inter[dist]['probability'][map1][map2] = probability
# P(c1 | c2) = count(c1 and c2) / count(c2)
probability = 0
if c2_count:
probability = round(int_count/float(c2_count), 2)
inter[dist]['probability'][map2][map1] = probability
max_distance = ClusteringGenerator.MAX_DISTANCE
student_count = 1
count = {}
dimension_count = {}
for cluster in ClusterDAO.get_all():
count[cluster.id] = [cluster.name] + [0] * (max_distance + 1)
dimension_count[cluster.id] = len(cluster.vector)
id_mapping = count.keys()
name_mapping = [count[cid][0] for cid in id_mapping]
l = lambda: collections.defaultdict(l)
inter = [{'count': l(), 'percentage': l(), 'probability': l()}
for _ in range(max_distance + 1)]
# Process all counts first
for result in results:
stat, value = result
if stat == 'count':
process_count(value, count)
elif stat == 'student_count':
student_count = value
# Once counting is complete, process the intersections
for result in results:
stat, value = result
if stat != 'intersection':
continue
process_intersection(value, count, inter)
# Reprocess counts to eliminate non relevant information
for cluster_id in count:
dimension = dimension_count[cluster_id]
if dimension <= max_distance:
count[cluster_id] = add_zeros(
count[cluster_id][:dimension + 1], max_distance + 2)
other = student_count - sum(count[cluster_id][1:])
count[cluster_id].append(other)
extra_info = {'max_distance': max_distance}
return [count.values(), inter, name_mapping, extra_info]
@classmethod
def fetch_values(cls, unused_app_context, unused_source_context,
unused_schema, unused_catch_and_log, unused_page_number,
clustering_generator_job):
"""Returns the statistics calculated by clustering_generator_job.
The information extracted from the intersection data can be of three
types:
1. 'count' is the number of students in the intersection
2. 'percentage' is the percentage of students in the intersection
over the total of StudentVector entities in the db.
3. 'probability' of the cluster B given the cluster A is the count
of students in the intersection divided by the number of
students in A.
Returns:
A list of dictionaries and the page number, always 0. The list
has four elements:
1. The results of the count statistic: A matrix with the
format
[cluster_name, distance0, distance1, ... distanceN]
where distanceX is the number of students at distance X of
the cluster.
2. The results of the intersection statistics: A list of
dictionaries. The dictionary in position i contains the
information of students at distance less than or equal to i.
The keys of the dictionaries are the types of data in the
values: 'count', 'percentage' or 'probability'.
The values are two-level dictionaries with the numbers for pairs
of clusters. The clusters are mapped to sequential numbers.
For example:
{'count': {0: {1: 1},
1: {2: 1},
3: {2: 0}},
'percentage': {0: {1: 16.67},
1: {2: 16.67},
3: {2: 0.00}},
'probability': {0: {1: 1.0}, 1: {0: 0.5, 2: 0.5},
2: {1: 0.5, 3: 0.0},
3: {2: 0.0}}}
Not all pairs are included in this intersection. If
an entry pair [a][b] is missing, it is safe to assume that the
intersection is in the entry [b][a] or is 0.
3. The mapping from cluster number to cluster name. A list where
the index indicate the number of the cluster in that position.
4. A dictionary with extra information. It has a key max_distance
and a numeric value.
"""
# This function is long and complicated, but it is so to send the data
# as much processed as possible to the javascript in the page.
# The information is adjusted to fit the graphics easily.
results = list(jobs.MapReduceJob.get_results(clustering_generator_job))
# data, page_number
return ClusterStatisticsDataSource._process_job_result(results), 0
|
wijnandb/CodeCult-Scratch
|
modules/analytics/clustering.py
|
Python
|
apache-2.0
| 47,710
|
[
"VisIt"
] |
7bf1b1035d2b0fbbc70aca80ed8278d64d32925a8bd500dafa73e31cc8d47cd5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes for parsing the FEFF output files.
Currently supports the xmu.dat and ldos.dat output files for the non-spin case.
"""
import re
from collections import OrderedDict, defaultdict
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core.periodic_table import Element
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.io.feff import Header, Potential, Tags
__author__ = "Alan Dozier, Kiran Mathew, Chen Zheng"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__status__ = "Beta"
__date__ = "April 7, 2013"
class LDos(MSONable):
"""
Parser for ldos files ldos01, ldos02, .....
"""
def __init__(self, complete_dos, charge_transfer):
"""
Args:
complete_dos (CompleteDos): complete dos object
charge_transfer (dict): computed charge transfer between atoms
dictionary
"""
self.complete_dos = complete_dos
self.charge_transfer = charge_transfer
@staticmethod
def from_file(feff_inp_file="feff.inp", ldos_file="ldos"):
"""
Creates LDos object from raw Feff ldos files by
assuming they are numbered consecutively, i.e. ldos01.dat
ldos02.dat...
Args:
feff_inp_file (str): input file of run to obtain structure
ldos_file (str): output ldos file of run to obtain dos info, etc.
"""
header_str = Header.header_string_from_file(feff_inp_file)
header = Header.from_string(header_str)
structure = header.struct
nsites = structure.num_sites
parameters = Tags.from_file(feff_inp_file)
if "RECIPROCAL" in parameters:
pot_dict = dict()
pot_readstart = re.compile(".*iz.*lmaxsc.*xnatph.*xion.*folp.*")
pot_readend = re.compile(".*ExternalPot.*switch.*")
pot_inp = re.sub(r"feff.inp", r"pot.inp", feff_inp_file)
dos_index = 1
begin = 0
with zopen(pot_inp, "r") as potfile:
for line in potfile:
if len(pot_readend.findall(line)) > 0:
break
if begin == 1:
begin += 1
continue
if begin == 2:
z_number = int(line.strip().split()[0])
ele_name = Element.from_Z(z_number).name
if ele_name not in pot_dict:
pot_dict[ele_name] = dos_index
else:
pot_dict[ele_name] = min(dos_index, pot_dict[ele_name])
dos_index += 1
if len(pot_readstart.findall(line)) > 0:
begin = 1
else:
pot_string = Potential.pot_string_from_file(feff_inp_file)
dicts = Potential.pot_dict_from_string(pot_string)
pot_dict = dicts[0]
with zopen(ldos_file + "00.dat", "r") as fobject:
f = fobject.readlines()
efermi = float(f[0].split()[4])
dos_energies = []
ldos = {}
for i in range(1, len(pot_dict) + 1):
if len(str(i)) == 1:
ldos[i] = np.loadtxt("{}0{}.dat".format(ldos_file, i))
else:
ldos[i] = np.loadtxt("{}{}.dat".format(ldos_file, i))
for i in range(0, len(ldos[1])):
dos_energies.append(ldos[1][i][0])
all_pdos = []
vorb = {"s": Orbital.s, "p": Orbital.py, "d": Orbital.dxy, "f": Orbital.f0}
forb = {"s": 0, "p": 1, "d": 2, "f": 3}
dlength = len(ldos[1])
for i in range(nsites):
pot_index = pot_dict[structure.species[i].symbol]
all_pdos.append(defaultdict(dict))
for k, v in vorb.items():
density = [ldos[pot_index][j][forb[k] + 1] for j in range(dlength)]
updos = density
downdos = None
if downdos:
all_pdos[-1][v] = {Spin.up: updos, Spin.down: downdos}
else:
all_pdos[-1][v] = {Spin.up: updos}
pdos = all_pdos
vorb2 = {0: Orbital.s, 1: Orbital.py, 2: Orbital.dxy, 3: Orbital.f0}
pdoss = {structure[i]: {v: pdos[i][v] for v in vorb2.values()} for i in range(len(pdos))}
forb = {"s": 0, "p": 1, "d": 2, "f": 3}
tdos = [0] * dlength
for i in range(nsites):
pot_index = pot_dict[structure.species[i].symbol]
for v in forb.values():
density = [ldos[pot_index][j][v + 1] for j in range(dlength)]
for j in range(dlength):
tdos[j] = tdos[j] + density[j]
tdos = {Spin.up: tdos}
dos = Dos(efermi, dos_energies, tdos)
complete_dos = CompleteDos(structure, dos, pdoss)
charge_transfer = LDos.charge_transfer_from_file(feff_inp_file, ldos_file)
return LDos(complete_dos, charge_transfer)
@staticmethod
def charge_transfer_from_file(feff_inp_file, ldos_file):
"""
Get charge transfer from file.
Args:
feff_inp_file (str): name of feff.inp file for run
ldos_file (str): ldos filename for run, assume consecutive order,
i.e., ldos01.dat, ldos02.dat....
Returns:
dictionary of dictionaries in order of potential sites
({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
"""
cht = OrderedDict()
parameters = Tags.from_file(feff_inp_file)
if "RECIPROCAL" in parameters:
dicts = [dict()]
pot_dict = dict()
dos_index = 1
begin = 0
pot_inp = re.sub(r"feff.inp", r"pot.inp", feff_inp_file)
pot_readstart = re.compile(".*iz.*lmaxsc.*xnatph.*xion.*folp.*")
pot_readend = re.compile(".*ExternalPot.*switch.*")
with zopen(pot_inp, "r") as potfile:
for line in potfile:
if len(pot_readend.findall(line)) > 0:
break
if begin == 1:
z_number = int(line.strip().split()[0])
ele_name = Element.from_Z(z_number).name
if len(pot_dict) == 0:
pot_dict[0] = ele_name
elif len(pot_dict) > 0:
pot_dict[max(pot_dict.keys()) + 1] = ele_name
begin += 1
continue
if begin == 2:
z_number = int(line.strip().split()[0])
ele_name = Element.from_Z(z_number).name
dicts[0][ele_name] = dos_index
dos_index += 1
if len(pot_dict) == 0:
pot_dict[0] = ele_name
elif len(pot_dict) > 0:
pot_dict[max(pot_dict.keys()) + 1] = ele_name
if len(pot_readstart.findall(line)) > 0:
begin = 1
else:
pot_string = Potential.pot_string_from_file(feff_inp_file)
dicts = Potential.pot_dict_from_string(pot_string)
pot_dict = dicts[1]
for i in range(0, len(dicts[0]) + 1):
if len(str(i)) == 1:
with zopen("{}0{}.dat".format(ldos_file, i), "rt") as fobject:
f = fobject.readlines()
s = float(f[3].split()[2])
p = float(f[4].split()[2])
d = float(f[5].split()[2])
f1 = float(f[6].split()[2])
tot = float(f[1].split()[4])
cht[str(i)] = {pot_dict[i]: {"s": s, "p": p, "d": d, "f": f1, "tot": tot}}
else:
with zopen(ldos_file + str(i) + ".dat", "rt") as fid:
f = fid.readlines()
s = float(f[3].split()[2])
p = float(f[4].split()[2])
d = float(f[5].split()[2])
f1 = float(f[6].split()[2])
tot = float(f[1].split()[4])
cht[str(i)] = {pot_dict[i]: {"s": s, "p": p, "d": d, "f": f1, "tot": tot}}
return cht
def charge_transfer_to_string(self):
"""returns shrage transfer as string"""
ch = self.charge_transfer
chts = ["\nCharge Transfer\n\nabsorbing atom"]
for i in range(len(ch)):
for atom, v2 in ch[str(i)].items():
a = [
"\n",
atom,
"\n",
"s ",
str(v2["s"]),
"\n",
"p ",
str(v2["p"]),
"\n",
"d ",
str(v2["d"]),
"\n",
"f ",
str(v2["f"]),
"\n",
"tot ",
str(v2["tot"]),
"\n",
]
chts.extend(a)
return "".join(chts)
class Xmu(MSONable):
r"""
Parser for data in 'xmu.dat' file.
The file 'xmu.dat' contains XANES, EXAFS or NRIXS data depending on the
situation; \\mu, \\mu_0, and \\chi = \\chi * \\mu_0 / \\mu_0(edge+50eV) as
functions of absolute energy E, relative energy E − E_f and wave number k.
Default attributes:
xmu: Photon absorption cross section of absorbing atom in material
Energies: Energies of data point
relative_energies: E - E_fermi
wavenumber: k=\\sqrt(E −E_fermi)
mu: The total absorption cross-section.
mu0: The embedded atomic background absorption.
chi: fine structure.
Edge: Absorption edge
Absorbing atom: Species of absorbing atom
Material: Formula of material
Source: Source of structure
Calculation: Type of Feff calculation performed
"""
def __init__(self, header, parameters, absorbing_atom, data):
"""
Args:
header: Header object
parameters: Tags object
absorbing_atom (str/int): absorbing atom symbol or index
data (numpy.ndarray, Nx6): cross_sections
"""
self.header = header
self.parameters = parameters
self.absorbing_atom = absorbing_atom
self.data = np.array(data)
@staticmethod
def from_file(xmu_dat_file="xmu.dat", feff_inp_file="feff.inp"):
"""
Get Xmu from file.
Args:
xmu_dat_file (str): filename and path for xmu.dat
feff_inp_file (str): filename and path of feff.inp input file
Returns:
Xmu object
"""
data = np.loadtxt(xmu_dat_file)
header = Header.from_file(feff_inp_file)
parameters = Tags.from_file(feff_inp_file)
pots = Potential.pot_string_from_file(feff_inp_file)
# site index (Note: in feff it starts from 1)
if "RECIPROCAL" in parameters:
absorbing_atom = parameters["TARGET"]
# species symbol
else:
absorbing_atom = pots.splitlines()[3].split()[2]
return Xmu(header, parameters, absorbing_atom, data)
@property
def energies(self):
"""
Returns the absolute energies in eV.
"""
return self.data[:, 0]
@property
def relative_energies(self):
"""
Returns energy with respect to the fermi level.
E - E_f
"""
return self.data[:, 1]
@property
def wavenumber(self):
r"""
Returns the wave number in units of \\AA^-1. k=\\sqrt(E − E_f) where E is
the energy and E_f is the Fermi level computed from electron gas theory
at the average interstitial charge density.
"""
return self.data[:, 2]
@property
def mu(self):
"""
Returns the total absorption cross-section.
"""
return self.data[:, 3]
@property
def mu0(self):
"""
Returns the embedded atomic background absorption.
"""
return self.data[:, 4]
@property
def chi(self):
"""
Returns the normalized fine structure.
"""
return self.data[:, 5]
@property
def e_fermi(self):
"""
Returns the fermi level in eV.
"""
return self.energies[0] - self.relative_energies[0]
@property
def source(self):
"""
Returns source identification from Header file
"""
return self.header.source
@property
def calc(self):
"""
Returns type of Feff calculation, XANES or EXAFS
"""
return "XANES" if "XANES" in self.parameters else "EXAFS"
@property
def material_formula(self):
"""
Returns chemical formula of material from feff.inp file
"""
try:
form = self.header.formula
except IndexError:
form = "No formula provided"
return "".join(map(str, form))
@property
def edge(self):
"""
Returns excitation edge.
"""
return self.parameters["EDGE"]
def as_dict(self):
"""
Returns dict representation of Xmu object
"""
d = MSONable.as_dict(self)
d["data"] = self.data.tolist()
return d
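# Example sketch (not from the original module): typical use of Xmu with the
# default FEFF output file names; 'xmu.dat' and 'feff.inp' must exist in the
# working directory for this to run.
#
#   >>> xmu = Xmu.from_file("xmu.dat", "feff.inp")
#   >>> xmu.calc          # 'XANES' or 'EXAFS', read from the feff.inp tags
#   >>> xmu.energies[:5]  # absolute energies in eV, first column of xmu.dat
#   >>> xmu.chi           # fine structure, last column of xmu.dat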
class Eels(MSONable):
"""
Parse the 'eels.dat' file.
"""
def __init__(self, data):
"""
Args:
data (numpy.ndarray): Eels data.
"""
self.data = np.array(data)
@property
def energies(self):
"""
Returns the energies in eV.
"""
return self.data[:, 0]
@property
def total_spectrum(self):
"""
Returns the total eels spectrum.
"""
return self.data[:, 1]
@property
def atomic_background(self):
"""
Returns: atomic background.
"""
return self.data[:, 2]
@property
def fine_structure(self):
"""
Returns: Fine structure of EELS.
"""
return self.data[:, 3]
@staticmethod
def from_file(eels_dat_file="eels.dat"):
"""
Parse eels spectrum.
Args:
eels_dat_file (str): filename and path for eels.dat
Returns:
Eels object
"""
data = np.loadtxt(eels_dat_file)
return Eels(data)
def as_dict(self):
"""
Returns dict representation of Eels object
"""
d = MSONable.as_dict(self)
d["data"] = self.data.tolist()
return d
|
gmatteo/pymatgen
|
pymatgen/io/feff/outputs.py
|
Python
|
mit
| 15,146
|
[
"FEFF",
"pymatgen"
] |
450408644d67e43748395f1a2c682eba0ce07e4de94dd8b0e6ebea327c169519
|
# the two-theta motor and detector
# test xpd sim of motor movement
import numpy as np
from ophyd.sim import SynSignal, motor1, motor2
from lmfit import Model, Parameter, Parameters
from lmfit.models import VoigtModel, LinearModel
from lmfit.lineshapes import voigt
class SynGaussPeaks(SynSignal):
"""
Evaluate a point on a set of peaks based on the value of a motor.
Parameters
----------
name : string
motor : Device
motor_field : string
centers : list of numbers
centers of the peaks
Imax : number
max intensity of peak
sigma : number, optional
Default is 1.
noise : {'poisson', 'uniform', None}, optional
Add noise to the gaussian peak.
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
random_state : numpy random state object, optional
np.random.RandomState(0), to generate random number with given seed
Example
-------
motor = SynAxis(name='motor')
det = SynGaussPeaks('det', motor, 'motor', centers=[0], Imax=1, sigma=1)
"""
def __init__(self, name, motor, motor_field, centers, Imax, sigma=1,
noise=None, noise_multiplier=1, random_state=None, offset=None,
**kwargs):
if noise not in ('poisson', 'uniform', None):
raise ValueError("noise must be one of 'poisson', 'uniform', None")
self._motor = motor
if random_state is None:
random_state = np.random
def func():
m = motor.read()[motor_field]['value']
v = m*0
for center in centers:
v += Imax * np.exp(-(m - center) ** 2 / (2 * sigma ** 2))
if offset is not None:
v += offset
if noise == 'poisson':
v += int(random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += random_state.uniform(-1, 1) * noise_multiplier
return v
super().__init__(func=func, name=name, **kwargs)
D_SPACINGS = {'LaB6': np.array([4.15772, 2.94676, 2.40116]),
'Si': 5.43095 / np.array([np.sqrt(3), np.sqrt(8), np.sqrt(11), np.sqrt(27)]),
}
import numpy as np
#def gaussian(theta, center, width):
# return 1500 / (np.sqrt(2*np.pi) * width) * np.exp(-((theta - center) / width)**2 / 2)
# for the simulation
SIMULATED_D = "Si"
def intensity(theta, amplitude, width, wavelength):
result = np.clip(5 * np.random.randn(), 0, None) # Gaussian noise
    for d in D_SPACINGS[SIMULATED_D]:
assert wavelength < 2 * d, \
"wavelength would result in illegal arg to arcsin"
try:
center = np.arcsin(wavelength / (2 * d))
except Exception:
print("DEAD"); center = 0
result += voigt(theta, amplitude, center, width)
result += voigt(-theta, amplitude, center, width)
return result
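# Worked check of the peak positions above (comments only): Bragg's law gives
# sin(theta) = wavelength / (2 * d).  With the wavelength used below,
# 12.398 / 66.4 ~ 0.1867 Angstrom, and the first Si d-spacing
# 5.43095 / sqrt(3) ~ 3.136 Angstrom, the first peak center is
# arcsin(0.1867 / 6.271) ~ 0.0298 rad in theta, i.e. a two-theta of ~3.41 deg
# on motor1 in current_intensity_peaks.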
def current_intensity_peaks():
amplitude = 0.5
width = 0.004 # degrees
    wavelength = 12.398 / 66.4  # angstroms
two_theta = motor1.read()['motor1']['value'] # degrees
theta = np.deg2rad(two_theta / 2) # radians
return intensity(theta, amplitude, np.deg2rad(width), wavelength)
def current_intensity_dips():
amplitude = 0.5
width = 0.004 # degrees
    wavelength = 12.398 / 66.4  # angstroms
hw_theta = motor1.read()['motor1']['value'] # degrees
theta = np.deg2rad(hw_theta + 35.26) # radians
return -intensity(theta, amplitude, np.deg2rad(width), wavelength) + 10000
th_cal = motor1
sc = SynSignal(name="det", func=current_intensity_dips)
''' test sim motors
import bluesky.plan_stubs as bps
import bluesky.plans as bp
from bluesky.callbacks import LivePlot
def myplan():
yield from bps.abs_set(motor1, 0)
yield from bp.rel_scan([det_6peaks], motor1, -10, 10, 1000)
RE(myplan(), LivePlot('det_6peaks', 'motor1'))
'''
|
NSLS-II-XPD/ipython_ophyd
|
profile_collection/simulators/10-motors-dets-sim.py
|
Python
|
bsd-2-clause
| 3,925
|
[
"Gaussian"
] |
f1b6e3e9a14ccf04f6671f7d5490a34556996d482bcb4fba1d31ffd0b8aa602f
|
#!/usr/bin/env python
__all__ = [ 'read_beta_hf_string' ]
import os,sys, re, argparse, ctypes, multiprocessing, functools
import numpy as np
import math as m
#from particles import *
from matplotlib import pyplot as plt
from .molecules import Atom
from .template import Template
try:
from applequist.gaussian import *
except ImportError:
pass
a0 = 0.52917721092
lab = [ "X", "Y", "Z"]
charge_dic = {"H1": 1.0 ,"H2":1.0 , "C1":6.0, "C7":6.0, "H3":1.0,
"H4":1.0, "H6": 1.0, "H8":1.0,
"H9":1.0, "H10": 1.0, "H12":1.0,
"O5":8.0, "O11": 8.0,
"H": 1.0, "C": 6.0, "N": 7.0, "O": 8.0, "S": 16.0}
mass_dict = {"H": 1.008, "C": 6.0, "N": 7.0, "O": 15.999, "S": 16.0}
freq_dict = {"0.0": "static","0.0238927": "1907_nm", "0.0428227" : "1064_nm",
"0.0773571" : "589_nm" }
allowed_elements = ( 'H', 'O' )
def polar_to_cartesian( r, tau, theta):
x, y, z = r* np.sin( theta )*np.cos( tau ) \
, r* np.sin( theta )*np.sin( tau ) \
, r* np.cos( theta )
return x , y , z
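# Quick sanity check for the conversion above (comments only, angles in
# radians): with r = 1, tau = 0, theta = pi/2 the point lands on the x-axis,
# (x, y, z) = (1.0, 0.0, 0.0), since x = r*sin(theta)*cos(tau),
# y = r*sin(theta)*sin(tau) and z = r*cos(theta).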
def write_related( args ):
if args.xyz.endswith(".pdb"):
name = args.xyz.split(".")[0] + "_" + str(args.waters) + ".mol"
waters = molecules.Water.read_waters( args.xyz ,
in_AA = args.xAA,
out_AA = args.oAA,
N_waters = args.waters )
elif args.xyz.endswith( ".xyz" ):
name = args.x.split(".")[0] + ".mol"
f_ = open( name , "w" )
if args.oAA:
str_ = "Angstrom"
else:
str_ = ""
f_.write( "ATOMBASIS\n\nComment\nmolecules.Atomtypes=2 Charge=0 Nosymm %s\n" %str_)
if not args.wat:
"Can't write to .mol file, didn't read water molecules"
raise SystemExit
hCnt = len(waters) * 2
oCnt = len(waters)
f_.write( "Charge=1.0 molecules.Atoms=%d Basis=cc-pVDZ\n" % hCnt)
for i in waters:
for j in i:
if j.element == "H":
f_.write( "%s %.5f %.5f %.5f\n" %( j.element, j.x, j.y, j.z ))
f_.write( "Charge=8.0 molecules.Atoms=%d Basis=cc-pVDZ\n" % oCnt)
for i in waters:
for j in i:
if j.element == "O":
f_.write( "%s %.5f %.5f %.5f\n" %( j.element, j.x, j.y, j.z ))
print "Finished writing mol files %s" %name
raise SystemExit
def run_argparse( args ):
A = argparse.ArgumentParser( )
# ----------------------------
# GENERIC VARIABLES
# ----------------------------
#
A.add_argument("-dal", type= str, default = 'hflin' )
A.add_argument("-mol", type= str, default = 'tip3p' )
A.add_argument( "-dist", action = "store_true", default = False )
# ----------------------------
# READ ALPHA
# ----------------------------
A.add_argument( "-alpha", type = str, )
# ----------------------------
# BETA ANALYSIS RELATED
# ----------------------------
A.add_argument( "-beta_analysis_par", action = "store_true", default = False )
A.add_argument( "-beta_analysis", action = "store_true", default = False )
A.add_argument( "-freq", type = str, default = "0.0",
choices = ["0.0", "0.0238927", "0.0428227", "0.0773571"] )
A.add_argument( "-R", type = float, default = 0.000001)
A.add_argument( "-beta",dest="beta", type = str,help="File that contains QUADRATIC response output with hyperpolarizabilities" )
A.add_argument( "-in_AA", action = "store_true", default = False )
A.add_argument( "-out_AA", action = "store_true", default = False )
A.add_argument( "-basis", type= str, nargs = '*', default = "ANOPVDZ" )
A.add_argument( "-beta_dal", type= str, default = "hfqua_" )
A.add_argument( "-Ncpu", type= int, default = "4" )
A.add_argument( "-N_waters", type= int, default = 15 )
A.add_argument( "-model", default = "tip3p" )
# ----------------------------
# ALPHA ANALYSIS RELATED
# ----------------------------
#
A.add_argument( "-alpha_analysis", action = "store_true", default = False )
A.add_argument( "-nums", type = str, nargs = '*',
default = map(str, range(1,10)) )
A.add_argument( "-x", type = str, default = ["nums"],
choices = ["snaps", "nums", "freqs"] )
A.add_argument( "-y", type = str, default = ["yy"],
choices = ["xx", "yy", "zz", "mean", "aniso"] )
A.add_argument( "-freqs", type = str, nargs = '*',
default = ['0.0', "0.0238927", "0.0428227", "0.0773571"]
)
A.add_argument( "-comps", type = str, nargs = '*', default = ["xx", "yy", "zz"],
choices = ["xx", "yy", "zz", "mean", "aniso"])
A.add_argument( "-snaps", type = str, nargs = '*',
default = map(str, range(10)) )
A.add_argument( "-eps_out", type = str )
A.add_argument( "-template_freq", type = str,
choices = ['0.0', "0.0238927", "0.0428227", "0.0773571"]
)
A.add_argument( "-hdf", action = "store_true", default = False )
# ----------------------------
# RELATED TO PLOT WINDOW APPEARANCE
# ----------------------------
#
A.add_argument( "-ymin", type = float, default = -0.10 )
A.add_argument( "-ymax", type = float, default = 0.10 )
# ----------------------------
# QM GENERATION RELATED
# ----------------------------
A.add_argument( "-qm_generation", action = "store_true", default = False )
# ----------------------------
# QM ANALYSIS RELATED
# ----------------------------
A.add_argument( "-qm_analysis", action = "store_true", default = False )
# ----------------------------
# QMMM GENERATION RELATED
# ----------------------------
A.add_argument( "-qmmm_generation", action = "store_true", default = False )
A.add_argument( "-potstyle", default = "QMMM",
choices = ["QMMM", "PEQM"])
A.add_argument( "-qm_waters", type = int, nargs = '*',
default = [1] )
A.add_argument( "-mm_waters", type = int, nargs = '*',
default = [1] )
A.add_argument( "-file_type", type = str, default = "pdb" )
A.add_argument( "-tname", type = str, default = "TIP3P" )
A.add_argument( "-tmethod", type = str, default = "HF" )
A.add_argument( "-tbasis", type = str, default = "ANOPVDZ" )
#also share same arguments -snaps -freqs with -alpha_analysis
# ----------------------------
# QMMM ANALYSIS RELATED
# ----------------------------
A.add_argument( "-qmmm_analysis", action = "store_true", default = False )
A.add_argument( "-n_qm", type = str, nargs = '*',
default = map(str, range(1,10)) )
A.add_argument( "-n_mm", type = str, nargs = '*',
default = map(str, range(1,101)) )
A.add_argument( "-potfreqs", type = str, nargs = '*',
default = ["0.0", "0.0238927", "0.0428227", "0.0773571"] )
# ----------------------------
# WRITE RELATED pdb to mol generation RELATED
# ----------------------------
A.add_argument("-waters", type = int , default = 4, help = "how many waters to take closest to center atom, default: 4")
A.add_argument("-v","--verbose", action='store_true' , default = False)
A.add_argument("-write", nargs='*', default = [], help = "Supply any which files to write from a selection: pot, xyz" )
A.add_argument( "-xyz", dest="xyz", type = str, help = 'Coordinate file with water molecules for the output .pot file. [ xyz , pdb ]')
A.add_argument( "-xAA", default = False ,action='store_true',
help = 'Default coordinate type in AA or AU in -x input water coordinate file, default: False ')
A.add_argument( "-oAA", default = False, action='store_true' , help='Default coordinate type AA or AU for -op output potential file, default: "AU"' )
A.add_argument( "-tw", type = float, default = 0.0 )
A.add_argument( "-wat", action = 'store_true' , default= True )
a = A.parse_args( args[1:] )
return a
def is_ccsd( filename):
""" Return true if the filename, which is DALTON .out file, is a quadratic ccsd calculation"""
pat_ccsd = re.compile(r'FINAL CCSD RESULTS FOR THE FIRST HYPERPOLARIZABILITIES')
for i in open(filename).readlines():
if pat_ccsd.search( i ):
return True
return False
def read_alpha_hf( fstr, freq = '0.0', in_AA = False, freqs = 1 ):
    # If freqs > 1, will return a list of alpha tensors, one per frequency
#
# Reading in Alpha tensor
fre = freq[0:7]
pat_alpha = re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*= *(-?\d*\.{1}\d+D*-?\+?\d*)')
pat_new_freq = re.compile(r'FREQUENCY.*SECOND ORDER')
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
# For every new frequency, will append this one and store alpha in the last
# element, otherwise, first column is first frequency by default
freqlist = None
lines = fstr.split('\n')
for i in lines:
if pat_new_freq.search( i ):
if freqlist is None:
freqlist = []
freqlist.append( np.zeros( (3,3 )) )
if pat_alpha.search( i ):
matched = pat_alpha.search(i).groups()
if "D" in matched[2]:
frac = float( matched[2].replace("D","E") )
else:
frac = float( matched[2] )
A = matched[0]
B = matched[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
freqlist[-1][lab.index( A ), lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
freqlist[-1][lab.index( B ), lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
freqlist[-1][lab.index( B ), lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
freqlist[-1][lab.index( B ), lab.index( A ) ] = frac
if freqs > 1:
return freqlist
return alpha
def read_energy( fname, calctype = 'HF' ):
"""Return the energy from dalton .out file fname"""
for line in open(fname).readlines():
if re.compile(r'.*Final.*energy').match(line):
return line.split()[-1]
def read_alpha_ccsd( fstr ):
mol_dip = np.zeros(3)
alpha = np.zeros( [3,3])
beta = np.zeros( [3,3,3])
beta_dict = {}
atoms = []
lab = ["X", "Y", "Z"]
pat_dipole = re.compile(r'Total Molecular Dipole Moment')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
pat_alpha= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*')
pat_beta= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*([XYZ])DIPLEN')
lines = fstr.split('\n')
# Reading in Alfa
for i in lines:
if pat_alpha.search( i ):
if len(i.split()) < 8:
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.search(i).groups(1)[0]
B = pat_alpha.search(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
return alpha
def read_beta_ccsd( fstr ):
mol_dip = np.zeros(3)
alpha = np.zeros( [3,3])
beta = np.zeros( [3,3,3])
beta_dict = {}
atoms = []
lab = ["X", "Y", "Z"]
pat_dipole = re.compile(r'Total Molecular Dipole Moment')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
pat_alpha= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*')
pat_beta= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*([XYZ])DIPLEN')
# Reading in dipole
lines = fstr.split('\n')
for i in range(len( lines )):
if pat_dipole.search( lines[i] ):
mol_dip[0] = lines[i+5].split()[1]
mol_dip[1] = lines[i+6].split()[1]
mol_dip[2] = lines[i+7].split()[1]
# Reading in Alfa
for i in lines:
if pat_alpha.search( i ):
if len(i.split()) < 8:
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.search(i).groups(1)[0]
B = pat_alpha.search(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
#For Beta
for i in lines:
if pat_beta.search( i ):
if len(i.split()) >8:
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
lab1 = pat_beta.search(i).groups(1)[0]
lab2 = pat_beta.search(i).groups(1)[1]
lab3 = pat_beta.search(i).groups(1)[2]
beta_dict[ "".join( [lab1 + lab2 + lab3]) ] = frac
for i, l1 in enumerate(lab):
for j, l2 in enumerate(lab):
for k, l3 in enumerate(lab):
beta[i, j, k] = beta_dict[ l1 + l2 + l3 ]
return atoms, mol_dip, alpha , beta
def read_beta( fstr, freq = "0.0", in_AA = False, out_AA = False ):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
# Reading in Beta tensor
fre = str("%.5f" % float(freq))
lab = ['X', 'Y', 'Z', ]
pat_beta = re.compile(r'@ B-freq')
lines = fstr.split('\n')
for i in lines:
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
return beta
def read_beta_hf( fstr, freq = "0.0", in_AA = False, out_AA = False ):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_Q = re.compile(r'Total charge of the molecule')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
#Special xyz hack for camb3lyp output from akka dalton to find atoms
pat_akka_xyz = re.compile(r'^\s*(\w+)\s+:\s+\d\s+x\s+(-*\d*\.+\d+)\s+\d\s+y\s+(-*\d*\.+\d+)\s+\d\s+z\s+(-*\d*\.+\d+) *$')
pat_labels_xyz = re.compile(r'^\s*(\S+-+\S+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
# Reading in dipole and charge
lines = fstr.split( '\n' )
for i in lines:
if pat_Q.search( i ):
Q = float(i.split()[-1])
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_akka_xyz.match(i):
f = pat_akka_xyz.match(i).groups()
matched = pat_akka_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_labels_xyz.match(i):
f = pat_labels_xyz.match(i).groups()
matched = pat_labels_xyz.match(i).groups()
lab = matched[0]
if len(lab.split('-')) == 4:
element = "H"
else:
element = lab.split('-')[2][0]
kwargs = { "AA": in_AA, "element" : element, "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
    #Set center of nuclei charge to 0
coc = sum([ x.r * charge_dic[x.element] for x in atoms ]) /\
sum([ charge_dic[x.element] for x in atoms ])
for i in atoms:
nuc_dip += charge_dic[ i.element ] * (i.r - coc )
if in_AA:
# Make sure center of charge is in Atomic units to give correct electronic dipole
coc /= a0
# Reading in Alfa and Beta tensor
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in lines:
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in lines:
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
N_el = sum([charge_dic[at.element] for at in atoms]) - Q
tot_dip = el_dip - coc * N_el
return atoms, tot_dip, alpha , beta
def read_props_qmmm( file_, freq = "0.0", in_AA = False ):
""" Same as read_beta_hf but skips coordinates not in allowd_elements
"""
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
# Reading in dipole
for i in open( file_ ).readlines():
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
if matched[0] not in allowed_elements:
continue
kwargs = { "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
for i in atoms:
nuc_dip[0] += charge_dic[ i.element ] * i.x
nuc_dip[1] += charge_dic[ i.element ] * i.y
nuc_dip[2] += charge_dic[ i.element ] * i.z
# Reading in Alfa and Beta tensor
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in open( file_ ).readlines():
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in open( file_ ).readlines():
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
if in_AA:
nuc_dip /= a0
tot_dip = nuc_dip - el_dip
return atoms, nuc_dip - el_dip, alpha , beta
def main():
"""
Program reads alpha and beta tensor and dipole moment from DALTON output
"""
args = run_argparse( sys.argv )
if args.alpha:
a = read_alpha( args.alpha, )
if args.beta_analysis:
beta_analysis(args, basis = args.basis,
dal = args.beta_dal, in_AA = args.in_AA,
out_AA = args.out_AA,
ncpu = args.Ncpu,
N_waters = args.N_waters)
if args.beta_analysis_par:
run_beta_analysis_par( N_waters = args.N_waters,
ncpu = args.Ncpu,
model = args.model )
if args.alpha_analysis:
alpha_analysis(args)
if args.qm_generation:
qm_generation(
qm_waters = args.qm_waters,
basis = args.basis
)
if args.qmmm_generation:
qmmm_generation(
qm_waters = args.qm_waters,
mm_waters = args.mm_waters,
potfreqs = args.potfreqs,
potstyle = args.potstyle,
basis = args.basis)
if args.qm_analysis:
qm_analysis( in_AA = args.in_AA,
out_AA = args.out_AA )
if args.qmmm_analysis:
qmmm_analysis( args )
if args.write:
write_related( args )
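# Illustrative command line (comments only; the flag values and file layout
# are assumptions -- see run_argparse above for all options):
#
#     python read_dal.py -beta_analysis -freq 0.0238927 -N_waters 15 -Ncpu 4
#
# This runs the beta-analysis branch of main() over DALTON quadratic-response
# outputs, collecting dipole, alpha and beta tensors with the read_beta_hf
# parsers defined in this module.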
def read_dipole( file_, freq = "0.0", in_AA = False, out_AA = False ):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_Q = re.compile(r'Total charge of the molecule')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
#Special xyz hack for camb3lyp output from akka dalton to find atoms
pat_akka_xyz = re.compile(r'^\s*(\w+)\s+:\s+\d\s+x\s+(-*\d*\.+\d+)\s+\d\s+y\s+(-*\d*\.+\d+)\s+\d\s+z\s+(-*\d*\.+\d+) *$')
pat_labels_xyz = re.compile(r'^\s*((?!-)\S+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
# Reading in dipole and charge
for i in open( file_ ).readlines():
if pat_Q.search( i ):
Q = float(i.split()[-1])
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_akka_xyz.match(i):
f = pat_akka_xyz.match(i).groups()
matched = pat_akka_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_labels_xyz.match(i):
f = pat_labels_xyz.match(i).groups()
matched = pat_labels_xyz.match(i).groups()
lab = matched[0]
if len(lab.split('-')) == 4:
element = "H"
else:
try:
element = lab.split('-')[2][0]
except IndexError as e:
                    warnings.warn( 'Occurred when finding wrong pattern for .xyz in read_dipole' )
continue
kwargs = { "AA": in_AA, "element" : element, "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
    #Set center of nuclei charge to 0
coc = sum([ x.r * charge_dic[x.element] for x in atoms ]) /\
sum([ charge_dic[x.element] for x in atoms ])
for i in atoms:
nuc_dip += charge_dic[ i.element ] * (i.r - coc )
if in_AA:
# Make sure center of charge is in Atomic units to give correct electronic dipole
coc /= a0
N_el = sum([charge_dic[at.element] for at in atoms]) - Q
tot_dip = el_dip - coc * N_el
return tot_dip
def read_props_qmmm( file_, freq = "0.0", in_AA = False ):
""" Same as read_beta_hf but skips coordinates not in allowd_elements
"""
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
# Reading in dipole
for i in open( file_ ).readlines():
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
if matched[0] not in allowed_elements:
continue
kwargs = { "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
for i in atoms:
nuc_dip[0] += charge_dic[ i.element ] * i.x
nuc_dip[1] += charge_dic[ i.element ] * i.y
nuc_dip[2] += charge_dic[ i.element ] * i.z
# Reading in Alfa and Beta tensor
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in open( file_ ).readlines():
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in open( file_ ).readlines():
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
if in_AA:
nuc_dip /= a0
tot_dip = nuc_dip - el_dip
return atoms, nuc_dip - el_dip, alpha , beta
def read_beta_hf( file_, freq = "0.0", in_AA = False, out_AA = False ):
with open( file_ ) as f:
return read_beta_hf_string( f.read(), freq = freq,
in_AA = in_AA, out_AA = out_AA )
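# Minimal usage sketch (comments only; the output file name is an assumption,
# any DALTON quadratic-response .out file matching the patterns below works):
#
#     atoms, dipole, alpha, beta = read_beta_hf("hfqua_water.out",
#                                                freq="0.0", in_AA=False)
#     # dipole has shape (3,), alpha (3, 3) and beta (3, 3, 3)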
def read_beta_hf_string( string_, freq = "0.0", in_AA = False, out_AA = False, akka = False):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_Q = re.compile(r'Total charge of the molecule')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
#Special xyz hack for camb3lyp output from akka dalton to find atoms
if akka:
pat_akka_xyz = re.compile(r'^\s*(\w+)\s+:\s+\d\s+x\s+(-*\d*\.+\d+)\s+\d\s+y\s+(-*\d*\.+\d+)\s+\d\s+z\s+(-*\d*\.+\d+) *$')
else:
pat_akka_xyz = re.compile(r'^(?!a)a')
pat_labels_xyz = re.compile(r'^\s*((?!-)\S+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
# Reading in dipole and charge
for i in string_.split('\n'):
if pat_Q.search( i ):
Q = float(i.split()[-1])
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_akka_xyz.match(i):
print i
print 'asdf'
raise SystemExit
f = pat_akka_xyz.match(i).groups()
matched = pat_akka_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_labels_xyz.match(i):
f = pat_labels_xyz.match(i).groups()
matched = pat_labels_xyz.match(i).groups()
lab = matched[0]
if len(lab.split('-')) == 4:
element = "H"
else:
try:
element = lab.split('-')[2][0]
except IndexError as e:
                    warnings.warn( 'Occurred when finding wrong pattern for .xyz in read_beta_hf_string' )
continue
kwargs = { "AA": in_AA, "element" : element, "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
remove = []
for ind, at in enumerate(atoms[:-1]):
for other in atoms[ind+1:]:
if at.equal( other ):
remove.append( other )
for each in remove:
atoms.remove( each )
    #Set center of nuclei charge to 0
coc = sum([ x.r * charge_dic[x.element] for x in atoms ]) /\
sum([ charge_dic[x.element] for x in atoms ])
for i in atoms:
nuc_dip += charge_dic[ i.element ] * (i.r - coc )
if in_AA and not out_AA:
# Make sure center of charge is in Atomic units to give correct electronic dipole
coc /= a0
# Reading in Alfa and Beta tensor
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in string_.split('\n'):
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in string_.split('\n'):
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
N_el = sum([charge_dic[at.element] for at in atoms]) - Q
tot_dip = -el_dip + coc * N_el
return atoms, tot_dip, alpha , beta
if __name__ == '__main__':
main()
|
fishstamp82/moltools
|
moltools/read_dal.py
|
Python
|
mit
| 45,486
|
[
"Dalton",
"Gaussian"
] |
34cf75861eef659357b2ecf9fd8b18767da7c9edd8f24057d17722e43b520647
|
"""Convert BAM/SAM to FASTQ format*
@name
sequence
+name
quality score (phred33)
files may be SAM or BAM (autodetected)
If the file(s) contain paired-end sequences, we will write to two files
(in the current working directory)
If the files contain single-end sequences, we will write to stdout by default
(diagnostic messages go to stderr; redirect stderr if you need to)
"""
import pysam
import os
import select
from scripter import path_to_executable
from argparse import ArgumentParser
from copy import copy
from subprocess import Popen, PIPE
from os import mkfifo, getcwd, devnull
from os.path import join, exists, abspath
import subprocess
from sys import argv, stdin, stdout, stderr, exit, executable
from gzip import GzipFile
from .discover import PATH_TO_GZIP, gzip_class_factory
class UnpairedBAMToFastqConverter(object):
"""Works with unpaired SAM/BAM file"""
def __init__(self, file_, wd=None, stderr=None, logger=None):
self.require_bam(file_)
if wd is None: wd = getcwd()
fifofile = join(wd, '._sot_fifo')
if exists(fifofile):
raise RuntimeError('%s already exists' % fifofile)
mkfifo(fifofile)
self.fifo = abspath(fifofile)
self._args = [executable, '-m', 'seriesoftubes.converters.bamtofastq',
file_, '--single-stdout', fifofile]
self._logger = logger
self._stderr = stderr
def launch(self):
if self._logger is not None:
self._logger.info('Launching %s', ' '.join(self._args))
self.subprocess = Popen(self._args, stdout=open(devnull, 'w'),
stderr=self._stderr, bufsize=-1)
def get_fifo_readers(self):
        return (open(self.fifo, 'r'),)
def require_bam(self, filename):
with open(filename, 'rb') as f:
head = f.read(3)
# check magic words for compression
if head == '\x1f\x8b\x08':
open_func = GzipFile
else:
open_func = open
uncompressed = open_func(filename)
head2 = uncompressed.read(4)
# check for BAM
if head2 == 'BAM\x01': return
# check for SAM
if head2 == '@HD\t': return
else: raise ValueError('Not a SAM/BAM file')
def main():
"""
what to do if we execute the module as a script
bamtofastq can only convert files (not stdin) because of the paired-end problem
"""
parser = ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', help='List of input files')
parser.add_argument('--no-gzip', action='store_true', default=False,
help='Do not compress output')
parser.add_argument('--single-stdout',
help='Save single-end reads to here (default: stdout)',
default=None)
args = parser.parse_args()
context = vars(args)
read_files(**context)
def pair_writer(out1, out2):
def writer(read1, read2):
out1.write('@%s\n%s\n+\n%s\n' % (read1.qname, read1.seq, read1.qual))
out2.write('@%s\n%s\n+\n%s\n' % (read2.qname, read2.seq, read2.qual))
return writer
def read_files(files=None, no_gzip=False, single_stdout=stdout):
"""
actually reads the SAM/BAM files
"""
for file in files:
if file is None: continue
f = pysam.Samfile(file)
#check if first read is paired
aread = f.next()
f.close()
f = pysam.Samfile(file)
if aread.is_paired:
if no_gzip:
print 'Detected paired-end reads, redirecting output to text files'
file1 = file + '_1.txt'
file2 = file + '_2.txt'
fh1 = open(file1, 'w')
fh2 = open(file2, 'w')
elif PATH_TO_GZIP is not None:
print 'Detected paired-end reads, redirecting output to .gz text files (using system gzip)'
file1 = file + '_1.txt.gz'
file2 = file + '_2.txt.gz'
open_func = gzip_class_factory(PATH_TO_GZIP)
fh1 = open_func(file1, 'wb')
fh2 = open_func(file2, 'wb')
else:
print 'Detected paired-end reads, redirecting output to .gz text files'
file1 = file + '_1.txt.gz'
file2 = file + '_2.txt.gz'
fh1 = GzipFile(file1, 'wb')
fh2 = GzipFile(file2, 'wb')
is_paired = False
write = pair_writer(fh1, fh2)
incomplete_pairs = []
for aread in f:
is_paired = False
qname = aread.qname
for i in xrange(len(incomplete_pairs)):
if incomplete_pairs[i].qname == qname:
mate_read = incomplete_pairs.pop(i)
# figure out order
if aread.flag & 0x4 == 0x4:
write(aread, mate_read)
else:
write(mate_read, aread)
is_paired = True
break
if not is_paired: incomplete_pairs.append(aread)
unpaired = len(incomplete_pairs)
if not unpaired == 0:
raise RuntimeError('%d unpaired reads remaining' % unpaired)
else:
if no_gzip:
open_func = open
elif PATH_TO_GZIP is not None:
open_func = gzip_class_factory(PATH_TO_GZIP)
else:
open_func = GzipFile
if single_stdout is None:
fh1 = stdout
else:
fh1 = open_func(single_stdout, 'wb')
for aread in f:
qname = aread.qname or ''
seq = aread.seq or ''
qual = aread.qual or ''
rec = '@%s\n%s\n+\n%s\n' % (qname, seq, qual)
fh1.write(rec)
fh1.close()
exit(0)
if __name__ == '__main__': main()
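# Example invocation (comments only; the input file name is an assumption):
#
#     python -m seriesoftubes.converters.bamtofastq reads.bam
#
# Paired-end input is written to reads.bam_1.txt.gz and reads.bam_2.txt.gz in
# the current directory; single-end input streams FASTQ to stdout unless
# --single-stdout FILE is given, and --no-gzip switches to plain .txt output.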
|
benjschiller/seriesoftubes
|
seriesoftubes/converters/bamtofastq.py
|
Python
|
artistic-2.0
| 6,093
|
[
"pysam"
] |
9398f8b15753dd81f6eb8bbf7c26f31b3826b6e0167c04df90b4af3d026d7f41
|
# Copyright 2013 Yajie Miao Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import numpy, theano
import theano.tensor as T
import collections
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
class RBM(object):
"""Bernoulli-bernoulli restricted Boltzmann machine (RBM) """
def __init__(self, input=None, n_visible=1024, n_hidden=1024,
W = None, hbias = None, vbias = None, numpy_rng = None,
theano_rng = None):
self.type = 'fc'
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng is None:
numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None :
theano_rng = RandomStreams(numpy_rng.randint(2**30))
if W is None :
initial_W = numpy.asarray( numpy_rng.uniform(
low = -4*numpy.sqrt(6./(n_hidden+n_visible)),
high = 4*numpy.sqrt(6./(n_hidden+n_visible)),
size = (n_visible, n_hidden)),
dtype = theano.config.floatX)
# shared variables for weights and biases
W = theano.shared(value = initial_W, name = 'W')
if hbias is None :
# shared variable for hidden units bias
hbias = theano.shared(value = numpy.zeros(n_hidden,
dtype = theano.config.floatX), name='hbias')
if vbias is None :
# shared variable for visible units bias
vbias = theano.shared(value = numpy.zeros(n_visible,
dtype = theano.config.floatX), name='vbias')
self.input = input
if not input:
self.input = T.matrix('input')
self.delta_W = theano.shared(value=numpy.zeros_like(W.get_value(borrow=True), dtype=theano.config.floatX), name='delta_W')
self.delta_hbias = theano.shared(value=numpy.zeros_like(hbias.get_value(borrow=True), dtype=theano.config.floatX), name='delta_hbias')
self.delta_vbias = theano.shared(value=numpy.zeros_like(vbias.get_value(borrow=True), dtype=theano.config.floatX), name='delta_vbias')
self.W = W
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
# delta_parameters used with momentum
self.delta_params = [self.delta_W, self.delta_hbias, self.delta_vbias]
self.params = [self.W, self.hbias, self.vbias]
def free_energy(self, v_sample):
''' Compute the free energy '''
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1+T.exp(wx_b)),axis = 1)
return -hidden_term - vbias_term
def propup(self, vis):
''' Propagate the visible activations up to the hidden units '''
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' Generates hidden unit outputs given visible inputs '''
# the activation of the hidden units given visibles
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
# assume that the hidden units will take sigmoid functions
h1_sample = self.theano_rng.binomial(size = h1_mean.shape, n = 1, p = h1_mean,
dtype = theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''Propagates the hidden activation downwards to the visible units'''
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' Generates visible units given hidden units '''
# the activation of the visible units given hiddens
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# assume that the visible inputs are binary values
v1_sample = self.theano_rng.binomial(size = v1_mean.shape,n = 1,p = v1_mean,
dtype = theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' Gibbs sampling starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample, pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' Gibbs sampling starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample, pre_sigmoid_v1, v1_mean, v1_sample]
def get_cost_updates(self, batch_size = 128, lr = 0.0001, momentum=0.5, weight_cost=0.00001, persistent=None, k = 1):
x, hp_data, h_data = self.sample_h_given_v(self.input)
v_rec, v_rec_sigm, v_rec_sample = self.sample_v_given_h(h_data)
a, hp_rec, b = self.sample_h_given_v(v_rec_sigm)
# gradient of parameters
updates = collections.OrderedDict()
updates[self.delta_W] = momentum * self.delta_W + lr * (1.0/batch_size) * (T.dot(self.input.T, hp_data) - T.dot(v_rec_sigm.T, hp_rec)) - lr * weight_cost * self.W
updates[self.delta_hbias] = momentum * self.delta_hbias + lr * (1.0/batch_size) * (T.sum(h_data, axis=0) - T.sum(hp_rec, axis=0))
updates[self.delta_vbias] = momentum * self.delta_vbias + lr * (1.0/batch_size) * (T.sum(self.input, axis=0) - T.sum(v_rec_sigm, axis=0))
for param, dparam in zip(self.params, self.delta_params):
updates[param] = param + updates[dparam]
        # approximation to the free-energy cost (difference of mean free energies)
cost = T.mean(self.free_energy(self.input)) - T.mean(self.free_energy(v_rec_sample))
# reconstruction cost
monitoring_cost = T.mean(T.sqr(self.input-v_rec_sigm))
return monitoring_cost, cost, updates
def is_gbrbm(self):
return False
class GBRBM(RBM):
"""Gaussian-bernoulli restricted Boltzmann machine"""
def __init__(self, input=None, n_visible=351, n_hidden=1000,
W = None, hbias = None, vbias = None,
numpy_rng = None, theano_rng = None):
super(GBRBM, self).__init__(input=input, n_visible=n_visible, n_hidden=n_hidden,
W=W, hbias=hbias, vbias=vbias, numpy_rng=numpy_rng, theano_rng=theano_rng)
def free_energy(self, v_sample):
''' Compute the free energy '''
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = 0.5 * T.dot((v_sample - self.vbias), (v_sample - self.vbias).T)
hidden_term = T.sum(T.log(1+T.exp(wx_b)), axis = 1)
return -hidden_term - vbias_term
def sample_v_given_h(self, h0_sample):
''' Generates visible units given hidden units '''
# Compute the activation of the visible given the hiddens
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
v1_sample = self.theano_rng.normal(size = v1_mean.shape, avg=0.0, std=1.0,
dtype = theano.config.floatX) + pre_sigmoid_v1
return [pre_sigmoid_v1, v1_mean, v1_sample]
def get_cost_updates(self, batch_size = 128, lr = 0.0001, momentum=0.5, weight_cost=0.00001, persistent=None, k = 1):
x, hp_data, h_data = self.sample_h_given_v(self.input)
v_rec, z, t = self.sample_v_given_h(h_data)
a, hp_rec, b = self.sample_h_given_v(v_rec) #hid rec
updates = collections.OrderedDict()
updates[self.delta_W] = momentum * self.delta_W + lr * (1.0/batch_size) * (T.dot(self.input.T, hp_data) - T.dot(v_rec.T, hp_rec)) - lr * weight_cost * self.W
updates[self.delta_hbias] = momentum * self.delta_hbias + lr * (1.0/batch_size) * (T.sum(h_data, axis=0) - T.sum(hp_rec, axis=0))
updates[self.delta_vbias] = momentum * self.delta_vbias + lr * (1.0/batch_size) * (T.sum(self.input, axis=0) - T.sum(v_rec, axis=0))
updates[self.W] = self.W + updates[self.delta_W]
updates[self.hbias] = self.hbias + updates[self.delta_hbias]
updates[self.vbias] = self.vbias + updates[self.delta_vbias]
        # approximation to the free-energy cost (difference of mean free energies)
cost = T.mean(self.free_energy(self.input)) - T.mean(self.free_energy(v_rec))
# reconstruction cost
monitoring_cost = T.mean(T.sqr(self.input - v_rec))
return monitoring_cost, cost, updates
def is_gbrbm(self):
return True
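# Minimal training sketch, kept as comments so the module has no import-time
# side effects; `data_x` is an assumed theano shared variable holding one
# minibatch of visible vectors:
#
#     x = T.matrix('x')
#     rbm = RBM(input=x, n_visible=1024, n_hidden=1024)
#     recon_cost, fe_cost, updates = rbm.get_cost_updates(batch_size=128,
#                                                         lr=0.0001,
#                                                         momentum=0.5)
#     train_fn = theano.function([], recon_cost, updates=updates,
#                                givens={x: data_x})
#     for epoch in range(10):
#         print train_fn()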
|
yajiemiao/pdnn
|
layers/rbm.py
|
Python
|
apache-2.0
| 9,456
|
[
"Gaussian"
] |
487d40cd334bafb06293388650a8c28fe09ed8c450e1697ad48254431fb77b03
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2010 Jeffrey Finkelstein. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit test for Scop"""
from __future__ import print_function
import unittest
from Bio._py3k import StringIO
from Bio._py3k import zip
from Bio.SCOP import *
class ScopTests(unittest.TestCase):
def _compare_cla_lines(self, cla_line_1, cla_line_2):
"""Compares the two specified Cla lines for equality.
The order of the key-value pairs in the sixth field of the lines does
not matter. For more information, see
http://scop.mrc-lmb.cam.ac.uk/scop/release-notes.html.
"""
fields1 = cla_line_1.rstrip().split('\t')
fields2 = cla_line_2.rstrip().split('\t')
print(fields1)
print(fields2)
# compare the first five fields in a Cla line, which should be exactly
# the same
if fields1[:5] != fields2[:5]:
return False
# compare the hierarchy key-value pairs, which are unordered
if set(fields1[5].split(',')) != set(fields2[5].split(',')):
return False
return True
def testParse(self):
f = open("./SCOP/dir.cla.scop.txt_test")
try:
cla = f.read()
f.close()
f = open("./SCOP/dir.des.scop.txt_test")
des = f.read()
f.close()
f = open("./SCOP/dir.hie.scop.txt_test")
hie = f.read()
finally:
f.close()
scop = Scop(StringIO(cla), StringIO(des), StringIO(hie))
cla_out = StringIO()
scop.write_cla(cla_out)
lines = zip(cla.rstrip().split('\n'),
cla_out.getvalue().rstrip().split('\n'))
for expected_line, line in lines:
self.assertTrue(self._compare_cla_lines(expected_line, line))
des_out = StringIO()
scop.write_des(des_out)
self.assertEqual(des_out.getvalue(), des)
hie_out = StringIO()
scop.write_hie(hie_out)
self.assertEqual(hie_out.getvalue(), hie)
domain = scop.getDomainBySid("d1hbia_")
self.assertEqual(domain.sunid, 14996)
domains = scop.getDomains()
self.assertEqual(len(domains), 14)
self.assertEqual(domains[4].sunid, 14988)
dom = scop.getNodeBySunid(-111)
self.assertEqual(dom, None)
dom = scop.getDomainBySid("no such domain")
self.assertEqual(dom, None)
def testSccsOrder(self):
self.assertEqual(cmp_sccs("a.1.1.1", "a.1.1.1"), 0)
self.assertEqual(cmp_sccs("a.1.1.2", "a.1.1.1"), 1)
self.assertEqual(cmp_sccs("a.1.1.2", "a.1.1.11"), -1)
self.assertEqual(cmp_sccs("a.1.2.2", "a.1.1.11"), 1)
self.assertEqual(cmp_sccs("a.1.2.2", "a.5.1.11"), -1)
self.assertEqual(cmp_sccs("b.1.2.2", "a.5.1.11"), 1)
self.assertEqual(cmp_sccs("b.1.2.2", "b.1.2"), 1)
def testParseDomain(self):
s=">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}"
dom = parse_domain(s)
self.assertEqual(dom.sid, 'd1tpt_1')
self.assertEqual(dom.sccs, 'a.46.2.1')
self.assertEqual(dom.residues.pdbid, '1tpt')
self.assertEqual(dom.description, 'Thymidine phosphorylase {Escherichia coli}')
s2="d1tpt_1 a.46.2.1 (1tpt 1-70) Thymidine phosphorylase {E. coli}"
self.assertEqual(s2, str(parse_domain(s2)))
# Genetic domains (See Astral release notes)
s3="g1cph.1 g.1.1.1 (1cph B:,A:) Insulin {Cow (Bos taurus)}"
self.assertEqual(s3, str(parse_domain(s3)))
s4="e1cph.1a g.1.1.1 (1cph A:) Insulin {Cow (Bos taurus)}"
self.assertEqual(s4, str(parse_domain(s4)))
# Raw Astral header
s5=">e1cph.1a g.1.1.1 (A:) Insulin {Cow (Bos taurus)}"
self.assertEqual(s4, str(parse_domain(s5)))
self.assertRaises(ValueError, parse_domain, "Totally wrong")
def testConstructFromDirectory(self):
scop = Scop(dir_path="SCOP", version="test")
self.assertTrue(isinstance(scop, Scop))
domain = scop.getDomainBySid("d1hbia_")
self.assertEqual(domain.sunid, 14996)
def testGetAscendent(self):
scop = Scop(dir_path="SCOP", version="test")
domain = scop.getDomainBySid("d1hbia_")
# get the fold
fold = domain.getAscendent('cf')
self.assertEqual(fold.sunid, 46457)
# get the superfamily
sf = domain.getAscendent('superfamily')
self.assertEqual(sf.sunid, 46458)
# px has no px ascendent
px = domain.getAscendent('px')
self.assertEqual(px, None)
# an sf has no px ascendent
px2 = sf.getAscendent('px')
self.assertEqual(px2, None)
def test_get_descendents(self):
"""Test getDescendents method"""
scop = Scop(dir_path="SCOP", version="test")
fold = scop.getNodeBySunid(46457)
# get px descendents
domains = fold.getDescendents('px')
self.assertEqual(len(domains), 14)
for d in domains:
self.assertEqual(d.type, 'px')
sfs = fold.getDescendents('superfamily')
self.assertEqual(len(sfs), 1)
for d in sfs:
self.assertEqual(d.type, 'sf')
# cl has no cl descendent
cl = fold.getDescendents('cl')
self.assertEqual(cl, [])
if __name__=='__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_SCOP_Scop.py
|
Python
|
gpl-2.0
| 5,661
|
[
"Biopython"
] |
d9b9e648d40a872ca0c3f869e89a6454acb267eb50981ceb24a2f90859511c83
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Test hoomd.hpmc.update.QuickCompress."""
import hoomd
from hoomd.conftest import operation_pickling_check
import pytest
import math
# note: The parameterized tests validate parameters so we can't pass in values
# here that require preprocessing
valid_constructor_args = [
dict(trigger=hoomd.trigger.Periodic(10),
target_box=hoomd.Box.from_box([10, 10, 10])),
dict(trigger=hoomd.trigger.After(100),
target_box=hoomd.Box.from_box([10, 20, 40]),
max_overlaps_per_particle=0.2),
dict(trigger=hoomd.trigger.Before(100),
target_box=hoomd.Box.from_box([50, 50]),
min_scale=0.75),
dict(trigger=hoomd.trigger.Periodic(1000),
target_box=hoomd.Box.from_box([80, 50, 40, 0.2, 0.4, 0.5]),
max_overlaps_per_particle=0.2,
min_scale=0.999),
]
valid_attrs = [
('trigger', hoomd.trigger.Periodic(10000)),
('trigger', hoomd.trigger.After(100)),
('trigger', hoomd.trigger.Before(12345)),
('target_box', hoomd.Box.from_box([10, 20, 30])),
('target_box', hoomd.Box.from_box([50, 50])),
('max_overlaps_per_particle', 0.2),
('max_overlaps_per_particle', 0.5),
('max_overlaps_per_particle', 2.5),
('min_scale', 0.1),
('min_scale', 0.5),
('min_scale', 0.9999),
]
@pytest.mark.parametrize("constructor_args", valid_constructor_args)
def test_valid_construction(constructor_args):
"""Test that QuickCompress can be constructed with valid arguments."""
qc = hoomd.hpmc.update.QuickCompress(**constructor_args)
# validate the params were set properly
for attr, value in constructor_args.items():
assert getattr(qc, attr) == value
@pytest.mark.parametrize("constructor_args", valid_constructor_args)
def test_valid_construction_and_attach(simulation_factory,
two_particle_snapshot_factory,
constructor_args):
"""Test that QuickCompress can be attached with valid arguments."""
qc = hoomd.hpmc.update.QuickCompress(**constructor_args)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.updaters.append(qc)
# QuickCompress requires an HPMC integrator
mc = hoomd.hpmc.integrate.Sphere()
mc.shape['A'] = dict(diameter=1)
sim.operations.integrator = mc
sim.operations._schedule()
# validate the params were set properly
for attr, value in constructor_args.items():
assert getattr(qc, attr) == value
@pytest.mark.parametrize("attr,value", valid_attrs)
def test_valid_setattr(attr, value):
"""Test that QuickCompress can get and set attributes."""
qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10),
target_box=hoomd.Box.from_box(
[10, 10, 10]))
setattr(qc, attr, value)
assert getattr(qc, attr) == value
@pytest.mark.parametrize("attr,value", valid_attrs)
def test_valid_setattr_attached(attr, value, simulation_factory,
two_particle_snapshot_factory):
"""Test that QuickCompress can get and set attributes while attached."""
qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10),
target_box=hoomd.Box.from_box(
[10, 10, 10]))
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.updaters.append(qc)
# QuickCompress requires an HPMC integrator
mc = hoomd.hpmc.integrate.Sphere()
mc.shape['A'] = dict(diameter=1)
sim.operations.integrator = mc
sim.operations._schedule()
setattr(qc, attr, value)
assert getattr(qc, attr) == value
@pytest.mark.parametrize("phi", [0.2, 0.3, 0.4, 0.5, 0.55, 0.58, 0.6])
@pytest.mark.validate
def test_sphere_compression(phi, simulation_factory, lattice_snapshot_factory):
"""Test that QuickCompress can compress (and expand) simulation boxes."""
n = 7
snap = lattice_snapshot_factory(n=n, a=1.1)
v_particle = 4 / 3 * math.pi * (0.5)**3
target_box = hoomd.Box.cube((n * n * n * v_particle / phi)**(1 / 3))
qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10),
target_box=target_box)
sim = simulation_factory(snap)
sim.operations.updaters.append(qc)
mc = hoomd.hpmc.integrate.Sphere(default_d=0.05)
mc.shape['A'] = dict(diameter=1)
sim.operations.integrator = mc
sim.run(1)
# compression should not be complete yet
assert not qc.complete
# run long enough to compress the box
while not qc.complete and sim.timestep < 1e5:
sim.run(100)
# compression should end the run early
assert qc.complete
assert mc.overlaps == 0
assert sim.state.box == target_box
@pytest.mark.parametrize("phi", [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
@pytest.mark.validate
def test_disk_compression(phi, simulation_factory, lattice_snapshot_factory):
"""Test that QuickCompress can compress (and expand) simulation boxes."""
n = 7
snap = lattice_snapshot_factory(dimensions=2, n=n, a=1.1)
v_particle = math.pi * (0.5)**2
target_box = hoomd.Box.square((n * n * v_particle / phi)**(1 / 2))
qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10),
target_box=target_box)
sim = simulation_factory(snap)
sim.operations.updaters.append(qc)
mc = hoomd.hpmc.integrate.Sphere(default_d=0.05)
mc.shape['A'] = dict(diameter=1)
sim.operations.integrator = mc
sim.run(1)
# compression should not be complete yet
assert not qc.complete
while not qc.complete and sim.timestep < 1e5:
sim.run(100)
# compression should end the run early
assert qc.complete
assert mc.overlaps == 0
assert sim.state.box == target_box
def test_pickling(simulation_factory, two_particle_snapshot_factory):
"""Test that QuickCompress objects are picklable."""
qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10),
target_box=hoomd.Box.square(10.))
sim = simulation_factory(two_particle_snapshot_factory())
mc = hoomd.hpmc.integrate.Sphere(default_d=0.05)
mc.shape['A'] = dict(diameter=1)
sim.operations.integrator = mc
operation_pickling_check(qc, sim)
|
joaander/hoomd-blue
|
hoomd/hpmc/pytest/test_quick_compress.py
|
Python
|
bsd-3-clause
| 6,587
|
[
"HOOMD-blue"
] |
9f851d4cf6cfec816cf0aa5cf13e847a441da034dbb4bcb92520315e487a0e74
|
# -*- coding: utf-8 -*-
from .base import FunctionalTestCase
from .pages import game
DATE_REGEX = r'\[\d{1,2}-\d{1,2} \d{2}:\d{2}\] '
class LogTests(FunctionalTestCase):
"""Tests for logging events"""
def test_shows_new_game_message_on_game_creation(self):
self.story('Alice is a user who starts a new game')
self.browser.get(self.server_url)
page = game.Homepage(self.browser)
page.start_button.click()
self.assertRegex(self.browser.current_url, r'/game/([^/]+)$')
self.story('There is a log section, the first entry displays a new'
' game started message')
gamepage = game.GamePage(self.browser)
self.assertRegex(gamepage.log[0].text,
DATE_REGEX + 'New game started')
def test_creating_player_adds_entry_to_log(self):
self.story('Alice is a user who starts a new game')
self.browser.get(self.server_url)
homepage = game.Homepage(self.browser)
homepage.start_button.click()
self.story('She continues to create a new player')
game_page = game.GamePage(self.browser)
game_page.add_player_link.click()
add_player = game.AddPlayerPage(self.browser)
add_player.name.send_keys('Alice')
add_player.cash.send_keys('250\n')
self.story('She returns to the game page and sees that an extra item '
'has been added to the log')
self.assertEqual(len(game_page.log), 2)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Added player Alice with 250 starting cash')
def test_creating_company_adds_entry_to_log(self):
self.story('Alice is a user who starts a new game')
self.browser.get(self.server_url)
homepage = game.Homepage(self.browser)
homepage.start_button.click()
self.story('She continues to create a new company')
game_page = game.GamePage(self.browser)
game_page.add_company_link.click()
add_company = game.AddCompanyPage(self.browser)
add_company.name.send_keys('B&O')
add_company.cash.send_keys('820')
add_company.select_text_color('yellow-600')
add_company.select_background_color('blue-700')
add_company.shares.clear()
add_company.shares.send_keys('4\n')
self.story('She returns to the game page and sees that an extra item '
'has been added to the log')
self.assertEqual(len(game_page.log), 2)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Added 4-share company B&O with 820 starting cash')
self.assertIn('fg-yellow-600',
game_page.log[0].get_attribute('class'))
self.assertIn('bg-blue-700',
game_page.log[0].get_attribute('class'))
def test_transfering_money_from_player_to_bank_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game(cash=1000)
self.create_player(game_uuid, 'Alice', cash=1000)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Alice opens her player section and transfers money to '
'the bank (which is the default)')
transfer_form = game.TransferForm(self.browser)
player = game_page.get_players()[0]
player['row'].click()
transfer_form.amount.clear()
transfer_form.amount.send_keys('50\n')
self.story('The page reloads and she sees money has changed hands')
player = game_page.get_players()[0]
self.assertEqual(player['cash'].text, '950')
self.story('There is also a new entry in the log (no initial entry)')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Alice transfered 50 to the bank')
def test_transfering_money_from_player_to_player_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_player(game_uuid, 'Alice', cash=1000)
self.create_player(game_uuid, 'Bob', cash=1000)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Alice opens her player section and transfers money to Bob')
transfer_form = game.TransferForm(self.browser)
alice = game_page.get_players()[0]
alice['row'].click()
transfer_form.select_target('Bob')
transfer_form.amount.send_keys('60\n')
self.story('There is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Alice transfered 60 to Bob')
def test_transfering_money_from_player_to_company_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_player(game_uuid, 'Alice', cash=1000)
self.create_company(game_uuid, 'NNH', cash=0)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Alice opens her player section and transfers money to NNH')
transfer_form = game.TransferForm(self.browser)
alice = game_page.get_players()[0]
alice['row'].click()
transfer_form.select_target('NNH')
transfer_form.amount.send_keys('70\n')
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Alice transfered 70 to NNH')
def test_transfering_money_from_company_to_bank_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_company(game_uuid, 'B&M', cash=1000, text='amber-500',
background='red-900')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Alice opens the B&M and transfers money to the bank')
transfer_form = game.TransferForm(self.browser)
company = game_page.get_companies()[0]
company['elem'].click()
transfer_form.amount.send_keys('80\n')
self.story('The page reloads and there is a new log entry')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'B&M transfered 80 to the bank')
self.assertIn('fg-amber-500',
game_page.log[0].get_attribute('class'))
self.assertIn('bg-red-900',
game_page.log[0].get_attribute('class'))
def test_transfering_money_from_company_to_company_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_company(game_uuid, 'NNH', cash=1000, text='orange-500')
self.create_company(game_uuid, 'NYC', cash=1000, text='black')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Alice opens the NNH and transfers money to the bank')
transfer_form = game.TransferForm(self.browser)
company = game_page.get_companies()[0]
company['elem'].click()
transfer_form.select_target('NYC')
transfer_form.amount.send_keys('90\n')
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'NNH transfered 90 to NYC')
self.assertIn('fg-orange-500',
game_page.log[0].get_attribute('class'))
def test_transfering_money_from_company_to_player_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_player(game_uuid, 'Alice', cash=1000)
self.create_company(game_uuid, 'PRR', cash=0, text='green-500')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Alice opens PRR section and transfers money to herself')
transfer_form = game.TransferForm(self.browser)
company = game_page.get_companies()[0]
company['elem'].click()
transfer_form.select_target('Alice')
transfer_form.amount.send_keys('100\n')
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'PRR transfered 100 to Alice')
self.assertIn('fg-green-500',
game_page.log[0].get_attribute('class'))
def test_player_buying_share_from_IPO_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_player(game_uuid, 'Alice', cash=1000)
self.create_company(game_uuid, 'C&O', cash=1000)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Set the value of the C&O')
company = game_page.get_companies()[0]
company['value'].send_keys('10')
self.story('Open Alices detail section, buy a share C&O')
player = game_page.get_players()[0]
player['row'].click()
share_form = game.ShareForm(self.browser)
share_form.shares.clear()
share_form.shares.send_keys('2')
share_form.select_company('C&O')
share_form.transfer_button.click()
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Alice bought 2 shares C&O from the IPO for 10 each')
def test_company_buying_share_from_pool_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_company(game_uuid, 'NYC', bank_shares=5, text='black')
self.create_company(game_uuid, 'PRR', cash=1000, text='green-500')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Set the value of the NYC')
nyc, prr = game_page.get_companies()
self.assertEqual(nyc['name'].text, 'NYC')
nyc['value'].send_keys('20')
self.story('Alice opens the PRRs detail section and buys NYC')
prr['row'].click()
share_form = game.ShareForm(self.browser)
share_form.shares.clear()
share_form.shares.send_keys('3')
share_form.select_company('NYC')
share_form.select_source('bank')
share_form.transfer_button.click()
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'PRR bought 3 shares NYC from the bank for 20 each')
self.assertIn('fg-green-500',
game_page.log[0].get_attribute('class'))
def test_player_buying_share_from_company_treasury_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
self.create_player(game_uuid, 'Alice', cash=1000)
company_uuid = self.create_company(game_uuid, 'B&O', ipo_shares=0)
self.create_company_share(company_uuid, company_uuid, shares=10)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Set the value of the B&O')
company = game_page.get_companies()[0]
company['value'].send_keys('30')
self.story('Open Alices detail section, buy a share B&O')
player = game_page.get_players()[0]
player['row'].click()
share_form = game.ShareForm(self.browser)
share_form.shares.clear()
share_form.shares.send_keys('6')
share_form.select_company('B&O')
share_form.select_source('B&O')
share_form.transfer_button.click()
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Alice bought 6 shares B&O from B&O for 30 each')
def test_player_selling_shares_to_pool_adds_log_entry(self):
self.story('Alice is a user who starts a new game')
game_uuid = self.create_game()
player_uuid = self.create_player(game_uuid, 'Alice')
company_uuid = self.create_company(game_uuid, 'C&O')
self.create_player_share(player_uuid, company_uuid, shares=5)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Set the value of the C&O')
company = game_page.get_companies()[0]
company['value'].send_keys('40')
self.story('Alice opens her detail section and sells some shares')
player = game_page.get_players()[0]
player['row'].click()
share_form = game.ShareForm(self.browser)
share_form.shares.clear()
share_form.shares.send_keys('4')
share_form.sell_share.click()
share_form.select_company('C&O')
share_form.select_source('bank')
share_form.transfer_button.click()
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Alice sold 4 shares C&O to the bank for 40 each')
def test_company_selling_shares_to_IPO_adds_log_entry(self):
self.story('Alice is a user who starts a game')
game_uuid = self.create_game()
company_uuid = self.create_company(game_uuid, 'CPR', text='red-500')
self.create_company_share(company_uuid, company_uuid, shares=10)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Set the value of the CPR')
company = game_page.get_companies()[0]
company['value'].send_keys('50')
self.story('Alice opens the CPRs detail section and sells shares')
company['elem'].click()
share_form = game.ShareForm(self.browser)
share_form.sell_share.click()
share_form.select_company('CPR')
share_form.shares.clear()
share_form.shares.send_keys('2\n')
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'CPR sold 2 shares CPR to the IPO for 50 each')
self.assertIn('fg-red-500',
game_page.log[0].get_attribute('class'))
def test_player_selling_shares_to_company_adds_log_entry(self):
self.story('Alice is a user who starts a game')
game_uuid = self.create_game()
player_uuid = self.create_player(game_uuid, 'Alice')
company_uuid = self.create_company(game_uuid, 'B&M', cash=1000)
self.create_player_share(player_uuid, company_uuid, shares=5)
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Set the value of the B&M')
company = game_page.get_companies()[0]
company['value'].send_keys('60')
self.story('Alice opens her detail section and sells the shares')
player = game_page.get_players()[0]
player['row'].click()
share_form = game.ShareForm(self.browser)
share_form.sell_share.click()
share_form.select_company('B&M')
share_form.select_source('B&M')
share_form.shares.clear()
share_form.shares.send_keys('3\n')
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Alice sold 3 shares B&M to B&M for 60 each')
def test_company_operating_adds_log_entry(self):
self.story('Create a game with a company')
game_uuid = self.create_game()
self.create_company(game_uuid, 'CPR', text='red-500',
background='black')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Open the CPR detail section, operate for some money')
company = game_page.get_companies()[0]
company['elem'].click()
operate_form = game.OperateForm(self.browser)
operate_form.revenue.clear()
operate_form.revenue.send_keys('70')
operate_form.full.click()
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'CPR operates for 70 which is paid as dividends')
self.assertIn('fg-red-500',
game_page.log[0].get_attribute('class'))
self.assertIn('bg-black',
game_page.log[0].get_attribute('class'))
def test_company_withholding_adds_log_entry(self):
self.story('Create a game with a company')
game_uuid = self.create_game()
self.create_company(game_uuid, 'Erie', background='amber-300')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Open the Erie detail section, operate for some money')
company = game_page.get_companies()[0]
company['elem'].click()
operate_form = game.OperateForm(self.browser)
operate_form.revenue.clear()
operate_form.revenue.send_keys('80')
operate_form.withhold.click()
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Erie withholds 80')
self.assertIn('bg-amber-300',
game_page.log[0].get_attribute('class'))
def test_company_paying_half_adds_log_entry(self):
self.story('Create a game with a company')
game_uuid = self.create_game()
self.create_company(game_uuid, 'NNH', background='orange-500')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Open the NNH detail section, operate for some money')
company = game_page.get_companies()[0]
company['elem'].click()
operate_form = game.OperateForm(self.browser)
operate_form.revenue.clear()
operate_form.revenue.send_keys('90')
operate_form.half.click()
self.story('The page updates and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'NNH operates for 90 of which it retains half')
self.assertIn('bg-orange-500',
game_page.log[0].get_attribute('class'))
def test_editing_company_adds_log_entry(self):
self.story('Create a game with a company')
game_uuid = self.create_game()
self.create_company(game_uuid, 'B&O')
self.browser.get(self.server_url + '/game/' + game_uuid)
game_page = game.GamePage(self.browser)
self.assertEqual(len(game_page.log), 0)
self.story('Go to B&O edit screen, change some info')
company = game_page.get_companies()[0]
company['elem'].click()
company['edit'].click()
edit_company = game.EditCompanyPage(self.browser)
edit_company.select_background_color('blue-800')
edit_company.shares.send_keys('0\n')
self.story('Return to the game page and there is an entry in the log')
self.assertEqual(len(game_page.log), 1)
self.assertRegex(game_page.log[0].text,
DATE_REGEX + 'Company B&O has been edited')
self.assertIn('bg-blue-800', game_page.log[0].get_attribute('class'))
|
XeryusTC/18xx-accountant
|
accountant/functional_tests/test_log.py
|
Python
|
mit
| 21,085
|
[
"Amber"
] |
29997ef29b9515d1e09c9a20bebc9887a1d201471a7f06667d9e99bff901a244
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import time
from abc import ABC, abstractmethod
from typing import Callable, List
import numpy as np
from psi4 import core
from .exceptions import ValidationError
"""
Generalized iterative solvers for Psi4.
"""
def cg_solver(rhs_vec: List[core.Matrix], hx_function: Callable, preconditioner: Callable, guess: List[core.Matrix] = None, printer: Callable = None, printlvl: int = 1, maxiter: int = 20, rcond: float = 1.e-6) -> List[core.Matrix]:
"""
Solves the Ax = b linear equations via Conjugate Gradient. The `A` matrix must be a hermitian, positive definite matrix.
Parameters
----------
rhs_vec
The RHS vector in the Ax=b equation.
hx_function
Takes in a list of :py:class:`~psi4.core.Matrix` objects and a mask of active indices. Returns the Hessian-vector product.
preconditioner
Takes in a list of :py:class:`~psi4.core.Matrix` objects and a mask of active indices. Returns the preconditioned value.
guess
Starting vectors, if None use a preconditioner(rhs) guess
printer
Takes in a list of current x and residual vectors and provides a print function. This function can also
return a value that represents the current residual.
printlvl
The level of printing provided by this function.
maxiter
The maximum number of iterations this function will take.
rcond
The residual norm for convergence.
Returns
-------
ret : List[Matrix]
Returns the solved `x` vectors and `r` vectors.
Notes
-----
This is a generalized CG solver that can also solve multiple right-hand sides simultaneously when
doing so is advantageous.
Examples
--------
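A minimal sketch with a hypothetical 2x2 diagonal problem (not taken from a real
calculation); ``hx`` and ``precond`` below stand in for the true Hessian-vector
product and preconditioner::

    A_diag = np.array([4.0, 9.0])
    rhs = [core.Matrix.from_array(np.array([[1.0, 1.0]]))]

    def hx(vecs, mask):
        return [core.Matrix.from_array(v.to_array() * A_diag) for v in vecs]

    def precond(vecs, mask):
        return [core.Matrix.from_array(v.to_array() / A_diag) for v in vecs]

    x, r = cg_solver(rhs, hx, precond, printlvl=0)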
"""
tstart = time.time()
if printlvl:
core.print_out("\n -----------------------------------------------------\n")
core.print_out(" " + "Generalized CG Solver".center(52) + "\n")
core.print_out(" " + "by Daniel. G. A. Smith".center(52) + "\n")
core.print_out(" -----------------------------------------------------\n")
core.print_out(" Maxiter = %11d\n" % maxiter)
core.print_out(" Convergence = %11.3E\n" % rcond)
core.print_out(" Number of equations = %11ld\n\n" % len(rhs_vec))
core.print_out(" %4s %14s %12s %6s %6s\n" % ("Iter", "Residual RMS", "Max RMS", "Remain", "Time [s]"))
core.print_out(" -----------------------------------------------------\n")
nrhs = len(rhs_vec)
active_mask = [True for x in range(nrhs)]
# Start function
if guess is None:
x_vec = preconditioner(rhs_vec, active_mask)
else:
if len(guess) != len(rhs_vec):
raise ValidationError("CG Solver: Guess vector length does not match RHS vector length.")
x_vec = [x.clone() for x in guess]
Ax_vec = hx_function(x_vec, active_mask)
# Set it up
r_vec = [] # Residual vectors
for x in range(nrhs):
tmp_r = rhs_vec[x].clone()
tmp_r.axpy(-1.0, Ax_vec[x])
r_vec.append(tmp_r)
z_vec = preconditioner(r_vec, active_mask)
p_vec = [x.clone() for x in z_vec]
# First RMS
grad_dot = [x.sum_of_squares() for x in rhs_vec]
resid = [(r_vec[x].sum_of_squares() / grad_dot[x])**0.5 for x in range(nrhs)]
if printer:
resid = printer(0, x_vec, r_vec)
elif printlvl:
# core.print_out(' CG Iteration Guess: Rel. RMS = %1.5e\n' % np.mean(resid))
core.print_out(" %5s %14.3e %12.3e %7d %9d\n" %
("Guess", np.mean(resid), np.max(resid), len(z_vec), time.time() - tstart))
rms = np.mean(resid)
rz_old = [0.0 for x in range(nrhs)]
alpha = [0.0 for x in range(nrhs)]
active = np.where(active_mask)[0]
# CG iterations
for rot_iter in range(maxiter):
# Build old RZ so we can discard vectors
for x in active:
rz_old[x] = r_vec[x].vector_dot(z_vec[x])
# Build Hx product
Ap_vec = hx_function(p_vec, active_mask)
# Update x and r
for x in active:
alpha[x] = rz_old[x] / Ap_vec[x].vector_dot(p_vec[x])
if np.isnan(alpha[x]):
core.print_out("CG: Alpha is NaN for vector %d. Stopping vector." % x)
active_mask[x] = False
continue
x_vec[x].axpy(alpha[x], p_vec[x])
r_vec[x].axpy(-alpha[x], Ap_vec[x])
resid[x] = (r_vec[x].sum_of_squares() / grad_dot[x])**0.5
# Print out or compute the resid function
if printer:
resid = printer(rot_iter + 1, x_vec, r_vec)
# Figure out active updated active mask
for x in active:
if (resid[x] < rcond):
active_mask[x] = False
# Print out if requested
if printlvl:
core.print_out(" %5d %14.3e %12.3e %7d %9d\n" %
(rot_iter + 1, np.mean(resid), np.max(resid), sum(active_mask), time.time() - tstart))
active = np.where(active_mask)[0]
if sum(active_mask) == 0:
break
# Update p
z_vec = preconditioner(r_vec, active_mask)
for x in active:
beta = r_vec[x].vector_dot(z_vec[x]) / rz_old[x]
p_vec[x].scale(beta)
p_vec[x].axpy(1.0, z_vec[x])
if printlvl:
core.print_out(" -----------------------------------------------------\n")
return x_vec, r_vec
class DIIS:
"""
An object to assist in the DIIS extrapolation procedure.
"""
def __init__(self, max_vec: int = 6, removal_policy: str = "OLDEST"):
"""
An object to assist in the DIIS extrapolation procedure.
Parameters
----------
max_vec
The maximum number of error and state vectors to hold. These are pruned based off the removal policy.
removal_policy
{"OLDEST", "LARGEST"}
How the state and error vectors are removed once at the maximum. OLDEST will remove the oldest vector while
largest will remove the residual with the largest RMS value.
"""
self.error = []
self.state = []
self.max_vec = max_vec
self.removal_policy = removal_policy.upper()
if self.removal_policy not in ["LARGEST", "OLDEST"]:
raise ValidationError("DIIS: removal_policy must either be oldest or largest.")
def add(self, state, error):
"""
Adds a DIIS state and error vector to the DIIS object.
state : :py:class:`~psi4.core.Matrix`
The current state vector.
error : :py:class:`~psi4.core.Matrix`
The current error vector.
"""
self.error.append(error.clone())
self.state.append(state.clone())
def extrapolate(self, out: core.Matrix = None) -> core.Matrix:
"""
Extrapolates next state vector from the current set of state and error vectors.
Parameters
----------
out
An array in which to place the next state vector.
Returns
-------
ret : Matrix
Returns the next state vector.
"""
# Limit size of DIIS vector
diis_count = len(self.state)
if diis_count == 0:
raise ValidationError("DIIS: No previous vectors.")
if diis_count == 1:
return self.state[0]
if diis_count > self.max_vec:
if self.removal_policy == "OLDEST":
pos = 0
else:
pos = np.argmax([x.rms() for x in self.error])
del self.state[pos]
del self.error[pos]
diis_count -= 1
# Build error matrix B
B = np.empty((diis_count + 1, diis_count + 1))
B[-1, :] = 1
B[:, -1] = 1
B[-1, -1] = 0
for num1, e1 in enumerate(self.error):
B[num1, num1] = e1.vector_dot(e1)
for num2, e2 in enumerate(self.error):
if num2 >= num1:
continue
val = e1.vector_dot(e2)
B[num1, num2] = B[num2, num1] = val
# Build residual vector
resid = np.zeros(diis_count + 1)
resid[-1] = 1
# Solve pulay equations
# Yea, yea this is unstable make it stable
iszero = np.any(np.diag(B)[:-1] <= 0.0)
if iszero:
S = np.ones((diis_count + 1))
else:
S = np.diag(B).copy()
S[:-1] **= -0.5
S[-1] = 1
# Then we gotta do a custom inverse
B *= S[:, None] * S
invB = core.Matrix.from_array(B)
invB.power(-1.0, 1.e-12)
ci = np.dot(invB, resid)
ci *= S
# combination of previous fock matrices
if out is None:
out = core.Matrix("DIIS result", self.state[0].rowdim(), self.state[1].coldim())
else:
out.zero()
for num, c in enumerate(ci[:-1]):
out.axpy(c, self.state[num])
return out
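# A minimal illustration of the DIIS workflow above (hypothetical 2x2 matrices,
# not data from a real calculation); in practice the state/error pairs would be
# Fock matrices and orbital-gradient residuals:
#
#   diis = DIIS(max_vec=6, removal_policy="OLDEST")
#   state = core.Matrix.from_array(np.array([[1.0, 0.1], [0.1, 2.0]]))
#   error = core.Matrix.from_array(np.array([[0.0, 0.05], [0.05, 0.0]]))
#   diis.add(state, error)
#   new_state = diis.extrapolate()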
def _diag_print_heading(title_lines, solver_name, max_ss_size, nroot, r_convergence, maxiter, verbose=1):
"""Print a message to the output file when the solver has processed all options and is ready to begin"""
if verbose < 1:
# no printing
return
# show title if not silent
core.print_out("\n\n")
core.print_out("\n".join([x.center(77) for x in title_lines]))
core.print_out("\n")
core.print_out("\n ==> Options <==\n\n")
core.print_out(f" Max number of iterations = {maxiter:<5d}\n")
core.print_out(f" Eigenvector tolerance = {r_convergence:.4e}\n")
core.print_out(f" Max number of expansion vectors = {max_ss_size:<5d}\n")
core.print_out("\n")
# show iteration info headings if not silent
core.print_out(" => Iterations <=\n")
if verbose == 1:
# default printing one line per iter max delta value and max residual norm
core.print_out(f" {' ' * len(solver_name)} Max[D[value]] Max[|R|] # vectors\n")
else:
# verbose printing, value, delta, and |R| for each root
core.print_out(" {' ' * len(solver_name)} value D[value] |R| # vectors\n")
def _diag_print_info(solver_name, info, verbose=1):
"""Print a message to the output file at each iteration"""
if verbose < 1:
# no printing
return
elif verbose == 1:
# print iter maxde max|R| conv/restart
flags = []
if info['collapse']:
flags.append("Restart")
if info['done']:
flags.append("Converged")
m_de = np.max(info['delta_val'])
m_r = np.max(info['res_norm'])
nvec = info["nvec"]
flgs = "/".join(flags)
core.print_out(
f" {solver_name} iter {info['count']:3d}: {m_de:-11.5e} {m_r:12.5e} {nvec:>6d} {flgs}\n")
else:
# print iter / ssdim followed by de/|R| for each root
core.print_out(f" {solver_name} iter {info['count']:3d}: {info['nvec']:4d} guess vectors\n")
for i, (e, de, rn) in enumerate(zip(info['val'], info['delta_val'], info['res_norm'])):
s = " " * len(solver_name)
core.print_out(f" {i+1:2d}: {s:} {e:-11.5f} {de:-11.5e} {rn:12.5e}\n")
if info['done']:
core.print_out(" Solver Converged! all roots\n\n")
elif info['collapse']:
core.print_out(" Subspace limits exceeded restarting\n\n")
def _diag_print_converged(solver_name, stats, vals, verbose=1, **kwargs):
"""Print a message to the output file when the solver is converged."""
if verbose < 1:
# no printing
return
if verbose > 1:
# print values summary + number of iterations + # of "big" product evals
core.print_out(" Root # eigenvalue\n")
for (i, vi) in enumerate(vals):
core.print_out(f" {i+1:^6} {vi:20.12f}\n")
max_nvec = max(istat['nvec'] for istat in stats)
core.print_out(f"\n {solver_name} converged in {stats[-1]['count']} iterations\n")
core.print_out(f" Computed a total of {stats[-1]['product_count']} large products\n\n")
def _print_array(name, arr, verbose):
"""print a subspace quantity (numpy array) to the output file
Parameters
----------
name : str
The name to print above the array
arr : :py:class:`np.ndarray`
The array to print
verbose : int
The amount of information to print. Only prints for verbose > 2
"""
if verbose > 2:
core.print_out(f"\n\n{name}:\n{str(arr)}\n")
def _gs_orth(engine, U, V, thresh=1.0e-8):
"""Perform Gram-Schmidt orthonormalization of a set V against a previously orthonormalized set U
Parameters
----------
engine : object
The engine passed to the solver, required to define vector algebraic operations needed
U : list of `vector`
A set of orthonormal vectors, len(U) = l; satisfies ||I^{lxl}-U^tU|| < thresh
V : list of `vectors`
The vectors used to augment U
thresh : float
If the orthogonalized vector has a norm smaller than this value it is considered LD to the set
Returns
-------
U_aug : list of `vector`
The orthonormal set of vectors U' with span(U') = span(U) + span(V), len(U) <= len(U_aug) <= len(U) + len(V)
"""
for vi in V:
for j in range(len(U)):
dij = engine.vector_dot(vi, U[j])
vi = engine.vector_axpy(-1.0 * dij, U[j], vi)
norm_vi = np.sqrt(engine.vector_dot(vi, vi))
if norm_vi >= thresh:
U.append(engine.vector_scale(1.0 / norm_vi, vi))
return U
def _best_vectors(engine, ss_vectors: np.ndarray, basis_vectors: List) -> List:
r"""Compute the best approximation of the true eigenvectors as a linear combination of basis vectors:
.. math:: V_{k} = \sum_{i} \tilde{V}_{i,k} X_{i}
Where :math:`\tilde{V}` is the matrix with columns that are eigenvectors of the subspace matrix. And
:math:`X_{i}` is a basis vector.
Parameters
----------
engine : object
The engine passed to the solver, required to define vector algebraic operations needed
ss_vectors
Numpy array {l, k}.
The k eigenvectors of the subspace problem, l = dimension of the subspace basis, and k is the number of roots
basis_vectors
list of `vector` {l}.
The current basis vectors
Returns
-------
new_vecs
list of `vector` {k}.
The approximations of the k true eigenvectors.
"""
l, n = ss_vectors.shape
new_vecs = []
for i in range(n):
cv_i = engine.new_vector()
for j in range(l):
cv_i = engine.vector_axpy(ss_vectors[j, i], basis_vectors[j], cv_i)
new_vecs.append(cv_i)
return new_vecs
class SolverEngine(ABC):
"""Abstract Base Class defining the API required by solver engines
Engines implement the correct product functions for iterative solvers that
do not require the target matrix be stored directly.
Classes intended to be used as an `engine` for :func:`davidson_solver` or
:func:`hamiltonian_solver` should inherit from this base class to ensure
that the required methods are defined.
.. note:: The `vector` referred to here is intentionally vague, the solver
does not care what it is and only holds individual or sets of
them. In fact an individual `vector` could be split across two
elements in a list, such as for different spin.
Whatever data type is used, an individual vector should be a
single element in a list, such that len(list) returns the number
of vector-like objects.
"""
@abstractmethod
def compute_products(self, X):
r"""Compute a Matrix * trial vector products
Parameters
----------
X : list of `vectors`
Returns
-------
Expected by :func:`davidson_solver`
AX : list of `vectors`
The product :math:`A x X_{i}` for each `X_{i}` in `X`, in that
order. Where `A` is the hermitian matrix to be diagonalized.
`len(AX) == len(X)`
n : int
The number of products that were evaluated. If the object implements
product caching this may be less than len(X)
Expected by :func:`hamiltonian_solver`
H1X : list of `vectors`
The product :math:`H1 x X_{i}` for each `X_{i}` in `X`, in that
order. Where H1 is described in :func:`hamiltonian_solver`.
`len(H1X) == len(X)`
H2X : list of `vectors`
The product :math:`H2 x X_{i}` for each `X_{i}` in `X`, in that
order. Where H2 is described in :func:`hamiltonian_solver`.
`len(H2X) == len(X)`
"""
pass
@abstractmethod
def precondition(self, R_k, w_k):
r"""Apply the preconditioner to a Residual vector
The preconditioner is usually defined as :math:`(w_k - D_{i})^{-1}` where
`D` is an approximation of the diagonal of the matrix that is being
diagonalized.
Parameters
----------
R_k : single `vector`
The residual vector
w_k : float
The eigenvalue associated with this vector
Returns
-------
new_X_k : single `vector`
The preconditioned residual vector, a correction vector that will be
used to augment the guess space
"""
pass
@abstractmethod
def new_vector(self):
"""Return a new `vector` object.
The solver is oblivious to the data structure used for a `vector` this
method provides the engine with a means to create `vector` like
quantities.
The solver calls this method with no arguments, so any arguments the
engine defines for its own use should be optional.
Returns
-------
X : single `vector`
This should be a new vector object with the correct dimensions,
assumed to be zeroed out
"""
pass
def vector_dot(X, Y):
"""Compute a dot product between two `vectors`
Parameters
----------
X : single `vector`
Y : single `vector`
Returns
-------
a : float
The dot product (X x Y)
"""
pass
# cython doesn't like static+ decorators https://github.com/cython/cython/issues/1434#issuecomment-608975116
vector_dot = staticmethod(abstractmethod(vector_dot))
@abstractmethod
def vector_axpy(a, X, Y):
"""Compute scaled `vector` addition operation `a*X + Y`
Parameters
----------
a : float
The scale factor applied to `X`
X : single `vector`
The `vector` which will be scaled and added to `Y`
Y : single `vector`
The `vector` which the result of `a*X` is added to
Returns
-------
Y : single `vector`
The solver assumes that Y is updated, and returned. So it is safe to
avoid a copy of Y if possible
"""
pass
@abstractmethod
def vector_scale(a, X):
"""Scale a vector by some factor
Parameters
----------
a : float
The scale factor
X : single `vector`
The vector that will be scaled
Returns
-------
X : single `vector`
The solver assumes that the passed vector is modified, so it is safe
to avoid a copy of X if possible.
"""
pass
@abstractmethod
def vector_copy(X):
"""Make a copy of a `vector`
Parameters
----------
X : single `vector`
The `vector` to copy
Returns
-------
X' : single `vector`
A copy of `X`: a distinct object that can be modified
independently of the passed object and holds the same data when returned.
"""
pass
@abstractmethod
def residue(self, X, so_prop_ints):
"""Compute residue
Parameters
----------
X
The single `vector` to use to compute the property.
so_prop_ints :
Property integrals in SO basis for the desired transition property.
prefactor
Optional float scaling factor.
Returns
-------
residue : Any
The transition property.
"""
pass
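# A minimal, illustrative engine for the interface above (a sketch only, not one
# of Psi4's production engines): it wraps a small dense symmetric numpy matrix
# `A` and uses plain 1-D numpy arrays as the `vector` type, which is enough to
# drive :func:`davidson_solver` on a toy problem.
class _DenseNumpyEngine(SolverEngine):
    """Toy engine diagonalizing a dense symmetric numpy array (illustration only)."""

    def __init__(self, A):
        self.A = np.asarray(A, dtype=float)
        self.diag = np.diag(self.A).copy()

    def compute_products(self, X):
        # One explicit matrix-vector product per trial vector
        return [self.A.dot(x) for x in X], len(X)

    def precondition(self, R_k, w_k):
        # (w_k - D_i)^-1 preconditioner, guarded against tiny denominators
        denom = w_k - self.diag
        denom[np.abs(denom) < 1.0e-6] = 1.0e-6
        return R_k / denom

    def new_vector(self):
        return np.zeros(self.A.shape[0])

    @staticmethod
    def vector_dot(X, Y):
        return float(np.dot(X, Y))

    @staticmethod
    def vector_axpy(a, X, Y):
        Y += a * X
        return Y

    @staticmethod
    def vector_scale(a, X):
        X *= a
        return X

    @staticmethod
    def vector_copy(X):
        return X.copy()

    def residue(self, X, so_prop_ints):
        # Not meaningful for this toy problem; provided only to satisfy the ABC.
        return None

# Example usage of the toy engine (hypothetical, kept as a comment so it does not
# run on import):
#
#   A = np.diag(np.arange(1.0, 11.0)); A[0, 1] = A[1, 0] = 0.01
#   guess = [np.eye(10)[:, i].copy() for i in range(3)]
#   ret = davidson_solver(_DenseNumpyEngine(A), guess, nroot=2, verbose=0)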
def davidson_solver(engine, guess: List, *, nroot: int, r_convergence: float = 1.0E-4, max_ss_size: int = 100, maxiter: int = 60, verbose: int = 1):
"""Solves for the lowest few eigenvalues and eigenvectors of a large problem emulated through an engine.
If the large matrix `A` has dimension `{NxN}` and N is very large, and only
a small number of roots `k` are desired, this algorithm is preferable to
standard methods as it uses on the order of `N * k` memory. One only needs
the ability to compute the product of `A` times a vector.
For non-hermitian `A` the basis of the algorithm breaks down. However in
practice, for strongly diagonally-dominant `A` such as the
similarity-transformed Hamiltonian in EOM-CC this algorithm is commonly still
used.
Parameters
-----------
engine : object (subclass of :class:`SolverEngine`)
The engine drive all operations involving data structures that have at
least one "large" dimension. See :class:`SolverEngine` for requirements
guess
list {engine dependent}
At least `nroot` initial expansion vectors
nroot
Number of roots desired
r_convergence
Convergence tolerance for residual vectors
max_ss_size:
The maximum number of trial vectors in the iterative subspace that will
be stored before a collapse is done.
maxiter
The maximum number of iterations
verbose
The amount of logging info to print (0 -> none, 1 -> some, >1 -> everything)
Returns
-------
best_values : numpy.ndarray (nroots, )
The best approximation of the eigenvalues of A, computed on the last iteration of the solver
best_vectors: list of `vector` (nroots)
The best approximation of the eigenvectors of A, computed on the last iteration of the solver
stats : List[Dict]
Statistics collected on each iteration
count : int, iteration number
res_norm : np.ndarray (nroots, ), the norm of residual vector for each roots
val : np.ndarray (nroots, ), the eigenvalue corresponding to each root
delta_val : np.ndarray (nroots, ), the change in eigenvalue from the last iteration to this one
collapse : bool, if a subspace collapse was performed
product_count : int, the running total of product evaluations that was performed
done : bool, if all roots were converged
Notes
-----
The solution vector is normalized to 1/2
The solver will return even when ``maxiter`` iterations are performed without convergence.
The caller **must check** `stats[-1]['done']` for failure and handle each case accordingly.
"""
nk = nroot
iter_info = {
"count": 0,
"res_norm": np.zeros((nk)),
"val": np.zeros((nk)),
"delta_val": np.zeros((nk)),
# conv defaults to true, and will be flipped when a non-conv root is hit
"done": True,
"nvec": 0,
"collapse": False,
"product_count": 0,
}
print_name = "DavidsonSolver"
title_lines = ["Generalized Davidson Solver", "By Ruhee Dcunha"]
_diag_print_heading(title_lines, print_name, max_ss_size, nroot, r_convergence, maxiter, verbose)
vecs = guess
stats = []
best_eigvecs = []
best_eigvals = []
while iter_info['count'] < maxiter:
# increment iteration/ save old vals
iter_info['count'] += 1
old_vals = iter_info['val'].copy()
# reset flags
iter_info['collapse'] = False
iter_info['done'] = True
# get subspace dimension
l = len(vecs)
iter_info['nvec'] = l
# check if ss dimension has exceeded limits
if l >= max_ss_size:
iter_info['collapse'] = True
# compute A times trial vector products
Ax, nprod = engine.compute_products(vecs)
iter_info['product_count'] += nprod
# Build Subspace matrix
G = np.zeros((l, l))
for i in range(l):
for j in range(i):
G[i, j] = G[j, i] = engine.vector_dot(vecs[i], Ax[j])
G[i, i] = engine.vector_dot(vecs[i], Ax[i])
_print_array("SS transformed A", G, verbose)
# diagonalize subspace matrix
lam, alpha = np.linalg.eigh(G)
_print_array("SS eigenvectors", alpha, verbose)
_print_array("SS eigenvalues", lam, verbose)
# remove zeros/negatives
alpha = alpha[:, lam > 1.0e-10]
lam = lam[lam > 1.0e-10]
# sort/truncate to nroot
idx = np.argsort(lam)
lam = lam[idx]
alpha = alpha[:, idx]
# update best_solution
best_eigvecs = _best_vectors(engine, alpha[:, :nk], vecs)
best_eigvals = lam[:nk]
# check convergence of each solution
new_vecs = []
for k in range(nk):
# residual vector
Rk = engine.new_vector()
lam_k = lam[k]
for i in range(l):
Axi = Ax[i]
Rk = engine.vector_axpy(alpha[i, k], Axi, Rk)
Rk = engine.vector_axpy(-1.0 * lam_k, best_eigvecs[k], Rk)
iter_info['val'][k] = lam_k
iter_info['delta_val'][k] = abs(old_vals[k] - lam_k)
iter_info['res_norm'][k] = np.sqrt((engine.vector_dot(Rk, Rk)))
# augment guess vector for non-converged roots
if (iter_info["res_norm"][k] > r_convergence):
iter_info['done'] = False
Qk = engine.precondition(Rk, lam_k)
new_vecs.append(Qk)
# print iteration info to output
_diag_print_info(print_name, iter_info, verbose)
# save stats for this iteration
stats.append(iter_info.copy())
if iter_info['done']:
# finished
_diag_print_converged(print_name, stats, best_eigvals, verbose)
break
elif iter_info['collapse']:
# restart needed
vecs = best_eigvecs
else:
# Regular subspace update, orthonormalize preconditioned residuals and add to the trial set
vecs = _gs_orth(engine, vecs, new_vecs)
# always return, the caller should check ret["stats"][-1]['done'] == True for convergence
return {"eigvals": best_eigvals, "eigvecs": list(zip(best_eigvecs, best_eigvecs)), "stats": stats}
def hamiltonian_solver(engine, guess: List, *, nroot: int, r_convergence: float = 1.0E-4, max_ss_size: int = 100, maxiter: int = 60, verbose: int = 1):
"""Finds the smallest eigenvalues and associated right and left hand
eigenvectors of a large real Hamiltonian eigenvalue problem emulated
through an engine.
A Hamiltonian eigenvalue problem (EVP) has the following structure:
[A B][X] = [1 0](w)[X]
[B A][Y] [0 -1](w)[Y]
with A, B of some large dimension N, the problem is of dimension 2Nx2N.
The real, Hamiltonian EVP can be rewritten as the NxN, non-hermitian EVP:
(A-B)(A+B)(X+Y) = w^2(X+Y)
With left-hand eigenvectors:
(X-Y)(A-B)(A+B) = w^2(X-Y)
if (A-B) is positive definite, we can transform the problem to arrive at the hermitian NxN EVP:
(A-B)^1/2(A+B)(A-B)^1/2 T = w^2 T
Where T = (A-B)^-1/2(X+Y).
We use a Davidson-like iteration where we transform (A+B) (H1) and (A-B)
(H2) into the subspace defined by the trial vectors.
The subspace analog of the NxN hermitian EVP is diagonalized and left (X-Y)
and right (X+Y) eigenvectors of the NxN non-hermitian EVP are approximated.
Residual vectors are formed for both and the guess space is augmented with
two correction vectors per iteration. The advantages and properties of this
algorithm are described in the literature [stratmann:1998]_ .
Parameters
-----------
engine : object (subclass of :class:`SolverEngine`)
The engine drives all operations involving data structures that have at
least one "large" dimension. See :class:`SolverEngine` for requirements
guess
list {engine dependent}
At least `nroot` initial expansion vectors
nroot
Number of roots desired
r_convergence
Convergence tolerance for residual vectors
max_ss_size:
The maximum number of trial vectors in the iterative subspace that will
be stored before a collapse is done.
maxiter
The maximum number of iterations
verbose
The amount of logging info to print (0 -> none, 1 -> some, >1 -> everything)
Returns
-------
best_values : numpy.ndarray (nroots, )
The best approximation of the eigenvalues of `w`, computed on the last iteration of the solver
best_R: list of `vector` (nroots)
The best approximation of the right hand eigenvectors, `X+Y`, computed on the last iteration of the solver.
best_L: list of `vector` (nroots)
The best approximation of the left hand eigenvectors, `X-Y`, computed on the last iteration of the solver.
stats : list of `dict`
Statistics collected on each iteration
count : int, iteration number
res_norm : np.ndarray (nroots, ), the norm of residual vector for each roots
val : np.ndarray (nroots, ), the eigenvalue corresponding to each root
delta_val : np.ndarray (nroots, ), the change in eigenvalue from the last iteration to this one
collapse : bool, if a subspace collapse was performed
product_count : int, the running total of product evaluations that was performed
done : bool, if all roots were converged
Notes
-----
The solution vector is normalized to 1/2
The solver will return even when ``maxiter`` iterations are performed without convergence.
The caller **must check** `stats[-1]['done']` for failure and handle each case accordingly.
References
----------
R. Eric Stratmann, G. E. Scuseria, and M. J. Frisch, "An efficient
implementation of time-dependent density-functional theory for the
calculation of excitation energies of large molecules." J. Chem. Phys.,
109, 8218 (1998)
"""
nk = nroot
iter_info = {
"count": 0,
"res_norm": np.zeros((nk)),
"val": np.zeros((nk)),
"delta_val": np.zeros((nk)),
# conv defaults to true, and will be flipped when a non-conv root is hit
"conv": True,
"nvec": 0,
"product_count": 0,
}
print_name = "HamiltonianSolver"
title_lines = ["Generalized Hamiltonian Solver", "By Andrew M. James"]
_diag_print_heading(title_lines, print_name, max_ss_size, nroot, r_convergence, maxiter, verbose)
vecs = guess
best_L = []
best_R = []
best_vals = []
stats = []
while iter_info['count'] < maxiter:
# increment iteration/ save old vals
iter_info['count'] += 1
old_w = iter_info['val'].copy()
# reset flags
iter_info['collapse'] = False
iter_info['done'] = True
# get subspace dimension
l = len(vecs)
iter_info['nvec'] = l
# check if subspace dimension has exceeded limits
if l >= max_ss_size:
iter_info['collapse'] = True
# compute [A+B]*v (H1x) and [A-B]*v (H2x)
H1x, H2x, nprod = engine.compute_products(vecs)
iter_info['product_count'] += nprod
# form x*H1x (H1_ss) and x*H2x (H2_ss)
H1_ss = np.zeros((l, l))
H2_ss = np.zeros((l, l))
for i in range(l):
for j in range(l):
H1_ss[i, j] = engine.vector_dot(vecs[i], H1x[j])
H2_ss[i, j] = engine.vector_dot(vecs[i], H2x[j])
_print_array("Subspace Transformed (A+B)", H1_ss, verbose)
_print_array("Subspace Transformed (A-B)", H2_ss, verbose)
# Diagonalize H2 in the subspace (eigen-decomposition to compute H2^(1/2))
H2_ss_val, H2_ss_vec = np.linalg.eigh(H2_ss)
_print_array("eigenvalues H2_ss", H2_ss_val, verbose)
_print_array("eigenvectors H2_ss", H2_ss_vec, verbose)
# Check H2 is PD
# NOTE: If this triggers failure the SCF solution is not stable. A few ways to handle this
# 1. Use davidson solver where product function evaluates (H2 * (H1 * X))
# - Poor convergence
# 2. Switch to CIS/TDA
# - User would probably not expect this
# 3. Perform Stability update and restart with new reference
if np.any(H2_ss_val < 0.0):
msg = ("The H2 matrix is not Positive Definite. " "This means the reference state is not stable.")
raise RuntimeError(msg)
# Build H2^(1/2)
H2_ss_half = np.einsum("ik,k,jk->ij", H2_ss_vec, np.sqrt(H2_ss_val), H2_ss_vec, optimize=True)
_print_array("SS Transformed (A-B)^(1/2)", H2_ss_half, verbose)
# Build Hermitian SS product (H2)^(1/2)(H1)(H2)^(1/2)
Hss = np.einsum('ij,jk,km->im', H2_ss_half, H1_ss, H2_ss_half, optimize=True)
_print_array("(H2)^(1/2)(H1)(H2)^(1/2)", Hss, verbose)
#diagonalize Hss -> w^2, Tss
w2, Tss = np.linalg.eigh(Hss)
_print_array("Eigenvalues (A-B)^(1/2)(A+B)(A-B)^(1/2)", w2, verbose)
_print_array("Eigvectors (A-B)^(1/2)(A+B)(A-B)^(1/2)", Tss, verbose)
# pick positive roots
Tss = Tss[:, w2 > 1.0e-10]
w2 = w2[w2 > 1.0e-10]
# check for invalid eigvals
with np.errstate(invalid='raise'):
w = np.sqrt(w2)
# sort roots
idx = w.argsort()[:nk]
Tss = Tss[:, idx]
w = w[idx]
# Extract Rss = H2^{1/2}Tss
Rss = np.dot(H2_ss_half, Tss)
# Extract Lss = (H1 R)/ w
Lss = np.dot(H1_ss, Rss).dot(np.diag(1.0 / w))
# Biorthonormalize R/L solution vectors
inners = np.einsum("ix,ix->x", Rss, Lss, optimize=True)
Rss = np.einsum("x,ix->ix", 1. / np.sqrt(inners), Rss, optimize=True)
Lss = np.einsum("x,ix->ix", 1. / np.sqrt(inners), Lss, optimize=True)
# Save best R/L vectors and eigenvalues
best_R = _best_vectors(engine, Rss[:, :nk], vecs)
best_L = _best_vectors(engine, Lss[:, :nk], vecs)
best_vals = w[:nk]
# check convergence of each solution
new_vecs = []
for k in range(nk):
# residual vectors for right and left eigenvectors
WR_k = engine.new_vector()
WL_k = engine.new_vector()
wk = w[k]
for i in range(l):
H1x_i = H1x[i]
H2x_i = H2x[i]
WL_k = engine.vector_axpy(Rss[i, k], H1x_i, WL_k)
WR_k = engine.vector_axpy(Lss[i, k], H2x_i, WR_k)
WL_k = engine.vector_axpy(-1.0 * wk, best_L[k], WL_k)
WR_k = engine.vector_axpy(-1.0 * wk, best_R[k], WR_k)
norm_R = np.sqrt(engine.vector_dot(WR_k, WR_k))
norm_L = np.sqrt(engine.vector_dot(WL_k, WL_k))
norm = norm_R + norm_L
iter_info['res_norm'][k] = norm
iter_info['delta_val'][k] = np.abs(old_w[k] - w[k])
iter_info['val'][k] = w[k]
# augment the guess space for non-converged roots
if (iter_info['res_norm'][k] > r_convergence):
iter_info['done'] = False
new_vecs.append(engine.precondition(WR_k, w[k]))
new_vecs.append(engine.precondition(WL_k, w[k]))
# print iteration info to output
_diag_print_info(print_name, iter_info, verbose)
# save stats for this iteration
stats.append(iter_info.copy())
if iter_info['done']:
# Finished
_diag_print_converged(print_name, stats, w[:nk], rvec=best_R, lvec=best_L, verbose=verbose)
break
elif iter_info['collapse']:
# need to orthonormalize union of the Left/Right solutions on restart
vecs = _gs_orth(engine, [], best_R + best_L)
else:
# Regular subspace update, orthonormalize preconditioned residuals and add to the trial set
vecs = _gs_orth(engine, vecs, new_vecs)
# always return, the caller should check ret["stats"][-1]['done'] == True for convergence
return {"eigvals": best_vals, "eigvecs": list(zip(best_R, best_L)), "stats": stats}
|
lothian/psi4
|
psi4/driver/p4util/solvers.py
|
Python
|
lgpl-3.0
| 37,813
|
[
"Psi4"
] |
a25fcc64d52c18ae007f58d26ead6aa8a3171af86e4de8dc00c02746570d2890
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import tempfile
import numpy
import h5py
from pyscf import lib
from pyscf import gto
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo import outcore
from pyscf import __config__
IOBLK_SIZE = getattr(__config__, 'ao2mo_outcore_ioblk_size', 256) # 256 MB
IOBUF_WORDS = getattr(__config__, 'ao2mo_outcore_iobuf_words', 1e8) # 1.6 GB
IOBUF_ROW_MIN = getattr(__config__, 'ao2mo_outcore_row_min', 160)
MAX_MEMORY = getattr(__config__, 'ao2mo_outcore_max_memory', 4000) # 4GB
def full(mol, mo_coeff, erifile, dataname='eri_mo',
intor='int2e_spinor', aosym='s4', comp=None,
max_memory=MAX_MEMORY, ioblk_size=IOBLK_SIZE, verbose=logger.WARN):
general(mol, (mo_coeff,)*4, erifile, dataname,
intor, aosym, comp, max_memory, ioblk_size, verbose)
return erifile
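# A minimal sketch of calling `full` (hypothetical molecule and file name; random
# complex MO coefficients over the 2-component spinor AO basis are used purely for
# illustration):
#
#   mol = gto.M(atom='H 0 0 0; F 0 0 1.1', basis='sto-3g')
#   n2c = mol.nao_2c()
#   numpy.random.seed(1)
#   mo = numpy.random.random((n2c, 4)) + numpy.random.random((n2c, 4)) * 1j
#   full(mol, mo, 'spinor_mo_eri.h5', intor='int2e_spinor', aosym='s4')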
def general(mol, mo_coeffs, erifile, dataname='eri_mo',
intor='int2e_spinor', aosym='s4', comp=None,
max_memory=MAX_MEMORY, ioblk_size=IOBLK_SIZE, verbose=logger.WARN):
time_0pass = (time.clock(), time.time())
log = logger.new_logger(mol, verbose)
if '_spinor' not in intor:
log.warn('r_ao2mo requires spinor integrals.\n'
'Suffix _spinor is added to %s', intor)
intor = intor + '_spinor'
intor, comp = gto.moleintor._get_intor_and_comp(mol._add_suffix(intor), comp)
klsame = iden_coeffs(mo_coeffs[2], mo_coeffs[3])
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
nmok = mo_coeffs[2].shape[1]
nmol = mo_coeffs[3].shape[1]
nao = mo_coeffs[0].shape[0]
aosym = outcore._stand_sym_code(aosym)
if aosym in ('s1', 's2ij', 'a2ij'):
nao_pair = nao * nao
else:
nao_pair = _count_naopair(mol, nao)
nij_pair = nmoi*nmoj
nkl_pair = nmok*nmol
if klsame and aosym in ('s4', 's2kl', 'a2kl', 'a4ij', 'a4kl', 'a4'):
log.debug('k-mo == l-mo')
mokl = numpy.asarray(mo_coeffs[2], dtype=numpy.complex128, order='F')
klshape = (0, nmok, 0, nmok)
else:
mokl = numpy.asarray(numpy.hstack((mo_coeffs[2],mo_coeffs[3])),
dtype=numpy.complex128, order='F')
klshape = (0, nmok, nmok, nmok+nmol)
if isinstance(erifile, str):
if h5py.is_hdf5(erifile):
feri = h5py.File(erifile, 'a')
if dataname in feri:
del(feri[dataname])
else:
feri = h5py.File(erifile, 'w')
else:
assert(isinstance(erifile, h5py.Group))
feri = erifile
if comp == 1:
chunks = (nmoj,nmol)
shape = (nij_pair, nkl_pair)
else:
chunks = (1,nmoj,nmol)
shape = (comp, nij_pair, nkl_pair)
if nij_pair == 0 or nkl_pair == 0:
feri.create_dataset(dataname, shape, 'c16')
if isinstance(erifile, str):
feri.close()
return erifile
else:
h5d_eri = feri.create_dataset(dataname, shape, 'c16', chunks=chunks)
log.debug('MO integrals %s are saved in %s/%s', intor, erifile, dataname)
log.debug('num. MO ints = %.8g, required disk %.8g MB',
float(nij_pair)*nkl_pair*comp, nij_pair*nkl_pair*comp*16/1e6)
# transform e1
swapfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
half_e1(mol, mo_coeffs, swapfile.name, intor, aosym, comp,
max_memory, ioblk_size, log)
time_1pass = log.timer('AO->MO transformation for %s 1 pass'%intor,
*time_0pass)
e2buflen = guess_e2bufsize(ioblk_size, nij_pair, nao_pair)[0]
log.debug('step2: kl-pair (ao %d, mo %d), mem %.8g MB, '
'ioblock (r/w) %.8g/%.8g MB', \
nao_pair, nkl_pair, e2buflen*nao_pair*16/1e6,
e2buflen*nij_pair*16/1e6, e2buflen*nkl_pair*16/1e6)
fswap = h5py.File(swapfile.name, 'r')
klaoblks = len(fswap['0'])
ijmoblks = int(numpy.ceil(float(nij_pair)/e2buflen)) * comp
ao_loc = numpy.asarray(mol.ao_loc_2c(), dtype=numpy.int32)
tao = numpy.asarray(mol.tmap(), dtype=numpy.int32)
ti0 = time_1pass
buf = numpy.empty((e2buflen, nao_pair), dtype=numpy.complex)
istep = 0
for row0, row1 in prange(0, nij_pair, e2buflen):
nrow = row1 - row0
for icomp in range(comp):
istep += 1
tioi = 0
log.debug('step 2 [%d/%d], [%d,%d:%d], row = %d', \
istep, ijmoblks, icomp, row0, row1, nrow)
col0 = 0
for ic in range(klaoblks):
dat = fswap['%d/%d'%(icomp,ic)]
col1 = col0 + dat.shape[1]
buf[:nrow,col0:col1] = dat[row0:row1]
col0 = col1
ti2 = log.timer('step 2 [%d/%d], load buf'%(istep,ijmoblks), *ti0)
tioi += ti2[1]-ti0[1]
pbuf = _ao2mo.r_e2(buf[:nrow], mokl, klshape, tao, ao_loc, aosym)
tw1 = time.time()
if comp == 1:
h5d_eri[row0:row1] = pbuf
else:
h5d_eri[icomp,row0:row1] = pbuf
tioi += time.time()-tw1
ti1 = (time.clock(), time.time())
log.debug('step 2 [%d/%d] CPU time: %9.2f, Wall time: %9.2f, I/O time: %9.2f', \
istep, ijmoblks, ti1[0]-ti0[0], ti1[1]-ti0[1], tioi)
ti0 = ti1
buf = pbuf = None
fswap.close()
if isinstance(erifile, str):
feri.close()
log.timer('AO->MO transformation for %s 2 pass'%intor, *time_1pass)
log.timer('AO->MO transformation for %s '%intor, *time_0pass)
return erifile
# swapfile will be overwritten if exists.
def half_e1(mol, mo_coeffs, swapfile,
intor='int2e_spinor', aosym='s4', comp=None,
max_memory=MAX_MEMORY, ioblk_size=IOBLK_SIZE, verbose=logger.WARN,
ao2mopt=None):
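    '''Half-transform the two-electron spinor integrals: the first AO index
    pair (ij) is transformed to the MO basis and the partially transformed
    integrals are written, block by block, to the HDF5 file ``swapfile``.
    '''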
time0 = (time.clock(), time.time())
log = logger.new_logger(mol, verbose)
ijsame = iden_coeffs(mo_coeffs[0], mo_coeffs[1])
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
nao = mo_coeffs[0].shape[0]
aosym = outcore._stand_sym_code(aosym)
if aosym in ('s1', 's2kl', 'a2kl'):
nao_pair = nao * nao
else:
nao_pair = _count_naopair(mol, nao)
nij_pair = nmoi * nmoj
if ijsame and aosym in ('s4', 's2ij', 'a2ij', 'a4ij', 'a4kl', 'a4'):
log.debug('i-mo == j-mo')
moij = numpy.asarray(mo_coeffs[0], order='F')
ijshape = (0, nmoi, 0, nmoi)
else:
moij = numpy.asarray(numpy.hstack((mo_coeffs[0],mo_coeffs[1])), order='F')
ijshape = (0, nmoi, nmoi, nmoi+nmoj)
e1buflen, mem_words, iobuf_words, ioblk_words = \
guess_e1bufsize(max_memory, ioblk_size, nij_pair, nao_pair, comp)
# The buffer to hold AO integrals in C code
aobuflen = int((mem_words - iobuf_words) // (nao*nao*comp))
shranges = outcore.guess_shell_ranges(mol, (aosym not in ('s1', 's2ij', 'a2ij')),
aobuflen, e1buflen, mol.ao_loc_2c(), False)
if ao2mopt is None:
# if intor == 'int2e_spinor':
# ao2mopt = _ao2mo.AO2MOpt(mol, intor, 'CVHFnr_schwarz_cond',
# 'CVHFsetnr_direct_scf')
# elif intor == 'int2e_spsp1_spinor':
# elif intor == 'int2e_spsp1spsp2_spinor':
# else:
# ao2mopt = _ao2mo.AO2MOpt(mol, intor)
ao2mopt = _ao2mo.AO2MOpt(mol, intor)
log.debug('step1: tmpfile %.8g MB', nij_pair*nao_pair*16/1e6)
log.debug('step1: (ij,kl) = (%d,%d), mem cache %.8g MB, iobuf %.8g MB',
nij_pair, nao_pair, mem_words*16/1e6, iobuf_words*16/1e6)
fswap = h5py.File(swapfile, 'w')
for icomp in range(comp):
g = fswap.create_group(str(icomp)) # for h5py old version
tao = numpy.asarray(mol.tmap(), dtype=numpy.int32)
# transform e1
ti0 = log.timer('Initializing ao2mo.outcore.half_e1', *time0)
nstep = len(shranges)
for istep,sh_range in enumerate(shranges):
log.debug('step 1 [%d/%d], AO [%d:%d], len(buf) = %d', \
istep+1, nstep, *(sh_range[:3]))
buflen = sh_range[2]
        iobuf = numpy.empty((comp,buflen,nij_pair), dtype=numpy.complex128)
nmic = len(sh_range[3])
p0 = 0
for imic, aoshs in enumerate(sh_range[3]):
log.debug1(' fill iobuf micro [%d/%d], AO [%d:%d], len(aobuf) = %d', \
imic+1, nmic, *aoshs)
buf = _ao2mo.r_e1(intor, moij, ijshape, aoshs,
mol._atm, mol._bas, mol._env,
tao, aosym, comp, ao2mopt)
iobuf[:,p0:p0+aoshs[2]] = buf
p0 += aoshs[2]
ti2 = log.timer('gen AO/transform MO [%d/%d]'%(istep+1,nstep), *ti0)
e2buflen, chunks = guess_e2bufsize(ioblk_size, nij_pair, buflen)
for icomp in range(comp):
dset = fswap.create_dataset('%d/%d'%(icomp,istep),
(nij_pair,iobuf.shape[1]), 'c16',
chunks=None)
for col0, col1 in prange(0, nij_pair, e2buflen):
dset[col0:col1] = lib.transpose(iobuf[icomp,:,col0:col1])
ti0 = log.timer('transposing to disk', *ti2)
fswap.close()
return swapfile
def full_iofree(mol, mo_coeff, intor='int2e_spinor', aosym='s4', comp=None,
verbose=logger.WARN, **kwargs):
erifile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
general(mol, (mo_coeff,)*4, erifile.name, dataname='eri_mo',
intor=intor, aosym=aosym, comp=comp,
verbose=verbose)
with h5py.File(erifile.name, 'r') as feri:
return numpy.asarray(feri['eri_mo'])
def general_iofree(mol, mo_coeffs, intor='int2e_spinor', aosym='s4', comp=None,
verbose=logger.WARN, **kwargs):
erifile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
general(mol, mo_coeffs, erifile.name, dataname='eri_mo',
intor=intor, aosym=aosym, comp=comp,
verbose=verbose)
with h5py.File(erifile.name, 'r') as feri:
return numpy.asarray(feri['eri_mo'])
def iden_coeffs(mo1, mo2):
return (id(mo1) == id(mo2)) \
or (mo1.shape==mo2.shape and numpy.allclose(mo1,mo2))
def prange(start, end, step):
for i in range(start, end, step):
yield i, min(i+step, end)
def guess_e1bufsize(max_memory, ioblk_size, nij_pair, nao_pair, comp):
mem_words = max_memory * 1e6 / 16
# part of the max_memory is used to hold the AO integrals. The iobuf is the
    # buffer to temporarily hold the transformed integrals before streaming to disk.
# iobuf is then divided to small blocks (ioblk_words) and streamed to disk.
if mem_words > IOBUF_WORDS * 2:
iobuf_words = int(IOBUF_WORDS)
else:
iobuf_words = int(mem_words // 2)
ioblk_words = int(min(ioblk_size*1e6/16, iobuf_words))
e1buflen = int(min(iobuf_words//(comp*nij_pair), nao_pair))
return e1buflen, mem_words, iobuf_words, ioblk_words
def guess_e2bufsize(ioblk_size, nrows, ncols):
e2buflen = int(min(ioblk_size*1e6/16/ncols, nrows))
e2buflen = max(e2buflen//IOBUF_ROW_MIN, 1) * IOBUF_ROW_MIN
chunks = (IOBUF_ROW_MIN, ncols)
return e2buflen, chunks
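# A quick worked example of the block sizing above (the numbers are purely
# illustrative): with ioblk_size = 256 (MB) and ncols = 10**6 complex128
# elements per row, ioblk_size*1e6/16/ncols = 16 rows fit in one I/O block,
# which is then rounded up to a single IOBUF_ROW_MIN chunk, i.e.
#     e2buflen = max(16 // 160, 1) * 160 = 160 rows per write.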
def _count_naopair(mol, nao):
ao_loc = mol.ao_loc_2c()
nao_pair = 0
for i in range(mol.nbas):
di = ao_loc[i+1] - ao_loc[i]
for j in range(i+1):
dj = ao_loc[j+1] - ao_loc[j]
nao_pair += di * dj
return nao_pair
del(MAX_MEMORY)
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.verbose = 5
mol.output = 'out_outcore'
mol.atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
mol.build()
n2c = mol.nao_2c()
numpy.random.seed(1)
mo = numpy.random.random((n2c,n2c)) + numpy.random.random((n2c,n2c))*1j
    eri0 = numpy.empty((n2c,n2c,n2c,n2c), dtype=numpy.complex128)
pi = 0
for i in range(mol.nbas):
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas):
pl = 0
for l in range(mol.nbas):
buf = gto.getints_by_shell('int2e_spinor', (i,j,k,l),
mol._atm, mol._bas, mol._env)
di, dj, dk, dl = buf.shape
eri0[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
nao, nmo = mo.shape
eri0 = numpy.dot(mo.T.conj(), eri0.reshape(nao,-1))
eri0 = numpy.dot(eri0.reshape(-1,nao), mo)
eri0 = eri0.reshape(nmo,nao,nao,nmo).transpose(2,3,0,1).copy()
eri0 = numpy.dot(mo.T.conj(), eri0.reshape(nao,-1))
eri0 = numpy.dot(eri0.reshape(-1,nao), mo)
eri0 = eri0.reshape((nmo,)*4)
print(time.clock())
full(mol, mo, 'h2oeri.h5', max_memory=10, ioblk_size=5)
with h5py.File('h2oeri.h5', 'r') as feri:
eri1 = numpy.array(feri['eri_mo']).reshape((nmo,)*4)
print(time.clock())
print(numpy.allclose(eri0, eri1))
|
gkc1000/pyscf
|
pyscf/ao2mo/r_outcore.py
|
Python
|
apache-2.0
| 13,798
|
[
"PySCF"
] |
e29d7b8fe9f2a8671260154e3ccf4520c3d3575b791ed106404c7126c23179e0
|
"""
This is the client of the Monitoring service based on Elasticsearch.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.Core.Utilities.Plotting.FileCoding import codeRequestInFileId
__RCSID__ = "$Id$"
@createClient('Monitoring/Monitoring')
class MonitoringClient(Client):
"""
.. class:: MonitoringClient
  This class exposes the methods of the Monitoring Service
"""
def __init__(self, **kwargs):
""" Simple constructor
"""
super(MonitoringClient, self).__init__(**kwargs)
self.setServer('Monitoring/Monitoring')
def generateDelayedPlot(
self,
typeName,
reportName,
startTime,
endTime,
condDict,
grouping,
extraArgs=None,
compress=True):
"""
It is used to encode the plot parameters used to create a certain plot.
    :param str typeName: the type of the monitoring
    :param str reportName: the name of the plotter used to create the plot, for example: NumberOfJobs
    :param int startTime: epoch time, start time of the plot
    :param int endTime: epoch time, end time of the plot
    :param dict condDict: the conditions used to generate the plot: {'Status':['Running'], 'grouping': ['Site']}
    :param str grouping: the grouping of the data, for example: 'Site'
    :param dict extraArgs: extra plot arguments, for example a relative time period such as last day, last week or last month
:param bool compress: apply compression of the encoded values.
:return: S_OK(str) or S_ERROR() it returns the encoded plot parameters
"""
if not isinstance(extraArgs, dict):
extraArgs = {}
plotRequest = {'typeName': typeName,
'reportName': reportName,
'startTime': startTime,
'endTime': endTime,
'condDict': condDict,
'grouping': grouping,
'extraArgs': extraArgs}
return codeRequestInFileId(plotRequest, compress)
def getReport(self, typeName, reportName, startTime, endTime, condDict, grouping, extraArgs=None):
"""
It is used to get the raw data used to create a plot.
:param str typeName: the type of the monitoring
:param str reportName: the name of the plotter used to create the plot for example: NumberOfJobs
:param int startTime: epoch time, start time of the plot
:param int endTime: epoch time, end time of the plot
    :param dict condDict: the conditions used to generate the plot: {'Status':['Running'], 'grouping': ['Site']}
    :param str grouping: the grouping of the data, for example: 'Site'
    :param dict extraArgs: extra plot arguments, for example a relative time period such as last day, last week or last month
    :return: S_OK or S_ERROR
"""
if not isinstance(extraArgs, dict):
extraArgs = {}
plotRequest = {'typeName': typeName,
'reportName': reportName,
'startTime': startTime,
'endTime': endTime,
'condDict': condDict,
'grouping': grouping,
'extraArgs': extraArgs}
result = self._getRPC().getReport(plotRequest)
if 'rpcStub' in result:
del result['rpcStub']
return result
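
if __name__ == '__main__':
  # Minimal usage sketch (illustrative only): it assumes a properly configured
  # DIRAC client environment and a reachable Monitoring service. The type and
  # report names and the time window below are hypothetical example values.
  client = MonitoringClient()
  res = client.getReport('WMSHistory', 'NumberOfJobs',
                         1609459200, 1609545600,
                         {'Status': ['Running']}, 'Site')
  if res['OK']:
    print(res['Value'])
  else:
    print(res['Message'])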
|
yujikato/DIRAC
|
src/DIRAC/MonitoringSystem/Client/MonitoringClient.py
|
Python
|
gpl-3.0
| 3,209
|
[
"DIRAC"
] |
d183f3d8b68125edb6882e9e9331a0727f5b23105a0e281e16ce695c30bc0552
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import datetime
import gtk
from kiwi.currency import currency
from stoqdrivers.exceptions import DriverError
from storm.expr import And
from stoqlib.api import api
from stoqlib.domain.events import (TillAddCashEvent, TillAddTillEntryEvent)
from stoqlib.domain.payment.method import PaymentMethod
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.payment.views import InPaymentView
from stoqlib.domain.till import Till
from stoqlib.exceptions import DeviceError, TillError
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.base.gtkadds import change_button_appearance
from stoqlib.gui.slaves.paymentconfirmslave import SalePaymentConfirmSlave
from stoqlib.gui.search.searchcolumns import SearchColumn, IdentifierColumn
from stoqlib.gui.search.searchdialog import SearchDialog, SearchDialogButtonSlave
from stoqlib.gui.search.searchfilters import DateSearchFilter
from stoqlib.lib.message import warning
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class PaymentReceivingSearch(SearchDialog):
title = _('Payments to Receive Search')
size = (775, 450)
search_spec = InPaymentView
def __init__(self, store):
SearchDialog.__init__(self, store)
self.results.connect('selection-changed', self._on_selection_changed)
self._setup_button_slave()
def _setup_button_slave(self):
self._button_slave = SearchDialogButtonSlave()
change_button_appearance(self._button_slave.button,
gtk.STOCK_APPLY, _("Receive"))
self.attach_slave('print_holder', self._button_slave)
self._button_slave.connect('click', self.on_receive_button_clicked)
self._button_slave.button.set_sensitive(False)
def _receive(self):
with api.new_store() as store:
till = Till.get_current(store)
assert till
in_payment = self.results.get_selected()
payment = store.fetch(in_payment.payment)
assert self._can_receive(payment)
retval = run_dialog(SalePaymentConfirmSlave, self, store,
payments=[payment], show_till_info=False)
if not retval:
return
try:
TillAddCashEvent.emit(till=till, value=payment.value)
except (TillError, DeviceError, DriverError) as e:
warning(str(e))
return
till_entry = till.add_credit_entry(payment.value,
_(u'Received payment: %s') % payment.description)
TillAddTillEntryEvent.emit(till_entry, store)
if store.committed:
self.search.refresh()
def _can_receive(self, payment):
if not payment:
return False
return payment.status == Payment.STATUS_PENDING
#
# SearchDialog Hooks
#
def create_filters(self):
self.set_text_field_columns(['description', 'identifier_str'])
self.search.set_query(self.executer_query)
# Date
date_filter = DateSearchFilter(_('Date:'))
date_filter.select(0)
columns = [Payment.due_date,
Payment.open_date,
Payment.paid_date]
self.add_filter(date_filter, columns=columns)
self.date_filter = date_filter
def get_columns(self):
return [IdentifierColumn('identifier', title=_('Payment #'), sorted=True),
SearchColumn('description', title=_('Description'),
data_type=str, expand=True),
SearchColumn('drawee', title=_('Drawee'),
data_type=str, width=200),
SearchColumn('due_date', title=_('Due Date'),
data_type=datetime.date, width=100),
SearchColumn('value', title=_('Value'),
data_type=currency, width=145), ]
def executer_query(self, store):
store_credit_method = PaymentMethod.get_by_name(
self.store, u'store_credit')
query = And(Payment.status == Payment.STATUS_PENDING,
Payment.method == store_credit_method)
return store.find(self.search_spec, query)
#
# Callbacks
#
def _on_selection_changed(self, results, selected):
can_click = bool(selected)
self._button_slave.button.set_sensitive(can_click)
def on_receive_button_clicked(self, button):
self._receive()
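
if __name__ == '__main__':  # pragma: no cover
    # Minimal sketch (illustrative only): opening this dialog standalone assumes
    # an already bootstrapped Stoq environment; api.new_store() and run_dialog()
    # are the same entry points used above, everything else is assumed set up.
    store = api.new_store()
    run_dialog(PaymentReceivingSearch, None, store)
    store.close()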
|
andrebellafronte/stoq
|
stoqlib/gui/search/paymentreceivingsearch.py
|
Python
|
gpl-2.0
| 5,433
|
[
"VisIt"
] |
073fdd4446d33176ad85637cff270db9f6c1d21e5b8625cc5cb17a5d4dd2b0f0
|
# -- TRAINING for [250, 300] window
from keras.layers import containers
from keras.models import Sequential, model_from_yaml
from keras.layers.core import Dense, Dropout, AutoEncoder, MaxoutDense, Activation, Merge
from keras.layers.advanced_activations import PReLU
from keras.layers.embeddings import Embedding
from keras.layers.noise import GaussianNoise
from keras.optimizers import SGD, RMSprop, Adagrad, Adam
from keras import regularizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
# %run ../viz/visualize.py
# %run ../viz/performance.py
from viz import *
from likelihood import *
MIN_PT, MAX_PT = (300, 300)
PLOT_DIR = './plots/ds/%s'
data = np.load('../../jet-simulations/unnormalized/small-unnormalized.npy')
print '{} jets before preselection'.format(data.shape[0])
signal, pt, mass, tau_21 = data['signal'], data['jet_pt'], data['jet_mass'], data['tau_21']
print '{} jets after preselection'.format(data.shape[0])
# _ = data['signal']
signal_pct = data['signal'].mean()
print '{}% signal'.format(signal_pct)
signal, pt, mass, tau_21 = data['signal'], data['jet_pt'], data['jet_mass'], data['tau_21']
signal = (signal == 1)
background = (signal == False)
# -- plot some kinematics...
n1, _, _ = plt.hist(pt[signal], bins=np.linspace(250, 300, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", linewidth=2)
n2, _, _ = plt.hist(pt[background], bins=np.linspace(250, 300, 100), histtype='step', color='blue', label='QCD', linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.xlabel(r'$p_T$ [GeV]')
plt.ylabel('Count')
plt.ylim(0, 1.2 * mx)
plt.title(r'Jet $p_T$ distribution, $p_T \in [250, 300]$ GeV' + '\n' +
r'$m_{\mathsf{jet}}\in [65, 95]$ GeV')
plt.legend()
plt.savefig(PLOT_DIR % 'unweighted-pt-distribution-[250-300].pdf')
plt.show()
# -- calculate the weights
weights = np.ones(data.shape[0])
# reference_distribution = np.random.uniform(250, 300, signal.sum())
reference_distribution = pt[background]
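# Reweight both classes so that their pt spectra match the shape of the QCD
# background distribution (background weights therefore end up close to unity).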
weights[signal] = get_weights(reference_distribution, pt[signal],
bins=np.linspace(250, 300, 200))
weights[background] = get_weights(reference_distribution, pt[background],
bins=np.linspace(250, 300, 200))
# weights[signal] = get_weights(pt[signal != 1], pt[signal],
# bins=np.concatenate((
# np.linspace(200, 300, 1000), np.linspace(300, 1005, 500)))
# )
# -- plot reweighted...
plt.hist(pt[signal], bins=np.linspace(250, 300, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=weights[signal], linewidth=2)
plt.hist(pt[background], bins=np.linspace(250, 300, 100), histtype='step', color='blue', label='QCD', weights=weights[background], linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.ylim(0, 1.2 * mx)
plt.xlabel(r'$p_T$ (GeV)')
plt.ylabel('Count')
plt.title(r'Weighted Jet $p_T$ distribution (matched to QCD)')
plt.legend()
plt.savefig(PLOT_DIR % 'weighted-pt-distribution[250-300].pdf')
plt.show()
# -- plot weighted mass
n1, _, _ = plt.hist(mass[signal], bins=np.linspace(65, 95, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=weights[signal], linewidth=2)
n2, _, _ = plt.hist(mass[background], bins=np.linspace(65, 95, 100), histtype='step', color='blue', label='QCD', weights=weights[background], linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.ylim(0, 1.2 * mx)
plt.xlabel(r'Jet $m$ [GeV]')
plt.ylabel('Weighted Count')
plt.title(r'Weighted Jet $m$ distribution ($p_T \in [250, 300]$ GeV, matched to QCD)' + '\n' +
r'$m_{\mathsf{jet}}\in [65, 95]$ GeV')
plt.legend()
plt.savefig(PLOT_DIR % 'weighted-mass-distribution[250-300].pdf')
plt.show()
# -- plot weighted tau_21
n1, _, _ = plt.hist(tau_21[signal], bins=np.linspace(0, 0.95, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=weights[signal], linewidth=2)
n2, _, _ = plt.hist(tau_21[background], bins=np.linspace(0, 0.95, 100), histtype='step', color='blue', label='QCD', weights=weights[background], linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.ylim(0, 1.2 * mx)
plt.xlabel(r'Jet $\tau_{21}$')
plt.ylabel('Weighted Count')
plt.title(r'Weighted Jet $\tau_{21}$ distribution ($p_T \in [250, 300]$ GeV, matched to QCD)' + '\n' +
r'$m_{\mathsf{jet}}\in [65, 95]$ GeV')
plt.legend()
plt.savefig(PLOT_DIR % 'weighted-tau21-distribution[250-300].pdf')
plt.show()
# -- likelihood
# mass_bins = np.linspace(64.99, 95.01, 10)#, np.linspace(35, 159.99, 30), np.array([160, 900])))
# tau_bins = np.concatenate((np.array([0, 0.1]), np.linspace(0.1001, 0.7999999999, 10), np.array([0.8, 1])))
# P_2d = Likelihood2D(mass_bins, tau_bins)
# P_2d.fit((mass[signal], tau_21[signal]), (mass[background], tau_21[background]), weights=(weights[signal], weights[background]))
# P_2d.fit((mass[signal], tau_21[signal]), (mass[background], tau_21[background]), weights=(weights[signal], weights[background]))
# mass_nsj_likelihood = P_2d.predict((mass, tau_21))
# log_likelihood = np.log(mass_nsj_likelihood)
# -- plot weighted mass + nsj likelihood
# plt.hist((log_likelihood[signal == True]), bins=np.linspace(-3.6, 6.5, 20), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=weights[signal])
# plt.hist((log_likelihood[signal == False]), bins=np.linspace(-3.6, 6.5, 20), histtype='step', color='blue', label='QCD')
# plt.xlabel(r'$\log(P(\mathrm{signal}) / P(\mathrm{background}))$')
# plt.ylabel('Weighted Count')
# plt.title(r'Weighted Jet $m, \tau_{21}$ likelihood distribution ($p_T$ matched to QCD)')
# plt.legend()
# plt.savefig(PLOT_DIR % 'weighted-mass-nsj-likelihood-distribution.pdf')
plt.show()
# y_dl = (np.load('./yhat-max.npy') + np.load('./yhat-SmallConv.npy'))/2
# y_dl = np.load('./yhat-max-unnormalized-small.npy')
# y_dl = np.load('./yhat-conv.npy')
y_dl = np.load('./DNN-yhat-max-unnormalized-small.npy')
# -- plot DL output
n1, _, _ = plt.hist(y_dl[signal], bins=np.linspace(0, 1, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=weights[signal], linewidth=2)
n2, _, _ = plt.hist(y_dl[background], bins=np.linspace(0, 1, 100), histtype='step', color='blue', label='QCD', weights=weights[background], linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.ylim(0, 1.2 * mx)
plt.xlabel(r'Deep Network Output')
plt.ylabel('Weighted Count')
plt.title(r'Weighted Deep Network distribution ($p_T \in [250, 300]$ matched to QCD)' + '\n' +
r'$m_{\mathsf{jet}}\in [65, 95]$ GeV')
plt.legend()
plt.savefig(PLOT_DIR % 'weighted-deep-net-distribution.pdf')
plt.show()
# DL_lh = Likelihood1D(np.linspace(0, 1, 60))
# DL_lh.fit(y_dl[signal], y_dl[background], weights=(weights[signal], weights[background]))
# DLlikelihood = DL_lh.predict(y_dl)
# from sklearn.ensemble import RandomForestClassifier
# rf = RandomForestClassifier(n_jobs=2)
# df = np.zeros((y_dl.shape[0], 3))
# df[:, 0] = y_dl
# df[:, 1] = mass
# df[:, 2] = tau_21
# rf.fit(df[:n_train], y_[:n_train], sample_weight=weights[:n_train])
# y_rf = rf.predict_proba(df[n_train:])
# from sklearn.ensemble import GradientBoostingClassifier
# lh_bins = np.linspace(-4, 6, 4)#, np.linspace(35, 159.99, 30), np.array([160, 900])))
# # lh_bins = np.concatenate((np.array([0, 0.1]), np.linspace(0.1001, 0.7999999999, 10), np.array([0.8, 1])))
# dnn_bins = np.linspace(0, 1, 100)
# CLH = Likelihood2D(lh_bins, dnn_bins)
# CLH.fit((tau_21[signal], y_dl[signal]), (log_likelihood[background], y_dl[background]), weights=(weights[signal], weights[background]))
# CLH.fit((log_likelihood[signal], y_dl[signal]), (log_likelihood[background], y_dl[background]), weights=(weights[signal], weights[background]))
# combined_likelihood = CLH.predict((log_likelihood, y_dl))
# # log_likelihood = np.log(mass_nsj_likelihood)
# ROC curves
discs = {}
add_curve(r'$\tau_{21}$', 'black', calculate_roc(signal, 2-tau_21, weights=weights), discs)
add_curve(r'Deep Network, trained on $p_T \in [250, 300]$ GeV', 'red', calculate_roc(signal, y_dl, weights=weights, bins=1000000), discs)
# add_curve(r'3D likelihood on DNN, $\tau_{21}$, and $m$', 'green', calculate_roc(signal, (combined_likelihood), weights=weights), discs)
# add_curve(r'$m_{\mathrm{jet}}, \tau_{21}$ (2D likelihood)', 'blue', calculate_roc(signal, (mass_nsj_likelihood), bins=1000000, weights=weights), discs)
fg = ROC_plotter(discs, title=r"$W' \rightarrow WZ$ vs. QCD ($p_T \in [250, 300]$ GeV, matched to QCD)" + '\n' +
r'$m_{\mathsf{jet}}\in [65, 95]$ GeV', min_eff = 0.2, max_eff=0.8, logscale=False)
fg.savefig(PLOT_DIR % 'combined-roc.pdf')
plt.show()
# print 'loading window data'
# data_ben = np.load('../ben-additions/ben-window.npy')
# print 'generating model from yaml'
# # -- build the model
# dl = model_from_yaml('./SLACNetBFboringNet2-final.yaml')
# print 'compiling...'
# dl.compile(loss='binary_crossentropy', optimizer='adam', class_mode='binary')
# print 'loading weights...'
# dl.load_weights('./SLACNetBFboringNet2-final-roc.h5')
# X_ben = data_ben['image'].reshape((data_ben.shape[0], 25**2))
# y_dl_ben = dl.predict(X_ben, verbose=True, batch_size=200).ravel()
# ben_likelihood = P_2d.predict((data_ben['jet_mass'], data_ben['tau_21']))
# signal_ben = data_ben['signal'] == 1
# print 'estimating LDA'
# from sklearn.lda import LDA
# clf = LDA()
# ylda = clf.fit_transform(X_ben, data_ben['signal'])
# discs = {}
# add_curve(r'$\tau_{21}$', 'black', calculate_roc(signal_ben, 2-data_ben['tau_21']), discs)
# add_curve(r'Deep Network, trained on $p_T \in [250, 300]$ GeV', 'red', calculate_roc(signal_ben, y_dl_ben, bins=1000000), discs)
# add_curve(r'LDA inside window', 'green', calculate_roc(signal_ben, (ylda)), discs)
# add_curve(r'$m_{\mathrm{jet}}, \tau_{21}$ (2D likelihood, outside window)', 'blue', calculate_roc(signal_ben, (ben_likelihood), bins=1000000), discs)
# fg = ROC_plotter(discs, title=r"$W' \rightarrow WZ$ vs. QCD $p_T \in [250, 255]$ GeV" + '\n' +
# r'$m_{\mathsf{jet}}\in [79, 81]$ GeV, $\tau_{21} \in [0.19, 0.21]$', min_eff = 0.2, max_eff=0.8, logscale=False)
# fg.savefig(PLOT_DIR % 'small-window-combined-roc.pdf')
# plt.show()
# from statsmodels.stats.weightstats import DescrStatsW
# ds = DescrStatsW(x, weights=weights)
import matplotlib.cm as cm
X_ = data['image'].reshape((data.shape[0], 25**2))
zr=np.zeros(25**2)
for i in xrange(625):
    print i
    zr[i] = np.corrcoef(X_[:, i], y_dl)[0, 1]
rec = zr.reshape((25, 25))
rec[np.isnan(rec)] = 0.0
# plt.imshow(rec, interpolation='nearest', cmap=custom_div_cmap(101), vmax = np.max(np.abs(rec)), vmin=-np.max(np.abs(rec)), extent=[-1,1,-1,1])
plt.imshow(rec, interpolation='nearest', cmap=cm.seismic, vmax = np.max(np.abs(rec)), vmin=-np.max(np.abs(rec)), extent=[-1,1,-1,1])
# plt.axis('off')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Pearson Correlation Coefficient')
plt.title(r'Correlation of Deep Network output with pixel activations.' + '\n' +
r'$p_T^W \in [250, 300]$ matched to QCD, $m_{W}\in [65, 95]$ GeV')
plt.xlabel(r'[Transformed] Pseudorapidity $(\eta)$')
plt.ylabel(r'[Transformed] Azimuthal Angle $(\phi)$')
plt.savefig(PLOT_DIR % 'pixel-activations-corr.pdf')
plt.show()
tau_windows = [(0.19, 0.21), (0.43, 0.44), (0.7, 0.72)]
mass_windows = [(65, 67), (79, 81), (93, 95)]
windows = []
import itertools
for tw, mw in itertools.product(tau_windows, mass_windows):
print 'working on nsj in [{}, {}] and mass in [{}, {}]'.format(*list(itertools.chain(tw, mw)))
print 'selection of cube'
small_window = (tau_21 < tw[1]) & (tau_21 > tw[0]) & (mass > mw[0]) & (mass < mw[1])
print 'selection of s'
swsignal = small_window & signal
print 'selection of b'
swbackground = small_window & background
print 'plotting difference'
rec = np.average(data['image'][swsignal], weights=weights[swsignal], axis=0) - np.average(data['image'][swbackground], axis=0)
windows.append({'image' : rec, 'nsj' : tw, 'mass' : mw})
# plt.imshow(rec, interpolation='nearest', cmap=custom_div_cmap(101), vmax = np.max(np.abs(rec)), vmin=-np.max(np.abs(rec)), extent=[-1,1,-1,1])
# cb = plt.colorbar()
# cb.ax.set_ylabel(r'$\Delta E_{\mathsf{normed}}$ deposition')
# plt.xlabel(r'[Transformed] Pseudorapidity $(\eta)$')
# plt.ylabel(r'[Transformed] Azimuthal Angle $(\phi)$')
# idfr = [mw[0], mw[1], tw[0], tw[1]]
# plt.title(
# r"Difference in per pixel normalized energy deposition" +
# '\n' +
# r"between $W' \rightarrow WZ$ and QCD in $m \in [%.2f, %.2f]$ GeV, $\tau_{21}\in [%.2f, %.2f]$ window." % (idfr[0], idfr[1], idfr[2], idfr[3]))
# plt.savefig(PLOT_DIR % 'im-diff-m{}-{}-nsj.{}.{}.pdf'.format(idfr[0], idfr[1], idfr[2], idfr[3]))
# plt.show()
max_diff = np.max([np.percentile(np.abs(w['image']), 99.99) for w in windows])
#model
for w in windows:
rec = w['image']
plt.imshow(rec, interpolation='nearest', cmap=custom_div_cmap(101), vmax = max_diff, vmin=-max_diff, extent=[-1,1,-1,1])
cb = plt.colorbar()
cb.ax.set_ylabel(r'$\Delta E_{\mathsf{normed}}$ deposition')
plt.xlabel(r'[Transformed] Pseudorapidity $(\eta)$')
plt.ylabel(r'[Transformed] Azimuthal Angle $(\phi)$')
plt.title(
r"Difference in per pixel normalized energy deposition between" +
'\n' +
r"$W' \rightarrow WZ$ and QCD in $m \in [%i, %i]$ GeV, $\tau_{21}\in [%.2f, %.2f]$ window." % (int(w['mass'][0]), int(w['mass'][1]), w['nsj'][0], w['nsj'][1]))
plt.savefig(PLOT_DIR % 'new-im-diff-m{}-{}-nsj.{}.{}.pdf'.format(int(w['mass'][0]), int(w['mass'][1]), w['nsj'][0], w['nsj'][1]))
plt.show()
window = (tau_21 < 0.8) & (tau_21 > 0.2)
pt, mass, tau_21, signal, background, y_dl = pt[window], mass[window], tau_21[window], signal[window], background[window], y_dl[window]
n_obs = int(window.sum())
ref = np.zeros((n_obs, 3))
ref[:, 0] = pt
ref[:, 1] = mass
ref[:, 2] = tau_21
cube = np.zeros((n_obs / 2, 3))
cube[:, 0] = np.random.uniform(250, 300, n_obs / 2)
cube[:, 1] = np.random.uniform(65, 95, n_obs / 2)
cube[:, 2] = np.random.uniform(0.2, 0.8, n_obs / 2)
binning = (
np.linspace(250, 300, 20),
np.linspace(65, 95, 19),
# np.concatenate(
# (
# np.array([0, 0.2]),
np.linspace(0.2, 0.8, 19)
# np.array([0, 0.1, 0.2, 0.4, 0.55, 0.7, 1])
# )
# )
)
# H_s, _ = np.histogramdd(ref[signal], bins=binning, normed=False)
# H_b, _ = np.histogramdd(ref[background], bins=binning, normed=False)
# H_ref, _ = np.histogramdd(cube, bins=binning, normed=False)
# flat_cube = H_ref / H_s
class NDWeights(object):
"""docstring for NDWeights"""
def __init__(self, bins):
super(NDWeights, self).__init__()
self.bins = bins
def fit(self, X, truth, reference):
H_s, _ = np.histogramdd(X[truth == 1], bins=self.bins, normed=False)
H_b, _ = np.histogramdd(X[truth == 0], bins=self.bins, normed=False)
H_ref, _ = np.histogramdd(reference, bins=self.bins, normed=False)
self.flat_cube_s = H_ref / H_s
self.flat_cube_b = H_ref / H_b
def predict(self, X, truth):
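        # Locate, for every observation, its bin index along each axis
        # (searchsorted gives the index of the right bin edge, hence the -1).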
ix = [(self.bins[i].searchsorted(X[:, i]) - 1) for i in xrange(len(self.bins))]
ix = np.array(ix).T
print ix
weights = []
for i, label in zip(ix, truth):
if label == 1:
w = np.copy(self.flat_cube_s[i[0]])
else:
w = np.copy(self.flat_cube_b[i[0]])
for j in xrange(1, len(self.bins)):
w = w[i[j]]
weights.append(w)
weights = np.array(weights)
weights[np.isinf(weights)] = weights[np.isfinite(weights)].max()
return weights
ndweights = NDWeights(binning)
ndweights.fit(ref, signal, cube)
cube_weights = ndweights.predict(ref, signal)
cube_weights[np.isinf(cube_weights)] = cube_weights[np.isfinite(cube_weights)].max()
# -- plot reweighted...
n1, _, _ = plt.hist(pt[signal], bins=np.linspace(250, 300, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=cube_weights[signal], linewidth=2)
n2, _, _ = plt.hist(pt[signal == False], bins=np.linspace(250, 300, 100), histtype='step', color='blue', label='QCD', weights=cube_weights[signal==False], linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.xlabel(r'$p_T$ (GeV)')
plt.ylabel('Count')
plt.ylim(0, 1.2 * mx)
plt.title(r'Weighted Jet $p_T$ distribution in $(p_T, m, \tau_{21})$ flat hypercube')
plt.legend()
plt.savefig(PLOT_DIR % 'weighted-pt-distribution[250-300]-cube.pdf')
plt.show()
# -- plot weighted mass
n1, _, _ = plt.hist(mass[signal == True], bins=np.linspace(65, 95, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=cube_weights[signal], linewidth=2)
n2, _, _ = plt.hist(mass[signal == False], bins=np.linspace(65, 95, 100), histtype='step', color='blue', label='QCD', weights=cube_weights[signal==False], linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.ylim(0, 1.2 * mx)
plt.xlabel(r'Jet $m$ [GeV]')
plt.ylabel('Count')
plt.title(r'Weighted Jet $m$ distribution in $(p_T, m, \tau_{21})$ flat hypercube')
plt.legend()
plt.savefig(PLOT_DIR % 'weighted-mass-distribution[250-300]-cube.pdf')
plt.show()
# -- plot weighted tau_21
n1, _, _ = plt.hist(tau_21[signal == True], bins=np.linspace(0.2, 0.8, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=cube_weights[signal], linewidth=2)
n2, _, _ = plt.hist(tau_21[signal == False], bins=np.linspace(0.2, 0.8, 100), histtype='step', color='blue', label='QCD', weights=cube_weights[signal==False], linewidth=2)
mx = max(np.max(n1), np.max(n2))
plt.ylim(0, 1.2 * mx)
plt.xlabel(r'Jet $\tau_{21}$')
plt.ylabel('Count')
plt.title(r'Weighted Jet $\tau_{21}$ distribution in $(p_T, m, \tau_{21})$ flat hypercube')
plt.legend()
plt.savefig(PLOT_DIR % 'weighted-tau21-distribution[250-300]-cube.pdf')
plt.show()
# P_mass = Likelihood1D(np.concatenate((np.array([0, 25]), np.linspace(25, 35, 5), np.linspace(35, 160, 20), np.array([160, 900]))))
# P_mass.fit(mass[:n_train][signal[:n_train] == 1], mass[:n_train][signal[:n_train] == 0], weights=(cube_weights[:n_train][signal[:n_train] == 1], cube_weights[:n_train][signal[:n_train] == 0]))
# mass_likelihood = P_mass.predict(mass)
# # -- plot weighted mass likelihood
# plt.hist((mass_likelihood[signal == True]), bins=np.linspace(0, 10, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=cube_weights[signal])
# plt.hist((mass_likelihood[signal == False]), bins=np.linspace(0, 10, 100), histtype='step', color='blue', label='QCD')
# plt.xlabel(r'$P(\mathrm{signal}) / P(\mathrm{background})$')
# plt.ylabel('Count')
# plt.title(r'Weighted Jet $m$ likelihood distribution ($p_T$ matched to QCD)')
# plt.legend()
# plt.savefig(PLOT_DIR % 'weighted-mass-likelihood-distribution.pdf')
plt.show()
mass_bins = np.linspace(64.99, 95.01, 10)#, np.linspace(35, 159.99, 30), np.array([160, 900])))
tau_bins = np.linspace(0.2, 0.8, 10)
P_2d = Likelihood2D(mass_bins, tau_bins)
P_2d.fit((mass[signal], tau_21[signal]), (mass[signal == False], tau_21[signal == False]), weights=(cube_weights[signal], cube_weights[signal == False]))
P_2d.fit((mass[signal], tau_21[signal]), (mass[signal == False], tau_21[signal == False]), weights=(cube_weights[signal], cube_weights[signal == False]))
mass_nsj_likelihood = P_2d.predict((mass, tau_21))
log_likelihood = np.log(mass_nsj_likelihood)
# -- plot weighted mass + nsj likelihood
# plt.hist((log_likelihood[signal == True]), bins=np.linspace(-0.5, 0.5, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=cube_weights[signal], linewidth=2)
# plt.hist((log_likelihood[signal == False]), bins=np.linspace(-0.5, 0.5, 100), histtype='step', color='blue', label='QCD', weights=cube_weights[background], linewidth=2)
# plt.xlabel(r'$\log P(\mathrm{signal}) / P(\mathrm{background})$')
# plt.ylabel('Count')
# plt.title(r'Weighted Jet $m, \tau_{21}$ likelihood distribution in $(p_T, m, \tau_{21})$ flat hypercube')
# plt.legend()
# plt.savefig(PLOT_DIR % 'weighted-mass-nsj-likelihood-distribution-cube.pdf')
plt.show()
# y_dl = np.load('./yhat.npy')
# # -- plot DL output
# plt.hist(y_dl[signal == True], bins=np.linspace(0, 1, 100), histtype='step', color='red', label=r"$W' \rightarrow WZ$", weights=cube_weights[signal], linewidth=2)
# plt.hist(y_dl[signal == False], bins=np.linspace(0, 1, 100), histtype='step', color='blue', label='QCD', weights=cube_weights[background], linewidth=2)
# # plt.ylim(0, 82000)
# plt.xlabel(r'Deep Network Output')
# plt.ylabel('Count')
# plt.title(r'Weighted Deep Network distribution in $(p_T, m, \tau_{21})$ flat hypercube')
# plt.legend()
# plt.savefig(PLOT_DIR % 'weighted-deep-net-distribution-cube.pdf')
plt.show()
cube_discs = {}
add_curve(r'$\tau_{21}$', 'black', calculate_roc(signal, 2-tau_21, weights=cube_weights), cube_discs)
add_curve(r'Deep Network, trained on $p_T \in [250, 300]$ GeV outside cube', 'red', calculate_roc(signal, y_dl, weights=cube_weights, bins = 1000000), cube_discs)
add_curve(r'$m_{\mathrm{jet}}, \tau_{21}$ (2D likelihood)', 'blue', calculate_roc(signal, (log_likelihood), bins=1000000, weights=cube_weights), cube_discs)
fg = ROC_plotter(cube_discs, title=r"$W' \rightarrow WZ$ vs. QCD $(p_T, m, \tau_{21})$ flat hypercube" + '\n' +
r'$m\in [65, 95]$ GeV, $p_T\in [250, 300]$ GeV, $\tau_{21}\in [0.2, 0.8]$.', min_eff = 0.2, max_eff=0.8, logscale=False)
fg.savefig(PLOT_DIR % 'roc-cube-outside.pdf')
plt.show()
sel = (data['tau_21'] > 0.2) & (data['tau_21'] < 0.8)
X_cube = data['image'][sel].reshape((sel.sum(), 25**2)).astype('float32')
# X_ = X_ * data['total_energy'][:, np.newaxis]
y_cube = data['signal'][sel].astype('float32')
# -- build the model
dl = Sequential()
# dl.add(Merge([raw, gaussian], mode='concat'))
# dl.add(Dense(1000, 512))
# dl.add(Dropout(0.1))
dl.add(MaxoutDense(625, 256, 10, init='he_normal'))
dl.add(Dropout(0.3))
dl.add(MaxoutDense(256, 128, 5, init='he_normal'))
dl.add(Dropout(0.1))
dl.add(Dense(128, 64, init='he_normal'))
dl.add(Activation('relu'))
dl.add(Dropout(0.3))
dl.add(Dense(64, 25, init='he_normal'))
dl.add(Activation('relu'))
dl.add(Dropout(0.1))
dl.add(Dense(25, 1, init='he_normal'))
dl.add(Activation('sigmoid'))
dl.compile(loss='binary_crossentropy', optimizer='adam', class_mode='binary')
dl.load_weights('./SLACNet-cube.h5')
# try:
# print 'Training!'
# h = dl.fit(X_cube, y_cube, batch_size=28, nb_epoch=20, show_accuracy=True,
# validation_split=0.5,
# callbacks = [
# EarlyStopping(verbose=True, patience=6, monitor='val_loss'),
# ModelCheckpoint('./SLACNet-cube.h5', monitor='val_loss', verbose=True, save_best_only=True)
# ],
# sample_weight=np.sqrt(cube_weights))
# except KeyboardInterrupt:
# print 'stop'
y_dl_cube = dl.predict(X_cube, verbose=True, batch_size=200).ravel()
def normalize_rows(x):
def norm1d(a):
return a / a.sum()
x = np.array([norm1d(r) for r in x])
return x
H, b_x, b_y = np.histogram2d(
mass[(signal == False)],
y_dl_cube[(signal == False)],
bins=(np.linspace(65, 95, 35), np.linspace(0, 1, 35)),
normed=True)
plt.imshow(np.flipud(normalize_rows(H.T)), extent=(65, 95, 0, 1), aspect='auto', interpolation='nearest')
plt.xlabel('QCD Jet Mass [GeV]')
plt.ylabel(r'Deep Network output')
plt.title(r'PDF of QCD Jet Mass, binned vs. Deep Network output' + '\n' +
r'Jet $p_T\in[250, 300]$ $\mathsf{GeV},\vert\eta\vert<2$, $m_{\mathsf{jet}}\in [65, 95]$ GeV')
cb = plt.colorbar()
cb.set_label(r'$P(\mathrm{mass} \vert \hat{y})$')
plt.savefig(PLOT_DIR % 'mass-dist-yhat-unweighted.pdf')
plt.show()
cube_discs = {}
add_curve(r'$\tau_{21}$', 'black', calculate_roc(signal, 2-tau_21, weights=cube_weights), cube_discs)
add_curve(r'Deep Network, trained on $p_T \in [250, 300]$ GeV outside cube', 'red', calculate_roc(signal, y_dl, weights=cube_weights, bins = 1000000), cube_discs)
add_curve(r'Deep Network, trained on $p_T \in [250, 300]$ GeV inside cube', 'purple', calculate_roc(signal, y_dl_cube, weights=cube_weights, bins = 1000000), cube_discs)
add_curve(r'$m_{\mathrm{jet}}, \tau_{21}$ (2D likelihood)', 'blue', calculate_roc(signal, (log_likelihood), bins=1000000, weights=cube_weights), cube_discs)
fg = ROC_plotter(cube_discs, title=r"$W' \rightarrow WZ$ vs. QCD $(p_T, m, \tau_{21})$ flat hypercube" + '\n' +
r'$m\in [65, 95]$ GeV, $p_T\in [250, 300]$ GeV, $\tau_{21}\in [0.2, 0.8]$.', min_eff = 0.2, max_eff=0.8, logscale=False)
fg.savefig(PLOT_DIR % 'roc-cube-inside.pdf')
plt.show()
|
ml-slac/deep-jets
|
training/plotscript.py
|
Python
|
mit
| 24,160
|
[
"Gaussian"
] |
8b10d457a85c89ca5ed97557936041c869dcf6f73cf908c673c5814ab5613583
|
"""The Iteration/Expression Tree (IET) hierarchy."""
import abc
import inspect
import numbers
from cached_property import cached_property
from collections import OrderedDict, namedtuple
from collections.abc import Iterable
import cgen as c
from devito.data import FULL
from devito.ir.equations import DummyEq
from devito.ir.support import (SEQUENTIAL, PARALLEL, PARALLEL_IF_ATOMIC,
PARALLEL_IF_PVT, VECTORIZED, AFFINE, COLLAPSED,
Property, Forward, detect_io)
from devito.symbolics import ListInitializer, FunctionFromPointer, as_symbol, ccode
from devito.tools import Signer, as_tuple, filter_ordered, filter_sorted, flatten
from devito.types.basic import AbstractFunction, Indexed, LocalObject, Symbol
__all__ = ['Node', 'Block', 'Expression', 'Element', 'Callable', 'Call', 'Conditional',
'Iteration', 'List', 'LocalExpression', 'Section', 'TimedList', 'Prodder',
'MetaCall', 'PointerCast', 'ForeignExpression', 'HaloSpot', 'IterationTree',
'ExpressionBundle', 'AugmentedExpression', 'Increment', 'Return', 'While',
'ParallelIteration', 'ParallelBlock', 'Dereference', 'Lambda', 'SyncSpot',
'PragmaList', 'DummyExpr', 'BlankLine', 'ParallelTree']
# First-class IET nodes
class Node(Signer):
__metaclass__ = abc.ABCMeta
is_Node = True
is_Block = False
is_Iteration = False
is_IterationFold = False
is_While = False
is_Expression = False
is_Increment = False
is_ForeignExpression = False
is_LocalExpression = False
is_Callable = False
is_Lambda = False
is_ElementalFunction = False
is_Call = False
is_List = False
is_PointerCast = False
is_Dereference = False
is_Element = False
is_Section = False
is_HaloSpot = False
is_ExpressionBundle = False
is_ParallelIteration = False
is_ParallelBlock = False
is_SyncSpot = False
_traversable = []
"""
:attr:`_traversable`. The traversable fields of the Node; that is, fields
walked over by a Visitor. All arguments in __init__ whose name
appears in this list are treated as traversable fields.
"""
def __new__(cls, *args, **kwargs):
obj = super(Node, cls).__new__(cls)
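        # Capture every constructor argument (positional, keyword and defaulted)
        # in obj._args, so that _rebuild() can reconstruct an equivalent Node.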
argnames, _, _, defaultvalues, _, _, _ = inspect.getfullargspec(cls.__init__)
try:
defaults = dict(zip(argnames[-len(defaultvalues):], defaultvalues))
except TypeError:
# No default kwarg values
defaults = {}
obj._args = {k: v for k, v in zip(argnames[1:], args)}
obj._args.update(kwargs.items())
obj._args.update({k: defaults.get(k) for k in argnames[1:] if k not in obj._args})
return obj
def _rebuild(self, *args, **kwargs):
"""Reconstruct ``self``."""
handle = self._args.copy() # Original constructor arguments
argnames = [i for i in self._traversable if i not in kwargs]
handle.update(OrderedDict([(k, v) for k, v in zip(argnames, args)]))
handle.update(kwargs)
return type(self)(**handle)
@cached_property
def ccode(self):
"""
Generate C code.
This is a shorthand for
.. code-block:: python
from devito.ir.iet import CGen
CGen().visit(self)
"""
from devito.ir.iet.visitors import CGen
return CGen().visit(self)
@property
def view(self):
"""A representation of the IET rooted in ``self``."""
from devito.ir.iet.visitors import printAST
return printAST(self)
@property
def children(self):
"""Return the traversable children."""
return tuple(getattr(self, i) for i in self._traversable)
@property
def args(self):
"""Arguments used to construct the Node."""
return self._args.copy()
@property
def args_frozen(self):
"""Arguments used to construct the Node that cannot be traversed."""
return {k: v for k, v in self.args.items() if k not in self._traversable}
def __str__(self):
return str(self.ccode)
@abc.abstractproperty
def functions(self):
"""All AbstractFunction objects used by this node."""
raise NotImplementedError()
@abc.abstractproperty
def free_symbols(self):
"""All Symbol objects used by this node."""
raise NotImplementedError()
@abc.abstractproperty
def defines(self):
"""All Symbol objects defined by this node."""
raise NotImplementedError()
def _signature_items(self):
return (str(self.ccode),)
class ExprStmt(object):
"""
A mixin for Nodes that represent C expression statements, which are expressions
followed by a semicolon. For example, the lines:
* i = 0;
* j = a[i] + 8;
* int a = 3;
* foo(b)
are all expression statements.
Notes
-----
An ExprStmt does *not* have children Nodes.
"""
pass
class List(Node):
"""A sequence of Nodes."""
is_List = True
_traversable = ['body']
def __init__(self, header=None, body=None, footer=None):
body = as_tuple(body)
if len(body) == 1 and all(type(i) == List for i in [self, body[0]]):
# De-nest Lists
#
# Note: to avoid disgusting metaclass voodoo (due to
# https://stackoverflow.com/questions/56514586/\
# arguments-of-new-and-init-for-metaclasses)
# we change the internal state here in __init__
# rather than in __new__
self._args['header'] = self.header = as_tuple(header) + body[0].header
self._args['body'] = self.body = body[0].body
self._args['footer'] = self.footer = as_tuple(footer) + body[0].footer
else:
self.header = as_tuple(header)
self.body = as_tuple(body)
self.footer = as_tuple(footer)
def __repr__(self):
return "<%s (%d, %d, %d)>" % (self.__class__.__name__, len(self.header),
len(self.body), len(self.footer))
@property
def functions(self):
return ()
@property
def free_symbols(self):
return ()
@property
def defines(self):
return ()
class Block(List):
"""A sequence of Nodes, wrapped in a block {...}."""
is_Block = True
def __init__(self, header=None, body=None, footer=None):
self.header = as_tuple(header)
self.body = as_tuple(body)
self.footer = as_tuple(footer)
class Element(Node):
"""
A generic node. Can be a comment, a statement, ... or anything that cannot
be expressed through an IET type.
"""
is_Element = True
def __init__(self, element):
assert isinstance(element, (c.Comment, c.Statement, c.Value, c.Initializer,
c.Pragma, c.Line, c.Assign, c.POD))
self.element = element
def __repr__(self):
return "Element::\n\t%s" % (self.element)
class Call(ExprStmt, Node):
"""
A function call.
Parameters
----------
name : str or FunctionFromPointer
The called function.
arguments : list of Basic, optional
The objects in input to the function call.
retobj : Symbol or Indexed, optional
The object the return value of the Call is assigned to.
is_indirect : bool, optional
If True, the object represents an indirect function call. The emitted
code will be `name, arg1, ..., argN` rather than `name(arg1, ..., argN)`.
Defaults to False.
"""
is_Call = True
def __init__(self, name, arguments=None, retobj=None, is_indirect=False):
if isinstance(name, FunctionFromPointer):
self.base = name.base
else:
self.base = None
self.name = str(name)
self.arguments = as_tuple(arguments)
self.retobj = retobj
self.is_indirect = is_indirect
def __repr__(self):
ret = "" if self.retobj is None else "%s = " % self.retobj
return "%sCall::\n\t%s(...)" % (ret, self.name)
@property
def functions(self):
retval = [i.function for i in self.arguments
if isinstance(i, (AbstractFunction, Indexed, LocalObject))]
if self.base is not None:
retval.append(self.base.function)
if self.retobj is not None:
retval.append(self.retobj.function)
return tuple(retval)
@property
def children(self):
return tuple(i for i in self.arguments if isinstance(i, (Call, Lambda)))
@cached_property
def free_symbols(self):
free = set()
for i in self.arguments:
if isinstance(i, numbers.Number):
continue
elif isinstance(i, AbstractFunction):
if i.is_ArrayBasic:
free.add(i)
else:
# Always passed by _C_name since what actually needs to be
# provided is the pointer to the corresponding C struct
free.add(i._C_symbol)
else:
free.update(i.free_symbols)
if self.base is not None:
free.add(self.base)
if self.retobj is not None:
free.update(self.retobj.free_symbols)
return free
@property
def defines(self):
ret = ()
if self.base is not None:
ret += (self.base,)
if self.retobj is not None:
ret += (self.retobj,)
return ret
class Expression(ExprStmt, Node):
"""
A node encapsulating a ClusterizedEq.
Parameters
----------
expr : ClusterizedEq
The encapsulated expression.
pragmas : cgen.Pragma or list of cgen.Pragma, optional
A bag of pragmas attached to this Expression.
"""
is_Expression = True
def __init__(self, expr, pragmas=None):
self.__expr_finalize__(expr, pragmas)
def __expr_finalize__(self, expr, pragmas):
"""Finalize the Expression initialization."""
self._expr = expr
self._pragmas = as_tuple(pragmas)
def __repr__(self):
return "<%s::%s>" % (self.__class__.__name__,
filter_ordered([f.func for f in self.functions]))
@property
def expr(self):
return self._expr
@property
def pragmas(self):
return self._pragmas
@property
def dtype(self):
return self.expr.dtype
@property
def output(self):
"""The Symbol/Indexed this Expression writes to."""
return self.expr.lhs
@cached_property
def reads(self):
"""The Functions read by the Expression."""
return detect_io(self.expr, relax=True)[0]
@cached_property
def write(self):
"""The Function written by the Expression."""
return self.expr.lhs.base.function
@cached_property
def dimensions(self):
retval = flatten(i.indices for i in self.functions if i.is_Indexed)
return tuple(filter_ordered(retval))
@property
def is_scalar(self):
"""True if a scalar expression, False otherwise."""
return self.expr.lhs.is_Symbol
@property
def is_tensor(self):
"""True if a tensor expression, False otherwise."""
return not self.is_scalar
@property
def is_definition(self):
"""
True if it is an assignment, False otherwise
"""
return ((self.is_scalar and not self.is_Increment) or
(self.is_tensor and isinstance(self.expr.rhs, ListInitializer)))
@property
def defines(self):
return (self.write,) if self.is_definition else ()
@property
def free_symbols(self):
return tuple(self.expr.free_symbols)
@cached_property
def functions(self):
functions = list(self.reads)
if self.write is not None:
functions.append(self.write)
return tuple(filter_ordered(functions))
class AugmentedExpression(Expression):
"""A node representing an augmented assignment, such as +=, -=, &=, ...."""
is_Increment = True
def __init__(self, expr, op, pragmas=None):
super(AugmentedExpression, self).__init__(expr, pragmas=pragmas)
self.op = op
class Increment(AugmentedExpression):
"""Shortcut for ``AugmentedExpression(expr, '+'), since it's so widely used."""
def __init__(self, expr, pragmas=None):
super(Increment, self).__init__(expr, '+', pragmas=pragmas)
class Iteration(Node):
"""
Implement a for-loop over nodes.
Parameters
----------
nodes : Node or list of Node
The for-loop body.
dimension : Dimension
The Dimension over which the for-loop iterates.
limits : expr-like or 3-tuple
If an expression, it represents the for-loop max point; in this case, the
min point is 0 and the step increment is unitary. If a 3-tuple, the
format is ``(min point, max point, stepping)``.
direction: IterationDirection, optional
The for-loop direction. Accepted:
        - ``Forward``: i += stepping (default)
- ``Backward``: i -= stepping
properties : Property or list of Property, optional
Iteration decorators, denoting properties such as parallelism.
pragmas : cgen.Pragma or list of cgen.Pragma, optional
A bag of pragmas attached to this Iteration.
uindices : DerivedDimension or list of DerivedDimension, optional
An uindex is an additional iteration variable defined by the for-loop. The
for-loop bounds are independent of all ``uindices`` (hence the name uindex,
or "unbounded index"). An uindex must have ``dimension`` as its parent.
"""
is_Iteration = True
_traversable = ['nodes']
def __init__(self, nodes, dimension, limits, direction=None, properties=None,
pragmas=None, uindices=None):
self.nodes = as_tuple(nodes)
self.dim = dimension
self.index = self.dim.name
self.direction = direction or Forward
# Generate loop limits
if isinstance(limits, Iterable):
assert(len(limits) == 3)
self.limits = tuple(limits)
elif self.dim.is_Incr:
self.limits = (self.dim.symbolic_min, limits, self.dim.step)
else:
self.limits = (0, limits, 1)
# Track this Iteration's properties, pragmas and unbounded indices
properties = as_tuple(properties)
        assert all(i in Property._KNOWN for i in properties)
self.properties = as_tuple(filter_sorted(properties))
self.pragmas = as_tuple(pragmas)
self.uindices = as_tuple(uindices)
assert all(i.is_Derived and self.dim in i._defines for i in self.uindices)
def __repr__(self):
properties = ""
if self.properties:
properties = [str(i) for i in self.properties]
properties = "WithProperties[%s]::" % ",".join(properties)
index = self.index
if self.uindices:
index += '[%s]' % ','.join(i.name for i in self.uindices)
return "<%sIteration %s; %s>" % (properties, index, self.limits)
@property
def is_Affine(self):
return AFFINE in self.properties
@property
def is_Sequential(self):
return SEQUENTIAL in self.properties
@property
def is_Parallel(self):
return PARALLEL in self.properties
@property
def is_ParallelAtomic(self):
return PARALLEL_IF_ATOMIC in self.properties
@property
def is_ParallelPrivate(self):
return PARALLEL_IF_PVT in self.properties
@property
def is_ParallelRelaxed(self):
return any([self.is_Parallel, self.is_ParallelAtomic, self.is_ParallelPrivate])
@property
def is_Vectorized(self):
return VECTORIZED in self.properties
@property
def ncollapsed(self):
for i in self.properties:
if i.name == 'collapsed':
return i.val
return 0
@property
def symbolic_bounds(self):
"""A 2-tuple representing the symbolic bounds [min, max] of the Iteration."""
_min = self.limits[0]
_max = self.limits[1]
try:
_min = as_symbol(_min)
except TypeError:
# A symbolic expression
pass
try:
_max = as_symbol(_max)
except TypeError:
# A symbolic expression
pass
return (_min, _max)
@property
def symbolic_size(self):
"""The symbolic size of the Iteration."""
return self.symbolic_bounds[1] - self.symbolic_bounds[0] + 1
@property
def symbolic_min(self):
"""The symbolic min of the Iteration."""
return self.symbolic_bounds[0]
@property
def symbolic_max(self):
"""The symbolic max of the Iteration."""
return self.symbolic_bounds[1]
def bounds(self, _min=None, _max=None):
"""
The bounds [min, max] of the Iteration, as numbers if min/max are supplied,
as symbols otherwise.
"""
_min = _min if _min is not None else self.limits[0]
_max = _max if _max is not None else self.limits[1]
return (_min, _max)
@property
def step(self):
"""The step value."""
return self.limits[2]
def size(self, _min=None, _max=None):
"""The size of the iteration space if _min/_max are supplied, None otherwise."""
_min, _max = self.bounds(_min, _max)
return _max - _min + 1
@property
def functions(self):
"""All Functions appearing in the Iteration header."""
return ()
@property
def free_symbols(self):
"""All Symbols appearing in the Iteration header."""
return tuple(self.symbolic_min.free_symbols) \
+ tuple(self.symbolic_max.free_symbols) \
+ self.uindices \
+ tuple(flatten(i.symbolic_min.free_symbols for i in self.uindices)) \
+ tuple(flatten(i.symbolic_incr.free_symbols for i in self.uindices))
@property
def defines(self):
"""All Symbols defined in the Iteration header."""
return self.dimensions
@property
def dimensions(self):
"""All Dimensions appearing in the Iteration header."""
return tuple(self.dim._defines) + self.uindices
@property
def write(self):
"""All Functions written to in this Iteration"""
return []
class While(Node):
"""
Implement a while-loop.
Parameters
----------
condition : sympy.Function or sympy.Relation or bool
The while-loop exit condition.
body : Node or list of Node, optional
        The while-loop body.
"""
is_While = True
_traversable = ['body']
def __init__(self, condition, body=None):
self.condition = condition
self.body = as_tuple(body)
def __repr__(self):
return "<While %s; %d>" % (self.condition, len(self.body))
class Callable(Node):
"""
A callable function.
Parameters
----------
name : str
The name of the callable.
body : Node or list of Node
The Callable body.
retval : str
The return type of Callable.
parameters : list of Basic, optional
The objects in input to the Callable.
prefix : list of str, optional
Qualifiers to prepend to the Callable signature. Defaults to ``('static',
'inline')``.
"""
is_Callable = True
_traversable = ['body']
def __init__(self, name, body, retval, parameters=None, prefix=('static', 'inline')):
self.name = name
self.body = as_tuple(body)
self.retval = retval
self.prefix = as_tuple(prefix)
self.parameters = as_tuple(parameters)
def __repr__(self):
return "%s[%s]<%s; %s>" % (self.__class__.__name__, self.name, self.retval,
",".join([i._C_typename for i in self.parameters]))
@property
def functions(self):
return tuple(i for i in self.parameters if isinstance(i, AbstractFunction))
@property
def free_symbols(self):
return ()
@property
def defines(self):
return ()
class Conditional(Node):
"""
A node to express if-then-else blocks.
Parameters
----------
condition : expr-like
The if condition.
then_body : Node or list of Node
The then body.
else_body : Node or list of Node
The else body.
"""
is_Conditional = True
_traversable = ['then_body', 'else_body']
def __init__(self, condition, then_body, else_body=None):
self.condition = condition
self.then_body = as_tuple(then_body)
self.else_body = as_tuple(else_body)
def __repr__(self):
if self.else_body:
return "<[%s] ? [%s] : [%s]>" %\
(ccode(self.condition), repr(self.then_body), repr(self.else_body))
else:
return "<[%s] ? [%s]" % (ccode(self.condition), repr(self.then_body))
@property
def functions(self):
ret = []
for i in self.condition.free_symbols:
try:
ret.append(i.function)
except AttributeError:
pass
return tuple(ret)
@property
def free_symbols(self):
return tuple(self.condition.free_symbols)
@property
def defines(self):
return ()
# Second level IET nodes
class TimedList(List):
"""
Wrap a Node with C-level timers.
Parameters
----------
timer : Timer
The Timer used by the TimedList.
lname : str
A unique name for the timed code block.
body : Node or list of Node
The TimedList body.
"""
def __init__(self, timer, lname, body):
self._name = lname
self._timer = timer
super().__init__(header=c.Line('START_TIMER(%s)' % lname),
body=body,
footer=c.Line('STOP_TIMER(%s,%s)' % (lname, timer.name)))
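
    # The two class methods below supply the (name, body) pairs of the C
    # preprocessor macros implementing START_TIMER/STOP_TIMER via gettimeofday.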
@classmethod
def _start_timer_header(cls):
return ('START_TIMER(S)', ('struct timeval start_ ## S , end_ ## S ; '
'gettimeofday(&start_ ## S , NULL);'))
@classmethod
def _stop_timer_header(cls):
return ('STOP_TIMER(S,T)', ('gettimeofday(&end_ ## S, NULL); T->S += (double)'
'(end_ ## S .tv_sec-start_ ## S.tv_sec)+(double)'
'(end_ ## S .tv_usec-start_ ## S .tv_usec)/1000000;'))
@property
def name(self):
return self._name
@property
def timer(self):
return self._timer
@property
def free_symbols(self):
return ()
class PointerCast(ExprStmt, Node):
"""
A node encapsulating a cast of a raw C pointer to a multi-dimensional array.
"""
is_PointerCast = True
def __init__(self, function, obj=None, alignment=True):
self.function = function
self.obj = obj
self.alignment = alignment
def __repr__(self):
return "<PointerCast(%s)>" % self.function
@property
def castshape(self):
"""
The shape used in the left-hand side and right-hand side of the PointerCast.
"""
if self.function.is_ArrayBasic:
return self.function.symbolic_shape[1:]
else:
return tuple(self.function._C_get_field(FULL, d).size
for d in self.function.dimensions[1:])
@property
def functions(self):
return (self.function,)
@property
def free_symbols(self):
"""
The symbols required by the PointerCast.
This may include DiscreteFunctions as well as Dimensions.
"""
f = self.function
if f.is_ArrayBasic:
return tuple(flatten(s.free_symbols for s in f.symbolic_shape[1:]))
else:
return ()
@property
def defines(self):
return ()
class Dereference(ExprStmt, Node):
"""
A node encapsulating a dereference from a `pointer` to a `pointee`.
The following cases are supported:
* `pointer` is a PointerArray and `pointee` is an Array (typical case).
* `pointer` is an ArrayObject representing a pointer to a C struct while
`pointee` is a field in `pointer`.
"""
is_Dereference = True
def __init__(self, pointee, pointer):
self.pointee = pointee
self.pointer = pointer
def __repr__(self):
return "<Dereference(%s,%s)>" % (self.pointee, self.pointer)
@property
def functions(self):
return (self.pointee, self.pointer)
@property
def free_symbols(self):
return ((self.pointee.indexed.label, self.pointer.indexed.label) +
tuple(flatten(i.free_symbols for i in self.pointee.symbolic_shape[1:])) +
tuple(self.pointer.free_symbols))
@property
def defines(self):
return (self.pointee,)
class LocalExpression(Expression):
"""
A node encapsulating a SymPy equation which also defines its LHS.
"""
is_LocalExpression = True
@cached_property
def write(self):
return self.expr.lhs.function
@property
def defines(self):
return (self.write, )
class ForeignExpression(Expression):
"""A node representing a SymPy FunctionFromPointer expression."""
is_ForeignExpression = True
def __init__(self, expr, dtype, **kwargs):
self._dtype = dtype
self._is_increment = kwargs.get('is_Increment', False)
self.__expr_finalize__(expr)
@property
def dtype(self):
return self._dtype
@property
def output(self):
return self.expr.base
@property
def write(self):
if isinstance(self.output, (Symbol, Indexed)):
return self.output.function
else:
return None
@property
def is_Increment(self):
return self._is_increment
@property
def is_scalar(self):
return False
@property
def is_tensor(self):
return False
class Lambda(Node):
"""
A callable C++ lambda function. Several syntaxes are possible; here we
implement one of the common ones:
[captures](parameters){body}
For more info about C++ lambda functions:
https://en.cppreference.com/w/cpp/language/lambda
Parameters
----------
body : Node or list of Node
The lambda function body.
captures : list of str or expr-like, optional
The captures of the lambda function.
parameters : list of Basic or expr-like, optional
The objects in input to the lambda function.
"""
is_Lambda = True
_traversable = ['body']
def __init__(self, body, captures=None, parameters=None):
self.body = as_tuple(body)
self.captures = as_tuple(captures)
self.parameters = as_tuple(parameters)
def __repr__(self):
return "Lambda[%s](%s)" % (self.captures, self.parameters)
@cached_property
def free_symbols(self):
return set(self.parameters)
@property
def defines(self):
return ()
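# Editor's note: a hypothetical sketch showing how the attributes of a Lambda
# map onto the C++ form [captures](parameters){body}; the capture list and the
# parameter symbol are assumptions made only for illustration.
#
#     from sympy import Symbol
#     i = Symbol('i')
#     lam = Lambda(body=[], captures=['&'], parameters=[i])
#     repr(lam)   # -> "Lambda[('&',)]((i,))"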
class Section(List):
"""
A sequence of nodes.
Functionally, a Section is identical to a List; that is,
they generate the same code (i.e., their ``body``). However, a Section should
    be used to define sub-trees that, for some reason, have relevance within
the IET (e.g., groups of statements that logically represent the same
computation unit).
"""
is_Section = True
def __init__(self, name, body=None, is_subsection=False):
super(Section, self).__init__(body=body)
self.name = name
self.is_subsection = is_subsection
def __repr__(self):
return "<Section (%s)>" % self.name
@property
def roots(self):
return self.body
class ExpressionBundle(List):
"""
A sequence of Expressions.
"""
is_ExpressionBundle = True
def __init__(self, ispace, ops, traffic, body=None):
super(ExpressionBundle, self).__init__(body=body)
self.ispace = ispace
self.ops = ops
self.traffic = traffic
def __repr__(self):
return "<ExpressionBundle (%d)>" % len(self.exprs)
@property
def exprs(self):
return self.body
@property
def size(self):
return self.ispace.size
class Prodder(Call):
"""
A Call promoting asynchronous progress, to minimize latency.
Example use cases:
* To trigger asynchronous progress in the case of distributed-memory
parallelism.
* Software prefetching.
"""
def __init__(self, name, arguments=None, single_thread=False, periodic=False):
super(Prodder, self).__init__(name, arguments)
# Prodder properties
self._single_thread = single_thread
self._periodic = periodic
@property
def single_thread(self):
return self._single_thread
@property
def periodic(self):
return self._periodic
class PragmaList(List):
"""
A floating sequence of pragmas.
"""
def __init__(self, pragmas, functions=None, **kwargs):
super().__init__(header=pragmas)
self._functions = as_tuple(functions)
@property
def pragmas(self):
return self.header
@property
def functions(self):
return self._functions
@property
def free_symbols(self):
return self._functions
class ParallelIteration(Iteration):
"""
Implement a parallel for-loop.
"""
is_ParallelIteration = True
def __init__(self, *args, **kwargs):
pragmas, kwargs, properties = self._make_header(**kwargs)
super().__init__(*args, pragmas=pragmas, properties=properties, **kwargs)
@classmethod
def _make_header(cls, **kwargs):
construct = cls._make_construct(**kwargs)
clauses = cls._make_clauses(**kwargs)
header = c.Pragma(' '.join([construct] + clauses))
# Extract the Iteration Properties
properties = cls._process_properties(**kwargs)
# Drop the unrecognised or unused kwargs
kwargs = cls._process_kwargs(**kwargs)
return (header,), kwargs, properties
@classmethod
def _make_construct(cls, **kwargs):
# To be overridden by subclasses
raise NotImplementedError
@classmethod
def _make_clauses(cls, **kwargs):
return []
@classmethod
def _process_properties(cls, **kwargs):
properties = as_tuple(kwargs.get('properties'))
properties += (COLLAPSED(kwargs.get('ncollapse', 1)),)
return properties
@classmethod
def _process_kwargs(cls, **kwargs):
kwargs.pop('pragmas', None)
kwargs.pop('properties', None)
# Recognised clauses
kwargs.pop('ncollapse', None)
kwargs.pop('reduction', None)
return kwargs
@cached_property
def collapsed(self):
ret = [self]
for i in range(self.ncollapsed - 1):
ret.append(ret[i].nodes[0])
assert all(i.is_Iteration for i in ret)
return tuple(ret)
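# Editor's note: a hedged summary of the hook pattern above: subclasses are
# expected to override _make_construct (and, optionally, _make_clauses) so that
# _make_header can assemble the final pragma; e.g., a hypothetical OpenMP
# subclass could return something like "omp for" from _make_construct.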
class ParallelTree(List):
"""
    This class groups together a parallel for-loop with some setup
statements, for example:
.. code-block:: C
int chunk_size = ...
#pragma parallel for ... schedule(..., chunk_size)
for (int i = ...)
{
...
}
"""
_traversable = ['prefix', 'body']
def __init__(self, prefix, body, nthreads=None):
# Normalize and sanity-check input
body = as_tuple(body)
assert len(body) == 1 and body[0].is_Iteration
self.prefix = as_tuple(prefix)
self.nthreads = nthreads
super().__init__(body=body)
def __getattr__(self, name):
if 'body' in self.__dict__:
# During unpickling, `__setattr__` calls `__getattr__(..., 'body')`,
# which would cause infinite recursion if we didn't check whether
# 'body' is present or not
return getattr(self.body[0], name)
raise AttributeError
@property
def functions(self):
return as_tuple(self.nthreads)
@property
def root(self):
return self.body[0]
class ParallelBlock(Block):
"""
A sequence of Nodes, wrapped in a parallel block {...}.
"""
is_ParallelBlock = True
def __init__(self, body, private=None):
# Normalize and sanity-check input. A bit ugly, but it makes everything
# much simpler to manage and reconstruct
body = as_tuple(body)
assert len(body) == 1
body = body[0]
assert body.is_List
if isinstance(body, ParallelTree):
partree = body
elif body.is_List:
assert len(body.body) == 1 and isinstance(body.body[0], ParallelTree)
assert len(body.footer) == 0
partree = body.body[0]
partree = partree._rebuild(prefix=(List(header=body.header,
body=partree.prefix)))
header = self._make_header(partree.nthreads, private)
super().__init__(header=header, body=partree)
@classmethod
def _make_header(cls, nthreads, private=None):
return None
@property
def partree(self):
return self.body[0]
@property
def root(self):
return self.partree.root
@property
def nthreads(self):
return self.partree.nthreads
@property
def collapsed(self):
return self.partree.collapsed
class SyncSpot(List):
"""
A node representing one or more synchronization operations, e.g., WaitLock,
    WithLock, etc.
"""
is_SyncSpot = True
def __init__(self, sync_ops, body=None):
super().__init__(body=body)
self.sync_ops = sync_ops
def __repr__(self):
return "<SyncSpot (%s)>" % ",".join(str(i) for i in self.sync_ops)
class CBlankLine(List):
def __init__(self, **kwargs):
super().__init__(header=c.Line())
def __repr__(self):
return ""
def DummyExpr(*args):
return Expression(DummyEq(*args))
BlankLine = CBlankLine()
Return = lambda i='': Element(c.Statement('return%s' % ((' %s' % i) if i else i)))
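# Editor's note: small illustrations of the helpers above, hedged and based
# only on their definitions:
#
#     Return()      # -> Element(c.Statement('return'))
#     Return('0')   # -> Element(c.Statement('return 0'))
#     BlankLine     # a shared CBlankLine instance emitting an empty line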
# Nodes required for distributed-memory halo exchange
class HaloSpot(Node):
"""
A halo exchange operation (e.g., send, recv, wait, ...) required to
correctly execute the subtree in the case of distributed-memory parallelism.
"""
is_HaloSpot = True
_traversable = ['body']
def __init__(self, halo_scheme, body=None):
super(HaloSpot, self).__init__()
self._halo_scheme = halo_scheme
if isinstance(body, Node):
self._body = body
elif isinstance(body, (list, tuple)) and len(body) == 1:
self._body = body[0]
elif body is None:
self._body = List()
else:
raise ValueError("`body` is expected to be a single Node")
def __repr__(self):
functions = "(%s)" % ",".join(i.name for i in self.functions)
return "<%s%s>" % (self.__class__.__name__, functions)
@property
def halo_scheme(self):
return self._halo_scheme
@property
def fmapper(self):
return self.halo_scheme.fmapper
@property
def omapper(self):
return self.halo_scheme.omapper
@property
def dimensions(self):
return self.halo_scheme.dimensions
@property
def arguments(self):
return self.halo_scheme.arguments
@property
def is_empty(self):
return len(self.halo_scheme) == 0
@property
def body(self):
return self._body
@property
def functions(self):
return tuple(self.fmapper)
@property
def free_symbols(self):
return ()
@property
def defines(self):
return ()
# Utility classes
class IterationTree(tuple):
"""
Represent a sequence of nested Iterations.
"""
@property
def root(self):
return self[0] if self else None
@property
def inner(self):
return self[-1] if self else None
@property
def dimensions(self):
return [i.dim for i in self]
def __repr__(self):
return "IterationTree%s" % super(IterationTree, self).__repr__()
def __getitem__(self, key):
ret = super(IterationTree, self).__getitem__(key)
return IterationTree(ret) if isinstance(key, slice) else ret
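# Editor's note: a brief, hypothetical illustration of the behaviour encoded
# above: slicing an IterationTree yields another IterationTree, and
# `dimensions` collects the `dim` of each nested Iteration.
#
#     tree = IterationTree((outer_iter, inner_iter))   # assumed Iteration objects
#     tree.root is outer_iter     # True
#     tree[0:1]                   # -> IterationTree((outer_iter,))
#     tree.dimensions             # -> [outer_iter.dim, inner_iter.dim]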
MetaCall = namedtuple('MetaCall', 'root local')
"""
Metadata for Callables. ``root`` is a pointer to the callable
Iteration/Expression tree. ``local`` is a boolean indicating whether the
definition of the callable is known or not.
"""
|
opesci/devito
|
devito/ir/iet/nodes.py
|
Python
|
mit
| 36,966
|
[
"VisIt"
] |
c68aa96a30b46ce4e686f5ac673c319b9e6c6b83fdd8c2569a677647e26fda89
|
from DIRAC import S_ERROR, S_OK, gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
class FTSRoute(object):
"""
This class represents the route of a transfer: source, dest and which server
"""
def __init__( self, sourceSE, targetSE, ftsServer ):
"""
:param sourceSE : source se
:param targetSE : destination SE
:param ftsServer : fts server to use
"""
self.sourceSE = sourceSE
self.targetSE = targetSE
self.ftsServer = ftsServer
class FTSAbstractPlacement( object ):
"""
This class manages all the FTS strategies, routes and what not
"""
def __init__( self, csPath = None, ftsHistoryViews = None ):
"""
Nothing special done here
:param csPath : path of the CS
:param ftsHistoryViews : history view of the db (useful for FTS2)
"""
self.csPath = csPath
self.ftsHistoryViews = ftsHistoryViews
self.rssStatus = ResourceStatus()
self.log = gLogger.getSubLogger( 'FTSAbstractPlacement', True )
def getReplicationTree( self, sourceSEs, targetSEs, size, strategy = None ):
""" For multiple source to multiple destination, find the optimal replication
strategy.
:param sourceSEs : list of source SE
:param targetSEs : list of destination SE
:param size : size of the File
:param strategy : which strategy to use
        :returns S_OK(dict) < route name : { dict with key Ancestor, SourceSE, TargetSE, Strategy } >
"""
return S_ERROR( 'IMPLEMENT ME' )
def refresh( self, ftsHistoryViews = None ):
"""
        Refresh, whatever that means... recalculate whatever is needed,
        fetch the latest configuration, and so on.
"""
return S_OK()
def findRoute( self, sourceSE, targetSE ):
""" Find the appropriate route from point A to B
:param sourceSE : source SE
:param targetSE : destination SE
:returns S_OK(FTSRoute)
"""
return S_ERROR( 'IMPLEMENT ME' )
def isRouteValid( self, route ):
""" Check whether a given route is valid
(whatever that means here)
:param route : FTSRoute
:returns S_OK or S_ERROR(reason)
"""
return S_ERROR( 'IMPLEMENT ME' )
def startTransferOnRoute( self, route ):
"""Declare that one starts a transfer on a given route.
Accounting purpose only
:param route : FTSRoute that is used
"""
return S_OK()
def finishTransferOnRoute( self, route ):
"""Declare that one finishes a transfer on a given route.
Accounting purpose only
:param route : FTSRoute that is used
"""
return S_OK()
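# Editor's note: a hedged sketch of how a concrete placement might specialise
# this base class; the class name, server URL and the naive routing policy are
# purely illustrative assumptions.
#
#   class FTSSimplePlacement( FTSAbstractPlacement ):
#
#     def findRoute( self, sourceSE, targetSE ):
#       """ Route every pair through a single, statically configured server """
#       return S_OK( FTSRoute( sourceSE, targetSE, 'https://fts3.example.org:8446' ) )
#
#     def isRouteValid( self, route ):
#       return S_OK()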
|
vmendez/DIRAC
|
DataManagementSystem/private/FTSAbstractPlacement.py
|
Python
|
gpl-3.0
| 2,654
|
[
"DIRAC"
] |
0f5f33c9d0473a9f23ca5b0fb50c75935e25bf707335f769240e2ece242c31ab
|
""" CStoJSONSynchronizer
Module that keeps the pilot parameters file synchronized with the information
in the Operations/Pilot section of the CS. If there are additions in the CS,
these are incorporated to the file.
The module uploads to a web server the latest version of the pilot scripts.
"""
import os
import glob
import shutil
import tarfile
import datetime
from git import Repo
from DIRAC import gLogger, gConfig, S_OK
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
class PilotCStoJSONSynchronizer:
"""
2 functions are executed:
- It updates a JSON file with the values on the CS which can be used by Pilot3 pilots
- It updates the pilot 3 files
This synchronizer can be triggered at any time via PilotCStoJSONSynchronizer().sync().
"""
def __init__(self):
"""c'tor
Just setting defaults
"""
self.workDir = "" # Working directory where the files are going to be stored
# domain name of the web server(s) used to upload the pilot json file and the pilot scripts
self.pilotFileServer = ""
# pilot sync default parameters
self.pilotRepo = "https://github.com/DIRACGrid/Pilot.git" # repository of the pilot
self.pilotVORepo = "" # repository of the VO that can contain a pilot extension
self.pilotSetup = gConfig.getValue("/DIRAC/Setup", "")
self.projectDir = ""
        # where to find the pilot scripts in the VO pilot repository
        self.pilotScriptPath = "Pilot"  # where to find the pilot scripts in the pilot repository
self.pilotVOScriptPath = ""
self.pilotRepoBranch = "master"
self.pilotVORepoBranch = "master"
self.log = gLogger.getSubLogger(__name__)
ops = Operations()
# Overriding parameters from the CS
self.pilotRepo = ops.getValue("Pilot/pilotRepo", self.pilotRepo)
self.pilotVORepo = ops.getValue("Pilot/pilotVORepo", self.pilotVORepo)
self.projectDir = ops.getValue("Pilot/projectDir", self.projectDir)
self.pilotScriptPath = ops.getValue("Pilot/pilotScriptsPath", self.pilotScriptPath)
self.pilotVOScriptPath = ops.getValue("Pilot/pilotVOScriptsPath", self.pilotVOScriptPath)
self.pilotRepoBranch = ops.getValue("Pilot/pilotRepoBranch", self.pilotRepoBranch)
self.pilotVORepoBranch = ops.getValue("Pilot/pilotVORepoBranch", self.pilotVORepoBranch)
def getCSDict(self, includeMasterCS=True):
"""Gets minimal info for running a pilot, from the CS
:returns: pilotDict (containing pilots run info)
:rtype: S_OK, S_ERROR, value is pilotDict
"""
pilotDict = {
"timestamp": datetime.datetime.utcnow().isoformat(),
"Setups": {},
"CEs": {},
"GenericPilotDNs": [],
}
self.log.info("-- Getting the content of the CS --")
        # These are in fact not only setups: they may be "Defaults" sections, or VOs, in multi-VO installations
setupsRes = gConfig.getSections("/Operations/")
if not setupsRes["OK"]:
self.log.error("Can't get sections from Operations", setupsRes["Message"])
return setupsRes
setupsInOperations = setupsRes["Value"]
# getting the setup(s) in this CS, and comparing with what we found in Operations
setupsInDIRACRes = gConfig.getSections("DIRAC/Setups")
if not setupsInDIRACRes["OK"]:
self.log.error("Can't get sections from DIRAC/Setups", setupsInDIRACRes["Message"])
return setupsInDIRACRes
setupsInDIRAC = setupsInDIRACRes["Value"]
# Handling the case of multi-VO CS
if not set(setupsInDIRAC).intersection(set(setupsInOperations)):
vos = list(setupsInOperations)
for vo in vos:
setupsFromVOs = gConfig.getSections("/Operations/%s" % vo)
if not setupsFromVOs["OK"]:
continue
else:
setupsInOperations = setupsFromVOs["Value"]
self.log.verbose("From Operations/[Setup]/Pilot")
for setup in setupsInOperations:
self._getPilotOptionsPerSetup(setup, pilotDict)
self.log.verbose("From Resources/Sites")
sitesSection = gConfig.getSections("/Resources/Sites/")
if not sitesSection["OK"]:
self.log.error("Can't get sections from Resources", sitesSection["Message"])
return sitesSection
for grid in sitesSection["Value"]:
gridSection = gConfig.getSections("/Resources/Sites/" + grid)
if not gridSection["OK"]:
self.log.error("Can't get sections from Resources", gridSection["Message"])
return gridSection
for site in gridSection["Value"]:
ceList = gConfig.getSections(cfgPath("/Resources", "Sites", grid, site, "CEs"))
if not ceList["OK"]:
# Skip but log it
self.log.error("Site has no CEs! - skipping", site)
continue
for ce in ceList["Value"]:
# This CEType is like 'HTCondor' or 'ARC' etc.
ceType = gConfig.getValue(cfgPath("/Resources", "Sites", grid, site, "CEs", ce, "CEType"))
if ceType is None:
# Skip but log it
self.log.error("CE has no option CEType!", ce + " at " + site)
pilotDict["CEs"][ce] = {"Site": site}
else:
pilotDict["CEs"][ce] = {"Site": site, "GridCEType": ceType}
# This LocalCEType is like 'InProcess' or 'Pool' or 'Pool/Singularity' etc.
# It can be in the queue and/or the CE level
localCEType = gConfig.getValue(cfgPath("/Resources", "Sites", grid, site, "CEs", ce, "LocalCEType"))
if localCEType is not None:
pilotDict["CEs"][ce].setdefault("LocalCEType", localCEType)
res = gConfig.getSections(cfgPath("/Resources", "Sites", grid, site, "CEs", ce, "Queues"))
if not res["OK"]:
# Skip but log it
self.log.error("No queues found for CE", ce + ": " + res["Message"])
continue
queueList = res["Value"]
for queue in queueList:
localCEType = gConfig.getValue(
cfgPath("/Resources", "Sites", grid, site, "CEs", ce, "Queues", queue, "LocalCEType")
)
if localCEType is not None:
pilotDict["CEs"][ce].setdefault(queue, {"LocalCEType": localCEType})
defaultSetup = gConfig.getValue("/DIRAC/DefaultSetup")
if defaultSetup:
pilotDict["DefaultSetup"] = defaultSetup
self.log.debug("From DIRAC/Configuration")
configurationServers = gConfig.getServersList()
if not includeMasterCS:
masterCS = gConfigurationData.getMasterServer()
configurationServers = list(set(configurationServers) - set([masterCS]))
pilotDict["ConfigurationServers"] = configurationServers
self.log.debug("Got pilotDict", str(pilotDict))
return S_OK(pilotDict)
def _getPilotOptionsPerSetup(self, setup, pilotDict):
"""Given a setup, returns its pilot options in a dictionary"""
options = gConfig.getOptionsDict("/Operations/%s/Pilot" % setup)
if not options["OK"]:
self.log.warn("Section does not exist: skipping", "/Operations/%s/Pilot " % setup)
return
# We include everything that's in the Pilot section for this setup
if setup == self.pilotSetup:
self.pilotVOVersion = options["Value"]["Version"]
pilotDict["Setups"][setup] = options["Value"]
# We update separately 'GenericPilotDNs'
try:
pilotDict["GenericPilotDNs"].append(pilotDict["Setups"][setup]["GenericPilotDN"])
except KeyError:
pass
ceTypesCommands = gConfig.getOptionsDict("/Operations/%s/Pilot/Commands" % setup)
if ceTypesCommands["OK"]:
# It's ok if the Pilot section doesn't list any Commands too
pilotDict["Setups"][setup]["Commands"] = {}
for ceType in ceTypesCommands["Value"]:
# FIXME: inconsistent that we break Commands down into a proper list but other things are comma-list strings
pilotDict["Setups"][setup]["Commands"][ceType] = ceTypesCommands["Value"][ceType].split(", ")
# pilotDict['Setups'][setup]['Commands'][ceType] = ceTypesCommands['Value'][ceType]
if "CommandExtensions" in pilotDict["Setups"][setup]:
# FIXME: inconsistent that we break CommandExtensionss down into a proper
# list but other things are comma-list strings
pilotDict["Setups"][setup]["CommandExtensions"] = pilotDict["Setups"][setup]["CommandExtensions"].split(
", "
)
# pilotDict['Setups'][setup]['CommandExtensions'] = pilotDict['Setups'][setup]['CommandExtensions']
        # Getting the details about the MQ Services to be used for logging, if any
if "LoggingMQService" in pilotDict["Setups"][setup]:
loggingMQService = gConfig.getOptionsDict(
"/Resources/MQServices/%s" % pilotDict["Setups"][setup]["LoggingMQService"]
)
if not loggingMQService["OK"]:
self.log.error(loggingMQService["Message"])
return loggingMQService
pilotDict["Setups"][setup]["Logging"] = {}
pilotDict["Setups"][setup]["Logging"]["Host"] = loggingMQService["Value"]["Host"]
pilotDict["Setups"][setup]["Logging"]["Port"] = loggingMQService["Value"]["Port"]
loggingMQServiceQueuesSections = gConfig.getSections(
"/Resources/MQServices/%s/Queues" % pilotDict["Setups"][setup]["LoggingMQService"]
)
if not loggingMQServiceQueuesSections["OK"]:
self.log.error(loggingMQServiceQueuesSections["Message"])
return loggingMQServiceQueuesSections
pilotDict["Setups"][setup]["Logging"]["Queue"] = {}
for queue in loggingMQServiceQueuesSections["Value"]:
loggingMQServiceQueue = gConfig.getOptionsDict(
"/Resources/MQServices/%s/Queues/%s" % (pilotDict["Setups"][setup]["LoggingMQService"], queue)
)
if not loggingMQServiceQueue["OK"]:
self.log.error(loggingMQServiceQueue["Message"])
return loggingMQServiceQueue
pilotDict["Setups"][setup]["Logging"]["Queue"][queue] = loggingMQServiceQueue["Value"]
queuesRes = gConfig.getSections(
"/Resources/MQServices/%s/Queues" % pilotDict["Setups"][setup]["LoggingMQService"]
)
if not queuesRes["OK"]:
return queuesRes
queues = queuesRes["Value"]
queuesDict = {}
for queue in queues:
queueOptionRes = gConfig.getOptionsDict(
"/Resources/MQServices/%s/Queues/%s" % (pilotDict["Setups"][setup]["LoggingMQService"], queue)
)
if not queueOptionRes["OK"]:
return queueOptionRes
queuesDict[queue] = queueOptionRes["Value"]
pilotDict["Setups"][setup]["Logging"]["Queues"] = queuesDict
def syncScripts(self):
"""Clone the pilot scripts from the Pilot repositories (handle also extensions)"""
tarFiles = []
# Extension, if it exists
if self.pilotVORepo:
pilotVOLocalRepo = os.path.join(self.workDir, "pilotVOLocalRepo")
if os.path.isdir(pilotVOLocalRepo):
shutil.rmtree(pilotVOLocalRepo)
os.mkdir(pilotVOLocalRepo)
repo_VO = Repo.init(pilotVOLocalRepo)
upstream = repo_VO.create_remote("upstream", self.pilotVORepo)
upstream.fetch()
upstream.pull(upstream.refs[0].remote_head)
if repo_VO.tags:
repo_VO.git.checkout(repo_VO.tags[self.pilotVOVersion], b="pilotVOScripts")
else:
repo_VO.git.checkout("upstream/%s" % self.pilotVORepoBranch, b="pilotVOScripts")
scriptDir = os.path.join(pilotVOLocalRepo, self.projectDir, self.pilotVOScriptPath, "*.py")
for fileVO in glob.glob(scriptDir):
tarFiles.append(fileVO)
else:
self.log.info("The /Operations/<Setup>/Pilot/pilotVORepo option is not defined, using Vanilla DIRAC pilot")
# DIRAC repo
pilotLocalRepo = os.path.join(self.workDir, "pilotLocalRepo")
if os.path.isdir(pilotLocalRepo):
shutil.rmtree(pilotLocalRepo)
os.mkdir(pilotLocalRepo)
repo = Repo.init(pilotLocalRepo)
upstream = repo.create_remote("upstream", self.pilotRepo)
upstream.fetch()
upstream.pull(upstream.refs[0].remote_head)
repo.git.checkout("upstream/%s" % self.pilotRepoBranch, b="pilotScripts")
scriptDir = os.path.join(pilotLocalRepo, self.pilotScriptPath, "*.py")
for filename in glob.glob(scriptDir):
tarFiles.append(filename)
tarPath = os.path.join(self.workDir, "pilot.tar")
with tarfile.TarFile(name=tarPath, mode="w") as tf:
for ptf in tarFiles:
# This copy makes sure that all the files in the tarball are accessible
# in the work directory. It should be kept
shutil.copyfile(ptf, os.path.join(self.workDir, os.path.basename(ptf)))
tf.add(ptf, arcname=os.path.basename(ptf), recursive=False)
tarFilesPaths = [os.path.join(self.workDir, os.path.basename(tarredF)) for tarredF in tarFiles]
return S_OK((tarPath, tarFilesPaths))
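# Editor's note: a hypothetical usage sketch, not part of the original module;
# it relies only on the two public methods defined above.
#
#     syncer = PilotCStoJSONSynchronizer()
#     res = syncer.getCSDict()
#     if res["OK"]:
#         pilotDict = res["Value"]          # JSON-serialisable pilot parameters
#     res = syncer.syncScripts()
#     if res["OK"]:
#         tarPath, tarFilesPaths = res["Value"]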
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Utilities/PilotCStoJSONSynchronizer.py
|
Python
|
gpl-3.0
| 14,299
|
[
"DIRAC"
] |
a2d2b2077a89ce5df44f32787a6ef41611ccd3ee5d66ba481cfce2cc6c8f8dcc
|
#!/bin/env python
# -*- coding: utf-8 -*-
import os, re, sys, shutil
from Sire.IO import *
from Sire.Mol import *
from Sire.CAS import *
from Sire.System import *
from Sire.Move import *
from Sire.MM import *
from Sire.FF import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Maths import *
from Sire.Base import *
from Sire.Qt import *
from Sire.ID import *
from Sire.Config import *
from Sire.Tools import Parameter, resolveParameters
import Sire.Stream
##### This is how we can have the script specify all of the
##### user-controllable parameters
use_sphere = Parameter("use sphere", False,
"""Whether or not to use sphereical boundary conditions""")
sphere_radius = Parameter("spherical boundary radius", 10*angstrom,
"""The radius for the spherical boundary conditions.""")
sphere_center = None # this parameter will be calculated and set in the script
use_softcore = Parameter("use softcore", True,
"""Whether or not to use a soft-core potential for the perturbed solute.""")
use_grid = Parameter("use grid", False,
"""Whether or not to use a grid for the interactions with atoms
that are beyond the spherical boundary""")
grid_spacing = Parameter("grid spacing", 0.5*angstrom,
"""The spacing between grid points if a grid is used""")
grid_buffer = Parameter("grid buffer", 3*angstrom,
"""The grid is generated to enclose all of the molecules in group 0,
plus a buffer specified by this parameter. The larger this buffer,
the larger the grid, but also the lower the chance that the grid
will need to be recalculated as the molecules in group 0 move.""")
cutoff_scheme = Parameter("cutoff scheme", "group",
"""The method used to apply the non-bonded cutoff. Choices are;
(1) shift_electrostatics : This should become the default, and uses an atomistic cutoff
with a force-shifted cutoff.
(2) reaction_field : This uses the atomistic reaction field cutoff. You can
set the reaction field dielectric using the "dielectric"
parameter.
(3) group : This is the default, and uses a group-based cutoff with a feather. Note that this is
incompatible with a grid, so an error will be raised if you try
to use a group-based cutoff with a grid.""")
rf_dielectric = Parameter("dielectric", 78.3,
"""The dielectric constant to use with the reaction field cutoff method.""")
out_dir = Parameter("output directory", "output",
"""The directory in which to place all output files.""")
top_file = Parameter("topology file", "../../SYSTEM.top",
"""The name of the topology file that contains the solvated solute.""")
crd_file = Parameter("coordinate file", "../../SYSTEM.crd",
"""The name of the coordinate file that contains the solvated solute.""")
ligand_flex_file = Parameter("ligand flex file", "../../MORPH.flex",
"""Flexibility file describing how the morph is perturbed.""")
ligand_pert_file = Parameter("ligand perturbation file", "../../MORPH.pert",
"""Perturbation file describing how the morph is perturbed.""")
lig_name = Parameter("ligand name", "LIG",
"""Optional, the name of the ligand used in the flexibility file.
If the ligand has a single residue, the program will use the residue name
by default to look up the flexibility template""")
restart_file = Parameter("restart file", "sim_restart.s3",
"""The name of the restart file.""")
random_seed = Parameter("random seed", 0, """The random number seed""")
nmoves = Parameter("number of moves", 50, """The number of moves per block""")
nmoves_per_energy = Parameter("number of energy snapshots", 1,
"""The number of times during the simulation that you want the
energy to be recorded.""")
nmoves_per_pdb = Parameter("number of structure snapshots", 1,
"""The number of times during the simulation that you want the
structure to be recorded (as a PDB).""")
nmoves_per_pdb_intermediates = Parameter("number of intermediate structure snapshots", None,
"""The number of times during an intermediate simulation to save
the structure (as a PDB).""")
temperature = Parameter("temperature", 25 * celsius, """The temperature of the simulation""")
pressure = Parameter("pressure", 1 * atm,
"""The pressure of the simulation. Note that this is ignored if you
are using spherical boundary conditions.""")
coul_cutoff = Parameter("coulomb cutoff", 10*angstrom,
"""The cutoff radius for non-bonded coulomb interactions""")
coul_feather = Parameter("coulomb feather", 0.5*angstrom,
"""The feather radius for the non-bonded coulomb interactions
(only needed if a group-based cutoff is used)""")
lj_cutoff = Parameter("lj cutoff", 10*angstrom,
"""The cutoff radius for non-bonded LJ interactions""")
lj_feather = Parameter("lj feather", 0.5*angstrom,
"""The feather radius for the non-bonded LJ interactions
(only needed if a group-based cutoff is used)""")
coulomb_power = Parameter("coulomb power", 0,
"""The soft-core coulomb power parameter""")
shift_delta = Parameter("shift delta", 2.0,
"""The soft-core shift delta parameter""")
combining_rules = Parameter("combining rules", "arithmetic",
"""The combinining rules for LJ interactions""")
pref_constant = Parameter("preferential constant", 200 * angstrom2,
"""The preferential sampling constant""")
max_solvent_translation = Parameter("maximum solvent translation", 0.15*angstrom,
"""Maximum amount to translate the solvent""")
max_solvent_rotation = Parameter("maximum solvent rotation", 15*degrees,
"""Maximum amount to rotate the solvent""")
solvent_mc_weight_factor = Parameter("solvent move weight", 1,
"""Factor used to multiply the weight of the solvent moves.""")
solute_mc_weight = Parameter("solute move weight", 100,
"""Factor used to multiply the weight of the solute moves.""")
volume_mc_weight = Parameter("volume move weight", 1,
"""Factor used to multiply the weight of the volume moves.""")
delta_lambda = Parameter("delta lambda", 0.001,
"""Delta lambda for finite difference gradients.""")
compress = Parameter("compression method", "bzip2 -f",
"""Command used to compress output files.""")
lam_val = Parameter("lambda", 0.0, """Value of lambda for the simulation""")
print_nrgs = Parameter("print energies", None,
"""Whether or not to print all energy components after loading
the restart file or starting the simulation. Useful for debugging.""")
def adjustPerturbedDOFs( molecule ):
perturbations = molecule.property("perturbations").perturbations()
r0 = Symbol("r0")
theta0 = Symbol("theta0")
for pert in perturbations:
if ( pert.what() == 'SireMM::TwoAtomPerturbation'):
ri = pert.initialForms()[r0].toString().toDouble()
rf = pert.finalForms()[r0].toString().toDouble()
if (abs(ri-rf) > 0.001):
#rint ri,rf
r = (1-lam_val.val) * ri + lam_val.val * rf
r = r * angstrom
bond = BondID(pert.atom0(), pert.atom1() )
mover = molecule.move()
try:
mover.set(bond, r)
except UserWarning:
                    # extract the type of the error
_, error, _ = sys.exc_info()
error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
continue
molecule = mover.commit()
elif ( pert.what() == 'SireMM::ThreeAtomPerturbation'):
thetai = pert.initialForms()[theta0].toString().toDouble()
thetaf = pert.finalForms()[theta0].toString().toDouble()
if (abs(thetai-thetaf) > 0.001):
#rint thetai,thetaf
theta = (1-lam_val.val) * thetai + lam_val.val * thetaf
theta = theta * radians
angle = AngleID(pert.atom0(), pert.atom1(), pert.atom2() )
mover = molecule.move()
try:
mover.set(angle, theta)
except UserWarning:
                    # extract the type of the error
                    _, error, _ = sys.exc_info()
                    error_type = re.search(r"(Sire\w*::\w*)", str(error)).group(0)
if error_type == "SireMol::ring_error":
continue
molecule = mover.commit()
return molecule
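# Editor's note (added comment): the function above linearly interpolates each
# perturbed bond length and angle between its initial and final equilibrium
# values, i.e. r(lambda) = (1 - lambda)*r_i + lambda*r_f and
# theta(lambda) = (1 - lambda)*theta_i + lambda*theta_f, skipping degrees of
# freedom that cannot be changed because they belong to a ring.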
def getDummies(molecule):
print "Selecting dummy groups"
natoms = molecule.nAtoms()
atoms = molecule.atoms()
from_dummies = None
to_dummies = None
for x in range(0,natoms):
atom = atoms[x]
if atom.property("initial_ambertype") == "du":
if from_dummies is None:
from_dummies = molecule.selectAll( atom.index() )
else:
from_dummies += molecule.selectAll( atom.index() )
elif atom.property("final_ambertype") == "du":
if to_dummies is None:
to_dummies = molecule.selectAll( atom.index() )
else:
to_dummies += molecule.selectAll( atom.index() )
return to_dummies, from_dummies
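# Editor's note (added comment): getDummies partitions the solute's atoms by
# their AMBER types: atoms whose initial type is "du" (i.e. appearing atoms)
# are collected into from_dummies, while atoms whose final type is "du"
# (i.e. disappearing atoms) go into to_dummies.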
def createSystem(molecules, space):
# First, sanity check that the cutoff is not greater than half the box length for
# periodic spaces...
if space.isPeriodic():
cutoff = coul_cutoff.val.to(angstrom)
if lj_cutoff.val.to(angstrom) > cutoff:
cutoff = lj_cutoff.val.to(angstrom)
eps_cutoff = cutoff - 1e-6
ok_x = (space.getMinimumImage(Vector(eps_cutoff,0,0), Vector(0)).length() <= cutoff)
ok_y = (space.getMinimumImage(Vector(0,eps_cutoff,0), Vector(0)).length() <= cutoff)
ok_z = (space.getMinimumImage(Vector(0,0,eps_cutoff), Vector(0)).length() <= cutoff)
if not (ok_x and ok_y and ok_z):
print >>sys.stderr,"The cutoff (%f A) is too large for periodic box %s" % \
(cutoff, space)
raise RuntimeError()
print "Applying flexibility and zmatrix templates..."
moleculeNumbers = molecules.molNums()
moleculeList = []
for moleculeNumber in moleculeNumbers:
molecule = molecules.molecule(moleculeNumber).molecule()
moleculeList.append(molecule)
solute = moleculeList[0]
# If lig_name has not been defined, and there is a single residue,
# use the residue name
ligand_name = lig_name.val
if ligand_name is None:
if ( solute.nResidues() == 1 ):
ligand_name = solute.residue( ResIdx(0) ).name().value()
else:
ligand_name = "ligand" # Likely not good...
#print lig_name
solute = solute.edit().rename(ligand_name).commit()
# if the space is periodic, then translate everything so that the solute
# is in the center of the box
if space.isPeriodic():
print "Centering the system so that the solute is at (0,0,0)..."
center = solute.evaluate().center()
delta = -center
solute = solute.move().translate(delta).commit()
moleculeList[0] = solute
for i in range(1,len(moleculeList)):
mol = moleculeList[i].move().translate(delta).commit()
center = mol.evaluate().center()
image = space.getMinimumImage(center, Vector(0))
moleculeList[i] = mol.move().translate(image-center).commit()
#print solute
# This will add the property "flexibility" to the solute
flexibility_lib = FlexibilityLibrary(ligand_flex_file.val)
flexibility = flexibility_lib.getFlexibility(solute)
solute = solute.edit().setProperty("flexibility", flexibility).commit()
perturbations_lib = PerturbationsLibrary(ligand_pert_file.val)
solute = perturbations_lib.applyTemplate(solute)
perturbations = solute.property("perturbations")
#print lam_val
lam = Symbol("lambda")
lam_fwd = Symbol("lambda_{fwd}")
lam_bwd = Symbol("lambda_{bwd}")
initial = Perturbation.symbols().initial()
final = Perturbation.symbols().final()
solute = solute.edit().setProperty("perturbations",
perturbations.recreate( (1-lam)*initial + lam*final ) ).commit()
# Set the geometry of perturbed bonds/angles to match the corresponding equilibrium value
solute = adjustPerturbedDOFs( solute )
solute_fwd = solute.edit().renumber().setProperty("perturbations",
perturbations.substitute( lam, lam_fwd ) ).commit()
solute_bwd = solute.edit().renumber().setProperty("perturbations",
perturbations.substitute( lam, lam_bwd ) ).commit()
#print solute
#print solute_fwd
#print solute_bwd
# We put atoms in three groups depending on what happens in the perturbation
# non dummy to non dummy --> the hard group, uses a normal intermolecular FF
# non dummy to dummy --> the todummy group, uses SoftFF with alpha = Lambda
# dummy to non dummy --> the fromdummy group, uses SoftFF with alpha = 1 - Lambda
    # We then find the atoms that start/end as dummies and update the hard, todummy and fromdummy groups accordingly
solute_grp_ref = MoleculeGroup("solute_ref", solute)
solute_grp_ref_hard = MoleculeGroup("solute_ref_hard")
solute_grp_ref_todummy = MoleculeGroup("solute_ref_todummy")
solute_grp_ref_fromdummy = MoleculeGroup("solute_ref_fromdummy")
solute_grp_fwd = MoleculeGroup("solute_fwd", solute_fwd)
solute_grp_fwd_hard = MoleculeGroup("solute_fwd_hard")
solute_grp_fwd_todummy = MoleculeGroup("solute_fwd_todummy")
solute_grp_fwd_fromdummy = MoleculeGroup("solute_fwd_fromdummy")
solute_grp_bwd = MoleculeGroup("solute_bwd", solute_bwd)
solute_grp_bwd_hard = MoleculeGroup("solute_bwd_hard")
solute_grp_bwd_todummy = MoleculeGroup("solute_bwd_todummy")
solute_grp_bwd_fromdummy = MoleculeGroup("solute_bwd_fromdummy")
solute_ref_hard = solute.selectAllAtoms()
solute_ref_todummy = solute_ref_hard.invert()
solute_ref_fromdummy = solute_ref_hard.invert()
solute_fwd_hard = solute_fwd.selectAllAtoms()
solute_fwd_todummy = solute_fwd_hard.invert()
solute_fwd_fromdummy = solute_fwd_hard.invert()
solute_bwd_hard = solute_bwd.selectAllAtoms()
solute_bwd_todummy = solute_bwd_hard.invert()
solute_bwd_fromdummy = solute_bwd_hard.invert()
to_dummies, from_dummies = getDummies(solute)
#print to_dummies
#print from_dummies
if to_dummies is not None:
ndummies = to_dummies.count()
dummies = to_dummies.atoms()
for x in range(0,ndummies):
dummy_index = dummies[x].index()
solute_ref_hard = solute_ref_hard.subtract( solute.select( dummy_index ) )
solute_fwd_hard = solute_fwd_hard.subtract( solute_fwd.select( dummy_index ) )
solute_bwd_hard = solute_bwd_hard.subtract( solute_bwd.select( dummy_index ) )
solute_ref_todummy = solute_ref_todummy.add( solute.select( dummy_index ) )
solute_fwd_todummy = solute_fwd_todummy.add( solute_fwd.select( dummy_index ) )
solute_bwd_todummy = solute_bwd_todummy.add( solute_bwd.select( dummy_index ) )
if from_dummies is not None:
ndummies = from_dummies.count()
dummies = from_dummies.atoms()
for x in range(0,ndummies):
dummy_index = dummies[x].index()
solute_ref_hard = solute_ref_hard.subtract( solute.select( dummy_index ) )
solute_fwd_hard = solute_fwd_hard.subtract( solute_fwd.select( dummy_index ) )
solute_bwd_hard = solute_bwd_hard.subtract( solute_bwd.select( dummy_index ) )
solute_ref_fromdummy = solute_ref_fromdummy.add( solute.select( dummy_index ) )
solute_fwd_fromdummy = solute_fwd_fromdummy.add( solute_fwd.select( dummy_index ) )
solute_bwd_fromdummy = solute_bwd_fromdummy.add( solute_bwd.select( dummy_index ) )
solute_grp_ref_hard.add(solute_ref_hard)
solute_grp_fwd_hard.add(solute_fwd_hard)
solute_grp_bwd_hard.add(solute_bwd_hard)
solute_grp_ref_todummy.add(solute_ref_todummy)
solute_grp_fwd_todummy.add(solute_fwd_todummy)
solute_grp_bwd_todummy.add(solute_bwd_todummy)
solute_grp_ref_fromdummy.add(solute_ref_fromdummy)
solute_grp_fwd_fromdummy.add(solute_fwd_fromdummy)
solute_grp_bwd_fromdummy.add(solute_bwd_fromdummy)
solutes = MoleculeGroup("solutes")
solutes.add(solute)
solutes.add(solute_fwd)
solutes.add(solute_bwd)
perturbed_solutes = MoleculeGroup("perturbed_solutes")
perturbed_solutes.add(solute_fwd)
perturbed_solutes.add(solute_bwd)
# Add these groups to the System
system = System()
system.add(solutes)
system.add(perturbed_solutes)
system.add(solute_grp_ref)
system.add(solute_grp_fwd)
system.add(solute_grp_bwd)
system.add(solute_grp_ref_hard)
system.add(solute_grp_ref_todummy)
system.add(solute_grp_ref_fromdummy)
system.add(solute_grp_fwd_hard)
system.add(solute_grp_fwd_todummy)
system.add(solute_grp_fwd_fromdummy)
system.add(solute_grp_bwd_hard)
system.add(solute_grp_bwd_todummy)
system.add(solute_grp_bwd_fromdummy)
# Now sort out the solvent group - how we do this depends on the
# type of boundary conditions
if use_sphere.val:
# we are using spherical boundary conditions. Need to divide the system
# into mobile and fixed solvent molecules
solvent = MoleculeGroup("solvent")
fixed_solvent = MoleculeGroup("fixed_solvent")
# get the center of the solute (this is the center for the spherical
# boundary conditions) (this sets the global "sphere_center" variable)
global sphere_center
sphere_center = solute.evaluate().center()
# get the cutoff - all solvent molecules whose centers are within this radius
# are mobile
radius = sphere_radius.val.to(angstrom)
print "Using a reflection sphere of radius %f A centered at %s." % (radius, sphere_center)
if space.isPeriodic():
eps_radius = cutoff + radius - 1e-6
ok_x = (space.getMinimumImage(Vector(eps_radius,0,0), Vector(0)).length() > radius)
ok_y = (space.getMinimumImage(Vector(0,eps_radius,0), Vector(0)).length() > radius)
ok_z = (space.getMinimumImage(Vector(0,0,eps_radius), Vector(0)).length() > radius)
if not (ok_x and ok_y and ok_z):
print >>sys.stderr,"The sphere radius (%f A) plus non-bonded cutoff (%f A) is too large for periodic box %s" \
% (radius, cutoff, space)
print >>sys.stderr, \
"Two times the sphere radius plus the cutoff distance cannot exceed the dimension of the box."
raise RuntimeError()
num_images = 0
for molecule in moleculeList[1:]:
# get the center of the solvent
solv_center = molecule.evaluate().center()
# wrap the solvent molecule into the same space as the solute
wrapped_solv_center = space.getMinimumImage(solv_center, sphere_center)
if wrapped_solv_center != solv_center:
molecule = molecule.move().translate( wrapped_solv_center - solv_center ).commit()
solv_center = molecule.evaluate().center()
if Vector.distance(solv_center, sphere_center) <= radius:
solvent.add(molecule)
else:
fixed_solvent.add(molecule)
# if we are in a periodic space, we need to manually mirror this molecule
# into each of the periodic boxes and keep those images that lie within cutoff+sphere_radius
# of the solute (since these images will be seen by mobile solvent molecules on the
# edge of the sphere
if space.isPeriodic():
image_cutoff = cutoff + radius
for i in (-1,0,1):
for j in (-1,0,1):
for k in (-1,0,1):
delta = Vector(i * radius, j * radius, k * radius)
if delta.length() != 0:
# get the image of the solvent molecule in this box
image_solv_center = space.getMinimumImage(solv_center, sphere_center+delta)
delta = image_solv_center - solv_center
if delta.length() > 0:
#there is a periodic image available here - is it within non-bonded cutoff?
if (image_solv_center - sphere_center).length() <= image_cutoff:
# it is within cutoff, so a copy of this molecule should be added
image = molecule.edit().renumber().move().translate(delta).commit()
fixed_solvent.add(image)
num_images += 1
print "There are %d fixed solvent molecules (%d of these are periodic images)." % \
(fixed_solvent.nMolecules(), num_images)
# print out a PDB containing all fixed atoms. This will be useful when visualising
# the system
PDB().write(fixed_solvent, "fixed_solvent.pdb")
traj = MoleculeGroup("traj")
traj.add(solute)
traj.add(solvent)
system.add(solvent)
system.add(fixed_solvent)
system.add(traj)
else:
# we are using periodic boundary conditions. All solvent molecules can
# be moved, and we have to create a group to handle volume moves
solvent = MoleculeGroup("solvent")
for molecule in moleculeList[1:]:
solvent.add(molecule)
all = MoleculeGroup("all")
all.add(solutes)
all.add(perturbed_solutes)
all.add(solute_grp_ref)
all.add(solute_grp_fwd)
all.add(solute_grp_bwd)
all.add(solute_grp_ref_hard)
all.add(solute_grp_ref_todummy)
all.add(solute_grp_ref_fromdummy)
all.add(solute_grp_fwd_hard)
all.add(solute_grp_fwd_todummy)
all.add(solute_grp_fwd_fromdummy)
all.add(solute_grp_bwd_hard)
all.add(solute_grp_bwd_todummy)
all.add(solute_grp_bwd_fromdummy)
all.add(solvent)
traj = MoleculeGroup("traj")
traj.add(solute)
traj.add(solvent)
system.add(solvent)
system.add(all)
system.add(traj)
return system
def setupForcefields(system, space):
print "Creating force fields... "
solutes = system[ MGName("solutes") ]
solute = system[ MGName("solute_ref") ]
solute_hard = system[ MGName("solute_ref_hard") ]
solute_todummy = system[ MGName("solute_ref_todummy") ]
solute_fromdummy = system[ MGName("solute_ref_fromdummy") ]
solute_fwd = system[ MGName("solute_fwd") ]
solute_fwd_hard = system[ MGName("solute_fwd_hard") ]
solute_fwd_todummy = system[ MGName("solute_fwd_todummy") ]
solute_fwd_fromdummy = system[ MGName("solute_fwd_fromdummy") ]
solute_bwd = system[ MGName("solute_bwd") ]
solute_bwd_hard = system[ MGName("solute_bwd_hard") ]
solute_bwd_todummy = system[ MGName("solute_bwd_todummy") ]
solute_bwd_fromdummy = system[ MGName("solute_bwd_fromdummy") ]
solvent = system[ MGName("solvent") ]
if use_sphere.val:
fixed_solvent = system[ MGName("fixed_solvent") ]
else:
all = system[ MGName("all") ]
# The list of all coulomb/LJ forcefields - we keep a list of
# all of these so that we can set the cutoff scheme to use for them all at once
clj_ffs = []
# - first solvent-solvent coulomb/LJ (CLJ) energy
solventff = InterCLJFF("solvent:solvent")
solventff.add(solvent)
clj_ffs.append(solventff)
# Now solute bond, angle, dihedral energy
solute_intraff = InternalFF("solute_intraff")
solute_intraff.add(solute)
solute_fwd_intraff = InternalFF("solute_fwd_intraff")
solute_fwd_intraff.add(solute_fwd)
solute_bwd_intraff = InternalFF("solute_bwd_intraff")
solute_bwd_intraff.add(solute_bwd)
# Now solute intramolecular CLJ energy
solute_hard_intraclj = IntraCLJFF("solute_hard_intraclj")
solute_hard_intraclj.add(solute_hard)
solute_todummy_intraclj = IntraSoftCLJFF("solute_todummy_intraclj")
solute_todummy_intraclj.add(solute_todummy)
solute_fromdummy_intraclj = IntraSoftCLJFF("solute_fromdummy_intraclj")
solute_fromdummy_intraclj.add(solute_fromdummy)
solute_hard_todummy_intraclj = IntraGroupSoftCLJFF("solute_hard:todummy_intraclj")
solute_hard_todummy_intraclj.add(solute_hard, MGIdx(0))
solute_hard_todummy_intraclj.add(solute_todummy, MGIdx(1))
solute_hard_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_hard:fromdummy_intraclj")
solute_hard_fromdummy_intraclj.add(solute_hard, MGIdx(0))
solute_hard_fromdummy_intraclj.add(solute_fromdummy, MGIdx(1))
solute_todummy_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_todummy:fromdummy_intraclj")
solute_todummy_fromdummy_intraclj.add(solute_todummy, MGIdx(0))
solute_todummy_fromdummy_intraclj.add(solute_fromdummy, MGIdx(1))
# The forwards intramoleculer CLJ energy
solute_fwd_hard_intraclj = IntraCLJFF("solute_fwd_hard_intraclj")
solute_fwd_hard_intraclj.add(solute_fwd_hard)
solute_fwd_todummy_intraclj = IntraSoftCLJFF("solute_fwd_todummy_intraclj")
solute_fwd_todummy_intraclj.add(solute_fwd_todummy)
solute_fwd_fromdummy_intraclj = IntraSoftCLJFF("solute_fwd_fromdummy_intraclj")
solute_fwd_fromdummy_intraclj.add(solute_fwd_fromdummy)
solute_fwd_hard_todummy_intraclj = IntraGroupSoftCLJFF("solute_fwd_hard:todummy_intraclj")
solute_fwd_hard_todummy_intraclj.add(solute_fwd_hard, MGIdx(0))
solute_fwd_hard_todummy_intraclj.add(solute_fwd_todummy, MGIdx(1))
solute_fwd_hard_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_fwd_hard:fromdummy_intraclj")
solute_fwd_hard_fromdummy_intraclj.add(solute_fwd_hard, MGIdx(0))
solute_fwd_hard_fromdummy_intraclj.add(solute_fwd_fromdummy, MGIdx(1))
solute_fwd_todummy_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_fwd_todummy:fromdummy_intraclj")
solute_fwd_todummy_fromdummy_intraclj.add(solute_fwd_todummy, MGIdx(0))
solute_fwd_todummy_fromdummy_intraclj.add(solute_fwd_fromdummy, MGIdx(1))
# The backwards intramolecular CLJ energy
solute_bwd_hard_intraclj = IntraCLJFF("solute_bwd_hard_intraclj")
solute_bwd_hard_intraclj.add(solute_bwd_hard)
solute_bwd_todummy_intraclj = IntraSoftCLJFF("solute_bwd_todummy_intraclj")
solute_bwd_todummy_intraclj.add(solute_bwd_todummy)
solute_bwd_fromdummy_intraclj = IntraSoftCLJFF("solute_bwd_fromdummy_intraclj")
solute_bwd_fromdummy_intraclj.add(solute_bwd_fromdummy)
solute_bwd_hard_todummy_intraclj = IntraGroupSoftCLJFF("solute_bwd_hard:todummy_intraclj")
solute_bwd_hard_todummy_intraclj.add(solute_bwd_hard, MGIdx(0))
solute_bwd_hard_todummy_intraclj.add(solute_bwd_todummy, MGIdx(1))
solute_bwd_hard_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_bwd_hard:fromdummy_intraclj")
solute_bwd_hard_fromdummy_intraclj.add(solute_bwd_hard, MGIdx(0))
solute_bwd_hard_fromdummy_intraclj.add(solute_bwd_fromdummy, MGIdx(1))
solute_bwd_todummy_fromdummy_intraclj = IntraGroupSoftCLJFF("solute_bwd_todummy:fromdummy_intraclj")
solute_bwd_todummy_fromdummy_intraclj.add(solute_bwd_todummy, MGIdx(0))
solute_bwd_todummy_fromdummy_intraclj.add(solute_bwd_fromdummy, MGIdx(1))
# Now the solute-solvent CLJ energy
solute_hard_solventff = InterGroupCLJFF("solute_hard:solvent")
solute_hard_solventff.add(solute_hard, MGIdx(0))
solute_hard_solventff.add(solvent, MGIdx(1))
solute_todummy_solventff = InterGroupSoftCLJFF("solute_todummy:solvent")
solute_todummy_solventff.add(solute_todummy, MGIdx(0))
solute_todummy_solventff.add(solvent, MGIdx(1))
solute_fromdummy_solventff = InterGroupSoftCLJFF("solute_fromdummy:solvent")
solute_fromdummy_solventff.add(solute_fromdummy, MGIdx(0))
solute_fromdummy_solventff.add(solvent, MGIdx(1))
clj_ffs.append(solute_hard_solventff)
clj_ffs.append(solute_todummy_solventff)
clj_ffs.append(solute_fromdummy_solventff)
#Now the forwards solute-solvent CLJ energy
solute_fwd_hard_solventff = InterGroupCLJFF("solute_fwd_hard:solvent")
solute_fwd_hard_solventff.add(solute_fwd_hard, MGIdx(0))
solute_fwd_hard_solventff.add(solvent, MGIdx(1))
solute_fwd_todummy_solventff = InterGroupSoftCLJFF("solute_fwd_todummy:solvent")
solute_fwd_todummy_solventff.add(solute_fwd_todummy, MGIdx(0))
solute_fwd_todummy_solventff.add(solvent, MGIdx(1))
solute_fwd_fromdummy_solventff = InterGroupSoftCLJFF("solute_fwd_fromdummy:solvent")
solute_fwd_fromdummy_solventff.add(solute_fwd_fromdummy, MGIdx(0))
solute_fwd_fromdummy_solventff.add(solvent, MGIdx(1))
clj_ffs.append(solute_fwd_hard_solventff)
clj_ffs.append(solute_fwd_todummy_solventff)
clj_ffs.append(solute_fwd_fromdummy_solventff)
# Now the backwards solute-solvent CLJ energy
solute_bwd_hard_solventff = InterGroupCLJFF("solute_bwd_hard:solvent")
solute_bwd_hard_solventff.add(solute_bwd_hard, MGIdx(0))
solute_bwd_hard_solventff.add(solvent, MGIdx(1))
solute_bwd_todummy_solventff = InterGroupSoftCLJFF("solute_bwd_todummy:solvent")
solute_bwd_todummy_solventff.add(solute_bwd_todummy, MGIdx(0))
solute_bwd_todummy_solventff.add(solvent, MGIdx(1))
solute_bwd_fromdummy_solventff = InterGroupSoftCLJFF("solute_bwd_fromdummy:solvent")
solute_bwd_fromdummy_solventff.add(solute_bwd_fromdummy, MGIdx(0))
solute_bwd_fromdummy_solventff.add(solvent, MGIdx(1))
clj_ffs.append(solute_bwd_hard_solventff)
clj_ffs.append(solute_bwd_todummy_solventff)
clj_ffs.append(solute_bwd_fromdummy_solventff)
# Here is the list of all forcefields
forcefields = [ solute_intraff, solute_fwd_intraff, solute_bwd_intraff,
solute_hard_intraclj, solute_todummy_intraclj, solute_fromdummy_intraclj,
solute_hard_todummy_intraclj, solute_hard_fromdummy_intraclj,
solute_todummy_fromdummy_intraclj,
solute_fwd_hard_intraclj, solute_fwd_todummy_intraclj, solute_fwd_fromdummy_intraclj,
solute_fwd_hard_todummy_intraclj, solute_fwd_hard_fromdummy_intraclj,
solute_fwd_todummy_fromdummy_intraclj,
solute_bwd_hard_intraclj, solute_bwd_todummy_intraclj, solute_bwd_fromdummy_intraclj,
solute_bwd_hard_todummy_intraclj, solute_bwd_hard_fromdummy_intraclj,
solute_bwd_todummy_fromdummy_intraclj,
solventff,
solute_hard_solventff, solute_todummy_solventff, solute_fromdummy_solventff,
solute_fwd_hard_solventff, solute_fwd_todummy_solventff, solute_fwd_fromdummy_solventff,
solute_bwd_hard_solventff, solute_bwd_todummy_solventff, solute_bwd_fromdummy_solventff ]
if use_sphere.val:
# we need to add on the energy of interaction between the fixed and mobile atoms
fixed_solvent = system[MGName("fixed_solvent")]
if use_grid.val:
# interactions with fixed atoms are calculated on a grid
# Start by creating a template GridFF forcefield, which can be duplicated
# for each grid. This ensures that only a single copy of the fixed atoms
# will be saved in the system, saving space and improving efficiency
gridff = GridFF2("template")
gridff.addFixedAtoms(fixed_solvent)
gridff.setGridSpacing( grid_spacing.val )
gridff.setBuffer( grid_buffer.val )
gridff.setCoulombCutoff( coul_cutoff.val )
gridff.setLJCutoff( lj_cutoff.val )
if cutoff_scheme.val == "shift_electrostatics":
gridff.setShiftElectrostatics(True)
elif cutoff_scheme.val == "reaction_field":
gridff.setUseReactionField(True)
gridff.setReactionFieldDielectric(rf_dielectric.val)
elif cutoff_scheme.val == "group":
print >>sys.stderr,"You cannot use a group-based cutoff with a grid!"
print >>sys.stderr,"Please choose either the shift_electrostatics or reaction_field cutoff schemes."
raise RuntimeError()
else:
print "WARNING. Unrecognised cutoff scheme. Using \"shift_electrostatics\"."
gridff.setShiftElectrostatics(True)
# solvent - fixed_solvent energy
solvent_fixedff = gridff.clone()
solvent_fixedff.setName("solvent:solvent_fixed")
solvent_fixedff.add(solvent, MGIdx(0))
forcefields.append(solvent_fixedff)
# Now the solute-solvent CLJ energy (note that we don't need to use
# soft-core as the fixed solvent is a long way away from the solute)
solute_fixed_solventff = gridff.clone()
solute_fixed_solventff.setName("solute:solvent_fixed")
solute_fixed_solventff.add(solute_hard, MGIdx(0))
solute_todummy_fixed_solventff = gridff.clone()
solute_todummy_fixed_solventff.setName("solute_todummy:solvent_fixed")
solute_todummy_fixed_solventff.add(solute_todummy, MGIdx(0))
solute_fromdummy_fixed_solventff = gridff.clone()
solute_fromdummy_fixed_solventff.setName("solute_fromdummy:solvent_fixed")
solute_fromdummy_fixed_solventff.add(solute_fromdummy, MGIdx(0))
forcefields.append(solute_fixed_solventff)
forcefields.append(solute_todummy_fixed_solventff)
forcefields.append(solute_fromdummy_fixed_solventff)
#Now the forwards solute-solvent CLJ energy
solute_fwd_fixed_solventff = gridff.clone()
solute_fwd_fixed_solventff.setName("solute_fwd_hard:fixed_solvent")
solute_fwd_fixed_solventff.add(solute_fwd_hard, MGIdx(0))
solute_fwd_todummy_fixed_solventff = gridff.clone()
solute_fwd_todummy_fixed_solventff.setName("solute_fwd_todummy:fixed_solvent")
solute_fwd_todummy_fixed_solventff.add(solute_fwd_todummy, MGIdx(0))
solute_fwd_fromdummy_fixed_solventff = gridff.clone()
solute_fwd_fromdummy_fixed_solventff.setName("solute_fwd_fromdummy:fixed_solvent")
solute_fwd_fromdummy_fixed_solventff.add(solute_fwd_fromdummy, MGIdx(0))
forcefields.append(solute_fwd_fixed_solventff)
forcefields.append(solute_fwd_todummy_fixed_solventff)
forcefields.append(solute_fwd_fromdummy_fixed_solventff)
# Now the backwards solute-solvent CLJ energy
solute_bwd_fixed_solventff = gridff.clone()
solute_bwd_fixed_solventff.setName("solute_bwd_hard:fixed_solvent")
solute_bwd_fixed_solventff.add(solute_bwd_hard, MGIdx(0))
solute_bwd_todummy_fixed_solventff = gridff.clone()
solute_bwd_todummy_fixed_solventff.setName("solute_bwd_todummy:fixed_solvent")
solute_bwd_todummy_fixed_solventff.add(solute_bwd_todummy, MGIdx(0))
solute_bwd_fromdummy_fixed_solventff = gridff.clone()
solute_bwd_fromdummy_fixed_solventff.setName("solute_bwd_fromdummy:fixed_solvent")
solute_bwd_fromdummy_fixed_solventff.add(solute_bwd_fromdummy, MGIdx(0))
forcefields.append(solute_bwd_fixed_solventff)
forcefields.append(solute_bwd_todummy_fixed_solventff)
forcefields.append(solute_bwd_fromdummy_fixed_solventff)
# Now remove the "fixed_solvent" group from the system. This will remove
# all copies of these molecules from the system, leaving their data in the
# "fixed_atoms" arrays in each of the GridFF forcefields.
system.remove( MGName("fixed_solvent") )
else:
# interactions with fixed atoms are calculated explicitly
# solvent - fixed_solvent energy
solvent_fixedff = InterGroupCLJFF("solvent:solvent_fixed")
solvent_fixedff.add(solvent, MGIdx(0))
solvent_fixedff.add(fixed_solvent, MGIdx(1))
forcefields.append(solvent_fixedff)
clj_ffs.append(solvent_fixedff)
# Now the solute-solvent CLJ energy (note that we don't need to use
# soft-core as the fixed solvent is a long way away from the solute)
solute_fixed_solventff = InterGroupCLJFF("solute:solvent_fixed")
solute_fixed_solventff.add(solute_hard, MGIdx(0))
solute_fixed_solventff.add(fixed_solvent, MGIdx(1))
solute_todummy_fixed_solventff = InterGroupCLJFF("solute_todummy:solvent_fixed")
solute_todummy_fixed_solventff.add(solute_todummy, MGIdx(0))
solute_todummy_fixed_solventff.add(fixed_solvent, MGIdx(1))
solute_fromdummy_fixed_solventff = InterGroupCLJFF("solute_fromdummy:solvent_fixed")
solute_fromdummy_fixed_solventff.add(solute_fromdummy, MGIdx(0))
solute_fromdummy_fixed_solventff.add(fixed_solvent, MGIdx(1))
forcefields.append(solute_fixed_solventff)
forcefields.append(solute_todummy_fixed_solventff)
forcefields.append(solute_fromdummy_fixed_solventff)
clj_ffs.append(solute_fixed_solventff)
clj_ffs.append(solute_todummy_fixed_solventff)
clj_ffs.append(solute_fromdummy_fixed_solventff)
#Now the forwards solute-solvent CLJ energy
solute_fwd_fixed_solventff = InterGroupCLJFF("solute_fwd_hard:fixed_solvent")
solute_fwd_fixed_solventff.add(solute_fwd_hard, MGIdx(0))
solute_fwd_fixed_solventff.add(fixed_solvent, MGIdx(1))
solute_fwd_todummy_fixed_solventff = InterGroupCLJFF("solute_fwd_todummy:fixed_solvent")
solute_fwd_todummy_fixed_solventff.add(solute_fwd_todummy, MGIdx(0))
solute_fwd_todummy_fixed_solventff.add(fixed_solvent, MGIdx(1))
solute_fwd_fromdummy_fixed_solventff = InterGroupCLJFF("solute_fwd_fromdummy:fixed_solvent")
solute_fwd_fromdummy_fixed_solventff.add(solute_fwd_fromdummy, MGIdx(0))
solute_fwd_fromdummy_fixed_solventff.add(fixed_solvent, MGIdx(1))
forcefields.append(solute_fwd_fixed_solventff)
forcefields.append(solute_fwd_todummy_fixed_solventff)
forcefields.append(solute_fwd_fromdummy_fixed_solventff)
clj_ffs.append(solute_fwd_fixed_solventff)
clj_ffs.append(solute_fwd_todummy_fixed_solventff)
clj_ffs.append(solute_fwd_fromdummy_fixed_solventff)
# Now the backwards solute-solvent CLJ energy
solute_bwd_fixed_solventff = InterGroupCLJFF("solute_bwd_hard:fixed_solvent")
solute_bwd_fixed_solventff.add(solute_bwd_hard, MGIdx(0))
solute_bwd_fixed_solventff.add(fixed_solvent, MGIdx(1))
solute_bwd_todummy_fixed_solventff = InterGroupCLJFF("solute_bwd_todummy:fixed_solvent")
solute_bwd_todummy_fixed_solventff.add(solute_bwd_todummy, MGIdx(0))
solute_bwd_todummy_fixed_solventff.add(fixed_solvent, MGIdx(1))
solute_bwd_fromdummy_fixed_solventff = InterGroupCLJFF("solute_bwd_fromdummy:fixed_solvent")
solute_bwd_fromdummy_fixed_solventff.add(solute_bwd_fromdummy, MGIdx(0))
solute_bwd_fromdummy_fixed_solventff.add(fixed_solvent, MGIdx(1))
forcefields.append(solute_bwd_fixed_solventff)
forcefields.append(solute_bwd_todummy_fixed_solventff)
forcefields.append(solute_bwd_fromdummy_fixed_solventff)
clj_ffs.append(solute_bwd_fixed_solventff)
clj_ffs.append(solute_bwd_todummy_fixed_solventff)
clj_ffs.append(solute_bwd_fromdummy_fixed_solventff)
# end of "if use_grid.val"
# end of "if use_sphere.val"
if cutoff_scheme.val == "shift_electrostatics":
for ff in clj_ffs:
ff.setShiftElectrostatics(True)
ff.setSwitchingFunction(HarmonicSwitchingFunction( coul_cutoff.val, coul_cutoff.val,
lj_cutoff.val, lj_cutoff.val) )
elif cutoff_scheme.val == "reaction_field":
for ff in clj_ffs:
ff.setUseReactionField(True)
ff.setReactionFieldDielectric(rf_dielectric.val)
ff.setSwitchingFunction(HarmonicSwitchingFunction( coul_cutoff.val, coul_cutoff.val,
lj_cutoff.val, lj_cutoff.val) )
elif cutoff_scheme.val == "group":
for ff in clj_ffs:
ff.setUseGroupCutoff(True)
ff.setSwitchingFunction(HarmonicSwitchingFunction( coul_cutoff.val, coul_cutoff.val - coul_feather.val,
lj_cutoff.val, lj_cutoff.val - lj_feather.val) )
else:
print "WARNING. Unrecognised cutoff scheme. Using \"shift_electrostatics\"."
for ff in clj_ffs:
ff.setShiftElectrostatics(True)
ff.setSwitchingFunction(HarmonicSwitchingFunction( coul_cutoff.val, coul_cutoff.val,
lj_cutoff.val, lj_cutoff.val) )
for forcefield in forcefields:
system.add(forcefield)
# We only use a "space" if we are using periodic boundaries
if not (use_sphere.val):
# Setting the "space" property
print "Setting space to %s" % space
system.setProperty( "space", space )
system.setProperty( "combiningRules", VariantProperty(combining_rules.val) )
system.setProperty( "coulombPower", VariantProperty(coulomb_power.val) )
system.setProperty( "shiftDelta", VariantProperty(shift_delta.val) )
lam = Symbol("lambda")
lam_fwd = Symbol("lambda_{fwd}")
lam_bwd = Symbol("lambda_{bwd}")
total_nrg = solute_intraff.components().total() + solute_hard_intraclj.components().total() + \
solute_todummy_intraclj.components().total(0) + solute_fromdummy_intraclj.components().total(0) + \
solute_hard_todummy_intraclj.components().total(0) + solute_hard_fromdummy_intraclj.components().total(0) + \
solute_todummy_fromdummy_intraclj.components().total(0) + \
solventff.components().total() + \
solute_hard_solventff.components().total() + \
solute_todummy_solventff.components().total(0) + \
solute_fromdummy_solventff.components().total(0)
fwd_nrg = solute_fwd_intraff.components().total() + solute_fwd_hard_intraclj.components().total() +\
solute_fwd_todummy_intraclj.components().total(0) + solute_fwd_fromdummy_intraclj.components().total(0) +\
solute_fwd_hard_todummy_intraclj.components().total(0) + solute_fwd_hard_fromdummy_intraclj.components().total(0) +\
solute_fwd_todummy_fromdummy_intraclj.components().total(0) +\
solventff.components().total() +\
solute_fwd_hard_solventff.components().total() +\
solute_fwd_todummy_solventff.components().total(0) +\
solute_fwd_fromdummy_solventff.components().total(0)
bwd_nrg = solute_bwd_intraff.components().total() + solute_bwd_hard_intraclj.components().total() +\
solute_bwd_todummy_intraclj.components().total(0) + solute_bwd_fromdummy_intraclj.components().total(0) +\
solute_bwd_hard_todummy_intraclj.components().total(0) + solute_bwd_hard_fromdummy_intraclj.components().total(0) +\
solute_bwd_todummy_fromdummy_intraclj.components().total(0) +\
solventff.components().total() +\
solute_bwd_hard_solventff.components().total() +\
solute_bwd_todummy_solventff.components().total(0) +\
solute_bwd_fromdummy_solventff.components().total(0)
if use_sphere.val:
# add in the extra terms for the fixed forcefield
total_nrg += solvent_fixedff.components().total() + solute_fixed_solventff.components().total() + \
solute_todummy_fixed_solventff.components().total() + \
solute_fromdummy_fixed_solventff.components().total()
fwd_nrg += solvent_fixedff.components().total() + solute_fwd_fixed_solventff.components().total() + \
solute_fwd_todummy_fixed_solventff.components().total() + \
solute_fwd_fromdummy_fixed_solventff.components().total()
bwd_nrg += solvent_fixedff.components().total() + solute_bwd_fixed_solventff.components().total() + \
solute_bwd_todummy_fixed_solventff.components().total() + \
solute_bwd_fromdummy_fixed_solventff.components().total()
e_total = system.totalComponent()
e_fwd = Symbol("E_{fwd}")
e_bwd = Symbol("E_{bwd}")
system.setComponent( e_total, total_nrg )
system.setComponent( e_fwd, fwd_nrg )
system.setComponent( e_bwd, bwd_nrg )
system.setConstant(lam, 0.0)
system.setConstant(lam_fwd, 0.0)
system.setConstant(lam_bwd, 0.0)
de_fwd = Symbol("dE_{fwd}")
de_bwd = Symbol("dE_{bwd}")
system.setComponent( de_fwd, fwd_nrg - total_nrg )
system.setComponent( de_bwd, total_nrg - bwd_nrg )
# Add a space wrapper that wraps all molecules into the box centered at (0,0,0)
#if not (use_sphere.val):
# system.add( SpaceWrapper(Vector(0,0,0), all) )
# Add a monitor that calculates the average total energy and average energy
# deltas - we will collect both a mean average and a zwanzig average
system.add( "total_energy", MonitorComponent(e_total, Average()) )
system.add( "dg_fwd", MonitorComponent(de_fwd, FreeEnergyAverage(temperature.val)) )
system.add( "dg_bwd", MonitorComponent(de_bwd, FreeEnergyAverage(temperature.val)) )
system.add( PerturbationConstraint(solutes) )
system.add( ComponentConstraint( lam_fwd, Min( lam + delta_lambda.val, 1 ) ) )
system.add( ComponentConstraint( lam_bwd, Max( lam - delta_lambda.val, 0 ) ) )
# Add a monitor that records the value of all energy components
if nmoves_per_energy.val:
if nmoves_per_energy.val > 0:
system.add( "energies", MonitorComponents(RecordValues()), nmoves.val / nmoves_per_energy.val )
# Add a monitor that records the coordinates of the system
if (lam_val.val < 0.001 or lam_val.val > 0.999):
if nmoves_per_pdb.val:
if nmoves_per_pdb.val > 0:
system.add( "trajectory", TrajectoryMonitor(MGName("traj")), nmoves.val / nmoves_per_pdb.val )
elif not (nmoves_per_pdb_intermediates.val is None):
if nmoves_per_pdb_intermediates.val > 0:
system.add( "trajectory", TrajectoryMonitor(MGName("traj")), nmoves.val / nmoves_per_pdb_intermediates.val )
# Alpha constraints for the soft force fields
if use_softcore.val:
system.add( PropertyConstraint( "alpha0", FFName("solute_todummy_intraclj"), lam ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fromdummy_intraclj"), 1 - lam ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_hard:todummy_intraclj"), lam ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_hard:fromdummy_intraclj"), 1 - lam ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_todummy:fromdummy_intraclj"), Max( lam, 1 - lam ) ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_todummy:solvent"), lam ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fromdummy:solvent"), 1 - lam ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fwd_todummy_intraclj"), lam_fwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fwd_fromdummy_intraclj"), 1 - lam_fwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fwd_hard:todummy_intraclj"), lam_fwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fwd_hard:fromdummy_intraclj"), 1 - lam_fwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fwd_todummy:fromdummy_intraclj"), Min( lam_fwd, 1 - lam_fwd ) ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fwd_todummy:solvent"), lam_fwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_fwd_fromdummy:solvent"), 1 - lam_fwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_bwd_todummy_intraclj"), lam_bwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_bwd_fromdummy_intraclj"), 1 - lam_bwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_bwd_hard:todummy_intraclj"), lam_bwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_bwd_hard:fromdummy_intraclj"), 1 - lam_bwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_bwd_todummy:fromdummy_intraclj"), Min( lam_bwd, 1 - lam_bwd ) ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_bwd_todummy:solvent"), lam_bwd ) )
system.add( PropertyConstraint( "alpha0", FFName("solute_bwd_fromdummy:solvent"), 1 - lam_bwd ) )
else:
# Setting alpha to 0 makes all of the soft-core forcefields hard
print "Using a purely hard potential"
system.setProperty("alpha0", VariantProperty(0))
system.setComponent( lam, lam_val.val )
return system
def getAtomNearCOG( molecule ):
mol_centre = molecule.evaluate().center()
mindist = 99999.0
for x in range(0, molecule.nAtoms()):
atom = molecule.atoms()[x]
at_coords = atom.property('coordinates')
dist = Vector().distance2(at_coords, mol_centre)
if dist < mindist:
mindist = dist
nearest_atom = atom
return nearest_atom
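# A minimal usage sketch (illustrative only): getAtomNearCOG returns the Atom
# whose coordinates lie closest to the molecule's centre of geometry, e.g.
#
#   central_atom = getAtomNearCOG( solutes[MolIdx(0)].molecule() )
#   print "Rotating about atom %s" % central_atom.name()
#
# setupMoves() below uses it in exactly this way to pick the solute's centre of rotation.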
def setupMoves(system, random_seed):
solutes = system[ MGName("solutes") ]
solute_ref = system[ MGName("solute_ref") ]
solvent = system[ MGName("solvent") ]
print "Setting up moves..."
# Setup Moves
solute_moves = RigidBodyMC( solutes )
# Maximum translation is initialised to zero for the solvation calculation, then
# taken from the solute's 'flexibility' property (and reset to zero again below
# when spherical boundary conditions are used)
solute_moves.setMaximumTranslation( 0.0 * angstrom )
solute_moves.setMaximumTranslation(solutes[MolIdx(0)].molecule().property('flexibility').translation() )
# Find solute atom nearest to the center of geometry
nearestcog_atom = getAtomNearCOG( solutes[MolIdx(0)].molecule() )
#print nearestcog_atom
solute_moves.setCenterOfRotation( GetCOGPoint( nearestcog_atom.name() ) )
solute_moves.setSynchronisedTranslation(True)
solute_moves.setSynchronisedRotation(True)
#solute_moves.setSharedRotationCenter(True)
solute_intra_moves = InternalMoveSingle( solute_ref )
# Each molecule in perturbed_solutes will have its coordinates set to those
# of solute_ref after the move
perturbed_solutes = system[ MGName("perturbed_solutes") ]
solute_intra_moves.setSynchronisedCoordinates(perturbed_solutes)
moves = WeightedMoves()
if use_sphere.val:
# Do not use preferential sampling with the reflection sphere. Preferential
# sampling causes volume expansion around the solute, which would push the
# solvent towards the boundary of the sphere. Given we have already really
# reduced the number of moving solvent molecules, preferential sampling
# is not needed
solvent_moves = RigidBodyMC(solvent)
solvent_moves.setReflectionSphere(sphere_center, sphere_radius.val)
solute_moves.setReflectionSphere(sphere_center, sphere_radius.val)
# Cannot translate the solute when using spherical boundary conditions
solute_moves.setMaximumTranslation( 0.0 * angstrom )
else:
solvent_moves = RigidBodyMC( PrefSampler(solute_ref[MolIdx(0)],
solvent, pref_constant.val) )
all = system[ MGName("all") ]
max_volume_change = 0.50 * solvent.nMolecules() * angstrom3
volume_moves = VolumeMove(all)
volume_moves.setMaximumVolumeChange(max_volume_change)
moves.add( volume_moves, volume_mc_weight.val )
solvent_moves.setMaximumTranslation(max_solvent_translation.val)
solvent_moves.setMaximumRotation(max_solvent_rotation.val)
moves.add( solute_moves, solute_mc_weight.val / 2 )
moves.add( solute_intra_moves, solute_mc_weight.val / 2)
moves.add( solvent_moves, solvent.nMolecules() * solvent_mc_weight_factor.val)
moves.setTemperature(temperature.val)
print "Using a temperature of %f C" % temperature.val.to(celsius)
if not use_sphere.val:
moves.setPressure(pressure.val)
print "Using a pressure of %f atm" % pressure.val.to(atm)
if (random_seed):
print "Using supplied random seed %d" % random_seed.val
else:
random_seed = RanGenerator().randInt(100000,1000000)
print "Generated random seed number %d " % random_seed
moves.setGenerator( RanGenerator(random_seed) )
return moves
def writeComponents(components, filename):
"""This function writes the energy components to the file 'filename'"""
symbols = components.monitoredComponents()
if len(symbols) == 0:
return
newrun = False
if not os.path.exists(filename):
newrun = True
FILE = open(filename, "a")
nrgs = {}
for symbol in symbols:
nrgs[str(symbol)] = components.accumulator(symbol).values()
symbols = nrgs.keys()
symbols.sort()
if newrun:
print >>FILE,"#step ",
for symbol in symbols:
print >>FILE,"%s " % symbol,
print >>FILE,"\n",
for i in range(0, len(nrgs[symbols[0]])):
print >>FILE,"%d " % i,
for symbol in symbols:
print >>FILE,"%f " % nrgs[symbol][i],
print >>FILE,"\n",
def writeSystemData(system, moves, block):
nmoves = moves.nMoves()
monitors = system.monitors()
outdir = out_dir.val
if not os.path.exists(outdir):
os.makedirs(outdir)
try:
pdb = monitors[MonitorName("trajectory")]
pdb.writeToDisk("%s/output%0009d.pdb" % (outdir,block))
except:
pass
try:
energies = monitors[MonitorName("energies")]
if os.path.exists("%s/energies.dat.bz2" % outdir):
os.system("bunzip2 -f %s/energies.dat.bz2" % outdir)
writeComponents( energies, "%s/energies.dat" % outdir )
except:
pass
total_energy = monitors[MonitorName("total_energy")]
dg_fwd = monitors[MonitorName("dg_fwd")]
dg_bwd = monitors[MonitorName("dg_bwd")]
dg_fwd = dg_fwd.accumulator().average() / delta_lambda.val
dg_bwd = dg_bwd.accumulator().average() / delta_lambda.val
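# Sketch of the maths behind dg_fwd/dg_bwd (assuming FreeEnergyAverage accumulates
# the usual Zwanzig exponential average; this is a reading of the code, not an
# authoritative statement of the Sire API):
#
#   dG_fwd ~ -kT ln < exp( -( E(lambda+dlam) - E(lambda) ) / kT ) >
#   dG/dlambda ~ dG_fwd / dlam      (and likewise dG_bwd for the backward window)
#
# dg_avg below is then the central-difference estimate of dG/dlambda at this lambda.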
system.clearStatistics()
# Ugly
lam = system.constantExpression(Symbol("lambda")).toString().toDouble()
#print dg_bwd, dg_fwd, lam
if lam < 0.0001:
dg_bwd = dg_fwd
elif lam > 0.9999:
dg_fwd = dg_bwd
dg_avg = 0.5 * ( dg_fwd + dg_bwd )
#print dg_avg
FILE = open("%s/gradients.dat" % outdir , 'a')
print >>FILE, "%9d %12.8f " % ( block, dg_avg)
FILE = open("%s/moves.dat" % outdir, "w")
print >>FILE, "%s" % moves
def printComponents(nrgs):
keys = nrgs.keys()
keys.sort()
for key in keys:
print "%s %s" % (key, nrgs[key])
print "\n",
@resolveParameters
def run():
print " ### Running a \"free leg\" single topology free energy calculation ### "
timer = QTime()
timer.start()
# Setup the system from scratch if no restart file is available
if not os.path.exists("%s/%s" % (out_dir.val,restart_file.val)):
print "New run. Loading input and creating restart"
print "Lambda is %5.3f" % lam_val.val
amber = Amber()
molecules, space = amber.readCrdTop(crd_file.val,top_file.val)
system = createSystem(molecules, space)
system = setupForcefields(system, space)
moves = setupMoves(system, random_seed.val)
print "Saving restart"
if not os.path.exists(out_dir.val):
os.makedirs(out_dir.val)
Sire.Stream.save( [system, moves], "%s/%s" % (out_dir.val,restart_file.val) )
system, moves = Sire.Stream.load("%s/%s" % (out_dir.val,restart_file.val))
print "Loaded a restart file on wich we have performed %d moves." % moves.nMoves()
block_number = moves.nMoves() / nmoves.val + 1
s1 = timer.elapsed()/1000.
print "Setup took %d s " % ( s1 )
# Run a short simulation
print "Performing simulation for block number %d " % block_number
if print_nrgs.val:
printComponents(system.energies())
system = moves.move(system, nmoves.val, True)
s2 = timer.elapsed()/1000.
print "Simulation took %d s " % ( s2 - s1)
# Update statistics and save restart
writeSystemData(system, moves, block_number)
Sire.Stream.save( [system, moves], "%s/%s" % (out_dir.val,restart_file.val) )
# Compress some output files
outpdb = "%s/output%0009d.pdb" % (out_dir.val,block_number)
if os.path.exists(outpdb):
os.system( "%s %s/output%0009d*" % (compress.val, out_dir.val, block_number) )
if os.path.exists("energies.dat"):
os.system(" %s %s/energies.dat" % (out_dir.val,compress.val) )
|
chryswoods/Sire
|
wrapper/Tools/FDTISingleFree.py
|
Python
|
gpl-2.0
| 59,222
|
[
"Amber"
] |
6cf35c94e3e15a08e270321e7e4d2212b8eaa2fdb63abf8f9dc35bfcdbdbc46b
|
#!/usr/bin/env python
from xml.dom.minidom import parse, parseString
import getopt
import sys
class DomHandler():
def __init__(self, file_name):
self.dom = parse(file_name)
def setValue(self, attr_name, attr_value):
result = False
for node in self.dom.getElementsByTagName('parameter'):
if node.getAttribute('name') == attr_name:
""" parameter name is equal to attr_name """
print "find attribute name: %s" % (attr_name)
result = True
if node.getAttribute('value') == attr_value:
continue
else:
node.setAttribute('value', attr_value)
print "set attribute value: %s" % (attr_value)
return result
def save(self, file_name):
f = open(file_name, 'w')
f.write(self.dom.toxml())
f.close()
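# Illustrative use of DomHandler (file and attribute names are hypothetical,
# shown only as a sketch; main() below does the same with command-line arguments):
#
#   handler = DomHandler("WorldServerModule.module")
#   if handler.setValue("local_id", "42"):
#       handler.save("WorldServerModule.module")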
def main():
if len(sys.argv) < 4:
usage()
sys.exit(2)
fileName = sys.argv[1]
attrName = sys.argv[2]
attrValue = sys.argv[3]
simpleDom = DomHandler(fileName)
result = simpleDom.setValue(attrName, attrValue)
if not result:
print "set attribute fail"
else:
simpleDom.save(fileName)
def usage():
print "usage: %s [file] [name] [value]" % (__file__)
print\
"""
[file] xml file
[name] attribute name
[value] value to set to that attribute
"""
def test():
dom1 = parse( "/nfs/home/zac/zillians/lib/node/world-server/WorldServerModule.module" ) # parse an XML file
dom2 = parseString( "<myxml>Some data <empty/> some more data</myxml>" )
print dom1.toxml()
#print dom2.toxml()
for node in dom1.getElementsByTagName('parameter'): # visit every node <bar />
if node.getAttribute("name") == "local_id":
print "node attribute value: %s" % (node.getAttribute("value"))
if __name__ == "__main__":
main()
|
zillians/supercell
|
scripts/set_xml.py
|
Python
|
agpl-3.0
| 1,678
|
[
"VisIt"
] |
db0ba7713e12002b747ba65340dfc90c6d88fe6ac1214ae03914de76ee387857
|
#!/usr/bin/python
"""
Copyright 2015 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import cgi
import Cookie
import MySQLdb
import dbSession
import dbShared
import Image
import urllib
import time
import subprocess
import json
import difflib
import ghObjects
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = cookies['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = cookies['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = cookies['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
else:
currentUser = ''
loginResult = 'success'
sid = form.getfirst('gh_sid', '')
# Get a session
logged_state = 0
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
def lookupResourceType(typeName):
try:
conn = dbShared.ghConn()
cursor = conn.cursor()
except Exception:
result = "Error: could not connect to database"
cursor = None
resourceType = "no match"
if (cursor):
sqlStr = 'SELECT resourceType, resourceTypeName FROM tResourceType WHERE enterable > 0;'
cursor.execute(sqlStr)
row = cursor.fetchone()
while (row != None):
#find a close enough match to ocr detected name
if len(difflib.get_close_matches(typeName, [row[1]], 1, 0.90)) > 0:
resourceType = row[0]
break
row = cursor.fetchone()
cursor.close()
conn.close()
return resourceType
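# The fuzzy matching above relies on difflib.get_close_matches; a minimal sketch
# of the same idea outside the database loop (values are illustrative only):
#
#   import difflib
#   difflib.get_close_matches("Desh Copper", ["Desh Copper", "Polysteel Copper"], 1, 0.90)
#   # -> ['Desh Copper']   (an empty list when nothing is at least 90% similar)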
errstr=''
if not form.has_key("capture"):
errstr = "Error: No capture image sent."
else:
img_data = form["capture"]
if not img_data.file: errstr = "Error: capture is not a file."
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
# Get a session
logged_state = 0
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
if (useCookies == 0):
linkappend = 'gh_sid=' + sid
s = ghObjects.resourceSpawn()
# Check for errors
if errstr == '':
imgName = img_data.filename
if (logged_state == 0):
errstr = errstr + "Error: You must be logged in to add resources. \r\n"
try:
im = Image.open(img_data.file)
except:
errstr = errstr + "Error: I don't recognize the file you uploaded as an image (" + imgName + "). Please make sure it is a jpg, gif, or png. \r\n"
if (errstr != ''):
result = "Error: Could not detect resource because of the following errors:\r\n" + errstr
else:
result = ''
#resize to improve ocr speed if too big, quality if too small
xsize, ysize = im.size
newwidth = xsize
newheight = ysize
if (ysize > 1000 or ysize < 800):
if xsize >= ysize:
newwidth = int(xsize * (900.0/ysize))
newheight = 900
elif ysize > xsize:
newheight = int(ysize * (900.0/xsize))
newwidth = 900
# also convert to greyscale to improve recognition
try:
im = im.resize((newwidth,newheight), Image.ANTIALIAS).convert("L")
except IOError:
result = "Error: I can't handle that type of image file, please try a different one."
if result == '':
# write image file
imageName = currentUser + str(time.time())
im.save("temp/"+imageName+".png", dpi=(300,300))
# detect text with tesseract
ocrOutput = ""
try:
ocrOutput = subprocess.check_output(["tesseract", "temp/"+imageName+".png", "temp/"+imageName])
#ocrOutput = subprocess.check_output(["bash", "-c", "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/pwillworth/tesseract/lib;export TESSDATA_PREFIX=/home/pwillworth/tesseract/share;/home/pwillworth/tesseract/bin/tesseract temp/"+imageName+".png temp/"+imageName])
except subprocess.CalledProcessError:
result = "Error: Image could not be processed."
except OSError:
result = "Error: OCR service temporarily unavailable."
# interpret tesseract output
if result == "":
lastLine = ""
f = open("temp/"+imageName+".txt", "r")
for line in f:
adjLine = line.replace("'","").replace("-","").strip()
if ":" in adjLine:
vp = adjLine.split(":")
if len(difflib.get_close_matches(vp[0], ["Resource Type"], 1, 0.7)) > 0:
s.spawnName = vp[1].strip()
if len(difflib.get_close_matches(vp[0], ["Resource Class"], 1, 0.7)) > 0:
s.resourceTypeName = vp[1].strip()
lastLine = "class"
else:
lastLine = ""
if len(difflib.get_close_matches(vp[0], ["Entangle Resistance"], 1, 0.8)) > 0:
try:
s.stats.ER = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Cold Resistance"], 1, 0.8)) > 0:
try:
s.stats.CR = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Conductivity"], 1, 0.8)) > 0:
try:
s.stats.CD = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Decay Resistance"], 1, 0.8)) > 0:
try:
s.stats.DR = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Flavor"], 1, 0.8)) > 0:
try:
s.stats.FL = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Heat Resistance"], 1, 0.8)) > 0:
try:
s.stats.HR = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Malleability"], 1, 0.8)) > 0:
try:
s.stats.MA = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Potential Energy"], 1, 0.8)) > 0:
try:
s.stats.PE = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Overall quality"], 1, 0.8)) > 0:
try:
s.stats.OQ = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Shock Resistance"], 1, 0.8)) > 0:
try:
s.stats.SR = int(vp[1].strip())
except:
pass
if len(difflib.get_close_matches(vp[0], ["Unit Toughness"], 1, 0.8)) > 0:
try:
s.stats.UT = int(vp[1].strip())
except:
pass
else:
if lastLine == "class":
s.resourceTypeName = s.resourceTypeName + " " + adjLine.strip()
s.resourceType = lookupResourceType(s.resourceTypeName)
f.close()
result = "image scanned"
# remove temp files
os.remove("temp/"+imageName+".txt")
os.remove("temp/"+imageName+".png")
print "Content-Type: text/json\n"
if (s.spawnName != "" or s.resourceType != "no match"):
print json.dumps({"result": result, "spawnData": {"spawnName": s.spawnName, "resourceType": s.resourceType, "resourceTypeName": s.resourceTypeName, "ER": s.stats.ER, "CR": s.stats.CR, "CD": s.stats.CD, "DR": s.stats.DR, "FL": s.stats.FL, "HR": s.stats.HR, "MA": s.stats.MA, "PE": s.stats.PE, "OQ": s.stats.OQ, "SR": s.stats.SR, "UT": s.stats.UT}})
else:
print json.dumps({"result": result})
if (result.find("Error:") > -1):
sys.exit(500)
else:
sys.exit(200)
|
druss316/G-Harvestor
|
html/getResourceByImage.py
|
Python
|
gpl-3.0
| 9,438
|
[
"Galaxy"
] |
dab5a5de5a9bfc72d3dfbbeb4fa18137ceb6f24fd9787fee3cc9873ab678c01b
|
#!/usr/bin/env python
# encoding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import numpy
import unittest
import os
from rmgpy.cantherm.qchem import QchemLog
from rmgpy.statmech import Conformer, IdealGasTranslation, LinearRotor, NonlinearRotor, HarmonicOscillator, HinderedRotor
import rmgpy.constants as constants
from external.wip import work_in_progress
################################################################################
class QChemTest(unittest.TestCase):
"""
Contains unit tests for the chempy.io.qchem module, used for reading
and writing Qchem files.
"""
def testNumberOfAtomsFromQchemLog(self):
"""
Uses a Qchem log files to test that
number of atoms can be properly read.
"""
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','npropyl.out'))
self.assertEqual(log.getNumberOfAtoms(), 10)
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','co.out'))
self.assertEqual(log.getNumberOfAtoms(), 2)
def testEnergyFromQchemLog(self):
"""
Uses a Qchem log files to test that
molecular energies can be properly read.
"""
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','npropyl.out'))
self.assertAlmostEqual(log.loadEnergy(), -310896203.5432524, 1e-5)
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','co.out'))
self.assertAlmostEqual(log.loadEnergy(), -297402545.0217114, 1e-5)
def testLoadVibrationsFromQchemLog(self):
"""
Uses a Qchem log files to test that
molecular energies can be properly read.
"""
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','npropyl.out'))
conformer = log.loadConformer()
self.assertEqual(len(conformer.modes[2]._frequencies.getValue()), 24)
self.assertEqual(conformer.modes[2]._frequencies.getValue()[5], 881.79)
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','co.out'))
conformer = log.loadConformer()
self.assertEqual(len(conformer.modes[2]._frequencies.getValue()), 1)
self.assertEqual(conformer.modes[2]._frequencies.getValue(), 2253.16)
def testLoadNpropylModesFromQchemLog(self):
"""
Uses a Qchem log file for npropyl to test that its
molecular modes can be properly read.
"""
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','npropyl.out'))
conformer = log.loadConformer()
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,IdealGasTranslation)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,NonlinearRotor)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,HarmonicOscillator)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,HinderedRotor)]) == 0)
def testSpinMultiplicityFromQchemLog(self):
"""
Uses a Qchem log file for npropyl to test that its
molecular degrees of freedom can be properly read.
"""
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','npropyl.out'))
conformer = log.loadConformer()
self.assertEqual(conformer.spinMultiplicity, 2)
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','co.out'))
conformer = log.loadConformer()
self.assertEqual(conformer.spinMultiplicity, 1)
def testLoadCOModesFromQchemLog(self):
"""
Uses a Qchem log file for CO to test that its
molecular degrees of freedom can be properly read.
"""
log = QchemLog(os.path.join(os.path.dirname(__file__),'data','co.out'))
conformer = log.loadConformer()
E0 = log.loadEnergy()
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,IdealGasTranslation)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,LinearRotor)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,NonlinearRotor)]) == 0)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,HarmonicOscillator)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode,HinderedRotor)]) == 0)
if __name__ == '__main__':
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
Molecular-Image-Recognition/Molecular-Image-Recognition
|
code/rmgpy/cantherm/qchemTest.py
|
Python
|
mit
| 6,019
|
[
"ChemPy"
] |
a6b19c1ba0e4d6dc54c8699e1d42cb3132504ac51ff4e627819d5958741e6420
|
from __future__ import print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import os
import tempfile
import time
import numpy as np
import fitsio
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.file import trymakedirs
from astrometry.util.plotutils import dimshow
from astrometry.util.util import Tan, Sip, anwcs_t
from astrometry.util.ttime import CpuMeas, Time
from astrometry.util.starutil_numpy import degrees_between, hmsstring2ra, dmsstring2dec
from astrometry.util.miscutils import polygons_intersect, estimate_mode, clip_polygon, clip_wcs
from astrometry.util.resample import resample_with_wcs,OverlapError
from tractor.basics import ConstantSky, NanoMaggies, ConstantFitsWcs, LinearPhotoCal, PointSource, RaDecPos
from tractor.engine import Image, Catalog, Patch
from tractor.galaxy import enable_galaxy_cache, disable_galaxy_cache
from tractor.utils import get_class_from_name
from tractor.ellipses import EllipseESoft
from tractor.sfd import SFDMap
from utils import EllipseWithPriors
# search order: $TMPDIR, $TEMP, $TMP, then /tmp, /var/tmp, /usr/tmp
tempdir = tempfile.gettempdir()
# From: http://www.noao.edu/noao/staff/fvaldes/CPDocPrelim/PL201_3.html
# 1 -- detector bad pixel InstCal
# 1 -- detector bad pixel/no data Resampled
# 1 -- No data Stacked
# 2 -- saturated InstCal/Resampled
# 4 -- interpolated InstCal/Resampled
# 16 -- single exposure cosmic ray InstCal/Resampled
# 64 -- bleed trail InstCal/Resampled
# 128 -- multi-exposure transient InstCal/Resampled
CP_DQ_BITS = dict(badpix=1, satur=2, interp=4, cr=16, bleed=64,
trans=128,
edge = 256,
edge2 = 512) # in z-band images?
# Ugly hack: for sphinx documentation, the astrometry and tractor (and
# other) packages are replaced by mock objects. But you can't
# subclass a mock object correctly, so we have to un-mock
# EllipseWithPriors here.
if 'Mock' in str(type(EllipseWithPriors)):
class duck(object):
pass
EllipseWithPriors = duck
class LegacyEllipseWithPriors(EllipseWithPriors):
# Prior on (softened) ellipticity: Gaussian with this standard deviation
ellipticityStd = 0.25
class BrickDuck(object):
pass
def get_version_header(program_name, decals_dir):
from astrometry.util.run_command import run_command
import datetime
if program_name is None:
import sys
program_name = sys.argv[0]
rtn,version,err = run_command('git describe')
if rtn:
raise RuntimeError('Failed to get version string (git describe): ' + version + err)
version = version.strip()
print('Version:', version)
hdr = fitsio.FITSHDR()
for s in [
'Data product of the DECam Legacy Survey (DECaLS)',
'Full documentation at http://legacysurvey.org',
]:
hdr.add_record(dict(name='COMMENT', value=s, comment=s))
hdr.add_record(dict(name='TRACTORV', value=version,
comment='Tractor git version'))
hdr.add_record(dict(name='DECALSV', value=decals_dir,
comment='DECaLS version'))
hdr.add_record(dict(name='DECALSDR', value='DR1',
comment='DECaLS release name'))
hdr.add_record(dict(name='DECALSDT', value=datetime.datetime.now().isoformat(),
comment='%s run time' % program_name))
hdr.add_record(dict(name='SURVEY', value='DECaLS',
comment='DECam Legacy Survey'))
import socket
hdr.add_record(dict(name='HOSTNAME', value=socket.gethostname(),
comment='Machine where runbrick.py was run'))
hdr.add_record(dict(name='HOSTFQDN', value=socket.getfqdn(),
comment='Machine where runbrick.py was run'))
hdr.add_record(dict(name='NERSC', value=os.environ.get('NERSC_HOST', 'none'),
comment='NERSC machine where runbrick.py was run'))
return hdr
class MyFITSHDR(fitsio.FITSHDR):
''' This is copied straight from fitsio, simply removing "BUNIT"
from the list of headers to remove.
'''
def clean(self):
"""
Remove reserved keywords from the header.
These are keywords that the fits writer must write in order
to maintain consistency between header and data.
"""
rmnames = ['SIMPLE','EXTEND','XTENSION','BITPIX','PCOUNT','GCOUNT',
'THEAP',
'EXTNAME',
#'BUNIT',
'BSCALE','BZERO','BLANK',
'ZQUANTIZ','ZDITHER0','ZIMAGE','ZCMPTYPE',
'ZSIMPLE','ZTENSION','ZPCOUNT','ZGCOUNT',
'ZBITPIX','ZEXTEND',
#'FZTILELN','FZALGOR',
'CHECKSUM','DATASUM']
self.delete(rmnames)
r = self._record_map.get('NAXIS',None)
if r is not None:
naxis = int(r['value'])
self.delete('NAXIS')
rmnames = ['NAXIS%d' % i for i in xrange(1,naxis+1)]
self.delete(rmnames)
r = self._record_map.get('ZNAXIS',None)
self.delete('ZNAXIS')
if r is not None:
znaxis = int(r['value'])
rmnames = ['ZNAXIS%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
rmnames = ['ZTILE%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
rmnames = ['ZNAME%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
rmnames = ['ZVAL%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
r = self._record_map.get('TFIELDS',None)
if r is not None:
tfields = int(r['value'])
self.delete('TFIELDS')
if tfields > 0:
nbase = ['TFORM','TTYPE','TDIM','TUNIT','TSCAL','TZERO',
'TNULL','TDISP','TDMIN','TDMAX','TDESC','TROTA',
'TRPIX','TRVAL','TDELT','TCUNI',
#'FZALG'
]
for i in xrange(1,tfields+1):
names=['%s%d' % (n,i) for n in nbase]
self.delete(names)
def bin_image(data, invvar, S):
# rebin image data
H,W = data.shape
sH,sW = (H+S-1)/S, (W+S-1)/S
newdata = np.zeros((sH,sW), dtype=data.dtype)
newiv = np.zeros((sH,sW), dtype=invvar.dtype)
for i in range(S):
for j in range(S):
iv = invvar[i::S, j::S]
subh,subw = iv.shape
newdata[:subh,:subw] += data[i::S, j::S] * iv
newiv [:subh,:subw] += iv
newdata /= (newiv + (newiv == 0)*1.)
newdata[newiv == 0] = 0.
return newdata,newiv
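# A small worked example of the binning above (illustrative sketch): with S=2 a
# 4x4 image becomes 2x2, each output pixel being the inverse-variance-weighted
# mean of the corresponding 2x2 block.
#
#   data = np.arange(16, dtype=np.float32).reshape(4, 4)
#   invvar = np.ones_like(data)
#   newdata, newiv = bin_image(data, invvar, 2)
#   # newdata[0, 0] == (0 + 1 + 4 + 5) / 4. == 2.5 ;  newiv[0, 0] == 4.0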
def segment_and_group_sources(image, T, name=None, ps=None, plots=False):
'''
*image*: binary image that defines "blobs"
*T*: source table; only ".itx" and ".ity" elements are used (x,y integer pix pos)
- ".blob" field is added.
*name*: for debugging only
Returns: (blobs, blobsrcs, blobslices)
*blobs*: image, values -1 = no blob, integer blob indices
*blobsrcs*: list of np arrays of integers, elements in T within each blob
*blobslices*: list of slice objects for blob bounding-boxes.
'''
from scipy.ndimage.morphology import binary_fill_holes
from scipy.ndimage.measurements import label, find_objects
emptyblob = 0
image = binary_fill_holes(image)
blobs,nblobs = label(image)
print('N detected blobs:', nblobs)
H,W = image.shape
del image
blobslices = find_objects(blobs)
T.blob = blobs[T.ity, T.itx]
if plots:
plt.clf()
dimshow(blobs > 0, vmin=0, vmax=1)
ax = plt.axis()
for i,bs in enumerate(blobslices):
sy,sx = bs
by0,by1 = sy.start, sy.stop
bx0,bx1 = sx.start, sx.stop
plt.plot([bx0, bx0, bx1, bx1, bx0], [by0, by1, by1, by0, by0], 'r-')
plt.text((bx0+bx1)/2., by0, '%i' % (i+1), ha='center', va='bottom', color='r')
plt.plot(T.itx, T.ity, 'rx')
for i,t in enumerate(T):
plt.text(t.itx, t.ity, 'src %i' % i, color='red', ha='left', va='center')
plt.axis(ax)
plt.title('Blobs')
ps.savefig()
# Find sets of sources within blobs
blobsrcs = []
keepslices = []
blobmap = {}
dropslices = {}
for blob in range(1, nblobs+1):
Isrcs = np.flatnonzero(T.blob == blob)
if len(Isrcs) == 0:
#print('Blob', blob, 'has no sources')
blobmap[blob] = -1
dropslices[blob] = blobslices[blob-1]
continue
blobmap[blob] = len(blobsrcs)
blobsrcs.append(Isrcs)
bslc = blobslices[blob-1]
keepslices.append(bslc)
blobslices = keepslices
# Find sources that do not belong to a blob and add them as
# singleton "blobs"; otherwise they don't get optimized.
# for sources outside the image bounds, what should we do?
inblobs = np.zeros(len(T), bool)
for Isrcs in blobsrcs:
inblobs[Isrcs] = True
noblobs = np.flatnonzero(np.logical_not(inblobs))
del inblobs
# Add new fake blobs!
for ib,i in enumerate(noblobs):
#S = 3
S = 5
bslc = (slice(np.clip(T.ity[i] - S, 0, H-1), np.clip(T.ity[i] + S+1, 0, H)),
slice(np.clip(T.itx[i] - S, 0, W-1), np.clip(T.itx[i] + S+1, 0, W)))
# Does this new blob overlap existing blob(s)?
oblobs = np.unique(blobs[bslc])
oblobs = oblobs[oblobs != emptyblob]
#print('This blob overlaps existing blobs:', oblobs)
if len(oblobs) > 1:
print('WARNING: new source overlaps multiple existing blobs; not merging them (though perhaps we should)')
if len(oblobs):
blob = oblobs[0]
#print('Adding source to existing blob', blob)
blobs[bslc][blobs[bslc] == emptyblob] = blob
blobindex = blobmap[blob]
if blobindex == -1:
# the overlapping blob was going to be dropped -- restore it.
blobindex = len(blobsrcs)
blobmap[blob] = blobindex
blobslices.append(dropslices[blob])
blobsrcs.append(np.array([], np.int64))
# Expand the existing blob slice to encompass this new source
oldslc = blobslices[blobindex]
sy,sx = oldslc
oy0,oy1, ox0,ox1 = sy.start,sy.stop, sx.start,sx.stop
sy,sx = bslc
ny0,ny1, nx0,nx1 = sy.start,sy.stop, sx.start,sx.stop
newslc = slice(min(oy0,ny0), max(oy1,ny1)), slice(min(ox0,nx0), max(ox1,nx1))
blobslices[blobindex] = newslc
# Add this source to the list of source indices for the existing blob.
blobsrcs[blobindex] = np.append(blobsrcs[blobindex], np.array([i]))
else:
# Set synthetic blob number
blob = nblobs+1 + ib
blobs[bslc][blobs[bslc] == emptyblob] = blob
blobmap[blob] = len(blobsrcs)
blobslices.append(bslc)
blobsrcs.append(np.array([i]))
#print('Added', len(noblobs), 'new fake singleton blobs')
# Remap the "blobs" image so that empty regions are = -1 and the blob values
# correspond to their indices in the "blobsrcs" list.
if len(blobmap):
maxblob = max(blobmap.keys())
else:
maxblob = 0
maxblob = max(maxblob, blobs.max())
bm = np.zeros(maxblob + 1, int)
for k,v in blobmap.items():
bm[k] = v
bm[0] = -1
# DEBUG
if plots:
fitsio.write('blobs-before-%s.fits' % name, blobs, clobber=True)
# Remap blob numbers
blobs = bm[blobs]
if plots:
fitsio.write('blobs-after-%s.fits' % name, blobs, clobber=True)
if plots:
plt.clf()
dimshow(blobs > -1, vmin=0, vmax=1)
ax = plt.axis()
for i,bs in enumerate(blobslices):
sy,sx = bs
by0,by1 = sy.start, sy.stop
bx0,bx1 = sx.start, sx.stop
plt.plot([bx0, bx0, bx1, bx1, bx0], [by0, by1, by1, by0, by0], 'r-')
plt.text((bx0+bx1)/2., by0, '%i' % (i+1), ha='center', va='bottom', color='r')
plt.plot(T.itx, T.ity, 'rx')
for i,t in enumerate(T):
plt.text(t.itx, t.ity, 'src %i' % i, color='red', ha='left', va='center')
plt.axis(ax)
plt.title('Blobs')
ps.savefig()
for j,Isrcs in enumerate(blobsrcs):
for i in Isrcs:
#assert(blobs[T.ity[i], T.itx[i]] == j)
if (blobs[T.ity[i], T.itx[i]] != j):
print('---------------------------!!!--------------------------')
print('Blob', j, 'sources', Isrcs)
print('Source', i, 'coords x,y', T.itx[i], T.ity[i])
print('Expected blob value', j, 'but got', blobs[T.ity[i], T.itx[i]])
T.blob = blobs[T.ity, T.itx]
assert(len(blobsrcs) == len(blobslices))
return blobs, blobsrcs, blobslices
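# Sketch of how the segmentation above is typically consumed (variable names here
# are illustrative, not from this module):
#
#   hotmap = (sedsn > 5.)                                  # boolean detection image
#   blobs, blobsrcs, blobslices = segment_and_group_sources(hotmap, T)
#   for Isrcs, bslc in zip(blobsrcs, blobslices):
#       subblobs = blobs[bslc]                             # per-blob cutout
#       sources_in_blob = T[Isrcs]                         # its sources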
def get_sdss_sources(bands, targetwcs, photoobjdir=None, local=True,
extracols=[], ellipse=None):
'''
Finds SDSS catalog sources within the given `targetwcs` region,
returning FITS table and Tractor Source objects.
Returns
-------
cat : Tractor Catalog object
Tractor Source objects for the SDSS catalog entries
objs : fits_table object
FITS table object for the sources. Row-by-row parallel to `cat`.
'''
from astrometry.sdss import DR9, band_index, AsTransWrapper
from astrometry.sdss.fields import read_photoobjs_in_wcs
from tractor.sdss import get_tractor_sources_dr9
# FIXME?
margin = 0.
if ellipse is None:
ellipse = EllipseESoft.fromRAbPhi
sdss = DR9(basedir=photoobjdir)
if local:
local = (local and ('BOSS_PHOTOOBJ' in os.environ)
and ('PHOTO_RESOLVE' in os.environ))
if local:
sdss.useLocalTree()
cols = ['objid', 'ra', 'dec', 'fracdev', 'objc_type',
'theta_dev', 'theta_deverr', 'ab_dev', 'ab_deverr',
'phi_dev_deg',
'theta_exp', 'theta_experr', 'ab_exp', 'ab_experr',
'phi_exp_deg',
'resolve_status', 'nchild', 'flags', 'objc_flags',
'run','camcol','field','id',
'psfflux', 'psfflux_ivar',
'cmodelflux', 'cmodelflux_ivar',
'modelflux', 'modelflux_ivar',
'devflux', 'expflux', 'extinction'] + extracols
# If we have a window_flist file cut to primary objects, use that.
# This file comes from svn+ssh://astrometry.net/svn/trunk/projects/wise-sdss-phot
# cut-window-flist.py, and used resolve/2013-07-29 (pre-DR13) as input.
wfn = 'window_flist-cut.fits'
if not os.path.exists(wfn):
# default to the usual window_flist.fits file.
wfn = None
objs = read_photoobjs_in_wcs(targetwcs, margin, sdss=sdss, cols=cols, wfn=wfn)
if objs is None:
print('No photoObjs in wcs')
return None,None
print('Got', len(objs), 'photoObjs')
print('Bands', bands, '->', list(bands))
# It can be string-valued
objs.objid = np.array([int(x) if len(x) else 0 for x in objs.objid])
srcs = get_tractor_sources_dr9(
None, None, None, objs=objs, sdss=sdss, bands=list(bands),
nanomaggies=True, fixedComposites=True, useObjcType=True,
ellipse=ellipse)
print('Created', len(srcs), 'Tractor sources')
# record coordinates in target brick image
ok,objs.tx,objs.ty = targetwcs.radec2pixelxy(objs.ra, objs.dec)
objs.tx -= 1
objs.ty -= 1
W,H = targetwcs.get_width(), targetwcs.get_height()
objs.itx = np.clip(np.round(objs.tx), 0, W-1).astype(int)
objs.ity = np.clip(np.round(objs.ty), 0, H-1).astype(int)
cat = Catalog(*srcs)
return cat, objs
def _detmap(X):
from scipy.ndimage.filters import gaussian_filter
(tim, targetwcs, H, W) = X
R = tim_get_resamp(tim, targetwcs)
if R is None:
return None,None,None,None
ie = tim.getInvvar()
psfnorm = 1./(2. * np.sqrt(np.pi) * tim.psf_sigma)
detim = tim.getImage().copy()
detim[ie == 0] = 0.
detim = gaussian_filter(detim, tim.psf_sigma) / psfnorm**2
detsig1 = tim.sig1 / psfnorm
subh,subw = tim.shape
detiv = np.zeros((subh,subw), np.float32) + (1. / detsig1**2)
detiv[ie == 0] = 0.
(Yo,Xo,Yi,Xi) = R
return Yo, Xo, detim[Yi,Xi], detiv[Yi,Xi]
def tim_get_resamp(tim, targetwcs):
if hasattr(tim, 'resamp'):
return tim.resamp
try:
Yo,Xo,Yi,Xi,nil = resample_with_wcs(targetwcs, tim.subwcs, [], 2)
except OverlapError:
print('No overlap')
return None
if len(Yo) == 0:
return None
resamp = [x.astype(np.int16) for x in (Yo,Xo,Yi,Xi)]
return resamp
def detection_maps(tims, targetwcs, bands, mp):
# Render the detection maps
H,W = targetwcs.shape
detmaps = dict([(b, np.zeros((H,W), np.float32)) for b in bands])
detivs = dict([(b, np.zeros((H,W), np.float32)) for b in bands])
for tim, (Yo,Xo,incmap,inciv) in zip(
tims, mp.map(_detmap, [(tim, targetwcs, H, W) for tim in tims])):
if Yo is None:
continue
detmaps[tim.band][Yo,Xo] += incmap*inciv
detivs [tim.band][Yo,Xo] += inciv
for band in bands:
detmaps[band] /= np.maximum(1e-16, detivs[band])
# back into lists, not dicts
detmaps = [detmaps[b] for b in bands]
detivs = [detivs [b] for b in bands]
return detmaps, detivs
def get_rgb(imgs, bands, mnmx=None, arcsinh=None, scales=None):
'''
Given a list of images in the given bands, returns a scaled RGB
image.
*imgs* a list of numpy arrays, all the same size, in nanomaggies
*bands* a list of strings, eg, ['g','r','z']
*mnmx* = (min,max), values that will become black/white *after* scaling.
Default is (-3,10)
*arcsinh* use nonlinear scaling as in SDSS
*scales*
Returns a (H,W,3) numpy array with values between 0 and 1.
'''
bands = ''.join(bands)
grzscales = dict(g = (2, 0.0066),
r = (1, 0.01),
z = (0, 0.025),
)
if scales is None:
if bands == 'grz':
scales = grzscales
elif bands == 'urz':
scales = dict(u = (2, 0.0066),
r = (1, 0.01),
z = (0, 0.025),
)
elif bands == 'gri':
# scales = dict(g = (2, 0.004),
# r = (1, 0.0066),
# i = (0, 0.01),
# )
scales = dict(g = (2, 0.002),
r = (1, 0.004),
i = (0, 0.005),
)
else:
scales = grzscales
h,w = imgs[0].shape
rgb = np.zeros((h,w,3), np.float32)
# Convert to ~ sigmas
for im,band in zip(imgs, bands):
plane,scale = scales[band]
rgb[:,:,plane] = (im / scale).astype(np.float32)
if mnmx is None:
mn,mx = -3, 10
else:
mn,mx = mnmx
if arcsinh is not None:
def nlmap(x):
return np.arcsinh(x * arcsinh) / np.sqrt(arcsinh)
rgb = nlmap(rgb)
mn = nlmap(mn)
mx = nlmap(mx)
rgb = (rgb - mn) / (mx - mn)
return np.clip(rgb, 0., 1.)
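# Illustrative call (a sketch; the coadd images are assumed to be in nanomaggies,
# as stated in the docstring above):
#
#   rgb = get_rgb([gimg, rimg, zimg], ['g', 'r', 'z'], mnmx=(-1, 100.), arcsinh=1.)
#   plt.imsave('image.jpg', rgb, origin='lower')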
def switch_to_soft_ellipses(cat):
from tractor.galaxy import DevGalaxy, ExpGalaxy, FixedCompositeGalaxy
from tractor.ellipses import EllipseESoft
for src in cat:
if isinstance(src, (DevGalaxy, ExpGalaxy)):
src.shape = EllipseESoft.fromEllipseE(src.shape)
elif isinstance(src, FixedCompositeGalaxy):
src.shapeDev = EllipseESoft.fromEllipseE(src.shapeDev)
src.shapeExp = EllipseESoft.fromEllipseE(src.shapeExp)
def brick_catalog_for_radec_box(ralo, rahi, declo, dechi,
decals, catpattern, bricks=None):
'''
Merges multiple Tractor brick catalogs to cover an RA,Dec
bounding-box.
No cleverness with RA wrap-around; assumes ralo < rahi.
decals: Decals object
bricks: table of bricks, eg from Decals.get_bricks()
catpattern: filename pattern of catalog files to read,
eg "pipebrick-cats/tractor-phot-%06i.its"
'''
assert(ralo < rahi)
assert(declo < dechi)
if bricks is None:
bricks = decals.get_bricks_readonly()
I = decals.bricks_touching_radec_box(bricks, ralo, rahi, declo, dechi)
print(len(I), 'bricks touch RA,Dec box')
TT = []
hdr = None
for i in I:
brick = bricks[i]
fn = catpattern % brick.brickid
print('Catalog', fn)
if not os.path.exists(fn):
print('Warning: catalog does not exist:', fn)
continue
T = fits_table(fn, header=True)
if T is None or len(T) == 0:
print('Warning: empty catalog', fn)
continue
T.cut((T.ra >= ralo ) * (T.ra <= rahi) *
(T.dec >= declo) * (T.dec <= dechi))
TT.append(T)
if len(TT) == 0:
return None
T = merge_tables(TT)
# arbitrarily keep the first header
T._header = TT[0]._header
return T
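# Illustrative call (a sketch; the Decals() constructor and the catalog path
# pattern are assumptions, not taken from this module):
#
#   decals = Decals()
#   T = brick_catalog_for_radec_box(240.0, 240.5, 5.0, 5.5, decals,
#                                   'pipebrick-cats/tractor-phot-%06i.fits')
#   # T is a merged fits_table of all entries in the RA,Dec box, or None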
def ccd_map_image(valmap, empty=0.):
'''
valmap: { 'N7' : 1., 'N8' : 17.8 }
Returns: a numpy image (shape (12,14)) with values mapped to their CCD locations.
'''
img = np.empty((12,14))
img[:,:] = empty
for k,v in valmap.items():
x0,x1,y0,y1 = ccd_map_extent(k)
#img[y0+6:y1+6, x0+7:x1+7] = v
img[y0:y1, x0:x1] = v
return img
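# Illustrative use of the CCD-map helpers (a sketch with made-up values):
#
#   img = ccd_map_image({'N7': 1., 'N8': 17.8}, empty=np.nan)
#   dimshow(img)    # 12x14 focal-plane map with the values at the CCD positions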
def ccd_map_center(ccdname):
x0,x1,y0,y1 = ccd_map_extent(ccdname)
return (x0+x1)/2., (y0+y1)/2.
def ccd_map_extent(ccdname, inset=0.):
assert(ccdname.startswith('N') or ccdname.startswith('S'))
num = int(ccdname[1:])
assert(num >= 1 and num <= 31)
if num <= 7:
x0 = 7 - 2*num
y0 = 0
elif num <= 13:
x0 = 6 - (num - 7)*2
y0 = 1
elif num <= 19:
x0 = 6 - (num - 13)*2
y0 = 2
elif num <= 24:
x0 = 5 - (num - 19)*2
y0 = 3
elif num <= 28:
x0 = 4 - (num - 24)*2
y0 = 4
else:
x0 = 3 - (num - 28)*2
y0 = 5
if ccdname.startswith('N'):
(x0,x1,y0,y1) = (x0, x0+2, -y0-1, -y0)
else:
(x0,x1,y0,y1) = (x0, x0+2, y0, y0+1)
# Shift from being (0,0)-centered to being aligned with the ccd_map_image() image.
x0 += 7
x1 += 7
y0 += 6
y1 += 6
if inset == 0.:
return (x0,x1,y0,y1)
return (x0+inset, x1-inset, y0+inset, y1-inset)
def wcs_for_brick(b, W=3600, H=3600, pixscale=0.262):
'''
b: row from decals-bricks.fits file
W,H: size in pixels
pixscale: pixel scale in arcsec/pixel.
Returns: Tan wcs object
'''
pixscale = pixscale / 3600.
return Tan(b.ra, b.dec, W/2.+0.5, H/2.+0.5,
-pixscale, 0., 0., pixscale,
float(W), float(H))
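# Illustrative call (a sketch): for one row `b` of the decals-bricks table, the
# default arguments give the standard 3600x3600 pixel, 0.262"/pixel brick WCS:
#
#   brickwcs = wcs_for_brick(b)
#   ok, x, y = brickwcs.radec2pixelxy(b.ra, b.dec)   # lands at the image centre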
def bricks_touching_wcs(targetwcs, decals=None, B=None, margin=20):
# margin: How far outside the image to keep objects
# FIXME -- should be adaptive to object size!
from astrometry.libkd.spherematch import match_radec
if B is None:
assert(decals is not None)
B = decals.get_bricks_readonly()
ra,dec = targetwcs.radec_center()
radius = targetwcs.radius()
# MAGIC 0.4 degree search radius =
# DECam hypot(1024,2048)*0.27/3600 + Brick hypot(0.25, 0.25) ~= 0.35 + margin
I,J,d = match_radec(B.ra, B.dec, ra, dec,
radius + np.hypot(0.25,0.25)/2. + 0.05)
print(len(I), 'bricks nearby')
keep = []
for i in I:
b = B[i]
brickwcs = wcs_for_brick(b)
clip = clip_wcs(targetwcs, brickwcs)
if len(clip) == 0:
print('No overlap with brick', b.brickname)
continue
keep.append(i)
return B[np.array(keep)]
def ccds_touching_wcs(targetwcs, T, ccdrad=0.17, polygons=True):
'''
targetwcs: wcs object describing region of interest
T: fits_table object of CCDs
ccdrad: radius of CCDs, in degrees. Default 0.17 is for DECam.
If None, it is computed from T.
Returns: index array I of CCDs within range.
'''
trad = targetwcs.radius()
if ccdrad is None:
ccdrad = max(np.sqrt(np.abs(T.cd1_1 * T.cd2_2 - T.cd1_2 * T.cd2_1)) *
np.hypot(T.width, T.height) / 2.)
rad = trad + ccdrad
r,d = targetwcs.radec_center()
I, = np.nonzero(np.abs(T.dec - d) < rad)
I = I[np.atleast_1d(degrees_between(T.ra[I], T.dec[I], r, d) < rad)]
if not polygons:
return I
# now check actual polygon intersection
tw,th = targetwcs.imagew, targetwcs.imageh
targetpoly = [(0.5,0.5),(tw+0.5,0.5),(tw+0.5,th+0.5),(0.5,th+0.5)]
cd = targetwcs.get_cd()
tdet = cd[0]*cd[3] - cd[1]*cd[2]
if tdet > 0:
targetpoly = list(reversed(targetpoly))
targetpoly = np.array(targetpoly)
keep = []
for i in I:
W,H = T.width[i],T.height[i]
wcs = Tan(*[float(x) for x in
[T.crval1[i], T.crval2[i], T.crpix1[i], T.crpix2[i], T.cd1_1[i],
T.cd1_2[i], T.cd2_1[i], T.cd2_2[i], W, H]])
cd = wcs.get_cd()
wdet = cd[0]*cd[3] - cd[1]*cd[2]
poly = []
for x,y in [(0.5,0.5),(W+0.5,0.5),(W+0.5,H+0.5),(0.5,H+0.5)]:
rr,dd = wcs.pixelxy2radec(x,y)
ok,xx,yy = targetwcs.radec2pixelxy(rr,dd)
poly.append((xx,yy))
if wdet > 0:
poly = list(reversed(poly))
poly = np.array(poly)
if polygons_intersect(targetpoly, poly):
keep.append(i)
I = np.array(keep)
return I
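# Illustrative call (a sketch; `ccds` stands for a fits_table of CCDs carrying
# the WCS columns used above):
#
#   I = ccds_touching_wcs(brickwcs, ccds)
#   ccds_in_brick = ccds[I]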
def create_temp(**kwargs):
f,fn = tempfile.mkstemp(dir=tempdir, **kwargs)
os.close(f)
os.unlink(fn)
return fn
def sed_matched_filters(bands):
'''
Determines which SED-matched filters to run based on the available
bands.
Returns
-------
SEDs : list of (name, sed) tuples
'''
if len(bands) == 1:
return [(bands[0], (1.,))]
# single-band filters
SEDs = []
for i,band in enumerate(bands):
sed = np.zeros(len(bands))
sed[i] = 1.
SEDs.append((band, sed))
if len(bands) > 1:
flat = dict(g=1., r=1., z=1.)
SEDs.append(('Flat', [flat[b] for b in bands]))
red = dict(g=2.5, r=1., z=0.4)
SEDs.append(('Red', [red[b] for b in bands]))
return SEDs
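# Worked example of the return value above (a sketch): for bands = ['g','r','z']
# the list is
#
#   [('g', array([1., 0., 0.])), ('r', array([0., 1., 0.])), ('z', array([0., 0., 1.])),
#    ('Flat', [1.0, 1.0, 1.0]), ('Red', [2.5, 1.0, 0.4])]
#
# i.e. one single-band filter per band plus the flat and red colour templates.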
def run_sed_matched_filters(SEDs, bands, detmaps, detivs, omit_xy,
targetwcs, nsigma=5, saturated_pix=None,
plots=False, ps=None, mp=None):
'''
Runs a given set of SED-matched filters.
Parameters
----------
SEDs : list of (name, sed) tuples
The SEDs to run. The `sed` values are lists the same length
as `bands`.
bands : list of string
The band names of `detmaps` and `detivs`.
detmaps : numpy array, float
Detection maps for each of the listed `bands`.
detivs : numpy array, float
Inverse-variances of the `detmaps`.
omit_xy : None, or (xx,yy) tuple
Existing sources to avoid.
targetwcs : WCS object
WCS object to use to convert pixel values into RA,Decs for the
returned Tractor PointSource objects.
nsigma : float, optional
Detection threshold
saturated_pix : None or numpy array, boolean
Passed through to sed_matched_detection.
A map of pixels that are always considered "hot" when
determining whether a new source touches hot pixels of an
existing source.
plots : boolean, optional
Create plots?
ps : PlotSequence object
Create plots?
mp : multiproc object
Multiprocessing
Returns
-------
Tnew : fits_table
Table of new sources detected
newcat : list of PointSource objects
Newly detected objects, with positions and fluxes, as Tractor
PointSource objects.
hot : numpy array of bool
"Hot pixels" containing sources.
See also
--------
sed_matched_detection : run a single SED-matched filter.
'''
if omit_xy is not None:
xx,yy = omit_xy
n0 = len(xx)
else:
xx,yy = [],[]
n0 = 0
H,W = detmaps[0].shape
hot = np.zeros((H,W), bool)
peaksn = []
apsn = []
for sedname,sed in SEDs:
print('SED', sedname)
if plots:
pps = ps
else:
pps = None
t0 = Time()
sedhot,px,py,peakval,apval = sed_matched_detection(
sedname, sed, detmaps, detivs, bands, xx, yy,
nsigma=nsigma, saturated_pix=saturated_pix, ps=pps)
print('SED took', Time()-t0)
if sedhot is None:
continue
print(len(px), 'new peaks')
hot |= sedhot
# With an empty xx, np.append turns it into a double!
xx = np.append(xx, px).astype(int)
yy = np.append(yy, py).astype(int)
peaksn.extend(peakval)
apsn.extend(apval)
# New peaks:
peakx = xx[n0:]
peaky = yy[n0:]
# Add sources for the new peaks we found
pr,pd = targetwcs.pixelxy2radec(peakx+1, peaky+1)
print('Adding', len(pr), 'new sources')
# Also create FITS table for new sources
Tnew = fits_table()
Tnew.ra = pr
Tnew.dec = pd
Tnew.tx = peakx
Tnew.ty = peaky
assert(len(peaksn) == len(Tnew))
assert(len(apsn) == len(Tnew))
Tnew.peaksn = np.array(peaksn)
Tnew.apsn = np.array(apsn)
Tnew.itx = np.clip(np.round(Tnew.tx), 0, W-1).astype(int)
Tnew.ity = np.clip(np.round(Tnew.ty), 0, H-1).astype(int)
newcat = []
for i,(r,d,x,y) in enumerate(zip(pr,pd,peakx,peaky)):
fluxes = dict([(band, detmap[Tnew.ity[i], Tnew.itx[i]])
for band,detmap in zip(bands,detmaps)])
newcat.append(PointSource(RaDecPos(r,d),
NanoMaggies(order=bands, **fluxes)))
return Tnew, newcat, hot
def sed_matched_detection(sedname, sed, detmaps, detivs, bands,
xomit, yomit,
nsigma=5.,
saturated_pix=None,
saddle=2.,
cutonaper=True,
ps=None):
'''
Runs a single SED-matched detection filter.
Avoids creating sources close to existing sources.
Parameters
----------
sedname : string
Name of this SED; only used for plots.
sed : list of floats
The SED -- a list of floats, one per band, of this SED.
detmaps : list of numpy arrays
The per-band detection maps. These must all be the same size, the
brick image size.
detivs : list of numpy arrays
The inverse-variance maps associated with `detmaps`.
bands : list of strings
The band names of the `detmaps` and `detivs` images.
xomit, yomit : iterables (lists or numpy arrays) of int
Previously known sources that are to be avoided.
nsigma : float, optional
Detection threshold.
saturated_pix : None or numpy array, boolean
A map of pixels that are always considered "hot" when
determining whether a new source touches hot pixels of an
existing source.
saddle : float, optional
Saddle-point depth from existing sources down to new sources.
cutonaper : bool, optional
Apply a cut that the source's detection strength must be greater
than `nsigma` above the 16th percentile of the detection strength in
an annulus (from 10 to 20 pixels) around the source.
ps : PlotSequence object, optional
Create plots?
Returns
-------
hotblobs : numpy array of bool
A map of the blobs yielding sources in this SED.
px, py : numpy array of int
The new sources found.
aper : numpy array of float
The detection strength in the annulus around the source, if
`cutonaper` is set; else -1.
peakval : numpy array of float
The detection strength.
See also
--------
sed_matched_filters : creates the `(sedname, sed)` pairs used here
run_sed_matched_filters : calls this method
'''
from scipy.ndimage.measurements import label, find_objects
from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
t0 = Time()
H,W = detmaps[0].shape
allzero = True
for iband,band in enumerate(bands):
if sed[iband] == 0:
continue
if np.all(detivs[iband] == 0):
continue
allzero = False
break
if allzero:
print('SED', sedname, 'has all zero weight')
return None,None,None,None,None
sedmap = np.zeros((H,W), np.float32)
sediv = np.zeros((H,W), np.float32)
for iband,band in enumerate(bands):
if sed[iband] == 0:
continue
# We convert the detmap to canonical band via
# detmap * w
# And the corresponding change to sig1 is
# sig1 * w
# So the invvar-weighted sum is
# (detmap * w) / (sig1**2 * w**2)
# = detmap / (sig1**2 * w)
sedmap += detmaps[iband] * detivs[iband] / sed[iband]
sediv += detivs [iband] / sed[iband]**2
sedmap /= np.maximum(1e-16, sediv)
sedsn = sedmap * np.sqrt(sediv)
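# Sanity check (comment added for clarity): for a single band with sed = [1.],
# the expressions above reduce to sedmap = detmap and sedsn = detmap * sqrt(detiv),
# i.e. the ordinary per-band S/N map.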
del sedmap
peaks = (sedsn > nsigma)
print('SED sn:', Time()-t0)
t0 = Time()
def saddle_level(Y):
# Require a saddle that drops by (the larger of) "saddle"
# sigma, or 20% of the peak height
drop = max(saddle, Y * 0.2)
return Y - drop
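# Worked example (comment added for clarity): with saddle=2, a 6-sigma peak
# must be separated by a saddle below 4 sigma, while a 20-sigma peak must
# drop below 16 sigma, since 20% of the peak height dominates there.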
lowest_saddle = nsigma - saddle
# zero out the edges -- larger margin here?
peaks[0 ,:] = 0
peaks[:, 0] = 0
peaks[-1,:] = 0
peaks[:,-1] = 0
# Label the N-sigma blobs at this point... we'll use this to build
# "sedhot", which in turn is used to define the blobs that we will
# optimize simultaneously. This also determines which pixels go
# into the fitting!
dilate = 8
hotblobs,nhot = label(binary_fill_holes(
binary_dilation(peaks, iterations=dilate)))
# find pixels that are larger than their 8 neighbors
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[0:-2,1:-1])
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[2: ,1:-1])
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[1:-1,0:-2])
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[1:-1,2: ])
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[0:-2,0:-2])
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[0:-2,2: ])
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[2: ,0:-2])
peaks[1:-1, 1:-1] &= (sedsn[1:-1,1:-1] >= sedsn[2: ,2: ])
print('Peaks:', Time()-t0)
t0 = Time()
if ps is not None:
crossa = dict(ms=10, mew=1.5)
green = (0,1,0)
def plot_boundary_map(X):
bounds = np.logical_xor(binary_dilation(X), X)
H,W = X.shape
rgba = np.zeros((H,W,4), np.uint8)
rgba[:,:,1] = bounds*255
rgba[:,:,3] = bounds*255
plt.imshow(rgba, interpolation='nearest', origin='lower')
plt.clf()
plt.imshow(sedsn, vmin=-2, vmax=10, interpolation='nearest',
origin='lower', cmap='hot')
above = (sedsn > nsigma)
plot_boundary_map(above)
ax = plt.axis()
y,x = np.nonzero(peaks)
plt.plot(x, y, 'r+')
plt.axis(ax)
plt.title('SED %s: S/N & peaks' % sedname)
ps.savefig()
# plt.clf()
# plt.imshow(sedsn, vmin=-2, vmax=10, interpolation='nearest',
# origin='lower', cmap='hot')
# plot_boundary_map(sedsn > lowest_saddle)
# plt.title('SED %s: S/N & lowest saddle point bounds' % sedname)
# ps.savefig()
# For each new source, compute the saddle value, segment at that
# level, and drop the source if it is in the same blob as a
# previously-detected source. We dilate the blobs a bit too, to
# catch slight differences in centroid vs SDSS sources.
dilate = 2
# For efficiency, segment at the minimum saddle level to compute
# slices; the operations described above need only happen within
# the slice.
saddlemap = (sedsn > lowest_saddle)
if saturated_pix is not None:
saddlemap |= saturated_pix
saddlemap = binary_dilation(saddlemap, iterations=dilate)
allblobs,nblobs = label(saddlemap)
allslices = find_objects(allblobs)
ally0 = [sy.start for sy,sx in allslices]
allx0 = [sx.start for sy,sx in allslices]
# brightest peaks first
py,px = np.nonzero(peaks)
I = np.argsort(-sedsn[py,px])
py = py[I]
px = px[I]
keep = np.zeros(len(px), bool)
peakval = []
aper = []
apin = 10
apout = 20
# For each peak, determine whether it is isolated enough --
# separated by a low enough saddle from other sources. Need only
# search within its "allblob", which is defined by the lowest
# saddle.
for i,(x,y) in enumerate(zip(px, py)):
level = saddle_level(sedsn[y,x])
ablob = allblobs[y,x]
index = ablob - 1
slc = allslices[index]
saddlemap = (sedsn[slc] > level)
if saturated_pix is not None:
saddlemap |= saturated_pix[slc]
saddlemap *= (allblobs[slc] == ablob)
saddlemap = binary_fill_holes(saddlemap)
saddlemap = binary_dilation(saddlemap, iterations=dilate)
blobs,nblobs = label(saddlemap)
x0,y0 = allx0[index], ally0[index]
thisblob = blobs[y-y0, x-x0]
# previously found sources:
ox = np.append(xomit, px[:i][keep[:i]]) - x0
oy = np.append(yomit, py[:i][keep[:i]]) - y0
h,w = blobs.shape
cut = False
if len(ox):
ox = ox.astype(int)
oy = oy.astype(int)
cut = any((ox >= 0) * (ox < w) * (oy >= 0) * (oy < h) *
(blobs[np.clip(oy,0,h-1), np.clip(ox,0,w-1)] ==
thisblob))
if False and (not cut) and ps is not None:
plt.clf()
plt.subplot(1,2,1)
dimshow(sedsn, vmin=-2, vmax=10, cmap='hot')
plot_boundary_map((sedsn > nsigma))
ax = plt.axis()
plt.plot(x, y, 'm+', ms=12, mew=2)
plt.axis(ax)
plt.subplot(1,2,2)
y1,x1 = [s.stop for s in slc]
ext = [x0,x1,y0,y1]
dimshow(saddlemap, extent=ext)
#plt.plot([x0,x0,x1,x1,x0], [y0,y1,y1,y0,y0], 'c-')
#ax = plt.axis()
#plt.plot(ox+x0, oy+y0, 'rx')
plt.plot(xomit, yomit, 'rx', ms=8, mew=2)
plt.plot(px[:i][keep[:i]], py[:i][keep[:i]], '+',
color=green, ms=8, mew=2)
plt.plot(x, y, 'mo', mec='m', mfc='none', ms=12, mew=2)
plt.axis(ax)
if cut:
plt.suptitle('Cut')
else:
plt.suptitle('Keep')
ps.savefig()
if cut:
# in same blob as previously found source
continue
# Measure in aperture...
ap = sedsn[max(0, y-apout):min(H,y+apout+1),
max(0, x-apout):min(W,x+apout+1)]
apiv = (sediv[max(0, y-apout):min(H,y+apout+1),
max(0, x-apout):min(W,x+apout+1)] > 0)
aph,apw = ap.shape
apx0, apy0 = max(0, x - apout), max(0, y - apout)
R2 = ((np.arange(aph)+apy0 - y)[:,np.newaxis]**2 +
(np.arange(apw)+apx0 - x)[np.newaxis,:]**2)
ap = ap[apiv * (R2 >= apin**2) * (R2 <= apout**2)]
if len(ap):
# 16th percentile ~ -1 sigma point.
m = np.percentile(ap, 16.)
else:
# fake
m = -1.
if cutonaper:
if sedsn[y,x] - m < nsigma:
continue
aper.append(m)
peakval.append(sedsn[y,x])
keep[i] = True
if False and ps is not None:
plt.clf()
plt.subplot(1,2,1)
dimshow(ap, vmin=-2, vmax=10, cmap='hot',
extent=[apx0,apx0+apw,apy0,apy0+aph])
plt.subplot(1,2,2)
dimshow(ap * ((R2 >= apin**2) * (R2 <= apout**2)),
vmin=-2, vmax=10, cmap='hot',
extent=[apx0,apx0+apw,apy0,apy0+aph])
plt.suptitle('peak %.1f vs ap %.1f' % (sedsn[y,x], m))
ps.savefig()
print('New sources:', Time()-t0)
t0 = Time()
if ps is not None:
pxdrop = px[np.logical_not(keep)]
pydrop = py[np.logical_not(keep)]
py = py[keep]
px = px[keep]
# Which of the hotblobs yielded sources? Those are the ones to keep.
hbmap = np.zeros(nhot+1, bool)
hbmap[hotblobs[py,px]] = True
if len(xomit):
hbmap[hotblobs[yomit,xomit]] = True
# in case a source is (somehow) not in a hotblob?
hbmap[0] = False
hotblobs = hbmap[hotblobs]
if ps is not None:
plt.clf()
dimshow(hotblobs, vmin=0, vmax=1, cmap='hot')
ax = plt.axis()
p1 = plt.plot(px, py, 'g+', ms=8, mew=2)
p2 = plt.plot(pxdrop, pydrop, 'm+', ms=8, mew=2)
p3 = plt.plot(xomit, yomit, 'r+', ms=8, mew=2)
plt.axis(ax)
plt.title('SED %s: hot blobs' % sedname)
plt.figlegend((p3[0],p1[0],p2[0]), ('Existing', 'Keep', 'Drop'),
'upper left')
ps.savefig()
return hotblobs, px, py, aper, peakval
class Decals(object):
def __init__(self, decals_dir=None):
if decals_dir is None:
decals_dir = os.environ.get('DECALS_DIR')
if decals_dir is None:
print('''Warning: you should set the $DECALS_DIR environment variable.
On NERSC, you can do:
module use /project/projectdirs/cosmo/work/decam/versions/modules
module load decals
Using the current directory as DECALS_DIR, but this is likely to fail.
''')
decals_dir = os.getcwd()
self.decals_dir = decals_dir
self.ZP = None
self.bricks = None
# Create and cache a kd-tree for bricks_touching_radec_box ?
self.cache_tree = False
self.bricktree = None
### HACK! Hard-coded brick edge size, in degrees!
self.bricksize = 0.25
self.image_typemap = {
'decam': DecamImage,
'90prime': BokImage,
}
def get_calib_dir(self):
return os.path.join(self.decals_dir, 'calib')
def get_image_dir(self):
return os.path.join(self.decals_dir, 'images')
def get_decals_dir(self):
return self.decals_dir
def get_se_dir(self):
return os.path.join(self.decals_dir, 'calib', 'se-config')
def get_bricks(self):
return fits_table(os.path.join(self.decals_dir, 'decals-bricks.fits'))
### HACK...
def get_bricks_readonly(self):
if self.bricks is None:
self.bricks = self.get_bricks()
# Assert that bricks are the sizes we think they are.
# ... except for the two poles, which are half-sized
assert(np.all(np.abs((self.bricks.dec2 - self.bricks.dec1)[1:-1] -
self.bricksize) < 1e-8))
return self.bricks
def get_brick(self, brickid):
B = self.get_bricks_readonly()
I, = np.nonzero(B.brickid == brickid)
if len(I) == 0:
return None
return B[I[0]]
def get_brick_by_name(self, brickname):
B = self.get_bricks_readonly()
I, = np.nonzero(np.array([n == brickname for n in B.brickname]))
if len(I) == 0:
return None
return B[I[0]]
def bricks_touching_radec_box(self, bricks,
ralo, rahi, declo, dechi):
'''
Returns an index vector of the bricks that touch the given RA,Dec box.
'''
if bricks is None:
bricks = self.get_bricks_readonly()
if self.cache_tree and bricks == self.bricks:
from astrometry.libkd.spherematch import tree_build_radec, tree_search_radec
# Use kdtree
if self.bricktree is None:
self.bricktree = tree_build_radec(bricks.ra, bricks.dec)
# brick size
radius = np.sqrt(2.)/2. * self.bricksize
# + RA,Dec box size
radius = radius + degrees_between(ralo, declo, rahi, dechi) / 2.
dec = (dechi + declo) / 2.
c = (np.cos(np.deg2rad(rahi)) + np.cos(np.deg2rad(ralo))) / 2.
s = (np.sin(np.deg2rad(rahi)) + np.sin(np.deg2rad(ralo))) / 2.
ra = np.rad2deg(np.arctan2(s, c))
J = tree_search_radec(self.bricktree, ra, dec, radius)
I = J[np.nonzero((bricks.ra1[J] <= rahi ) * (bricks.ra2[J] >= ralo) *
(bricks.dec1[J] <= dechi) * (bricks.dec2[J] >= declo))[0]]
return I
if rahi < ralo:
# Wrap-around
print('In Dec slice:', len(np.flatnonzero((bricks.dec1 <= dechi) *
(bricks.dec2 >= declo))))
print('Above RAlo=', ralo, ':', len(np.flatnonzero(bricks.ra2 >= ralo)))
print('Below RAhi=', rahi, ':', len(np.flatnonzero(bricks.ra1 <= rahi)))
print('In RA slice:', len(np.flatnonzero(np.logical_or(bricks.ra2 >= ralo,
bricks.ra1 <= rahi))))
I, = np.nonzero(np.logical_or(bricks.ra2 >= ralo, bricks.ra1 <= rahi) *
(bricks.dec1 <= dechi) * (bricks.dec2 >= declo))
print('In RA&Dec slice', len(I))
else:
I, = np.nonzero((bricks.ra1 <= rahi ) * (bricks.ra2 >= ralo) *
(bricks.dec1 <= dechi) * (bricks.dec2 >= declo))
return I
def get_ccds(self):
fn = os.path.join(self.decals_dir, 'decals-ccds.fits')
if not os.path.exists(fn):
fn += '.gz'
print('Reading CCDs from', fn)
T = fits_table(fn)
print('Got', len(T), 'CCDs')
# "N4 " -> "N4"
T.ccdname = np.array([s.strip() for s in T.ccdname])
return T
def ccds_touching_wcs(self, wcs, **kwargs):
T = self.get_ccds()
I = ccds_touching_wcs(wcs, T, **kwargs)
if len(I) == 0:
return None
T.cut(I)
return T
def get_image_object(self, t):
'''
Returns a DecamImage or similar object for one row of the CCDs table.
'''
imageType = self.image_typemap[t.camera.strip()]
return imageType(self, t)
def tims_touching_wcs(self, targetwcs, mp, bands=None,
gaussPsf=False, const2psf=True, pixPsf=False):
'''
mp: multiprocessing object
'''
# Read images
C = self.ccds_touching_wcs(targetwcs)
# Sort by band
if bands is not None:
C.cut(np.hstack([np.nonzero(C.filter == band)[0]
for band in bands]))
ims = []
for t in C:
print()
print('Image file', t.image_filename, 'hdu', t.image_hdu)
im = DecamImage(self, t)
ims.append(im)
# Read images, clip to ROI
W,H = targetwcs.get_width(), targetwcs.get_height()
targetrd = np.array([targetwcs.pixelxy2radec(x,y) for x,y in
[(1,1),(W,1),(W,H),(1,H),(1,1)]])
args = [(im, targetrd, gaussPsf, const2psf, pixPsf) for im in ims]
tims = mp.map(read_one_tim, args)
return tims
def find_ccds(self, expnum=None, ccdname=None):
T = self.get_ccds()
if expnum is not None:
T.cut(T.expnum == expnum)
if ccdname is not None:
T.cut(T.ccdname == ccdname)
return T
def photometric_ccds(self, CCD):
'''
Returns an index array for the members of the table "CCD" that
are photometric.
'''
'''
Recipe in [decam-data 1314], 2015-07-15:
* CCDNMATCH >= 20 (At least 20 stars to determine zero-pt)
* abs(ZPT - CCDZPT) < 0.10 (Agreement with full-frame zero-pt)
* CCDPHRMS < 0.2 (Uniform photometry across the CCD)
* ZPT within 0.50 mag of 25.08 for g-band
* ZPT within 0.50 mag of 25.29 for r-band
* ZPT within 0.50 mag of 24.92 for z-band
* DEC > -20 (in DESI footprint)
* CCDNUM = 31 (S7) is OK, but only for the region
[1:1023,1:4094] (mask region [1024:2046,1:4094] in CCD s7)
Slightly revised by DJS in Re: [decam-data 828] 2015-07-31:
* CCDNMATCH >= 20 (At least 20 stars to determine zero-pt)
* abs(ZPT - CCDZPT) < 0.10 (Loose agreement with full-frame zero-pt)
* ZPT within [25.08-0.50, 25.08+0.25] for g-band
* ZPT within [25.29-0.50, 25.29+0.25] for r-band
* ZPT within [24.92-0.50, 24.92+0.25] for z-band
* DEC > -20 (in DESI footprint)
* EXPTIME >= 30
* CCDNUM = 31 (S7) should mask outside the region [1:1023,1:4094]
'''
# We assume that the zeropoints are present in the
# CCDs file (starting in DR2)
z0 = dict(g = 25.08,
r = 25.29,
z = 24.92,)
z0 = np.array([z0[f[0]] for f in CCD.filter])
good = np.ones(len(CCD), bool)
n0 = sum(good)
# This is our list of cuts to remove non-photometric CCD images
for name,crit in [
('exptime < 30 s', (CCD.exptime < 30)),
('ccdnmatch < 20', (CCD.ccdnmatch < 20)),
('abs(zpt - ccdzpt) > 0.1',
(np.abs(CCD.zpt - CCD.ccdzpt) > 0.1)),
('zpt < 0.5 mag of nominal (for DECam)',
((CCD.camera == 'decam') * (CCD.zpt < (z0 - 0.5)))),
('zpt > 0.25 mag of nominal (for DECam)',
((CCD.camera == 'decam') * (CCD.zpt > (z0 + 0.25)))),
]:
good[crit] = False
n = sum(good)
print('Flagged', n0-n, 'more non-photometric using criterion:', name)
n0 = n
return np.flatnonzero(good)
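# Usage sketch (added for illustration; shows how the cuts above are meant to
# be applied, not a verbatim excerpt of the pipeline driver):
#   decals = Decals()
#   ccds = decals.get_ccds()
#   ccds.cut(decals.photometric_ccds(ccds))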
def _get_zeropoints_table(self):
if self.ZP is not None:
return self.ZP
# Hooray, DRY
self.ZP = self.get_ccds()
return self.ZP
def get_zeropoint_row_for(self, im):
ZP = self._get_zeropoints_table()
I, = np.nonzero(ZP.expnum == im.expnum)
if len(I) > 1:
I, = np.nonzero((ZP.expnum == im.expnum) *
(ZP.ccdname == im.ccdname))
if len(I) == 0:
return None
assert(len(I) == 1)
return ZP[I[0]]
def get_zeropoint_for(self, im):
zp = self.get_zeropoint_row_for(im)
# No updated zeropoint -- use header MAGZERO from primary HDU.
if zp is None:
print('WARNING: using header zeropoints for', im)
hdr = im.read_image_primary_header()
# DES Year1 Stripe82 images:
magzero = hdr['MAGZERO']
return magzero
magzp = zp.ccdzpt
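# The tabulated CCDZPT is a per-second zeropoint; adding 2.5*log10(exptime)
# below scales it to the full exposure (comment added for clarity; e.g.
# ccdzpt=25.0 with exptime=100 s gives magzp=30.0).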
magzp += 2.5 * np.log10(zp.exptime)
return magzp
def get_astrometric_zeropoint_for(self, im):
zp = self.get_zeropoint_row_for(im)
if zp is None:
print('WARNING: no astrometric zeropoints found for', im)
return 0.,0.
dra, ddec = zp.ccdraoff, zp.ccddecoff
return dra / 3600., ddec / 3600.
#dec = zp.ccddec
#return dra / np.cos(np.deg2rad(dec)), ddec
def exposure_metadata(filenames, hdus=None, trim=None):
nan = np.nan
primkeys = [('FILTER',''),
('RA', nan),
('DEC', nan),
('AIRMASS', nan),
('DATE-OBS', ''),
('EXPTIME', nan),
('EXPNUM', 0),
('MJD-OBS', 0),
('PROPID', ''),
]
hdrkeys = [('AVSKY', nan),
('ARAWGAIN', nan),
('FWHM', nan),
('CRPIX1',nan),
('CRPIX2',nan),
('CRVAL1',nan),
('CRVAL2',nan),
('CD1_1',nan),
('CD1_2',nan),
('CD2_1',nan),
('CD2_2',nan),
('EXTNAME',''),
('CCDNUM',''),
]
otherkeys = [('IMAGE_FILENAME',''), ('IMAGE_HDU',0),
('HEIGHT',0),('WIDTH',0),
]
allkeys = primkeys + hdrkeys + otherkeys
vals = dict([(k,[]) for k,d in allkeys])
for i,fn in enumerate(filenames):
print('Reading', (i+1), 'of', len(filenames), ':', fn)
F = fitsio.FITS(fn)
primhdr = F[0].read_header()
expstr = '%08i' % primhdr.get('EXPNUM')
# # Parse date with format: 2014-08-09T04:20:50.812543
# date = datetime.datetime.strptime(primhdr.get('DATE-OBS'),
# '%Y-%m-%dT%H:%M:%S.%f')
# # Subtract 12 hours to get the date used by the CP to label the night;
# # CP20140818 includes observations with date 2014-08-18 evening and
# # 2014-08-19 early AM.
# cpdate = date - datetime.timedelta(0.5)
# #cpdatestr = '%04i%02i%02i' % (cpdate.year, cpdate.month, cpdate.day)
# #print 'Date', date, '-> CP', cpdatestr
# cpdateval = cpdate.year * 10000 + cpdate.month * 100 + cpdate.day
# print 'Date', date, '-> CP', cpdateval
cpfn = fn
if trim is not None:
cpfn = cpfn.replace(trim, '')
print('CP fn', cpfn)
if hdus is not None:
hdulist = hdus
else:
hdulist = range(1, len(F))
for hdu in hdulist:
hdr = F[hdu].read_header()
info = F[hdu].get_info()
#'extname': 'S1', 'dims': [4146L, 2160L]
H,W = info['dims']
for k,d in primkeys:
vals[k].append(primhdr.get(k, d))
for k,d in hdrkeys:
vals[k].append(hdr.get(k, d))
vals['IMAGE_FILENAME'].append(cpfn)
vals['IMAGE_HDU'].append(hdu)
vals['WIDTH'].append(int(W))
vals['HEIGHT'].append(int(H))
T = fits_table()
for k,d in allkeys:
T.set(k.lower().replace('-','_'), np.array(vals[k]))
#T.about()
#T.rename('extname', 'ccdname')
T.ccdname = np.array([t.strip() for t in T.extname])
T.filter = np.array([s.split()[0] for s in T.filter])
T.ra_bore = np.array([hmsstring2ra (s) for s in T.ra ])
T.dec_bore = np.array([dmsstring2dec(s) for s in T.dec])
T.ra = np.zeros(len(T))
T.dec = np.zeros(len(T))
for i in range(len(T)):
W,H = T.width[i], T.height[i]
wcs = Tan(T.crval1[i], T.crval2[i], T.crpix1[i], T.crpix2[i],
T.cd1_1[i], T.cd1_2[i], T.cd2_1[i], T.cd2_2[i], float(W), float(H))
xc,yc = W/2.+0.5, H/2.+0.5
rc,dc = wcs.pixelxy2radec(xc,yc)
T.ra [i] = rc
T.dec[i] = dc
return T
class LegacySurveyImage(object):
'''
A base class containing common code for the images we handle.
You shouldn't directly instantiate this class, but rather use the appropriate
subclass:
* DecamImage
* BokImage
'''
def __init__(self, decals, t):
'''
Create a new LegacySurveyImage object, from a Decals object,
and one row of a CCDs fits_table object.
You may not need to instantiate this class directly, instead using
Decals.get_image_object():
decals = Decals()
# targetwcs = ....
# T = decals.ccds_touching_wcs(targetwcs, ccdrad=None)
T = decals.get_ccds()
im = decals.get_image_object(T[0])
# which does the same thing as:
im = DecamImage(decals, T[0])
Or, if you have a Community Pipeline-processed input file and
FITS HDU extension number:
decals = Decals()
T = exposure_metadata([filename], hdus=[hdu])
im = DecamImage(decals, T[0])
Perhaps the most important method in this class is
*get_tractor_image*.
'''
self.decals = decals
imgfn, hdu, band, expnum, ccdname, exptime = (
t.image_filename.strip(), t.image_hdu, t.filter.strip(), t.expnum,
t.ccdname.strip(), t.exptime)
if os.path.exists(imgfn):
self.imgfn = imgfn
else:
self.imgfn = os.path.join(self.decals.get_image_dir(), imgfn)
self.hdu = hdu
self.expnum = expnum
self.ccdname = ccdname.strip()
self.band = band
self.exptime = exptime
self.camera = t.camera.strip()
self.fwhm = t.fwhm
# in arcsec/pixel
self.pixscale = 3600. * np.sqrt(np.abs(t.cd1_1 * t.cd2_2 - t.cd1_2 * t.cd2_1))
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def get_good_image_slice(self, extent, get_extent=False):
'''
extent = None or extent = [x0,x1,y0,y1]
If *get_extent* = True, returns the new [x0,x1,y0,y1] extent.
Returns a new pair of slices, or *extent* if the whole image is good.
'''
gx0,gx1,gy0,gy1 = self.get_good_image_subregion()
if gx0 is None and gx1 is None and gy0 is None and gy1 is None:
return extent
if extent is None:
imh,imw = self.get_image_shape()
extent = (0, imw, 0, imh)
x0,x1,y0,y1 = extent
if gx0 is not None:
x0 = max(x0, gx0)
if gy0 is not None:
y0 = max(y0, gy0)
if gx1 is not None:
x1 = min(x1, gx1)
if gy1 is not None:
y1 = min(y1, gy1)
if get_extent:
return (x0,x1,y0,y1)
return slice(y0,y1), slice(x0,x1)
def get_good_image_subregion(self):
'''
Returns x0,x1,y0,y1 of the good region of this chip,
or None if no cut should be applied to that edge; returns
(None,None,None,None) if the whole chip is good.
This cut is applied in addition to any masking in the mask or
invvar map.
'''
return None,None,None,None
def get_tractor_image(self, slc=None, radecpoly=None,
gaussPsf=False, const2psf=False, pixPsf=False,
nanomaggies=True, subsky=True, tiny=5):
'''
Returns a tractor.Image ("tim") object for this image.
Options describing a subimage to return:
- *slc*: y,x slice objects
- *radecpoly*: numpy array, shape (N,2), RA,Dec polygon describing bounding box to select.
Options determining the PSF model to use:
- *gaussPsf*: single circular Gaussian PSF based on header FWHM value.
- *const2Psf*: 2-component general Gaussian fit to PsfEx model at image center.
- *pixPsf*: pixelized PsfEx model at image center.
Options determining the units of the image:
- *nanomaggies*: convert the image to be in units of NanoMaggies;
*tim.zpscale* contains the scale value the image was divided by.
- *subsky*: subtract a constant sky value, leaving pixel
values distributed around zero. *tim.midsky* contains the
value subtracted.
'''
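# Typical call pattern (illustrative only; see read_one_tim at the bottom of
# this module for the pipeline's actual usage):
#   tim = im.get_tractor_image(radecpoly=targetrd, const2psf=True)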
band = self.band
imh,imw = self.get_image_shape()
wcs = self.get_wcs()
x0,y0 = 0,0
x1 = x0 + imw
y1 = y0 + imh
if slc is None and radecpoly is not None:
imgpoly = [(1,1),(1,imh),(imw,imh),(imw,1)]
ok,tx,ty = wcs.radec2pixelxy(radecpoly[:-1,0], radecpoly[:-1,1])
tpoly = zip(tx,ty)
clip = clip_polygon(imgpoly, tpoly)
clip = np.array(clip)
if len(clip) == 0:
return None
x0,y0 = np.floor(clip.min(axis=0)).astype(int)
x1,y1 = np.ceil (clip.max(axis=0)).astype(int)
slc = slice(y0,y1+1), slice(x0,x1+1)
if y1 - y0 < tiny or x1 - x0 < tiny:
print('Skipping tiny subimage')
return None
if slc is not None:
sy,sx = slc
y0,y1 = sy.start, sy.stop
x0,x1 = sx.start, sx.stop
old_extent = (x0,x1,y0,y1)
new_extent = self.get_good_image_slice((x0,x1,y0,y1), get_extent=True)
if new_extent != old_extent:
x0,x1,y0,y1 = new_extent
print('Applying good subregion of CCD: slice is', x0,x1,y0,y1)
if x0 >= x1 or y0 >= y1:
return None
slc = slice(y0,y1), slice(x0,x1)
print('Reading image slice:', slc)
img,imghdr = self.read_image(header=True, slice=slc)
# check consistency... something of a DR1 hangover
e = imghdr['EXTNAME']
assert(e.strip() == self.ccdname.strip())
invvar = self.read_invvar(slice=slc)
dq = self.read_dq(slice=slc)
invvar[dq != 0] = 0.
if np.all(invvar == 0.):
print('Skipping zero-invvar image')
return None
assert(np.all(np.isfinite(img)))
assert(np.all(np.isfinite(invvar)))
assert(not(np.all(invvar == 0.)))
# header 'FWHM' is in pixels
# imghdr['FWHM']
psf_fwhm = self.fwhm
psf_sigma = psf_fwhm / 2.35
primhdr = self.read_image_primary_header()
sky = self.read_sky_model()
midsky = sky.getConstant()
if subsky:
img -= midsky
sky.subtract(midsky)
magzp = self.decals.get_zeropoint_for(self)
orig_zpscale = zpscale = NanoMaggies.zeropointToScale(magzp)
if nanomaggies:
# Scale images to Nanomaggies
img /= zpscale
invvar *= zpscale**2
zpscale = 1.
assert(np.sum(invvar > 0) > 0)
sig1 = 1./np.sqrt(np.median(invvar[invvar > 0]))
assert(np.all(np.isfinite(img)))
assert(np.all(np.isfinite(invvar)))
assert(np.isfinite(sig1))
twcs = ConstantFitsWcs(wcs)
if x0 or y0:
twcs.setX0Y0(x0,y0)
if gaussPsf:
#from tractor.basics import NCircularGaussianPSF
#psf = NCircularGaussianPSF([psf_sigma], [1.0])
from tractor.basics import GaussianMixturePSF
v = psf_sigma**2
psf = GaussianMixturePSF(1., 0., 0., v, v, 0.)
print('WARNING: using mock PSF:', psf)
elif pixPsf:
# spatially varying pixelized PsfEx
from tractor.psfex import PixelizedPsfEx
print('Reading PsfEx model from', self.psffn)
psf = PixelizedPsfEx(self.psffn)
psf.shift(x0, y0)
elif const2psf:
from tractor.psfex import PsfExModel
from tractor.basics import GaussianMixtureEllipsePSF
# 2-component constant MoG.
print('Reading PsfEx model from', self.psffn)
psfex = PsfExModel(self.psffn)
psfim = psfex.at(imw/2., imh/2.)
psfim = psfim[5:-5, 5:-5]
print('Fitting PsfEx model as 2-component Gaussian...')
psf = GaussianMixtureEllipsePSF.fromStamp(psfim, N=2)
del psfim
del psfex
else:
assert(False)
print('Using PSF model', psf)
tim = Image(img, invvar=invvar, wcs=twcs, psf=psf,
photocal=LinearPhotoCal(zpscale, band=band),
sky=sky, name=self.name + ' ' + band)
assert(np.all(np.isfinite(tim.getInvError())))
tim.zr = [-3. * sig1, 10. * sig1]
tim.zpscale = orig_zpscale
tim.midsky = midsky
tim.sig1 = sig1
tim.band = band
tim.psf_fwhm = psf_fwhm
tim.psf_sigma = psf_sigma
tim.sip_wcs = wcs
tim.x0,tim.y0 = int(x0),int(y0)
tim.imobj = self
tim.primhdr = primhdr
tim.hdr = imghdr
tim.dq = dq
tim.dq_bits = CP_DQ_BITS
tim.saturation = imghdr.get('SATURATE', None)
tim.satval = tim.saturation or 0.
if subsky:
tim.satval -= midsky
if nanomaggies:
tim.satval /= zpscale
subh,subw = tim.shape
tim.subwcs = tim.sip_wcs.get_subimage(tim.x0, tim.y0, subw, subh)
mn,mx = tim.zr
tim.ima = dict(interpolation='nearest', origin='lower', cmap='gray',
vmin=mn, vmax=mx)
return tim
def _read_fits(self, fn, hdu, slice=None, header=None, **kwargs):
if slice is not None:
f = fitsio.FITS(fn)[hdu]
img = f[slice]
rtn = img
if header:
hdr = f.read_header()
return (img,hdr)
return img
return fitsio.read(fn, ext=hdu, header=header, **kwargs)
def read_image(self, **kwargs):
'''
Reads the image file from disk.
The image is read from FITS file self.imgfn HDU self.hdu.
Parameters
----------
slice : slice, optional
2-dimensional slice of the subimage to read.
header : boolean, optional
Return the image header also, as tuple (image, header) ?
Returns
-------
image : numpy array
The image pixels.
(image, header) : (numpy array, fitsio header)
If `header = True`.
'''
print('Reading image from', self.imgfn, 'hdu', self.hdu)
return self._read_fits(self.imgfn, self.hdu, **kwargs)
def get_image_info(self):
'''
Reads the FITS image header and returns some summary information
as a dictionary (image size, type, etc).
'''
return fitsio.FITS(self.imgfn)[self.hdu].get_info()
def get_image_shape(self):
'''
Returns image shape H,W.
'''
return self.get_image_info()['dims']
@property
def shape(self):
'''
Returns the full shape of the image, (H,W).
'''
return self.get_image_shape()
def read_image_primary_header(self, **kwargs):
'''
Reads the FITS primary (HDU 0) header from self.imgfn.
Returns
-------
primary_header : fitsio header
The FITS header
'''
return fitsio.read_header(self.imgfn)
def read_image_header(self, **kwargs):
'''
Reads the FITS image header from self.imgfn HDU self.hdu.
Returns
-------
header : fitsio header
The FITS header
'''
return fitsio.read_header(self.imgfn, ext=self.hdu)
def read_dq(self, **kwargs):
'''
Reads the Data Quality (DQ) mask image.
'''
return None
def read_invvar(self, clip=True, **kwargs):
'''
Reads the inverse-variance (weight) map image.
'''
return None
def read_pv_wcs(self):
'''
Reads the WCS header, returning an `astrometry.util.util.Sip` object.
'''
print('Reading WCS from', self.pvwcsfn)
wcs = Sip(self.pvwcsfn)
dra,ddec = self.decals.get_astrometric_zeropoint_for(self)
r,d = wcs.get_crval()
print('Applying astrometric zeropoint:', (dra,ddec))
wcs.set_crval((r + dra, d + ddec))
return wcs
def read_sky_model(self):
'''
Reads the sky model, returning a Tractor Sky object.
'''
print('Reading sky model from', self.skyfn)
hdr = fitsio.read_header(self.skyfn)
skyclass = hdr['SKY']
clazz = get_class_from_name(skyclass)
fromfits = getattr(clazz, 'fromFitsHeader')
skyobj = fromfits(hdr, prefix='SKY_')
return skyobj
def run_calibs(self, pvastrom=True, psfex=True, sky=True, se=False,
funpack=False, fcopy=False, use_mask=True,
force=False, just_check=False):
'''
Runs any required calibration processes for this image.
'''
print('run_calibs for', self)
print('(not implemented)')
pass
class BokImage(LegacySurveyImage):
'''
A LegacySurveyImage subclass to handle images from the 90prime
camera on the Bok telescope.
Currently, there are several hacks and shortcuts in handling the
calibration; this is a sketch, not a final working solution.
'''
def __init__(self, decals, t):
super(BokImage, self).__init__(decals, t)
self.dqfn = self.imgfn.replace('_oi.fits', '_od.fits')
expstr = '%10i' % self.expnum
self.calname = '%s/%s/bok-%s-%s' % (expstr[:5], expstr, expstr, self.ccdname)
self.name = '%s-%s' % (expstr, self.ccdname)
calibdir = os.path.join(self.decals.get_calib_dir(), self.camera)
self.pvwcsfn = os.path.join(calibdir, 'astrom-pv', self.calname + '.wcs.fits')
self.sefn = os.path.join(calibdir, 'sextractor', self.calname + '.fits')
self.psffn = os.path.join(calibdir, 'psfex', self.calname + '.fits')
self.skyfn = os.path.join(calibdir, 'sky', self.calname + '.fits')
def __str__(self):
return 'Bok ' + self.name
def read_sky_model(self):
## HACK -- create the sky model on the fly
img = self.read_image()
sky = np.median(img)
print('Median "sky" model:', sky)
return ConstantSky(sky)
def read_dq(self, **kwargs):
print('Reading data quality from', self.dqfn, 'hdu', self.hdu)
X = self._read_fits(self.dqfn, self.hdu, **kwargs)
return X
def read_invvar(self, **kwargs):
print('Reading inverse-variance for image', self.imgfn, 'hdu', self.hdu)
##### HACK! No weight-maps available?
img = self.read_image(**kwargs)
# # Estimate per-pixel noise via Blanton's 5-pixel MAD
slice1 = (slice(0,-5,10),slice(0,-5,10))
slice2 = (slice(5,None,10),slice(5,None,10))
mad = np.median(np.abs(img[slice1] - img[slice2]).ravel())
sig1 = 1.4826 * mad / np.sqrt(2.)
print('sig1 estimate:', sig1)
invvar = np.ones_like(img) / sig1**2
return invvar
def get_wcs(self):
##### HACK! Ignore the distortion solution in the headers,
##### converting to straight TAN.
hdr = fitsio.read_header(self.imgfn, self.hdu)
print('Converting CTYPE1 from', hdr.get('CTYPE1'), 'to RA---TAN')
hdr['CTYPE1'] = 'RA---TAN'
print('Converting CTYPE2 from', hdr.get('CTYPE2'), 'to DEC--TAN')
hdr['CTYPE2'] = 'DEC--TAN'
H,W = self.get_image_shape()
hdr['IMAGEW'] = W
hdr['IMAGEH'] = H
tmphdr = create_temp(suffix='.fits')
fitsio.write(tmphdr, None, header=hdr, clobber=True)
print('Wrote fake header to', tmphdr)
wcs = Tan(tmphdr)
print('Returning', wcs)
return wcs
class DecamImage(LegacySurveyImage):
'''
A LegacySurveyImage subclass to handle images from the Dark Energy
Camera, DECam, on the Blanco telescope.
'''
def __init__(self, decals, t):
super(DecamImage, self).__init__(decals, t)
self.dqfn = self.imgfn.replace('_ooi_', '_ood_')
self.wtfn = self.imgfn.replace('_ooi_', '_oow_')
for attr in ['imgfn', 'dqfn', 'wtfn']:
fn = getattr(self, attr)
if os.path.exists(fn):
continue
if fn.endswith('.fz'):
fun = fn[:-3]
if os.path.exists(fun):
print('Using ', fun)
print('rather than', fn)
setattr(self, attr, fun)
expstr = '%08i' % self.expnum
self.calname = '%s/%s/decam-%s-%s' % (expstr[:5], expstr, expstr, self.ccdname)
self.name = '%s-%s' % (expstr, self.ccdname)
calibdir = os.path.join(self.decals.get_calib_dir(), self.camera)
self.pvwcsfn = os.path.join(calibdir, 'astrom-pv', self.calname + '.wcs.fits')
self.sefn = os.path.join(calibdir, 'sextractor', self.calname + '.fits')
self.psffn = os.path.join(calibdir, 'psfex', self.calname + '.fits')
self.skyfn = os.path.join(calibdir, 'sky', self.calname + '.fits')
def __str__(self):
return 'DECam ' + self.name
def get_good_image_subregion(self):
x0,x1,y0,y1 = None,None,None,None
imh,imw = self.get_image_shape()
# Handle 'glowing' edges in DES r-band images
# aww yeah
if self.band == 'r' and (('DES' in self.imgfn) or ('COSMOS' in self.imgfn)):
# Northern chips: drop 100 pix off the bottom
if 'N' in self.ccdname:
print('Clipping bottom part of northern DES r-band chip')
y0 = 100
else:
# Southern chips: drop 100 pix off the top
print('Clipping top part of southern DES r-band chip')
y1 = imh - 100
# Clip the bad half of chip S7.
# The left half is OK.
if self.ccdname == 'S7':
print('Clipping the right half of chip S7')
x1 = 1023
return x0,x1,y0,y1
def read_dq(self, header=False, **kwargs):
from distutils.version import StrictVersion
print('Reading data quality from', self.dqfn, 'hdu', self.hdu)
dq,hdr = self._read_fits(self.dqfn, self.hdu, header=True, **kwargs)
# The format of the DQ maps changed as of version 3.5.0 of the
# Community Pipeline. Handle that here...
primhdr = fitsio.read_header(self.dqfn)
plver = primhdr['PLVER'].strip()
plver = plver.replace('V','')
if StrictVersion(plver) >= StrictVersion('3.5.0'):
# Integer codes, not bit masks.
dqbits = np.zeros(dq.shape, np.int16)
'''
1 = bad
2 = no value (for remapped and stacked data)
3 = saturated
4 = bleed mask
5 = cosmic ray
6 = low weight
7 = diff detect (multi-exposure difference detection from median)
8 = long streak (e.g. satellite trail)
'''
dqbits[dq == 1] |= CP_DQ_BITS['badpix']
dqbits[dq == 2] |= CP_DQ_BITS['badpix']
dqbits[dq == 3] |= CP_DQ_BITS['satur']
dqbits[dq == 4] |= CP_DQ_BITS['bleed']
dqbits[dq == 5] |= CP_DQ_BITS['cr']
dqbits[dq == 6] |= CP_DQ_BITS['badpix']
dqbits[dq == 7] |= CP_DQ_BITS['trans']
dqbits[dq == 8] |= CP_DQ_BITS['trans']
else:
dq = dq.astype(np.int16)
if header:
return dq,hdr
else:
return dq
def read_invvar(self, clip=True, **kwargs):
print('Reading inverse-variance from', self.wtfn, 'hdu', self.hdu)
invvar = self._read_fits(self.wtfn, self.hdu, **kwargs)
if clip:
# Clamp near-zero (incl negative!) invvars to zero.
# These arise due to fpack.
med = np.median(invvar[invvar > 0])
thresh = 0.2 * med
invvar[invvar < thresh] = 0
return invvar
def get_wcs(self):
return self.read_pv_wcs()
def run_calibs(self, pvastrom=True, psfex=True, sky=True, se=False,
funpack=False, fcopy=False, use_mask=True,
force=False, just_check=False):
'''
Run calibration pre-processing steps.
Parameters
----------
just_check: boolean
If True, returns True if calibs need to be run.
'''
for fn in [self.pvwcsfn, self.sefn, self.psffn, self.skyfn]:
print('exists?', os.path.exists(fn), fn)
if psfex and os.path.exists(self.psffn) and (not force):
# Sometimes SourceExtractor gets interrupted or something and
# writes out 0 detections. Then PsfEx fails but in a way that
# an output file is still written. Try to detect & fix this
# case.
# Check the PsfEx output file for POLNAME1
hdr = fitsio.read_header(self.psffn, ext=1)
if hdr.get('POLNAME1', None) is None:
print('Did not find POLNAME1 in PsfEx header', self.psffn, '-- deleting')
os.unlink(self.psffn)
else:
psfex = False
if psfex:
se = True
if se and os.path.exists(self.sefn) and (not force):
# Check SourceExtractor catalog for size = 0
fn = self.sefn
T = fits_table(fn, hdu=2)
print('Read', len(T), 'sources from SE catalog', fn)
if T is None or len(T) == 0:
print('SourceExtractor catalog', fn, 'has no sources -- deleting')
try:
os.unlink(fn)
except:
pass
if os.path.exists(self.sefn):
se = False
if se:
funpack = True
if pvastrom and os.path.exists(self.pvwcsfn) and (not force):
fn = self.pvwcsfn
if os.path.exists(fn):
try:
wcs = Sip(fn)
except:
print('Failed to read PV-SIP file', fn, '-- deleting')
os.unlink(fn)
if os.path.exists(fn):
pvastrom = False
if sky and os.path.exists(self.skyfn) and (not force):
fn = self.skyfn
if os.path.exists(fn):
try:
hdr = fitsio.read_header(fn)
except:
print('Failed to read sky file', fn, '-- deleting')
os.unlink(fn)
if os.path.exists(fn):
sky = False
if just_check:
return (se or psfex or sky or pvastrom)
tmpimgfn = None
tmpmaskfn = None
# Unpacked image file
funimgfn = self.imgfn
funmaskfn = self.dqfn
if funpack:
# For FITS files that are not actually fpack'ed, funpack -E
# fails. Check whether actually fpacked.
hdr = fitsio.read_header(self.imgfn, ext=self.hdu)
if not ((hdr['XTENSION'] == 'BINTABLE') and hdr.get('ZIMAGE', False)):
print('Image', self.imgfn, 'HDU', self.hdu, 'is not actually fpacked; not funpacking, just imcopying.')
fcopy = True
tmpimgfn = create_temp(suffix='.fits')
tmpmaskfn = create_temp(suffix='.fits')
if fcopy:
cmd = 'imcopy %s"+%i" %s' % (self.imgfn, self.hdu, tmpimgfn)
else:
cmd = 'funpack -E %i -O %s %s' % (self.hdu, tmpimgfn, self.imgfn)
print(cmd)
if os.system(cmd):
raise RuntimeError('Command failed: ' + cmd)
funimgfn = tmpimgfn
if use_mask:
if fcopy:
cmd = 'imcopy %s"+%i" %s' % (self.dqfn, self.hdu, tmpmaskfn)
else:
cmd = 'funpack -E %i -O %s %s' % (self.hdu, tmpmaskfn, self.dqfn)
print(cmd)
if os.system(cmd):
#raise RuntimeError('Command failed: ' + cmd)
print('Command failed: ' + cmd)
M,hdr = fitsio.read(self.dqfn, ext=self.hdu, header=True)
print('Read', M.dtype, M.shape)
fitsio.write(tmpmaskfn, M, header=hdr, clobber=True)
print('Wrote', tmpmaskfn, 'with fitsio')
funmaskfn = tmpmaskfn
if se:
# grab header values...
primhdr = self.read_image_primary_header()
magzp = primhdr['MAGZERO']
seeing = self.pixscale * self.fwhm
print('FWHM', self.fwhm, 'pix')
print('pixscale', self.pixscale, 'arcsec/pix')
print('Seeing', seeing, 'arcsec')
if se:
maskstr = ''
if use_mask:
maskstr = '-FLAG_IMAGE ' + funmaskfn
sedir = self.decals.get_se_dir()
trymakedirs(self.sefn, dir=True)
cmd = ' '.join([
'sex',
'-c', os.path.join(sedir, 'DECaLS.se'),
maskstr,
'-SEEING_FWHM %f' % seeing,
'-PARAMETERS_NAME', os.path.join(sedir, 'DECaLS.param'),
'-FILTER_NAME', os.path.join(sedir, 'gauss_5.0_9x9.conv'),
'-STARNNW_NAME', os.path.join(sedir, 'default.nnw'),
'-PIXEL_SCALE 0',
# SE has a *bizarre* notion of "sigma"
'-DETECT_THRESH 1.0',
'-ANALYSIS_THRESH 1.0',
'-MAG_ZEROPOINT %f' % magzp,
'-CATALOG_NAME', self.sefn,
funimgfn])
print(cmd)
if os.system(cmd):
raise RuntimeError('Command failed: ' + cmd)
if pvastrom:
# DECam images appear to have PV coefficients up to PVx_10,
# which are up to cubic terms in xi,eta,r. Overshoot what we
# need in SIP terms.
tmpwcsfn = create_temp(suffix='.wcs')
cmd = ('wcs-pv2sip -S -o 6 -e %i %s %s' %
(self.hdu, self.imgfn, tmpwcsfn))
print(cmd)
if os.system(cmd):
raise RuntimeError('Command failed: ' + cmd)
# Read the resulting WCS header and add version info cards to it.
version_hdr = get_version_header(None, self.decals.get_decals_dir())
wcshdr = fitsio.read_header(tmpwcsfn)
os.unlink(tmpwcsfn)
for r in wcshdr.records():
version_hdr.add_record(r)
trymakedirs(self.pvwcsfn, dir=True)
fitsio.write(self.pvwcsfn, None, header=version_hdr, clobber=True)
print('Wrote', self.pvwcsfn)
if psfex:
sedir = self.decals.get_se_dir()
trymakedirs(self.psffn, dir=True)
# If we wrote *.psf instead of *.fits in a previous run...
oldfn = self.psffn.replace('.fits', '.psf')
if os.path.exists(oldfn):
print('Moving', oldfn, 'to', self.psffn)
os.rename(oldfn, self.psffn)
else:
cmd = ('psfex -c %s -PSF_DIR %s %s' %
(os.path.join(sedir, 'DECaLS.psfex'),
os.path.dirname(self.psffn), self.sefn))
print(cmd)
rtn = os.system(cmd)
if rtn:
raise RuntimeError('Command failed: ' + cmd + ': return value: %i' % rtn)
if sky:
print('Fitting sky for', self)
slc = self.get_good_image_slice(None)
print('Good image slice is', slc)
img = self.read_image(slice=slc)
wt = self.read_invvar(slice=slc)
img = img[wt > 0]
try:
skyval = estimate_mode(img, raiseOnWarn=True)
skymeth = 'mode'
except:
skyval = np.median(img)
skymeth = 'median'
tsky = ConstantSky(skyval)
tt = type(tsky)
sky_type = '%s.%s' % (tt.__module__, tt.__name__)
hdr = get_version_header(None, self.decals.get_decals_dir())
hdr.add_record(dict(name='SKYMETH', value=skymeth,
comment='estimate_mode, or fallback to median?'))
hdr.add_record(dict(name='SKY', value=sky_type, comment='Sky class'))
tsky.toFitsHeader(hdr, prefix='SKY_')
trymakedirs(self.skyfn, dir=True)
fits = fitsio.FITS(self.skyfn, 'rw', clobber=True)
fits.write(None, header=hdr)
if tmpimgfn is not None:
os.unlink(tmpimgfn)
if tmpmaskfn is not None:
os.unlink(tmpmaskfn)
def run_calibs(X):
im = X[0]
kwargs = X[1]
print('run_calibs for image', im)
return im.run_calibs(**kwargs)
def read_one_tim(X):
(im, targetrd, gaussPsf, const2psf, pixPsf) = X
print('Reading', im)
tim = im.get_tractor_image(radecpoly=targetrd, gaussPsf=gaussPsf,
const2psf=const2psf, pixPsf=pixPsf)
return tim
from tractor.psfex import PsfExModel
class SchlegelPsfModel(PsfExModel):
def __init__(self, fn=None, ext=1):
'''
`ext` is ignored.
'''
if fn is not None:
from astrometry.util.fits import fits_table
T = fits_table(fn)
T.about()
ims = fitsio.read(fn, ext=2)
print('Eigen-images', ims.shape)
nsch,h,w = ims.shape
hdr = fitsio.read_header(fn)
x0 = 0.
y0 = 0.
xscale = 1. / hdr['XSCALE']
yscale = 1. / hdr['YSCALE']
degree = (T.xexp + T.yexp).max()
self.sampling = 1.
# Reorder the 'ims' to match the way PsfEx sorts its polynomial terms
# number of terms in polynomial
ne = (degree + 1) * (degree + 2) / 2
print('Number of eigen-PSFs required for degree=', degree, 'is', ne)
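# Ordering example (comment added for clarity): for degree=2 the index
# formula below maps (xexp, yexp) -> ii as
#   (0,0)->0, (1,0)->1, (2,0)->2, (0,1)->3, (1,1)->4, (0,2)->5,
# i.e. the x power varies fastest, matching the PsfEx term ordering
# assumed here.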
self.psfbases = np.zeros((ne, h,w))
for d in range(degree + 1):
# x polynomial degree = j
# y polynomial degree = k
for j in range(d+1):
k = d - j
ii = j + (degree+1) * k - (k * (k-1))/ 2
jj = np.flatnonzero((T.xexp == j) * (T.yexp == k))
if len(jj) == 0:
print('Schlegel image for power', j,k, 'not found')
continue
im = ims[jj,:,:]
print('Schlegel image for power', j,k, 'has range', im.min(), im.max(), 'sum', im.sum())
self.psfbases[ii,:,:] = ims[jj,:,:]
self.xscale, self.yscale = xscale, yscale
self.x0,self.y0 = x0,y0
self.degree = degree
print('SchlegelPsfEx degree:', self.degree)
bh,bw = self.psfbases[0].shape
self.radius = (bh+1)/2.
|
gregreen/legacypipe
|
py/legacypipe/common.py
|
Python
|
gpl-2.0
| 86,929
|
[
"Galaxy",
"Gaussian"
] |
ffa9960f6cd8f81f6b147570847d5ef6cecd16d7d853b36d1cd864d617aca775
|
import numpy as np
from diffraction import Site
# Crystal Structure
lattice_parameters = (5.5876, 5.5876, 13.867, 90, 90, 120)
chemical_formula = 'BiFeO3'
formula_units = 6
g_factor = 2
space_group = 'R3c'
# Atomic sites
sites = {
'Bi1': Site('Bi3+', (0, 0, 0)),
'Fe1': Site('Fe3+', (0, 0, 0.2212)),
'O1': Site('O2-', (0.443, 0.012, 0.9543))
}
# Magnetic sites
magnetic_ion = sites['Fe1'].ion
pos_Fe1 = sites['Fe1'].position
pos_Fe2 = pos_Fe1
# Propagation vectors
delta = 0.0045
k1 = np.array((delta, delta, 0))
k2 = np.array((delta, -2 * delta, 0))
k3 = np.array((-2 * delta, delta, 0))
|
noahwaterfieldprice/bfo
|
bfo/constants.py
|
Python
|
gpl-2.0
| 613
|
[
"CRYSTAL"
] |
a4807ba7f67a7a8f2976d1a0f793181ff81324d29bbb3cd1935e95b8bbebfc7c
|
"""Next-gen alignments with TopHat a spliced read mapper for RNA-seq experiments.
http://tophat.cbcb.umd.edu
"""
import os
import shutil
from contextlib import closing
import glob
import numpy
import pysam
try:
import sh
except ImportError:
sh = None
from bcbio.pipeline import config_utils
from bcbio.ngsalign import bowtie, bowtie2
from bcbio.utils import safe_makedir, file_exists, get_in, symlink_plus
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.provenance import do
from bcbio import bam
from bcbio import broad
import bcbio.pipeline.datadict as dd
_out_fnames = ["accepted_hits.sam", "junctions.bed",
"insertions.bed", "deletions.bed"]
def _set_quality_flag(options, config):
qual_format = config["algorithm"].get("quality_format", None)
if qual_format.lower() == "illumina":
options["solexa1.3-quals"] = True
elif qual_format.lower() == "solexa":
options["solexa-quals"] = True
return options
def _set_transcriptome_option(options, data, ref_file):
# prefer transcriptome-index vs a GTF file if available
transcriptome_index = get_in(data, ("genome_resources", "rnaseq",
"transcriptome_index", "tophat"))
fusion_mode = get_in(data, ("config", "algorithm", "fusion_mode"), False)
if transcriptome_index and file_exists(transcriptome_index) and not fusion_mode:
options["transcriptome-index"] = os.path.splitext(transcriptome_index)[0]
return options
gtf_file = dd.get_gtf_file(data)
if gtf_file:
options["GTF"] = gtf_file
return options
return options
def _set_cores(options, config):
num_cores = config["algorithm"].get("num_cores", 0)
if num_cores > 1 and "num-threads" not in options:
options["num-threads"] = num_cores
return options
def _set_rg_options(options, names):
if not names:
return options
options["rg-id"] = names["rg"]
options["rg-sample"] = names["sample"]
options["rg-library"] = names["pl"]
options["rg-platform-unit"] = names["pu"]
return options
def _set_stranded_flag(options, config):
strand_flag = {"unstranded": "fr-unstranded",
"firststrand": "fr-firststrand",
"secondstrand": "fr-secondstrand"}
stranded = get_in(config, ("algorithm", "strandedness"), "unstranded").lower()
assert stranded in strand_flag, ("%s is not a valid strandedness value. "
"Valid values are 'firststrand', "
"'secondstrand' and 'unstranded" % (stranded))
flag = strand_flag[stranded]
options["library-type"] = flag
return options
def _set_fusion_mode(options, config):
fusion_mode = get_in(config, ("algorithm", "fusion_mode"), False)
if fusion_mode:
options["fusion-search"] = True
return options
def tophat_align(fastq_file, pair_file, ref_file, out_base, align_dir, data,
names=None):
"""
run alignment using Tophat v2
"""
config = data["config"]
options = get_in(config, ("resources", "tophat", "options"), {})
options = _set_fusion_mode(options, config)
options = _set_quality_flag(options, config)
options = _set_transcriptome_option(options, data, ref_file)
options = _set_cores(options, config)
options = _set_rg_options(options, names)
options = _set_stranded_flag(options, config)
ref_file, runner = _determine_aligner_and_reference(ref_file, config)
# fusion search does not work properly with Bowtie2
if options.get("fusion-search", False):
ref_file = ref_file.replace("/bowtie2", "/bowtie")
if _tophat_major_version(config) == 1:
raise NotImplementedError("Tophat versions < 2.0 are not supported, please "
"download the newest version of Tophat here: "
"http://tophat.cbcb.umd.edu")
if _ref_version(ref_file) == 1 or options.get("fusion-search", False):
options["bowtie1"] = True
out_dir = os.path.join(align_dir, "%s_tophat" % out_base)
final_out = os.path.join(out_dir, "{0}.bam".format(names["sample"]))
if file_exists(final_out):
return final_out
out_file = os.path.join(out_dir, "accepted_hits.sam")
unmapped = os.path.join(out_dir, "unmapped.bam")
files = [ref_file, fastq_file]
if not file_exists(out_file):
with file_transaction(config, out_dir) as tx_out_dir:
safe_makedir(tx_out_dir)
if pair_file and not options.get("mate-inner-dist", None):
d, d_stdev = _estimate_paired_innerdist(fastq_file, pair_file,
ref_file, out_base,
tx_out_dir, data)
options["mate-inner-dist"] = d
options["mate-std-dev"] = d_stdev
files.append(pair_file)
options["output-dir"] = tx_out_dir
options["no-convert-bam"] = True
options["no-coverage-search"] = True
options["no-mixed"] = True
tophat_runner = sh.Command(config_utils.get_program("tophat",
config))
ready_options = {}
for k, v in options.iteritems():
ready_options[k.replace("-", "_")] = v
# tophat requires options before arguments,
# otherwise it silently ignores them
tophat_ready = tophat_runner.bake(**ready_options)
cmd = str(tophat_ready.bake(*files))
do.run(cmd, "Running Tophat on %s and %s." % (fastq_file, pair_file), None)
_fix_empty_readnames(out_file, data)
if pair_file and _has_alignments(out_file):
fixed = _fix_mates(out_file, os.path.join(out_dir, "%s-align.sam" % out_base),
ref_file, config)
else:
fixed = out_file
fixed = merge_unmapped(fixed, unmapped, config)
fixed = _fix_unmapped(fixed, config, names)
fixed = bam.sort(fixed, config)
picard = broad.runner_from_config(config)
# set the contig order to match the reference file so GATK works
fixed = picard.run_fn("picard_reorder", out_file, data["sam_ref"],
os.path.splitext(out_file)[0] + ".picard.bam")
fixed = fix_insert_size(fixed, config)
if not file_exists(final_out):
symlink_plus(fixed, final_out)
return final_out
def merge_unmapped(mapped_sam, unmapped_bam, config):
merged_bam = os.path.join(os.path.dirname(mapped_sam), "merged.bam")
bam_file = bam.sam_to_bam(mapped_sam, config)
if not file_exists(merged_bam):
merged_bam = bam.merge([bam_file, unmapped_bam], merged_bam, config)
return merged_bam
def _has_alignments(sam_file):
with open(sam_file) as in_handle:
for line in in_handle:
if line.startswith("File removed to save disk space"):
return False
elif not line.startswith("@"):
return True
return False
def _fix_empty_readnames(orig_file, data):
""" Fix SAMfile reads with empty read names
Tophat 2.0.9 sometimes outputs empty read names, making the
FLAG field be the read name. This throws those reads away.
"""
with file_transaction(data, orig_file) as tx_out_file:
logger.info("Removing reads with empty read names from Tophat output.")
with open(orig_file) as orig, open(tx_out_file, "w") as out:
for line in orig:
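# Heuristic used below (comment added for clarity): if the first whitespace-
# separated field is purely numeric it is taken to be the FLAG column, i.e.
# the read name was empty, so the record is dropped.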
if line.split()[0].isdigit():
continue
out.write(line)
return orig_file
def _fix_mates(orig_file, out_file, ref_file, config):
"""Fix problematic unmapped mate pairs in TopHat output.
TopHat 2.0.9 appears to have issues with secondary reads:
https://groups.google.com/forum/#!topic/tuxedo-tools-users/puLfDNbN9bo
This cleans the input file to only keep properly mapped pairs,
providing a general fix that will handle correctly mapped secondary
reads as well.
"""
if not file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
samtools = config_utils.get_program("samtools", config)
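# -F 8 excludes reads whose SAM flag has bit 0x8 set (mate unmapped), so only
# pairs with both mates aligned are kept (comment added for clarity).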
cmd = "{samtools} view -h -t {ref_file}.fai -F 8 {orig_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Fix mate pairs in TopHat output", {})
return out_file
def _fix_unmapped(unmapped_file, config, names):
"""
the unmapped.bam file from Tophat 2.0.9 is missing some things
1) the RG tag is missing from the reads
2) MAPQ is set to 255 instead of 0
3) for reads where both are unmapped, the mate_is_unmapped flag is not set correctly
"""
out_file = os.path.splitext(unmapped_file)[0] + "_fixed.bam"
if file_exists(out_file):
return out_file
picard = broad.runner_from_config(config)
rg_fixed = picard.run_fn("picard_fix_rgs", unmapped_file, names)
fixed = bam.sort(rg_fixed, config, "queryname")
with closing(pysam.Samfile(fixed)) as work_sam:
with file_transaction(config, out_file) as tx_out_file:
tx_out = pysam.Samfile(tx_out_file, "wb", template=work_sam)
for read1 in work_sam:
if not read1.is_paired:
if read1.is_unmapped:
read1.mapq = 0
tx_out.write(read1)
continue
read2 = work_sam.next()
if read1.qname != read2.qname:
continue
if read1.is_unmapped and not read2.is_unmapped:
read1.mapq = 0
read1.tid = read2.tid
if not read1.is_unmapped and read2.is_unmapped:
read2.mapq = 0
read2.tid = read1.tid
if read1.is_unmapped and read2.is_unmapped:
read1.mapq = 0
read2.mapq = 0
read1.mate_is_unmapped = True
read2.mate_is_unmapped = True
tx_out.write(read1)
tx_out.write(read2)
tx_out.close()
return out_file
def align(fastq_file, pair_file, ref_file, names, align_dir, data,):
out_files = tophat_align(fastq_file, pair_file, ref_file, names["lane"],
align_dir, data, names)
return out_files
def _estimate_paired_innerdist(fastq_file, pair_file, ref_file, out_base,
out_dir, data):
"""Use Bowtie to estimate the inner distance of paired reads.
"""
mean, stdev = _bowtie_for_innerdist("100000", fastq_file, pair_file, ref_file,
out_base, out_dir, data, True)
if not mean or not stdev:
mean, stdev = _bowtie_for_innerdist("1", fastq_file, pair_file, ref_file,
out_base, out_dir, data, True)
# No reads aligning so no data to process, set some default values
if not mean or not stdev:
mean, stdev = 200, 50
return mean, stdev
def _bowtie_for_innerdist(start, fastq_file, pair_file, ref_file, out_base,
out_dir, data, remove_workdir=False):
work_dir = os.path.join(out_dir, "innerdist_estimate")
if os.path.exists(work_dir):
shutil.rmtree(work_dir)
safe_makedir(work_dir)
extra_args = ["-s", str(start), "-u", "250000"]
ref_file, bowtie_runner = _determine_aligner_and_reference(ref_file, data["config"])
out_sam = bowtie_runner.align(fastq_file, pair_file, ref_file, {"lane": out_base},
work_dir, data, extra_args)
dists = []
with closing(pysam.Samfile(out_sam)) as work_sam:
for read in work_sam:
if read.is_proper_pair and read.is_read1:
dists.append(abs(read.isize) - 2 * read.rlen)
if dists:
median = float(numpy.median(dists))
deviations = []
for d in dists:
deviations.append(abs(d - median))
# this is the median absolute deviation estimator of the
# standard deviation
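# (1.4826 ~ 1/Phi^-1(0.75), the consistency factor that makes the MAD an
# unbiased sigma estimate for Gaussian-distributed data)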
mad = 1.4826 * float(numpy.median(deviations))
return int(median), int(mad)
else:
return None, None
def _calculate_average_read_length(sam_file):
with closing(pysam.Samfile(sam_file)) as work_sam:
count = 0
read_lengths = []
for read in work_sam:
count = count + 1
read_lengths.append(read.rlen)
avg_read_length = int(float(sum(read_lengths)) / float(count))
return avg_read_length
def _bowtie_major_version(stdout):
"""
bowtie --version returns strings like this:
bowtie version 0.12.7
32-bit
Built on Franklin.local
Tue Sep 7 14:25:02 PDT 2010
"""
version_line = stdout.split("\n")[0]
version_string = version_line.strip().split()[2]
major_version = int(version_string.split(".")[0])
# bowtie version 1 has a leading character of 0 or 1
if major_version == 0 or major_version == 1:
major_version = 1
return major_version
def _determine_aligner_and_reference(ref_file, config):
fusion_mode = get_in(config, ("algorithm", "fusion_mode"), False)
# fusion_mode only works with bowtie1
if fusion_mode:
return _get_bowtie_with_reference(config, ref_file, 1)
else:
return _get_bowtie_with_reference(config, ref_file, 2)
def _get_bowtie_with_reference(config, ref_file, version):
if version == 1:
ref_file = ref_file.replace("/bowtie2/", "/bowtie/")
return ref_file, bowtie
else:
ref_file = ref_file.replace("/bowtie/", "/bowtie2/")
return ref_file, bowtie2
def _tophat_major_version(config):
tophat_runner = sh.Command(config_utils.get_program("tophat", config,
default="tophat"))
# tophat --version returns strings like this: Tophat v2.0.4
version_string = str(tophat_runner(version=True)).strip().split()[1]
major_version = int(version_string.split(".")[0][1:])
return major_version
def _ref_version(ref_file):
for ext in [os.path.splitext(x)[1] for x in glob.glob(ref_file + "*")]:
if ext == ".ebwt":
return 1
elif ext == ".bt2":
return 2
raise ValueError("Cannot detect which reference version %s is. "
"Should end in either .ebwt (bowtie) or .bt2 "
"(bowtie2)." % (ref_file))
def fix_insert_size(in_bam, config):
"""
Tophat sets PI in the RG to be the inner distance size, but the SAM spec
states it should be the insert size. This fixes the RG header in the
alignment file generated by Tophat to match the spec.
"""
fixed_file = os.path.splitext(in_bam)[0] + ".pi_fixed.bam"
if file_exists(fixed_file):
return fixed_file
header_file = os.path.splitext(in_bam)[0] + ".header.sam"
read_length = bam.estimate_read_length(in_bam)
bam_handle = bam.open_samfile(in_bam)
header = bam_handle.header.copy()
rg_dict = header['RG'][0]
if 'PI' not in rg_dict:
return in_bam
PI = int(rg_dict.get('PI'))
PI = PI + 2*read_length
rg_dict['PI'] = PI
header['RG'][0] = rg_dict
with pysam.Samfile(header_file, "wb", header=header) as out_handle:
with bam.open_samfile(in_bam) as in_handle:
for record in in_handle:
out_handle.write(record)
shutil.move(header_file, fixed_file)
return fixed_file
|
SciLifeLab/bcbio-nextgen
|
bcbio/ngsalign/tophat.py
|
Python
|
mit
| 15,665
|
[
"Bowtie",
"pysam"
] |
5cf61edbf3bcaf03ee61c47f83a5ce2d03c2e64683e0f48309715321770298d5
|
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from . import MinCovDet
from ..utils.validation import check_is_fitted
from ..metrics import accuracy_score
from ..base import OutlierMixin
class EllipticEnvelope(OutlierMixin, MinCovDet):
"""An object for detecting outliers in a Gaussian distributed dataset.
Read more in the :ref:`User Guide <outlier_detection>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, the support of robust location and covariance estimates
is computed, and a covariance estimate is recomputed from it,
without centering the data.
        Useful to work with data whose mean is close to, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. If None, the minimum value of support_fraction will
be used within the algorithm: `[n_sample + n_features + 1] / 2`.
Range is (0, 1).
contamination : float, default=0.1
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Range is (0, 0.5].
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling
the data. Pass an int for reproducible results across multiple function
calls. See :term: `Glossary <random_state>`.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated robust location.
covariance_ : ndarray of shape (n_features, n_features)
Estimated robust covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute the
robust estimates of location and shape.
offset_ : float
Offset used to define the decision function from the raw scores.
We have the relation: ``decision_function = score_samples - offset_``.
The offset depends on the contamination parameter and is defined in
such a way we obtain the expected number of outliers (samples with
decision function < 0) in training.
.. versionadded:: 0.20
raw_location_ : ndarray of shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : ndarray of shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
dist_ : ndarray of shape (n_samples,)
Mahalanobis distances of the training set (on which :meth:`fit` is
called) observations.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EllipticEnvelope
>>> true_cov = np.array([[.8, .3],
... [.3, .4]])
>>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
... cov=true_cov,
... size=500)
>>> cov = EllipticEnvelope(random_state=0).fit(X)
>>> # predict returns 1 for an inlier and -1 for an outlier
>>> cov.predict([[0, 0],
... [3, 3]])
array([ 1, -1])
>>> cov.covariance_
array([[0.7411..., 0.2535...],
[0.2535..., 0.3053...]])
>>> cov.location_
array([0.0813... , 0.0427...])
See Also
--------
EmpiricalCovariance, MinCovDet
Notes
-----
Outlier detection from covariance estimation may break or not
perform well in high-dimensional settings. In particular, one will
always take care to work with ``n_samples > n_features ** 2``.
References
----------
.. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
minimum covariance determinant estimator" Technometrics 41(3), 212
(1999)
"""
def __init__(self, *, store_precision=True, assume_centered=False,
support_fraction=None, contamination=0.1,
random_state=None):
super().__init__(
store_precision=store_precision,
assume_centered=assume_centered,
support_fraction=support_fraction,
random_state=random_state)
self.contamination = contamination
def fit(self, X, y=None):
"""Fit the EllipticEnvelope model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
"""
if self.contamination != 'auto':
if not(0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5], "
"got: %f" % self.contamination)
super().fit(X)
self.offset_ = np.percentile(-self.dist_, 100. * self.contamination)
return self
def decision_function(self, X):
"""Compute the decision function of the given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
decision : ndarray of shape (n_samples,)
Decision function of the samples.
It is equal to the shifted Mahalanobis distances.
The threshold for being an outlier is 0, which ensures a
compatibility with other outlier detection algorithms.
"""
check_is_fitted(self)
negative_mahal_dist = self.score_samples(X)
return negative_mahal_dist - self.offset_
def score_samples(self, X):
"""Compute the negative Mahalanobis distances.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
negative_mahal_distances : array-like of shape (n_samples,)
Opposite of the Mahalanobis distances.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
return -self.mahalanobis(X)
def predict(self, X):
"""
Predict the labels (1 inlier, -1 outlier) of X according to the
fitted model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
values = self.decision_function(X)
is_inlier = np.full(values.shape[0], -1, dtype=int)
is_inlier[values >= 0] = 1
return is_inlier
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) w.r.t. y.
"""
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
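
# Illustrative relation between the scoring methods above (a minimal sketch; `X` is any
# array the estimator was fitted on):
#
#     clf = EllipticEnvelope(contamination=0.1).fit(X)
#     # decision_function is score_samples shifted by the fitted offset_
#     assert np.allclose(clf.decision_function(X), clf.score_samples(X) - clf.offset_)
#     labels = clf.predict(X)   # +1 for inliers, -1 for outliers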
|
kevin-intel/scikit-learn
|
sklearn/covariance/_elliptic_envelope.py
|
Python
|
bsd-3-clause
| 8,336
|
[
"Gaussian"
] |
fe68be627c5dc1e15c341abf1fc35578e7e9f24b8db415e3e634f001c6b93cec
|
# Copyright 2004 by Harry Zuzan. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#Edited for speed for BioGui. Core function was preserved.
"""
Classes for accessing the information in Affymetrix cel files.
Functions:
read Read a cel file and store its contents in a Record
Classes:
Record Contains the information from a cel file
"""
import numpy
import gzip
class Record:
"""
Stores the information in a cel file
"""
def __init__(self):
self.intensities = None
self.nrows = None
self.ncols = None
def read(handle):
"""
Read the information in a cel file, and store it in a Record.
"""
# Needs error handling.
# Needs to know the chip design.
record = Record()
for line in handle:
if not line.strip():
continue
if line[:8]=="[HEADER]":
section = "HEADER"
elif line[:11]=="[INTENSITY]":
section = "INTENSITY"
record.intensities = numpy.zeros((record.nrows, record.ncols))
elif line[0]=="[":
section = ""
elif section=="HEADER":
keyword, value = line.split("=", 1)
if keyword=="Cols":
record.ncols = int(value)
elif keyword=="Rows":
record.nrows = int(value)
elif section=="INTENSITY":
if "=" in line:
continue
words = line.split()
y, x = map(int, words[:2])
record.intensities[x,y] = float(words[2])
return record
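
# Illustrative usage (a minimal sketch; the file name is hypothetical and must point to a
# plain-text CEL file):
#
#     handle = open("example.CEL")
#     record = read(handle)
#     print record.nrows, record.ncols, record.intensities.shape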
def platform(handle):
h = gzip.GzipFile(r'.\CurrentCel/'+handle)
i = 0
stopper = True
pName = ''
while stopper:
line = h.readline()
if line[:9] == 'DatHeader':
for split in line.split():
if r'.1sq' in split:
pName = split[:-4]
print pName
stopper = False
return pName
|
fxb22/BioGUI
|
Utils/CelFileReader.py
|
Python
|
gpl-2.0
| 2,077
|
[
"Biopython"
] |
9b6eb8ea421d224122475df2ca21bf35f53497e6e5bc505e90ea81d28dfad050
|
"""
Morlet Wavelet Analysis
=======================
Perform time-frequency decomposition using wavelets.
In this tutorial we will use Morlet wavelets to compute a time-frequency
representation of the data.
This tutorial primarily covers the ``neurodsp.timefrequency.wavelets`` module.
"""
###################################################################################################
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
# Import simulation and plot code to create & visualize data
from neurodsp.sim import sim_combined
from neurodsp.plts import plot_time_series, plot_timefrequency
from neurodsp.utils import create_times
# Import function for Morlet Wavelets
from neurodsp.timefrequency.wavelets import compute_wavelet_transform
###################################################################################################
# Simulate Data
# -------------
#
# First, we'll simulate a time series using the :func:`~.sim_combined` function
# to create a time-varying oscillation.
#
# For this example, our oscillation frequency will be 20 Hz, with a sampling rate of 500 Hz,
# and a simulation time of 10 seconds.
#
###################################################################################################
# Define general settings used across the example
fs = 500
# Define settings for the simulated oscillation
n_seconds = 10
freq = 20
exp = -1
# Define settings for creating the simulated signal
comps = {'sim_powerlaw' : {'exponent' : exp, 'f_range' : (2, None)},
'sim_bursty_oscillation' : {'freq' : freq}}
comp_vars = [0.25, 1]
# Simulate a signal with bursty oscillations at 20 Hz
sig = sim_combined(n_seconds, fs, comps, comp_vars)
times = create_times(n_seconds, fs)
###################################################################################################
# Plot a segment of our simulated time series
plot_time_series(times, sig, xlim=[0, 2])
###################################################################################################
# Compute Wavelet Transform
# -------------------------
#
# Now, let's use the :func:`~.compute_wavelet_transform` function to compute a
# time-frequency representation of our simulated data, using Morlet wavelets.
#
# To apply the continuous Morlet wavelet transform, we need to specify frequencies of
# interest. The wavelet transform can then be used to compute the power at these
# frequencies across time.
#
# For this example, we'll compute the Morlet wavelet transform on 50 equally-spaced
# frequencies from 5 Hz to 100 Hz.
#
###################################################################################################
# Settings for the wavelet transform algorithm
freqs = np.linspace(5, 100, 50)
# Compute the wavelet transform using the compute_wavelet_transform function
mwt = compute_wavelet_transform(sig, fs=fs, n_cycles=7, freqs=freqs)
###################################################################################################
# Plot morlet wavelet transform
plot_timefrequency(times, freqs, mwt)
###################################################################################################
#
# In the plot above, we can see the time-frequency representation from the
# Morlet-wavelet transformed signal.
#
# Note that having simulated a bursty signal at 20 Hz, we can see that the
# time-frequency representation shows periods with high power at this frequency.
#
###################################################################################################
# Computing wavelets across different frequency ranges
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If we want to compute the time-frequency representation across a different frequency range,
# we can change the frequencies passed to the Morlet wavelet transform algorithm.
#
# For the next example, let's use an array of frequencies from 15 Hz to 50 Hz with a
# spacing of 5 Hz.
#
###################################################################################################
# Settings for the wavelet transform algorithm
freqs = np.arange(15, 50, 5)
# Compute the wavelet transform using the compute_wavelet_transform function
mwt = compute_wavelet_transform(sig, fs=fs, n_cycles=7, freqs=freqs)
###################################################################################################
# Plot morlet wavelet transform
plot_timefrequency(times, freqs, mwt)
###################################################################################################
#
# From the plot above, you can see the Morlet-wavelet transformed signal for the new frequency
# range. Again, we can see how power is concentrated at the frequency of our simulated oscillation.
#
###################################################################################################
# Wavelet Description
# -------------------
#
# Let's look a little further into what a Wavelet is. In general, a wavelet is simply a small
# "wave" like signal. By sweeping this wave across our data, we can see how much of this 'wave'
# is in our signal, which is useful to quantify variations of signal amplitude across time.
#
# A Morlet wavelet is a particular type of wavelet in which the wavelet has been multiplied
# by a Gaussian envelope.
#
# Some parameters are needed to define a Morlet wavelet. These parameters are the
# sampling frequency, the fundamental frequency, and the number of cycles per frequency.
#
# For more information on Morlet wavelets, see:
# `Morlet wavelets in time and frequency <https://www.youtube.com/watch?v=7ahrcB5HL0k>`_
# a video on Youtube from Mike X Cohen.
#
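###################################################################################################
# As a rough sketch, a Morlet wavelet can be written as a complex sine multiplied by a
# Gaussian envelope. The width used here, sigma = n_cycles / (2 * pi * f), and the lack of
# normalization are simplifying assumptions for illustration only.

# Build an approximate Morlet wavelet by hand: 20 Hz, 5 cycles
demo_freq = 20
demo_n_cycles = 5
demo_sigma = demo_n_cycles / (2 * np.pi * demo_freq)
demo_times = np.arange(-0.5, 0.5, 1 / fs)
demo_wavelet = np.exp(2j * np.pi * demo_freq * demo_times) * \
    np.exp(-demo_times ** 2 / (2 * demo_sigma ** 2))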
###################################################################################################
# Example Plot of Morlet Wavelet
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Here, we provide an example plot of an individual Morlet wavelet. We'll use the
# `scipy.signal` function `morlet` to create a wavelet that is 5 cycles long.
#
###################################################################################################
# Define sampling rate, number of cycles, fundamental frequency, and length for the wavelet
n_cycles = 5
freq = 5
scaling = 1.0
omega = n_cycles
wavelet_len = int(n_cycles * fs / freq)
# Create wavelet
wavelet = signal.morlet(wavelet_len, omega, scaling)
###################################################################################################
# Plot the real part of the wavelet
_, ax = plt.subplots()
ax.plot(np.real(wavelet))
ax.set_axis_off()
###################################################################################################
# Real & Imaginary Dimensions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Note that wavelets have both real and imaginary dimensions.
#
###################################################################################################
# Plot both real and imaginary dimensions of the wavelet
_, ax = plt.subplots()
ax.plot(np.real(wavelet))
ax.plot(np.imag(wavelet))
ax.set_axis_off()
###################################################################################################
# Plot real and imaginary components in a 3D plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(np.linspace(0, scaling, wavelet.size), wavelet.real, wavelet.imag)
ax.set(xlabel='Scaling', ylabel='Real Amplitude', zlabel='Imag Amplitude')
###################################################################################################
#
# In the plots above, you can see both the real and imaginary components of the Morlet-wavelet.
#
# Note that the function we have been using, :func:`~.compute_wavelet_transform`
# creates Morlet wavelets in the same way we have been doing here, by using the
# `morlet` function from scipy.
#
####################################################################################################
# Changing Parameters
# ~~~~~~~~~~~~~~~~~~~
#
# Adjusting the input parameters results in a different wavelet.
#
# For example, let's try this same plot but with a different number of cycles:
#
###################################################################################################
# Define settings for a new wavelet
n_cycles = 10
freq = 5
scaling = 1.0
omega = n_cycles
wavelet_len = int(n_cycles * fs / freq)
# Create wavelet
wavelet = signal.morlet(wavelet_len, omega, scaling)
# Plot wavelet
_, ax = plt.subplots()
ax.plot(np.real(wavelet))
ax.plot(np.imag(wavelet))
ax.set_axis_off()
###################################################################################################
#
# As you can see, when you increase the n_cycles parameter, you get more oscillations (cycles)
# in the wavelet.
#
###################################################################################################
# Time-Frequency Representations
# ------------------------------
#
# Let's now return to the Morlet wavelet transform we were originally using. This function
# creates wavelets, as we've been doing above, and then applies them to the data
# with the :func:`~.convolve_wavelet` function, which convolves the raw signal with
# our complex Morlet wavelet.
#
# The complex Morlet wavelet can be thought of as a complex sine tapered by a Gaussian. The
# result of the convolution returns a complex array (with real and imaginary components, like
# we plotted above) which represents how much power of the frequency of the wavelet was
# found in our signal.
#
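###################################################################################################
# As a simplified illustration (not the actual implementation of convolve_wavelet), the
# amplitude envelope at a single frequency can be estimated by convolving the signal with one
# complex Morlet wavelet and taking the magnitude of the result. The wavelet construction and
# scaling below are assumptions, mirroring the hand-made sketch above rather than neurodsp's
# internals.

demo_freq = 20
demo_sigma = 7 / (2 * np.pi * demo_freq)
demo_times = np.arange(-0.5, 0.5, 1 / fs)
demo_wavelet = np.exp(2j * np.pi * demo_freq * demo_times) * \
    np.exp(-demo_times ** 2 / (2 * demo_sigma ** 2))
amp_20hz = np.abs(np.convolve(sig, demo_wavelet, mode='same'))

# Plot the resulting estimate of 20 Hz amplitude over time
plot_time_series(times, amp_20hz, xlim=[0, 2])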
###################################################################################################
# Changing Parameters
# ~~~~~~~~~~~~~~~~~~~
#
# Returning to the Morlet-wavelet transform algorithm, we can adjust input parameters
# to demonstrate how changes in the number of cycles per frequency affects the outputs.
#
###################################################################################################
# Compute the wavelet transform with a higher number of cycles
freqs = np.arange(15, 50, 5)
mwt = compute_wavelet_transform(sig, fs=fs, n_cycles=15, freqs=freqs)
# Plot the wavelet transform
plot_timefrequency(times, freqs, mwt)
###################################################################################################
#
# As you can see, increasing n_cycles results in what looks like a smoother pattern of activity
# across time. This is because the wavelets, with more cycles, are longer. This can help
# increase frequency resolution, but decreases temporal resolution, due to the time-frequency
# trade off.
#
###################################################################################################
#
# If we adjust other input parameters, such as the frequency resolution, we can also get a
# different result.
#
###################################################################################################
# Compute the wavelet transform with a different set of frequencies
freqs = np.arange(10, 60, 10)
mwt = compute_wavelet_transform(sig, fs=fs, n_cycles=15, freqs=freqs)
# Plot the wavelet transform
plot_timefrequency(times, freqs, mwt)
###################################################################################################
#
# In the above, we used a larger frequency step, with the same starting and ending frequencies.
#
# Doing so changes the frequency resolution of our estimation.
#
|
voytekresearch/neurodsp
|
tutorials/timefreq/plot_MorletWavelet.py
|
Python
|
apache-2.0
| 11,349
|
[
"Gaussian"
] |
31202551aa6efb8c45ae9fdc21e160f331ad093a6ee612d62f90c07053de9a7c
|
"""
# Copyright Nick Cheng, Brian Harrington, Danny Heap, 2013, 2014, 2015
# Distributed under the terms of the GNU General Public License.
#
# This file is part of Assignment 2, CSCA48, Winter 2015
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
"""
# Do not change any of the declarations of RegexTree class and its
# subclasses! This module provides the various RegexTree subclasses
# you need to complete regex_functions.py
class RegexTree:
"""Root of a regular expression tree"""
def __init__(self, symbol, children):
"""(RegexTree, str, list of RegexTrees) -> NoneType
A new RegexTree with regex symbol and subtrees children.
REQ: symbol must be one of "0", "1", "2", "e", "|", ".", "*"
>>> print(RegexTree("0", []))
RegexTree('0', [])
>>> print(RegexTree("1", []))
RegexTree('1', [])
"""
self._symbol = symbol
self._children = children[:]
def __repr__(self):
"""(RegexTree) -> str
Return string representation of this RegexTree.
"""
return 'RegexTree({}, {})'.format(
repr(self._symbol), repr(self._children))
def __eq__(self, other):
"""(RegexTree, object) -> bool
Return whether RegexTree self is equivalent to other
>>> RegexTree("1", []).__eq__(RegexTree("2", []))
False
>>> RegexTree("2", []).__eq__(RegexTree("2", []))
True
"""
# cool trick here, we can compare the children variables
# because Python will call the __eq__ methods of each
# member of the list to check that they're equal
return (isinstance(other, RegexTree) and
self._symbol == other._symbol and
self._children == other._children)
def get_symbol(self):
"""(RegexTree) -> str
Return the symbol held in this node
"""
return self._symbol
def get_children(self):
"""(RegexTree) -> list of RegexTree
Return the children of this node in a list
"""
return self._children
class Leaf(RegexTree):
"""RegexTree with no children, used for symbols."""
def __init__(self, symbol):
"""(Leaf, str) -> NoneType
A new Leaf node with regex symbol and no children
"""
RegexTree.__init__(self, symbol, [])
def __repr__(self):
"""(Leaf) -> str
Return string representation of this Leaf
"""
return 'Leaf({})'.format(
repr(self._symbol))
class UnaryTree(RegexTree):
"""RegexTree with a single child, so far used only for star nodes."""
def __init__(self, symbol, child):
"""(UnaryTree, str, RegexTree) -> NoneType
A new UnaryTree with regex symbol and (only) child
"""
RegexTree.__init__(self, symbol, [child])
def __repr__(self):
"""(UnaryTree) -> str
Return string representation of this UnaryTree
"""
return 'UnaryTree({}, {})'.format(
repr(self._symbol), repr(self._children[0]))
def get_child(self):
"""(UnaryTree) -> RegexTree
Return the child of this node
"""
return self._children[0]
class BinaryTree(RegexTree):
"""RegexTree with two children. so far, it's only used for bar
and dot nodes.
"""
def __init__(self, symbol, left, right):
"""(BinaryTree, str, RegexTree, RegexTree) -> NoneType
A new BinaryTree with regex symbol and left and right children.
"""
RegexTree.__init__(self, symbol, [left, right])
def __repr__(self):
"""(BinaryTree) -> str
Return string representation of this BinaryTree
"""
return 'BinaryTree({}, {}, {})'.format(repr(self._symbol),
repr(self._children[0]),
repr(self._children[1]))
def get_left_child(self):
"""(BinaryTree) -> RegexTree
Return the left child of this node
"""
return self._children[0]
def get_right_child(self):
"""(BinaryTree) -> RegexTree
Return the right child of this node
"""
return self._children[1]
class StarTree(UnaryTree):
"""A UnaryTree rooted at a star ("*")
>>> rtn0 = RegexTree("0", [])
>>> rtn1 = RegexTree("1", [])
>>> rtdot = DotTree(rtn1, rtn1)
>>> rtbar = BarTree(rtn0, rtdot)
>>> StarTree(rtbar).__eq__(\
StarTree(BarTree(RegexTree('0', []), \
DotTree(RegexTree('1', []), RegexTree('1', [])))))
True
"""
def __init__(self, child):
"""(StarTree, RegexTree) -> NoneType
New StarTree representing child*
"""
UnaryTree.__init__(self, '*', child)
def __repr__(self):
"""(StarTree) -> str
Return string representation of this StarTree
"""
return 'StarTree({})'.format(repr(self._children[0]))
class BarTree(BinaryTree):
"""A UnaryTree rooted at a bar ("|")
>>> rtn0 = RegexTree("0", [])
>>> rtn1 = RegexTree("1", [])
>>> BarTree(rtn0, rtn1) == BarTree(RegexTree('0', []), \
RegexTree('1', []))
True
"""
def __init__(self, left, right):
"""(BarTree, RegexTree, RegexTree) -> NoneType
New BarTree representing (left | right)
"""
BinaryTree.__init__(self, "|", left, right)
def __repr__(self):
"""(BarTree) -> str
Return string representation of this BarTree"""
return 'BarTree({}, {})'.format(repr(self._children[0]),
repr(self._children[1]))
class DotTree(BinaryTree):
"""BinaryTree for a dot ('.')"""
def __init__(self, left, right):
"""(DotTree, RegexTree, RegexTree) -> NoneType
New DotTree representing (left . right)
>>> rtn0 = RegexTree("0", [])
>>> rtn1 = RegexTree("1", [])
>>> DotTree(rtn0, rtn1) == DotTree(RegexTree('0', []), \
RegexTree('1', []))
True
"""
BinaryTree.__init__(self, ".", left, right)
def __repr__(self):
"""(DotTree) -> str
Return string representation of this DotTree"""
return 'DotTree({}, {})'.format(repr(self._children[0]),
repr(self._children[1]))
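
# Illustrative only (not from the assignment handout): a tree for the regex ((0.1)*|2)
# can be assembled directly from these classes, e.g.
#     BarTree(StarTree(DotTree(Leaf("0"), Leaf("1"))), Leaf("2"))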
if __name__ == '__main__':
import doctest
doctest.testmod()
|
venkatkorapaty/Regex
|
regextree.py
|
Python
|
gpl-2.0
| 6,972
|
[
"Brian"
] |
0a81df54bb5ae6f8dcfa99c3767155aac7e48ffbebd42cb4daf0047b37eb572e
|
#!/usr/bin/env python
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# MSMTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""EMMA: Emma's Markov Model Algorithms
EMMA is an open source collection of algorithms implemented mostly in
`NumPy <http://www.numpy.org/>`_ and `SciPy <http://www.scipy.org>`_
to analyze trajectories generated from any kind of simulation
(e.g. molecular trajectories) via Markov state models (MSM).
"""
from __future__ import print_function, absolute_import
import sys
import os
import versioneer
import warnings
from io import open
DOCLINES = __doc__.split("\n")
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Environment :: MacOS X
Intended Audience :: Science/Research
License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)
Natural Language :: English
Operating System :: MacOS :: MacOS X
Operating System :: POSIX
Operating System :: Microsoft :: Windows
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Topic :: Scientific/Engineering :: Mathematics
Topic :: Scientific/Engineering :: Physics
"""
from setup_util import getSetuptoolsError, lazy_cythonize
try:
from setuptools import setup, Extension, find_packages
from pkg_resources import VersionConflict
except ImportError as ie:
print(getSetuptoolsError())
sys.exit(23)
###############################################################################
# Extensions
###############################################################################
def extensions():
"""How do we handle cython:
1. when on git, require cython during setup time (do not distribute
generated .c files via git)
a) cython present -> fine
b) no cython present -> install it on the fly. Extensions have to have .pyx suffix
This is solved via a lazy evaluation of the extension list. This is needed,
because build_ext is being called before cython will be available.
https://bitbucket.org/pypa/setuptools/issue/288/cannot-specify-cython-under-setup_requires
2. src dist install (have pre-converted c files and pyx files)
a) cython present -> fine
b) no cython -> use .c files
"""
USE_CYTHON = False
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
warnings.warn('Cython not found. Using pre cythonized files.')
# setup OpenMP support
from setup_util import detect_openmp
openmp_enabled, needs_gomp = detect_openmp()
import mdtraj
from numpy import get_include as _np_inc
np_inc = _np_inc()
exts = []
if sys.platform.startswith('win'):
lib_prefix = 'lib'
else:
lib_prefix = ''
regspatial_module = \
Extension('pyemma.coordinates.clustering.regspatial',
sources=[
'pyemma/coordinates/clustering/src/regspatial.c',
'pyemma/coordinates/clustering/src/clustering.c'],
include_dirs=[
mdtraj.capi()['include_dir'],
np_inc,
'pyemma/coordinates/clustering/include',
],
libraries=[lib_prefix+'theobald'],
library_dirs=[mdtraj.capi()['lib_dir']],
extra_compile_args=['-std=c99', '-g', '-O3', '-pg'])
kmeans_module = \
Extension('pyemma.coordinates.clustering.kmeans_clustering',
sources=[
'pyemma/coordinates/clustering/src/kmeans.c',
'pyemma/coordinates/clustering/src/clustering.c'],
include_dirs=[
mdtraj.capi()['include_dir'],
np_inc,
'pyemma/coordinates/clustering/include'],
libraries=[lib_prefix+'theobald'],
library_dirs=[mdtraj.capi()['lib_dir']],
extra_compile_args=['-std=c99'])
covar_module = \
Extension('pyemma.coordinates.estimators.covar.covar_c.covartools',
sources=['pyemma/coordinates/estimators/covar/covar_c/covartools.pyx',
'pyemma/coordinates/estimators/covar/covar_c/_covartools.c'],
include_dirs=['pyemma/coordinates/estimators/covar/covar_c/',
np_inc,
],
extra_compile_args=['-std=c99', '-O3'])
exts += [regspatial_module,
kmeans_module,
covar_module,
]
if not USE_CYTHON:
# replace pyx files by their pre generated c code.
for e in exts:
new_src = []
for s in e.sources:
new_src.append(s.replace('.pyx', '.c'))
e.sources = new_src
else:
exts = cythonize(exts)
if openmp_enabled:
warnings.warn('enabled openmp')
omp_compiler_args = ['-fopenmp']
omp_libraries = ['-lgomp'] if needs_gomp else []
omp_defines = [('USE_OPENMP', None)]
for e in exts:
e.extra_compile_args += omp_compiler_args
e.extra_link_args += omp_libraries
e.define_macros += omp_defines
return exts
def get_cmdclass():
versioneer_cmds = versioneer.get_cmdclass()
sdist_class = versioneer_cmds['sdist']
class sdist(sdist_class):
"""ensure cython files are compiled to c, when distributing"""
def run(self):
# only run if .git is present
if not os.path.exists('.git'):
print("Not on git, can not create source distribution")
return
try:
from Cython.Build import cythonize
print("cythonizing sources")
cythonize(extensions())
except ImportError:
warnings.warn('sdist cythonize failed')
return sdist_class.run(self)
versioneer_cmds['sdist'] = sdist
return versioneer_cmds
metadata = dict(
name='pyEMMA',
maintainer='Martin K. Scherer',
maintainer_email='m.scherer@fu-berlin.de',
author='The Emma team',
author_email='info@emma-project.org',
url='http://github.com/markovmodel/PyEMMA',
license='LGPLv3+',
description=DOCLINES[0],
long_description=open('README.rst', encoding='utf8').read(),
version=versioneer.get_version(),
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
classifiers=[c for c in CLASSIFIERS.split('\n') if c],
keywords='Markov State Model Algorithms',
# packages are found if their folder contains an __init__.py,
packages=find_packages(),
# install default emma.cfg into package.
package_data=dict(pyemma=['pyemma.cfg']),
cmdclass=get_cmdclass(),
tests_require=['nose'],
test_suite='nose.collector',
# runtime dependencies
install_requires=['numpy>=1.7.0',
'scipy>=0.11',
'mdtraj>=1.7.0',
'matplotlib',
'msmtools',
'thermotools>=0.2.0',
'bhmm>=0.6,<0.7',
'joblib>0.8.4',
'pyyaml',
'psutil>=3.1.1',
'decorator>=4.0.0',
],
zip_safe=False,
)
# this is only metadata and not used by setuptools
metadata['requires'] = ['numpy', 'scipy']
# not installing?
if len(sys.argv) == 1 or (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'clean'))):
pass
else:
# setuptools>=2.2 can handle setup_requires
metadata['setup_requires'] = ['numpy>=1.7.0',
'mdtraj>=1.7.0',
'nose',
]
if sys.version_info.major == 2:
        # kick it since it causes headaches with conda recently...
#metadata['install_requires'] += ['mock']
pass
metadata['package_data'] = {
'pyemma': ['pyemma.cfg', 'logging.yml'],
'pyemma.coordinates.tests': ['data/*'],
'pyemma.datasets': ['*.npz'],
'pyemma.util.tests': ['data/*'],
}
# when on git, we require cython
if os.path.exists('.git'):
warnings.warn('using git, require cython')
metadata['setup_requires'] += ['cython>=0.22']
# only require numpy and extensions in case of building/installing
metadata['ext_modules'] = lazy_cythonize(callback=extensions)
setup(**metadata)
|
gph82/PyEMMA
|
setup.py
|
Python
|
lgpl-3.0
| 9,557
|
[
"MDTraj"
] |
82c262023203058cd09e81df5c2b9d664b406d60e3318742a7ab9b1ef4268a54
|
import os
import numpy as np
from shutil import copy
from subprocess import call
import quantumpropagator.GeneralFunctionsSystem as gfs
def launchSPfromGeom(geom):
'''
    geom -> xyz file name (with path)
    Given an xyz file in a folder, this launches the corresponding Molcas calculation.
'''
folder_path = ('.').join(geom.split('.')[:-1])
gfs.ensure_dir(folder_path)
copy(geom,folder_path)
projectN = ('.').join(os.path.basename(geom).split('.')[:-1])
inputname = folder_path + "/" + projectN + ".input"
writeRassiLiHInput(inputname)
with gfs.cd(folder_path):
call(["/home/alessio/bin/LaunchMolcas", inputname])
def writeRassiLiHInput(inputname):
content = """ >>> LINK FORCE $Project.JobOld JOBOLD
&Gateway
coord=$Project.xyz
basis=6-31PPGDD
group=NoSym
&Seward
&Rasscf
nactel = 4 0 0
ras2 = 20
inactive = 0
ciroot = 9 9 1
prwf = 0.0
&Rassi
mees
&grid_it
all
>> COPY $Project.rassi.h5 $HomeDir
>> COPY $Project.JobIph $HomeDir
"""
with open(inputname, 'w') as f:
f.write(content)
def LiHxyz(folder,distance,label):
lihxyz = """ 2
Li 0.00000000 0.00000000 0.00000000
H {distance:5.8f} 0.00000000 0.00000000
"""
context = {"distance":distance}
fnN = folder + 'LiH' + label + '.xyz'
with open(fnN, 'w') as f:
f.write(lihxyz.format(**context))
def WaterXyz(folder,distance,label):
watxyz = """ 3
O 0.000000 0.000000 0.000000
H {distance:7.6f} 0.000000 0.000000
H -0.251204 -0.905813 0.000000
"""
return watxyz
def generateLiHxyz(outfolder, rangearg):
doubleList = list(np.arange(*rangearg).tolist())
for tup in enumerate(doubleList):
(label,dist) = tup
label3 = '%03i' % label
LiHxyz(outfolder, dist, label3)
def generateWater(outfolder, rangearg):
doubleList = list(np.arange(*rangearg).tolist())
for tup in enumerate(doubleList):
(label,dist) = tup
label3 = '%03i' % label
WaterXyz(outfolder, dist, label3)
if __name__ == "__main__":
print('lol!!!')
# import quantumpropagator.h5Reader as hf
# import quantumpropagator.GeneralFunctions as gf
# fn = 'Grid_119.648_000.000.rassi.h5'
# overlapsM = hf.retrieve_hdf5_data(fn, 'ORIGINAL_OVERLAPS')
# (dim, _ ) = overlapsM.shape
# nstates = dim // 2
# overlaps = overlapsM[nstates:,:nstates]
# gf.printMatrix2D(overlaps,2)
# arrayOneD = compressColumnOverlap(overlaps)
# correctionMatrix = gf.createTabellineFromArray(arrayOneD)
# print(arrayOneD)
# print(correctionMatrix)
#generateLiHxyz('XyzS/', (0.7,4.0,0.1))
#fns = sorted(glob.glob('XyzS/*'))
#for fileN in fns:
# launchSPfromGeom(fileN)
|
acuzzio/GridQuantumPropagator
|
src/quantumpropagator/molcas.py
|
Python
|
gpl-3.0
| 2,768
|
[
"MOLCAS"
] |
00cbf8527be862db7764c31dc18ab4a2d612cf75becf3d1929fbe3564b8db867
|
# This is the tutorial script.
# Most icons used in this script are from the Tango icon library (http://tango.freedesktop.org/)
# Created by Toni Sagrista
from __future__ import division
from gaia.cu9.ari.gaiaorbit.script import EventScriptingInterface
headerSize = 25
textSize = 13
twdelay = 0.01
arrowH = 7
gs = EventScriptingInterface.instance()
version = gs.getVersionNumber()
"""
Prints a notice on the screen and waits for any input.
y - y coordinate of the notice in [0..1], from bottom to top
idsToRemove - List with the ids to remove when input is received.
"""
def wait_input(y, idsToRemove):
waitid = 3634
gs.displayMessageObject(waitid, "Press any key to continue...", 0.6, y, 0.9, 0.9, 0.0, 1.0, 15)
gs.waitForInput()
gs.removeObjects(idsToRemove)
gs.removeObject(waitid)
"""
Adds arrow to screen.
id - The id of the arrow.
x - x coordinate of bottom-left corner in pixels, from left to right.
y - y coordinate of bottom-left corner in pixels, from bottom to top.
"""
def arrow(id, x, y):
w = gs.getScreenWidth()
h = gs.getScreenHeight()
gs.displayImageObject(id, "scripts/tutorial/arrow-left.png", x / w, y / h, 1.0, 1.0, 0.0, 1.0)
"""
Adds a small arrow to screen.
id - The id of the arrow.
x - x coordinate of bottom-left corner in pixels, from left to right.
y - y coordinate of bottom-left corner in pixels, from bottom to top.
args - optional, red, green, blue and alpha components of color.
"""
def arrow_small(id, x, y, *args):
w = gs.getScreenWidth()
h = gs.getScreenHeight()
if(len(args) > 0):
gs.displayImageObject(id, "scripts/tutorial/arrow-left-s.png", x / w, y / h, args[0], args[1], args[2], args[3])
else:
gs.displayImageObject(id, "scripts/tutorial/arrow-left-s.png", x / w, y / h, 1.0, 1.0, 0.0, 1.0)
"""
Adds a message in the given position and with the given color
x - x coordinate of bottom-left corner in pixels, from left to right.
y - y coordinate of bottom-left corner in pixels, from bottom to top.
"""
def message(id, msg, x, y, color, size):
w = gs.getScreenWidth()
h = gs.getScreenHeight()
gs.displayMessageObject(id, msg, x / w, y / h, color[0], color[1], color[2], color[3], size)
"""
Creates a typewriter effect where text appears one letter at a time.
The parameter twdelay indicates the time in seconds between each letter.
"""
def typewriter(id, text, x, y, width, height, r, g, b, a, twdelay):
buffer = ""
gs.displayTextObject(id, "", x, y, width, height, r, g, b, a, textSize)
for letter in text:
buffer += letter
gs.displayTextObject(id, buffer, x, y, width, height, r, g, b, a, textSize)
gs.sleep(twdelay)
gs.preloadTextures("scripts/tutorial/arrow-left-s.png", "scripts/tutorial/gaia.png", "scripts/tutorial/clock.png", "scripts/tutorial/camera.png", "scripts/tutorial/visibility.png", "scripts/tutorial/light.png", "scripts/tutorial/preferences.png", "scripts/tutorial/globe.png")
# Disable input and prepare
gs.disableInput()
gs.cameraStop()
gs.stopSimulationTime()
gs.setFov(50.0)
gs.setCinematicCamera(True)
gs.minimizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.goToObject("Earth")
#
# WELCOME
#
gs.displayMessageObject(0, "Gaia Sky tutorial", 0.3, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
typewriter(1, "Welcome! In this tutorial you will learn the basic functionality and interface of this application in an interactive mode.\nHang on, it is starting...", 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.75, [0, 1])
gs.sleep(1.0)
#
# GENERAL NAVIGATION
#
gs.displayImageObject(0, "scripts/tutorial/gaia.png", 0.25, 0.88)
gs.displayMessageObject(1, "General navigation", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
typewriter(2, 'Gaia Sky (' + version + ') is an interactive 3D visualisation application to explore the Galaxy in space and time.', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
wait_input(0.75, [])
typewriter(3, 'You can move around using the mouse and keyboard (or touch screen if applicable), clicking and dragging the LEFT MOUSE button to rotate the scene and using the SCROLL WHEEL to zoom in and out.', 0.3, 0.7, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
wait_input(0.65, [2, 3])
gs.sleep(1.0)
gs.setRotationCameraSpeed(20.0)
typewriter(2, 'For example, right...', 0.3, 0.75, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.cameraRotate(-0.5, 0)
gs.sleep(3.0)
gs.cameraStop()
gs.sleep(1.0)
typewriter(3, 'And left', 0.3, 0.73, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.cameraRotate(0.5, 0)
gs.sleep(3.0)
gs.cameraStop()
gs.sleep(1.0)
typewriter(4, 'Up...', 0.3, 0.71, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.cameraRotate(0, -0.5)
gs.sleep(3.0)
gs.cameraStop()
gs.sleep(1.0)
typewriter(5, 'And down', 0.3, 0.69, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.cameraRotate(0, 0.5)
gs.sleep(3.0)
gs.cameraStop()
gs.sleep(1.0)
gs.removeObjects([2, 3, 4, 5])
gs.setCameraSpeed(20.0)
typewriter(2, 'Using the MOUSE WHEEL, we can also move away from Earth...', 0.3, 0.75, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.cameraForward(-1.0)
gs.sleep(2.0)
gs.cameraStop()
gs.sleep(1.0)
typewriter(3, 'Or zoom back in', 0.3, 0.73, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.goToObject("Earth")
gs.cameraStop()
gs.sleep(1.0)
wait_input(0.69, [2, 3])
typewriter(2, 'We can roll the camera by holding LEFT SHIFT and dragging the LEFT MOUSE button', 0.3, 0.75, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
typewriter(3, 'Like this', 0.3, 0.71, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.cameraRoll(0.2)
gs.sleep(2.5)
gs.cameraStop()
gs.sleep(1.0)
gs.cameraRoll(-0.2)
gs.sleep(2.5)
gs.cameraStop()
wait_input(0.69, [2, 3])
gs.setTurningCameraSpeed(2)
typewriter(2, 'We can also look away from the focus by dragging the RIGHT MOUSE button', 0.3, 0.75, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
typewriter(3, 'Like this', 0.3, 0.69, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.cameraTurn(1.0, 0)
gs.sleep(2.5)
gs.setTurningCameraSpeed(40)
gs.cameraCenter()
gs.goToObject("Earth")
gs.cameraStop()
typewriter(2, 'Finally, to select a different focus, just DOUBLE CLICK on it', 0.3, 0.75, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
typewriter(3, 'Let\'s change the focus a couple of times...', 0.3, 0.69, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(2.5)
gs.setCameraFocus("Betelgeuse")
gs.sleep(4.5)
gs.setCameraFocus("Mars")
gs.sleep(4.5)
gs.setCameraFocus("Sol")
gs.sleep(4.5)
gs.setCameraFocus("Earth")
gs.sleep(4.5)
gs.cameraStop()
gs.cameraCenter()
wait_input(0.69, [2, 3])
typewriter(2, 'Let us now take a look at the time simulation', 0.3, 0.75, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
wait_input(0.69, [0, 1, 2])
#
# TIME
#
gs.maximizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.setGuiScrollPosition(0)
gs.displayImageObject(0, "scripts/tutorial/clock.png", 0.25, 0.88)
gs.displayMessageObject(1, "Time simulation", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
# Play/pause
posize = gs.getPositionAndSizeGui("play stop")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, 'The time can be paused and resumed using the PLAY/PAUSE button next to the title.\nIt also indicates whether the time is currently activated or not.', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.75, [100, 2])
gs.sleep(1.0)
# Pace
posize = gs.getPositionAndSizeGui("plus")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, "The speed of the simulation time is governed by the warp (shown in the text field 'pace'). You can modify the warp by using the + and - buttons. You can also use the shortcuts ',' and '.'.", 0.3, 0.7, 0.6, 0.15, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.7, [100, 2])
# Date
posize = gs.getPositionAndSizeGui("input time")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, "The current date is displayed in the date field marked by the arrow. You can toggle the time by pressing SPACE", 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.75, [100, 2])
# Demo
gs.goToObject("Earth")
typewriter(2, "Let's test it. We'll start the time simulation now.", 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.75, [2])
gs.setSimulationPace(1)
gs.startSimulationTime()
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH, 1.0, 0.0, 0.0, 1.0)
typewriter(2, "The time is running, check the red arrow!", 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(5.0)
gs.stopSimulationTime()
gs.removeObject(2)
typewriter(2, "Time stopped!", 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
typewriter(3, "We can also run the time backwards. Let's set a negative pace", 0.3, 0.73, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("time warp")
typewriter(4, "Check the pace value as we set it to -1 and start the simulation again", 0.3, 0.71, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
arrow_small(101, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
gs.sleep(1.0)
gs.setSimulationPace(-1)
wait_input(0.75, [100, 2])
gs.startSimulationTime()
gs.sleep(4.0)
# Restore pace
gs.setSimulationPace(0.1)
gs.stopSimulationTime()
wait_input(0.75, [2, 3, 4, 100, 101])
typewriter(2, "Let's now find out about the camera modes", 0.3, 0.75, 0.6, 0.08, 1.0, 1.0, 1.0, 1.0, twdelay)
wait_input(0.75, [0, 1, 2])
#
# CAMERA
#
gs.maximizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.setGuiScrollPosition(0)
gs.expandGuiComponent("CameraComponent")
gs.displayImageObject(0, "scripts/tutorial/camera.png", 0.25, 0.88)
gs.displayMessageObject(1, "Camera", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
# Modes
posize = gs.getPositionAndSizeGui("camera mode")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, 'There are 5 camera modes:\n -Free camera\n -Focus camera\n -Gaia scene\n -Spacecraft\n -Field of view', 0.3, 0.60, 0.6, 0.25, 1.0, 1.0, 1.0, 1.0, twdelay)
typewriter(3, 'You can select the camera mode using the select box or with the keys 0-4 in the numeric keypad', 0.3, 0.55, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.45, [100, 2, 3])
typewriter(2, 'You can use the sliders to change the camera field of view', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("field of view")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
gs.sleep(2.0)
typewriter(3, 'The camera speed', 0.3, 0.73, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("camera speed")
arrow_small(101, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
gs.sleep(2.0)
typewriter(4, 'The camera rotation speed', 0.3, 0.71, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("rotate speed")
arrow_small(102, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
gs.sleep(2.0)
typewriter(5, 'And the camera turning speed', 0.3, 0.69, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("turn speed")
arrow_small(103, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
gs.sleep(2.0)
wait_input(0.69, [100, 101, 102, 103, 2, 3, 4, 5])
gs.sleep(1.0)
typewriter(2, 'You can also lock the camera to the focus. This means that the motion of the camera is locked to that of the object, so that it follows it around everywhere', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("focus lock")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
gs.sleep(1.0)
wait_input(0.69, [100, 2])
gs.sleep(1.0)
typewriter(2, 'You can play with that later, let\'s now see the objects pane', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.75, [0, 1, 2])
#
# OBJECTS
#
gs.maximizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.setGuiScrollPosition(500)
gs.expandGuiComponent("ObjectsComponent")
gs.sleep(1.0)
gs.displayImageObject(0, "scripts/tutorial/globe.png", 0.25, 0.88)
gs.displayMessageObject(1, "Objects pane", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
posize = gs.getPositionAndSizeGui("objects list scroll")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, 'You can select the current focus using the objects list.\nHowever, it only contains objects with proper names.', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
typewriter(3, 'You can also select a focus by DOUBLE CLICKING on it.', 0.3, 0.65, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.65, [100, 2, 3])
posize = gs.getPositionAndSizeGui("search box")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, 'Finally, you can use the search box highlighted by the arrow to search for any object by name. You can also get a search dialog with the shortcut CTRL+F.', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.enableInput()
doneko = True
while doneko:
gs.removeObject(4)
typewriter(3, 'Try it now, search for \'Betelgeuse\'', 0.3, 0.69, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
doneko = gs.waitFocus("Betelgeuse", 18000)
if doneko:
typewriter(4, 'Cmon, you can do it! You just needed to press CTRL+F and type Betelgeuse. Let\'s try again...', 0.3, 0.65, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(3.0)
gs.removeObjects([100, 3, 4])
gs.disableInput()
gs.sleep(3.0)
typewriter(3, 'Well done! Lets move forward', 0.3, 0.69, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
gs.setCameraFocus("Earth")
wait_input(0.65, [100, 0, 1, 2, 3])
gs.removeAllObjects()
#
# VISIBILITY
#
gs.maximizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.setGuiScrollPosition(500)
gs.expandGuiComponent("VisibilityComponent")
gs.sleep(1.0)
gs.displayImageObject(0, "scripts/tutorial/visibility.png", 0.25, 0.88)
gs.displayMessageObject(1, "Object visibility", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
posize = gs.getPositionAndSizeGui("visibility table")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, 'You can toggle the visibility of object types on and off using these buttons by the yellow arrow', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
typewriter(3, 'For example, we can switch off planets...', 0.3, 0.69, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
gs.setVisibility("element.planets", False)
gs.sleep(2.5)
typewriter(4, 'And stars too', 0.3, 0.67, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
gs.setVisibility("element.stars", False)
gs.sleep(1.0)
wait_input(0.65, [3, 4])
gs.sleep(1.0)
typewriter(3, 'And we can re-enable them again', 0.3, 0.69, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
gs.setVisibility("element.planets", True)
gs.setVisibility("element.stars", True)
gs.sleep(1.0)
wait_input(0.65, [2, 3])
gs.sleep(1.0)
typewriter(2, 'The visibility of elements can also be toggled using keyboard shortcuts. For example\nL-SHIFT+P - Planets\nL-SHIFT+O - Orbits\nL-SHIFT+C - Constellations\nL-SHIFT+S - Stars\netc.', 0.3, 0.71, 0.6, 0.15, 1.0, 1.0, 1.0, 1.0, twdelay)
wait_input(0.65, [100, 0, 1, 2])
gs.removeAllObjects()
#
# LIGHTING
#
gs.maximizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.setGuiScrollPosition(500)
gs.expandGuiComponent("VisualEffectsComponent")
gs.sleep(1.0)
gs.displayImageObject(0, "scripts/tutorial/light.png", 0.25, 0.88)
gs.displayMessageObject(1, "Scene lighting", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
typewriter(2, 'We are about to finish', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
posize = gs.getPositionAndSizeGui("star brightness")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
posize = gs.getPositionAndSizeGui("ambient light")
arrow_small(101, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
posize = gs.getPositionAndSizeGui("bloom effect")
arrow_small(102, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(3, 'We can adjust some lighting parameters such as:', 0.3, 0.69, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
typewriter(4, 'The star brightness', 0.3, 0.67, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("star brightness")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH, 1.0, 0.0, 0.0, 1.0)
gs.sleep(2.0)
typewriter(5, 'The ambient light (affects the shadowed parts of planets and bodies)', 0.3, 0.65, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("star brightness")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
posize = gs.getPositionAndSizeGui("ambient light")
arrow_small(101, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH, 1.0, 0.0, 0.0, 1.0)
gs.sleep(2.0)
typewriter(6, 'The bloom post-processing effect', 0.3, 0.63, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
posize = gs.getPositionAndSizeGui("ambient light")
arrow_small(101, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
posize = gs.getPositionAndSizeGui("bloom effect")
arrow_small(102, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH, 1.0, 0.0, 0.0, 1.0)
gs.sleep(2.0)
wait_input(0.63, [100, 101, 102, 0, 1, 2, 3, 4, 5, 6])
gs.removeAllObjects()
#
# GAIA
#
gs.maximizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.setGuiScrollPosition(500)
gs.expandGuiComponent("GaiaComponent")
gs.sleep(1.0)
gs.displayImageObject(0, "scripts/tutorial/gaia.png", 0.25, 0.88)
gs.displayMessageObject(1, "Gaia scan options", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
gs.goToObject("Gaia")
typewriter(2, 'Finally, there are three controls for managing the simulation of the Gaia sky scan', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.69, [2])
gs.sleep(1.0)
posize = gs.getPositionAndSizeGui("compute gaia scan")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, 'The \'Enable Gaia scan\' checkbox enables the computation of the scanned stars in real time', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.69, [100, 2])
gs.sleep(1.0)
posize = gs.getPositionAndSizeGui("transit color")
arrow_small(100, posize[0] + posize[2], posize[1] + posize[3] / 2 - arrowH)
typewriter(2, 'Then, you can enable the coloring of stars depending on how many times they have been observed', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.69, [100, 2])
gs.sleep(1.0)
typewriter(2, 'Remember to enable the simulation time so that Gaia actually moves when trying this out!', 0.3, 0.75, 0.6, 0.1, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
wait_input(0.69, [100, 0, 1, 2])
gs.sleep(1.0)
#
# FINAL REMARKS
#
gs.minimizeInterfaceWindow()
gs.setGuiPosition(0, 1)
gs.goToObject("Earth")
gs.sleep(1.0)
gs.displayImageObject(0, "scripts/tutorial/preferences.png", 0.25, 0.88)
gs.displayMessageObject(1, "Just one more thing...", 0.33, 0.9, 1.0, 0.7, 0.0, 1.0, headerSize)
gs.sleep(1.0)
typewriter(2, 'If you need more detailed information on this software and/or how to perform advanced tasks (such as scripting, image outputting, etc.) you can read the online documentation in http://gaia-sky.rtfd.io. Also, feel free to send us any bug reports or suggestions. We are always happy to receive them.\nNow, finally, enjoy the application!', 0.3, 0.69, 0.6, 0.15, 1.0, 1.0, 1.0, 1.0, twdelay)
gs.sleep(1.0)
gs.displayMessageObject(90, "Press any key to finish", 0.6, 0.65, 0.9, 0.9, 0.0, 1.0, 15)
gs.waitForInput()
# Restore input and interface
gs.removeAllObjects()
gs.enableInput()
gs.maximizeInterfaceWindow()
|
vga101/gaiasky
|
assets/scripts/tutorial/tutorial.py
|
Python
|
mpl-2.0
| 19,757
|
[
"Galaxy"
] |
8c49d1253b3a70c90af3b7da34d6da0fd1b06a12467449f3f8e3c4bafa49b3ce
|
#######################################################################
#
# Copyright (C) 2001-2014, Michele Cappellari
# E-mail: cappellari_at_astro.ox.ac.uk
#
# This software is provided as is without any warranty whatsoever.
# Permission to use, for non-commercial purposes is granted.
# Permission to modify for personal or internal use is granted,
# provided this copyright and disclaimer are included unchanged
# at the beginning of the file. All other rights are reserved.
#
#######################################################################
#
# NAME:
# LOG_REBIN
#
# PURPOSE:
# Logarithmically rebin a spectrum, while rigorously conserving the flux.
# Basically the photons in the spectrum are simply redistributed according
# to a new grid of pixels, with non-uniform size in the spectral direction.
#
# This routine makes the `standard' zero-order assumption that the spectrum
# is *constant* within each pixel. It is possible to perform log-rebinning
# by assuming the spectrum is represented by a piece-wise polynomial of
# higher degree, while still obtaining a uniquely defined linear problem,
# but this reduces to a deconvolution and amplifies noise.
#
# This same routine can be used to compute approximate errors
# of the log-rebinned spectrum. To do this type the command
#
# LOG_REBIN, lamRange, err^2, err2New
#
# and the desired errors will be given by SQRT(err2New).
# NB: This rebinning of the error-spectrum is very *approximate* as
# it does not consider the correlation introduced by the rebinning!
#
# CALLING SEQUENCE:
# LOG_REBIN, lamRange, spec, specNew, logLam, $
# OVERSAMPLE=oversample, VELSCALE=velScale, /FLUX
#
# INPUTS:
# LAMRANGE: two elements vector containing the central wavelength
# of the first and last pixels in the spectrum, which is assumed
# to have constant wavelength scale! E.g. from the values in the
# standard FITS keywords: LAMRANGE = CRVAL1 + [0,CDELT1*(NAXIS1-1)].
# It must be LAMRANGE[0] < LAMRANGE[1].
# SPEC: input spectrum.
#
# OUTPUTS:
# SPECNEW: logarithmically rebinned spectrum.
# LOGLAM: log(lambda) (*natural* logarithm: ALOG) of the central
# wavelength of each pixel. This is the log of the geometric
# mean of the borders of each pixel.
#
# KEYWORDS:
# FLUX: Set this keyword to preserve total flux. In this case the
# log rebinning changes the pixels flux in proportion to their
# dLam so the following command will show large differences
# beween the spectral shape before and after LOG_REBIN:
#
# plot, exp(logLam), specNew # Plot log-rebinned spectrum
# oplot, range(lamRange[0],lamRange[1],n_elements(spec)), spec
#
#              By default, when this keyword is *not* set, the above two lines
#              produce two spectra that almost perfectly overlap each other.
#       OVERSAMPLE: Oversampling can be done, not to lose spectral resolution,
#              especially for extended wavelength ranges and to avoid aliasing.
#              Default: OVERSAMPLE=1 ==> Same number of output pixels as input.
#       VELSCALE: velocity scale in km/s per pixel. If this variable is
# not defined, then it will contain in output the velocity scale.
# If this variable is defined by the user it will be used
# to set the output number of pixels and wavelength scale.
#
# MODIFICATION HISTORY:
# V1.0.0: Using interpolation. Michele Cappellari, Leiden, 22 October 2001
# V2.0.0: Analytic flux conservation. MC, Potsdam, 15 June 2003
# V2.1.0: Allow a velocity scale to be specified by the user.
# MC, Leiden, 2 August 2003
# V2.2.0: Output the optional logarithmically spaced wavelength at the
# geometric mean of the wavelength at the border of each pixel.
# Thanks to Jesus Falcon-Barroso. MC, Leiden, 5 November 2003
# V2.2.1: Verify that lamRange[0] < lamRange[1].
# MC, Vicenza, 29 December 2004
# V2.2.2: Modified the documentation after feedback from James Price.
# MC, Oxford, 21 October 2010
# V2.3.0: By default now preserve the shape of the spectrum, not the
# total flux. This seems what most users expect from the procedure.
# Set the keyword /FLUX to preserve flux like in previous version.
# MC, Oxford, 30 November 2011
# V3.0.0: Translated from IDL into Python. MC, Santiago, 23 November 2013
# V3.1.0: Fully vectorized log_rebin. Typical speed up by two orders of magnitude.
# MC, Oxford, 4 March 2014
#
#----------------------------------------------------------------------
from __future__ import print_function
import numpy as np
def log_rebin(lamRange, spec, oversample=False, velscale=None, flux=False):
"""
Logarithmically rebin a spectrum, while rigorously conserving the flux.
    Basically the photons in the spectrum are simply redistributed according
to a new grid of pixels, with non-uniform size in the spectral direction.
"""
lamRange = np.asarray(lamRange)
if len(lamRange) != 2:
raise ValueError('lamRange must contain two elements')
if lamRange[0] >= lamRange[1]:
raise ValueError('It must be lamRange[0] < lamRange[1]')
s = spec.shape
if len(s) != 1:
raise ValueError('input spectrum must be a vector')
n = s[0]
if oversample:
m = int(n*oversample)
else:
m = int(n)
dLam = np.diff(lamRange)/(n - 1.) # Assume constant dLam
lim = lamRange/dLam + [-0.5, 0.5] # All in units of dLam
borders = np.linspace(*lim, num=n+1) # Linearly
logLim = np.log(lim)
c = 299792.458 # Speed of light in km/s
    if velscale is None:              # Velocity scale not set by user
velscale = np.diff(logLim)/m*c # Only for output
else:
logScale = velscale/c
m = int(np.diff(logLim)/logScale) # Number of output pixels
logLim[1] = logLim[0] + m*logScale
newBorders = np.exp(np.linspace(*logLim, num=m+1)) # Logarithmically
k = (newBorders - lim[0]).clip(0, n-1).astype(int)
specNew = np.add.reduceat(spec, k)[:-1] # Do analytic integral
specNew *= np.diff(k) > 0 # fix for design flaw of reduceat()
specNew += np.diff((newBorders - borders[k])*spec[k])
if not flux:
specNew /= np.diff(newBorders)
# Output log(wavelength): log of geometric mean
logLam = np.log(np.sqrt(newBorders[1:]*newBorders[:-1])*dLam)
return specNew, logLam, velscale
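#   Hedged usage sketch (editor addition, not part of the original pPXF
#   distribution): shows how log_rebin might be called on a toy, linearly
#   sampled spectrum. The wavelength range and the Gaussian absorption line
#   below are illustrative assumptions only.
def _example_log_rebin():
    """Minimal demonstration of log_rebin on a synthetic spectrum."""
    lamRange = [4800.0, 5600.0]           # assumed wavelength range in Angstrom
    wave = np.linspace(lamRange[0], lamRange[1], 800)
    spec = 1.0 - 0.5*np.exp(-0.5*((wave - 5175.0)/8.0)**2)   # toy absorption line
    specNew, logLam, velscale = log_rebin(lamRange, spec)
    # By default the spectral *shape* is preserved, so specNew plotted against
    # exp(logLam) overlaps the original spectrum plotted against wave.
    return specNew, logLam, velscale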
#----------------------------------------------------------------------
#
# PPXF_DETERMINE_GOODPIXELS: Example routine to generate the vector of goodPixels
# to be used as input keyword for the routine PPXF. This is useful to mask
# gas emission lines or atmospheric absorptions.
# It can be trivially adapted to mask different lines.
#
# INPUT PARAMETERS:
# - LOGLAM: Natural logarithm ALOG(wave) of the wavelength in Angstrom
# of each pixel of the log rebinned *galaxy* spectrum.
# - LAMRANGETEMP: Two elements vectors [lamMin2,lamMax2] with the minimum and
# maximum wavelength in Angstrom in the stellar *template* used in PPXF.
# - VEL: Estimate of the galaxy velocity in km/s.
#
# V1.0.0: Michele Cappellari, Leiden, 9 September 2005
# V1.0.1: Made a separate routine and included additional common emission lines.
# MC, Oxford 12 January 2012
# V2.0.0: Translated from IDL into Python. MC, Oxford, 10 December 2013
# V2.0.1: Updated line list. MC, Oxford, 8 January 2014
def determine_goodpixels(logLam, lamRangeTemp, vel):
"""
Generates a list of goodpixels to mask a given set of gas emission
lines. This is meant to be used as input for PPXF.
"""
#                     -----[OII]-----    Hdelta   Hgamma   Hbeta  -----[OIII]-----  [OI]  -----[NII]-----  Halpha  -----[SII]-----
lines = np.array([3726.03, 3728.82, 4101.76, 4340.47, 4861.33, 4958.92,
5006.84, 6300.30, 6548.03, 6583.41, 6562.80, 6716.47, 6730.85])
dv = lines*0 + 800 # width/2 of masked gas emission region in km/s
c = 299792.458 # speed of light in km/s
flag = logLam < 0 # empty mask
for j in range(lines.size):
flag |= (logLam > np.log(lines[j]) + (vel - dv[j])/c) \
& (logLam < np.log(lines[j]) + (vel + dv[j])/c)
flag |= logLam < np.log(lamRangeTemp[0]) + (vel + 900.)/c # Mask edges of
flag |= logLam > np.log(
lamRangeTemp[1]) + (vel - 900.)/c # stellar library
return np.where(flag == 0)[0]
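#   Hedged usage sketch (editor addition): combining the log_rebin output with
#   determine_goodpixels. The stellar-template wavelength range and the galaxy
#   velocity guess below are illustrative assumptions, not values taken from
#   the original module.
def _example_goodpixels():
    """Build a goodpixels mask for a log-rebinned toy spectrum."""
    lamRange_gal = [4800.0, 5600.0]
    specNew, logLam, velscale = log_rebin(lamRange_gal, np.ones(800))
    lamRangeTemp = [3540.0, 7410.0]       # assumed stellar-library coverage (Angstrom)
    vel = 1500.0                          # rough galaxy velocity guess in km/s
    return determine_goodpixels(logLam, lamRangeTemp, vel)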
#------------------------------------------------------------------------------
# V1.0.0: Michele Cappellari, Oxford, 7 January 2014
# V1.1.0: Fixes [OIII] and [NII] doublets to the theoretical flux ratio.
# Returns line names together with emission lines templates.
# MC, Oxford, 3 August 2014
# V1.1.1: Only returns lines included within the estimated fitted wavelength range.
# This avoids identically zero gas templates being included in the PPXF fit
#           which can cause numerical instabilities in the solution of the system.
# MC, Oxford, 3 September 2014
def emission_lines(logLam_temp, lamRange_gal, FWHM_gal):
"""
Generates an array of Gaussian emission lines to be used as templates in PPXF.
Additional lines can be easily added by editing this procedure.
- logLam_temp is the natural log of the wavelength of the templates in Angstrom.
logLam_temp should be the same as that of the stellar templates.
- lamRange_gal is the estimated rest-frame fitted wavelength range
Typically lamRange_gal = np.array([np.min(wave), np.max(wave)])/(1 + z),
where wave is the observed wavelength of the fitted galaxy pixels and
z is an initial very rough estimate of the galaxy redshift.
    - FWHM_gal is the instrumental FWHM of the galaxy spectrum under study in
      Angstrom. Here it is assumed constant. It could be a function of wavelength.
- The [OI], [OIII] and [NII] doublets are fixed at theoretical flux ratio~3.
"""
lam = np.exp(logLam_temp)
# Assumes instrumental sigma is constant in Angstrom
sigma = FWHM_gal/2.355
# Balmer Series: Hdelta Hgamma Hbeta Halpha
line_wave = np.array([4101.76, 4340.47, 4861.33, 6562.80])
line_names = np.array(['Hdelta', 'Hgamma', 'Hbeta', 'Halpha'])
emission_lines = np.exp(-0.5*((lam[:, np.newaxis] - line_wave)/sigma)**2)
# -----[OII]----- -----[SII]-----
lines = np.array([3726.03, 3728.82, 6716.47, 6730.85])
names = np.array(['[OII]3726', '[OII]3729', '[SII]6716', '[SII]6731'])
gauss = np.exp(-0.5*((lam[:, np.newaxis] - lines)/sigma)**2)
emission_lines = np.column_stack([emission_lines, gauss])
line_names = np.append(line_names, names)
line_wave = np.append(line_wave, lines)
# -----[OIII]-----
lines = np.array([4958.92, 5006.84])
doublet = np.exp(-0.5*((lam - lines[1])/sigma)**2) + \
0.35*np.exp(-0.5*((lam - lines[0])/sigma)**2)
emission_lines = np.column_stack([emission_lines, doublet])
# single template for this doublet
line_names = np.append(line_names, '[OIII]5007d')
line_wave = np.append(line_wave, lines[1])
# -----[OI]-----
lines = np.array([6363.67, 6300.30])
doublet = np.exp(-0.5*((lam - lines[1])/sigma)**2) + \
0.33*np.exp(-0.5*((lam - lines[0])/sigma)**2)
emission_lines = np.column_stack([emission_lines, doublet])
# single template for this doublet
line_names = np.append(line_names, '[OI]6300d')
line_wave = np.append(line_wave, lines[1])
# -----[NII]-----
lines = np.array([6548.03, 6583.41])
doublet = np.exp(-0.5*((lam - lines[1])/sigma)**2) + \
0.34*np.exp(-0.5*((lam - lines[0])/sigma)**2)
emission_lines = np.column_stack([emission_lines, doublet])
# single template for this doublet
line_names = np.append(line_names, '[NII]6583d')
line_wave = np.append(line_wave, lines[1])
# Only include lines falling within the estimated fitted wavelength range.
# This is important to avoid instabilities in the PPXF system solution
#
w = (line_wave > lamRange_gal[0]) & (line_wave < lamRange_gal[1])
emission_lines = emission_lines[:, w]
line_names = line_names[w]
line_wave = line_wave[w]
print('Emission lines included in gas templates:')
print(line_names)
return emission_lines, line_names, line_wave
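#   Hedged usage sketch (editor addition): generating gas templates on the same
#   logarithmic grid as the stellar templates. The FWHM, redshift and wavelength
#   ranges are assumptions chosen only to make the example run.
def _example_emission_lines():
    """Create Gaussian emission-line templates for a toy wavelength grid."""
    lamRange_temp = [3500.0, 7400.0]
    _, logLam_temp, _ = log_rebin(lamRange_temp, np.ones(2000))
    z = 0.005                                        # rough redshift guess
    lamRange_gal = np.array([4800.0, 6800.0])/(1 + z)
    FWHM_gal = 2.6                                   # assumed instrumental FWHM (Angstrom)
    return emission_lines(logLam_temp, lamRange_gal, FWHM_gal)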
#------------------------------------------------------------------------------
|
zpace/MaNGA-fitting
|
ppxf_util.py
|
Python
|
mit
| 12,458
|
[
"Galaxy",
"Gaussian"
] |
37d499dff64fd5825c0a9bebf152b3da4a52d1a8da94a89eb6241a2492d12f8c
|
# -*- coding: utf-8 -*-
'''
param_file.py
'''
import numpy as np
import logging
from .log import LOG_NAME
from .write import write_param_file
from .share import NcGlobals, SECSPERDAY, MAX_NC_CHARS
from .pycompat import iteritems, pyrange, pyzip
from . import plots
import os
from datetime import date
# -------------------------------------------------------------------- #
# create logger
log = logging.getLogger(LOG_NAME)
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Wrap up functions to finish the parameter file
def finish_params(outlets, dom_data, config_dict, directories):
'''
Adjust the unit hydrographs and pack for parameter file
'''
options = config_dict['OPTIONS']
routing = config_dict['ROUTING']
domain = config_dict['DOMAIN']
dom_area = domain['AREA_VAR']
dom_frac = domain['FRACTION_VAR']
if not len(outlets) > 0:
raise ValueError('outlets in finish_params are empty')
# ------------------------------------------------------------ #
# netCDF variable options
ncvaropts = {}
if 'NETCDF_ZLIB' in options:
ncvaropts['zlib'] = options['NETCDF_ZLIB']
if 'NETCDF_COMPLEVEL' in options:
ncvaropts['complevel'] = options['NETCDF_COMPLEVEL']
if 'NETCDF_SIGFIGS' in options:
ncvaropts['least_significant_digit'] = options['NETCDF_SIGFIGS']
# ------------------------------------------------------------ #
# ---------------------------------------------------------------- #
# subset (shorten time base)
if options['SUBSET_DAYS'] and \
options['SUBSET_DAYS'] < routing['BASIN_FLOWDAYS']:
subset_length = int(options['SUBSET_DAYS'] *
SECSPERDAY / routing['OUTPUT_INTERVAL'])
outlets, full_time_length, \
before, after = subset(outlets, subset_length=subset_length)
slc = slice(min(len(before), 1000))
log.debug('plotting unit hydrograph timeseries now for before'
                  ' / after subsetting')
title = 'UHS before subset'
pfname = plots.uhs(before[slc], title, options['CASEID'],
directories['plots'])
log.info('%s Plot: %s', title, pfname)
title = 'UHS after subset'
pfname = plots.uhs(after[slc], title, options['CASEID'],
directories['plots'])
log.info('%s Plot: %s', title, pfname)
else:
log.info('Not subsetting because either SUBSET_DAYS is null or '
                 'SUBSET_DAYS>=BASIN_FLOWDAYS')
for key, outlet in iteritems(outlets):
outlet.offset = np.zeros(outlet.unit_hydrograph.shape[1],
dtype=np.int32)
full_time_length = outlet.unit_hydrograph.shape[0]
subset_length = full_time_length
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# adjust fractions
if options['CONSTRAIN_FRACTIONS']:
adjust = True
log.info('Adjusting Fractions to be less than or equal to '
'domain fractions')
else:
adjust = False
outlets, plot_dict = adjust_fractions(outlets,
dom_data[domain['FRACTION_VAR']],
adjust=adjust)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Calculate the upstream area and upstream grid cells
# The upstream_area must be calculated after adjust_fractions
for key, outlet in iteritems(outlets):
outlet.upstream_gridcells = len(outlet.y_source)
outlet.upstream_area = np.sum(dom_data[dom_area][outlet.y_source,
outlet.x_source] *
dom_data[dom_frac][outlet.y_source,
outlet.x_source])
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Group
grouped_data = group(outlets, subset_length)
# unpack grouped data
unit_hydrograph = grouped_data['unit_hydrograph']
frac_sources = grouped_data['frac_sources']
source_lon = grouped_data['source_lon']
source_lat = grouped_data['source_lat']
source_x_ind = grouped_data['source_x_ind']
source_y_ind = grouped_data['source_y_ind']
source_decomp_ind = grouped_data['source_decomp_ind']
source_time_offset = grouped_data['source_time_offset']
source2outlet_ind = grouped_data['source2outlet_ind']
outlet_lon = grouped_data['outlet_lon']
outlet_lat = grouped_data['outlet_lat']
outlet_x_ind = grouped_data['outlet_x_ind']
outlet_y_ind = grouped_data['outlet_y_ind']
outlet_decomp_ind = grouped_data['outlet_decomp_ind']
outlet_number = grouped_data['outlet_number']
outlet_name = grouped_data['outlet_name']
outlet_upstream_area = grouped_data['outlet_upstream_area']
outlet_upstream_gridcells = grouped_data['outlet_upstream_gridcells']
# Make sure the inds are all greater than zero, ref: Github #79
assert source_decomp_ind.min() >= 0, source_decomp_ind
assert outlet_decomp_ind.min() >= 0, outlet_decomp_ind
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Adjust Unit Hydrographs for differences in source/outlet areas and
# fractions
area = dom_data[domain['AREA_VAR']]
if outlet_y_ind.ndim == 0 or outlet_x_ind.ndim == 0:
for source, outlet in enumerate(source2outlet_ind):
unit_hydrograph[:, source] *= area[source_y_ind[source],
source_x_ind[source]]
unit_hydrograph[:, source] /= area[outlet_y_ind[()],
outlet_x_ind[()]]
unit_hydrograph[:, source] *= frac_sources[source]
else:
for source, outlet in enumerate(source2outlet_ind):
unit_hydrograph[:, source] *= area[source_y_ind[source],
source_x_ind[source]]
unit_hydrograph[:, source] /= area[outlet_y_ind[outlet],
outlet_x_ind[outlet]]
unit_hydrograph[:, source] *= frac_sources[source]
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Make diagnostic plots
sum_after = np.zeros(dom_data[domain['FRACTION_VAR']].shape)
for i, (y, x) in enumerate(pyzip(source_y_ind, source_x_ind)):
sum_after[y, x] += unit_hydrograph[:, i].sum()
plot_dict['Sum UH Final'] = sum_after
dom_y = dom_data[domain['LATITUDE_VAR']]
dom_x = dom_data[domain['LONGITUDE_VAR']]
for title, data in iteritems(plot_dict):
pfname = plots.fractions(data, dom_x, dom_y, title, options['CASEID'],
directories['plots'])
log.info('%s Plot: %s', title, pfname)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# fill in some misc arrays
if outlet_y_ind.ndim == 0:
numoutlets = 1
else:
numoutlets = len(outlet_lon)
outlet_mask = np.zeros(numoutlets)
newshape = unit_hydrograph.shape + (1, )
unit_hydrograph = unit_hydrograph.reshape(newshape)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Write parameter file
today = date.today().strftime('%Y%m%d')
param_file = os.path.join(directories['params'],
'{0}.rvic.prm.{1}.{2}.'
'nc'.format(options['CASEID'],
options['GRIDID'],
today))
if 'NEW_DOMAIN' in list(config_dict.keys()):
dom_file_name = config_dict['NEW_DOMAIN']['FILE_NAME']
else:
dom_file_name = config_dict['DOMAIN']['FILE_NAME']
param_file_name = \
os.path.split(config_dict['POUR_POINTS']['FILE_NAME'])[1]
glob_atts = NcGlobals(
title='RVIC parameter file',
RvicPourPointsFile=param_file_name,
RvicUHFile=os.path.split(config_dict['UH_BOX']['FILE_NAME'])[1],
RvicFdrFile=os.path.split(routing['FILE_NAME'])[1],
RvicDomainFile=os.path.split(dom_file_name)[1])
log.debug('UH Range: (%f %f)', unit_hydrograph.min(), unit_hydrograph.max())
write_param_file(param_file,
nc_format=options['NETCDF_FORMAT'],
glob_atts=glob_atts,
full_time_length=full_time_length,
subset_length=subset_length,
unit_hydrograph_dt=routing['OUTPUT_INTERVAL'],
outlet_lon=outlet_lon,
outlet_lat=outlet_lat,
outlet_x_ind=outlet_x_ind,
outlet_y_ind=outlet_y_ind,
outlet_decomp_ind=outlet_decomp_ind,
outlet_number=outlet_number,
outlet_mask=outlet_mask,
outlet_name=outlet_name,
outlet_upstream_gridcells=outlet_upstream_gridcells,
outlet_upstream_area=outlet_upstream_area,
source_lon=source_lon,
source_lat=source_lat,
source_x_ind=source_x_ind,
source_y_ind=source_y_ind,
source_decomp_ind=source_decomp_ind,
source_time_offset=source_time_offset,
source2outlet_ind=source2outlet_ind,
unit_hydrograph=unit_hydrograph,
**ncvaropts)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# write a summary of what was done to the log file.
log.info('Parameter file includes %i outlets', len(outlets))
log.info('Parameter file includes %i Source Points', len(source_lon))
# ---------------------------------------------------------------- #
return param_file, today
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def adjust_fractions(outlets, dom_fractions, adjust=True):
'''
    Constrain the fractions in the outlets.
The basic idea is that the sum of fraction from the outlets should not
exceed the domain fractions.
'''
log.info('Adjusting fractions now')
fractions = np.zeros(dom_fractions.shape, dtype=np.float64)
ratio_fraction = np.ones(fractions.shape, dtype=np.float64)
adjusted_fractions = np.zeros(dom_fractions.shape, dtype=np.float64)
sum_uh_fractions = np.zeros(dom_fractions.shape, dtype=np.float64)
# ---------------------------------------------------------------- #
# Aggregate the fractions
for key, outlet in iteritems(outlets):
y = outlet.y_source
x = outlet.x_source
fractions[y, x] += outlet.fractions
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# First set fractions to zero where there is no land in the domain
yd, xd = np.nonzero(dom_fractions == 0.0)
fractions[yd, xd] = 0.0
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Only adjust fractions where the aggregated fractions are gt the domain
# fractions
yi, xi = np.nonzero(fractions > dom_fractions)
log.info('Adjust fractions for %s grid cells', len(yi))
ratio_fraction[yi, xi] = dom_fractions[yi, xi] / fractions[yi, xi]
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Adjust fracs based on ratio_fraction
for key, outlet in iteritems(outlets):
y = outlet.y_source
x = outlet.x_source
if adjust:
outlet.fractions *= ratio_fraction[y, x]
# For Diagnostics only
adjusted_fractions[y, x] += outlet.fractions
sum_uh_fractions[y, x] += outlet.unit_hydrograph.sum(axis=0)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Make Fractions Dict for plotting
plot_dict = {'Domain Fractions': dom_fractions,
'Aggregated Fractions': fractions,
'Ratio Fractions': ratio_fraction,
'Adjusted Fractions': adjusted_fractions,
'Sum UH Before': sum_uh_fractions}
# ---------------------------------------------------------------- #
return outlets, plot_dict
# -------------------------------------------------------------------- #
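# -------------------------------------------------------------------- #
# Hedged illustration (editor addition, not part of RVIC): the capping rule
# used in adjust_fractions, shown on plain numpy arrays without outlet
# objects. Where the aggregated fractions exceed the domain fractions they
# are scaled back so the two agree; elsewhere they are left untouched.
def _demo_fraction_capping():
    dom = np.array([[1.0, 0.5], [0.0, 1.0]])     # toy domain fractions
    agg = np.array([[0.7, 0.9], [0.2, 1.0]])     # toy aggregated outlet fractions
    ratio = np.ones_like(agg)
    yi, xi = np.nonzero(agg > dom)
    ratio[yi, xi] = dom[yi, xi] / agg[yi, xi]
    return agg * ratio                           # nowhere exceeds dom
# -------------------------------------------------------------------- #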
# -------------------------------------------------------------------- #
# Shorten the unit hydrograph
def subset(outlets, subset_length=None):
''' Shorten the Unit Hydrograph'''
log.info('subsetting unit-hydrographs now...')
log.debug('Subset Length: %s', subset_length)
log.debug(outlets)
for i, (key, outlet) in enumerate(iteritems(outlets)):
if i == 0:
full_time_length = outlet.unit_hydrograph.shape[0]
log.debug('Subset Length: %s', subset_length)
log.debug('full_time_length: %s', full_time_length)
if not subset_length:
subset_length = full_time_length
log.debug('No subset_length provided, using full_time_length')
before = outlet.unit_hydrograph
else:
before = np.append(before, outlet.unit_hydrograph,
axis=1)
outlet.offset = np.empty(outlet.unit_hydrograph.shape[1],
dtype=np.int32)
out_uh = np.zeros((subset_length, outlet.unit_hydrograph.shape[1]),
dtype=np.float64)
d_left = int(-1 * subset_length / 2)
d_right = int(subset_length / 2)
for j in pyrange(outlet.unit_hydrograph.shape[1]):
# find index position of maximum
maxind = np.argmax(outlet.unit_hydrograph[:, j])
# find bounds
left = maxind + d_left
right = maxind + d_right
# make sure left and right fit in unit hydrograph array,
# if not adjust
if left < 0:
left = 0
right = subset_length
if right > full_time_length:
right = full_time_length
left = full_time_length - subset_length
log.warning('Subset centered on UH max extends beyond length '
'of unit hydrograph.')
log.warning('--> Outlet %s', outlet)
log.warning('----> Max Index is %s', maxind)
log.warning('----> Last value in subset '
'is %s', outlet.unit_hydrograph[-1, j])
if maxind == full_time_length:
log.warning('maxind == full_time_length, not able to '
'resolve unithydrograph')
if left < 0 or right > full_time_length:
raise ValueError('Subsetting failed left:{0} or right {1} does'
' not fit inside bounds'.format(left, right))
outlet.offset[j] = left
# clip and normalize
tot = outlet.unit_hydrograph[left:right, j].sum()
out_uh[:, j] = outlet.unit_hydrograph[left:right, j] / tot
outlet.unit_hydrograph = out_uh
if i == 0:
after = outlet.unit_hydrograph
else:
after = np.append(after, outlet.unit_hydrograph, axis=1)
log.info('Done subsetting')
return outlets, full_time_length, before, after
# -------------------------------------------------------------------- #
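# -------------------------------------------------------------------- #
# Hedged illustration (editor addition, not part of RVIC): the window-centering
# rule used in subset(), shown for a single toy unit hydrograph. Lengths and the
# integer-division details are simplified illustration choices.
def _demo_center_window(uh, subset_length):
    maxind = np.argmax(uh)                 # centre the window on the UH peak
    left = maxind - subset_length // 2
    right = maxind + subset_length // 2
    if left < 0:                           # clamp at the start of the record
        left, right = 0, subset_length
    if right > len(uh):                    # clamp at the end of the record
        right, left = len(uh), len(uh) - subset_length
    clipped = uh[left:right]
    return clipped / clipped.sum()         # renormalise so the UH sums to 1
# -------------------------------------------------------------------- #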
# -------------------------------------------------------------------- #
def group(outlets, subset_length):
'''
group the outlets into one set of arrays
'''
n_outlets = len(outlets)
n_sources = 0
for key, outlet in iteritems(outlets):
n_sources += len(outlet.y_source)
gd = {}
log.debug('n_outlets: %s', n_outlets)
log.debug('n_sources: %s', n_sources)
log.debug('subset_length: %s', subset_length)
# ---------------------------------------------------------------- #
# Source specific values
gd['unit_hydrograph'] = np.empty((subset_length, n_sources),
dtype=np.float64)
gd['frac_sources'] = np.empty(n_sources, dtype=np.float64)
gd['source_lon'] = np.empty(n_sources, dtype=np.float64)
gd['source_lat'] = np.empty(n_sources, dtype=np.float64)
gd['source_x_ind'] = np.empty(n_sources, dtype=np.int32)
gd['source_y_ind'] = np.empty(n_sources, dtype=np.int32)
gd['source_decomp_ind'] = np.empty(n_sources, dtype=np.int32)
gd['source_time_offset'] = np.empty(n_sources, dtype=np.int32)
gd['source2outlet_ind'] = np.empty(n_sources, dtype=np.int32)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# outlet specific inputs
gd['outlet_lon'] = np.empty(n_outlets, dtype=np.float64)
gd['outlet_lat'] = np.empty(n_outlets, dtype=np.float64)
gd['outlet_x_ind'] = np.empty(n_outlets, dtype=np.int32)
gd['outlet_y_ind'] = np.empty(n_outlets, dtype=np.int32)
gd['outlet_decomp_ind'] = np.empty(n_outlets, dtype=np.int32)
gd['outlet_number'] = np.empty(n_outlets, dtype=np.int32)
gd['outlet_name'] = np.empty(n_outlets, dtype='S{0}'.format(MAX_NC_CHARS))
gd['outlet_upstream_gridcells'] = np.empty(n_outlets, dtype=np.int32)
gd['outlet_upstream_area'] = np.empty(n_outlets, dtype=np.float64)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# place outlet and source vars into gd dictionary
a = 0
for i, (key, outlet) in enumerate(iteritems(outlets)):
b = a + len(outlet.y_source)
log.debug('%s unit_hydrograph.shape %s', outlet.name,
outlet.unit_hydrograph.shape)
# -------------------------------------------------------- #
# Point specific values
gd['unit_hydrograph'][:, a:b] = outlet.unit_hydrograph
gd['frac_sources'][a:b] = outlet.fractions
gd['source_lon'][a:b] = outlet.lon_source
gd['source_lat'][a:b] = outlet.lat_source
gd['source_x_ind'][a:b] = outlet.x_source
gd['source_y_ind'][a:b] = outlet.y_source
gd['source_decomp_ind'][a:b] = outlet.cell_id_source
gd['source_time_offset'][a:b] = outlet.offset
gd['source2outlet_ind'][a:b] = i
# -------------------------------------------------------- #
# -------------------------------------------------------- #
# outlet specific inputs
gd['outlet_lon'][i] = outlet.lon
gd['outlet_lat'][i] = outlet.lat
gd['outlet_x_ind'][i] = outlet.domx
gd['outlet_y_ind'][i] = outlet.domy
gd['outlet_decomp_ind'][i] = outlet.cell_id
gd['outlet_number'][i] = i
gd['outlet_name'][i] = outlet.name
gd['outlet_upstream_gridcells'][i] = outlet.upstream_gridcells
gd['outlet_upstream_area'][i] = outlet.upstream_area
# -------------------------------------------------------- #
# -------------------------------------------------------- #
# update src counter
a = b
# -------------------------------------------------------- #
# ---------------------------------------------------------------- #
return gd
# -------------------------------------------------------------------- #
|
UW-Hydro/RVIC
|
rvic/core/param_file.py
|
Python
|
gpl-3.0
| 20,513
|
[
"NetCDF"
] |
ed3139b057e831633dc5e16b19ab75fc93cd59f5d8de277189039e1bac66de30
|
#!/usr/bin/python
"""
This module includes a few basic functions useful for the pygme
Multi Gaussian Expansion models (Monnet et al. 1992, Emsellem et al. 1994)
python module.
For questions, please contact Eric Emsellem at eric.emsellem@eso.org
"""
"""
Importing the most import modules
This module requires NUMPY and SCIPY and optionally matplotlib (for plots)
"""
try:
import numpy as np
except ImportError:
raise Exception("numpy is required for pygme")
from numpy import random, asarray
try:
from scipy import special, interpolate, optimize
except ImportError:
raise Exception("scipy is required for pygme")
from rwcfor import floatMGE
from numpy import sin, cos, exp, sqrt, pi
import matplotlib
import matplotlib.pyplot as plt
import os
__version__ = '1.0.2 (14 August 2014)'
# Version 1.0.3 : GaussHermite only returns the derived profile (1 array)
# Version 1.0.2 : Changed some scaling and output
# Version 1.0.1 : Removed imin imax
# Version 1.0.0 : first extraction from pygme
############################################################################
# Misc Functions
############################################################################
# ====================================================
# Print a message
# ====================================================
def print_msg(text="", status=0, verbose=0) :
""" Internal pygme method to print messages
:param text: Text to be written
:type text: string
:param status: Status (default is 0). If 0 just print. If 1, state a WARNING. If 2, state an ERROR.
:type status: integer (0, 1 or 2)
:param verbose: 0 or 1 (default is 0).
"""
if status >= 2 :
print "ERROR with status %d while %s" %(status, text)
elif status == 1 :
print "WARNING with status %d while %s" %(status, text)
elif status <= 0 :
if verbose :
print "Status OK (0) while %s" %(text)
print text
#===================================================
### ##################################################################
### ### Set up the min and max indices depending on the input ###
### ##################################################################
### def _set_iminmax(imin=None, imax=None, NMAX=0) :
### """
### Set the indices as wished for MGE routines.
###
### :param imin: id of first Gaussian to consider (between 0 and Ngauss-1)
### :type imin: int
### :param imax: id of last Gaussians to consider (between 0 and Ngauss-1)
### :type imax: int
###
### :return: 0, NGauss, NGauss-1 if the input is Null
### Otherwise imin, imax, and imax+1.
###
### DEPRECATED FUNCTION
###
### """
### if imin == None :
### imin = 0
### if imax == None :
### imax = NMAX - 1
### return imin, imax, imax+1
### ###===============================================================
#=============================================================================================================
# Return abs/weights from the orthogonal scipy quadrature
#=============================================================================================================
def quadrat_ps_roots(Nabs) :
return return_floatXY(special.orthogonal.ps_roots(Nabs))
#-------------------------------------------------------------------------------------------------------------
#=============================================================================================================
# Extract the right float values from the orthogonal scipy output
#=============================================================================================================
def return_floatXY(temparray) :
"""
Return a list of 2 arrays, converted from an input array which has a real and
imaginary part, as in the output of the scipy.orthogonal.ps_roots scipy routine
"""
X = asarray(temparray[0].real, dtype=floatMGE)
Y = asarray(temparray[1], dtype=floatMGE)
return [X,Y]
#-------------------------------------------------------------------------------------------------------------
#=============================================================================================================
# Return a realisation for a truncated Gaussian with sigma
#=============================================================================================================
def sample_trunc_gauss(sigma=1., cutX=1., npoints=1, even=0):
"""
Function which returns a sample of points (npoints) which follow
a Gaussian distribution truncated at X=cutX
This uses the special erf function from scipy and the random uniform function
As well as the erfinv (inverse of erf) function
Input:
sigma : sigma of the Gaussian in arbitrary units (default is 1.0)
cutX : truncature (positive) in same units than sigma (default is 1.0)
npoints : Number of points for the output sample (default is 1)
even : if even=1, cut with -cutX, cutX
otherwise, cut between 0 and cutX (default is 0 => not even)
"""
sqrt2sig = np.sqrt(2.)*sigma
cutsamp = special.erf(cutX/sqrt2sig)
if even : ## If distribution needs to be symmetric
return sqrt2sig * special.erfinv(random.uniform(-cutsamp, cutsamp, npoints))
else :
return sqrt2sig * special.erfinv(random.uniform(0., cutsamp, npoints))
#-------------------------------------------------------------------------------------------------------------
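#-------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (editor addition): drawing a symmetric truncated-Gaussian
# sample and checking that every point respects the cut. The sigma and cut
# values are arbitrary illustration choices.
def _example_trunc_gauss():
    pts = sample_trunc_gauss(sigma=2.0, cutX=3.0, npoints=1000, even=1)
    assert np.all(np.abs(pts) <= 3.0)      # all draws lie within the truncation
    return pts
#-------------------------------------------------------------------------------------------------------------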
#=============================================================================================================
# Return a realisation for a truncated r^2*Gaussian with sigma
#=============================================================================================================
def sample_trunc_r2gauss(sigma=1., cutr=1., npoints=1, nSamp=10000):
"""
Function which returns a sample of points (npoints) which follow
a r^2 * Gaussian distribution truncated at r=cutr
This uses the special erf function from scipy, an interpolation for the
cumulative integrated function and then a random uniform function
Input:
sigma : sigma of the Gaussian in arbitrary units
cutr : truncature (positive) in same units than sigma
npoints : Number of points for the output sample
"""
sqrt2sig = np.sqrt(2.)*sigma
## Sampling in r with nSamp points - default to 10000 points to sample well the profile
sampx = np.linspace(0.,cutr,nSamp)
sampxsig = sampx / sqrt2sig # normalised to the sigma
## Cumulative function of 4*pi*r2*exp(-r2/2*sigma**2)
fSG = 2.*np.pi * sqrt2sig**3. * (-sampxsig * np.exp(-sampxsig**2)+special.erf(sampxsig) * np.sqrt(np.pi)/2.)
## Interpolation to get the inverse function
invF = interpolate.interp1d(fSG, sampx)
return invF(random.uniform(0, fSG[-1], npoints))
#-------------------------------------------------------------------------------------------------------------
#=============================================================================================================
# Return a realisation for a truncated r^2*Gaussian with sigma
#=============================================================================================================
def gridima_XY(npix=(1,1), center=(0.,0.), step=(1.,1.)) :
"""
Return 2D X,Y grids assuming npix pixels, given the centre and step
npix : tuple of integers providing the number of pixels in X and Y
center : tuple of float providing the centre of the array
step : tuple of float (or single float) providing the size of the pixel
"""
    if np.isscalar(step) : step = (step, step)
X,Y = np.meshgrid(np.linspace(0,npix[1]-1, npix[1]), np.linspace(0,npix[0]-1,npix[0]))
X = (X - center[0]) * step[0]
Y = (Y - center[1]) * step[1]
return X, Y
def convert_xy_to_polar(x, y, cx=0.0, cy=0.0, PA=None) :
"""
Convert x and y coordinates into polar coordinates
cx and cy: Center in X, and Y. 0 by default.
PA : position angle in radians
(Counter-clockwise from vertical)
This allows to take into account some rotation
and place X along the abscissa
Default is None and would be then set for no rotation
Return : R, theta (in radians)
"""
if PA is None : PA = -np.pi / 2.
## If the PA does not have X along the abscissa, rotate
if np.mod(PA+np.pi/2., np.pi) != 0.0 : x, y = rotxyC(x, y, cx=cx, cy=cy, angle=PA+np.pi/2.)
## Polar coordinates
r = np.sqrt(x**2 + y**2)
## Now computing the true theta
theta = np.zeros_like(r)
theta[(x == 0.) & (y >= 0.)] = pi / 2.
theta[(x == 0.) & (y < 0.)] = -pi / 2.
theta[(x < 0.)] = np.arctan(y[(x < 0.)] / x[(x < 0.)]) + pi
theta[(x > 0.)] = np.arctan(y[(x > 0.)] / x[(x > 0.)])
return r, theta
#-------------------------------------------------------------------------------------------------------------
def convert_polar_to_xy(r, theta) :
"""
Convert x and y coordinates into polar coordinates Theta in Radians
Return :x, y
"""
## cartesian
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
#-------------------------------------------------------------------------------------------------------------
def rotxC(x, y, cx=0.0, cy=0.0, angle=0.0) :
""" Rotate by an angle (in radians)
the x axis with a center cx, cy
Return rotated(x)
"""
return (x - cx) * np.cos(angle) + (y - cy) * np.sin(angle)
def rotyC(x, y, cx=0.0, cy=0.0, angle=0.0) :
""" Rotate by an angle (in radians)
the y axis with a center cx, cy
Return rotated(y)
"""
return (cx - x) * np.sin(angle) + (y - cy) * np.cos(angle)
def rotxyC(x, y, cx=0.0, cy=0.0, angle=0.0) :
""" Rotate both x, y by an angle (in radians)
the x axis with a center cx, cy
Return rotated(x), rotated(y)
"""
## First centring
xt = x - cx
yt = y - cy
## Then only rotation
    return rotxC(xt, yt, angle=angle), rotyC(xt, yt, angle=angle)
#-------------------------------------------------------------------------------------------------------------
def _gaussianROTC(height, center_x, center_y, width_x, width_y, angle):
"""Returns a gaussian function with the given parameters
First is the shift, then rotation
"""
width_x = np.float(width_x)
width_y = np.float(width_y)
return lambda x,y: height*np.exp( -(((rotxC(x, y, center_x, center_y, angle)) / width_x)**2+((rotyC(x, y, center_x, center_y, angle)) / width_y)**2)/2.)
#-------------------------------------------------------------------------------------------------------------
def twod_moments(data):
"""Returns (height, x, y, width_x, width_y, 0.)
the gaussian parameters of a 2D distribution by calculating its moments
The last value (0.) stands for the default position angle
"""
total = data.sum()
X, Y = np.indices(data.shape)
x = (X * data).sum() / total
y = (Y * data).sum() / total
col = data[:, int(y)]
width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())
row = data[int(x), :]
width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
height = data.max()
return height, x, y, width_x, width_y, 0.
#-------------------------------------------------------------------------------------------------------------
def Inertia_2DMoments(x, y, data) :
"""
Derive the moment of inertia from a flux map
It returns the major and minor axes, ellipticity and PA
"""
momI = np.sum(data, axis=0)
if momI == 0. :
return 0., 0., 1., 0.
momIX = np.sum(data * x, axis=0) / momI
momIY = np.sum(data * y, axis=0) / momI
a = np.sum(data * x * x, axis=0) / momI - momIX**2
b = np.sum(data * y * y, axis=0) / momI - momIY**2
c = np.sum(data * x * y, axis=0) / momI - momIX*momIY
if c == 0 :
if a == 0. :
return b,a,0.,0.
if b > a :
return b,a,1.-np.sqrt(a/b),0.
else :
if b == 0.:
return a,b,0.,0.
else :
return a,b,1.-np.sqrt(b/a),90.
delta = (a-b)**2. + 4 * c*c
Lp2 = ((a+b) + np.sqrt(delta)) / 2.
Lm2 = ((a+b) - np.sqrt(delta)) / 2.
Lp = np.sqrt(np.maximum(Lp2,0.))
Lm = np.sqrt(np.maximum(Lm2,0.))
eps = (Lp - Lm) / Lp
theta = np.degrees(np.arctan((b - Lp2) / c))
return Lp, Lm, eps, theta
#-------------------------------------------------------------------------------------------------------------
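#-------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (editor addition): recovering ellipse parameters from the
# second moments of a synthetic inclined 2D Gaussian. The grid size, axis ratio
# and position angle are illustrative assumptions.
def _example_inertia_moments():
    X, Y = np.meshgrid(np.linspace(-20., 20., 81), np.linspace(-20., 20., 81))
    ang = np.radians(30.)
    xr = X*np.cos(ang) + Y*np.sin(ang)     # coordinates rotated by 30 degrees
    yr = -X*np.sin(ang) + Y*np.cos(ang)
    data = np.exp(-0.5*((xr/6.)**2 + (yr/3.)**2))   # sigma_major=6, sigma_minor=3
    Lp, Lm, eps, theta = Inertia_2DMoments(X.ravel(), Y.ravel(), data.ravel())
    return Lp, Lm, eps, theta              # eps should come out close to 0.5
#-------------------------------------------------------------------------------------------------------------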
def fit_1gaussian(data):
""" Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution found by a fit
"""
params = twod_moments(data)
errorfunction = lambda p: np.ravel(_gaussianROTC(*p)(*np.indices(data.shape)) - data)
bestfitparams, success = optimize.leastsq(errorfunction, params)
return bestfitparams
#-------------------------------------------------------------------------------------------------------------
def _find_Image_labels(image=None, threshold=None) :
"""
Select pixels within an image which are contiguous
Used for find_ImageCenter, but here can be also used with a non-default threshold
for the image level.
image: input data array
threshold : threshold above which the selection should be done
Return: a selection from the data
"""
import scipy.ndimage as ndima
if threshold is None : threshold = image.mean()
labels, num = ndima.label(image > threshold, np.ones((3,3)))
maxLabel = np.argmax(np.bincount(labels[labels>0].ravel()))
select_label = (labels == maxLabel)
return select_label, labels
#-------------------------------------------------------------------------------------------------------------
def find_ImageCenter(image=None, showfit=False, verbose=True, threshold=None) :
"""
Find the centre of a galaxy using the centre of mass after filtering
image : An input data array
showfit: show the residual fit - default to False
verbose : print results - default to True
threshol : where to cut the image level - default to None (mean of Image)
Return: xcen, ycen, major, minor, eps, theta
"""
import scipy.ndimage as ndima
from matplotlib.patches import Ellipse
Mimage = ndima.median_filter(image, 3)
## Extracting headers and data
startx, starty = 0., 0.
stepx, stepy = 1., 1.
npy, npx = image.shape
endx = startx + (npx -1) * stepx
endy = starty + (npy -1) * stepy
Xin,Yin = np.meshgrid(np.linspace(startx,endx,npx), np.linspace(starty,endy,npy))
## We select the labels after aggregation with a default threshold (mean of the image)
select_label, labels = _find_Image_labels(Mimage, threshold=threshold)
maxLabel = np.argmax(np.bincount(labels[labels>0].ravel()))
## With the selection we can find the centre of mass using ndima
centers = ndima.center_of_mass(image, labels, maxLabel)
## Being careful here as X and Y are in fact Y, X
xcen = np.array(centers)[1]
ycen = np.array(centers)[0]
major, minor, eps, theta = Inertia_2DMoments(Xin[select_label]-xcen, Yin[select_label]-ycen, Mimage[select_label])
maxRadius = np.max(np.sqrt((Xin[select_label]-xcen)**2+ (Yin[select_label]-ycen)**2))
if showfit :
fig = plt.figure(1, figsize=(8,6))
ax = fig.add_subplot(111, aspect='equal')
ax.plot(Xin[select_label], Yin[select_label], ',')
ellipse = Ellipse(xy=(xcen,ycen), width=maxRadius*1.1*2.0, height=maxRadius*1.1*(1.-eps)*2.0,
angle=90.+theta, edgecolor='r', fc='None', lw=2)
ax.add_patch(ellipse)
ax.set_xlim(startx, endx)
ax.set_ylim(starty, endy)
if verbose :
print "Center of the image found at: ", xcen, " ", ycen
print "Ellipticity: ", eps
print "Position Angle: ", theta
print "Maximum Radius of point accounted for: ", maxRadius
return xcen, ycen, major, minor, eps, theta
#-------------------------------------------------------------------------------------------------------------
def fit_ImageCenter(image=None, showfit=False) :
"""
Fit a gaussian on an image and show the best fit and returns the centre
fitsfile : An input fits file
showfit : default is False (not ploting the fit), if True -> will show the image and fitted gaussian
Return: X, Y, fitdata which are the found central position and the fitted data
"""
## Extracting headers and data
    startx, starty = 0., 0.
stepx, stepy = 1., 1.
npx, npy = image.shape
endx = startx + (npx -1) * stepx
endy = starty + (npy -1) * stepy
Xin,Yin = np.meshgrid(np.linspace(startx,endx,npx), np.linspace(starty,endy,npy))
    ## Starting the fit: the input image itself is the data to fit
    datatofit = image
    fitparams = fit_1gaussian(datatofit)
## Recompute the fitted gaussian
fit = _gaussianROTC(*fitparams)
(height, cenx, ceny, width_x, width_y, angle) = fitparams
## Rescaling the values
cenxarc = ceny * stepx + startx
cenyarc = cenx * stepy + starty
width_xarc = width_x * stepx
width_yarc = width_y * stepy
## Some printing
print "Center is X, Y = %8.4f %8.4f" %(cenxarc, cenyarc)
print "Width is X, Y = %8.4f %8.4f" %(width_xarc, width_yarc)
print "Start/End %8.4f %8.4f %8.4f %8.4f" %(startx, starty, endx, endy)
print "Angle %8.4f" %(np.degrees(angle))
fitdata = fit(*np.indices(datatofit.shape))
if showfit:
## Doing the plot
plt.clf()
## image
plt.imshow(np.log10(datatofit+1.), extent=(startx, endx, starty, endy))
## Contours
plt.contour(Xin, Yin, fitdata, cmap=plt.cm.copper)
return Xin, Yin, fitdata
#-------------------------------------------------------------------------------------------------------------
def GaussHermite(Vbin=None, GH=None) :
""" Returns the Gauss-Hermite function given
a set of parameters given as an array GH (first three moments are flux, velocity and dispersion)
and the input sampling (velocities) Vbin
"""
if Vbin is None :
Vbin = np.linspace(-GH[2]*5. + GH[1], GH[2]*5. + GH[1],101)
degree = len(GH) - 1
if degree < 2 :
        print "Error: not enough parameters here"
return Vbin * 0.
if GH[2] == 0. :
print "Error: Sigma is 0!"
return Vbin * 0.
VbinN = (Vbin - GH[1]) / GH[2]
VbinN2 = VbinN * VbinN
GH0 = (2. * VbinN2 - 1.0) / sqrt(2.)
GH1 = (2. * VbinN2 - 3.) * VbinN / sqrt(3.)
GH2 = GH1
var = 1.0
for i in xrange(3, degree+1) :
var += GH[i] * GH2
GH2 = (sqrt(2.) * GH1 * VbinN - GH0) / sqrt(i+1.0);
GH0 = GH1;
GH1 = GH2;
return GH[0] * var * exp(- VbinN2 / 2.)
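#-------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (editor addition): evaluating a Gauss-Hermite profile with
# small h3/h4 terms. The moment values are arbitrary illustration choices; the
# first three GH entries are flux, velocity and dispersion as stated above.
def _example_gausshermite():
    Vbin = np.linspace(-600., 600., 201)   # velocity sampling in km/s
    GH = [1.0, 50., 120., 0.05, -0.03]     # flux, V, sigma, h3, h4
    return Vbin, GaussHermite(Vbin=Vbin, GH=GH)
#-------------------------------------------------------------------------------------------------------------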
def oldGaussHermite(Vbin, V, S, GH) :
"""
Return the Gauss-Hermite function up to a certain degree
using V, Sigma, and then an array describing h3, h4, ...
"""
#------------------------------------------------------------
# The Gauss-Hermite function is a superposition of functions of the form
# F = (x-xc)/s
# E = A.Exp[-1/2.F^2] * {1 + h3[c1.F+c3.F^3] + h4[c5+c2.F^2+c4.F^4]}
#------------------------------------------------------------
c0 = sqrt(6.0)/4.0
c1 = -sqrt(3.0)
c2 = -sqrt(6.0)
c3 = 2.0*sqrt(3.0)/3.0
c4 = sqrt(6.0)/3.0
    # Map the signature onto the names of the formula above; the amplitude A
    # is taken as 1 here since it is not part of the signature (assumption).
    h3, h4 = GH[0], GH[1]
    F = (Vbin - V) / S
    E = exp(-0.5*F*F)*( 1.0 + h3*F*(c3*F*F+c1) + h4*(c0+F*F*(c2+c4*F*F)) )
    return E
|
emsellem/pygme
|
pygme/mge_miscfunctions.py
|
Python
|
bsd-3-clause
| 19,673
|
[
"Galaxy",
"Gaussian"
] |
4945963b080de4a714077392ad6911697f22be7b83696ca3365135510c2d01bd
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2014, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Cloud controlling tools.
"""
import os
import collections
import functools
import boto.ec2
import paramiko as ssh
from . import helper
__all__ = ['Host', 'AwsHost', 'AwsOperator', 'aoregy']
class Host(object):
"""
Abstraction for actions toward a host.
"""
@staticmethod
def _prepare_command(cmd, sudo=False, env=None, wd=None):
"""
This is the helper method to make up the command (*cmd*) with different
operators.
The optional argument *sudo* will prefix "sudo". Default is False:
>>> Host._prepare_command("cmd")
'cmd'
>>> Host._prepare_command("cmd", sudo=True)
'sudo cmd'
The optional argument *env* will prefix "env" with the given string of
environment variables. The default is None:
>>> Host._prepare_command("cmd", env="PATH=$HOME:$PATH")
'env PATH=$HOME:$PATH cmd'
The optional argument *wd* will first change the working directory and
execute the command. The default is None:
>>> Host._prepare_command("cmd", wd="/tmp")
'cd /tmp; cmd'
Argument *env* can be used with either *sudo* or *wd*:
>>> Host._prepare_command("cmd", sudo=True,
... env="PATH=$HOME:$PATH")
'sudo env PATH=$HOME:$PATH cmd'
>>> Host._prepare_command("cmd", env="PATH=$HOME:$PATH", wd="/tmp")
'cd /tmp; env PATH=$HOME:$PATH cmd'
However, *sudo* doesn't work with *wd*:
>>> Host._prepare_command("cmd", sudo=True, wd="/tmp")
Traceback (most recent call last):
super(self, AwsHost).__init__()
...
ValueError: sudo can't be True with wd set
"""
if env is not None:
cmd = "env %s %s" % (env, cmd)
if wd is not None:
cmd = "cd %s; %s" % (wd, cmd)
if sudo:
raise ValueError("sudo can't be True with wd set")
if sudo:
cmd = "sudo %s" % cmd
return cmd
def connect(self, username):
pass
def disconnect(self):
pass
def run(self, cmd, **kw):
cmd = self._prepare_command(cmd, **kw)
return cmd
class AwsHost(Host):
"""
Abstraction for actions toward an AWS EC2 host.
"""
def __init__(self, instance):
super(AwsHost, self).__init__()
#: :py:class:`boto.ec2.instance.Instance` for the host.
self.instance = instance
#: :py:class:`paramiko.client.SSHClient` to command the remote host.
self.cli = None
def default_keyfn_getter(keyname):
keyfn = "aws_%s.pem" % keyname
return os.path.join(os.environ['HOME'], ".ssh", keyfn)
#: A callable takes a :py:class:`str` and convert it to a file name for
#: the key.
self.keyname2fn = default_keyfn_getter
def connect(self, username):
self.cli = ssh.client.SSHClient()
self.cli.load_system_host_keys()
self.cli.set_missing_host_key_policy(ssh.client.AutoAddPolicy())
self.cli.connect(self.instance.public_dns_name,
username=username,
key_filename=self.keyname2fn(self.instance.key_name),
allow_agent=True)
return self.cli
def disconnect(self):
        self.cli.close()   # paramiko's SSHClient provides close(), not disconnect()
self.cli = None
def run(self, cmd, bufsize=-1, timeout=None, **kw):
# Prepare the command.
cmd = self._prepare_command(cmd, **kw)
# Log command information.
helper.info(cmd + "\n")
# Open the channel.
chan = self.cli.get_transport().open_session()
# Set up SSH authentication agent forwarding.
forward = ssh.agent.AgentRequestHandler(chan)
# Get and set up the terminal.
chan.get_pty()
chan.set_combine_stderr(True)
chan.settimeout(timeout)
# Send the command.
chan.exec_command(cmd)
# Use the STD I/O.
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('r', bufsize)
# Get the data and report.
data = stdout.read()
helper.info(data + "\n")
return data
#: Tuple keys of :py:class:`AwsOperator`.
_AWSHOSTINFOKEYS = ("region", "ami", "osfamily", "username",
"instance_type", "security_groups")
class AwsOperator(collections.namedtuple("AwsOperator", _AWSHOSTINFOKEYS)):
"""
Collection of AWS host information.
Filling information into the class :py:class:`AwsOperator`. The filled
data will become read-only.
>>> info = AwsOperator(region="us-west-2", ami="ami-77d7a747",
... osfamily="RHEL", username="ec2-user")
>>> info # doctest: +NORMALIZE_WHITESPACE
AwsOperator(region='us-west-2', ami='ami-77d7a747', osfamily='RHEL',
username='ec2-user', instance_type='t2.micro', security_groups=('default',))
>>> info.osfamily = "Debian"
Traceback (most recent call last):
...
AttributeError: can't set attribute
Positional arguments aren't allowed:
>>> info = AwsOperator("us-west-2", "ami-77d7a747", "RHEL", "ec2-user")
Traceback (most recent call last):
...
KeyError: "positional arguments aren't allowed"
"""
#: Allowed OS families.
_OSFAMILIES = ("RHEL", "Ubuntu", "Debian")
#: Mapping to the package metadata updating command for each of the OS
#: families.
_PMETACMD = {
"RHEL": "yum makecache -y",
"Ubuntu": "apt-get update -y",
"Debian": "apt-get update -y",
}
#: Mapping to the package installation command for each of the OS families.
_PINSTCMD = {
"RHEL": "yum install -y",
"Ubuntu": "apt-get install -y",
"Debian": "apt-get install -y",
}
_MINPKGS_COMMON = (
"vim ctags wget screen bzip2 patch mercurial git gcc gfortran".split())
_MINPKGS_DEBBUILD = ("build-essential zlib1g "
"liblapack-dev liblapack-pic".split())
#: Minimal packages.
_MINPKGS = {
"RHEL": _MINPKGS_COMMON,
"Ubuntu": _MINPKGS_COMMON + _MINPKGS_DEBBUILD,
"Debian": _MINPKGS_COMMON + _MINPKGS_DEBBUILD,
}
#: The downloading URL for conda installer.
MINICONDA_URL = ("http://repo.continuum.io/miniconda/"
"Miniconda-3.5.5-Linux-x86_64.sh")
#: Where to find conda on the destination box.
MINICONDA_PATH = "$HOME/opt/miniconda/bin"
#: Where to find SOLVCON on the destination box.
SOLVCON_PATH = "$HOME/sc/solvcon"
def __new__(cls, *args, **kw):
# Disallow positional arguments.
if args:
raise KeyError("positional arguments aren't allowed")
# Sanitize osfamily.
osfamily = kw['osfamily']
if osfamily not in cls._OSFAMILIES:
fam = ", ".join("\"%s\"" % fam for fam in cls._OSFAMILIES)
raise ValueError("osfamily \"%s\" not in %s" % (osfamily, fam))
# Set default values.
kw.setdefault("instance_type", "t2.micro")
kw.setdefault("security_groups", ("default",))
# Make up arguments.
args = tuple(kw[key] for key in cls._fields)
# Create the object.
obj = super(AwsOperator, cls).__new__(cls, *args)
#: The commanding :py:class:`Host` object.
obj.host = Host()
# Return the object.
return obj
def connect(self, *args, **kw):
return self.host.connect(self.username, *args, **kw)
def disconnect(self, *args, **kw):
return self.host.disconnect(*args, **kw)
def run(self, *args, **kw):
"""
>>> info = AwsOperator(region="us-west-2", ami="ami-77d7a747",
... osfamily="RHEL", username="ec2-user")
>>> info.run("command")
'command'
"""
return self.host.run(*args, **kw)
@property
def pmetacmd(self):
return self._PMETACMD[self.osfamily]
@property
def pinstcmd(self):
return self._PINSTCMD[self.osfamily]
@property
def minpkgs(self):
return self._MINPKGS[self.osfamily]
def update_package_metadata(self):
self.run(self.pmetacmd, sudo=True)
def install(self, packages):
manager = self.pinstcmd
if not isinstance(packages, basestring):
packages = " ".join(packages)
self.run("%s %s" % (manager, packages), sudo=True)
def hgclone(self, path, sshcmd="", ignore_key=False, **kw):
cmd = "hg clone"
if ignore_key:
if not sshcmd:
sshcmd = "ssh -oStrictHostKeyChecking=no"
else:
raise ValueError("ignore_key can't be used with sshcmd")
if path.startswith("ssh") and sshcmd:
cmd += " --ssh '%s'" % sshcmd
cmd = "%s %s" % (cmd, path)
self.run(cmd, **kw)
def deploy_minimal(self):
# Use OS package manager to install tools.
self.update_package_metadata()
self.install(self.minpkgs)
# Install miniconda.
mcurl = self.MINICONDA_URL
mcfn = mcurl.split("/")[-1]
run = functools.partial(self.run, wd="/tmp")
run("rm -f %s" % mcfn)
run("wget %s" % mcurl)
run("bash %s -p $HOME/opt/miniconda -b" % mcfn)
# Update conda packages.
run = functools.partial(
self.run, env="PATH=%s:$PATH" % self.MINICONDA_PATH)
run("conda update --all --yes")
# Install basic development tools with conda.
run("conda install jinja2 binstar conda-build grin --yes")
# Install standard dependencies with conda.
run("conda install setuptools mercurial "
"scons cython numpy netcdf4 vtk nose sphinx paramiko boto "
"--yes")
# Install customized dependencies with conda.
run("conda install gmsh graphviz scotch --yes "
"-c https://conda.binstar.org/yungyuc/channel/solvcon")
def obtain_solvcon(self):
# Clone the remote repository.
self.run("mkdir -p $HOME/sc")
self.hgclone("http://bitbucket.org/solvcon/solvcon", wd="$HOME/sc")
def set_config_files(self):
# Write conda channel settings.
condarc = ("channels: [ "
"\"https://conda.binstar.org/yungyuc/channel/solvcon\", "
"defaults ]")
self.run("echo '%s' > $HOME/.condarc" % condarc)
# Back up bashrc.
self.run("cp $HOME/.bashrc /tmp")
# Write conda path to bashrc.
self.run("echo 'if ! echo $PATH | egrep -q \"(^|:)%s($|:)\" ; "
"then export PATH=%s:$PATH ; fi' > $HOME/.bashrc" %
(self.MINICONDA_PATH, self.MINICONDA_PATH))
# Write SOLVCON settings to bashrc.
self.run("echo 'export SCSRC=%s' >> $HOME/.bashrc" %
self.SOLVCON_PATH)
self.run("echo 'export PYTHONPATH=$SCSRC' >> $HOME/.bashrc")
self.run("echo 'if ! echo $PATH | egrep -q \"(^|:)%s($|:)\" ; "
"then export PATH=%s:$PATH ; fi' >> $HOME/.bashrc" %
("$SCSRC", "$SCSRC"))
# Copy back the backed up bashrc.
self.run("cat /tmp/.bashrc >> $HOME/.bashrc; rm -f /tmp/.bashrc")
def build_solvcon(self):
self.run("scons scmods", wd="$SCSRC")
self.run("nosetests", wd="$SCSRC")
class AwsOperatorRegistry(dict):
@classmethod
def populate(cls):
regy = cls()
regy["RHEL64"] = AwsOperator(
region="us-west-2", ami="ami-77d7a747",
osfamily="RHEL", username="ec2-user")
regy["trusty64"] = AwsOperator(
region="us-west-2", ami="ami-d34032e3",
osfamily="Ubuntu", username="ubuntu")
regy[""] = regy["trusty64"]
return regy
aoregy = AwsOperatorRegistry.populate()
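# Hedged usage sketch (editor addition): how an AwsOperator from the registry
# might be driven once an EC2 instance is available. Obtaining the boto
# instance itself is left as a placeholder argument, since launching instances
# is outside the scope of this module.
def _example_deploy(instance):
    """instance: an already-running boto.ec2.instance.Instance (assumed)."""
    op = aoregy["trusty64"]          # pick the Ubuntu profile from the registry
    op.host = AwsHost(instance)      # swap the no-op Host for a real EC2 host
    op.connect()                     # SSH in as the profile's username
    op.deploy_minimal()              # OS packages, miniconda and dependencies
    op.obtain_solvcon()              # clone the SOLVCON repository
    op.set_config_files()            # conda channel and bashrc settings
    op.build_solvcon()               # scons build plus nosetests
    op.disconnect()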
|
yungyuc/solvcon
|
solvcon/cloud.py
|
Python
|
bsd-3-clause
| 13,444
|
[
"VTK"
] |
3d645f0fda97233ba5799119b8725947b062ca96f9e4c6baf86d9c84354f07f2
|
#/###################/#
# Import modules
#
#ImportModules
import ShareYourSystem as SYS
#/###################/#
# Build the model
#
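# The neuron group below (Brian 2 syntax) defines 100 noisy leaky integrate-and-fire
# units: v relaxes towards -60 mV with a 20 ms time constant, receives a constant
# 11 mV drive plus Gaussian noise (the xi term), spikes above -50 mV and is reset
# to -70 mV.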
#Define an instance
MyBrianer=SYS.BrianerClass(
).mapSet(
{
'BrianingNeurongroupDict':{
'N':100,
'model':
'''
dv/dt = (-(v+60*mV)+11*mV + 5.*mV*sqrt(20.*ms)*xi)/(20*ms) : volt
''',
'threshold':'v>-50*mV',
'reset':'v=-70*mV'
},
'-Traces':{
'|v':{
'RecordingInitMeanVariable':-70.,
'RecordingInitStdVariable':5.,
'-Samples':{
'|Default':{
'RecordingLabelVariable':[0,1],
'ViewingXScaleFloat':1000.,
'ViewingYScaleFloat':1000.
}
}
}
},
'-Events':{
'|Default_Events':{
}
},
'-Rates':{
'|Default_Rates':{
'BrianingWindowFloat':10. #(ms)
}
}
}
).brian(
)
#/###################/#
# Do one simulation
#
MyBrianer.simulate(
500.
)
#/###################/#
# View
#
"""
MyBrianer[
'/-Traces/|*v/-Samples/|Default'
].view(
).pyplot(
).show(
)
"""
"""
MyBrianer['/-Events/|Default'].view(
).pyplot(
).show(
)
"""
MyBrianer.view(
).pyplot(
).show(
)
#/###################/#
# Print
#
#Print the AttestedStr
print('MyBrianer is ')
SYS._print(MyBrianer)
|
Ledoux/ShareYourSystem
|
Pythonlogy/build/lib/ShareYourSystem/Standards/Recorders/Brianer/01_ExampleCell.py
|
Python
|
mit
| 1,259
|
[
"Brian"
] |
fbd88d4f30082f13230b9845e674b56021e7de22dbc873742a9e9435b4fe5e82
|
#!/usr/bin/env python2
# * **************************************************************** **
# File: audio.py
# Requires: Python 2.7+ (but not Python 3.0+)
# Note: For history, changes and dates for this file, consult git.
# Author: Brian Danilko, Likeable Software (brian@likeablesoftware.com)
# Copyright 2015-2017 Microbric Pty Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (in the doc/licenses directory)
# for more details.
#
# * **************************************************************** */
""" Module providing conversions to, and analysis of, wav files """
from __future__ import print_function
from __future__ import absolute_import
import wave
import tempfile
import os.path
import sys
DOWNLOAD_BYTES_BETWEEN_PAUSES = 1536
DOWNLOAD_PAUSE_MSECS = 2000
WAVE_SAMPLE_RATE_HZ = 44100
# A ramping function between two different samples - the
# values are the percent of the change to apply in each sample.
# values supplied by Brenton (24/Jan/16)
RAMP = (1, 3, 7, 16, 50, 84, 93, 97, 99)
# A quanta is a 1/2 a microsecond. As the sample rate is in
# Hz, we have to divide it by 2000 to get samples per 0.5ms.
SAMPLES_PER_QUANTA = WAVE_SAMPLE_RATE_HZ / 2000
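# For example, at the default 44100 Hz this evaluates to 44100 / 2000 = 22.05,
# which Python 2 integer division truncates to 22 samples per half-millisecond.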
PULSE_AUDIO = True
# ############ main audio creator class ###############################################
# i2b is function for converting int to byte
if sys.version_info[0] == 2:
i2b = chr
else:
i2b = lambda x: bytes([x])
class Output(object):
"""Create a wav file"""
def __init__(self, dir, nameOverride=None):
"""Create an audio file within a directory (typically creating a new name)"""
self.directory = dir
if nameOverride:
self.filename = nameOverride
self.fileHandle = open(self.filename, "wb")
else:
self.fileHandle = tempfile.NamedTemporaryFile(mode="wb",
prefix="tok", suffix=".wav",
dir=self.directory, delete=False)
self.filename = self.fileHandle.name
self.sampleRate = 44100
self.samplesPerQuanta = self.sampleRate / 2000
self.lastLeft = 128
self.lastRight = 128
self.downloadBytesBetweenPauses = 1536
self.downloadPauseMsecs = 2000
if (PULSE_AUDIO):
self.audio_func = self.createAudioWithPulses
self.silence_func = self.createSilenceWithPulses
else:
self.audio_func = self.createAudioRamping
self.silence_func = self.createSilenceRamping
def SetSampleRate(self, sampleRate):
self.sampleRate = sampleRate
self.samplesPerQuanta = self.sampleRate / 2000
def GetWavPath(self):
return os.path.join(self.directory, self.filename)
def CreateDebugWav(self):
waveWriter = wave.open(self.fileHandle)
waveWriter.setnchannels(2)
waveWriter.setsampwidth(1)
waveWriter.setframerate(self.sampleRate)
waveWriter.setcomptype("NONE", "")
# now generate the test file
data = chr(255) + chr(0) + \
chr(128) + chr(128) + \
chr(0) + chr(255) + \
chr(128) + chr(128)
count = 2000
while count > 0:
waveWriter.writeframes(data)
count -= 1
waveWriter.close()
# def WriteProgramWav(self, binaryString):
# self.WriteWav(TOKEN_DOWNLOAD_STR + TOKEN_VERSION_STR + binaryString)
# def WriteFirmwareWav(self, binaryString):
# self.WriteWav(FIRMWARE_DOWNLOAD_STR + FIRMWARE_VERSION_STR + binaryString)
def WriteWav(self, binaryData):
waveWriter = wave.open(self.fileHandle)
waveWriter.setnchannels(2)
waveWriter.setsampwidth(1)
waveWriter.setframerate(self.sampleRate)
waveWriter.setcomptype("NONE", "")
self.lastLeft = 128
self.lastRight = 128
self.ConvertWithPause(binaryData, waveWriter)
waveWriter.close()
def ConvertWithPause(self, binString, waveWriter):
index = 0
preamble = 0
pauseCount = 0
# 500 milliseconds (1000 midQuantas) of silence at the beginning
waveWriter.writeframes(self.silence_func(1000, self.sampleRate))
preamble = 0
while (preamble < self.samplesPerQuanta):
waveWriter.writeframes(self.audio_func(0, self.sampleRate))
preamble += 1
while (index < len(binString)):
if (pauseCount == self.downloadBytesBetweenPauses):
preamble = 0
while (preamble < self.downloadPauseMsecs):
waveWriter.writeframes(self.audio_func(0, self.sampleRate))
preamble += 1
pauseCount = 0
data = binString[index]
# start
waveWriter.writeframes(self.audio_func(6, self.sampleRate))
# now the actual data -- big endian or little endian
mask = 1
ones = 0
while (mask <= 0x80):
if (data & mask):
waveWriter.writeframes(self.audio_func(2, self.sampleRate))
ones += 1
else:
waveWriter.writeframes(self.audio_func(0, self.sampleRate))
mask <<= 1
# add stop - BBB Changed to 8 - differs from start
waveWriter.writeframes(self.audio_func(8, self.sampleRate))
index += 1
pauseCount += 1
# added to end as well - to ensure entire data is played. - ## BBB
preamble = 0
while (preamble < self.samplesPerQuanta):
waveWriter.writeframes(self.audio_func(0, self.sampleRate))
preamble += 1
# 500 milliseconds (1000 midQuantas) of silence at the end
waveWriter.writeframes(self.silence_func(1000, self.sampleRate))
def createAudioRamping(self, midQuantas, sample_rate):
data = b""
samples_per_quanta = sample_rate / 2000
# write fars
data += self.ramp(255, 0, samples_per_quanta)
# write nears
data += self.ramp(0, 255, samples_per_quanta)
if (midQuantas > 0):
data += self.ramp(128, 128, midQuantas * samples_per_quanta)
return data
def createAudioWithPulses(self, midQuantas, sample_rate):
data = b""
samples_per_quanta = sample_rate / 2000
total_samples = 2 * samples_per_quanta + (midQuantas * samples_per_quanta)
# write far
data += i2b(255) + i2b(0)
# write near
data += i2b(0) + i2b(255)
count = 2
while count < total_samples:
data += i2b(128) + i2b(128)
count += 1
return data
def createSilenceRamping(self, midQuantas, sample_rate):
samples_per_quanta = sample_rate / 2000
return self.ramp(128, 128, midQuantas * samples_per_quanta)
def createSilenceWithPulses(self, midQuantas, sample_rate):
data = b""
samples_per_quanta = sample_rate / 2000
total_samples = midQuantas * samples_per_quanta
count = 0
while count < total_samples:
data += i2b(128) + i2b(128)
count += 1
return data
def ramp(self, newLeft, newRight, samples):
# print "ramp", samples
data = b""
if (samples < len(RAMP)):
print("ERROR - audio transition is smaller then the ramp size")
sys.exit(1)
diffLeft = newLeft - self.lastLeft
diffRight = newRight - self.lastRight
count = 0
while (count < len(RAMP)):
left = int(self.lastLeft + (diffLeft * RAMP[count] / 100))
right = int(self.lastRight + (diffRight * RAMP[count] / 100))
# print "Ramp %d/%d" % (left, right)
data += i2b(left) + i2b(right)
count += 1
while (count < samples):
# print "Stable %d/%d" % (newLeft, newRight)
data += i2b(newLeft) + i2b(newRight)
count += 1
self.lastLeft = newLeft
self.lastRight = newRight
return data
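
# A minimal usage sketch (illustrative; the directory and payload are hypothetical):
#
#   out = Output("/tmp")                    # creates a temporary tok*.wav in /tmp
#   out.WriteWav(bytearray([0x01, 0x02]))   # encode two bytes as stereo pulses
#   print(out.filename)                     # path of the generated wav file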
|
Bdanilko/EdPy
|
src/lib/audio.py
|
Python
|
gpl-2.0
| 8,576
|
[
"Brian"
] |
05197e76145e7eceab4c9da58eebabe7da2fbe437d627e9066fec19944b01174
|
import re
import os
import random
import pickle
import warnings
import shlex
import shutil
from copy import deepcopy
from collections import defaultdict, Counter
from subprocess import call, Popen, PIPE
import glob
import numpy as np
import pandas as pd
import matplotlib
# try:
# os.environ['DISPLAY']
# except KeyError:
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
with warnings.catch_warnings():
warnings.simplefilter('ignore') # catch experimental ipython widget warning
import seaborn as sns
import bhtsne
from scipy.sparse import csr_matrix, find
from scipy.sparse.linalg import eigs
from numpy.linalg import norm
from scipy.stats import gaussian_kde
from numpy.core.umath_tests import inner1d
from sklearn.neighbors import NearestNeighbors
import fcsparser
import phenograph
import wishbone
# set plotting defaults
with warnings.catch_warnings():
warnings.simplefilter('ignore') # catch experimental ipython widget warning
sns.set(context="paper", style='ticks', font_scale=1.5, font='Bitstream Vera Sans')
cmap = matplotlib.cm.Spectral_r
size = 8
def qualitative_colors(n):
""" Generalte list of colors
:param n: Number of colors
"""
return sns.color_palette('Set1', n)
def get_fig(fig=None, ax=None, figsize=[6.5, 6.5]):
"""fills in any missing axis or figure with the currently active one
:param ax: matplotlib Axis object
:param fig: matplotlib Figure object
"""
if not fig:
fig = plt.figure(figsize=figsize)
if not ax:
ax = plt.gca()
return fig, ax
def density_2d(x, y):
"""return x and y and their density z, sorted by their density (smallest to largest)
:param x:
:param y:
:return:
"""
xy = np.vstack([np.ravel(x), np.ravel(y)])
z = gaussian_kde(xy)(xy)
i = np.argsort(z)
return np.ravel(x)[i], np.ravel(y)[i], np.arcsinh(z[i])
class SCData:
def __init__(self, data, data_type='sc-seq', metadata=None):
"""
Container class for single cell data
:param data: DataFrame of cells X genes representing expression
:param data_type: Type of the data: Can be either 'sc-seq' or 'masscyt'
:param metadata: None or DataFrame representing metadata about the cells
"""
if not (isinstance(data, pd.DataFrame)):
            raise TypeError('data must be of type pd.DataFrame')
if not data_type in ['sc-seq', 'masscyt']:
raise RuntimeError('data_type must be either sc-seq or masscyt')
if metadata is None:
metadata = pd.DataFrame(index=data.index, dtype='O')
self._data = data
self._metadata = metadata
self._data_type = data_type
self._normalized = False
self._pca = None
self._tsne = None
self._diffusion_eigenvectors = None
self._diffusion_eigenvalues = None
self._diffusion_map_correlations = None
self._normalized = False
self._cluster_assignments = None
# Library size
self._library_sizes = None
def save(self, fout: str):# -> None:
"""
:param fout: str, name of archive to store pickled SCData data in. Should end
in '.p'.
:return: None
"""
with open(fout, 'wb') as f:
pickle.dump(vars(self), f)
def save_as_wishbone(self, fout: str):
"""
:param fout: str, name of archive to store pickled Wishbone data in. Should end
in '.p'.
:return: None
"""
wb = wishbone.wb.Wishbone(self, True)
wb.save(fout)
@classmethod
def load(cls, fin):
"""
:param fin: str, name of pickled archive containing SCData data
:return: SCData
"""
with open(fin, 'rb') as f:
data = pickle.load(f)
scdata = cls(data['_data'], data['_data_type'], data['_metadata'])
del data['_data']
del data['_data_type']
del data['_metadata']
for k, v in data.items():
setattr(scdata, k[1:], v)
return scdata
def __repr__(self):
c, g = self.data.shape
_repr = ('SCData: {c} cells x {g} genes\n'.format(g=g, c=c))
for k, v in sorted(vars(self).items()):
if not (k == '_data'):
_repr += '\n{}={}'.format(k[1:], 'None' if v is None else 'True')
return _repr
@property
def data_type(self):
return self._data_type
@property
def data(self):
return self._data
@data.setter
def data(self, item):
if not (isinstance(item, pd.DataFrame)):
raise TypeError('SCData.data must be of type DataFrame')
self._data = item
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, item):
if not isinstance(item, pd.DataFrame):
raise TypeError('SCData.metadata must be of type DataFrame')
self._metadata = item
@property
def pca(self):
return self._pca
@pca.setter
def pca(self, item):
if not (isinstance(item, dict) or item is None):
raise TypeError('self.pca must be a dictionary of pd.DataFrame object')
self._pca = item
@property
def tsne(self):
return self._tsne
@tsne.setter
def tsne(self, item):
if not (isinstance(item, pd.DataFrame) or item is None):
raise TypeError('self.tsne must be a pd.DataFrame object')
self._tsne = item
@property
def diffusion_eigenvectors(self):
return self._diffusion_eigenvectors
@diffusion_eigenvectors.setter
def diffusion_eigenvectors(self, item):
if not (isinstance(item, pd.DataFrame) or item is None):
raise TypeError('self.diffusion_eigenvectors must be a pd.DataFrame object')
self._diffusion_eigenvectors = item
@property
def diffusion_eigenvalues(self):
return self._diffusion_eigenvalues
@diffusion_eigenvalues.setter
def diffusion_eigenvalues(self, item):
if not (isinstance(item, pd.DataFrame) or item is None):
raise TypeError('self.diffusion_eigenvalues must be a pd.DataFrame object')
self._diffusion_eigenvalues = item
@property
def diffusion_map_correlations(self):
return self._diffusion_map_correlations
@diffusion_map_correlations.setter
def diffusion_map_correlations(self, item):
if not (isinstance(item, pd.DataFrame) or item is None):
raise TypeError('self.diffusion_map_correlations must be a pd.DataFrame'
'object')
self._diffusion_map_correlations = item
@property
def library_sizes(self):
return self._library_sizes
@library_sizes.setter
def library_sizes(self, item):
        if not (isinstance(item, pd.Series) or item is None):
            raise TypeError('self.library_sizes must be a pd.Series object')
        self._library_sizes = item
@property
def cluster_assignments(self):
return self._cluster_assignments
@cluster_assignments.setter
def cluster_assignments(self, item):
if not (isinstance(item, pd.Series) or item is None):
raise TypeError('self.cluster_assignments must be a pd.Series '
'object')
self._cluster_assignments = item
@classmethod
def from_csv(cls, counts_csv_file, data_type, cell_axis = 0, normalize=True):
if not data_type in ['sc-seq', 'masscyt']:
raise RuntimeError('data_type must be either sc-seq or masscyt')
# Read in csv file
df = pd.read_csv( counts_csv_file, sep=None, header=0, index_col= 0,
engine='python' )
if cell_axis != 0:
df = df.transpose()
# Construct class object
scdata = cls( df, data_type=data_type )
# Normalize if specified
if data_type == 'sc-seq':
scdata = scdata.normalize_scseq_data( )
return scdata
@classmethod
def from_fcs(cls, fcs_file, cofactor=5,
metadata_channels=['Time', 'Event_length', 'DNA1', 'DNA2', 'Cisplatin', 'beadDist', 'bead1']):
# Parse the fcs file
text, data = fcsparser.parse( fcs_file )
data = data.astype(np.float64)
# Extract the S and N features (Indexing assumed to start from 1)
# Assumes channel names are in S
no_channels = text['$PAR']
channel_names = [''] * no_channels
for i in range(1, no_channels+1):
# S name
try:
channel_names[i - 1] = text['$P%dS' % i]
except KeyError:
channel_names[i - 1] = text['$P%dN' % i]
data.columns = channel_names
# Metadata and data
metadata_channels = data.columns.intersection(metadata_channels)
data_channels = data.columns.difference( metadata_channels )
metadata = data[metadata_channels]
data = data[data_channels]
# Transform if necessary
if cofactor is not None and cofactor > 0:
data = np.arcsinh(np.divide( data, cofactor ))
# Create and return scdata object
scdata = cls(data, 'masscyt', metadata)
return scdata
def normalize_scseq_data(self):
"""
Normalize single cell RNA-seq data: Divide each cell by its molecule count
and multiply counts of cells by the median of the molecule counts
:return: SCData
"""
molecule_counts = self.data.sum(axis=1)
data = self.data.div(molecule_counts, axis=0)\
.mul(np.median(molecule_counts), axis=0)
scdata = SCData(data=data, metadata=self.metadata)
scdata._normalized = True
# check that none of the genes are empty; if so remove them
nonzero_genes = scdata.data.sum(axis=0) != 0
scdata.data = scdata.data.ix[:, nonzero_genes].astype(np.float32)
# set unnormalized_cell_sums
self.library_sizes = molecule_counts
scdata._library_sizes = molecule_counts
return scdata
def run_pca(self, n_components=100):
"""
Principal component analysis of the data.
:param n_components: Number of components to project the data
"""
X = self.data.values
# Make sure data is zero mean
X = np.subtract(X, np.amin(X))
X = np.divide(X, np.amax(X))
# Compute covariance matrix
if (X.shape[1] < X.shape[0]):
C = np.cov(X, rowvar=0)
# if N>D, we better use this matrix for the eigendecomposition
else:
C = np.multiply((1/X.shape[0]), np.dot(X, X.T))
# Perform eigendecomposition of C
C[np.where(np.isnan(C))] = 0
C[np.where(np.isinf(C))] = 0
l, M = np.linalg.eig(C)
# Sort eigenvectors in descending order
ind = np.argsort(l)[::-1]
l = l[ind]
if n_components < 1:
n_components = np.where(np.cumsum(np.divide(l, np.sum(l)), axis=0) >= n_components)[0][0] + 1
print('Embedding into ' + str(n_components) + ' dimensions.')
if n_components > M.shape[1]:
n_components = M.shape[1]
print('Target dimensionality reduced to ' + str(n_components) + '.')
M = M[:, ind[:n_components]]
l = l[:n_components]
# Apply mapping on the data
if X.shape[1] >= X.shape[0]:
M = np.multiply(np.dot(X.T, M), (1 / np.sqrt(X.shape[0] * l)).T)
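            # Dual-PCA back-mapping: the eigenvectors were computed in cell space
            # (from X X^T / N), so project them into gene space and rescale by
            # 1 / sqrt(N * lambda) to recover the gene loadings.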
loadings = pd.DataFrame(data=M, index=self.data.columns)
l = pd.DataFrame(l)
self.pca = {'loadings': loadings, 'eigenvalues': l}
def plot_pca_variance_explained(self, n_components=30,
fig=None, ax=None, ylim=(0, 0.1)):
""" Plot the variance explained by different principal components
:param n_components: Number of components to show the variance
:param ylim: y-axis limits
:param fig: matplotlib Figure object
:param ax: matplotlib Axis object
:return: fig, ax
"""
if self.pca is None:
raise RuntimeError('Please run run_pca() before plotting')
fig, ax = get_fig(fig=fig, ax=ax)
ax.plot(np.ravel(self.pca['eigenvalues'].values))
plt.ylim(ylim)
plt.xlim((0, n_components))
plt.xlabel('Components')
plt.ylabel('Variance explained')
plt.title('Principal components')
sns.despine(ax=ax)
return fig, ax
def run_tsne(self, n_components=15, perplexity=30, rand_seed=-1):
""" Run tSNE on the data. tSNE is run on the principal component projections
for single cell RNA-seq data and on the expression matrix for mass cytometry data
:param n_components: Number of components to use for running tSNE for single cell
RNA-seq data. Ignored for mass cytometry
:return: None
"""
# Work on PCA projections if data is single cell RNA-seq
data = deepcopy(self.data)
if self.data_type == 'sc-seq':
if self.pca is None:
raise RuntimeError('Please run PCA using run_pca before running tSNE for single cell RNA-seq')
data -= np.min(np.ravel(data))
data /= np.max(np.ravel(data))
data = pd.DataFrame(np.dot(data, self.pca['loadings'].iloc[:, 0:n_components]),
index=self.data.index)
# Reduce perplexity if necessary
data = data.astype(np.float64)
perplexity_limit = 15
if data.shape[0] < 100 and perplexity > perplexity_limit:
print('Reducing perplexity to %d since there are <100 cells in the dataset. ' % perplexity_limit)
perplexity = perplexity_limit
self.tsne = pd.DataFrame(bhtsne.tsne(data, perplexity=perplexity, rand_seed=rand_seed),
index=self.data.index, columns=['x', 'y'])
def plot_tsne(self, fig=None, ax=None, title='tSNE projection'):
"""Plot tSNE projections of the data
:param fig: matplotlib Figure object
:param ax: matplotlib Axis object
:param title: Title for the plot
"""
if self.tsne is None:
raise RuntimeError('Please run tSNE using run_tsne before plotting ')
fig, ax = get_fig(fig=fig, ax=ax)
plt.scatter(self.tsne['x'], self.tsne['y'], s=size,
color=qualitative_colors(2)[1])
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax.set_title(title)
return fig, ax
def plot_tsne_by_cell_sizes(self, fig=None, ax=None, vmin=None, vmax=None):
"""Plot tSNE projections of the data with cells colored by molecule counts
:param fig: matplotlib Figure object
:param ax: matplotlib Axis object
:param vmin: Minimum molecule count for plotting
:param vmax: Maximum molecule count for plotting
:param title: Title for the plot
"""
if self.data_type == 'masscyt':
            raise RuntimeError('plot_tsne_by_cell_sizes is not applicable '
                               'for mass cytometry data.')
fig, ax = get_fig(fig, ax)
if self.tsne is None:
raise RuntimeError('Please run run_tsne() before plotting.')
if self._normalized:
sizes = self.library_sizes
else:
sizes = self.data.sum(axis=1)
plt.scatter(self.tsne['x'], self.tsne['y'], s=size, c=sizes, cmap=cmap)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
plt.colorbar()
return fig, ax
def run_phenograph(self, n_pca_components=15, **kwargs):
""" Identify clusters in the data using phenograph. Phenograph is run on the principal component projections
for single cell RNA-seq data and on the expression matrix for mass cytometry data
:param n_pca_components: Number of components to use for running tSNE for single cell
RNA-seq data. Ignored for mass cytometry
:param kwargs: Optional arguments to phenograph
:return: None
"""
data = deepcopy(self.data)
if self.data_type == 'sc-seq':
data -= np.min(np.ravel(data))
data /= np.max(np.ravel(data))
data = pd.DataFrame(np.dot(data, self.pca['loadings'].iloc[:, 0:n_pca_components]),
index=self.data.index)
communities, graph, Q = phenograph.cluster(data, **kwargs)
self.cluster_assignments = pd.Series(communities, index=data.index)
def plot_phenograph_clusters(self, fig=None, ax=None, labels=None):
"""Plot phenograph clustes on the tSNE map
:param fig: matplotlib Figure object
:param ax: matplotlib Axis object
:param vmin: Minimum molecule count for plotting
:param vmax: Maximum molecule count for plotting
:param labels: Dictionary of labels for each cluster
:return fig, ax
"""
if self.tsne is None:
raise RuntimeError('Please run tSNE before plotting phenograph clusters.')
fig, ax = get_fig(fig=fig, ax=ax)
clusters = sorted(set(self.cluster_assignments))
colors = qualitative_colors(len(clusters))
for i in range(len(clusters)):
if labels:
label=labels[i]
else:
label = clusters[i]
data = self.tsne.ix[self.cluster_assignments == clusters[i], :]
ax.plot(data['x'], data['y'], c=colors[i], linewidth=0, marker='o',
markersize=np.sqrt(size), label=label)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), markerscale=3)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
return fig, ax
def summarize_phenograph_clusters(self, fig=None, ax=None):
"""Average expression of genes in phenograph clusters
:param fig: matplotlib Figure object
:param ax: matplotlib Axis object
:return fig, ax
"""
if self.cluster_assignments is None:
raise RuntimeError('Please run phenograph before deriving summary of gene expression.')
# Calculate the means
means = self.data.groupby(self.cluster_assignments).apply(lambda x: np.mean(x))
# Calculate percentages
counter = Counter(self.cluster_assignments)
means.index = ['%d (%.2f%%)' % (i, counter[i]/self.data.shape[0] * 100) \
for i in means.index]
# Plot
fig, ax = get_fig(fig, ax, [8, 5] )
sns.heatmap(means)
plt.ylabel('Phenograph Clusters')
plt.xlabel('Markers')
return fig, ax
def select_clusters(self, clusters):
"""Subselect cells from specific phenograph clusters
:param clusters: List of phenograph clusters to select
:return scdata
"""
if self.cluster_assignments is None:
raise RuntimeError('Please run phenograph before subselecting cells.')
if len(set(clusters).difference(self.cluster_assignments)) > 0 :
raise RuntimeError('Some of the clusters specified are not present. Please select a subset of phenograph clusters')
# Subset of cells to use
cells = self.data.index[self.cluster_assignments.isin( clusters )]
# Create new SCData object
data = self.data.ix[cells]
if self.metadata is not None:
meta = self.metadata.ix[cells]
scdata = SCData( data, self.data_type, meta )
return scdata
def run_diffusion_map(self, knn=10, epsilon=1,
n_diffusion_components=10, n_pca_components=15, markers=None):
""" Run diffusion maps on the data. Run on the principal component projections
for single cell RNA-seq data and on the expression matrix for mass cytometry data
:param knn: Number of neighbors for graph construction to determine distances between cells
:param epsilon: Gaussian standard deviation for converting distances to affinities
        :param n_diffusion_components: Number of diffusion components to generate
:param n_pca_components: Number of components to use for running tSNE for single cell
RNA-seq data. Ignored for mass cytometry
:return: None
"""
data = deepcopy(self.data)
if self.data_type == 'sc-seq':
if self.pca is None:
raise RuntimeError('Please run PCA using run_pca before running diffusion maps for single cell RNA-seq')
data = deepcopy(self.data)
data -= np.min(np.ravel(data))
data /= np.max(np.ravel(data))
data = pd.DataFrame(np.dot(data, self.pca['loadings'].iloc[:, 0:n_pca_components]),
index=self.data.index)
if markers is None:
markers = self.data.columns
if self.data_type == 'masscyt':
data = deepcopy(self.data[markers])
# Nearest neighbors
N = data.shape[0]
nbrs = NearestNeighbors(n_neighbors=knn).fit(data)
distances, indices = nbrs.kneighbors(data)
# Adjacency matrix
rows = np.zeros(N * knn, dtype=np.int32)
cols = np.zeros(N * knn, dtype=np.int32)
dists = np.zeros(N * knn)
location = 0
for i in range(N):
inds = range(location, location + knn)
rows[inds] = indices[i, :]
cols[inds] = i
dists[inds] = distances[i, :]
location += knn
W = csr_matrix( (dists, (rows, cols)), shape=[N, N] )
# Symmetrize W
W = W + W.T
# Convert to affinity (with selfloops)
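        # Each kNN distance d is turned into an affinity exp(-d / epsilon**2); the
        # appended zero-distance entries give every cell a self-loop of weight 1.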
rows, cols, dists = find(W)
rows = np.append(rows, range(N))
cols = np.append(cols, range(N))
dists = np.append(dists/(epsilon ** 2), np.zeros(N))
W = csr_matrix( (np.exp(-dists), (rows, cols)), shape=[N, N] )
# Create D
D = np.ravel(W.sum(axis = 1))
D[D!=0] = 1/D[D!=0]
# Symmetric markov normalization
D = csr_matrix((np.sqrt(D), (range(N), range(N))), shape=[N, N])
P = D
T = D.dot(W).dot(D)
T = (T + T.T) / 2
# Eigen value decomposition
D, V = eigs(T, n_diffusion_components, tol=1e-4, maxiter=1000)
D = np.real(D)
V = np.real(V)
inds = np.argsort(D)[::-1]
D = D[inds]
V = V[:, inds]
V = P.dot(V)
# Normalize
for i in range(V.shape[1]):
V[:, i] = V[:, i] / norm(V[:, i])
V = np.round(V, 10)
# Update object
self.diffusion_eigenvectors = pd.DataFrame(V, index=self.data.index)
self.diffusion_eigenvalues = pd.DataFrame(D)
def plot_diffusion_components(self, title='Diffusion Components'):
""" Plots the diffusion components on tSNE maps
:return: fig, ax
"""
if self.tsne is None:
raise RuntimeError('Please run tSNE before plotting diffusion components.')
if self.diffusion_eigenvectors is None:
raise RuntimeError('Please run diffusion maps using run_diffusion_map before plotting')
height = int(2 * np.ceil(self.diffusion_eigenvalues.shape[0] / 5))
width = 10
fig = plt.figure(figsize=[width, height])
n_rows = int(height / 2)
n_cols = int(width / 2)
gs = plt.GridSpec(n_rows, n_cols)
for i in range(self.diffusion_eigenvectors.shape[1]):
ax = plt.subplot(gs[i // n_cols, i % n_cols])
plt.scatter(self.tsne['x'], self.tsne['y'], c=self.diffusion_eigenvectors[i],
cmap=cmap, edgecolors='none', s=size)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax.set_aspect('equal')
plt.title( 'Component %d' % i, fontsize=10 )
# fig.suptitle(title, fontsize=12)
return fig, ax
def plot_diffusion_eigen_vectors(self, fig=None, ax=None, title='Diffusion eigen vectors'):
""" Plots the eigen values associated with diffusion components
:return: fig, ax
"""
if self.diffusion_eigenvectors is None:
raise RuntimeError('Please run diffusion maps using run_diffusion_map before plotting')
fig, ax = get_fig(fig=fig, ax=ax)
ax.plot(np.ravel(self.diffusion_eigenvalues.values))
plt.scatter( range(len(self.diffusion_eigenvalues)),
self._diffusion_eigenvalues, s=20, edgecolors='none', color='red' )
plt.xlabel( 'Diffusion components')
plt.ylabel('Eigen values')
plt.title( title )
plt.xlim([ -0.1, len(self.diffusion_eigenvalues) - 0.9])
sns.despine(ax=ax)
return fig, ax
@staticmethod
def _correlation(x: np.array, vals: np.array):
x = x[:, np.newaxis]
mu_x = x.mean() # cells
mu_vals = vals.mean(axis=0) # cells by gene --> cells by genes
sigma_x = x.std()
sigma_vals = vals.std(axis=0)
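        # Vectorized Pearson correlation of x against every gene column:
        # corr = (E[x*g] - mu_x*mu_g) / (sigma_x * sigma_g)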
return ((vals * x).mean(axis=0) - mu_vals * mu_x) / (sigma_vals * sigma_x)
def run_diffusion_map_correlations(self, components=None, no_cells=10):
""" Determine gene expression correlations along diffusion components
:param components: List of components to generate the correlations. All the components
are used by default.
:param no_cells: Window size for smoothing
:return: None
"""
if self.data_type == 'masscyt':
raise RuntimeError('This function is designed to work for single cell RNA-seq')
if self.diffusion_eigenvectors is None:
raise RuntimeError('Please run diffusion maps using run_diffusion_map before determining correlations')
if components is None:
components = np.arange(self.diffusion_eigenvectors.shape[1])
else:
components = np.array(components)
components = components[components != 0]
# Container
diffusion_map_correlations = np.empty((self.data.shape[1],
self.diffusion_eigenvectors.shape[1]),
dtype=np.float)
for component_index in components:
component_data = self.diffusion_eigenvectors.ix[:, component_index]
order = self.data.index[np.argsort(component_data)]
x = component_data[order].rolling(no_cells).mean()[no_cells:]
# x = pd.rolling_mean(component_data[order], no_cells)[no_cells:]
# this fancy indexing will copy self.data
vals = self.data.ix[order, :].rolling(no_cells).mean()[no_cells:].values
# vals = pd.rolling_mean(self.data.ix[order, :], no_cells, axis=0)[no_cells:]
cor_res = self._correlation(x, vals)
# assert cor_res.shape == (gene_shape,)
diffusion_map_correlations[:, component_index] = self._correlation(x, vals)
# this is sorted by order, need it in original order (reverse the sort)
self.diffusion_map_correlations = pd.DataFrame(diffusion_map_correlations[:, components],
index=self.data.columns, columns=components)
def plot_gene_component_correlations(
self, components=None, fig=None, ax=None,
title='Gene vs. Diffusion Component Correlations'):
""" plots gene-component correlations for a subset of components
:param components: Iterable of integer component numbers
:param fig: Figure
:param ax: Axis
:param title: str, title for the plot
:return: fig, ax
"""
fig, ax = get_fig(fig=fig, ax=ax)
if self.diffusion_map_correlations is None:
raise RuntimeError('Please run determine_gene_diffusion_correlations() '
'before attempting to visualize the correlations.')
if components is None:
components = self.diffusion_map_correlations.columns
colors = qualitative_colors(len(components))
for c,color in zip(components, colors):
with warnings.catch_warnings():
warnings.simplefilter('ignore') # catch experimental ipython widget warning
sns.kdeplot(self.diffusion_map_correlations[c].fillna(0), label=c,
ax=ax, color=color)
sns.despine(ax=ax)
ax.set_title(title)
ax.set_xlabel('correlation')
ax.set_ylabel('gene density')
plt.legend()
return fig, ax
@staticmethod
def _gmt_options():
mouse_options = os.listdir(os.path.expanduser('~/.wishbone/tools/mouse'))
human_options = os.listdir(os.path.expanduser('~/.wishbone/tools/human'))
print('Available GSEA .gmt files:\n\nmouse:\n{m}\n\nhuman:\n{h}\n'.format(
m='\n'.join(mouse_options),
h='\n'.join(human_options)))
print('Please specify the gmt_file parameter as gmt_file=(organism, filename)')
@staticmethod
def _gsea_process(c, diffusion_map_correlations, output_stem, gmt_file):
# save the .rnk file
out_dir, out_prefix = os.path.split(output_stem)
genes_file = '{stem}_cmpnt_{component}.rnk'.format(
stem=output_stem, component=c)
ranked_genes = diffusion_map_correlations.ix[:, c]\
.sort_values(inplace=False, ascending=False)
# set any NaN to 0
ranked_genes = ranked_genes.fillna(0)
# dump to file
pd.DataFrame(ranked_genes).to_csv(genes_file, sep='\t', header=False)
# Construct the GSEA call
cmd = shlex.split(
'java -cp {user}/.wishbone/tools/gsea2-2.2.1.jar -Xmx1g '
'xtools.gsea.GseaPreranked -collapse false -mode Max_probe -norm meandiv '
'-nperm 1000 -include_only_symbols true -make_sets true -plot_top_x 0 '
'-set_max 500 -set_min 50 -zip_report false -gui false -rnk {rnk} '
'-rpt_label {out_prefix}_{component} -out {out_dir}/ -gmx {gmt_file}'
''.format(user=os.path.expanduser('~'), rnk=genes_file,
out_prefix=out_prefix, component=c, out_dir=out_dir,
gmt_file=gmt_file))
# Call GSEA
p = Popen(cmd, stderr=PIPE)
_, err = p.communicate()
# remove annoying suffix from GSEA
if err:
return err
else:
pattern = out_prefix + '_' + str(c) + '.GseaPreranked.[0-9]*'
repl = out_prefix + '_' + str(c)
files = os.listdir(out_dir)
for f in files:
mo = re.match(pattern, f)
if mo:
curr_name = mo.group(0)
shutil.move('{}/{}'.format(out_dir, curr_name),
'{}/{}'.format(out_dir, repl))
return err
# execute if file cannot be found
return b'GSEA output pattern was not found, and could not be changed.'
def run_gsea(self, output_stem, gmt_file=None,
components=None, enrichment_threshold=1e-1):
""" Run GSEA using gene rankings from diffusion map correlations
:param output_stem: the file location and prefix for the output of GSEA
:param gmt_file: GMT file containing the gene sets. Use None to see a list of options
:param components: Iterable of integer component numbers
:param enrichment_threshold: FDR corrected p-value significance threshold for gene set enrichments
:return: Dictionary containing the top enrichments for each component
"""
out_dir, out_prefix = os.path.split(output_stem)
out_dir += '/'
os.makedirs(out_dir, exist_ok=True)
if self.diffusion_eigenvectors is None:
raise RuntimeError('Please run run_diffusion_map_correlations() '
'before running GSEA to annotate those components.')
if not gmt_file:
self._gmt_options()
return
else:
if not len(gmt_file) == 2:
raise ValueError('gmt_file should be a tuple of (organism, filename).')
gmt_file = os.path.expanduser('~/.wishbone/tools/{}/{}').format(*gmt_file)
if components is None:
components = self.diffusion_map_correlations.columns
# Run GSEA
print('If running in notebook, please look at the command line window for GSEA progress log')
reports = dict()
for c in components:
res = self._gsea_process( c, self._diffusion_map_correlations,
output_stem, gmt_file )
# Load results
if res == b'':
# Positive correlations
df = pd.DataFrame.from_csv(glob.glob(output_stem + '_%d/gsea*pos*xls' % c)[0], sep='\t')
reports[c] = dict()
reports[c]['pos'] = df['FDR q-val'][0:5]
reports[c]['pos'] = reports[c]['pos'][reports[c]['pos'] < enrichment_threshold]
# Negative correlations
df = pd.DataFrame.from_csv(glob.glob(output_stem + '_%d/gsea*neg*xls' % c)[0], sep='\t')
reports[c]['neg'] = df['FDR q-val'][0:5]
reports[c]['neg'] = reports[c]['neg'][reports[c]['neg'] < enrichment_threshold]
# Return results
return reports
# todo add option to plot phenograph cluster that these are being DE in.
def plot_gene_expression(self, genes):
""" Plot gene expression on tSNE maps
:param genes: Iterable of strings to plot on tSNE
"""
not_in_dataframe = set(genes).difference(self.data.columns)
if not_in_dataframe:
if len(not_in_dataframe) < len(genes):
print('The following genes were either not observed in the experiment, '
'or the wrong gene symbol was used: {!r}'.format(not_in_dataframe))
else:
print('None of the listed genes were observed in the experiment, or the '
'wrong symbols were used.')
return
# remove genes missing from experiment
genes = set(genes).difference(not_in_dataframe)
height = int(2 * np.ceil(len(genes) / 5))
width = 10
fig = plt.figure(figsize=[width, height+0.25])
n_rows = int(height / 2)
n_cols = int(width / 2)
gs = plt.GridSpec(n_rows, n_cols)
axes = []
for i, g in enumerate(genes):
ax = plt.subplot(gs[i // n_cols, i % n_cols])
axes.append(ax)
if self.data_type == 'sc-seq':
plt.scatter(self.tsne['x'], self.tsne['y'], c=np.arcsinh(self.data[g]),
cmap=cmap, edgecolors='none', s=size)
else:
plt.scatter(self.tsne['x'], self.tsne['y'], c=self.data[g],
cmap=cmap, edgecolors='none', s=size)
ax.set_title(g)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
return fig, axes
class Wishbone:
def __init__(self, scdata, ignore_dm_check=False):
"""
Container class for Wishbone
:param data: SCData object
"""
if not ignore_dm_check and scdata.diffusion_eigenvectors is None:
raise RuntimeError('Please use scdata with diffusion maps run for Wishbone')
self._scdata = scdata
self._trajectory = None
self._branch = None
self._waypoints = None
self._branch_colors = None
def __repr__(self):
c, g = self.scdata.data.shape
_repr = ('Wishbone object: {c} cells x {g} genes\n'.format(g=g, c=c))
for k, v in sorted(vars(self).items()):
if not (k == '_scdata'):
_repr += '\n{}={}'.format(k[1:], 'None' if v is None else 'True')
return _repr
def save(self, fout: str):# -> None:
"""
:param fout: str, name of archive to store pickled Experiment data in. Should end
in '.p'.
:return: None
"""
with open(fout, 'wb') as f:
pickle.dump(vars(self), f)
@classmethod
def load(cls, fin):
"""
:param fin: str, name of pickled archive containing Experiment data
:return: Experiment
"""
with open(fin, 'rb') as f:
data = pickle.load(f)
wb = cls(data['_scdata'], True)
del data['_scdata']
for k, v in data.items():
setattr(wb, k[1:], v)
return wb
@property
def scdata(self):
return self._scdata
@scdata.setter
def scdata(self, item):
if not (isinstance(item, SCData)):
raise TypeError('data must be of type wishbone.wb.SCData')
self._scdata = item
@property
def branch(self):
return self._branch
@branch.setter
def branch(self, item):
if not (isinstance(item, pd.Series) or item is None):
raise TypeError('self.branch must be a pd.Series object')
self._branch = item
@property
def trajectory(self):
return self._trajectory
@trajectory.setter
def trajectory(self, item):
if not (isinstance(item, pd.Series) or item is None):
raise TypeError('self.trajectory must be a pd.Series object')
self._trajectory = item
@property
def waypoints(self):
return self._waypoints
@waypoints.setter
def waypoints(self, item):
if not (isinstance(item, list) or item is None):
raise TypeError('self.waypoints must be a list object')
self._waypoints = item
@property
def branch_colors(self):
return self._branch_colors
@branch_colors.setter
def branch_colors(self, item):
if not (isinstance(item, dict) or item is None):
            raise TypeError('self.branch_colors must be a dict object')
self._branch_colors = item
def run_wishbone(self, start_cell, branch=True, k=15,
components_list=[1, 2, 3], num_waypoints=250):
""" Function to run Wishbone.
:param start_cell: Desired start cell. This has to be a cell in self.scdata.index
:param branch: Use True for Wishbone and False for Wanderlust
:param k: Number of nearest neighbors for graph construction
:param components_list: List of components to use for running Wishbone
:param num_waypoints: Number of waypoints to sample
:return:
"""
# Start cell index
s = np.where(self.scdata.diffusion_eigenvectors.index == start_cell)[0]
if len(s) == 0:
raise RuntimeError( 'Start cell %s not found in data. Please rerun with correct start cell' % start_cell)
if isinstance(num_waypoints, list):
if len(pd.Index(num_waypoints).difference(self.scdata.data.index)) > 0:
warnings.warn('Some of the specified waypoints are not in the data. These will be removed')
num_waypoints = list(self.scdata.data.index.intersection(num_waypoints))
elif num_waypoints > self.scdata.data.shape[0]:
raise RuntimeError('num_waypoints parameter is higher than the number of cells in the dataset. \
Please select a smaller number')
s = s[0]
# Run the algorithm
res = wishbone.core.wishbone(
self.scdata.diffusion_eigenvectors.ix[:, components_list].values,
s=s, k=k, l=k, num_waypoints=num_waypoints, branch=branch)
# Assign results
trajectory = res['Trajectory']
branches = res['Branches']
trajectory = (trajectory - np.min(trajectory)) / (np.max(trajectory) - np.min(trajectory))
self.trajectory = pd.Series(trajectory, index=self.scdata.data.index)
self.branch = None
if branch:
self.branch = pd.Series([np.int(i) for i in branches], index=self.scdata.data.index)
self.waypoints = list(self.scdata.data.index[res['Waypoints']])
# Set branch colors
if branch:
self.branch_colors = dict( zip([2, 1, 3], qualitative_colors(3)))
# Plotting functions
# Function to plot wishbone results on tSNE
def plot_wishbone_on_tsne(self):
""" Plot Wishbone results on tSNE maps
"""
if self.trajectory is None:
raise RuntimeError('Please run Wishbone run_wishbone before plotting')
if self.scdata.tsne is None:
raise RuntimeError('Please run tSNE using scdata.run_tsne before plotting')
# Set up figure
fig = plt.figure(figsize=[8, 4])
gs = plt.GridSpec(1, 2)
# Trajectory
ax = plt.subplot(gs[0, 0])
plt.scatter( self.scdata.tsne['x'], self.scdata.tsne['y'],
edgecolors='none', s=size, cmap=cmap, c=self.trajectory )
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
plt.title('Wishbone trajectory')
# Branch
if self.branch is not None:
ax = plt.subplot(gs[0, 1])
plt.scatter( self.scdata.tsne['x'], self.scdata.tsne['y'],
edgecolors='none', s=size,
color=[self.branch_colors[i] for i in self.branch])
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
plt.title('Branch associations')
return fig, ax
# Function to plot trajectory
def plot_marker_trajectory(self, markers, show_variance=False,
no_bins=150, smoothing_factor=1, min_delta=0.1, fig=None, ax=None):
"""Plot marker trends along trajectory
:param markers: Iterable of markers/genes to be plotted.
:param show_variance: Logical indicating if the trends should be accompanied with variance
:param no_bins: Number of bins for calculating marker density
        :param smoothing_factor: Parameter controlling the degree of smoothing
:param min_delta: Minimum difference in marker expression after normalization to show separate trends for the two branches
:param fig: matplotlib Figure object
:param ax: matplotlib Axis object
:return Dictionary containing the determined trends for the different branches
"""
if self.trajectory is None:
raise RuntimeError('Please run Wishbone run_wishbone before plotting')
# if self.scdata.data_type == 'sc-seq' and show_variance:
# raise RuntimeError('Variance calculation is currently not supported for single-cell RNA-seq')
# Compute bin locations and bin memberships
trajectory = self.trajectory.copy()
# Sort trajectory
trajectory = trajectory.sort_values()
bins = np.linspace(np.min(trajectory), np.max(trajectory), no_bins)
# Compute gaussian weights for points at each location
# Standard deviation estimated from Silverman's approximation
stdev = np.std(trajectory) * 1.34 * len(trajectory) **(-1/5) * smoothing_factor
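        # i.e. stdev = 1.34 * sd(trajectory) * n**(-1/5) * smoothing_factor; each cell
        # then contributes a Gaussian kernel exp(-(t - bin)**2 / (2*stdev**2)) /
        # sqrt(2*pi*stdev**2) to every bin along the trajectory.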
weights = np.exp(-((np.tile(trajectory, [no_bins, 1]).T -
bins) ** 2 / (2 * stdev**2))) * (1/(2*np.pi*stdev ** 2) ** 0.5)
# Adjust weights if data has branches
if self.branch is not None:
plot_branch = True
# Branch of the trunk
trunk = self.branch[trajectory.index[0]]
branches = list( set( self.branch).difference([trunk]))
linetypes = pd.Series([':', '--'], index=branches)
# Counts of branch cells in each bin
branch_counts = pd.DataFrame(np.zeros([len(bins)-1, 3]), columns=[1, 2, 3])
for j in branch_counts.columns:
branch_counts[j] = pd.Series([sum(self.branch[trajectory.index[(trajectory > bins[i-1]) & \
(trajectory < bins[i])]] == j) for i in range(1, len(bins))])
# Frequencies
branch_counts = branch_counts.divide( branch_counts.sum(axis=1), axis=0)
# Identify the bin with the branch point by looking at the weights
weights = pd.DataFrame(weights, index=trajectory.index, columns=range(no_bins))
bp_bin = weights.columns[np.where(branch_counts[trunk] < 0.9)[0][0]] + 0
if bp_bin < 0:
bp_bin = 3
else:
plot_branch = False
bp_bin = no_bins
weights_copy = weights.copy()
# Plot marker tsne_res
xaxis = bins
# Set up return object
ret_values = dict()
ret_values['Trunk'] = pd.DataFrame( xaxis[0:bp_bin], columns=['x'])
ret_values['Branch1'] = pd.DataFrame( xaxis[(bp_bin-2):], columns=['x'])
ret_values['Branch2'] = pd.DataFrame( xaxis[(bp_bin-2):], columns=['x'])
# Marker colors
colors = qualitative_colors( len(markers) )
scaling_factor = 2
linewidth = 3
# Set up plot
fig, ax = get_fig(fig, ax, figsize=[14, 4])
for marker,color in zip(markers, colors):
# Marker expression repeated no bins times
y = self.scdata.data.ix[trajectory.index, marker]
rep_mark = np.tile(y, [no_bins, 1]).T
# Normalize y
y_min = np.percentile(y, 1)
y = (y - y_min)/(np.percentile(y, 99) - y_min)
y[y < 0] = 0; y[y > 1] = 1;
norm_rep_mark = pd.DataFrame(np.tile(y, [no_bins, 1])).T
if not plot_branch:
# Weight and plot
vals = (rep_mark * weights)/sum(weights)
# Normalize
vals = vals.sum(axis=0)
vals = vals - np.min(vals)
vals = vals/np.max(vals)
# Plot
plt.plot(xaxis, vals, label=marker, color=color, linewidth=linewidth)
# Show errors if specified
if show_variance:
# Scale the marks based on y and values to be plotted
temp = (( norm_rep_mark - vals - np.min(y))/np.max(y)) ** 2
# Calculate standard deviations
wstds = inner1d(np.asarray(temp).T, np.asarray(weights).T) / weights.sum()
plt.fill_between(xaxis, vals - scaling_factor*wstds,
vals + scaling_factor*wstds, alpha=0.2, color=color)
# Return values
ret_values['Trunk'][marker] = vals[0:bp_bin]
ret_values['Branch1'][marker] = vals[(bp_bin-2):]
ret_values['Branch2'][marker] = vals[(bp_bin-2):]
else: # Branching trajectory
rep_mark = pd.DataFrame(rep_mark, index=trajectory.index, columns=range(no_bins))
plot_split = True
# Plot trunk first
weights = weights_copy.copy()
plot_vals = ((rep_mark * weights)/np.sum(weights)).sum()
trunk_vals = plot_vals[0:bp_bin]
branch_vals = []
for br in branches:
# Mute weights of the branch cells and plot
weights = weights_copy.copy()
weights.ix[self.branch.index[self.branch == br], :] = 0
plot_vals = ((rep_mark * weights)/np.sum(weights)).sum()
branch_vals.append( plot_vals[(bp_bin-1):] )
# Min and max
temp = trunk_vals.append( branch_vals[0] ).append( branch_vals[1] )
min_val = np.min(temp)
max_val = np.max(temp)
# Plot the trunk
plot_vals = ((rep_mark * weights)/np.sum(weights)).sum()
plot_vals = (plot_vals - min_val)/(max_val - min_val)
plt.plot(xaxis[0:bp_bin], plot_vals[0:bp_bin],
label=marker, color=color, linewidth=linewidth)
if show_variance:
# Calculate weighted stds for plotting
# Scale the marks based on y and values to be plotted
temp = (( norm_rep_mark - plot_vals - np.min(y))/np.max(y)) ** 2
# Calculate standard deviations
wstds = inner1d(np.asarray(temp).T, np.asarray(weights).T) / weights.sum()
# Plot
plt.fill_between(xaxis[0:bp_bin], plot_vals[0:bp_bin] - scaling_factor*wstds[0:bp_bin],
plot_vals[0:bp_bin] + scaling_factor*wstds[0:bp_bin], alpha=0.1, color=color)
# Add values to return values
ret_values['Trunk'][marker] = plot_vals[0:bp_bin]
# Identify markers which need a split
if sum( abs(pd.Series(branch_vals[0]) - pd.Series(branch_vals[1])) > min_delta ) < 5:
# Split not necessary, plot the trunk values
plt.plot(xaxis[(bp_bin-1):], plot_vals[(bp_bin-1):],
color=color, linewidth=linewidth)
# Add values to return values
ret_values['Branch1'][marker] = list(plot_vals[(bp_bin-2):])
ret_values['Branch2'][marker] = list(plot_vals[(bp_bin-2):])
if show_variance:
# Calculate weighted stds for plotting
# Scale the marks based on y and values to be plotted
temp = (( norm_rep_mark - plot_vals - np.min(y))/np.max(y)) ** 2
wstds = inner1d(np.asarray(temp).T, np.asarray(weights).T) / weights.sum()
# Plot
plt.fill_between(xaxis[(bp_bin-1):], plot_vals[(bp_bin-1):] - scaling_factor*wstds[(bp_bin-1):],
plot_vals[(bp_bin-1):] + scaling_factor*wstds[(bp_bin-1):], alpha=0.1, color=color)
else:
# Plot the two branches separately
for br_ind,br in enumerate(branches):
# Mute weights of the branch cells and plot
weights = weights_copy.copy()
# Smooth weigths
smooth_bins = 10
if bp_bin < smooth_bins:
smooth_bins = bp_bin - 1
for i in range(smooth_bins):
weights.ix[self.branch == br, bp_bin + i - smooth_bins] *= ((smooth_bins - i)/smooth_bins) * 0.25
weights.ix[self.branch == br, (bp_bin):weights.shape[1]] = 0
# Calculate values to be plotted
plot_vals = ((rep_mark * weights)/np.sum(weights)).sum()
plot_vals = (plot_vals - min_val)/(max_val - min_val)
plt.plot(xaxis[(bp_bin-2):], plot_vals[(bp_bin-2):],
linetypes[br], color=color, linewidth=linewidth)
if show_variance:
# Calculate weighted stds for plotting
# Scale the marks based on y and values to be plotted
temp = (( norm_rep_mark - plot_vals - np.min(y))/np.max(y)) ** 2
# Calculate standard deviations
wstds = inner1d(np.asarray(temp).T, np.asarray(weights).T) / weights.sum()
# Plot
plt.fill_between(xaxis[(bp_bin-1):], plot_vals[(bp_bin-1):] - scaling_factor*wstds[(bp_bin-1):],
plot_vals[(bp_bin-1):] + scaling_factor*wstds[(bp_bin-1):], alpha=0.1, color=color)
# Add values to return values
ret_values['Branch%d' % (br_ind + 1)][marker] = list(plot_vals[(bp_bin-2):])
# Clean up the plotting
# Clean xlim
plt.legend(loc=2, bbox_to_anchor=(1, 1), prop={'size':16})
# Annotations
# Add trajectory as underlay
cm = matplotlib.cm.Spectral_r
yval = plt.ylim()[0]
yval = 0
plt.scatter( trajectory, np.repeat(yval - 0.1, len(trajectory)),
c=trajectory, cmap=cm, edgecolors='none', s=size)
sns.despine()
plt.xticks( np.arange(0, 1.1, 0.1) )
# Clean xlim
plt.xlim([-0.05, 1.05])
plt.ylim([-0.2, 1.1 ])
plt.xlabel('Wishbone trajectory')
plt.ylabel('Normalized expression')
return ret_values, fig, ax
def plot_marker_heatmap(self, marker_trends, trajectory_range=[0, 1]):
""" Plot expression of markers as a heatmap
:param marker_trends: Output from the plot_marker_trajectory function
:param trajectory_range: Range of the trajectory in which to plot the results
"""
if trajectory_range[0] >= trajectory_range[1]:
raise RuntimeError('Start cannot exceed end in trajectory_range')
if trajectory_range[0] < 0 or trajectory_range[1] > 1:
raise RuntimeError('Please use a range between (0, 1)')
# Set up figure
markers = marker_trends['Trunk'].columns[1:]
if self.branch is not None:
fig = plt.figure(figsize = [16, 0.5*len(markers)])
gs = plt.GridSpec( 1, 2 )
branches = np.sort(list(set(marker_trends.keys()).difference(['Trunk'])))
for i,br in enumerate(branches):
ax = plt.subplot( gs[0, i] )
# Construct the full matrix
mat = marker_trends['Trunk'].append( marker_trends[br][2:] )
mat.index = range(mat.shape[0])
# Start and end
start = np.where(mat['x'] >= trajectory_range[0])[0][0]
end = np.where(mat['x'] >= trajectory_range[1])[0][0]
# Plot
plot_mat = mat.ix[start:end]
sns.heatmap(plot_mat[markers].T,
linecolor='none', cmap=cmap, vmin=0, vmax=1)
ax.xaxis.set_major_locator(plt.NullLocator())
ticks = np.arange(trajectory_range[0], trajectory_range[1]+0.1, 0.1)
plt.xticks([np.where(plot_mat['x'] >= i)[0][0] for i in ticks], ticks)
# Labels
plt.xlabel( 'Wishbone trajectory' )
plt.title( br )
else:
# Plot values from the trunk alone
fig = plt.figure(figsize = [8, 0.5*len(markers)])
ax = plt.gca()
# Construct the full matrix
mat = marker_trends['Trunk']
mat.index = range(mat.shape[0])
# Start and end
start = np.where(mat['x'] >= trajectory_range[0])[0][0]
end = np.where(mat['x'] >= trajectory_range[1])[0][0]
# Plot
plot_mat = mat.ix[start:end]
sns.heatmap(plot_mat[markers].T,
linecolor='none', cmap=cmap, vmin=0, vmax=1)
ax.xaxis.set_major_locator(plt.NullLocator())
ticks = np.arange(trajectory_range[0], trajectory_range[1]+0.1, 0.1)
plt.xticks([np.where(plot_mat['x'] >= i)[0][0] for i in ticks], ticks)
# Labels
plt.xlabel( 'Wishbone trajectory' )
return fig, ax
def plot_derivatives(self, marker_trends, trajectory_range=[0, 1]):
""" Plot change in expression of markers along trajectory
:param marker_trends: Output from the plot_marker_trajectory function
:param trajectory_range: Range of the trajectory in which to plot the results
"""
if trajectory_range[0] >= trajectory_range[1]:
raise RuntimeError('Start cannot exceed end in trajectory_range')
if trajectory_range[0] < 0 or trajectory_range[1] > 1:
raise RuntimeError('Please use a range between (0, 1)')
# Set up figure
markers = marker_trends['Trunk'].columns[1:]
if self.branch is not None:
fig = plt.figure(figsize = [16, 0.5*len(markers)])
gs = plt.GridSpec( 1, 2 )
branches = np.sort(list(set(marker_trends.keys()).difference(['Trunk'])))
for i,br in enumerate(branches):
ax = plt.subplot( gs[0, i] )
# Construct the full matrix
mat = marker_trends['Trunk'].append( marker_trends[br][2:] )
mat.index = range(mat.shape[0])
# Start and end
start = np.where(mat['x'] >= trajectory_range[0])[0][0]
end = np.where(mat['x'] >= trajectory_range[1])[0][0]
# Plot
diffs = mat[markers].diff()
diffs[diffs.isnull()] = 0
# Update the branch points diffs
bp_bin = marker_trends['Trunk'].shape[0]
diffs.ix[bp_bin-1] = marker_trends[br].ix[0:1, markers].diff().ix[1]
diffs.ix[bp_bin] = marker_trends[br].ix[1:2, markers].diff().ix[2]
diffs = diffs.ix[start:end]
mat = mat.ix[start:end]
# Differences
vmax = max(0.05, abs(diffs).max().max() )
# Plot
sns.heatmap(diffs.T, linecolor='none',
cmap=matplotlib.cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.xaxis.set_major_locator(plt.NullLocator())
ticks = np.arange(trajectory_range[0], trajectory_range[1]+0.1, 0.1)
plt.xticks([np.where(mat['x'] >= i)[0][0] for i in ticks], ticks)
# Labels
plt.xlabel( 'Wishbone trajectory' )
plt.title( br )
else:
# Plot values from the trunk alone
fig = plt.figure(figsize = [8, 0.5*len(markers)])
ax = plt.gca()
# Construct the full matrix
mat = marker_trends['Trunk']
mat.index = range(mat.shape[0])
# Start and end
start = np.where(mat['x'] >= trajectory_range[0])[0][0]
end = np.where(mat['x'] >= trajectory_range[1])[0][0]
# Plot
diffs = mat[markers].diff()
diffs[diffs.isnull()] = 0
diffs = diffs.ix[start:end]
mat = mat.ix[start:end]
# Differences
vmax = max(0.05, abs(diffs).max().max() )
# Plot
sns.heatmap(diffs.T, linecolor='none',
cmap=matplotlib.cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.xaxis.set_major_locator(plt.NullLocator())
ticks = np.arange(trajectory_range[0], trajectory_range[1]+0.1, 0.1)
plt.xticks([np.where(mat['x'] >= i)[0][0] for i in ticks], ticks)
# Labels
plt.xlabel( 'Wishbone trajectory' )
return fig, ax
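
# A minimal end-to-end sketch (illustrative; assumes `scdata` was built from a counts
# matrix, e.g. via SCData.from_csv above):
#
#   scdata.run_pca()
#   scdata.run_tsne()
#   scdata.run_diffusion_map()
#   wb = Wishbone(scdata)
#   wb.run_wishbone(start_cell=scdata.data.index[0], branch=True)
#   wb.plot_wishbone_on_tsne()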
|
ManuSetty/wishbone
|
src/wishbone/wb.py
|
Python
|
gpl-2.0
| 59,061
|
[
"Gaussian"
] |
6a334d9d01b7f2c8357196a468aeaf4c9fd89b718ad90204fa70f8827e77df90
|
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class BowtieInstaller(ClusterSetup):
def run(self, nodes, master, user, user_shell, volumes):
for node in nodes:
log.info("Installing Bowtie 1.0.1 on %s" % (node.alias))
node.ssh.execute('wget -c -P /opt/software/bowtie http://sourceforge.net/projects/bowtie-bio/files/bowtie/1.0.1/bowtie-1.0.1-linux-x86_64.zip')
node.ssh.execute('unzip /opt/software/bowtie/bowtie-1.0.1-linux-x86_64.zip -d /opt/software/bowtie')
node.ssh.execute('mkdir -p /usr/local/Modules/applications/bowtie/;touch /usr/local/Modules/applications/bowtie/1.0.1')
node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/bowtie/1.0.1')
node.ssh.execute('echo "set root /opt/software/bowtie/bowtie-1.0.1" >> /usr/local/Modules/applications/bowtie/1.0.1')
node.ssh.execute('echo -e "prepend-path\tPATH\t\$root" >> /usr/local/Modules/applications/bowtie/1.0.1')
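
# A minimal usage sketch (illustrative; the plugin label is hypothetical). StarCluster
# loads the class from ~/.starcluster/config, e.g.:
#
#   [plugin bowtie-1.0.1]
#   SETUP_CLASS = bowtie_1_0_1.BowtieInstaller
#
# and the plugin label is then listed in the cluster template's plugins setting.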
|
meissnert/StarCluster-Plugins
|
bowtie_1_0_1.py
|
Python
|
mit
| 952
|
[
"Bowtie"
] |
ddd320e2e547fbafc224d0fb088f122d855653cfc8ff513431260946ce0af8ea
|
#!/usr/bin/env python3
#
# Sniffles2
# A fast structural variant caller for long-read sequencing data
#
# Created: 28.05.2021
# Author: Moritz Smolka
# Contact: moritz.g.smolka@gmail.com
#
from dataclasses import dataclass
import re
import itertools
import pysam
#for: --dev-cache
import os
import sys
import pickle
#end: for: --dev-cache
from sniffles import util
from sniffles import sv
@dataclass
class Lead:
read_id: int=None
read_qname: str=None
contig: str=None
ref_start: int=None
ref_end: int=None
qry_start: int=None
qry_end: int=None
strand: str=None
mapq: int=None
nm: float=None
source: str=None
svtype: str=None
svlen: int=None
seq: str=None
svtypes_starts_lens: list=None
def CIGAR_analyze(cigar):
buf=""
readspan=0
refspan=0
clip_start=None
clip=0
for c in cigar:
if c.isnumeric():
buf+=c
else:
oplen=int(buf)
h=False
if c in "MIX=":
readspan+=oplen
h=True
if c in "MDX=N":
refspan+=oplen
h=True
if not h:
if c in "SH":
if clip_start==None and readspan+refspan>0:
clip_start=clip
clip+=oplen
else:
raise f"Unknown CIGAR operation: '{c}'"
buf=""
    if clip_start is None:
clip_start=clip
return clip_start, clip-clip_start, refspan, readspan
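# Worked example (added for clarity, values derived from the function above):
#   CIGAR_analyze("10S80M5D10M5S") -> (10, 5, 95, 90)
# i.e. 10 bp clipped at the read start, 5 bp clipped at the end, 95 bp spanned on
# the reference (80M+5D+10M) and 90 bp spanned on the read (80M+10M).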
def CIGAR_analyze_regex(cigar):
#TODO: Obsolete
opsums={"M":0,"I":0,"D":0,"=":0,"X":0,"N":0,"H":0,"S":0}
iter=re.split(r"(\d+)", cigar)
for i in range(1,len(iter)-1,2):
op=iter[i+1]
if op!="H" and op!="S":
readstart_fwd=opsums["H"]+opsums["S"]
opsums[iter[i+1]]+=int(iter[i])
readstart_rev=opsums["H"]+opsums["S"]-readstart_fwd
refspan=opsums["M"]+opsums["D"]+opsums["="]+opsums["X"]+opsums["N"]
readspan=opsums["M"]+opsums["I"]+opsums["="]+opsums["X"]
return readstart_fwd,readstart_rev,refspan,readspan
def CIGAR_tolist(cigar):
#TODO: Obsolete (see CIGAR_tolist_analyze)
"""
CIGAR string : str -> List of CIGAR operation & length tuples : [(op1:char, op1_length:int),...]
"""
buf=""
ops=[]
for c in cigar:
if c.isnumeric():
buf+=c
else:
ops.append((c,int(buf)))
buf=""
return ops
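# Illustrative example: CIGAR_tolist("3M1I2D") -> [("M", 3), ("I", 1), ("D", 2)]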
def CIGAR_listrefspan(ops):
#TODO: Obsolete (see CIGAR_analyze)
#TODO(Potential): Detect&utilize minimap2 condensed supplementary alignment cigar for speed
return sum(oplen for op,oplen in ops if op=="M" or op=="D" or op=="=" or op=="X" or op=="N")
def CIGAR_listreadspan(ops):
#TODO: Obsolete (see CIGAR_analyze)
return sum(oplen for op,oplen in ops if op=="M" or op=="I" or op=="=" or op=="X")
def CIGAR_listreadstart_fwd(ops):
#TODO: Obsolete (see CIGAR_analyze)
"""
Position in query read where CIGAR alignment starts (i.e. taking into account start clipping)
"""
op,oplen=ops[0]
op2,op2len=ops[1]
if op=="H" or op=="S":
assert(op2!="H" and op2!="S")
return oplen
else:
return 0
def CIGAR_listreadstart_rev(ops):
#TODO: Obsolete (see CIGAR_analyze)
"""
Position in query read where CIGAR alignment starts (i.e. taking into account start clipping)
"""
op,oplen=ops[-1]
op2,op2len=ops[-2]
if op=="H" or op=="S":
assert(op2!="H" and op2!="S")
return oplen
else:
return 0
OPTAB={pysam.CMATCH: (1,1,0),
pysam.CEQUAL: (1,1,0),
pysam.CDIFF: (1,1,0),
pysam.CINS: (1,0,1),
pysam.CDEL: (0,1,1),
pysam.CREF_SKIP: (0,1,0),
pysam.CSOFT_CLIP: (1,0,1),
pysam.CHARD_CLIP: (0,0,0),
pysam.CPAD: (0,0,0)}
# pysam.CBACK: (0,0,0)}
OPLIST=[(0,0,0) for i in range(max(int(k) for k in OPTAB.keys())+1)]
for k,v in OPTAB.items():
OPLIST[int(k)]=v
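# Each OPTAB/OPLIST entry is a (consumes_read, consumes_reference, candidate_event)
# triple indexed by the numeric pysam CIGAR operation code. read_iterindels() below
# uses the first two flags to advance the read/reference cursors and the third to
# decide whether an operation may yield an SV lead, e.g. OPLIST[pysam.CDEL] == (0, 1, 1).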
def read_iterindels(read_id,read,contig,config,use_clips,read_nm):
minsvlen=config.minsvlen_screen
longinslen=config.long_ins_length/2.0
seq_cache_maxlen=config.dev_seq_cache_maxlen
qname=read.query_name
mapq=read.mapping_quality
strand="-" if read.is_reverse else "+"
CINS=pysam.CINS
CDEL=pysam.CDEL
CSOFT_CLIP=pysam.CSOFT_CLIP
pos_read=0
pos_ref=read.reference_start
for op,oplength in read.cigartuples:
add_read,add_ref,event=OPLIST[op]
if event and oplength >= minsvlen:
if op==CINS:
yield Lead(read_id,
qname,
contig,
pos_ref,
pos_ref,
pos_read,
pos_read+oplength,
strand,
mapq,
read_nm,
"INLINE",
"INS",
oplength,
seq=read.query_sequence[pos_read:pos_read+oplength] if oplength <= seq_cache_maxlen else None)
elif op==CDEL:
yield Lead(read_id,
qname,
contig,
pos_ref+oplength,
pos_ref,
pos_read,
pos_read,
strand,
mapq,
read_nm,
"INLINE",
"DEL",
-oplength)
elif use_clips and op==CSOFT_CLIP and oplength >= longinslen:
yield Lead(read_id,
qname,
contig,
pos_ref,
pos_ref,
pos_read,
pos_read+oplength,
strand,
mapq,
read_nm,
"INLINE",
"INS",
None,
seq=None)
pos_read+=add_read*oplength
pos_ref+=add_ref*oplength
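# Illustrative behaviour (hypothetical read): for a read with CIGAR 100M60I100M and
# minsvlen_screen <= 60, read_iterindels() yields a single INS Lead anchored at
# reference_start+100, covering query positions [100, 160) of the read sequence.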
def read_iterindels_unoptimized(read_id,read,contig,config,use_clips):
minsvlen=config.minsvlen_screen
seq_cache_maxlen=config.dev_seq_cache_maxlen
qname=read.query_name
mapq=read.mapping_quality
strand="-" if read.is_reverse else "+"
#TODO: Parse CG tag (ultra long alignments), if present
pos_read=0
pos_ref=read.reference_start
for op,oplength in read.cigartuples:
if op==pysam.CMATCH or op==pysam.CEQUAL or op==pysam.CDIFF:
pos_read+=oplength
pos_ref+=oplength
elif op==pysam.CINS:
if oplength>=minsvlen:
#print(pos_read,pos_read+oplength)
#print(pos_read,pos_read+oplength,read.query_sequence[pos_read:pos_read+oplength])
if oplength <= seq_cache_maxlen:
seq=read.query_sequence[pos_read:pos_read+oplength]
else:
seq=None
yield Lead(read_id,qname,contig,pos_ref,pos_ref+0,pos_read,pos_read+oplength,strand,mapq,-1,"INLINE","INS",oplength,seq=seq)
pos_read+=oplength
elif op==pysam.CDEL:
pos_ref+=oplength
if oplength>=minsvlen:
yield Lead(read_id,qname,contig,pos_ref,pos_ref+oplength,pos_read,pos_read+0,strand,mapq,-1,"INLINE","DEL",-oplength)
elif op==pysam.CREF_SKIP:
pos_ref+=oplength
elif op==pysam.CSOFT_CLIP:
if use_clips and oplength >= config.long_ins_length:
yield Lead(read_id,qname,contig,pos_ref,pos_ref+0,pos_read,pos_read+oplength,strand,mapq,-1,"INLINE","INS",None)
pos_read+=oplength
elif op==pysam.CHARD_CLIP:
#pos_ref+=oplength
if use_clips and oplength >= config.long_ins_length:
yield Lead(read_id,qname,contig,pos_ref,pos_ref+0,pos_read,pos_read+oplength,strand,mapq,-1,"INLINE","INS",None)
elif op==pysam.CPAD:
pass
else:
print(f"Unknown OPType {op}")
return
def read_itersplits_bnd(read_id,read,contig,config,read_nm):
assert(read.is_supplementary)
#SA:refname,pos,strand,CIGAR,MAPQ,NM
all_leads=[]
supps=[part.split(",") for part in read.get_tag("SA").split(";") if len(part)>0]
if len(supps) > config.max_splits_base + config.max_splits_kb*(read.query_length/1000.0):
return
if read.is_reverse:
qry_start=read.query_length-read.query_alignment_end
else:
qry_start=read.query_alignment_start
curr_lead=Lead(read_id,
read.query_name,
contig,
read.reference_start,
read.reference_start+read.reference_length,
qry_start,
qry_start+read.query_alignment_length,
"-" if read.is_reverse else "+",
read.mapping_quality,
read_nm,
"SPLIT_SUP",
"?")
all_leads.append(curr_lead)
prim_refname,prim_pos,prim_strand,prim_cigar,prim_mapq,prim_nm=supps[0]
if prim_refname == contig:
#Primary alignment is on this chromosome, no need to parse the supplementary
return
minpos_curr_chr=min(itertools.chain([read.reference_start],(int(pos) for refname,pos,strand,cigar,mapq,nm in supps if refname==contig)))
if minpos_curr_chr < read.reference_start:
#Only process splits once per chr (there may be multiple supplementary alignments on the same chr)
return
for refname,pos,strand,cigar,mapq,nm in supps:
mapq=int(mapq)
nm=int(nm)
#if not config.dev_keep_lowqual_splits and mapq < config.mapq:
# continue
is_rev=(strand=="-")
try:
readstart_fwd,readstart_rev,refspan,readspan=CIGAR_analyze(cigar)
except Exception as e:
util.error(f"Malformed CIGAR '{cigar}' with pos {pos} of read '{read.query_name}' ({e}). Skipping.")
return
pos_zero=int(pos)-1
split_qry_start=readstart_rev if is_rev else readstart_fwd
all_leads.append(Lead(read_id,
read.query_name,
refname,
pos_zero,
pos_zero + refspan,
split_qry_start,
split_qry_start+readspan,
strand,
mapq,
nm/float(readspan+1),
"SPLIT_SUP",
"?"))
sv.classify_splits(read,all_leads,config,contig)
for lead in all_leads:
for svtype, svstart, arg in lead.svtypes_starts_lens:
if svtype=="BND":
bnd = Lead(read_id=lead.read_id,
read_qname=lead.read_qname,
contig=lead.contig,
ref_start=svstart,
ref_end=svstart,
qry_start=lead.qry_start,
qry_end=lead.qry_end,
strand=lead.strand,
mapq=lead.mapq,
nm=lead.nm,
source=lead.source,
svtype=svtype,
svlen=config.bnd_cluster_length,
seq=None)
bnd.bnd_info=arg
#print(lead.contig,svstart,bnd.bnd_info)
yield bnd
def read_itersplits(read_id,read,contig,config,read_nm):
#SA:refname,pos,strand,CIGAR,MAPQ,NM
all_leads=[]
supps=[part.split(",") for part in read.get_tag("SA").split(";") if len(part)>0]
if len(supps) > config.max_splits_base + config.max_splits_kb*(read.query_length/1000.0):
return
#QC on: 18Aug21, HG002.ont.chr22; O.K.
#cigarl=CIGAR_tolist(read.cigarstring)
#if read.is_reverse:
# cigarl.reverse()
#if read.is_reverse:
# assert(read.query_length-read.query_alignment_end == CIGAR_listreadstart(cigarl))
#else:
# assert(read.query_alignment_start == CIGAR_listreadstart(cigarl))
#assert(CIGAR_listrefspan(cigarl)==read.reference_length)
#assert(CIGAR_listreadspan(cigarl)==read.query_alignment_length)
#End QC
if read.is_reverse:
qry_start=read.query_length-read.query_alignment_end
else:
qry_start=read.query_alignment_start
curr_lead=Lead(read_id,
read.query_name,
contig,
read.reference_start,
read.reference_start+read.reference_length,
qry_start,
qry_start+read.query_alignment_length,
"-" if read.is_reverse else "+",
read.mapping_quality,
read_nm,
"SPLIT_PRIM",
"?")
all_leads.append(curr_lead)
#QC on: 18Aug21; O.K.
#assert(read.reference_length == CIGAR_listrefspan(CIGAR_tolist(read.cigarstring)))
#assert(read.query_alignment_start == CIGAR_listreadstart(CIGAR_tolist(read.cigarstring)))
#assert(read.query_alignment_length == CIGAR_listreadspan(CIGAR_tolist(read.cigarstring)))
#End QC
for refname,pos,strand,cigar,mapq,nm in supps:
mapq=int(mapq)
nm=int(nm)
#if not config.dev_keep_lowqual_splits and mapq < config.mapq:
# continue
is_rev=(strand=="-")
try:
readstart_fwd,readstart_rev,refspan,readspan=CIGAR_analyze(cigar)
except Exception as e:
util.error(f"Malformed CIGAR '{cigar}' with pos {pos} of read '{read.query_name}' ({e}). Skipping.")
return
pos_zero=int(pos)-1
split_qry_start=readstart_rev if is_rev else readstart_fwd
all_leads.append(Lead(read_id,
read.query_name,
refname,
pos_zero,
pos_zero + refspan,
split_qry_start,
split_qry_start+readspan,
strand,
mapq,
nm/float(readspan+1),
"SPLIT_SUP",
"?"))
#QC on: 08Sep21; O.K.
#cigarl=CIGAR_tolist(cigar)
#assert(CIGAR_listrefspan(cigarl)==refspan)
#assert(CIGAR_listreadspan(cigarl)==readspan)
#assert(CIGAR_listreadstart_fwd(cigarl)==readstart_fwd)
#assert(CIGAR_listreadstart_rev(cigarl)==readstart_rev)
#End QC
sv.classify_splits(read,all_leads,config,contig)
for lead_i, lead in enumerate(all_leads):
for svtype, svstart, arg in lead.svtypes_starts_lens:
min_mapq=min(lead.mapq,all_leads[max(0,lead_i-1)].mapq)
if not config.dev_keep_lowqual_splits and min_mapq < config.mapq:
continue
if svtype=="BND":
bnd = Lead(read_id=lead.read_id,
read_qname=lead.read_qname,
contig=lead.contig,
ref_start=svstart,
ref_end=svstart,
qry_start=lead.qry_start,
qry_end=lead.qry_end,
strand=lead.strand,
mapq=lead.mapq,
nm=lead.nm,
source=lead.source,
svtype=svtype,
svlen=config.bnd_cluster_length,
seq=None)
bnd.bnd_info=arg
yield bnd
elif svtype!="NOSV":
svlen=arg
yield Lead(read_id=lead.read_id,
read_qname=lead.read_qname,
contig=lead.contig,
ref_start=svstart,
ref_end=svstart+svlen if svlen!=None and svtype!="INS" else svstart,
qry_start=lead.qry_start,
qry_end=lead.qry_end,
strand=lead.strand,
mapq=lead.mapq,
nm=lead.nm,
source=lead.source,
svtype=svtype,
svlen=svlen,
seq=lead.seq if svtype=="INS" else None)
class LeadProvider:
def __init__(self,config,read_id_offset):
self.config=config
self.leadtab={}
self.leadcounts={}
for svtype in sv.TYPES:
self.leadtab[svtype]={}
self.leadcounts[svtype]=0
self.covrtab_fwd={}
self.covrtab_rev={}
self.covrtab_min_bin=None
#self.covrtab_read_start={}
#self.covrtab_read_end={}
self.read_id=read_id_offset
self.read_count=0
self.contig=None
self.start=None
self.end=None
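        # Resulting structure: self.leadtab maps svtype -> {bin_start: [Lead, ...]}
        # with bins of width config.cluster_binsize, while covrtab_fwd/covrtab_rev
        # hold per-bin coverage deltas keyed by bin start (filled in iter_region).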
def record_lead(self,ld,pos_leadtab):
leadtab_svtype=self.leadtab[ld.svtype]
if pos_leadtab in leadtab_svtype:
leadtab_svtype[pos_leadtab].append(ld)
lead_count=len(leadtab_svtype[pos_leadtab])
if lead_count > self.config.consensus_max_reads_bin:
ld.seq=None
else:
leadtab_svtype[pos_leadtab]=[ld]
lead_count=1
self.leadcounts[ld.svtype]+=1
def build_leadtab(self,contig,start,end,bam):
if self.config.dev_cache:
loaded_externals=self.dev_load_leadtab(contig,start,end)
            if loaded_externals is not False:
return loaded_externals
assert(self.contig==None)
assert(self.start==None)
assert(self.end==None)
self.contig=contig
self.start=start
self.end=end
self.covrtab_min_bin=int(self.start/self.config.coverage_binsize)*self.config.coverage_binsize
externals=[]
ld_binsize=self.config.cluster_binsize
for ld in self.iter_region(bam,contig,start,end):
ld_contig,ld_ref_start=ld.contig,ld.ref_start
#TODO: Handle leads overlapping region ends (start/end)
if contig==ld_contig and ld_ref_start >= start and ld_ref_start < end:
pos_leadtab=int(ld_ref_start/ld_binsize)*ld_binsize
self.record_lead(ld,pos_leadtab)
else:
externals.append(ld)
if self.config.dev_cache:
self.dev_store_leadtab(contig,start,end,externals)
return externals
def iter_region(self,bam,contig,start=None,end=None):
leads_all=[]
binsize=self.config.cluster_binsize
coverage_binsize=self.config.coverage_binsize
coverage_shift_bins=self.config.coverage_shift_bins
long_ins_threshold=self.config.long_ins_length*0.5
qc_nm=self.config.qc_nm
phase=self.config.phase
advanced_tags=qc_nm or phase
mapq_min=self.config.mapq
alen_min=self.config.min_alignment_length
for read in bam.fetch(contig,start,end,until_eof=False):
#if self.read_count % 1000000 == 0:
# gc.collect()
if read.reference_start < start or read.reference_start >= end:
continue
self.read_id+=1
self.read_count+=1
if read.mapping_quality < mapq_min or read.is_secondary or read.query_alignment_length < alen_min:
continue
has_sa=read.has_tag("SA")
use_clips=self.config.detect_large_ins and not read.is_supplementary and not has_sa
nm=-1
curr_read_id=self.read_id
if advanced_tags:
if qc_nm:
if read.has_tag("NM"):
nm=read.get_tag("NM")/float(read.query_alignment_length+1)
if phase:
curr_read_id=(self.read_id,str(read.get_tag("HP")) if read.has_tag("HP") else "NULL",str(read.get_tag("PS")) if read.has_tag("PS") else "NULL")
#Extract small indels
for lead in read_iterindels(curr_read_id,read,contig,self.config,use_clips,read_nm=nm):
yield lead
#Extract read splits
if has_sa:
if read.is_supplementary:
for lead in read_itersplits_bnd(curr_read_id,read,contig,self.config,read_nm=nm):
yield lead
else:
for lead in read_itersplits(curr_read_id,read,contig,self.config,read_nm=nm):
yield lead
#Record in coverage table
read_end=read.reference_start+read.reference_length
assert(read_end==read.reference_end)
#assert(read_end>=read.reference_start)
if read.is_reverse:
target_tab=self.covrtab_rev
else:
target_tab=self.covrtab_fwd
covr_start_bin=(int(read.reference_start/coverage_binsize)+coverage_shift_bins)*coverage_binsize
covr_end_bin=(int(read_end/coverage_binsize)-coverage_shift_bins)*coverage_binsize
if covr_end_bin > covr_start_bin:
self.covrtab_min_bin=min(self.covrtab_min_bin,covr_start_bin)
target_tab[covr_start_bin]=target_tab[covr_start_bin]+1 if covr_start_bin in target_tab else 1
if read_end <= self.end:
target_tab[covr_end_bin]=target_tab[covr_end_bin]-1 if covr_end_bin in target_tab else -1
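        # Coverage bookkeeping: each read contributes +1 at its (shifted) start bin
        # and -1 at its (shifted) end bin, so per-bin coverage can presumably be
        # recovered downstream by a running sum over bins (not done in this module).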
def dev_leadtab_filename(self,contig,start,end):
scriptloc=os.path.dirname(os.path.realpath(sys.argv[0]))
if self.config.dev_cache_dir==None:
cache_dir=f"{scriptloc}/cache"
else:
cache_dir=self.config.dev_cache_dir
return f"{cache_dir}/{os.path.basename(self.config.input)}_{contig}_{start}_{end}.pickle"
def dev_store_leadtab(self,contig,start,end,externals):
data={"externals":externals, "self": self}
filename=self.dev_leadtab_filename(contig,start,end)
with open(filename,"wb") as h:
pickle.dump(data,h)
print(f"(DEV/Cache) Dumped leadtab to {filename}")
def dev_load_leadtab(self,contig,start,end):
filename=self.dev_leadtab_filename(contig,start,end)
if not os.path.exists(filename):
return False
with open(filename,"rb") as h:
data=pickle.load(h)
for item in data["self"].__dict__:
self.__dict__[item]=data["self"].__dict__[item]
print(f"(DEV/Cache) Loaded leadtab from {filename}")
return data["externals"]
def dev_debug_graph(self,title):
import matplotlib.pyplot as plt
import seaborn as sns
print(title)
sns.set()
data=[]
for k,v in self.leadtab.items():
data.append(len(v))
if len(data)>50000:
break
plt.hist(data,bins=[i for i in range(0,20)])
#plt.savefig(filename)
plt.title(title)
plt.savefig(f"debug/{title}.png")
plt.close()
|
fritzsedlazeck/Sniffles
|
src/sniffles/leadprov.py
|
Python
|
mit
| 23,639
|
[
"pysam"
] |
9c83eac4a2ec76a59e69ba9c7feb491d0056236c680de70e3b2cd32640affe75
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
from invenio.bibauthorid_webapi import get_canonical_id_from_person_id, add_cname_to_hepname_record
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
import pprint
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, CFG_SITE_NAME, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import getUid, page_not_authorized, collect_user_info, set_user_preferences, \
email_valid_p, emailUnique, get_email_from_username, get_uid_from_email, \
isUserSuperAdmin, isGuestUser
from invenio.access_control_admin import acc_find_user_role_actions, acc_get_user_roles, acc_get_role_id
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_doi, get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author
from invenio.webauthorprofile_orcidutils import get_dois_from_orcid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/generate_autoclaim_data
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'generate_autoclaim_data',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
        if component not in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1 # -1 is a non valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def __call__(self, req, form):
'''
Serve the main person page.
Will use the object's person id to get a person's information.
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: a full page formatted in HTML
@rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'open_claim': (str, None),
'ticketid': (int, -1),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
req.argd = argd # needed for perform_req_search
if self.person_id < 0:
return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))
no_access = self._page_access_permission_wall(req, [self.person_id])
if no_access:
return no_access
pinfo['claim_in_process'] = True
uinfo = collect_user_info(req)
uinfo['precached_viewclaimlink'] = pinfo['claim_in_process']
set_user_preferences(pinfo['uid'], uinfo)
if self.person_id != -1:
pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id
rt_ticket_id = argd['ticketid']
if rt_ticket_id != -1:
pinfo["admin_requested_ticket_id"] = rt_ticket_id
session.dirty = True
## Create menu and page using templates
cname = webapi.get_canonical_id_from_person_id(self.person_id)
menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
profile_page.add_profile_menu(menu)
gboxstatus = self.person_id
gpid = self.person_id
gNumOfWorkers = 3 # to do: read it from conf file
gReqTimeout = 3000
gPageTimeout = 12000
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s", guestPrompt: true});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
# content += self._generate_person_info_box(ulevel, ln) #### Name variants
# metaheaderadd = self._scripts() + '\n <meta name="robots" content="nofollow" />'
# body = self._generate_optional_menu(ulevel, req, form)
content = self._generate_tabs(ulevel, req)
content += self._generate_footer(ulevel)
content = content.decode('utf-8', 'strict')
webapi.history_log_visit(req, 'claim', pid=self.person_id)
return page(title=self._generate_title(ulevel),
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=profile_page.get_wrapped_body(content).encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
'''
Display an error page if user not authorized to use the interface.
@param req: Apache Request Object for session management
@type req: Apache Request Object
@param req_pid: Requested person id
@type req_pid: int
@param req_level: Request level required for the page
@type req_level: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
uinfo = collect_user_info(req)
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
is_authorized = True
pids_to_check = []
if not AID_ENABLED:
return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
if req_pid and not isinstance(req_pid, list):
pids_to_check = [req_pid]
elif req_pid and isinstance(req_pid, list):
pids_to_check = req_pid
if (not (uinfo['precached_usepaperclaim']
or uinfo['precached_usepaperattribution'])
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
if is_authorized and not webapi.user_can_view_CMP(uid):
is_authorized = False
if is_authorized and 'ticket' in pinfo:
for tic in pinfo["ticket"]:
if 'pid' in tic:
pids_to_check.append(tic['pid'])
if pids_to_check and is_authorized:
user_pid = webapi.get_pid_from_uid(uid)
if not uinfo['precached_usepaperattribution']:
if (not user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
elif (user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
for tic in list(pinfo["ticket"]):
if not tic["pid"] == user_pid:
pinfo['ticket'].remove(tic)
if not is_authorized:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
else:
return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_optional_menu(self, ulevel, req, form):
'''
Generates the menu for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: menu
@rtype: str
'''
def generate_optional_menu_guest(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_user(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_admin(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu_admin(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
generate_optional_menu = {'guest': generate_optional_menu_guest,
'user': generate_optional_menu_user,
'admin': generate_optional_menu_admin}
return "<div class=\"clearfix\">" + generate_optional_menu[ulevel](req, form) + "</div>"
def _generate_ticket_box(self, ulevel, req):
'''
Generates the semi-permanent info box for the specified user permission
level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: info box
@rtype: str
'''
def generate_ticket_box_guest(req):
session = get_session(req)
pinfo = session['personinfo']
ticket = pinfo['ticket']
results = list()
pendingt = list()
for t in ticket:
if 'execution_result' in t:
for res in t['execution_result']:
results.append(res)
else:
pendingt.append(t)
box = ""
if pendingt:
box += TEMPLATE.tmpl_ticket_box('in_process', 'transaction', len(pendingt))
if results:
failed = [messages for status, messages in results if not status]
if failed:
box += TEMPLATE.tmpl_transaction_box('failure', failed)
successfull = [messages for status, messages in results if status]
if successfull:
box += TEMPLATE.tmpl_transaction_box('success', successfull)
return box
def generate_ticket_box_user(req):
return generate_ticket_box_guest(req)
def generate_ticket_box_admin(req):
return generate_ticket_box_guest(req)
generate_ticket_box = {'guest': generate_ticket_box_guest,
'user': generate_ticket_box_user,
'admin': generate_ticket_box_admin}
return generate_ticket_box[ulevel](req)
def _generate_person_info_box(self, ulevel, ln):
'''
Generates the name info box for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param ln: page display language
@type ln: str
@return: name info box
@rtype: str
'''
def generate_person_info_box_guest(ln):
names = webapi.get_person_names_from_id(self.person_id)
box = TEMPLATE.tmpl_admin_person_info_box(ln, person_id=self.person_id,
names=names)
return box
def generate_person_info_box_user(ln):
return generate_person_info_box_guest(ln)
def generate_person_info_box_admin(ln):
return generate_person_info_box_guest(ln)
generate_person_info_box = {'guest': generate_person_info_box_guest,
'user': generate_person_info_box_user,
'admin': generate_person_info_box_admin}
return generate_person_info_box[ulevel](ln)
def _generate_tabs(self, ulevel, req):
'''
Generates the tabs content for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: tabs content
@rtype: str
'''
from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict
def generate_tabs_guest(req):
links = list() # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=list(),
verbiage_dict=tmpl_verbiage_dict['guest'],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
show_reset_button=False)
def generate_tabs_user(req):
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
user_is_owner = 'not_owner'
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
user_is_owner = 'owner'
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = list()
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=tickets,
verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])
def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
session = get_session(req)
personinfo = dict()
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
records = [{'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]} for paper in all_papers]
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
            if open_tickets is None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1 and 'tickets' in show_tabs:
show_tabs.remove('tickets')
rt_tickets = None
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
if verbiage_dict is None:
verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
if buttons_verbiage_dict is None:
buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)
# send data to the template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def translate_dict_values(dictionary, ln):
def translate_str_values(dictionary, f=lambda x: x):
translated_dict = dict()
for key, value in dictionary.iteritems():
if isinstance(value, str):
translated_dict[key] = f(value)
elif isinstance(value, dict):
translated_dict[key] = translate_str_values(value, f)
else:
raise TypeError("Value should be either string or dictionary.")
return translated_dict
return translate_str_values(dictionary, f=gettext_set_language(ln))
generate_tabs = {'guest': generate_tabs_guest,
'user': generate_tabs_user,
'admin': generate_tabs_admin}
return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
'''
Generates the footer for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: footer
@rtype: str
'''
def generate_footer_guest():
return TEMPLATE.tmpl_invenio_search_box()
def generate_footer_user():
return generate_footer_guest()
def generate_footer_admin():
return generate_footer_guest()
generate_footer = {'guest': generate_footer_guest,
'user': generate_footer_user,
'admin': generate_footer_admin}
return generate_footer[ulevel]()
def _ticket_dispatch_end(self, req):
'''
The ticket dispatch is finished, redirect to the original page of
origin or to the last_viewed_pid or return to the papers autoassigned box to populate its data
'''
session = get_session(req)
pinfo = session["personinfo"]
webapi.session_bareinit(req)
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
if "merge_ticket" in pinfo and pinfo['merge_ticket']:
pinfo['merge_ticket'] = []
uinfo = collect_user_info(req)
uinfo['precached_viewclaimlink'] = True
uid = getUid(req)
set_user_preferences(uid, uinfo)
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.dirty = True
return redirect_to_url(req, referer)
# if we are coming fromt he autoclaim box we should not redirect and just return to the caller function
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
session.dirty = True
else:
redirect_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
if not redirect_page:
redirect_page = webapi.get_fallback_redirect_link(req)
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == True and pinfo['autoclaim']['checkout'] == True:
redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
pinfo['autoclaim']['checkout'] = False
session.dirty = True
elif not 'manage_profile' in redirect_page:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
redirect_page = '%s/author/claim/%s?open_claim=True' % (CFG_SITE_URL, webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
else:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
return redirect_to_url(req, redirect_page)
# redirect_link = diary('get_redirect_link', caller='_ticket_dispatch_end', parameters=[('open_claim','True')])
# return redirect_to_url(req, redirect_link)
# need review if should be deleted
def __user_is_authorized(self, req, action):
'''
Determines if a given user is authorized to perform a specified action
@param req: Apache Request Object
@type req: Apache Request Object
@param action: the action the user wants to perform
@type action: string
@return: True if user is allowed to perform the action, False if not
@rtype: boolean
'''
if not req:
return False
if not action:
return False
else:
action = escape(action)
uid = getUid(req)
if not isinstance(uid, int):
return False
if uid == 0:
return False
allowance = [i[1] for i in acc_find_user_role_actions({'uid': uid})
if i[1] == action]
if allowance:
return True
return False
@staticmethod
def _scripts(kill_browser_cache=False):
'''
Returns html code to be included in the meta header of the html page.
The actual code is stored in the template.
@return: html formatted Javascript and CSS inclusions for the <head>
@rtype: string
'''
return TEMPLATE.tmpl_meta_includes(kill_browser_cache)
def _check_user_fields(self, req, form):
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None)})
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
skip_checkout_faulty_fields = False
if ulevel in ['user', 'admin']:
skip_checkout_faulty_fields = True
if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
if "user_first_name" in argd and argd['user_first_name']:
if not argd["user_first_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_first_name")
else:
pinfo["user_first_name"] = escape(argd["user_first_name"])
if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
if "user_last_name" in argd and argd['user_last_name']:
if not argd["user_last_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_last_name")
else:
pinfo["user_last_name"] = escape(argd["user_last_name"])
if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
if "user_email" in argd and argd['user_email']:
if not email_valid_p(argd["user_email"]):
pinfo["checkout_faulty_fields"].append("user_email")
else:
pinfo["user_email"] = escape(argd["user_email"])
if (ulevel == "guest"
and emailUnique(argd["user_email"]) > 0):
pinfo["checkout_faulty_fields"].append("user_email_taken")
else:
pinfo["checkout_faulty_fields"].append("user_email")
if "user_comments" in argd:
if argd["user_comments"]:
pinfo["user_ticket_comments"] = escape(argd["user_comments"])
else:
pinfo["user_ticket_comments"] = ""
session.dirty = True
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
- bibref_check_submit:
- cancel: clean the session (erase tickets and so on)
- cancel_rt_ticket:
- cancel_search_ticket:
- cancel_stage:
- checkout:
- checkout_continue_claiming:
- checkout_remove_transaction:
- checkout_submit:
- claim: claim papers for an author
- commit_rt_ticket:
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@return: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review':(str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile':(str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
# one action (the most) is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
# incomplete papers (incomplete paper info or other problems) trigger action function without user's interference
# in order to fix those problems and claim papers or remove them from the ticket
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
def add_external_id():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system'] is not None:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id'] is not None:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def set_uid():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def associate_profile():
'''
associates the user with user id to the person profile with pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, redirect_pid))
# if someone have already claimed this profile it redirects to choose_profile with an error message
else:
param=''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + argd['search_param']
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
empties the session out of merge content and redirects to the manage profile page
that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def claim_to_other_person():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)
def commit_rt_ticket():
if argd['selection'] is not None:
tid = argd['selection'][0]
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
return self._commit_rt_ticket(req, tid, pid)
def confirm_repeal_reset():
if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
pid = argd['pid']
cancel_search_ticket(without_return = True)
else:
return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
#return self._error_page(req, ln, "Fatal: cannot create ticket without a person id! (crr %s)" %repr(argd))
bibrefrecs = argd['selection']
if argd['confirm']:
action = 'assign'
elif argd['repeal']:
action = 'reject'
elif argd['reset']:
action = 'reset'
else:
return self._error_page(req, ln, "Fatal: not existent action!")
for bibrefrec in bibrefrecs:
form['jsondata'] = json.dumps({'pid': str(pid),
'action': action,
'bibrefrec': bibrefrec,
'on': 'user'})
t = WebInterfaceAuthorTicketHandling()
t.add_operation(req, form)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def delete_external_ids():
'''
deletes association between the user with pid and the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot delete external ids from an unknown person")
if argd['existing_ext_ids'] is not None:
existing_ext_ids = argd['existing_ext_ids']
else:
return self._error_page(req, ln,
"Fatal: you must select at least one external id in order to delete it")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def none_action():
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
def merge():
'''
performs a merge if allowed on the profiles that the user chose
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without a primary profile!")
if argd['selection']:
profiles_to_merge = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without any profiles selected!")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]
is_admin = False
if pinfo['ulevel'] == 'admin':
is_admin = True
# checking if there are restrictions regarding this merge
can_perform_merge, preventing_pid = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)
if not can_perform_merge:
# when redirected back to the merge profiles page display an error message about the currently attempted merge
pinfo['merge_info_message'] = ("failure", "confirm_failure")
session.dirty = True
redirect_url = "%s/author/merge_profiles?primary_profile=%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
if is_admin:
webapi.merge_profiles(primary_pid, pids_to_merge)
# when redirected back to the manage profile page display a message about the currently attempted merge
pinfo['merge_info_message'] = ("success", "confirm_success")
else:
name = ''
if 'user_last_name' in pinfo:
name = pinfo['user_last_name']
if 'user_first_name' in pinfo:
name += pinfo['user_first_name']
email = ''
if 'user_email' in pinfo:
email = pinfo['user_email']
selection_str = "&selection=".join(profiles_to_merge)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str)}
# a message is sent to the admin with info regarding the currently attempted merge
webapi.create_request_message(userinfo, subj='Merge profiles request')
# when redirected back to the manage profile page display a message about the merge
pinfo['merge_info_message'] = ("success", "confirm_operation")
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
#pp = pprint.PrettyPrinter(indent=4)
#session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
            if 'E-mail' in form:
if not email:
email = form['E-mail']
elif name != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed}
webapi.create_request_message(userinfo)
def set_canonical_name():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if argd['canonical_name'] is not None:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
if webapi.is_valid_canonical_id(cname):
webapi.swap_person_canonical_name(pid, cname, userinfo)
else:
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid), '#tabData'))
action_functions = {'add_external_id': add_external_id,
'set_uid': set_uid,
'add_missing_external_ids': add_missing_external_ids,
'associate_profile': associate_profile,
'bibref_check_submit': bibref_check_submit,
'cancel': cancel,
'cancel_merging': cancel_merging,
'cancel_rt_ticket': cancel_rt_ticket,
'cancel_search_ticket': cancel_search_ticket,
'cancel_stage': cancel_stage,
'checkout': checkout,
'checkout_continue_claiming': checkout_continue_claiming,
'checkout_remove_transaction': checkout_remove_transaction,
'checkout_submit': checkout_submit,
'assign': claim,
'commit_rt_ticket': commit_rt_ticket,
'confirm': confirm_repeal_reset,
'delete_external_ids': delete_external_ids,
'merge': merge,
'reject': claim,
'repeal': confirm_repeal_reset,
'reset': confirm_repeal_reset,
'send_message': send_message,
'set_canonical_name': set_canonical_name,
'to_other_person': claim_to_other_person,
None: none_action}
return action_functions[action]()
def _ticket_open_claim(self, req, bibrefs, ln):
'''
Generate page to let user choose how to proceed
@param req: Apache Request Object
@type req: Apache Request Object
@param bibrefs: list of record IDs to perform an action on
@type bibrefs: list of int
@param ln: language to display the page in
@type ln: string
'''
session = get_session(req)
uid = getUid(req)
uinfo = collect_user_info(req)
pinfo = session["personinfo"]
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
no_access = self._page_access_permission_wall(req)
session.dirty = True
pid = -1
search_enabled = True
if not no_access and uinfo["precached_usepaperclaim"]:
tpid = webapi.get_pid_from_uid(uid)
if tpid > -1:
pid = tpid
last_viewed_pid = False
if (not no_access
and "claimpaper_admin_last_viewed_pid" in pinfo
and pinfo["claimpaper_admin_last_viewed_pid"]):
names = webapi.get_person_names_from_id(pinfo["claimpaper_admin_last_viewed_pid"])
names = sorted([i for i in names], key=lambda k: k[1], reverse=True)
if len(names) > 0:
if len(names[0]) > 0:
last_viewed_pid = [pinfo["claimpaper_admin_last_viewed_pid"], names[0][0]]
if no_access:
search_enabled = False
pinfo["referer"] = uinfo["referer"]
session.dirty = True
body = TEMPLATE.tmpl_open_claim(bibrefs, pid, last_viewed_pid,
search_enabled=search_enabled)
body = TEMPLATE.tmpl_person_detail_layout(body)
title = _('Claim this paper')
metaheaderadd = WebInterfaceBibAuthorIDClaimPages._scripts(kill_browser_cache=True)
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, webapi.get_person_redirect_link(str(pid))))
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
'''
deletes a transaction from an rt ticket
'''
webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
uid = getUid(req)
tid = int(tid)
rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
for action, bibrefrec in rt_ticket['operations']:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
webapi.delete_request_ticket(pid, tid)
redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
'''
Collects the persons' data returned by the search and renders the results box.
@param query: the query string
@type query: string
@param shown_element_functions: functions that tell the template which columns to show and which buttons to print
@type shown_element_functions: dict
@return: html body
@rtype: string
'''
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# this variable shows if we want to use the following data in the search template
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
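# Illustrative sketch (not executed): a typical call to search_box, assuming the
# caller has already prepared the template hooks it needs. The query string and
# hook selection below are example values; the actual hooks depend on the page
# being rendered (see search(), merge_profiles() and choose_profile()).
#
#   shown_element_functions = dict()
#   shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
#   shown_element_functions['pass_status'] = True
#   body = self.search_box('Ellis, J', shown_element_functions)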
def search(self, req, form):
'''
Function used for searching a person based on a name with which the
function is queried.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: GET/POST request parameters
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
profile_page.add_profile_menu(menu)
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '10';var gPID = '10'; var gNumOfWorkers= '10'; var gReqTimeout= '10'; var gPageTimeout= '10';",
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body(content)
parameter = None
if query:
parameter = '?search_param=%s' % query
webapi.history_log_visit(req, 'search', params = parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def merge_profiles(self, req, form):
'''
Beginning of the process that performs a merge over multiple person profiles.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: GET/POST request parameters
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'primary_profile': (str, None),
'search_param': (str, ''),
'selection': (list, None),
'verbose': (int, 0)})
ln = argd['ln']
primary_cname = argd['primary_profile']
search_param = argd['search_param']
selection = argd['selection']
debug = 'verbose' in argd and argd['verbose'] > 0
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
profiles_to_merge = pinfo['merge_profiles']
_ = gettext_set_language(ln)
if not primary_cname:
return page_not_authorized(req, text=_('This page is not accessible directly.'))
no_access = self._page_access_permission_wall(req)
if no_access:
return no_access
if selection is not None:
profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
for profile in selection:
if profile not in profiles_to_merge_session:
pid = webapi.get_person_id_from_canonical_id(profile)
is_available = webapi.is_profile_available(pid)
pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
session.dirty = True
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
is_available = webapi.is_profile_available(primary_pid)
body = ''
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = 'Merge Profiles'
menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
merge_page = WebProfilePage("merge_profile", title, no_cache=True)
merge_page.add_profile_menu(menu)
if debug:
merge_page.add_debug_info(pinfo)
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
shown_element_functions['pass_status'] = 'True'
merge_page.add_bootstrapped_data(json.dumps({
"other": "var gMergeProfile = %s; var gMergeList = %s;" % ([primary_cname, '1' if is_available else '0'], profiles_to_merge)
}))
body += self.search_box(search_param, shown_element_functions)
body = merge_page.get_wrapped_body(body)
return page(title=title,
metaheaderadd=merge_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _perform_search(self, search_param):
'''
Calls the person search on search_param and returns the results.
@param search_param: query string
@type search_param: string
@return: list of pids that match the search query
@rtype: list
'''
pid_candidates_list = []
nquery = None
if search_param:
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
for result in sorted_results:
pid_candidates_list.append(result[0])
return pid_candidates_list
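# Illustrative sketch (not executed): how a query is normalised above before it
# is handed to webapi.search_person_ids_by_name (example inputs).
#
#   'Ellis'        -> search_person_ids_by_name('Ellis')
#   'Ellis:John'   -> search_person_ids_by_name('John')    (part after the colon)
#   ''             -> no search performed, empty list returned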
def merge_profiles_ajax(self, req, form):
'''
Function used for handling Ajax requests used in order to add/remove profiles
in/from the merging profiles list, which is saved in the session.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
# Abort if the simplejson module isn't available
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'addProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
person_id = webapi.get_person_id_from_canonical_id(profile)
if person_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
profile_availability = webapi.is_profile_available(person_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
if profile not in [el[0] for el in profiles_to_merge]:
profiles_to_merge.append([profile, profile_availability])
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'addedPofile': profile})
json_response.update({'addedPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'removeProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
if webapi.get_person_id_from_canonical_id(profile) != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
print (str(profiles_to_merge))
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'removedProfile': profile})
else:
json_response.update({'result': 'Error: Profile was missing already from the list'})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'setPrimaryProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
profile_id = webapi.get_person_id_from_canonical_id(profile)
if profile_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profile_availability = webapi.is_profile_available(profile_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
primary_profile = session["personinfo"]["merge_primary_profile"]
if primary_profile not in profiles_to_merge:
profiles_to_merge.append(primary_profile)
session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
session.dirty = True
json_response.update({'resultCode': 1})
json_response.update({'primaryProfile': profile})
json_response.update({'primaryPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
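# Illustrative sketch (not executed): the Ajax payloads this handler expects and
# the shape of a successful answer. The canonical name is an example value; the
# response key spellings follow the code above, which the frontend relies on.
#
#   {"requestType": "addProfile",        "profile": "J.R.Ellis.1"}
#   {"requestType": "removeProfile",     "profile": "J.R.Ellis.1"}
#   {"requestType": "setPrimaryProfile", "profile": "J.R.Ellis.1"}
#
#   e.g. a successful addProfile answers with
#   {"resultCode": 1, "addedPofile": "J.R.Ellis.1", "addedPofileAvailability": "1"}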
def search_box_ajax(self, req, form):
'''
Function used for handling Ajax requests used in the search box.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
# Abort if the simplejson module isn't available
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
# Recent papers request
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'getPapers':
if json_data.has_key('personId'):
pId = json_data['personId']
papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
key=itemgetter(0))
papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
json_response.update({'result': "\n".join(papers_html)})
json_response.update({'totalPapers': len(papers)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Missing person id'})
elif req_type == 'getNames':
if json_data.has_key('personId'):
pId = json_data['personId']
names = webapi.get_person_names_from_id(int(pId))
names_html = TEMPLATE.tmpl_gen_names(names)
json_response.update({'result': "\n".join(names_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'getIDs':
if json_data.has_key('personId'):
pId = json_data['personId']
ids = webapi.get_external_ids_from_person_id(int(pId))
ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
json_response.update({'result': "\n".join(ids_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'isProfileClaimed':
if json_data.has_key('personId'):
pId = json_data['personId']
isClaimed = webapi.get_uid_from_personid(pId)
if isClaimed != -1:
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
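# Illustrative sketch (not executed): the Ajax payloads handled above; the
# person id is an example value.
#
#   {"requestType": "getPapers",        "personId": "1332"}
#   {"requestType": "getNames",         "personId": "1332"}
#   {"requestType": "getIDs",           "personId": "1332"}
#   {"requestType": "isProfileClaimed", "personId": "1332"}
#
# Each answer carries at least {"resultCode": ..., "pid": ...}; getPapers also
# returns the rendered rows in "result" and their count in "totalPapers".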
def choose_profile(self, req, form):
'''
Generate SSO landing/choose_profile page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'search_param': (str, None),
'failed': (str, None),
'verbose': (int, 0)})
ln = argd['ln']
debug = "verbose" in argd and argd["verbose"] > 0
req.argd = argd # needed for perform_req_search
search_param = argd['search_param']
webapi.session_bareinit(req)
session = get_session(req)
uid = getUid(req)
pinfo = session['personinfo']
failed = True
if not argd['failed']:
failed = False
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
if 'arXiv' not in login_info['logged_in_to_remote_systems']:
return page_not_authorized(req, text=_("This page is not accessible directly."))
pid = webapi.get_user_pid(login_info['uid'])
# Create Wrapper Page Markup
is_owner = False
menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
choose_page.add_profile_menu(menu)
if debug:
choose_page.add_debug_info(pinfo)
content = TEMPLATE.tmpl_choose_profile(failed)
body = choose_page.get_wrapped_body(content)
# In any case, when we pass through here, an autoclaim should be performed right after!
pinfo = session["personinfo"]
pinfo['should_check_to_autoclaim'] = True
session.dirty = True
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
# if already logged in then redirect the user to the page he was viewing
if pid != -1:
redirect_pid = pid
if last_visited_pid:
redirect_pid = last_visited_pid
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, str(redirect_pid)))
else:
# get name strings and email addresses from SSO/Oauth logins: {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla', 'pants_size':20}}
remote_login_systems_info = webapi.get_remote_login_systems_info(req, login_info['logged_in_to_remote_systems'])
# get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
# this is the profile with the biggest intersection of papers so it's more probable that this is the profile the user seeks
probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)
# if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
# # try to assign the user to the profile he chose. If for some reason the profile is not available we assign him to an empty profile
# redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
# if profile_claimed:
# redirect_to_url(req, '%s/author/claim/action?associate_profile=True&redirect_pid=%s' % (CFG_SITE_URL, str(redirect_pid)))
probable_profile_suggestion_info = None
last_viewed_profile_suggestion_info = None
if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
# get information about the last viewed profile and show it to the user
last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
if probable_pid > -1 and webapi.is_profile_available(probable_pid):
# get information about the most probable profile and show it to the user
probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids )
if not search_param:
# we prefill the search with the most relevant of the names we get from external systems
name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
search_param = most_relevant_name(name_variants)
body = body + TEMPLATE.tmpl_probable_profile_suggestion(probable_profile_suggestion_info, last_viewed_profile_suggestion_info, search_param)
shown_element_functions = dict()
shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
# show in the templates the column status (if profile is bound to a user or not)
shown_element_functions['show_status'] = True
# pass in the templates the data of the column status (if profile is bound to a user or not)
# we might need the data without having to show them in the column (e.g. merge_profiles)
shown_element_functions['pass_status'] = True
# show search results to the user
body = body + self.search_box(search_param, shown_element_functions)
body = body + TEMPLATE.tmpl_choose_profile_footer()
title = _(' ')
return page(title=title,
metaheaderadd=choose_page.get_head().encode('utf-8'),
body=body,
req=req,
language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
'''
Process and collect data for the arXiv box
@param req: Apache request object
@type req: Apache request object
@param login_info: login status in the following format: {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems': ['Arxiv', ...]}
@type login_info: dict
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@return: data required to build the arXiv box
@rtype: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
arxiv_data = dict()
arxiv_data['view_own_profile'] = person_id == user_pid
# if the user is not a guest and he is connected through arXiv
arxiv_data['login'] = login_info['logged_in']
arxiv_data['user_pid'] = user_pid
arxiv_data['user_has_pid'] = user_pid != -1
# if the profile the user is logged in with is the same as the profile of the page the user views
arxiv_data['view_own_profile'] = user_pid == person_id
return arxiv_data
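# Illustrative sketch (not executed): the dictionary assembled by _arxiv_box
# (values are examples).
#
#   {'login': True, 'user_pid': 14, 'user_has_pid': True, 'view_own_profile': False}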
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
'''
Process and collect data for the orcid box
@param arxiv_logged_in: shows if the user is logged in through arXiv or not
@type arxiv_logged_in: boolean
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the orcid box
@rtype: dict
'''
orcid_data = dict()
orcid_data['arxiv_login'] = arxiv_logged_in
orcid_data['orcids'] = None
orcid_data['add_power'] = False
orcid_data['own_profile'] = False
orcid_data['pid'] = person_id
# if the profile the user is logged in with is the same as the profile of the page the user views
if person_id == user_pid:
orcid_data['own_profile'] = True
# if the user is an admin then he can add an existing orcid to the profile
if ulevel == "admin":
orcid_data['add_power'] = True
orcids = webapi.get_orcids_by_pid(person_id)
if orcids:
orcid_data['orcids'] = orcids
return orcid_data
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
'''
Process and collect data for the autoclaim box
@param req: Apache request object
@type req: Apache request object
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param remote_logged_in_systems: the remote logged in systems
@type remote_logged_in_systems: list
@return: data required to build the autoclaim box
@rtype: dict
'''
autoclaim_data = dict()
# if no autoclaim should occur, or one has already occurred and its results have been shown, the box should remain hidden
autoclaim_data['hidden'] = True
autoclaim_data['person_id'] = person_id
# if the profile the user is logged in with is the same as the profile of the page the user views
if person_id == user_pid:
recids_to_autoclaim = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
autoclaim_data['hidden'] = False
autoclaim_data['num_of_claims'] = len(recids_to_autoclaim)
return autoclaim_data
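# Illustrative sketch (not executed): the dictionary returned by
# _autoclaim_papers_box when the viewer owns the profile (values are examples).
#
#   {'hidden': False, 'person_id': 1332, 'num_of_claims': 7}
#
# For any other viewer only {'hidden': True, 'person_id': ...} is set.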
############################################
# New autoclaim functions #
############################################
def generate_autoclaim_data(self, req, form):
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
pid = int(json_data['personId'])
except:
raise NotImplementedError("Some error with the parameter from the Ajax request occurred.")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
# If autoclaim was done already and no new remote systems exist
# in order to autoclaim new papers send the cached result
if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
autoclaim_data = pinfo['autoclaim']['res']
json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
return json.dumps(json_response)
external_pubs_association = pinfo['autoclaim']['external_pubs_association']
autoclaim_ticket = pinfo['autoclaim']['ticket']
ulevel = pinfo['ulevel']
uid = getUid(req)
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_status = webapi.get_login_info(uid, params)
remote_systems = login_status['logged_in_to_remote_systems']
papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems, params, external_pubs_association))
already_claimed_recids = set([rec for _, _, rec in get_claimed_papers_of_author(pid)]) & papers_to_autoclaim
papers_to_autoclaim = papers_to_autoclaim - set([rec for _, _, rec in get_claimed_papers_of_author(pid)])
for paper in papers_to_autoclaim:
operation_parts = {'pid': pid,
'action': 'assign',
'bibrefrec': str(paper)}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
# In case the operation could not be created (because of an
# erroneous bibrefrec) ignore it and continue with the rest
continue
webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)
additional_info = {'first_name': '', 'last_name': '', 'email': '',
'comments': 'Assigned automatically when autoclaim was triggered.'}
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)
autoclaim_data = dict()
autoclaim_data['hidden'] = False
autoclaim_data['person_id'] = pid
autoclaim_data['successfull_recids'] = set([op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids
webapi.clean_ticket(autoclaim_ticket)
autoclaim_data['unsuccessfull_recids'] = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]
autoclaim_data['num_of_unsuccessfull_recids'] = len(autoclaim_data['unsuccessfull_recids'])
autoclaim_data['recids_to_external_ids'] = dict()
for key, value in external_pubs_association.iteritems():
ext_system, ext_id = key
rec = value
title = get_title_of_paper(rec)
autoclaim_data['recids_to_external_ids'][rec] = title
# cache the result in the session
pinfo['autoclaim']['res'] = autoclaim_data
if pinfo['orcid']['import_pubs']:
pinfo['orcid']['import_pubs'] = False
session.dirty = True
json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
req.write(json.dumps(json_response))
@staticmethod
def get_params_to_check_login_info(session):
def get_params_to_check_login_info_of_arxiv(session):
try:
return session['user_info']
except KeyError:
return None
def get_params_to_check_login_info_of_orcid(session):
pinfo = session['personinfo']
try:
pinfo['orcid']['has_orcid_id'] = bool(get_orcid_id_of_author(pinfo['pid'])[0][0] and pinfo['orcid']['import_pubs'])
except:
pinfo['orcid']['has_orcid_id'] = False
session.dirty = True
return pinfo['orcid']
get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
'orcid': get_params_to_check_login_info_of_orcid}
params = dict()
for system, get_params in get_params_for_remote_system.iteritems():
params[system] = get_params(session)
return params
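# Illustrative sketch (not executed): the structure returned by
# get_params_to_check_login_info (values are examples).
#
#   {'arXiv': <the session's 'user_info' dict, or None if missing>,
#    'orcid': <the session's pinfo['orcid'] dict, e.g. {'has_orcid_id': False, ...}>}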
@staticmethod
def _claim_paper_box(person_id):
'''
Process and collect data for the claim paper box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the claim paper box
@rtype: dict
'''
claim_paper_data = dict()
claim_paper_data['canonical_id'] = str(webapi.get_canonical_id_from_person_id(person_id))
return claim_paper_data
@staticmethod
def _support_box():
'''
Process and collect data for the support box
@return: data required to build the support box
@rtype: dict
'''
support_data = dict()
return support_data
@staticmethod
def _merge_box(person_id):
'''
Process and collect data for the merge box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the merge box
@rtype: dict
'''
merge_data = dict()
search_param = webapi.get_canonical_id_from_person_id(person_id)
name_variants = [element[0] for element in webapi.get_person_names_from_id(person_id)]
relevant_name = most_relevant_name(name_variants)
if relevant_name:
search_param = relevant_name.split(",")[0]
merge_data['search_param'] = search_param
merge_data['canonical_id'] = webapi.get_canonical_id_from_person_id(person_id)
return merge_data
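# Illustrative sketch (not executed): the dictionary returned by _merge_box
# (values are examples).
#
#   {'search_param': 'Ellis', 'canonical_id': 'J.R.Ellis.1'}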
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the internal ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the internal ids box
@rtype: dict
'''
internal_ids_data = dict()
internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
internal_ids_data['person_id'] = person_id
internal_ids_data['user_pid'] = user_pid
internal_ids_data['ulevel'] = ulevel
return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the external ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the external ids box
@rtype: dict
'''
external_ids_data = dict()
external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
external_ids_data['person_id'] = person_id
external_ids_data['user_pid'] = user_pid
external_ids_data['ulevel'] = ulevel
return external_ids_data
@staticmethod
def _hepnames_box(person_id):
return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
'''
Generate the overview page of persons with open RT tickets
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
webapi.session_bareinit(req)
no_access = self._page_access_permission_wall(req, req_level='admin')
if no_access:
return no_access
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
title = "Open RT tickets"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
tickets = webapi.get_persons_with_open_tickets_list()
tickets = [[webapi.get_most_frequent_name_from_pid(int(t[0])),
webapi.get_person_redirect_link(t[0]), t[0], t[1]]
for t in tickets]
content = TEMPLATE.tmpl_tickets_admin(tickets)
content = TEMPLATE.tmpl_person_detail_layout(content)
body = profile_page.get_wrapped_body(content)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def help(self, req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "help", ln, is_owner, self._is_admin(pinfo))
title = "Help page"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
content = TEMPLATE.tmpl_help_page()
body = profile_page.get_wrapped_body(content)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def export(self, req, form):
'''
Generate JSONized export of Person data
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'request': (str, None),
'userid': (str, None)})
if not CFG_JSON_AVAILABLE:
return "500_json_not_found__install_package"
# session = get_session(req)
request = None
userid = None
if "userid" in argd and argd['userid']:
userid = argd['userid']
else:
return "404_user_not_found"
if "request" in argd and argd['request']:
request = argd["request"]
# find user from ID
user_email = get_email_from_username(userid)
if user_email == userid:
return "404_user_not_found"
uid = get_uid_from_email(user_email)
uinfo = collect_user_info(uid)
# find person by uid
pid = webapi.get_pid_from_uid(uid)
# find papers by pid that are confirmed by a human.
papers = webapi.get_papers_by_person_id(pid, 2)
# filter by request param, e.g. arxiv
if not request:
return "404__no_filter_selected"
if request not in VALID_EXPORT_FILTERS:
return "500_filter_invalid"
if request == "arxiv":
query = "(recid:"
query += " OR recid:".join(papers)
query += ") AND 037:arxiv"
db_docs = perform_request_search(p=query, rg=0)
nickmail = ""
nickname = ""
db_arxiv_ids = []
try:
nickname = uinfo["nickname"]
except KeyError:
pass
if not nickname:
try:
nickmail = uinfo["email"]
except KeyError:
nickmail = user_email
nickname = nickmail
db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
construct = {"nickname": nickname,
"claims": ";".join(db_arxiv_ids)}
jsondmp = json.dumps(construct)
signature = webapi.sign_assertion("arXiv", jsondmp)
construct["digest"] = signature
return json.dumps(construct)
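# Illustrative sketch (not executed): the JSON produced by the "arxiv" export
# filter above, before and after signing (values are examples).
#
#   {"nickname": "jellis", "claims": "hep-ph/0101001;1001.00001"}
# is signed with webapi.sign_assertion("arXiv", ...) and returned as
#   {"nickname": "jellis", "claims": "...", "digest": "<signature>"}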
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
_exports = ['',
'import_orcid_pubs',
'connect_author_with_hepname',
'connect_author_with_hepname_ajax',
'suggest_orcid',
'suggest_orcid_ajax']
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/manage_profile/1332 shows the page of the author with id: 1332
- /author/manage_profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if not component in self._exports:
return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1 # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
try:
pid = int(identifier)
except ValueError:
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
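# Illustrative sketch (not executed): how the constructor resolves the
# supported identifier forms (example values).
#
#   WebInterfaceBibAuthorIDManageProfilePages('14')            -> person_id = 14
#   WebInterfaceBibAuthorIDManageProfilePages('J.R.Ellis.1')   -> person_id behind that canonical id
#   WebInterfaceBibAuthorIDManageProfilePages('100:1442,155')  -> person_id behind that bibrefrec
#   WebInterfaceBibAuthorIDManageProfilePages('unknown')       -> person_id = -1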
def __call__(self, req, form):
'''
Generate SSO landing/author management page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
pinfo['claim_in_process'] = True
argd = wash_urlargd(form, {
'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE or self.person_id is None:
return page_not_authorized(req, text=_("This page is not accessible directly."))
if person_id < 0:
return page_not_authorized(req, text=_("This page is not accessible directly."))
# log the visit
webapi.history_log_visit(req, 'manage_profile', pid=person_id)
# store the arxiv papers the user owns
if uid > 0 and not pinfo['arxiv_status']:
uinfo = collect_user_info(req)
arxiv_papers = list()
if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
arxiv_papers = uinfo['external_arxivids'].split(';')
if arxiv_papers:
webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
pinfo['arxiv_status'] = True
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
title_message = _('Profile management')
ssl_param = 0
if req.is_https():
ssl_param = 1
# Create Wrapper Page Markup
cname = webapi.get_canonical_id_from_person_id(self.person_id)
if cname == self.person_id:
return page_not_authorized(req, text=_("This page is not accessible directly."))
menu = WebProfileMenu(cname, "manage_profile", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("manage_profile", webapi.get_longest_name_from_pid(self.person_id), no_cache=True)
profile_page.add_profile_menu(menu)
gboxstatus = self.person_id
gpid = self.person_id
gNumOfWorkers = 3 # to do: read it from conf file
gReqTimeout = 3000
gPageTimeout = 12000
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
user_pid = webapi.get_user_pid(login_info['uid'])
person_data = webapi.get_person_info_by_pid(person_id)
# process and collect data for every box [LEGACY]
arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
support_data = WebInterfaceBibAuthorIDClaimPages._support_box()
ext_ids_data = None
int_ids_data = None
if ulevel == 'admin':
ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)
content = ''
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
content += TEMPLATE.tmpl_profile_management(ln, person_data, arxiv_data,
orcid_data, claim_paper_data,
int_ids_data, ext_ids_data,
autoclaim_data, support_data,
merge_data, hepnames_data)
body = profile_page.get_wrapped_body(content)
return page(title=title_message,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def import_orcid_pubs(self, req, form):
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
orcid_info = pinfo['orcid']
# the author should already have an orcid if this method was triggered
orcid_id = get_orcid_id_of_author(pinfo['pid'])[0][0]
orcid_dois = get_dois_from_orcid(orcid_id)
# TODO: what to do in case some ORCID server error occurs?
if orcid_dois is None:
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
# TODO: it would be smarter if:
# 1. we save in the db the orcid_dois
# 2. to expire only the external pubs box in the profile page
webauthorapi.expire_all_cache_for_personid(pinfo['pid'])
orcid_info['imported_pubs'] = orcid_dois
orcid_info['import_pubs'] = True
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
def connect_author_with_hepname(self, req, form):
argd = wash_urlargd(form, {'cname':(str, None),
'hepname': (str, None),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['cname'] is not None:
cname = argd['cname']
else:
return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
if argd['hepname'] is not None:
hepname = argd['hepname']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")
webapi.connect_author_with_hepname(cname, hepname)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, cname))
def connect_author_with_hepname_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
cname = json_data['cname']
hepname = json_data['hepname']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
session = get_session(req)
pinfo = session['personinfo']
if not self._is_admin(pinfo):
webapi.connect_author_with_hepname(cname, hepname)
else:
uid = getUid(req)
add_cname_to_hepname_record(cname, hepname, uid)
def suggest_orcid(self, req, form):
argd = wash_urlargd(form, {'orcid':(str, None),
'pid': (int, -1),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")
if argd['orcid'] is not None and is_valid_orcid(argd['orcid']):
orcid = argd['orcid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCiD.")
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, pid))
def suggest_orcid_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
orcid = json_data['orcid']
pid = json_data['pid']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
if not is_valid_orcid(orcid):
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
def _fail(self, req, code):
req.status = code
return
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
index = __call__
class WebInterfaceAuthorTicketHandling(WebInterfaceDirectory):
_exports = ['get_status',
'update_status',
'add_operation',
'modify_operation',
'remove_operation',
'commit',
'abort']
@staticmethod
def bootstrap_status(pinfo, on_ticket):
'''
Function used for generating get_status json bootstrapping.
@param pinfo: person_info
@type pinfo: dict
@param on_ticket: ticket target
@type on_ticket: str
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
author_ticketing = WebInterfaceAuthorTicketHandling()
ticket = author_ticketing._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return "{}"
ticket_status = webapi.get_ticket_status(ticket)
return json.dumps(ticket_status)
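# Illustrative sketch (not executed): bootstrap_status is meant to inline the
# current ticket state into the page markup so the Backbone app starts
# pre-populated, e.g.
#
#   "app.userops.set(%s);" % WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user")
#
# It returns the JSON-serialised result of webapi.get_ticket_status(ticket),
# or "{}" when the requested ticket does not exist in the session.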
def get_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket_status = webapi.get_ticket_status(ticket)
session.dirty = True
req.content_type = 'application/json'
req.write(json.dumps(ticket_status))
def update_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.update_ticket_status(ticket)
session.dirty = True
def add_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
def modify_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_modified = webapi.construct_operation(operation_parts, pinfo, uid, should_have_bibref=False)
if operation_to_be_modified is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_modified = webapi.modify_operation_from_ticket(operation_to_be_modified, ticket)
if not operation_is_modified:
# Operation couldn't be modified because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def remove_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_removed = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_removed is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_removed = webapi.remove_operation_from_ticket(operation_to_be_removed, ticket)
if not operation_is_removed:
# Operation couldn't be removed because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def commit(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
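        # Illustrative payload only: the keys below mirror what this handler
        # reads from 'jsondata'; the values are made up.
        #   {"first_name": "Jane", "last_name": "Doe",
        #    "email": "jane.doe@example.org",
        #    "comments": "please attribute these papers to me", "on": "user"}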
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
            additional_info = {'first_name': json_data.get('first_name', "Default"),
                               'last_name': json_data.get('last_name', "Default"),
                               'email': json_data.get('email', "Default"),
                               'comments': json_data['comments']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
uid = getUid(req)
user_is_guest = isGuestUser(uid)
if not user_is_guest:
try:
additional_info['first_name'] = session['user_info']['external_firstname']
additional_info['last_name'] = session['user_info']['external_familyname']
additional_info['email'] = session['user_info']['email']
except KeyError:
additional_info['first_name'] = additional_info['last_name'] = additional_info['email'] = str(uid)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
# When a guest is claiming we should not commit if he
# doesn't provide us his full personal information
strict_check = user_is_guest
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=strict_check)
if userinfo is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.commit_operations_from_ticket(ticket, userinfo, uid, ulevel)
session.dirty = True
def abort(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
# When a user is claiming we should completely delete his ticket if he
# aborts the claiming procedure
delete_ticket = (on_ticket == 'user')
webapi.abort_ticket(ticket, delete_ticket=delete_ticket)
session.dirty = True
def _get_according_ticket(self, on_ticket, pinfo):
ticket = None
if on_ticket == 'user':
ticket = pinfo['ticket']
elif on_ticket == 'autoclaim':
ticket = pinfo['autoclaim']['ticket']
return ticket
def _fail(self, req, code):
req.status = code
return
class WebAuthorSearch(WebInterfaceDirectory):
"""
Provides an interface to profile search using AJAX queries.
"""
_exports = ['list',
'details']
# This class requires JSON libraries
assert CFG_JSON_AVAILABLE, "[WebAuthorSearch] JSON must be enabled."
class QueryPerson(WebInterfaceDirectory):
_exports = ['']
MIN_QUERY_LENGTH = 2
QUERY_REGEX = re.compile(r"[\w\s\.\-,@]+$", re.UNICODE)
def __init__(self, query=None):
self.query = query
def _lookup(self, component, path):
if component not in self._exports:
return WebAuthorSearch.QueryPerson(component), path
def __call__(self, req, form):
if self.query is None or len(self.query) < self.MIN_QUERY_LENGTH:
req.status = apache.HTTP_BAD_REQUEST
return "Query too short"
if not self.QUERY_REGEX.match(self.query):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
pid_results = [{"pid": pid[0]} for pid in webapi.search_person_ids_by_name(self.query)]
req.content_type = 'application/json'
return json.dumps(pid_results)
# Request for index handled by __call__
index = __call__
def _JSON_received(self, form):
try:
return "jsondata" in form
except TypeError:
return False
def _extract_JSON(self, form):
try:
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
return json_data
except ValueError:
return None
def _get_pid_details(self, pid):
details = webapi.get_person_info_by_pid(pid)
details.update({
"names": [{"name": x, "paperCount": y} for x, y in webapi.get_person_names_from_id(pid)],
"externalIds": [{x: y} for x, y in webapi.get_external_ids_from_person_id(pid).items()]
})
details['cname'] = details.pop("canonical_name", None)
return details
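    # The 'details' handler below expects an Ajax request whose 'jsondata'
    # field decodes to something like {"pids": [10, 42]} (values illustrative)
    # and answers with a JSON list built by _get_pid_details for each pid.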
def details(self, req, form):
if self._JSON_received(form):
try:
json_data = self._extract_JSON(form)
pids = json_data['pids']
req.content_type = 'application/json'
details = [self._get_pid_details(pid) for pid in pids]
return json.dumps(details)
except (TypeError, KeyError):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
else:
req.status = apache.HTTP_BAD_REQUEST
return "Incorrect query format."
list = QueryPerson()
class WebInterfaceAuthor(WebInterfaceDirectory):
'''
Handles /author/* pages.
Supplies the methods:
/author/choose_profile
/author/claim/
/author/help
/author/manage_profile
/author/merge_profiles
/author/profile/
/author/search
/author/ticket/
'''
_exports = ['',
'choose_profile',
'claim',
'help',
'manage_profile',
'merge_profiles',
'profile',
'search',
'search_ajax',
'ticket']
from invenio.webauthorprofile_webinterface import WebAuthorPages
claim = WebInterfaceBibAuthorIDClaimPages()
profile = WebAuthorPages()
choose_profile = claim.choose_profile
help = claim.help
manage_profile = WebInterfaceBibAuthorIDManageProfilePages()
merge_profiles = claim.merge_profiles
search = claim.search
search_ajax = WebAuthorSearch()
ticket = WebInterfaceAuthorTicketHandling()
def _lookup(self, component, path):
if component not in self._exports:
return WebInterfaceAuthor(component), path
def __init__(self, component=None):
self.path = component
def __call__(self, req, form):
if self.path is None or len(self.path) < 1:
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
# Check if canonical id: e.g. "J.R.Ellis.1"
pid = get_person_id_from_canonical_id(self.path)
if pid >= 0:
url = "%s/author/profile/%s" % (CFG_BASE_URL, get_person_redirect_link(pid))
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
else:
try:
pid = int(self.path)
except ValueError:
redirect_to_url(req, "%s/author/search?q=%s" % (CFG_BASE_URL, self.path))
return
else:
if author_has_papers(pid):
cid = get_person_redirect_link(pid)
if is_valid_canonical_id(cid):
redirect_id = cid
else:
redirect_id = pid
url = "%s/author/profile/%s" % (CFG_BASE_URL, redirect_id)
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
return
index = __call__
class WebInterfacePerson(WebInterfaceDirectory):
'''
Handles /person/* pages.
Supplies the methods:
/person/welcome
'''
_exports = ['welcome','update', 'you']
def welcome(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def you(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def update(self, req, form):
"""
Generate hepnames update form
"""
argd = wash_urlargd(form,
{'ln': (str, CFG_SITE_LANG),
'email': (str, ''),
'IRN': (str, ''),
})
# Retrieve info for HEP name based on email or IRN
recids = []
if argd['email']:
recids = perform_request_search(p="371__m:%s" % argd['email'], cc="HepNames")
elif argd['IRN']:
recids = perform_request_search(p="001:%s" % argd['IRN'], cc="HepNames")
else:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
if not recids:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
else:
hepname_bibrec = get_bibrecord(recids[0])
# Extract all info from recid that should be included in the form
full_name = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="a")
display_name = record_get_field_value(hepname_bibrec, tag="880", ind1="", ind2="", code="a")
email = record_get_field_value(hepname_bibrec, tag="371", ind1="", ind2="", code="m")
status = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="g")
keynumber = record_get_field_value(hepname_bibrec, tag="970", ind1="", ind2="", code="a")
try:
keynumber = keynumber.split('-')[1]
except IndexError:
pass
research_field_list = record_get_field_values(hepname_bibrec, tag="650", ind1="1", ind2="7", code="a")
institution_list = []
for instance in record_get_field_instances(hepname_bibrec, tag="371", ind1="", ind2=""):
if not instance or field_get_subfield_values(instance, "m"):
continue
institution_info = ["", "", "", "", ""]
if field_get_subfield_values(instance, "a"):
institution_info[0] = field_get_subfield_values(instance, "a")[0]
if field_get_subfield_values(instance, "r"):
institution_info[1] = field_get_subfield_values(instance, "r")[0]
if field_get_subfield_values(instance, "s"):
institution_info[2] = field_get_subfield_values(instance, "s")[0]
if field_get_subfield_values(instance, "t"):
institution_info[3] = field_get_subfield_values(instance, "t")[0]
if field_get_subfield_values(instance, "z"):
institution_info[4] = field_get_subfield_values(instance, "z")[0]
institution_list.append(institution_info)
phd_advisor_list = record_get_field_values(hepname_bibrec, tag="701", ind1="", ind2="", code="a")
experiment_list = record_get_field_values(hepname_bibrec, tag="693", ind1="", ind2="", code="e")
web_page = record_get_field_value(hepname_bibrec, tag="856", ind1="1", ind2="", code="u")
# Create form and pass as parameters all the content from the record
body = TEMPLATE.tmpl_update_hep_name(full_name, display_name, email,
status, research_field_list,
institution_list, phd_advisor_list,
experiment_list, web_page)
title = "HEPNames"
return page(title=title,
metaheaderadd = TEMPLATE.tmpl_update_hep_name_headers(),
body=body,
req=req,
)
# pylint: enable=C0301
# pylint: enable=W0613
|
GRArmstrong/invenio-inspire-ops
|
modules/bibauthorid/lib/bibauthorid_webinterface.py
|
Python
|
gpl-2.0
| 148,279
|
[
"VisIt"
] |
6938be33b511189a8bf0b688e9f53ac0d89ded45cf3359754e09fd8a7632bd47
|
# coding: utf-8
import numpy as np
from .. import img_as_float
from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman
def denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1,
bins=10000, mode='constant', cval=0):
"""Denoise image using bilateral filter.
This is an edge-preserving and noise reducing denoising filter. It averages
pixels based on their spatial closeness and radiometric similarity.
    Spatial closeness is measured by the Gaussian function of the Euclidean
    distance between two pixels and a certain standard deviation
    (`sigma_spatial`).
    Radiometric similarity is measured by the Gaussian function of the
    Euclidean distance between two color values and a certain standard
    deviation (`sigma_range`).
Parameters
----------
image : ndarray
Input image.
win_size : int
Window size for filtering.
sigma_range : float
        Standard deviation for grayvalue/color distance (radiometric
        similarity). A larger value results in averaging of pixels with larger
        radiometric differences. Note that the image will be converted using
        the `img_as_float` function and thus the standard deviation is with
        respect to the range `[0, 1]`.
    sigma_spatial : float
        Standard deviation for spatial distance. A larger value results in
        averaging of pixels with larger spatial differences.
bins : int
        Number of discrete values for Gaussian weights of color filtering.
A larger value results in improved accuracy.
mode : string
How to handle values outside the image borders. See
`scipy.ndimage.map_coordinates` for detail.
    cval : float
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
denoised : ndarray
Denoised image.
References
----------
.. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf
"""
return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial,
bins, mode, cval)
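# A minimal usage sketch for ``denoise_bilateral`` above (illustrative only;
# the test image and the parameter values are assumptions, not part of this
# module):
#
#     from skimage import data, img_as_float
#     from skimage.restoration import denoise_bilateral
#     noisy = img_as_float(data.astronaut()[:64, :64])
#     denoised = denoise_bilateral(noisy, sigma_range=0.1, sigma_spatial=15)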
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True):
"""Perform total-variation denoising using split-Bregman optimization.
    Total-variation denoising (also known as total-variation regularization)
tries to find an image with less total-variation under the constraint
of being similar to the input image, which is controlled by the
regularization parameter.
Parameters
----------
image : ndarray
        Input data to be denoised (converted using `img_as_float`).
weight : float
Denoising weight. The smaller the `weight`, the more denoising (at
the expense of less similarity to the `input`). The regularization
parameter `lambda` is chosen as `2 * weight`.
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when::
SUM((u(n) - u(n-1))**2) < eps
max_iter : int, optional
Maximal number of iterations used for the optimization.
isotropic : boolean, optional
Switch between isotropic and anisotropic TV denoising.
Returns
-------
u : ndarray
Denoised image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Total_variation_denoising
.. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
Regularized Problems",
ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
.. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising
using Split Bregman" in Image Processing On Line on 2012–05–19,
http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
.. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
"""
return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)
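# A minimal usage sketch for ``denoise_tv_bregman`` above (illustrative only;
# the image and the weight shown are assumptions):
#
#     from skimage import data, img_as_float
#     from skimage.restoration import denoise_tv_bregman
#     noisy = img_as_float(data.camera())
#     denoised = denoise_tv_bregman(noisy, weight=10)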
def _denoise_tv_chambolle_3d(im, weight=100, eps=2.e-4, n_iter_max=200):
"""Perform total-variation denoising on 3D images.
Parameters
----------
im : ndarray
3-D input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
px = np.zeros_like(im)
py = np.zeros_like(im)
pz = np.zeros_like(im)
gx = np.zeros_like(im)
gy = np.zeros_like(im)
gz = np.zeros_like(im)
d = np.zeros_like(im)
i = 0
while i < n_iter_max:
d = - px - py - pz
d[1:] += px[:-1]
d[:, 1:] += py[:, :-1]
d[:, :, 1:] += pz[:, :, :-1]
out = im + d
E = (d ** 2).sum()
gx[:-1] = np.diff(out, axis=0)
gy[:, :-1] = np.diff(out, axis=1)
gz[:, :, :-1] = np.diff(out, axis=2)
norm = np.sqrt(gx ** 2 + gy ** 2 + gz ** 2)
E += weight * norm.sum()
norm *= 0.5 / weight
norm += 1.
px -= 1. / 6. * gx
px /= norm
py -= 1. / 6. * gy
py /= norm
        pz -= 1. / 6. * gz
pz /= norm
E /= float(im.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def _denoise_tv_chambolle_2d(im, weight=50, eps=2.e-4, n_iter_max=200):
"""Perform total-variation denoising on 2D images.
Parameters
----------
im : ndarray
Input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`)
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
The principle of total variation denoising is explained in
http://en.wikipedia.org/wiki/Total_variation_denoising.
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
that was proposed by Chambolle in [1]_.
References
----------
.. [1] A. Chambolle, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
"""
px = np.zeros_like(im)
py = np.zeros_like(im)
gx = np.zeros_like(im)
gy = np.zeros_like(im)
d = np.zeros_like(im)
i = 0
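    # Chambolle's projection algorithm: each pass rebuilds the divergence
    # field d from the dual variables (px, py), forms the current estimate
    # out = im + d, takes a projected gradient step on (px, py) using the
    # gradient of that estimate, and stops once the energy E stabilises.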
while i < n_iter_max:
d = -px - py
d[1:] += px[:-1]
d[:, 1:] += py[:, :-1]
out = im + d
E = (d ** 2).sum()
gx[:-1] = np.diff(out, axis=0)
gy[:, :-1] = np.diff(out, axis=1)
norm = np.sqrt(gx ** 2 + gy ** 2)
E += weight * norm.sum()
norm *= 0.5 / weight
norm += 1
px -= 0.25 * gx
px /= norm
py -= 0.25 * gy
py /= norm
E /= float(im.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def denoise_tv_chambolle(im, weight=50, eps=2.e-4, n_iter_max=200,
multichannel=False):
"""Perform total-variation denoising on 2D and 3D images.
Parameters
----------
im : ndarray (2d or 3d) of ints, uints or floats
Input data to be denoised. `im` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that
determines the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
multichannel : bool, optional
Apply total-variation denoising separately for each channel. This
option should be true for color images, otherwise the denoising is
also applied in the 3rd dimension.
Returns
-------
out : ndarray
Denoised image.
Notes
-----
Make sure to set the multichannel parameter appropriately for color images.
The principle of total variation denoising is explained in
http://en.wikipedia.org/wiki/Total_variation_denoising
The principle of total variation denoising is to minimize the
total variation of the image, which can be roughly described as
the integral of the norm of the image gradient. Total variation
denoising tends to produce "cartoon-like" images, that is,
piecewise-constant images.
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
that was proposed by Chambolle in [1]_.
References
----------
.. [1] A. Chambolle, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
Examples
--------
2D example on astronaut image:
>>> from skimage import color, data
>>> img = color.rgb2gray(data.astronaut())[:50, :50]
>>> img += 0.5 * img.std() * np.random.randn(*img.shape)
>>> denoised_img = denoise_tv_chambolle(img, weight=60)
3D example on synthetic data:
>>> x, y, z = np.ogrid[0:20, 0:20, 0:20]
>>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
>>> mask = mask.astype(np.float)
>>> mask += 0.2*np.random.randn(*mask.shape)
>>> res = denoise_tv_chambolle(mask, weight=100)
"""
im_type = im.dtype
if not im_type.kind == 'f':
im = img_as_float(im)
if im.ndim == 2:
out = _denoise_tv_chambolle_2d(im, weight, eps, n_iter_max)
elif im.ndim == 3:
if multichannel:
out = np.zeros_like(im)
for c in range(im.shape[2]):
out[..., c] = _denoise_tv_chambolle_2d(im[..., c], weight, eps,
n_iter_max)
else:
out = _denoise_tv_chambolle_3d(im, weight, eps, n_iter_max)
else:
raise ValueError('only 2-d and 3-d images may be denoised with this '
'function')
return out
|
michaelaye/scikit-image
|
skimage/restoration/_denoise.py
|
Python
|
bsd-3-clause
| 11,219
|
[
"Gaussian"
] |
ce1a0ffb0b8ce3b6c4b30374b7ab1b454a8b4e7b2f908bf6b883118c4d43965b
|
from collections import OrderedDict
from edc_constants.constants import REQUIRED, NOT_REQUIRED, ADDITIONAL, NOT_ADDITIONAL
from edc_visit_schedule.classes import (
VisitScheduleConfiguration, site_visit_schedules,
CrfTuple, MembershipFormTuple, ScheduleTuple, RequisitionPanelTuple)
from microbiome.apps.mb.constants import INFANT
from ..models import InfantVisit, InfantBirth
class InfantBirthVisitSchedule(VisitScheduleConfiguration):
name = 'birth visit schedule'
app_label = 'mb_infant'
membership_forms = OrderedDict({
'infant_enrollment': MembershipFormTuple('infant_enrollment', InfantBirth, True)})
schedules = OrderedDict({
'Infant Enrollment': ScheduleTuple('Infant Enrollment',
'infant_enrollment', None, None)})
visit_definitions = OrderedDict()
visit_definitions['2000'] = {
'title': 'Birth',
'time_point': 0,
'base_interval': 0,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': INFANT,
'visit_tracking_model': InfantVisit,
'schedule': 'Infant Enrollment',
'instructions': None,
'requisitions': (
RequisitionPanelTuple(10L, u'mb_lab', u'infantrequisition',
'DNA PCR', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(20L, u'mb_lab', u'infantrequisition',
'Stool storage', 'STORAGE', 'ST', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(30L, u'mb_lab', u'infantrequisition',
'PBMC Plasma (STORE ONLY)', 'STORAGE', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(40L, u'mb_lab', u'infantrequisition',
'Rectal swab (Storage)', 'STORAGE', 'RS', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(70L, u'mb_lab', u'infantrequisition',
'Viral Load', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(80L, u'mb_lab', u'infantrequisition',
'Chemistry', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(90L, u'mb_lab', u'infantrequisition',
'Hematology (ARV)', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
),
'entries': (
CrfTuple(10L, u'mb_infant', u'infantbirthdata', REQUIRED, NOT_ADDITIONAL),
CrfTuple(20L, u'mb_infant', u'infantbirthexam', REQUIRED, NOT_ADDITIONAL),
CrfTuple(30L, u'mb_infant', u'infantbirthfeedvaccine', REQUIRED, NOT_ADDITIONAL),
CrfTuple(40L, u'mb_infant', u'infantbirtharv', NOT_REQUIRED, ADDITIONAL),
CrfTuple(50L, u'mb_infant', u'infantstoolcollection', REQUIRED, NOT_ADDITIONAL),
CrfTuple(100L, u'mb_infant', u'infantcongenitalanomalies', NOT_REQUIRED, ADDITIONAL),
CrfTuple(200L, u'mb_infant', u'infantdeathreport', NOT_REQUIRED, ADDITIONAL),
CrfTuple(230L, u'mb_infant', u'infantoffstudy', NOT_REQUIRED, ADDITIONAL))}
visit_definitions['2010'] = {
'title': 'Infant 1 Month Visit',
'time_point': 10,
'base_interval': 27,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': INFANT,
'visit_tracking_model': InfantVisit,
'schedule': 'Infant Enrollment',
'instructions': None,
'requisitions': (
RequisitionPanelTuple(10L, u'mb_lab', u'infantrequisition',
'DNA PCR', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(20L, u'mb_lab', u'infantrequisition',
'Stool storage', 'STORAGE', 'ST', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(30L, u'mb_lab', u'infantrequisition',
'PBMC Plasma (STORE ONLY)', 'STORAGE', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(40L, u'mb_lab', u'infantrequisition',
'Rectal swab (Storage)', 'STORAGE', 'RS', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(70L, u'mb_lab', u'infantrequisition',
'Viral Load', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(80L, u'mb_lab', u'infantrequisition',
'Chemistry', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(90L, u'mb_lab', u'infantrequisition',
'Hematology (ARV)', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
),
'entries': (
CrfTuple(30L, u'mb_infant', u'infantfu', REQUIRED, NOT_ADDITIONAL),
CrfTuple(40L, u'mb_infant', u'infantfuphysical', REQUIRED, NOT_ADDITIONAL),
CrfTuple(50L, u'mb_infant', u'infantfudx', NOT_REQUIRED, ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfunewmed', REQUIRED, NOT_ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfuimmunizations', REQUIRED, NOT_ADDITIONAL),
CrfTuple(90L, u'mb_infant', u'infantarvproph', REQUIRED, ADDITIONAL),
CrfTuple(100L, u'mb_infant', u'infantfeeding', REQUIRED, NOT_ADDITIONAL),
CrfTuple(110L, u'mb_infant', u'infantstoolcollection', REQUIRED, NOT_ADDITIONAL),
CrfTuple(200L, u'mb_infant', u'infantdeathreport', NOT_REQUIRED, ADDITIONAL),
CrfTuple(240L, u'mb_infant', u'infantoffstudy', NOT_REQUIRED, ADDITIONAL))}
visit_definitions['2030'] = {
'title': 'Infant 3 Month Visit',
'time_point': 30,
'base_interval': 3,
'base_interval_unit': 'M',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': INFANT,
'visit_tracking_model': InfantVisit,
'schedule': 'Infant Enrollment',
'instructions': None,
'requisitions': (
RequisitionPanelTuple(10L, u'mb_lab', u'infantrequisition',
'DNA PCR', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(20L, u'mb_lab', u'infantrequisition',
'Stool storage', 'STORAGE', 'ST', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(30L, u'mb_lab', u'infantrequisition',
'PBMC Plasma (STORE ONLY)', 'STORAGE', 'WB', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(40L, u'mb_lab', u'infantrequisition',
'Rectal swab (Storage)', 'STORAGE', 'RS', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(60L, u'mb_lab', u'infantrequisition',
'Inflammatory Cytokines', 'STORAGE', 'WB', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(70L, u'mb_lab', u'infantrequisition',
'Viral Load', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(80L, u'mb_lab', u'infantrequisition',
'Chemistry', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(90L, u'mb_lab', u'infantrequisition',
'Hematology (ARV)', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
),
'entries': (
CrfTuple(30L, u'mb_infant', u'infantfu', REQUIRED, NOT_ADDITIONAL),
CrfTuple(40L, u'mb_infant', u'infantfuphysical', REQUIRED, NOT_ADDITIONAL),
CrfTuple(50L, u'mb_infant', u'infantfudx', NOT_REQUIRED, ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfunewmed', REQUIRED, NOT_ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfuimmunizations', REQUIRED, NOT_ADDITIONAL),
CrfTuple(90L, u'mb_infant', u'infantarvproph', REQUIRED, ADDITIONAL),
CrfTuple(100L, u'mb_infant', u'infantfeeding', REQUIRED, NOT_ADDITIONAL),
CrfTuple(110L, u'mb_infant', u'infantstoolcollection', REQUIRED, NOT_ADDITIONAL),
CrfTuple(110L, u'mb_infant', u'infantcircumcision', NOT_REQUIRED, ADDITIONAL),
CrfTuple(200L, u'mb_infant', u'infantdeathreport', NOT_REQUIRED, ADDITIONAL),
CrfTuple(240L, u'mb_infant', u'infantoffstudy', NOT_REQUIRED, ADDITIONAL))}
visit_definitions['2060'] = {
'title': 'Infant 6 Month Visit',
'time_point': 60,
'base_interval': 6,
'base_interval_unit': 'M',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': INFANT,
'visit_tracking_model': InfantVisit,
'schedule': 'Infant Enrollment',
'instructions': None,
'requisitions': (
RequisitionPanelTuple(10L, u'mb_lab', u'infantrequisition',
'DNA PCR', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(20L, u'mb_lab', u'infantrequisition',
'Stool storage', 'STORAGE', 'ST', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(30L, u'mb_lab', u'infantrequisition',
'PBMC Plasma (STORE ONLY)', 'STORAGE', 'WB', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(40L, u'mb_lab', u'infantrequisition',
'Rectal swab (Storage)', 'STORAGE', 'RS', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(60L, u'mb_lab', u'infantrequisition',
'Inflammatory Cytokines', 'STORAGE', 'WB', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(70L, u'mb_lab', u'infantrequisition',
'Viral Load', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(80L, u'mb_lab', u'infantrequisition',
'Chemistry', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(90L, u'mb_lab', u'infantrequisition',
'Hematology (ARV)', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
),
'entries': (
CrfTuple(30L, u'mb_infant', u'infantfu', REQUIRED, NOT_ADDITIONAL),
CrfTuple(40L, u'mb_infant', u'infantfuphysical', REQUIRED, NOT_ADDITIONAL),
CrfTuple(50L, u'mb_infant', u'infantfudx', NOT_REQUIRED, ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfunewmed', REQUIRED, NOT_ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfuimmunizations', REQUIRED, NOT_ADDITIONAL),
CrfTuple(90L, u'mb_infant', u'infantarvproph', REQUIRED, ADDITIONAL),
CrfTuple(100L, u'mb_infant', u'infantfeeding', REQUIRED, NOT_ADDITIONAL),
CrfTuple(110L, u'mb_infant', u'infantstoolcollection', REQUIRED, NOT_ADDITIONAL),
CrfTuple(110L, u'mb_infant', u'infantcircumcision', NOT_REQUIRED, ADDITIONAL),
CrfTuple(200L, u'mb_infant', u'infantdeathreport', NOT_REQUIRED, ADDITIONAL),
CrfTuple(240L, u'mb_infant', u'infantoffstudy', NOT_REQUIRED, ADDITIONAL))}
visit_definitions['2090'] = {
'title': 'Infant 9 Month Visit',
'time_point': 90,
'base_interval': 9,
'base_interval_unit': 'M',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': INFANT,
'visit_tracking_model': InfantVisit,
'schedule': 'Infant Enrollment',
'instructions': None,
'requisitions': (
RequisitionPanelTuple(10L, u'mb_lab', u'infantrequisition',
'DNA PCR', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(20L, u'mb_lab', u'infantrequisition',
'Stool storage', 'STORAGE', 'ST', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(30L, u'mb_lab', u'infantrequisition',
'PBMC Plasma (STORE ONLY)', 'STORAGE', 'WB', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(40L, u'mb_lab', u'infantrequisition',
'Rectal swab (Storage)', 'STORAGE', 'RS', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(60L, u'mb_lab', u'infantrequisition',
'Inflammatory Cytokines', 'STORAGE', 'WB', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(70L, u'mb_lab', u'infantrequisition',
'Viral Load', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(80L, u'mb_lab', u'infantrequisition',
'Chemistry', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(90L, u'mb_lab', u'infantrequisition',
'Hematology (ARV)', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
),
'entries': (
CrfTuple(30L, u'mb_infant', u'infantfu', REQUIRED, NOT_ADDITIONAL),
CrfTuple(40L, u'mb_infant', u'infantfuphysical', REQUIRED, NOT_ADDITIONAL),
CrfTuple(50L, u'mb_infant', u'infantfudx', NOT_REQUIRED, ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfunewmed', REQUIRED, NOT_ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfuimmunizations', REQUIRED, NOT_ADDITIONAL),
CrfTuple(90L, u'mb_infant', u'infantarvproph', REQUIRED, ADDITIONAL),
CrfTuple(100L, u'mb_infant', u'infantfeeding', REQUIRED, NOT_ADDITIONAL),
CrfTuple(110L, u'mb_infant', u'infantstoolcollection', REQUIRED, NOT_ADDITIONAL),
CrfTuple(200L, u'mb_infant', u'infantdeathreport', NOT_REQUIRED, ADDITIONAL),
CrfTuple(240L, u'mb_infant', u'infantoffstudy', NOT_REQUIRED, ADDITIONAL))}
visit_definitions['2120'] = {
'title': 'Infant 12 Month Visit',
'time_point': 120,
'base_interval': 12,
'base_interval_unit': 'M',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': INFANT,
'visit_tracking_model': InfantVisit,
'schedule': 'Infant Enrollment',
'instructions': None,
'requisitions': (
RequisitionPanelTuple(10L, u'mb_lab', u'infantrequisition',
'DNA PCR', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(20L, u'mb_lab', u'infantrequisition',
'Stool storage', 'STORAGE', 'ST', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(30L, u'mb_lab', u'infantrequisition',
'PBMC Plasma (STORE ONLY)', 'STORAGE', 'WB', NOT_REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(40L, u'mb_lab', u'infantrequisition',
'Rectal swab (Storage)', 'STORAGE', 'RS', REQUIRED, NOT_ADDITIONAL),
RequisitionPanelTuple(70L, u'mb_lab', u'infantrequisition',
'Viral Load', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(80L, u'mb_lab', u'infantrequisition',
'Chemistry', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
RequisitionPanelTuple(90L, u'mb_lab', u'infantrequisition',
'Hematology (ARV)', 'TEST', 'WB', NOT_REQUIRED, ADDITIONAL),
),
'entries': (
CrfTuple(30L, u'mb_infant', u'infantfu', REQUIRED, NOT_ADDITIONAL),
CrfTuple(40L, u'mb_infant', u'infantfuphysical', REQUIRED, NOT_ADDITIONAL),
CrfTuple(50L, u'mb_infant', u'infantfudx', NOT_REQUIRED, ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfunewmed', REQUIRED, NOT_ADDITIONAL),
CrfTuple(80L, u'mb_infant', u'infantfuimmunizations', REQUIRED, NOT_ADDITIONAL),
CrfTuple(90L, u'mb_infant', u'infantarvproph', REQUIRED, ADDITIONAL),
CrfTuple(100L, u'mb_infant', u'infantfeeding', REQUIRED, NOT_ADDITIONAL),
CrfTuple(110L, u'mb_infant', u'infantstoolcollection', REQUIRED, NOT_ADDITIONAL),
CrfTuple(200L, u'mb_infant', u'infantdeathreport', NOT_REQUIRED, ADDITIONAL),
CrfTuple(240L, u'mb_infant', u'infantoffstudy', REQUIRED, ADDITIONAL))}
site_visit_schedules.register(InfantBirthVisitSchedule)
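# A minimal illustration of how the registered schedule can be inspected
# (plain attribute/dictionary access on the class; the values shown come from
# the definitions above):
#
#     birth = InfantBirthVisitSchedule.visit_definitions['2000']
#     birth['title']               # 'Birth'
#     len(birth['requisitions'])   # 7 requisition panels at delivery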
|
botswana-harvard/microbiome
|
microbiome/apps/mb_infant/visit_schedule/infant_birth_visit_schedule.py
|
Python
|
gpl-2.0
| 16,747
|
[
"VisIt"
] |
60defb77eb9365f5f74423e0f78b9b3f2a9ac77ea5173b75c8b38ecbb5468480
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.moltemplate.org
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
lttree_check.py
The original template file format supports any variable types or file names.
However if you plan to process template files using lttree.py to create
LAMMPS-readable input/data files, then variables and file names must obey
certain naming conventions. This code attempts to ensure these conventions
are obeyed and to make sure that necessary variables are defined.
-- This code checks static variables (@) and basic LAMMPS syntax --
This program makes an attempt to check that the variables and file names
which appear in an "lttree" file are not misspelled (or miscapitalised).
It also attempts to check that LAMMPS syntax conventions are obeyed.
(It checks that the appropriate type of variable is located in each column).
It also attempts to check that all of the needed coeffs are defined.
-- This code does NOT check instance variables ($) --
This code does not check to make sure that all references to instance variables
(such as $atom, $bond, $angle, $dihedral, $improper or $mol variables) are valid.
This means a user's input script command (like the "group" command) could refer
to an $atom or $mol which was never defined, and this code would not detect it.
(Why: Checking instance variables requires building the entire instance tree,
and verifying the references uses up additional memory after that. I do not
do this because memory is often very scarce once the instance tree is built.)
Instead, we could check for these kinds of errors when post-processing
the files generated by lttree.py or moltemplate.sh.
-- This is not the prettiest code I've ever written. --
"""
import sys
try:
from .ttree_lex import RemoveOuterQuotes, HasWildcard, InputError, \
ErrorLeader, TextBlock, VarRef, TemplateLexer, \
ExtractCatName, TableFromTemplate
from .ttree import BasicUISettings, BasicUIParseArgs, EraseTemplateFiles, \
StackableCommand, PopCommand, PopRightCommand, PopLeftCommand, \
PushCommand, PushLeftCommand, PushRightCommand, ScopeCommand, \
WriteVarBindingsFile, StaticObj, InstanceObj, \
BasicUI, ScopeBegin, ScopeEnd, WriteFileCommand, Render
from .lttree_styles import data_atoms, data_prefix, data_masses, \
data_velocities, data_ellipsoids, data_triangles, data_lines, \
data_pair_coeffs, data_bond_coeffs, data_angle_coeffs, \
data_dihedral_coeffs, data_improper_coeffs, data_bondbond_coeffs, \
data_bondangle_coeffs, data_middlebondtorsion_coeffs, \
data_endbondtorsion_coeffs, data_angletorsion_coeffs, \
data_angleangletorsion_coeffs, data_bondbond13_coeffs, \
data_angleangle_coeffs, data_bonds_by_type, data_angles_by_type, \
data_dihedrals_by_type, data_impropers_by_type, \
data_bonds, data_bond_list, data_angles, data_dihedrals, data_impropers, \
data_boundary, data_pbc, data_prefix_no_space, in_init, in_settings, \
in_prefix
from .lttree import LttreeSettings, LttreeParseArgs
from .ttree_matrix_stack import AffineTransform, MultiAffineStack, \
LinTransform
except (ImportError, SystemError, ValueError):
# not installed as a package
from ttree_lex import *
from ttree import *
from lttree_styles import *
from lttree import *
from ttree_matrix_stack import *
try:
from .ttree import StaticObj, WriteFileCommand, DescrToCatLeafPtkns, \
AssignStaticVarPtrs, FindReplacementVarPairs, ReplaceVars
except (ImportError, SystemError, ValueError):
# not installed as a package
from ttree import *
from lttree import *
from ttree_lex import *
from lttree_styles import *
if sys.version < '2.6':
raise InputError(
'Error: Alas, you must upgrade to a newer version of python.')
g_program_name = __file__.split('/')[-1] # = 'lttree_check.py'
g_version_str = '0.80.1'
g_date_str = '2017-10-01'
# g_no_check_msg = \
# "(If this error message is wrong, and/or you would like to continue anyway,\n"+\
# "try running moltemplate again using the \"-nocheck\" command-line-argument.)\n"
g_no_check_msg = \
'(To continue anyway, run moltemplate using the \"-nocheck\" argument.)\n'
def CheckCommonVarNames(prefix, descr_str, suffix, srcloc):
""" Check the name of variables in a lttree-file to confirm
that they follow the conventions used by lttree.
Almost any variable/category name is permitted, except for
names which closely match those reserved by lttree.
"""
cat_name, cat_ptkns, leaf_ptkns = \
DescrToCatLeafPtkns(descr_str,
srcloc)
if (cat_name.lower() == 'mol'):
if (cat_name != 'mol'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Variable category: \"' + cat_name + '\" does not match, yet overlaps\n' +
'closely with a reserved lttree variable category.\n'
'Perhaps you meant \"mol\"?')
elif (cat_name.lower() == 'group'):
if (cat_name != 'group'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Variable category: \"' + cat_name + '\" does not match, yet overlaps\n' +
'closely with a reserved lttree variable category.\n'
'Perhaps you meant \"group\"?')
elif (cat_name.lower() == 'fix'):
if (cat_name != 'fix'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Variable category: \"' + cat_name + '\" does not match, yet overlaps\n' +
'closely with a reserved lttree variable category.\n'
'Use \"fix\" instead.')
elif (cat_name.lower() == 'atom'):
if (cat_name != 'atom'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Illegal lttree variable category: \"' + cat_name + '\"\n' +
'Use \"atom\" instead.')
elif (cat_name.lower() == 'bond'):
if (cat_name != 'bond'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Variable category: \"' + cat_name + '\" does not match, yet overlaps\n' +
'closely with a reserved lttree variable category.\n'
'Use \"bond\" instead.')
elif (cat_name.lower() == 'angle'):
if (cat_name != 'angle'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Variable category: \"' + cat_name + '\" does not match, yet overlaps\n' +
'closely with a reserved lttree variable category.\n'
'Use \"angle\" instead.')
elif (cat_name.lower() == 'dihedral'):
if (cat_name != 'dihedral'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Variable category: \"' + cat_name + '\" does not match, yet overlaps\n' +
'closely with a reserved lttree variable category.\n'
'Use \"dihedral\" instead.')
elif (cat_name.lower() == 'improper'):
if (cat_name != 'improper'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Variable category: \"' + cat_name + '\" does not match, yet overlaps\n' +
'closely with a reserved lttree variable category.\n'
'Use \"improper\" instead.')
else:
sys.stderr.write('-----------------------------------------------------\n' +
'WARNING: in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
' Unrecognised template variable category: \"' + cat_name + '\"\n' +
'-----------------------------------------------------\n')
def CheckDataFileNames(filename,
srcloc,
write_command,
fnames_found):
N_data_prefix = len(data_prefix)
#data_prefix_no_space = data_prefix.rstrip()
    N_data_prefix_no_space = len(data_prefix_no_space)
section_name = filename[N_data_prefix:]
if ((section_name.lower() == 'atom') or
(section_name.lower() == 'atoms')):
if (filename != data_atoms):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_atoms + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'velocities') or
(section_name.lower() == 'velocity')):
if (filename != data_velocities):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_velocities + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'mass') or
(section_name.lower() == 'masses')):
if (filename != data_masses):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_masses + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'ellipsoids') or
(section_name.lower() == 'ellipsoid') or
(section_name.lower() == 'elipsoids') or
(section_name.lower() == 'elipsoid')):
if (filename != data_ellipsoids):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_ellipsoids + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'triangle') or
(section_name.lower() == 'triangles')):
if (filename != data_triangles):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_triangles + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'line') or
(section_name.lower() == 'lines')):
if (filename != data_lines):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_lines + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('pair coef') == 0) or
(section_name.lower().find('pair_coef') == 0) or
(section_name.lower().find('paircoef') == 0) or
(section_name.lower().find('pair by type') == 0) or
(section_name.lower().find('pair bytype') == 0) or
(section_name.lower().find('pair_by_type') == 0) or
(section_name.lower().find('pair_bytype') == 0) or
(section_name.lower().find('pairbytype') == 0)):
if (filename != data_pair_coeffs):
err_msg = 'Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +\
'Output file name (\"' + filename + '\") does not match,\n' +\
'yet overlaps closely with reserved lttree-file name.\n' +\
'Perhaps you meant \"' + data_pair_coeffs + '\"?'
if ((section_name.lower().find('by type') != -1) or
(section_name.lower().find('by_type') != -1) or
(section_name.lower().find('bytype') != -1)):
err_msg += '\n (Note: "pair" parameters are always assigned by type.\n' +\
' There\'s no need to specify \"by type\")'
raise InputError(err_msg)
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('bond coef') == 0) or
(section_name.lower().find('bond_coef') == 0) or
(section_name.lower().find('bondcoef') == 0)):
if (filename != data_bond_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_bond_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('angle coef') == 0) or
(section_name.lower().find('angle_coef') == 0) or
(section_name.lower().find('anglecoef') == 0)):
if (filename != data_angle_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_angle_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('dihedral coef') == 0) or
(section_name.lower().find('dihedral_coef') == 0) or
(section_name.lower().find('dihedralcoef') == 0)):
if (filename != data_dihedral_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_dihedral_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('improper coef') == 0) or
(section_name.lower().find('improper_coef') == 0) or
(section_name.lower().find('impropercoef') == 0)):
if (filename != data_improper_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_improper_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
# -- class2 data sections --
elif ((section_name.lower().find('bondbond coef') == 0) or
(section_name.lower().find('bondbond_coef') == 0) or
(section_name.lower().find('bondbondcoef') == 0)):
if (filename != data_bondbond_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_bondbond_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('bondangle coef') == 0) or
(section_name.lower().find('bondangle_coef') == 0) or
(section_name.lower().find('bondanglecoef') == 0)):
if (filename != data_bondangle_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_bondangle_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('middlebondtorsion coef') == 0) or
(section_name.lower().find('middlebondtorsion_coef') == 0) or
(section_name.lower().find('middlebondtorsioncoef') == 0) or
(section_name.lower().find('middlebondtorision coef') == 0) or
(section_name.lower().find('middlebondtorision_coef') == 0) or
(section_name.lower().find('middlebondtorisioncoef') == 0)):
if (filename != data_middlebondtorsion_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_middlebondtorsion_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('endbondtorsion coef') == 0) or
(section_name.lower().find('endbondtorsion_coef') == 0) or
(section_name.lower().find('endbondtorsioncoef') == 0) or
(section_name.lower().find('endbondtorision coef') == 0) or
(section_name.lower().find('endbondtorision_coef') == 0) or
(section_name.lower().find('endbondtorisioncoef') == 0)):
if (filename != data_endbondtorsion_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_endbondtorsion_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('angletorsion coef') == 0) or
(section_name.lower().find('angletorsion_coef') == 0) or
(section_name.lower().find('angletorsioncoef') == 0) or
(section_name.lower().find('angletorision coef') == 0) or
(section_name.lower().find('angletorision_coef') == 0) or
(section_name.lower().find('angletorisioncoef') == 0)):
if (filename != data_angletorsion_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_angletorsion_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('angleangletorsion coef') == 0) or
(section_name.lower().find('angleangletorsion_coef') == 0) or
(section_name.lower().find('angleangletorsioncoef') == 0) or
(section_name.lower().find('angleangletorision coef') == 0) or
(section_name.lower().find('angleangletorision_coef') == 0) or
(section_name.lower().find('angleangletorisioncoef') == 0)):
if (filename != data_angleangletorsion_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_angleangletorsion_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('bondbond13 coef') == 0) or
(section_name.lower().find('bondbond13_coef') == 0) or
(section_name.lower().find('bondbond13coef') == 0)):
if (filename != data_bondbond13_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_bondbond13_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('angleangle coef') == 0) or
(section_name.lower().find('angleangle_coef') == 0) or
(section_name.lower().find('angleanglecoef') == 0)):
if (filename != data_angleangle_coeffs):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_angleangle_coeffs + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'bonds by type') or
(section_name.lower() == 'bonds bytype') or
(section_name.lower() == 'bonds_by_type') or
(section_name.lower() == 'bonds_bytype') or
(section_name.lower() == 'bondsbytype') or
(section_name.lower() == 'bond by type') or
(section_name.lower() == 'bond bytype') or
(section_name.lower() == 'bond_by_type') or
(section_name.lower() == 'bond_bytype') or
(section_name.lower() == 'bondbytype')):
if (filename != data_bonds_by_type):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_bonds_by_type + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'angles by type') or
(section_name.lower() == 'angles bytype') or
(section_name.lower() == 'angles_by_type') or
(section_name.lower() == 'angles_bytype') or
(section_name.lower() == 'anglesbytype') or
(section_name.lower() == 'angle by type') or
(section_name.lower() == 'angle bytype') or
(section_name.lower() == 'angle_by_type') or
(section_name.lower() == 'angle_bytype') or
(section_name.lower() == 'anglebytype')):
if (filename != data_angles_by_type):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_angles_by_type + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'dihedrals by type') or
(section_name.lower() == 'dihedrals bytype') or
(section_name.lower() == 'dihedrals_by_type') or
(section_name.lower() == 'dihedrals_bytype') or
(section_name.lower() == 'dihedralsbytype') or
(section_name.lower() == 'dihedral by type') or
(section_name.lower() == 'dihedral bytype') or
(section_name.lower() == 'dihedral_by_type') or
(section_name.lower() == 'dihedral_bytype') or
(section_name.lower() == 'dihedralbytype')):
if (filename != data_dihedrals_by_type):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_dihedrals_by_type + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'impropers by type') or
(section_name.lower() == 'impropers bytype') or
(section_name.lower() == 'impropers_by_type') or
(section_name.lower() == 'impropers_bytype') or
(section_name.lower() == 'impropersbytype') or
(section_name.lower() == 'improper by type') or
(section_name.lower() == 'improper bytype') or
(section_name.lower() == 'improper_by_type') or
(section_name.lower() == 'improper_bytype') or
(section_name.lower() == 'improperbytype')):
if (filename != data_impropers_by_type):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_impropers_by_type + '\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'bonds') or
(section_name.lower() == 'bond')):
if (filename != data_bonds):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_bonds + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower().find('bond list') == 0) or
(section_name.lower().find('bonds list') == 0) or
(section_name.lower().find('bond_list') == 0) or
(section_name.lower().find('bonds_list') == 0)):
if (filename != data_bond_list):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_bond_list + '\"?')
elif (write_command != 'write'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'angles') or
(section_name.lower() == 'angle')):
if (filename != data_angles):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_angles + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'dihedrals') or
(section_name.lower() == 'dihedral')):
if (filename != data_dihedrals):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_dihedrals + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'impropers') or
(section_name.lower() == 'improper')):
if (filename != data_impropers):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_impropers + '\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write(\"' + filename + '\") instead.\n')
elif ((section_name.lower() == 'box boundaries') or
(section_name.lower() == 'box boundary') or
(section_name.lower() == 'boundaries') or
(section_name.lower() == 'boundary') or
(section_name.lower() == 'boundary conditions') or
(section_name.lower() == 'periodic boundaries') or
(section_name.lower() == 'periodic boundary conditions') or
(section_name.lower() == 'periodic_boundaries') or
(section_name.lower() == 'periodic_boundary_conditions') or
(section_name.lower() == 'pbc')):
if ((filename != data_boundary) and
(filename != data_pbc)):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_boundary + '\"?\n'
'(Specify periodic boundary conditions this way.)')
elif (write_command != 'write_once'):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the ' + write_command + '() command with \"' + filename + '\".\n'
'You should probably use write_once(\"' + filename + '\") instead.\n')
elif (filename == data_pbc):
sys.stderr.write('WARNING: write_once(\"' + data_pbc + '\") is deprecated.\n'
' Use write_once(\"' + data_boundary + '\") instead.\n')
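# Illustrative note (a hedged sketch, not executed code): the section-name
# tests above are meant to catch near-miss file names in user .lt files, e.g.
#
#     write_once("Data bonds_by_type") {...}   # -> suggests "Data Bonds By Type"
#     write("Data Bonds By Type") {...}        # -> suggests using write_once() instead
#
# The exact reserved strings come from the data_*/in_* globals defined
# elsewhere in this file; the examples above are only for illustration.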
def CheckCommonFileNames(filename,
srcloc,
write_command,
filenames_found):
"""
Check the write() or write_once() statements in a
lttree-file to make sure that the files being written
follow the conventions used by lttree.
Almost any file name is permitted, except for file names
which closely match those reserved by lttree.
"""
filenames_found.add(filename)
N_data_prefix = len(data_prefix)
#data_prefix_no_space = data_prefix.rstrip()
N_data_prefix_no_space = len(data_prefix_no_space)
if ((filename[:N_data_prefix].lower() == data_prefix.lower()) and
(filename[:N_data_prefix] != data_prefix)):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'The beginning of output file (\"' +
filename + '\")\n'
'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
'(\"' + data_prefix + '\"). Perhaps you meant \"' + data_prefix + filename[N_data_prefix:] + '\"?')
# Check whether the user forgot the space after the prefix:
if (filename[:N_data_prefix_no_space] == data_prefix_no_space):
if (filename[:N_data_prefix] == data_prefix):
CheckDataFileNames(filename,
srcloc,
write_command,
filenames_found)
else:
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'The beginning of output file (\"' +
filename + '\")\n'
'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
'(\"' + data_prefix + '\"). Perhaps you meant \"' + data_prefix + filename[N_data_prefix_no_space:] + '\"?')
elif ((filename.lower() == 'box boundaries') or
(filename.lower() == 'box boundary') or
(filename.lower() == 'boundaries') or
(filename.lower() == 'boundary') or
(filename.lower() == 'boundary conditions') or
(filename.lower() == 'periodic boundaries') or
(filename.lower() == 'periodic boundary conditions') or
(filename.lower() == 'periodic_boundaries') or
(filename.lower() == 'periodic_boundary_conditions') or
(filename.lower() == 'pbc')):
# In that case (for one thing) they forgot the data_prefix
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + data_boundary + '\"?\n'
'(Specify periodic boundary conditions this way.)')
elif ((filename.lower() == 'init') or
(filename.lower() == 'in init') or
(filename.lower() == 'ininit') or
(filename.lower() == 'initialize') or
(filename.lower() == 'in initialize') or
(filename.lower() == 'ininitialize')):
if (filename != in_init):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + in_init + '\"?')
# elif (write_command != 'write_once'):
# raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
# 'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
# 'want to use the '+write_command+'() command with \"'+filename+'\".\n'
# 'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((filename.lower() == 'settings') or
(filename.lower() == 'in settings') or
(filename.lower() == 'insettings')):
if (filename != in_settings):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + in_settings + '\"?')
elif ((filename.lower() == 'set_coords') or
(filename.lower() == 'set coords') or
(filename.lower() == 'setcoords') or
(filename.lower() == 'in set_coords') or
(filename.lower() == 'in set coords') or
(filename.lower() == 'in setcoords')):
if (filename != in_set_coords):
raise InputError('Probable typo in ' + ErrorLeader(srcloc.infile, srcloc.lineno) + '\n\n' +
'Output file name (\"' + filename +
'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"' + in_set_coords + '\"?')
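# Minimal usage sketch for the checks above (hedged; "srcloc" stands for any
# object with .infile and .lineno attributes, and assuming data_prefix is
# "Data "; the file names are examples only):
#
#     fnames = set()
#     CheckCommonFileNames('Data Atoms', srcloc, 'write', fnames)  # accepted
#     CheckCommonFileNames('data atoms', srcloc, 'write', fnames)  # InputError:
#     #   the lower-case prefix is treated as a probable typo and the reserved
#     #   name is suggested instead.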
def CheckSyntaxCheap(lex):
""" CheckSyntaxCheap() scans an lttree file for simple, easily detected
mistakes, such as misspelled reserved file names in write()/write_once()
statements, or misspelled counter-variable names.
-The "lex" argument is a file or input stream which has been converted
to a "TemplateLexer" object (similar to python's built-in shlex lexer).
"""
fnames_found = set([])
prematurely_read_token = None
while True:
if prematurely_read_token == None:
command = lex.get_token()
else:
command = prematurely_read_token
prematurely_read_token = None
#print('Parse(): token = \"'+command+'\", '+lex.error_leader())
if command == lex.eof:
#print('Parse(): EOF encountered\n')
break
if ((command == 'write') or (command == 'write_once')):
open_paren = lex.get_token()
#print('Parse(): open_paren=\"'+open_paren+'\"')
if open_paren == '{':
# ..then the user neglected to specify the "filename" file-name
# argument. In that case, supply the default, ''.
# (which is shorthand for the standard out in this case)
open_curly = open_paren[0]
open_paren = ''
close_paren = ''
filename = ''
srcloc = lex.GetSrcLoc()
else:
filename = lex.get_token()
if filename == ')':
filename = ''
close_paren = ')'
else:
close_paren = lex.get_token()
open_curly = lex.get_token()
srcloc = lex.GetSrcLoc()
if ((open_curly != '{') or
((open_paren == '') and (close_paren != '')) or
((open_paren == '(') and (close_paren != ')'))):
raise InputError('Error: in ' + lex.error_leader() + '\n\n'
'Syntax error at beginning of ' + command + ' command.')
filename = RemoveOuterQuotes(filename, lex.quotes)
# The previous line is similar to:
#filename = filename.strip(lex.quotes)
CheckCommonFileNames(filename, lex.GetSrcLoc(),
command, fnames_found)
tmpl_contents = lex.ReadTemplate()
StaticObj.CleanupReadTemplate(tmpl_contents, lex)
for entry in tmpl_contents:
if (type(entry) is VarRef):
CheckCommonVarNames(entry.prefix,
entry.descr_str,
entry.suffix,
entry.srcloc)
# if (data_velocities not in fnames_found):
# sys.stderr.write('-------------------------------------------------\n'
# 'WARNING: \"'+data_velocities+'\" file not found\n'
# '-------------------------------------------------\n')
# if (data_pair_coeffs not in fnames_found):
# sys.stderr.write('-------------------------------------------------\n'
# 'WARNING: \"'+data_pair_coeffs+'\" file not found\n'
# '-------------------------------------------------\n')
if (data_atoms not in fnames_found):
sys.stderr.write('WARNING: \"' + data_atoms + '\" file not found\n')
if (data_masses not in fnames_found):
sys.stderr.write('WARNING: \"' + data_masses + '\" file not found\n')
# if (data_bonds not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_bonds+'\" file not found\n'
# '--------------------------------------------------\n')
# if (data_angles not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_angles+'\" file not found\n'
# '--------------------------------------------------\n')
# if (data_dihedrals not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_dihedrals+'\" file not found\n'
# '--------------------------------------------------\n')
# if (data_impropers not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_impropers+'\" file not found\n'
# '--------------------------------------------------\n')
# if (data_bond_coeffs not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_bond_coeffs+'\" file not found\n'
# '--------------------------------------------------\n')
# if (data_angle_coeffs not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_angle_coeffs+'\" file not found\n'
# '--------------------------------------------------\n')
# if (data_dihedral_coeffs not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_dihedral_coeffs+'\" file not found\n'
# '--------------------------------------------------\n')
# if (data_improper_coeffs not in fnames_found):
# sys.stderr.write('--------------------------------------------------\n'
# 'WARNING: \"'+data_improper_coeffs+'\" file not found\n'
# '--------------------------------------------------\n')
if (in_init not in fnames_found):
sys.stderr.write('WARNING: \"' + in_init + '\" file not found\n')
if (in_settings not in fnames_found):
sys.stderr.write('WARNING: \"' + in_settings + '\" file not found\n')
def CheckSyntaxStatic(context_node,
root_node,
atom_column_names,
allow_wildcards,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands):
if search_instance_commands:
assert(isinstance(context_node, StaticObj))
commands = context_node.instance_commands
else:
# Note: Leaf nodes contain no commands, so skip them
if (not hasattr(context_node, 'commands')):
return
# Otherwise process their commands
commands = context_node.commands
for command in commands:
if isinstance(command, WriteFileCommand):
filename = command.filename
if filename == None: # (The "create_var" command causes this)
pass
elif (filename.find(in_prefix) == 0): # if filename begins with "In "
CheckInFileSyntax(command.tmpl_list,
root_node,
allow_wildcards,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined)
elif filename == 'Data Atoms':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
assert(hasattr(table[i], '__len__'))
if len(table[i]) == 0:
pass # skip blank lines
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass # skip comment lines
else:
syntax_err = False
if len(table[i]) < len(atom_column_names):
syntax_err = True
else:
syntax_err = False
for j in range(0, len(atom_column_names)):
if ((atom_column_names[j].lower() == 'atom-id') and
(not ((j < len(table[i])) and
isinstance(table[i][j], VarRef) and
(table[i][j].prefix in ('$', '${')) and
(ExtractCatName(table[i][j].descr_str) == 'atom')))):
syntax_err = True
elif ((atom_column_names[j].lower() == 'molecule-id') and
(not ((j < len(table[i])) and
isinstance(table[i][j], VarRef) and
(table[i][j].prefix in ('$', '${')) and
(ExtractCatName(table[i][j].descr_str) == 'mol')))):
syntax_err = True
elif ((atom_column_names[j].lower() == 'atom-type') and
(not ((j < len(table[i])) and
(isinstance(table[i][j], VarRef)) and
(table[i][j].prefix in ('@', '@{')) and
(table[i][j].nptr.cat_name == 'atom') and
(table[i][j].nptr.cat_node == root_node)))):
syntax_err = True
if syntax_err:
correct_rows_list = [s for s in atom_column_names]
for j in range(0, len(correct_rows_list)):
if correct_rows_list[j].lower() == 'atom-id':
correct_rows_list[j] = '$atom:id'
elif correct_rows_list[j].lower() == 'atom-type':
correct_rows_list[j] = '@atom:type'
elif correct_rows_list[j].lower() == 'molecule-id':
correct_rows_list[j] = '$mol:id'
correct_rows_msg = ' '.join(correct_rows_list)
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Invalid "Data Atoms" syntax.\n' +
'Each line of the \"Data Atoms\" section should have this format:\n\n'
' ' + correct_rows_msg + '\n\n'
'You may have forgotten to specify the LAMMPS atom_style.\n' +
'(You can do this running moltemplate with the -atom-style _style_ argument.)\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
elif filename == 'Data Bonds':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 4:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'bond'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'bond') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Bonds" syntax.\n' +
'Each line of the \"Data Bonds\" section should have this format:\n\n'
' $bond:id @bond:type $atom:id1 $atom:id2\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
elif filename == 'Data Bond List':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 3:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'bond'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Bond List" syntax.\n' +
'Each line in this section should have this format:\n\n'
' $bond:id $atom:id1 $atom:id2\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
elif filename == 'Data Angles':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 5:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'angle'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'angle') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Angles" syntax.\n' +
'Each line of the \"Data Angles\" section should have this format:\n\n'
' $angle:id @angle:type $atom:id1 $atom:id2 $atom:id3\n' +
'----------------------------------------------------\n\n' +
g_no_check_msg)
elif filename == 'Data Dihedrals':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 6:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'dihedral'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'dihedral') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 5:
table_entry = table[i][5]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Dihedrals" syntax.\n' +
'Each line of the \"Data Dihedrals\" section should have this format:\n\n'
' $dihedral:id @dihedral:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
elif filename == 'Data Impropers':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 6:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'improper'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'improper') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 5:
table_entry = table[i][5]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$', '${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Impropers" syntax.\n' +
'Each line of the \"Data Impropers\" section should have this format:\n\n'
' $improper:id @improper:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
# A simple wildcard is the character "*" on its own.
# These are okay.
# A "compound" wildcard expression is something like
# 5*7 or
# 5* or
# *7 or
# @{bond:A}*@bond:B or
# @{bond:A}* or
# *@bond:B
# LAMMPS allows this but in moltemplate this causes
# unintended side-effects. Check for these now.
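# For example, inside a write_once("Data Bond Coeffs") {...} block (the type
# names and parameter values below are hypothetical placeholders):
#
#     @bond:A              100.0 1.5   <- ordinary type reference: fine
#     *                    100.0 1.5   <- simple wildcard: fine
#     @{bond:A}*@{bond:B}  100.0 1.5   <- compound wildcard: flagged unless
#                                         "-allow-wildcards" was supplied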
if filename in set(['Data Bond Coeffs',
'Data Angle Coeffs',
'Data Dihedral Coeffs',
'Data Improper Coeffs',
'Data Pair Coeffs']):
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if (isinstance(table[i][0], TextBlock) and
table[i][0].text == '*'):
if filename == 'Data Bond Coeffs':
data_bond_coeffs_defined.add('*')
elif filename == 'Data Angle Coeffs':
data_angle_coeffs_defined.add('*')
elif filename == 'Data Dihedral Coeffs':
data_dihedral_coeffs_defined.add('*')
elif filename == 'Data Improper Coeffs':
data_improper_coeffs_defined.add('*')
elif filename == 'Data Pair Coeffs':
data_pair_coeffs_defined.add(('*', '*'))
else:
compound_wildcard = False
if (len(table[i]) > 1):
if hasattr(table[i][0], '__len__'):
ltmpl = table[i][0]
else:
ltmpl = [table[i][0]]
for entry in ltmpl:
if (isinstance(entry, TextBlock) and
('*' in entry.text)):
compound_wildcard = True
elif (isinstance(entry, VarRef) and
('*' in entry.descr_str)):
compound_wildcard = True
if compound_wildcard and (not allow_wildcards):
raise InputError('--- Paranoid checking: ---\n'
' Possible error near ' +
ErrorLeader(entry.srcloc.infile,
entry.srcloc.lineno) + '\n'
'The wildcard symbol, \"*\", is not recommended within \"' + filename + '\".\n'
'It is safer to specify the parameters for each type explicitly.\n'
'To get past this error message, run moltemplate.sh with the \"-allow-wildcards\"\n'
'argument. If not all of the @atom,@bond,@angle,@dihedral,@improper types are\n'
'included in the a*b range of values, then MAKE SURE that these types are\n'
'assigned to consecutively increasing integer values by first defining them in\n'
'that order, or if that fails, by using the \"-a\" argument to assign the values manually\n')
if filename == 'Data Bond Coeffs':
# Commenting the next line out. We did this already:
# table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'bond') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Bond Coeffs" syntax.\n'
' Each line of the \"Data Bond Coeffs\" section\n'
' should have the following syntax:\n\n' +
' @bond:type list-of-parameters...\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
else:
data_bond_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Angle Coeffs':
# Commenting the next line out. We did this already:
# table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'angle') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Angle Coeffs" syntax.\n'
' Each line of the \"Data Angle Coeffs\" section\n'
' should have the following syntax:\n\n' +
' @angle:type list-of-parameters...\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
else:
data_angle_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Dihedral Coeffs':
# Commenting the next line out. We did this already:
# table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'dihedral') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Dihedral Coeffs" syntax.\n'
' Each line of the \"Data Dihedral Coeffs\" section\n'
' should have the following syntax:\n\n' +
' @dihedral:type list-of-parameters...\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
else:
data_dihedral_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Improper Coeffs':
# Commenting the next line out. We did this already:
# table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'improper') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Improper Coeffs" syntax.\n'
' Each line of the \"Data Improper Coeffs\" section\n'
' should have the following syntax:\n\n' +
' @improper:type list-of-parameters...\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
else:
data_improper_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Pair Coeffs':
# Commenting the next line out. We did this already:
# table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 0) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 0) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not ((len(table[i]) > 0) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'atom') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect "Data Pair Coeffs" syntax.\n'
' Each line of the \"Data Pair Coeffs\" section\n'
' should have the following syntax:\n\n' +
' @atom:type list-of-parameters...\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
else:
data_pair_coeffs_defined.add((table[i][0].binding,
table[i][0].binding))
elif filename == 'Data Bonds By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not ((len(table[i]) >= 3) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'bond') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect \"Data Bonds By Type\" syntax.\n'
' Each line of the \"Data Bonds By Type\" section should begin with an\n'
' @bond:type variable followed by 2 atom types.\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
elif filename == 'Data Angles By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not ((len(table[i]) >= 4) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'angle') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect \"Data Angles By Type\" syntax.\n'
' Each line of the \"Data Angles By Type\" section should begin with an\n'
' @angle:type variable followed by 3 atom types (and 2 optional bond types).\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
elif filename == 'Data Dihedrals By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not ((len(table[i]) >= 5) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'dihedral') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect \"Data Dihedrals By Type\" syntax.\n'
' Each line of the \"Data Dihedrals By Type\" section should begin with a\n\n'
' @dihedral:type variable followed by 4 atom types (and 3 optional bond types).\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
elif filename == 'Data Impropers By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
# Ignore comment lines (postprocessing removes them)
pass
elif (not ((len(table[i]) >= 5) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'improper') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Incorrect \"Data Impropers By Type\" syntax.\n'
' Each line of the \"Data Impropers By Type\" section should begin with an\n\n'
' @improper:type variable followed by 4 atom types (and 3 optional bond types).\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
# Recursively invoke AssignVarPtrs() on all (non-leaf) child nodes:
for child in context_node.children.values():
CheckSyntaxStatic(child,
root_node,
atom_column_names,
allow_wildcards,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands)
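# Illustrative "Data Atoms" row (assuming atom_style "full", where the columns
# are atom-ID, molecule-ID, atom-type, charge, x, y, z).  All identifiers and
# numbers below are hypothetical placeholders:
#
#     $atom:ca1  $mol:m1  @atom:CA  0.0  1.234 0.000 0.000
#
# The checks above verify that the atom-ID, molecule-ID, and atom-type columns
# are $atom, $mol, and @atom style variables, respectively.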
def CheckInFileSyntax(tmpl_list,
root_node,
allow_wildcards,
pair_coeffs_defined,
bond_coeffs_defined,
angle_coeffs_defined,
dihedral_coeffs_defined,
improper_coeffs_defined):
table = TableFromTemplate(tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((isinstance(table[i][0], TextBlock)) and
(table[i][0].text in set(['bond_coeff',
'angle_coeff',
'dihedral_coeff',
'improper_coeff']))):
if len(table[i]) > 1: # if not, deal with the error later
if (isinstance(table[i][1], TextBlock) and
table[i][1].text == '*'):
if table[i][0].text == 'bond_coeff':
bond_coeffs_defined.add('*')
elif table[i][0].text == 'angle_coeff':
angle_coeffs_defined.add('*')
elif table[i][0].text == 'dihedral_coeff':
dihedral_coeffs_defined.add('*')
elif table[i][0].text == 'improper_coeff':
improper_coeffs_defined.add('*')
else:
compound_wildcard = False
if (len(table[i]) > 1):
if hasattr(table[i][1], '__len__'):
ltmpl = table[i][1]
else:
ltmpl = [table[i][1]]
for entry in ltmpl:
if (isinstance(entry, TextBlock) and
('*' in entry.text)):
compound_wildcard = True
elif (isinstance(entry, VarRef) and
('*' in entry.descr_str)):
compound_wildcard = True
if compound_wildcard and (not allow_wildcards):
raise InputError('---- Paranoid checking: ---\n'
' Possible error near ' +
ErrorLeader(entry.srcloc.infile,
entry.srcloc.lineno) + '\n'
'The wildcard symbol, \"*\", is not recommended within a \"' +
table[i][0].text + '\" command.\n'
'It is safer to specify the parameters for each type explicitly.\n'
'To get past this error message, run moltemplate.sh with the \"-allow-wildcards\"\n'
'argument. If not all of the @bond,@angle,@dihedral,@improper types are included\n'
'in the range, then MAKE SURE these @bond,@angle,@dihedral,@improper types are\n'
'assigned to consecutively increasing integer values by first defining them in\n'
'that order, or if that fails, by using the \"-a\" argument to assign the values manually\n')
if ((isinstance(table[i][0], TextBlock)) and
((table[i][0].text.lower() == 'bondcoeff') or
(table[i][0].text.lower() == 'bond_coeff'))):
if table[i][0].text != 'bond_coeff':
raise InputError('----------------------------------------------------\n' +
' Spelling error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Use \"bond_coeff\", not \"' + table[i][0].text + '\"\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
if ((len(table[i]) > 1) and
isinstance(table[i][1], TextBlock) and
(table[i][1].text == '*')):
pass # we dealt with this case earlier
elif (not ((len(table[i]) > 1) and
(isinstance(table[i][1], VarRef)) and
(table[i][1].prefix in ('@', '@{')) and
(table[i][1].nptr.cat_name == 'bond') and
(table[i][1].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Invalid \"bond_coeff\" command.\n\n' +
' Each \"bond_coeff\" command should have the following syntax:\n\n' +
' bond_coeff @bond:type [optional style] list-of-parameters...\n' +
'----------------------------------------------------\n\n' +
g_no_check_msg)
else:
bond_coeffs_defined.add(table[i][1].binding)
if ((isinstance(table[i][0], TextBlock)) and
((table[i][0].text.lower() == 'anglecoeff') or
(table[i][0].text.lower() == 'angle_coeff'))):
if table[i][0].text != 'angle_coeff':
raise InputError('----------------------------------------------------\n' +
' Spelling error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Use \"angle_coeff\", not \"' + table[i][0].text + '\"\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
if ((len(table[i]) > 1) and
isinstance(table[i][1], TextBlock) and
(table[i][1].text == '*')):
pass # we dealt with this case earlier
elif (not ((len(table[i]) > 1) and
(isinstance(table[i][1], VarRef)) and
(table[i][1].prefix in ('@', '@{')) and
(table[i][1].nptr.cat_name == 'angle') and
(table[i][1].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Invalid \"angle_coeff\" command.\n\n' +
' Each \"angle_coeff\" command should have the following syntax:\n\n' +
' angle_coeff @angle:type [optional style] list-of-parameters...\n' +
'----------------------------------------------------\n\n' +
g_no_check_msg)
else:
angle_coeffs_defined.add(table[i][1].binding)
if ((isinstance(table[i][0], TextBlock)) and
((table[i][0].text.lower() == 'dihedralcoeff') or
(table[i][0].text.lower() == 'dihedral_coeff'))):
if table[i][0].text != 'dihedral_coeff':
raise InputError('----------------------------------------------------\n' +
' Spelling error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Use \"dihedral_coeff\", not \"' + table[i][0].text + '\"\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
if ((len(table[i]) > 1) and
isinstance(table[i][1], TextBlock) and
(table[i][1].text == '*')):
pass # we dealt with this case earlier
elif (not ((len(table[i]) > 1) and
(isinstance(table[i][1], VarRef)) and
(table[i][1].prefix in ('@', '@{')) and
(table[i][1].nptr.cat_name == 'dihedral') and
(table[i][1].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Invalid \"dihedral_coeff\" command.\n\n' +
' Each \"dihedral_coeff\" command should have the following syntax:\n\n' +
' dihedral_coeff @dihedral:type [optional style] list-of-parameters...\n' +
'----------------------------------------------------\n\n' +
g_no_check_msg)
else:
dihedral_coeffs_defined.add(table[i][1].binding)
if ((isinstance(table[i][0], TextBlock)) and
((table[i][0].text.lower() == 'impropercoeff') or
(table[i][0].text.lower() == 'improper_coeff'))):
if table[i][0].text != 'improper_coeff':
raise InputError('----------------------------------------------------\n' +
' Spelling error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Use \"improper_coeff\", not \"' + table[i][0].text + '\"\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
if ((len(table[i]) > 1) and
isinstance(table[i][1], TextBlock) and
(table[i][1].text == '*')):
pass # we dealt with this case earlier
elif (not ((len(table[i]) > 1) and
(isinstance(table[i][1], VarRef)) and
(table[i][1].prefix in ('@', '@{')) and
(table[i][1].nptr.cat_name == 'improper') and
(table[i][1].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Invalid \"improper_coeff\" command.\n\n' +
' Each \"improper_coeff\" command should have the following syntax:\n\n' +
' improper_coeff @improper:type [optional style] list-of-parameters...\n' +
'----------------------------------------------------\n\n' +
g_no_check_msg)
else:
improper_coeffs_defined.add(table[i][1].binding)
elif ((isinstance(table[i][0], TextBlock)) and
((table[i][0].text.lower() == 'paircoeff') or
(table[i][0].text.lower() == 'pair_coeff'))):
if table[i][0].text != 'pair_coeff':
raise InputError('----------------------------------------------------\n' +
' Spelling error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Use \"pair_coeff\", not \"' + table[i][0].text + '\"\n' +
'----------------------------------------------------\n' +
g_no_check_msg)
if len(table[i]) > 2: # if not, deal with error later
if ((isinstance(table[i][1], TextBlock) and
(table[i][1].text == '*')) and
(isinstance(table[i][2], TextBlock) and  # check the 2nd atom-type column too,
(table[i][2].text == '*'))):             # i.e. a "pair_coeff * * ..." line
pair_coeffs_defined.add(('*', '*'))
else:
compound_wildcard = False
assert(len(table[i]) > 1)
if hasattr(table[i][1], '__len__'):
ltmpl = table[i][1]
else:
ltmpl = [table[i][1]]
for entry in ltmpl:
if (isinstance(entry, TextBlock) and
('*' in entry.text)):
compound_wildcard = True
elif (isinstance(entry, VarRef) and
('*' in entry.descr_str)):
compound_wildcard = True
if hasattr(table[i][2], '__len__'):
ltmpl = table[i][2]
else:
ltmpl = [table[i][2]]
for entry in ltmpl:
if (isinstance(entry, TextBlock) and
('*' in entry.text)):
compound_wildcard = True
elif (isinstance(entry, VarRef) and
('*' in entry.descr_str)):
compound_wildcard = True
if compound_wildcard and (not allow_wildcards):
raise InputError('---- Paranoid checking: ---\n'
' Possible error near ' +
ErrorLeader(entry.srcloc.infile,
entry.srcloc.lineno) + '\n'
'The wildcard symbol, \"*\", is not recommended within a \"pair_coeff\" command.\n'
'It is safer to specify the parameters for each pair of atom types explicitly.\n'
'To get past this error message, run moltemplate.sh with the \"-allow-wildcards\"\n'
'argument. If not all of the @atom types are included in the range, then\n'
'MAKE SURE the relevant @atom types in the * range are assigned to\n'
'consecutively increasing integer values by first defining them in that\n'
'order, or if that fails, by using the \"-a\" argument to assign the values manually.\n')
if ((len(table[i]) > 2) and
((isinstance(table[i][1], TextBlock) and
(table[i][1].text == '*')) or
(isinstance(table[i][2], TextBlock) and
(table[i][2].text == '*')))):
pass # we dealt with this case earlier
elif (not ((len(table[i]) > 2) and
(isinstance(table[i][1], VarRef)) and
(table[i][1].prefix in ('@', '@{')) and
(table[i][1].nptr.cat_name == 'atom') and
(table[i][1].nptr.cat_node == root_node) and
(isinstance(table[i][2], VarRef)) and
(table[i][2].prefix in ('@', '@{')) and
(table[i][2].nptr.cat_name == 'atom') and
(table[i][2].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n' +
' Syntax error near ' +
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno) + '\n'
' Invalid \"pair_coeff\" command.\n\n' +
' Each \"pair_coeff\" command should have the following syntax:\n\n' +
' pair_coeff @atom:typeI @atom:typeJ [optional style] list-of-parameters...\n' +
'----------------------------------------------------\n\n' +
g_no_check_msg)
else:
pair_coeffs_defined.add(
(table[i][1].binding, table[i][2].binding))
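# Sketch of the kind of lines CheckInFileSyntax() accepts and records (type
# names and parameter values are hypothetical placeholders):
#
#     bond_coeff  @bond:CC          harmonic 100.0 1.5  -> bond_coeffs_defined
#     pair_coeff  @atom:C @atom:C   lj/cut 0.1 3.5      -> pair_coeffs_defined
#     pair_coeff  * *               lj/cut 0.1 3.5      -> ('*','*') recorded
#
# Misspellings such as "bondcoeff", or type arguments which are not @-style
# variables, are reported as probable errors.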
def LttreeCheckParseArgs(argv, settings, main=False, show_warnings=True):
LttreeParseArgs(argv, settings, False, show_warnings)
if main:
# Instantiate the lexer we will be using.
# (The lexer's __init__() function requires an opened file.
# Assuming __name__ == "__main__", then the name of that file should
# be the last remaining (unprocessed) argument in the argument list.)
if len(argv) == 1:
raise InputError('Error: This program requires at least one argument\n'
' the name of a file containing ttree template commands\n')
settings.allow_wildcards = True
i = 1
while i < len(argv):
if argv[i].lower() in ('-allow-wildcards', '-allowwildcards'):
settings.allow_wildcards = True
del argv[i:i+1]
elif argv[i].lower() in ('-forbid-wildcards', '-forbidwildcards'):
settings.allow_wildcards = False
del argv[i:i+1]
else:
i += 1
#(perhaps later I'll add some additional arguments)
# The only argument left should be the system.lt file we want to read:
if len(argv) == 2:
try:
# Parse text from the file named argv[1]
settings.lex.infile = argv[1]
settings.lex.instream = open(argv[1], 'r')
except IOError:
sys.stderr.write('Error: unable to open file\n'
' \"' + argv[1] + '\"\n'
' for reading.\n')
sys.exit(1)
else:
# if there are more than 2 remaining arguments,
problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
raise InputError('Syntax Error(' + g_program_name + '):\n\n'
' Unrecognized argument.\n'
' (That or there is some other problem with the argument list.)\n'
' The problem begins with these arguments:\n'
' ' + (' '.join(problem_args)) + '\n\n'
' (The actual problem may be earlier in the argument list.\n'
' If these arguments are source files, then keep in mind\n'
' that this program can not parse multiple source files.)\n'
' Check the syntax of the entire argument list.\n')
return
####### control flow begins here: #######
def main():
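'''
Entry point for lttree_check: run a cheap spelling/syntax pass over the
input file, re-parse it into a static tree of class definitions, and then
verify that every bond/angle/dihedral/improper/atom type that is referenced
also has matching coeffs defined (either via *_coeff commands or the
corresponding "Data ... Coeffs" section, but not both).
'''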
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + '\n')
try:
# Parse the argument list and instantiate the lexer we will be using:
settings = LttreeSettings()
LttreeCheckParseArgs([arg for arg in sys.argv], #(deep copy of sys.argv)
settings,
main=True,
show_warnings=True)
# Invoke syntax checker pass:
# This first check only checks for very simple mistakes
# (misspelled versions of standard files or variable names).
CheckSyntaxCheap(settings.lex)
settings.lex.instream.close()
# Now read the file again.
# This time parse it using StaticObj.ReadTemplate().
# (This will allow us to check for deeper problems.)
# Parse text from the file named argv[1]
# Note: Assigning settings.lex directly does not work
# because it does not set the include path:
# settings.lex = TemplateLexer(open(settings.lex.infile, 'r'),
# settings.lex.infile) <--DONT USE!
# Instead reinitialize settings.lex the same way we did earlier:
# by using LttreeCheckParseArgs():
settings = LttreeSettings()
LttreeCheckParseArgs([arg for arg in sys.argv], #(deep copy of sys.argv)
settings,
main=True,
show_warnings=False)
static_tree_root = StaticObj('', None) # The root of the static tree
# has name '' (equivalent to '/')
sys.stderr.write(g_program_name +
': parsing the class definitions...')
static_tree_root.Parse(settings.lex)
sys.stderr.write(' done\n' + g_program_name +
': looking up classes...')
static_tree_root.LookupStaticRefs()
sys.stderr.write(' done\n' + g_program_name +
': looking up @variables...')
AssignStaticVarPtrs(static_tree_root,
search_instance_commands=False)
replace_var_pairs = {}
FindReplacementVarPairs(static_tree_root, replace_var_pairs)
ReplaceVars(static_tree_root, replace_var_pairs,
search_instance_commands=False)
AssignStaticVarPtrs(static_tree_root,
search_instance_commands=True)
ReplaceVars(static_tree_root, replace_var_pairs,
search_instance_commands=True)
sys.stderr.write(' done\n')
#sys.stderr.write(' done\n\nclass_def_tree = ' + str(static_tree_root) + '\n\n')
data_pair_coeffs_defined = set([])
data_bond_coeffs_defined = set([])
data_angle_coeffs_defined = set([])
data_dihedral_coeffs_defined = set([])
data_improper_coeffs_defined = set([])
in_pair_coeffs_defined = set([])
in_bond_coeffs_defined = set([])
in_angle_coeffs_defined = set([])
in_dihedral_coeffs_defined = set([])
in_improper_coeffs_defined = set([])
# Now check the static syntax
# Here we check the contents of the "write_once()" commands:
CheckSyntaxStatic(static_tree_root,
static_tree_root,
settings.column_names,
settings.allow_wildcards,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands=False)
# Here we check the contents of the "write()" commands:
CheckSyntaxStatic(static_tree_root,
static_tree_root,
settings.column_names,
settings.allow_wildcards,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands=True)
if 'bond' in static_tree_root.categories:
if ((len(data_bond_coeffs_defined) > 0) and
(len(in_bond_coeffs_defined) > 0)):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: You can EITHER use \"bond_coeff\" commands\n' +
' OR you can have a \"Data Bond Coeffs\" section.\n' +
' LAMMPS will not allow both (...as of late 2012)\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
#' If this is no longer true, to override this error message you must\n'+
#' disable error checking by running moltemplate with the -nocheck option.\n')
if len(data_bond_coeffs_defined) > 0:
bond_coeffs_defined = data_bond_coeffs_defined
else:
bond_coeffs_defined = in_bond_coeffs_defined
bond_types_have_wildcards = False
bond_bindings = static_tree_root.categories['bond'].bindings
for nd, bond_binding in bond_bindings.items():
if not nd.IsDeleted():
has_wildcard = HasWildcard(bond_binding.full_name)
if has_wildcard:
bond_types_have_wildcards = True
for nd, bond_binding in bond_bindings.items():
if not nd.IsDeleted():
#has_wildcard = HasWildcard(bond_binding.full_name)
if ((not (bond_binding in bond_coeffs_defined)) and
#(not has_wildcard) and
(not bond_types_have_wildcards) and
(not ('*' in bond_coeffs_defined))):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: Missing bond coeff.\n\n' +
' No coeffs for the \"' + bond_binding.full_name + '\" bond type have been\n' +
'defined, but a reference to that bond type was discovered\n' +
'near ' + ErrorLeader(bond_binding.refs[0].srcloc.infile,
bond_binding.refs[0].srcloc.lineno) + '. Check this file and also check\n'
'your \"bond_coeff\" commands or your \"Data Bond Coeffs" section.\n'
'---------------------------------------------------------------------\n' +
g_no_check_msg)
if 'angle' in static_tree_root.categories:
if ((len(data_angle_coeffs_defined) > 0) and
(len(in_angle_coeffs_defined) > 0)):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: You can EITHER use \"angle_coeff\" commands\n' +
' OR you can have a \"Data Angle Coeffs\" section.\n' +
' LAMMPS will not allow both (...as of late 2012)\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
#' If this is no longer true, to override this error message you must\n'+
#' disable error checking by running moltemplate with the -nocheck option.\n')
if len(data_angle_coeffs_defined) > 0:
angle_coeffs_defined = data_angle_coeffs_defined
else:
angle_coeffs_defined = in_angle_coeffs_defined
angle_types_have_wildcards = False
angle_bindings = static_tree_root.categories['angle'].bindings
for nd, angle_binding in angle_bindings.items():
if not nd.IsDeleted():
has_wildcard = HasWildcard(angle_binding.full_name)
if has_wildcard:
angle_types_have_wildcards = True
for nd, angle_binding in angle_bindings.items():
if not nd.IsDeleted():
#has_wildcard = HasWildcard(angle_binding.full_name)
if ((not (angle_binding in angle_coeffs_defined)) and
#(not has_wildcard)) and
(not angle_types_have_wildcards) and
(not ('*' in angle_coeffs_defined))):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: Missing angle coeff.\n\n' +
' No coeffs for the \"' + angle_binding.full_name + '\" angle type have been\n' +
'defined, but a reference to that angle type was discovered\n' +
'near ' + ErrorLeader(angle_binding.refs[0].srcloc.infile,
angle_binding.refs[0].srcloc.lineno) + '. Check this file and\n'
'also check your \"angle_coeff\" commands or your \"Data Angle Coeffs" section.\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
if 'dihedral' in static_tree_root.categories:
#sys.stderr.write('dihedral_bindings = '+str(dihedral_bindings)+'\n')
if ((len(data_dihedral_coeffs_defined) > 0) and
(len(in_dihedral_coeffs_defined) > 0)):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: You can EITHER use \"dihedral_coeff\" commands\n' +
' OR you can have a \"Data Dihedral Coeffs\" section.\n' +
' LAMMPS will not allow both (...as of late 2012)\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
#' If this is no longer true, to override this error message you must\n'+
#' disable error checking by running moltemplate with the -nocheck option.\n')
if len(data_dihedral_coeffs_defined) > 0:
dihedral_coeffs_defined = data_dihedral_coeffs_defined
else:
dihedral_coeffs_defined = in_dihedral_coeffs_defined
dihedral_types_have_wildcards = False
dihedral_bindings = static_tree_root.categories[
'dihedral'].bindings
for nd, dihedral_binding in dihedral_bindings.items():
if not nd.IsDeleted():
has_wildcard = HasWildcard(dihedral_binding.full_name)
if has_wildcard:
dihedral_types_have_wildcards = True
for nd, dihedral_binding in dihedral_bindings.items():
if not nd.IsDeleted():
#has_wildcard = HasWildcard(dihedral_binding.full_name)
if ((not (dihedral_binding in dihedral_coeffs_defined)) and
#(not has_wildcard) and
(not dihedral_types_have_wildcards) and
(not ('*' in dihedral_coeffs_defined))):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: Missing dihedral coeff.\n\n' +
' No coeffs for the \"' + dihedral_binding.full_name + '\" dihedral type have been\n' +
'defined, but a reference to that dihedral type was discovered\n' +
'near ' + ErrorLeader(dihedral_binding.refs[0].srcloc.infile,
dihedral_binding.refs[0].srcloc.lineno) + '. Check this file and\n'
'also check your \"dihedral_coeff\" commands or your \"Data Dihedral Coeffs" section.\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
if 'improper' in static_tree_root.categories:
if ((len(data_improper_coeffs_defined) > 0) and
(len(in_improper_coeffs_defined) > 0)):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: You can EITHER use \"improper_coeff\" commands\n' +
' OR you can have a \"Data Improper Coeffs\" section.\n' +
' LAMMPS will not allow both (...as of late 2012)\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
#' If this is no longer true, to override this error message you must\n'+
#' disable error checking by running moltemplate with the -nocheck option.\n')
if len(data_improper_coeffs_defined) > 0:
improper_coeffs_defined = data_improper_coeffs_defined
else:
improper_coeffs_defined = in_improper_coeffs_defined
improper_types_have_wildcards = False
improper_bindings = static_tree_root.categories[
'improper'].bindings
for nd, improper_binding in improper_bindings.items():
if not nd.IsDeleted():
has_wildcard = HasWildcard(improper_binding.full_name)
if has_wildcard:
improper_types_have_wildcards = True
for nd, improper_binding in improper_bindings.items():
if not nd.IsDeleted():
#has_wildcard = HasWildcard(improper_binding.full_name)
if ((not (improper_binding in improper_coeffs_defined)) and
#(not has_wildcard) and
(not improper_types_have_wildcards) and
(not ('*' in improper_coeffs_defined))):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: Missing improper coeff.\n\n' +
' No coeffs for the \"' + improper_binding.full_name + '\" improper type have been\n' +
'defined, but a reference to that improper type was discovered\n' +
'near ' + ErrorLeader(improper_binding.refs[0].srcloc.infile,
improper_binding.refs[0].srcloc.lineno) + '. Check this file and\n'
'also check your \"improper_coeff\" commands or your \"Data Improper Coeffs" section.\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
if 'atom' in static_tree_root.categories:
if ((len(data_pair_coeffs_defined) > 0) and
(len(in_pair_coeffs_defined) > 0)):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: You can EITHER use \"pair_coeff\" commands\n' +
' OR you can have a \"Data Pair Coeffs\" section.\n' +
' LAMMPS will not allow both (...as of late 2012)\n' +
'---------------------------------------------------------------------\n' +
g_no_check_msg)
#' If this is no longer true, to override this error message you must\n'+
#' disable error checking by running moltemplate with the -nocheck option.\n')
if len(data_pair_coeffs_defined) > 0:
pair_coeffs_defined = data_pair_coeffs_defined
else:
pair_coeffs_defined = in_pair_coeffs_defined
atom_types_have_wildcards = False
atom_bindings = static_tree_root.categories['atom'].bindings
for nd, atom_binding in atom_bindings.items():
if not nd.IsDeleted():
has_wildcard = HasWildcard(atom_binding.full_name)
if has_wildcard:
atom_types_have_wildcards = True
for nd, atom_binding in atom_bindings.items():
if not nd.IsDeleted():
#has_wildcard = HasWildcard(atom_binding.full_name)
if ((not ((atom_binding, atom_binding)
in
pair_coeffs_defined)) and
#(not has_wildcard) and
(not atom_types_have_wildcards) and
(not (('*', '*') in pair_coeffs_defined)) and
(not (atom_binding.nptr.cat_name,
atom_binding.nptr.cat_node,
atom_binding.nptr.leaf_node)
in replace_var_pairs)):
raise InputError('---------------------------------------------------------------------\n' +
' Syntax error: Missing pair coeff.\n\n' +
' No pair coeffs for the \"' + atom_binding.full_name + '\" atom type have been\n' +
'defined, but a reference to that atom type was discovered\n' +
'near ' + ErrorLeader(atom_binding.refs[0].srcloc.infile,
atom_binding.refs[0].srcloc.lineno) + '. Check this file and\n'
'also check your \"pair_coeff\" commands or your \"Data Pair Coeffs" section.\n\n' +
g_no_check_msg)
# else:
# raise InputError('Error: No atom types (@atom) have been defined.\n')
sys.stderr.write(g_program_name + ': -- No errors detected. --\n')
exit(0)
except (ValueError, InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(1)
return
if __name__ == '__main__':
main()
|
quang-ha/lammps
|
tools/moltemplate/moltemplate/lttree_check.py
|
Python
|
gpl-2.0
| 144,120
|
[
"LAMMPS"
] |
5d388bf9bac006c8b02eb1943d5b8ab7dce4c2e86630ad48049647fa8f395c36
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# Plotting the results of quartz_dissolution.i
import os
import sys
import matplotlib.pyplot as plt
f = open("gold/quartz_dissolution_out.csv", "r")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:]]
f.close()
tim = [x[0] for x in data]
dis = [-x[1] * 1000 for x in data]
gwb_tim = [0, 0.5, 1, 1.5, 2, 3, 4, 5]
gwb_dis = [0, -0.333, -0.5275, -0.6414, -0.7081, -0.77, -0.7912, -0.7985]
plt.figure(0)
plt.plot(tim, dis, 'k-', linewidth = 2.0, label = 'MOOSE')
plt.plot(gwb_tim, gwb_dis, 'ks', linewidth = 2.0, label = 'GWB')
plt.legend()
plt.grid()
plt.xlabel("Time (days)")
plt.ylabel("Quartz change (mmol)")
plt.title("Kinetic dissolution of quartz")
plt.savefig("../../../doc/content/media/geochemistry/quartz_dissolution.png")
sys.exit(0)
|
harterj/moose
|
modules/geochemistry/test/tests/kinetics/quartz_dissolution.py
|
Python
|
lgpl-2.1
| 1,106
|
[
"MOOSE"
] |
a8115ac9e014eec731f3d2388943a95612ee4080cb84797c07928a38baea9e96
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('belmis.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^api/', include('belmis.api.urls', namespace='api')),
url(r'^residence/', include('belmis.residences.urls', namespace='residences')),
url(r'^docs/', include('rest_framework_docs.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
pgergov/belmis
|
config/urls.py
|
Python
|
mit
| 1,805
|
[
"VisIt"
] |
9b62843fd013a4e7f10a7324307a2b96dcfaa841e1ab583d6f74fbeb78a5ea34
|
#!/usr/bin/env jython
# copyright 2002 the Brothers Wilcox
# <mailto:zooko@zooko.com>
#
# This file is part of OvP.
#
# OvP is open source software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OvP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OvP; if not, write to zooko.com:
# <a mailto:zooko@zooko.com>
#
# See the file COPYING or visit http://www.gnu.org/ for details.
# CVS:
__cvsid = '$Id: OvP.py,v 1.5 2002/02/09 22:46:13 zooko Exp $'
import path_fix
# standard Java modules
import java
from java.lang import *
from java.awt import *
from java.awt.event import *
from java.awt.geom import *
from javax.swing import *
from javax.swing.text import *
from java.awt.image import ImageObserver # debug
# standard Python modules
import math
import operator
import time
import traceback
# OvP modules
import HexBoard
from GamePieces import *
import Game
import Images
from util import *
true = 1
false = 0
version = (1, 2, 0)
verstr = '.'.join(map(str, version))
name = "Ogres vs. Cellular Automata"
NUM_STARTING_OGRES=2
NUM_STARTING_PIXIES=3
NUM_STARTING_TREES=32
class OvPHex(HexBoard.Hex):
def __init__(self, hb, hx, hy, bordercolor=Color.green, bgcolor=Color.black):
HexBoard.Hex.__init__(self, hb, hx, hy, bordercolor, bgcolor)
self._nextnumneighbors = 0
def is_center_of_broken_pixie_ring(hex):
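# A hex is the centre of a "broken" pixie ring when it is empty (or holds
# only Stones), has all six neighbours, and exactly one of its east/west
# trios of neighbours is all Trees while the opposite trio is completely empty.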
return (hex.is_empty() or hex.contains_only(Stone)) and \
(len(hex.get_adjacent_hexes()) == 6) and \
(((HexBoard.all_contain_a(hex.get_east_trio(), Tree)) and \
(HexBoard.all_are_empty(hex.get_west_trio()))) or \
((HexBoard.all_contain_a(hex.get_west_trio(), Tree)) and \
(HexBoard.all_are_empty(hex.get_east_trio()))))
def is_center_of_pixie_ring(hex):
return (hex.is_empty() or hex.contains_only(Stone)) and \
(len(hex.get_adjacent_hexes()) == 6) and \
HexBoard.all_contain_a(hex.get_adjacent_hexes(), Tree)
class OvP(JFrame, MouseListener, KeyListener, Runnable):
def __init__(self, boardwidth=16, boardheight=12, randseed=None):
JFrame.__init__(self, name + " v" + verstr)
if randseed == None:
randseed = int(time.time())
print "randseed: ", randseed
self.randseed = randseed
self.boardwidth=boardwidth
self.boardheight=boardheight
SwingUtilities.invokeLater(self)
def run(self):
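# Build the hex board, create the red (ogre) and white (pixie/tree) turn
# managers, wire up their begin/end-of-turn callbacks, place the starting
# pieces, and install the mouse and key listeners.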
randgen.seed(self.randseed)
self.hb = HexBoard.HexBoard(cxoffset=10, cyoffset=10+(self.boardheight-1)*30*2*0.75, scale=30)
self.selectedcreature = None
self.creatures = {} # k: color, v: list
self.creatures[Color.red] = []
self.creatures[Color.white] = []
self.turnmans = {} # k: color, v: TurnMan
self.turnmans[Color.red] = Game.TurnMan(self, self.creatures[Color.red], Color.red)
self.turnmans[Color.white] = Game.TurnMan(self, self.creatures[Color.white], Color.white)
self.turnmans[Color.red].register_regular_eot_event(self.turnmans[Color.white].begin_turn)
self.turnmans[Color.white].register_regular_bot_event(self.grow_trees)
self.turnmans[Color.white].register_regular_eot_event(self.turnmans[Color.red].begin_turn)
self.turnmans[Color.red].register_regular_bot_event(self.turnmans[Color.red].select_next_creature_or_end_turn)
self.turnmans[Color.white].register_regular_bot_event(self.turnmans[Color.white].select_next_creature_or_end_turn)
HSLOP=30
VSLOP=60
self.setContentPane(self.hb)
self.setSize(int(HSLOP + ((self.boardwidth+0.5)*self.hb.w)), int(VSLOP + (self.boardheight*self.hb.h*0.75)))
self.init_locations()
self.setVisible(true)
self.init_creatures()
self.turnmans[Color.red].begin_turn()
for mlmeth in dir(MouseListener):
setattr(self.__class__, mlmeth, OvP._null)
setattr(self.__class__, 'mousePressed', self._mousePressed)
for klmeth in dir(KeyListener):
setattr(self.__class__, klmeth, OvP._null)
setattr(self.__class__, 'keyTyped', self._keyTyped)
self.addMouseListener(self)
self.addKeyListener(self)
def _null(self, *args, **kwargs):
pass
def grow_trees(self):
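# Cellular-automaton step for the forest: count, for every hex, how many
# neighbouring hexes hold a Tree or a living Pixie.  An empty hex with
# exactly 3 such neighbours sprouts a new Tree; existing Trees age, and any
# Tree with fewer than 2 or with 4 or more such neighbours is destroyed and
# leaves a Stone behind.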
for hex in self.hb.hexes.values():
if filter(lambda x: isinstance(x, Tree) or (isinstance(x, Pixie) and not x.is_dead()), hex.items):
for adjhex in hex.get_adjacent_hexes():
adjhex._nextnumneighbors += 1
for hex in self.hb.hexes.values():
if hex._nextnumneighbors == 3:
if hex.is_empty():
Tree(self, hex)
for hex in self.hb.hexes.values():
[x.get_older() for x in hex.get_all(Tree)]
if hex._nextnumneighbors < 2:
if hex.contains_a(Tree):
[x.destroy() for x in hex.get_all(Tree)]
if not hex.contains_a(Stone):
Stone(self, hex)
elif hex._nextnumneighbors >= 4:
if hex.contains_a(Tree):
[x.destroy() for x in hex.get_all(Tree)]
if not hex.contains_a(Stone):
Stone(self, hex)
hex._nextnumneighbors = 0
def init_locations(self):
for hx in range(self.boardwidth):
for hy in range(self.boardheight):
OvPHex(self.hb, hx, hy)
def init_creatures(self):
for i in range(NUM_STARTING_OGRES):
hex = self.hb.get_empty_hex(maxhx=self.boardwidth/3)
if hex is not None:
Ogre(self, hex)
for i in range(NUM_STARTING_PIXIES):
hex = self.hb.get_empty_hex(minhx=self.boardwidth*3/4)
if hex is not None:
Pixie(self, hex)
for i in range(NUM_STARTING_TREES):
hex = self.hb.get_empty_hex(minhx=self.boardwidth/3)
if hex is not None:
Tree(self, hex)
# KingTree!
hex = self.hb.get_empty_hex(minhx=self.boardwidth/3)
KingTree(self, hex)
def _mousePressed(self, e):
pt = e.getPoint()
hbpt = self.hb.getLocation()
rppt = self.getRootPane().getLocation()
pt.translate(hbpt.x - rppt.x, hbpt.y - rppt.y)
hex = self.hb.pick_hex(pt)
if SwingUtilities.isRightMouseButton(e):
if self.selectedcreature is not None:
# cancel of current selection by right-click
self.selectedcreature.unselect()
else:
if self.selectedcreature is not None:
# act-request
self.selectedcreature.user_act(hex)
else:
if (hex is not None) and (not hex.is_empty()):
# mousepress
hex.items[-1].mouse_pressed()
def _keyTyped(self, e):
c = e.getKeyChar()
if c == 't':
# if any turn mans are waiting for confirm, then this is the confirm
for tm in self.turnmans.values():
if tm.waitingforconfirm:
tm._really_end_turn()
return
# keystrokes when there is a current selectedcreature item
if self.selectedcreature is None:
return
if c == 'w':
self.selectedcreature.user_act(self.selectedcreature.hex.get_nw())
elif c == 'e':
self.selectedcreature.user_act(self.selectedcreature.hex.get_ne())
elif c == 'a':
self.selectedcreature.user_act(self.selectedcreature.hex.get_w())
elif c == 'd':
self.selectedcreature.user_act(self.selectedcreature.hex.get_e())
elif c == 'z':
self.selectedcreature.user_act(self.selectedcreature.hex.get_sw())
elif c == 'x':
self.selectedcreature.user_act(self.selectedcreature.hex.get_se())
elif c == 's':
self.selectedcreature.user_act(self.selectedcreature.hex)
elif c == 'u':
self.selectedcreature.unselect()
if c == 'n':
self.selectedcreature.creatureman.turnman.select_next_creature_or_end_turn()
elif c == ' ':
getattr(self.selectedcreature, 'pass')()  # 'pass' is a Python keyword, so call the method via getattr
if __name__ == '__main__':
ovp=OvP()
|
zooko/ogresvpixies
|
OvP.py
|
Python
|
gpl-2.0
| 7,593
|
[
"VisIt"
] |
304c951b3a48219f22d001991739c2ccb1863c8be46599df76a57bbca29f8af5
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
# Wizard for running the mccSearch program
'''
import sys
import networkx as nx
import numpy as np
import numpy.ma as ma
import os
import matplotlib.pyplot as plt
import subprocess
#mccSearch modules
import mccSearch
import files
def main():
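'''
Purpose:: Wizard flow for mccSearch. Gathers the working, MERG and TRMM
directories and the start/end times, optionally runs pre- or
postprocessing, then runs findCloudElements -> findCloudClusters ->
findMCC and writes the MCC/MCS text files and plots.
'''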
CEGraph = nx.DiGraph()
prunedGraph = nx.DiGraph()
MCCList =[]
MCSList=[]
MCSMCCNodesList =[]
allMCSsList =[]
allCETRMMList =[]
DIRS={}
# DIRS={
# mainDirStr= "/directory/to/where/to/store/outputs"
# TRMMdirName = "/directory/to/the/TRMM/netCDF/files"
# CEoriDirName = "/directory/to/the/MERG/netCDF/files"
# }
preprocessing = ''
rawMERG = ''
#for GrADs
subprocess.call('export DISPLAY=:0.0', shell=True)
print "Running MCCSearch ..... \n"
DIRS['mainDirStr'] = raw_input("> Please enter working directory: \n" ) # This is where data created will be stored
preprocessing = raw_input ("> Do you need to preprocess the MERG files? [y/n]: \n")
while preprocessing.lower() != 'n':
if preprocessing.lower() == 'y':
#get location for raw files
rawMERG = raw_input("> Please enter the directory to the RAW MERG (.Z) files: \n")
#run preprocessing
mccSearch.preprocessingMERG(rawMERG)
continue
elif preprocessing.lower() == 'n':
pass
else:
print "Error! Invalid choice "
preprocessing = raw_input ("> Do you need to preprocess the MERG files? [y/n]: \n")
#get the location of the MERG and TRMM data
DIRS['CEoriDirName'] = raw_input("> Please enter the directory to the MERG netCDF files: \n")
try:
if not os.path.exists(DIRS['CEoriDirName']):
print "Error! MERG invalid path!"
DIRS['CEoriDirName'] = raw_input("> Please enter the directory to the MERG netCDF files: \n")
except:
print "..."
DIRS['TRMMdirName'] = raw_input("> Please enter the location to the raw TRMM netCDF files: \n")
try:
if not os.path.exists(DIRS['TRMMdirName']):
print "Error: TRMM invalid path!"
DIRS['TRMMdirName'] = raw_input("> Please enter the location to the raw TRMM netCDF files: \n")
except:
pass
#get the dates for analysis
startDateTime = raw_input("> Please enter the start date and time yyyymmddhr: \n")
#check validity of time
while validDate(startDateTime) == 0:
print "Invalid time entered for startDateTime!"
startDateTime = raw_input("> Please enter the start date and time yyyymmddhr: \n")
endDateTime = raw_input("> Please enter the end date and time yyyymmddhr: \n")
while validDate(endDateTime) == 0:
print "Invalid time entered for endDateTime!"
endDateTime = raw_input("> Please enter the end date and time yyyymmddhr: \n")
#check if all the files exisits in the MERG and TRMM directories entered
test, _ = mccSearch.checkForFiles(startDateTime, endDateTime, DIRS['TRMMdirName'], 2)
if test == False:
print "Error with files in the original TRMM directory entered. Please check your files before restarting. "
return
test, filelist = mccSearch.checkForFiles(startDateTime, endDateTime, DIRS['CEoriDirName'],1)
if test == False:
print "Error with files in the original MERG directory entered. Please check your files before restarting. "
return
#create main directory and file structure for storing intel
mccSearch.createMainDirectory(DIRS['mainDirStr'])
TRMMCEdirName = DIRS['mainDirStr']+'/TRMMnetcdfCEs'
CEdirName = DIRS['mainDirStr']+'/MERGnetcdfCEs'
# for doing some postprocessing with the clipped datasets instead of running the full program, e.g.
postprocessing = raw_input("> Do you wish to postprocess data? [y/n] \n")
while postprocessing.lower() != 'n':
if postprocessing.lower() == 'y':
option = postProcessingplotMenu(DIRS)
return
elif postprocessing.lower() == 'n':
pass
else:
print "\n Invalid option."
postprocessing = raw_input("> Do you wish to postprocess data? [y/n] \n")
# -------------------------------------------------------------------------------------------------
# Getting started. Make it so number one!
print ("-"*80)
print "\t\t Starting the MCCSearch Analysis "
print ("-"*80)
print "\n -------------- Reading MERG and TRMM Data ----------"
mergImgs, timeList = mccSearch.readMergData(DIRS['CEoriDirName'], filelist)
print "\n -------------- findCloudElements ----------"
CEGraph = mccSearch.findCloudElements(mergImgs,timeList,DIRS['TRMMdirName'])
#if the TRMMdirName wasn't entered for whatever reason, you can still get the TRMM data this way
# CEGraph = mccSearch.findCloudElements(mergImgs,timeList)
# allCETRMMList=mccSearch.findPrecipRate(DIRS['TRMMdirName'],timeList)
# ----------------------------------------------------------------------------------------------
print "\n -------------- findCloudClusters ----------"
prunedGraph = mccSearch.findCloudClusters(CEGraph)
print "\n -------------- findMCCs ----------"
MCCList,MCSList = mccSearch.findMCC(prunedGraph)
#now ready to perform various calculations/metrics
print ("-"*80)
print "\n -------------- METRICS ----------"
print ("-"*80)
#some calculations/metrics that work that work
print "creating the MCC userfile ", mccSearch.createTextFile(MCCList,1)
print "creating the MCS userfile ", mccSearch.createTextFile(MCSList,2)
plotMenu(MCCList, MCSList)
#Let's get outta here! Engage!
print ("-"*80)
#*********************************************************************************************************************
def plotMenu(MCCList, MCSList):
'''
Purpose:: The flow of plots for the user to choose
Input:: MCCList: a list of directories representing a list of nodes in the MCC
MCSList: a list of directories representing a list of nodes in the MCS
Output:: None
'''
option = displayPlotMenu()
while option != 0:
try:
if option == 1:
print "Generating Accumulated Rainfall from TRMM for the entire period ...\n"
mccSearch.plotAccTRMM(MCCList)
if option == 2:
startDateTime = raw_input("> Please enter the start date and time yyyy-mm-dd_hr:mm:ss format: \n")
endDateTime = raw_input("> Please enter the end date and time yyyy-mm-dd_hr:mm:ss format: \n")
print "Generating acccumulated rainfall between ", startDateTime," and ", endDateTime, " ... \n"
mccSearch.plotAccuInTimeRange(startDateTime, endDateTime)
if option == 3:
print "Generating area distribution plot ... \n"
mccSearch.displaySize(MCCList)
if option == 4:
print "Generating precipitation and area distribution plot ... \n"
mccSearch.displayPrecip(MCCList)
if option == 5:
try:
print "Generating histogram of precipitation for each time ... \n"
mccSearch.plotPrecipHistograms(MCCList)
except:
pass
except:
print "Invalid option. Please try again, enter 0 to exit \n"
option = displayPlotMenu()
return
#*********************************************************************************************************************
def displayPlotMenu():
'''
Purpose:: Display the plot Menu Options
Input:: None
Output:: option: an integer representing the choice of the user
'''
print "**************** PLOTS ************** \n"
print "0. Exit \n"
print "1. Accumulated TRMM precipitation \n"
print "2. Accumulated TRMM precipitation between dates \n"
print "3. Area distribution of the system over time \n"
print "4. Precipitation and area distribution of the system \n"
print "5. Histogram distribution of the rainfall in the area \n"
option = int(raw_input("> Please enter your option for plots: \n"))
return option
#*********************************************************************************************************************
def displayPostprocessingPlotMenu():
'''
Purpose:: Display the plot Menu Options
Input:: None
Output:: option: an integer representing the choice of the user
'''
print "**************** POST PROCESSING PLOTS ************** \n"
print "0. Exit \n"
print "1. Map plots of the original MERG data \n"
print "2. Map plots of the cloud elements using IR data \n"
print "3. Map plots of the cloud elements rainfall accumulations using TRMM data \n"
#print "4. Accumulated TRMM precipitation \n"
#print "5. Accumulated TRMM precipitation between dates \n"
option = int(raw_input("> Please enter your option for plots: \n"))
return option
#*********************************************************************************************************************
def postProcessingplotMenu(DIRS):
'''
Purpose:: The flow of plots for the user to choose
Input:: DIRS a dictionary of directories
# DIRS={
# mainDirStr= "/directory/to/where/to/store/outputs"
# TRMMdirName = "/directory/to/the/TRMM/netCDF/files"
# CEoriDirName = "/directory/to/the/MERG/netCDF/files"
# }
Output:: None
'''
TRMMCEdirName = DIRS['mainDirStr']+'/TRMMnetcdfCEs'
CEdirName = DIRS['mainDirStr']+'/MERGnetcdfCEs'
option = displayPostprocessingPlotMenu()
while option != 0:
try:
if option == 1:
print "Generating images from the original MERG dataset ... \n"
mccSearch.postProcessingNetCDF(3, DIRS['CEoriDirName'])
if option == 2:
print "Generating images from the cloud elements using MERG IR data ... \n"
mccSearch.postProcessingNetCDF(1, CEdirName)
if option == 3:
print "Generating precipitation accumulation images from the cloud elements using TRMM data ... \n"
mccSearch.postProcessingNetCDF(2, TRMMCEdirName)
# if option == 4:
# print "Generating Accumulated TRMM rainfall from cloud elements for each MCS ... \n"
# featureType = int(raw_input("> Please enter type of MCS MCC-1 or MCS-2: \n"))
# if featureType == 1:
# filename = DIRS['mainDirStr']+'/textFiles/MCCPostProcessing.txt'
# try:
# if os.path.isfile(filename):
# #read each line as a list
# mccSearch.plotAccTRMM()
# if option == 5:
# mccSearch.plotAccuInTimeRange()
except:
print "Invalid option, please try again"
option = displayPostprocessingPlotMenu()
return
#*********************************************************************************************************************
def validDate(dataString):
'''
Purpose:: Check that a date string in yyyymmddhr format is a valid date and hour
Input:: dataString: a string in yyyymmddhr format
Output:: 1 if the date/time is valid, 0 otherwise
'''
if len(dataString) > 10:
print "invalid time entered"
return 0
yr = int(dataString[:4])
mm = int(dataString[4:6])
dd = int(dataString[6:8])
hh = int(dataString[-2:])
if mm < 1 or mm > 12:
return 0
elif hh < 0 or hh > 23:
return 0
elif (dd< 0 or dd > 30) and (mm == 4 or mm == 6 or mm == 9 or mm == 11):
return 0
elif (dd< 0 or dd > 31) and (mm == 1 or mm ==3 or mm == 5 or mm == 7 or mm == 8 or mm == 10):
return 0
elif dd > 28 and mm == 2 and (yr%4)!=0:
return 0
elif (yr%4)==0 and mm == 2 and dd>29:
return 0
elif dd > 31 and mm == 12:
return 0
else:
return 1
#*********************************************************************************************************************
main()
|
kwhitehall/climate
|
mccsearch/code/mccSearchUI.py
|
Python
|
apache-2.0
| 12,928
|
[
"NetCDF"
] |
45c8411ea587fe839cea9f9447ad000b10fe517a39db5f320077b4115515227a
|
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
import numpy as np
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
clf_1 = DecisionTreeRegressor(max_depth=4)
clf_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict
y_1 = clf_1.predict(X)
y_2 = clf_2.predict(X)
# Plot the results
import pylab as pl
pl.figure()
pl.scatter(X, y, c="k", label="training samples")
pl.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
pl.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
pl.xlabel("data")
pl.ylabel("target")
pl.title("Boosted Decision Tree Regression")
pl.legend()
pl.show()
|
depet/scikit-learn
|
examples/ensemble/plot_adaboost_regression.py
|
Python
|
bsd-3-clause
| 1,404
|
[
"Gaussian"
] |
f31e5cdaa1c3381fab5a942cd13dd01ecff44d5c20ccd2b1498f453e327abf6a
|
from numpy.testing import assert_equal, assert_array_equal, assert_allclose
from nose.tools import assert_true, assert_raises, assert_not_equal
from copy import deepcopy
import os.path as op
import numpy as np
from scipy import sparse
import os
import warnings
from mne.utils import (set_log_level, set_log_file, _TempDir,
get_config, set_config, deprecated, _fetch_file,
sum_squared, estimate_rank,
_url_to_local_path, sizeof_fmt, _check_subject,
_check_type_picks, object_hash, object_diff,
requires_good_network, run_tests_if_main, md5sum,
ArgvSetter, _memory_usage, check_random_state,
_check_mayavi_version, requires_mayavi,
set_memmap_min_size, _get_stim_channel, _check_fname,
create_slices, _time_mask, random_permutation,
_get_call_line, compute_corr, verbose)
from mne.io import show_fiff
from mne import Evoked
from mne.externals.six.moves import StringIO
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_evoked = op.join(base_dir, 'test-ave.fif')
fname_raw = op.join(base_dir, 'test_raw.fif')
fname_log = op.join(base_dir, 'test-ave.log')
fname_log_2 = op.join(base_dir, 'test-ave-2.log')
def clean_lines(lines=[]):
# Function to scrub filenames for checking logging output (in test_logging)
return [l if 'Reading ' not in l else 'Reading test file' for l in lines]
def test_get_call_line():
"""Test getting a call line
"""
@verbose
def foo(verbose=None):
return _get_call_line(in_verbose=True)
for v in (None, True):
my_line = foo(verbose=v) # testing
assert_equal(my_line, 'my_line = foo(verbose=v) # testing')
def bar():
return _get_call_line(in_verbose=False)
my_line = bar() # testing more
assert_equal(my_line, 'my_line = bar() # testing more')
def test_misc():
"""Test misc utilities"""
assert_equal(_memory_usage(-1)[0], -1)
assert_equal(_memory_usage((clean_lines, [], {}))[0], -1)
assert_equal(_memory_usage(clean_lines)[0], -1)
assert_raises(ValueError, check_random_state, 'foo')
assert_raises(ValueError, set_memmap_min_size, 1)
assert_raises(ValueError, set_memmap_min_size, 'foo')
assert_raises(TypeError, get_config, 1)
assert_raises(TypeError, set_config, 1)
assert_raises(TypeError, set_config, 'foo', 1)
assert_raises(TypeError, _get_stim_channel, 1, None)
assert_raises(TypeError, _get_stim_channel, [1], None)
assert_raises(TypeError, _check_fname, 1)
assert_raises(ValueError, _check_subject, None, None)
assert_raises(ValueError, _check_subject, None, 1)
assert_raises(ValueError, _check_subject, 1, None)
@requires_mayavi
def test_check_mayavi():
"""Test mayavi version check"""
assert_raises(RuntimeError, _check_mayavi_version, '100.0.0')
def test_run_tests_if_main():
"""Test run_tests_if_main functionality"""
x = []
def test_a():
x.append(True)
@np.testing.dec.skipif(True)
def test_b():
return
try:
__name__ = '__main__'
run_tests_if_main(measure_mem=False) # dual meas causes problems
def test_c():
raise RuntimeError
try:
__name__ = '__main__'
run_tests_if_main(measure_mem=False) # dual meas causes problems
except RuntimeError:
pass
else:
raise RuntimeError('Error not raised')
finally:
del __name__
assert_true(len(x) == 2)
assert_true(x[0] and x[1])
def test_hash():
"""Test dictionary hashing and comparison functions"""
# does hashing all of these types work:
# {dict, list, tuple, ndarray, str, float, int, None}
d0 = dict(a=dict(a=0.1, b='fo', c=1), b=[1, 'b'], c=(), d=np.ones(3),
e=None)
d0[1] = None
d0[2.] = b'123'
d1 = deepcopy(d0)
assert_true(len(object_diff(d0, d1)) == 0)
assert_true(len(object_diff(d1, d0)) == 0)
assert_equal(object_hash(d0), object_hash(d1))
# change values slightly
d1['data'] = np.ones(3, int)
d1['d'][0] = 0
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['a']['a'] = 0.11
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['a']['d'] = 0 # non-existent key
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['b'].append(0) # different-length lists
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
assert_equal(object_hash(d0), object_hash(d1))
d1['e'] = 'foo' # non-None
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
d1 = deepcopy(d0)
d2 = deepcopy(d0)
d1['e'] = StringIO()
d2['e'] = StringIO()
d2['e'].write('foo')
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
d1 = deepcopy(d0)
d1[1] = 2
assert_true(len(object_diff(d0, d1)) > 0)
assert_true(len(object_diff(d1, d0)) > 0)
assert_not_equal(object_hash(d0), object_hash(d1))
# generators (and other types) not supported
d1 = deepcopy(d0)
d2 = deepcopy(d0)
d1[1] = (x for x in d0)
d2[1] = (x for x in d0)
assert_raises(RuntimeError, object_diff, d1, d2)
assert_raises(RuntimeError, object_hash, d1)
x = sparse.eye(2, 2, format='csc')
y = sparse.eye(2, 2, format='csr')
assert_true('type mismatch' in object_diff(x, y))
y = sparse.eye(2, 2, format='csc')
assert_equal(len(object_diff(x, y)), 0)
y[1, 1] = 2
assert_true('elements' in object_diff(x, y))
y = sparse.eye(3, 3, format='csc')
assert_true('shape' in object_diff(x, y))
y = 0
assert_true('type mismatch' in object_diff(x, y))
def test_md5sum():
"""Test md5sum calculation
"""
tempdir = _TempDir()
fname1 = op.join(tempdir, 'foo')
fname2 = op.join(tempdir, 'bar')
with open(fname1, 'wb') as fid:
fid.write(b'abcd')
with open(fname2, 'wb') as fid:
fid.write(b'efgh')
assert_equal(md5sum(fname1), md5sum(fname1, 1))
assert_equal(md5sum(fname2), md5sum(fname2, 1024))
assert_true(md5sum(fname1) != md5sum(fname2))
def test_tempdir():
"""Test TempDir
"""
tempdir2 = _TempDir()
assert_true(op.isdir(tempdir2))
x = str(tempdir2)
del tempdir2
assert_true(not op.isdir(x))
def test_estimate_rank():
"""Test rank estimation
"""
data = np.eye(10)
assert_array_equal(estimate_rank(data, return_singular=True)[1],
np.ones(10))
data[0, 0] = 0
assert_equal(estimate_rank(data), 9)
def test_logging():
"""Test logging (to file)
"""
assert_raises(ValueError, set_log_level, 'foo')
tempdir = _TempDir()
test_name = op.join(tempdir, 'test.log')
with open(fname_log, 'r') as old_log_file:
old_lines = clean_lines(old_log_file.readlines())
with open(fname_log_2, 'r') as old_log_file_2:
old_lines_2 = clean_lines(old_log_file_2.readlines())
if op.isfile(test_name):
os.remove(test_name)
# test it one way (printing default off)
set_log_file(test_name)
set_log_level('WARNING')
# should NOT print
evoked = Evoked(fname_evoked, condition=1)
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# should NOT print
evoked = Evoked(fname_evoked, condition=1, verbose=False)
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# should NOT print
evoked = Evoked(fname_evoked, condition=1, verbose='WARNING')
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# SHOULD print
evoked = Evoked(fname_evoked, condition=1, verbose=True)
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
assert_equal(new_lines, old_lines)
set_log_file(None) # Need to do this to close the old file
os.remove(test_name)
# now go the other way (printing default on)
set_log_file(test_name)
set_log_level('INFO')
# should NOT print
evoked = Evoked(fname_evoked, condition=1, verbose='WARNING')
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# should NOT print
evoked = Evoked(fname_evoked, condition=1, verbose=False)
with open(test_name) as fid:
assert_true(fid.readlines() == [])
# SHOULD print
evoked = Evoked(fname_evoked, condition=1)
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
with open(fname_log, 'r') as old_log_file:
assert_equal(new_lines, old_lines)
# check to make sure appending works (and as default, raises a warning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
set_log_file(test_name, overwrite=False)
assert len(w) == 0
set_log_file(test_name)
assert len(w) == 1
evoked = Evoked(fname_evoked, condition=1)
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
assert_equal(new_lines, old_lines_2)
# make sure overwriting works
set_log_file(test_name, overwrite=True)
# this line needs to be called to actually do some logging
evoked = Evoked(fname_evoked, condition=1)
del evoked
with open(test_name, 'r') as new_log_file:
new_lines = clean_lines(new_log_file.readlines())
assert_equal(new_lines, old_lines)
def test_config():
"""Test mne-python config file support"""
tempdir = _TempDir()
key = '_MNE_PYTHON_CONFIG_TESTING'
value = '123456'
old_val = os.getenv(key, None)
os.environ[key] = value
assert_true(get_config(key) == value)
del os.environ[key]
# catch the warning about it being a non-standard config key
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
set_config(key, None, home_dir=tempdir)
assert_true(len(w) == 1)
assert_true(get_config(key, home_dir=tempdir) is None)
assert_raises(KeyError, get_config, key, raise_error=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
set_config(key, value, home_dir=tempdir)
assert_true(get_config(key, home_dir=tempdir) == value)
set_config(key, None, home_dir=tempdir)
if old_val is not None:
os.environ[key] = old_val
# Check if get_config with no input returns all config
key = 'MNE_PYTHON_TESTING_KEY'
config = {key: value}
with warnings.catch_warnings(record=True): # non-standard key
warnings.simplefilter('always')
set_config(key, value, home_dir=tempdir)
assert_equal(get_config(home_dir=tempdir), config)
def test_show_fiff():
"""Test show_fiff
"""
# this is not exhaustive, but hopefully bugs will be found in use
info = show_fiff(fname_evoked)
keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM',
'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE',
'FIFF_EPOCH']
assert_true(all(key in info for key in keys))
info = show_fiff(fname_raw, read_limit=1024)
@deprecated('message')
def deprecated_func():
pass
@deprecated('message')
class deprecated_class(object):
def __init__(self):
pass
def test_deprecated():
"""Test deprecated function
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
deprecated_func()
assert_true(len(w) == 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
deprecated_class()
assert_true(len(w) == 1)
@requires_good_network
def test_fetch_file():
"""Test file downloading
"""
tempdir = _TempDir()
urls = ['http://martinos.org/mne/',
'ftp://surfer.nmr.mgh.harvard.edu/pub/data/bert.recon.md5sum.txt']
with ArgvSetter(disable_stderr=False): # to capture stdout
for url in urls:
archive_name = op.join(tempdir, "download_test")
_fetch_file(url, archive_name, verbose=False)
assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
op.join(tempdir, 'test'), verbose=False)
resume_name = op.join(tempdir, "download_resume")
# touch file
with open(resume_name + '.part', 'w'):
os.utime(resume_name + '.part', None)
_fetch_file(url, resume_name, resume=True, verbose=False)
assert_raises(ValueError, _fetch_file, url, archive_name,
hash_='a', verbose=False)
assert_raises(RuntimeError, _fetch_file, url, archive_name,
hash_='a' * 32, verbose=False)
def test_sum_squared():
"""Test optimized sum of squares
"""
X = np.random.RandomState(0).randint(0, 50, (3, 3))
assert_equal(np.sum(X ** 2), sum_squared(X))
def test_sizeof_fmt():
"""Test sizeof_fmt
"""
assert_equal(sizeof_fmt(0), '0 bytes')
assert_equal(sizeof_fmt(1), '1 byte')
assert_equal(sizeof_fmt(1000), '1000 bytes')
def test_url_to_local_path():
"""Test URL to local path
"""
assert_equal(_url_to_local_path('http://google.com/home/why.html', '.'),
op.join('.', 'home', 'why.html'))
def test_check_type_picks():
"""Test checking type integrity checks of picks
"""
picks = np.arange(12)
assert_array_equal(picks, _check_type_picks(picks))
picks = list(range(12))
assert_array_equal(np.array(picks), _check_type_picks(picks))
picks = None
assert_array_equal(None, _check_type_picks(picks))
picks = ['a', 'b']
assert_raises(ValueError, _check_type_picks, picks)
picks = 'b'
assert_raises(ValueError, _check_type_picks, picks)
def test_compute_corr():
"""Test Anscombe's Quartett
"""
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y = np.array([[8.04, 6.95, 7.58, 8.81, 8.33, 9.96,
7.24, 4.26, 10.84, 4.82, 5.68],
[9.14, 8.14, 8.74, 8.77, 9.26, 8.10,
6.13, 3.10, 9.13, 7.26, 4.74],
[7.46, 6.77, 12.74, 7.11, 7.81, 8.84,
6.08, 5.39, 8.15, 6.42, 5.73],
[8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8],
[6.58, 5.76, 7.71, 8.84, 8.47, 7.04,
5.25, 12.50, 5.56, 7.91, 6.89]])
r = compute_corr(x, y.T)
r2 = np.array([np.corrcoef(x, y[i])[0, 1]
for i in range(len(y))])
assert_allclose(r, r2)
assert_raises(ValueError, compute_corr, [1, 2], [])
def test_create_slices():
"""Test checking the create of time create_slices
"""
# Test that create_slices default provide an empty list
assert_true(create_slices(0, 0) == [])
# Test that create_slice return correct number of slices
assert_true(len(create_slices(0, 100)) == 100)
# Test with non-zero start parameters
assert_true(len(create_slices(50, 100)) == 50)
# Test slices' length with non-zero start and window_width=2
assert_true(len(create_slices(0, 100, length=2)) == 50)
# Test slices' length with manual slice separation
assert_true(len(create_slices(0, 100, step=10)) == 10)
# Test slices' within length for non-consecutive samples
assert_true(len(create_slices(0, 500, length=50, step=10)) == 46)
# Test that slices elements start, stop and step correctly
slices = create_slices(0, 10)
assert_true(slices[0].start == 0)
assert_true(slices[0].step == 1)
assert_true(slices[0].stop == 1)
assert_true(slices[-1].stop == 10)
# Same with larger window width
slices = create_slices(0, 9, length=3)
assert_true(slices[0].start == 0)
assert_true(slices[0].step == 1)
assert_true(slices[0].stop == 3)
assert_true(slices[-1].stop == 9)
# Same with manual slices' separation
slices = create_slices(0, 9, length=3, step=1)
assert_true(len(slices) == 7)
assert_true(slices[0].step == 1)
assert_true(slices[0].stop == 3)
assert_true(slices[-1].start == 6)
assert_true(slices[-1].stop == 9)
def test_time_mask():
"""Test safe time masking
"""
N = 10
x = np.arange(N).astype(float)
assert_equal(_time_mask(x, 0, N - 1).sum(), N)
assert_equal(_time_mask(x - 1e-10, 0, N - 1).sum(), N)
assert_equal(_time_mask(x - 1e-10, 0, N - 1, strict=True).sum(), N - 1)
def test_random_permutation():
"""Test random permutation function
"""
n_samples = 10
random_state = 42
python_randperm = random_permutation(n_samples, random_state)
# matlab output when we execute rng(42), randperm(10)
matlab_randperm = np.array([7, 6, 5, 1, 4, 9, 10, 3, 8, 2])
assert_array_equal(python_randperm, matlab_randperm - 1)
run_tests_if_main()
|
yousrabk/mne-python
|
mne/tests/test_utils.py
|
Python
|
bsd-3-clause
| 17,609
|
[
"Mayavi"
] |
7e1e91de00ea1f615c522956d52428c42be43e2a4c2b3c70e7576d4f8c7b7e21
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
from espressomd import integrate
from espressomd import visualization
import numpy
from matplotlib import pyplot
from threading import Thread
print("""
=======================================================
= lj_liquid.py =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.skin = 0.4
#es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0')
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 50000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
system.analysis.distto(0)
print("Simulate {} particles in a cubic simulation box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.mindist()
print("Start with minimal distance {}".format(act_min_dist))
system.max_num_cells = 2744
mayavi = visualization.mayavi_live(system)
#############################################################
# Warmup Integration #
#############################################################
# open Observable file
obs_file = open("pylj_liquid.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
integrate.integrate(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.mindist()
# print("\rrun %d at time=%f (LJ cap=%f) min dist = %f\r" % (i,system.time,lj_cap,act_min_dist), end=' ')
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.non_bonded_inter.set_force_cap(lj_cap)
mayavi.update()
# Just to see what else we may get from the c code
print("""
ro variables:
cell_grid {0.cell_grid}
cell_size {0.cell_size}
local_box_l {0.local_box_l}
max_cut {0.max_cut}
max_part {0.max_part}
max_range {0.max_range}
max_skin {0.max_skin}
n_nodes {0.n_nodes}
n_part {0.n_part}
n_part_types {0.n_part_types}
periodicity {0.periodicity}
transfer_rate {0.transfer_rate}
verlet_reuse {0.verlet_reuse}
""".format(system))
# write parameter file
set_file = open("pylj_liquid.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
(box_l, system.time_step, system.skin))
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# print initial energies
energies = system.analysis.energy()
print(energies)
plot, = pyplot.plot([0],[energies['total']], label="total")
pyplot.xlabel("Time")
pyplot.ylabel("Energy")
pyplot.legend()
pyplot.show(block=False)
j = 0
def main_loop():
    global energies, j
    print("run %d at time=%f " % (j, system.time))
integrate.integrate(int_steps)
mayavi.update()
energies = system.analysis.energy()
print(energies)
plot.set_xdata(numpy.append(plot.get_xdata(), system.time))
plot.set_ydata(numpy.append(plot.get_ydata(), energies['total']))
obs_file.write('{ time %s } %s\n' % (system.time, energies))
linear_momentum = system.analysis.analyze_linear_momentum()
    print(linear_momentum)
    j += 1
def main_thread():
for i in range(0, int_n_times):
main_loop()
last_plotted = 0
def update_plot():
global last_plotted
current_time = plot.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
pyplot.xlim(0, plot.get_xdata()[-1])
pyplot.ylim(plot.get_ydata().min(), plot.get_ydata().max())
pyplot.draw()
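# Run the MD integration in a daemon thread so that the Mayavi GUI event loop
# (and the periodic plot-update callback registered below) stays responsive in
# the main thread.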
t = Thread(target=main_thread)
t.daemon = True
t.start()
mayavi.register_callback(update_plot, interval=2000)
mayavi.run_gui_event_loop()
# write end configuration
end_file = open("pylj_liquid.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {id pos type} }")
for i in range(n_part):
end_file.write("%s\n" % system.part[i].pos)
# id & type not working yet
obs_file.close()
set_file.close()
end_file.close()
# terminate program
print("\nFinished.")
|
tbereau/espresso
|
samples/python/visualization.py
|
Python
|
gpl-3.0
| 6,903
|
[
"ESPResSo",
"Mayavi"
] |
d0dd0dc0b1b4b78c59db437aa2bd5dec3d84b2f049b641ac7cb043ad4e623d2c
|
""" Class to manage connections for the Message Queue resources.
Also, set of 'private' helper functions to access and modify the message queue connection storage.
They are meant to be used only internally by the MQConnectionManager, which should
assure thread-safe access to it and standard S_OK/S_ERROR error handling.
MQConnection storage is a dict structure that contains the MQ connections used and reused for
producer/consumer communication. Example structure::
{
mardirac3.in2p3.fr: {'MQConnector':StompConnector, 'destinations':{'/queue/test1':['consumer1', 'producer1'],
'/queue/test2':['consumer1', 'producer1']}},
blabal.cern.ch: {'MQConnector':None, 'destinations':{'/queue/test2':['consumer2', 'producer2',]}}
}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Resources.MessageQueue.Utilities import getMQService
from DIRAC.Resources.MessageQueue.Utilities import getDestinationAddress
from DIRAC.Resources.MessageQueue.MQConnector import createMQConnector
from DIRAC.Core.Utilities.DErrno import EMQCONN
class MQConnectionManager(object):
"""Manages connections for the Message Queue resources in form of the interal connection storage."""
def __init__(self, connectionStorage=None):
self.log = gLogger.getSubLogger(self.__class__.__name__)
self.__lock = None
if connectionStorage:
self.__connectionStorage = connectionStorage
else:
self.__connectionStorage = {}
@property
def lock(self):
""" Lock to assure thread-safe access to the internal connection storage.
"""
if not self.__lock:
self.__lock = LockRing().getLock(self.__class__.__name__, recursive=True)
return self.__lock
def startConnection(self, mqURI, params, messengerType):
""" Function adds or updates the MQ connection. If the connection
        does not exist, an MQConnector is created and added.
Args:
mqURI(str):
params(dict): parameters to initialize the MQConnector.
messengerType(str): 'consumer' or 'producer'.
Returns:
S_OK/S_ERROR: with the value of the messenger Id in S_OK.
"""
self.lock.acquire()
try:
conn = getMQService(mqURI)
if self.__connectionExists(conn):
return self.addNewMessenger(mqURI=mqURI, messengerType=messengerType)
else: # Connection does not exist so we create the connector and we add a new connection
result = self.addNewMessenger(mqURI=mqURI, messengerType=messengerType)
if not result['OK']:
return result
mId = result['Value']
result = self.createConnectorAndConnect(parameters=params)
if not result['OK']:
return result
if self.__getConnector(conn):
return S_ERROR(EMQCONN, "The connector already exists!")
self.__setConnector(conn, result['Value'])
return S_OK(mId)
finally:
self.lock.release()
def addNewMessenger(self, mqURI, messengerType):
""" Function updates the MQ connection by adding the messenger Id to the internal connection storage.
        The messenger Id is also chosen here: it is set to the maximum existing value
        (or 0 if no messengers are connected) plus 1, and is calculated separately for
        consumers and producers.
Args:
mqURI(str):
messengerType(str): 'consumer' or 'producer'.
Returns:
S_OK: with the value of the messenger Id or S_ERROR if the messenger was not added,
        because the same id already exists.
"""
# 'consumer1' ->1
# 'producer21' ->21
def msgIdToInt(msgIds, msgType):
return [int(m.replace(msgType, '')) for m in msgIds]
# The messengerId is str e.g. 'consumer5' or 'producer3'
def generateMessengerId(msgT):
return msgT + str(max(msgIdToInt(self.__getAllMessengersIdWithType(msgT), msgT) or [0]) + 1)
self.lock.acquire()
try:
conn = getMQService(mqURI)
dest = getDestinationAddress(mqURI)
mId = generateMessengerId(messengerType)
if self.__addMessenger(conn, dest, mId):
return S_OK(mId)
return S_ERROR(EMQCONN, "Failed to update the connection: the messenger %s already exists" % mId)
finally:
self.lock.release()
def createConnectorAndConnect(self, parameters):
result = createMQConnector(parameters=parameters)
if not result['OK']:
return result
connector = result['Value']
result = connector.setupConnection(parameters=parameters)
if not result['OK']:
return result
result = connector.connect()
if not result['OK']:
return result
return S_OK(connector)
def disconnect(self, connector):
if not connector:
return S_ERROR(EMQCONN, 'Failed to disconnect! Connector is None!')
return connector.disconnect()
def unsubscribe(self, connector, destination, messengerId):
if not connector:
return S_ERROR(EMQCONN, 'Failed to unsubscribe! Connector is None!')
return connector.unsubscribe(parameters={'destination': destination, 'messengerId': messengerId})
def getConnector(self, mqConnection):
""" Function returns MQConnector assigned to the mqURI.
Args:
mqConnection(str): connection name.
Returns:
S_OK/S_ERROR: with the value of the MQConnector in S_OK if not None
"""
self.lock.acquire()
try:
connector = self.__getConnector(mqConnection)
if not connector:
return S_ERROR('Failed to get the MQConnector!')
return S_OK(connector)
finally:
self.lock.release()
def stopConnection(self, mqURI, messengerId):
""" Function 'stops' the connection for given messenger, which means
it removes it from the messenger list. If this is the consumer, the
unsubscribe() connector method is called. If it is the last messenger
of this destination (queue or topic), then the destination is removed.
        If it is the last destination of this connection, the disconnect function
is called and the connection is removed.
Args:
mqURI(str):
messengerId(str): e.g. 'consumer1' or 'producer10'.
Returns:
      S_OK: if the messenger was successfully removed (and the connection closed
        if it was the last one), or S_ERROR if the messenger does not exist.
"""
self.lock.acquire()
try:
conn = getMQService(mqURI)
dest = getDestinationAddress(mqURI)
connector = self.__getConnector(conn)
if not self.__removeMessenger(conn, dest, messengerId):
return S_ERROR(EMQCONN, 'Failed to stop the connection!The messenger %s does not exist!' % messengerId)
else:
if 'consumer' in messengerId:
result = self.unsubscribe(connector, destination=dest, messengerId=messengerId)
if not result['OK']:
return result
if not self.__connectionExists(conn):
return self.disconnect(connector)
return S_OK()
finally:
self.lock.release()
def getAllMessengers(self):
""" Function returns a list of all messengers registered in connection storage.
Returns:
S_OK or S_ERROR: with the list of strings in the pseudo-path format e.g.
['blabla.cern.ch/queue/test1/consumer1','blabal.cern.ch/topic/test2/producer2']
"""
self.lock.acquire()
try:
return S_OK(self.__getAllMessengersInfo())
finally:
self.lock.release()
def removeAllConnections(self):
""" Function removes all existing connections and calls the disconnect
for connectors.
Returns:
S_OK or S_ERROR:
"""
self.lock.acquire()
try:
connections = self.__getAllConnections()
for conn in connections:
connector = self.__getConnector(conn)
if connector:
self.disconnect(connector)
self.__connectionStorage = {}
return S_OK()
finally:
self.lock.release()
# Set of 'private' helper functions to access and modify the message queue connection storage.
def __getConnection(self, mqConnection):
""" Function returns message queue connection from the storage.
Args:
mqConnection(str): message queue connection name.
Returns:
dict:
"""
return self.__connectionStorage.get(mqConnection, {})
def __getAllConnections(self):
""" Function returns a list of all connection names in the storage
Returns:
list:
"""
return list(self.__connectionStorage)
def __getConnector(self, mqConnection):
""" Function returns MQConnector from the storage.
Args:
mqConnection(str): message queue connection name.
Returns:
MQConnector or None
"""
return self.__getConnection(mqConnection).get("MQConnector", None)
def __setConnector(self, mqConnection, connector):
""" Function returns MQConnector from the storage.
Args:
mqConnection(str): message queue connection name.
connector(MQConnector):
Returns:
      bool: False if connection does not exist
"""
connDict = self.__getConnection(mqConnection)
if not connDict:
return False
connDict["MQConnector"] = connector
return True
def __getDestinations(self, mqConnection):
""" Function returns dict with destinations (topics and queues) for given connection.
Args:
mqConnection(str): message queue connection name.
Returns:
dict: of form {'/queue/queue1':['producer1','consumer2']} or {}
"""
return self.__getConnection(mqConnection).get("destinations", {})
def __getMessengersId(self, mqConnection, mqDestination):
""" Function returns list of messengers for given connection and given destination.
Args:
mqConnection(str): message queue connection name.
mqDestination(str): message queue or topic name e.g. '/queue/myQueue1' .
Returns:
list: of form ['producer1','consumer2'] or []
"""
return self.__getDestinations(mqConnection).get(mqDestination, [])
def __getMessengersIdWithType(self, mqConnection, mqDestination, messengerType):
""" Function returns list of messnager for given connection, destination and type.
Args:
mqConnection(str): message queue connection name.
mqDestination(str): message queue or topic name e.g. '/queue/myQueue1' .
messengerType(str): 'consumer' or 'producer'
Returns:
list: of form ['producer1','producer2'], ['consumer8', 'consumer20'] or []
"""
return [p for p in self.__getMessengersId(mqConnection, mqDestination) if messengerType in p]
def __getAllMessengersId(self):
""" Function returns list of all messengers ids.
The list can contain duplicates because the same
producer id can be used for different queues.
Args:
Returns:
list: of form ['producer1','consumer1', 'producer1'] or []
"""
return [m for c in self.__connectionStorage.keys() for d in self.__getDestinations(c)
for m in self.__getMessengersId(c, d)]
def __getAllMessengersIdWithType(self, messengerType):
""" Function returns list of all messengers ids for given messengerType
Args:
messengerType(str): 'consumer' or 'producer'
Returns:
list: of form ['producer1','producer2'], ['consumer8', 'consumer20'] or []
"""
return [p for p in self.__getAllMessengersId() if messengerType in p]
def __getAllMessengersInfo(self):
""" Function returns list of all messengers in the pseudo-path format.
Returns:
list: of form ['blabla.cern.ch/queue/myQueue1/producer1','bibi.in2p3.fr/topic/myTopic331/consumer3'] or []
"""
def output(connection, dest, messenger):
return str(connection) + str(dest) + '/' + str(messenger)
return [output(c, d, m) for c in self.__connectionStorage.keys()
for d in self.__getDestinations(c) for m in self.__getMessengersId(c, d)]
def __connectionExists(self, mqConnection):
""" Function checks if given connection exists in the connection storage.
Args:
mqConnection(str): message queue connection name.
Returns:
bool:
"""
return mqConnection in self.__connectionStorage
def __destinationExists(self, mqConnection, mqDestination):
""" Function checks if given destination(queue or topic) exists in the connection storage.
Args:
mqConnection(str): message queue connection name.
mqDestination(str): message queue or topic name e.g. '/queue/myQueue1' .
Returns:
bool:
"""
return mqDestination in self.__getDestinations(mqConnection)
def __messengerExists(self, mqConnection, mqDestination, messengerId):
""" Function checks if given messenger(producer or consumer) exists in the connection storage.
Args:
mqConnection(str): message queue connection name.
mqDestination(str): message queue or topic name e.g. '/queue/myQueue1' .
messengerId(str): messenger name e.g. 'consumer1', 'producer4' .
Returns:
bool:
"""
return messengerId in self.__getMessengersId(mqConnection, mqDestination)
def __addMessenger(self, mqConnection, destination, messengerId):
""" Function adds a messenger(producer or consumer) to given connection and destination.
        If connection and/or destination do not exist, they are created as well.
Args:
mqConnection(str): message queue connection name.
mqDestination(str): message queue or topic name e.g. '/queue/myQueue1' .
messengerId(str): messenger name e.g. 'consumer1', 'producer4'.
Returns:
bool: True if messenger is added or False if the messenger already exists.
"""
if self.__messengerExists(mqConnection, destination, messengerId):
return False
if self.__connectionExists(mqConnection):
if self.__destinationExists(mqConnection, destination):
self.__getMessengersId(mqConnection, destination).append(messengerId)
else:
self.__getDestinations(mqConnection)[destination] = [messengerId]
else:
self.__connectionStorage[mqConnection] = {"MQConnector": None, "destinations": {destination: [messengerId]}}
return True
def __removeMessenger(self, mqConnection, destination, messengerId):
""" Function removes messenger(producer or consumer) from given connection and destination.
        If it is the last messenger in the given destination and/or connection, they are removed as well.
Args:
mqConnection(str): message queue connection name.
mqDestination(str): message queue or topic name e.g. '/queue/myQueue1' .
messengerId(str): messenger name e.g. 'consumer1', 'producer4'.
Returns:
bool: True if messenger is removed or False if the messenger was not in the storage.
"""
messengers = self.__getMessengersId(mqConnection, destination)
destinations = self.__getDestinations(mqConnection)
if messengerId in messengers:
messengers.remove(messengerId)
if not messengers: # If no more messengers we remove the destination.
destinations.pop(destination)
if not destinations: # If no more destinations we remove the connection
self.__connectionStorage.pop(mqConnection)
return True
else:
return False # messenger was not in the storage
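# A minimal usage sketch: it exercises only the in-memory storage bookkeeping
# (no MQ broker is contacted), it assumes a working DIRAC installation for
# gLogger/LockRing, and the host, queue and messenger names below are purely
# illustrative.
if __name__ == "__main__":
  exampleStorage = {
      "mardirac3.in2p3.fr": {
          "MQConnector": None,
          "destinations": {"/queue/test1": ["producer1", "consumer2"]},
      }
  }
  manager = MQConnectionManager(connectionStorage=exampleStorage)
  # Expected pseudo-path listing:
  #   ['mardirac3.in2p3.fr/queue/test1/producer1',
  #    'mardirac3.in2p3.fr/queue/test1/consumer2']
  print(manager.getAllMessengers()["Value"])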
|
yujikato/DIRAC
|
src/DIRAC/Resources/MessageQueue/MQConnectionManager.py
|
Python
|
gpl-3.0
| 15,401
|
[
"DIRAC"
] |
aee591e14082795201d33c5ec9a473cb3deae4e88e214a825d84760c9da54db7
|
"""
A Tkinter based backend for piddle.
Perry A. Stoll
Created: February 15, 1999
Requires PIL for rotated string support.
Known Problems:
- Doesn't handle the interactive commands yet.
- PIL based canvas inherits lack of underlining strings from piddlePIL
You can find the latest version of this file:
via http://piddle.sourceforge.net
"""
# we depend on PIL for rotated strings so watch for changes in PIL
import Tkinter, tkFont
tk = Tkinter
import rdkit.sping.pid
import string
__version__ = "0.3"
__date__ = "April 8, 1999"
__author__ = "Perry Stoll, perry.stoll@mail.com "
# fixups by chris lee, cwlee@artsci.wustl.edu
# $Id$
# - added drawImage scaling support
# - shifted baseline y parameter in drawString to work around font metric
# shift due to Tkinter's Canvas text_item object
# - fixed argument names so that argument keywords agreed with piddle.py (passes discipline.py)
#
#
# ToDo: for TKCanvas
# make sure that fontHeight() is returning an appropriate measure. Where is this info?
#
# $Log: pidTK.py,v $
# Revision 1.1 2002/07/12 18:34:47 glandrum
# added
#
# Revision 1.6 2000/11/03 00:56:57 clee
# fixed sizing error in TKCanvas
#
# Revision 1.5 2000/11/03 00:25:37 clee
# removed reference to "BaseTKCanvas" (should just use TKCanvas as default)
#
# Revision 1.4 2000/10/29 19:35:31 clee
# eliminated BaseTKCanvas in favor of straightforward "TKCanvas" name
#
# Revision 1.3 2000/10/29 01:57:41 clee
# - added scrollbar support to both TKCanvas and TKCanvasPIL
# - added getTKCanvas() access method to TKCanvasPIL
#
# Revision 1.2 2000/10/15 00:47:17 clee
# commit before continuing after getting pil to work as package
#
# Revision 1.1.1.1 2000/09/27 03:53:15 clee
# Simple Platform Independent Graphics
#
# Revision 1.6 2000/04/06 01:55:34 pmagwene
# - TKCanvas now uses multiple inheritance from Tkinter.Canvas and piddle.Canvas
# * for the most part works much like a normal Tkinter.Canvas object
# - TKCanvas draws rotated strings using PIL image, other objects using normal Tk calls
# - Minor fixes to FontManager and TKCanvas so can specify root window other than Tk()
# - Removed Quit/Clear buttons from default canvas
#
# Revision 1.5 2000/03/12 07:07:42 clee
# sync with 1_x
#
# Revision 1.4 2000/02/26 23:12:42 clee
# turn off compression by default on piddlePDF
# add doc string to new pil-based piddleTK
#
# Revision 1.3 2000/02/26 21:23:19 clee
# update that makes PIL based TKCanvas the default Canvas for TK.
# Updated piddletest.py. Also, added clear() methdo to piddlePIL's
# canvas it clears to "white" is this correct behavior? Not well
# specified in current documents.
#
class FontManager:
__alt_faces = {"serif": "Times", "sansserif": "Helvetica", "monospaced": "Courier"}
def __init__(self, master):
self.master = master
self.font_cache = {}
# the main interface
def stringWidth(self, s, font):
tkfont = self.piddleToTkFont(font)
return tkfont.measure(s)
def fontHeight(self, font):
tkfont = self.piddleToTkFont(font)
return self._tkfontHeight(tkfont)
def fontAscent(self, font):
tkfont = self.piddleToTkFont(font)
return self._tkfontAscent(tkfont)
def fontDescent(self, font):
tkfont = self.piddleToTkFont(font)
return self._tkfontDescent(tkfont)
def getTkFontString(self, font):
"""Return a string suitable to pass as the -font option to
    a Tk widget based on the piddle-style FONT"""
tkfont = self.piddleToTkFont(font)
# XXX: should just return the internal tk font name?
# return str(tkfont)
return ('-family %(family)s -size %(size)s '
'-weight %(weight)s -slant %(slant)s '
'-underline %(underline)s' % tkfont.config())
def getTkFontName(self, font):
"""Return a the name associated with the piddle-style FONT"""
tkfont = self.piddleToTkFont(font)
return str(tkfont)
def piddleToTkFont(self, font):
"""Return a tkFont instance based on the pid-style FONT"""
if font is None:
return ''
#default 12 pt, "Times", non-bold, non-italic
size = 12
family = "Times"
weight = "normal"
slant = "roman"
underline = "false"
if font.face:
# check if the user specified a generic face type
      # like serif or monospaced. check is case-insensitive.
f = string.lower(font.face)
if self.__alt_faces.has_key(f):
family = self.__alt_faces[f]
else:
family = font.face
size = font.size or 12
if font.bold:
weight = "bold"
if font.italic:
slant = "italic"
if font.underline:
underline = 'true'
# ugh... is there a better way to do this?
key = (family, size, weight, slant, underline)
# check if we've already seen this font.
if self.font_cache.has_key(key):
# yep, don't bother creating a new one. just fetch it.
font = self.font_cache[key]
else:
# nope, let's create a new tk font.
# this way we will return info about the actual font
# selected by Tk, which may be different than what we ask
      # for if it's not available.
font = tkFont.Font(self.master, family=family, size=size, weight=weight, slant=slant,
underline=underline)
self.font_cache[(family, size, weight, slant, underline)] = font
return font
def _tkfontAscent(self, tkfont):
return tkfont.metrics("ascent")
def _tkfontDescent(self, tkfont):
return tkfont.metrics("descent")
class TKCanvas(tk.Canvas, rdkit.sping.pid.Canvas):
__TRANSPARENT = '' # transparent for Tk color
def __init__(self,
size=(300, 300),
name="sping.TK",
master=None,
scrollingViewPortSize=None, # a 2-tuple to define the size of the viewport
**kw):
"""This canvas allows you to add a tk.Canvas with a sping API for drawing.
    To add scrollbars, the simplest method is to set the 'scrollingViewPortSize'
    equal to a tuple that describes the width and height of the visible portion
of the canvas on screen. This sets scrollregion=(0,0, size[0], size[1]).
Then you can add scrollbars as you would any tk.Canvas.
Note, because this is a subclass of tk.Canvas, you can use the normal keywords
to specify a tk.Canvas with scrollbars, however, you should then be careful to
set the "scrollregion" option to the same size as the 'size' passed to __init__.
Tkinter's scrollregion option essentially makes 'size' ignored. """
    rdkit.sping.pid.Canvas.__init__(self, size=size, name=name)
if scrollingViewPortSize: # turn on ability to scroll
kw["scrollregion"] = (0, 0, size[0], size[1])
kw["height"] = scrollingViewPortSize[0]
kw["width"] = scrollingViewPortSize[1]
else:
kw["width"] = size[0]
kw["height"] = size[1]
apply(tk.Canvas.__init__, (self, master), kw) # use kw to pass other tk.Canvas options
self.config(background="white")
self.width, self.height = size
self._font_manager = FontManager(self)
self._configure()
self._item_ids = []
self._images = []
def _configure(self):
pass
def _display(self):
self.flush()
self.mainloop()
def _quit(self):
self.quit()
# Hmmm...the postscript generated by this causes my Ghostscript to barf...
def _to_ps_file(self, filename):
self.postscript(file=filename)
def isInteractive(self):
return 0
def onOver(self, event):
pass
def onClick(self, event):
pass
def onKey(self, event):
pass
def flush(self):
tk.Canvas.update(self)
def clear(self):
map(self.delete, self._item_ids)
self._item_ids = []
def _colorToTkColor(self, c):
return "#%02X%02X%02X" % (int(c.red * 255), int(c.green * 255), int(c.blue * 255))
def _getTkColor(self, color, defaultColor):
if color is None:
color = defaultColor
if color is rdkit.sping.pid.transparent:
color = self.__TRANSPARENT
else:
color = self._colorToTkColor(color)
return color
def drawLine(self, x1, y1, x2, y2, color=None, width=None):
color = self._getTkColor(color, self.defaultLineColor)
if width is None:
width = self.defaultLineWidth
new_item = self.create_line(x1, y1, x2, y2, fill=color, width=width)
self._item_ids.append(new_item)
# NYI: curve with fill
#def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4,
# edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
#
def stringWidth(self, s, font=None):
return self._font_manager.stringWidth(s, font or self.defaultFont)
def fontAscent(self, font=None):
return self._font_manager.fontAscent(font or self.defaultFont)
def fontDescent(self, font=None):
return self._font_manager.fontDescent(font or self.defaultFont)
def drawString(self, s, x, y, font=None, color=None, angle=None):
if angle:
try:
self._drawRotatedString(s, x, y, font, color, angle)
return
except ImportError:
print("PIL not available. Using unrotated strings.")
# fudge factor for TK on linux (at least)
# strings are being drawn using create_text in canvas
y = y - self.fontHeight(font) * .28 # empirical
#y = y - self.fontDescent(font)
color = self._getTkColor(color, self.defaultLineColor)
font = self._font_manager.getTkFontString(font or self.defaultFont)
new_item = self.create_text(x, y, text=s, font=font, fill=color, anchor=Tkinter.W)
self._item_ids.append(new_item)
def _drawRotatedString(self, s, x, y, font=None, color=None, angle=0):
# we depend on PIL for rotated strings so watch for changes in PIL
try:
import rdkit.sping.PIL.pidPIL
from PIL import Image, ImageTk
pp = rdkit.sping.PIL.pidPIL
except ImportError:
raise ImportError("Rotated strings only possible with PIL support")
pilCan = pp.PILCanvas(size=(self.width, self.height))
pilCan.defaultFont = self.defaultFont
pilCan.defaultLineColor = self.defaultLineColor
if '\n' in s or '\r' in s:
self.drawMultiLineString(s, x, y, font, color, angle)
return
if not font:
font = pilCan.defaultFont
if not color:
color = self.defaultLineColor
if color == rdkit.sping.pid.transparent:
return
# draw into an offscreen Image
tempsize = pilCan.stringWidth(s, font) * 1.2
tempimg = Image.new('RGB', (tempsize, tempsize), (0, 0, 0))
txtimg = Image.new('RGB', (tempsize, tempsize), (255, 255, 255))
from PIL import ImageDraw
temppen = ImageDraw.ImageDraw(tempimg)
temppen.setink((255, 255, 255))
pilfont = pp._pilFont(font)
if not pilfont:
raise ValueError("Bad font: %s" % font)
temppen.setfont(pilfont)
pos = [4, int(tempsize / 2 - pilCan.fontAscent(font)) - pilCan.fontDescent(font)]
temppen.text(pos, s)
pos[1] = int(tempsize / 2)
# rotate
if angle:
from math import pi, sin, cos
tempimg = tempimg.rotate(angle, Image.BILINEAR)
temppen = ImageDraw.ImageDraw(tempimg)
radians = -angle * pi / 180.0
r = tempsize / 2 - pos[0]
pos[0] = int(tempsize / 2 - r * cos(radians))
pos[1] = int(pos[1] - r * sin(radians))
###temppen.rectangle( (pos[0],pos[1],pos[0]+2,pos[1]+2) ) # PATCH for debugging
# colorize, and copy it in
mask = tempimg.convert('L').point(lambda c: c)
temppen.setink((color.red * 255, color.green * 255, color.blue * 255))
temppen.setfill(1)
temppen.rectangle((0, 0, tempsize, tempsize))
txtimg.paste(tempimg, (0, 0), mask)
##Based on code posted by John Michelson in the PIL SIG
transp = txtimg.convert("RGBA")
source = transp.split()
R, G, B, A = 0, 1, 2, 3
mask = transp.point(lambda i: i < 255 and 255) # use white as transparent
source[A].paste(mask)
transp = Image.merge(transp.mode, source) # build a new multiband image
self.drawImage(transp, x - pos[0], y - pos[1])
def drawRect(self, x1, y1, x2, y2, edgeColor=None, edgeWidth=None, fillColor=None):
fillColor = self._getTkColor(fillColor, self.defaultFillColor)
edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
if edgeWidth is None:
edgeWidth = self.defaultLineWidth
new_item = self.create_rectangle(x1, y1, x2, y2, fill=fillColor, width=edgeWidth,
outline=edgeColor)
self._item_ids.append(new_item)
# NYI:
#def drawRoundRect(self, x1,y1, x2,y2, rx=5, ry=5,
# edgeColor=None, edgeWidth=None, fillColor=None):
def drawEllipse(self, x1, y1, x2, y2, edgeColor=None, edgeWidth=None, fillColor=None):
fillColor = self._getTkColor(fillColor, self.defaultFillColor)
edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
if edgeWidth is None:
edgeWidth = self.defaultLineWidth
new_item = self.create_oval(x1, y1, x2, y2, fill=fillColor, outline=edgeColor, width=edgeWidth)
self._item_ids.append(new_item)
def drawArc(self, x1, y1, x2, y2, startAng=0, extent=360, edgeColor=None, edgeWidth=None,
fillColor=None):
fillColor = self._getTkColor(fillColor, self.defaultFillColor)
edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
if edgeWidth is None:
edgeWidth = self.defaultLineWidth
new_item = self.create_arc(x1, y1, x2, y2, start=startAng, extent=extent, fill=fillColor,
width=edgeWidth, outline=edgeColor)
self._item_ids.append(new_item)
def drawPolygon(self, pointlist, edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
fillColor = self._getTkColor(fillColor, self.defaultFillColor)
edgeColor = self._getTkColor(edgeColor, self.defaultLineColor)
if edgeWidth is None:
edgeWidth = self.defaultLineWidth
if closed:
# draw a closed shape
new_item = self.create_polygon(pointlist, fill=fillColor, width=edgeWidth, outline=edgeColor)
else:
if fillColor == self.__TRANSPARENT:
# draw open-ended set of lines
d = {'fill': edgeColor, 'width': edgeWidth}
new_item = apply(self.create_line, pointlist, d)
else:
# open filled shape.
# draw it twice:
# once as a polygon with no edge outline with the fill color
# and once as an open set of lines of the appropriate color
new_item = self.create_polygon(pointlist, fill=fillColor, outline=self.__TRANSPARENT)
self._item_ids.append(new_item)
d = {'fill': edgeColor, 'width': edgeWidth}
new_item = apply(self.create_line, pointlist, d)
self._item_ids.append(new_item)
#def drawFigure(self, partList,
# edgeColor=None, edgeWidth=None, fillColor=None):
# use default implementation
def drawImage(self, image, x1, y1, x2=None, y2=None):
try:
from PIL import ImageTk
except ImportError:
raise NotImplementedError('drawImage - require the ImageTk module')
w, h = image.size
if not x2:
x2 = w + x1
if not y2:
y2 = h + y1
if (w != x2 - x1) or (h != y2 - y1): # need to scale image
myimage = image.resize((x2 - x1, y2 - y1))
else:
myimage = image
# unless I keep a copy of this PhotoImage, it seems to be garbage collected
# and the image is removed from the display after this function. weird
itk = ImageTk.PhotoImage(myimage, master=self)
new_item = self.create_image(x1, y1, image=itk, anchor=Tkinter.NW)
self._item_ids.append(new_item)
self._images.append(itk)
try:
import rdkit.sping.PIL
class TKCanvasPIL(rdkit.sping.PIL.PILCanvas):
"""This canvas maintains a PILCanvas as its backbuffer. Drawing calls
are made to the backbuffer and flush() sends the image to the screen
using TKCanvas.
You can also save what is displayed to a file in any of the formats
supported by PIL"""
def __init__(self, size=(300, 300), name='TKCanvas', master=None, **kw):
rdkit.sping.PIL.PILCanvas.__init__(self, size=size, name=name)
self._tkcanvas = apply(TKCanvas, (size, name, master), kw)
def flush(self):
rdkit.sping.PIL.PILCanvas.flush(self) # call inherited one first
self._tkcanvas.drawImage(self._image, 0, 0) # self._image should be a PIL image
self._tkcanvas.flush()
def getTKCanvas(self):
return self._tkcanvas
except ImportError:
raise ImportError("TKCanvasPIL requires sping PIL Canvas, PIL may not be installed")
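# An illustrative usage sketch; the window size, coordinates and text below
# are arbitrary, not values used anywhere in this module:
#
#   import Tkinter
#   root = Tkinter.Tk()
#   canvas = TKCanvas(size=(300, 200), master=root)
#   canvas.pack()
#   canvas.drawLine(10, 10, 290, 190)
#   canvas.drawString("hello sping", 20, 100)
#   canvas.flush()
#   root.mainloop()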
|
jandom/rdkit
|
rdkit/sping/TK/pidTK.py
|
Python
|
bsd-3-clause
| 16,569
|
[
"RDKit"
] |
0c2dcbce16318886a434b2ba48970badab67523dd4bf6f60f5d35e0630319367
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import numpy as np
from functools import reduce
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf.cc import uccsd
from pyscf.cc import addons
from pyscf.cc import uccsd_lambda
from pyscf.cc import gccsd, gccsd_lambda
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.spin = 2
mol.build()
mf = scf.UHF(mol).run()
mycc = uccsd.UCCSD(mf)
def tearDownModule():
global mol, mf, mycc
del mol, mf, mycc
class KnownValues(unittest.TestCase):
def test_update_lambda_real(self):
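        # Cross-check of the spatial-orbital UCCSD lambda equations: build random
        # spin-orbital amplitudes, project them onto proper spin symmetry via
        # spin2spatial/spatial2spin round trips, evaluate the GCCSD lambda update
        # as the reference, and verify the UCCSD update reproduces it.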
numpy.random.seed(21)
eris = mycc.ao2mo()
gcc1 = gccsd.GCCSD(scf.addons.convert_to_ghf(mf))
eri1 = gcc1.ao2mo()
orbspin = eri1.orbspin
nocc = mol.nelectron
nvir = mol.nao_nr()*2 - nocc
t1r = numpy.random.random((nocc,nvir))*.1
t2r = numpy.random.random((nocc,nocc,nvir,nvir))*.1
t2r = t2r - t2r.transpose(1,0,2,3)
t2r = t2r - t2r.transpose(0,1,3,2)
l1r = numpy.random.random((nocc,nvir))*.1
l2r = numpy.random.random((nocc,nocc,nvir,nvir))*.1
l2r = l2r - l2r.transpose(1,0,2,3)
l2r = l2r - l2r.transpose(0,1,3,2)
t1r = addons.spin2spatial(t1r, orbspin)
t2r = addons.spin2spatial(t2r, orbspin)
t1r = addons.spatial2spin(t1r, orbspin)
t2r = addons.spatial2spin(t2r, orbspin)
l1r = addons.spin2spatial(l1r, orbspin)
l2r = addons.spin2spatial(l2r, orbspin)
l1r = addons.spatial2spin(l1r, orbspin)
l2r = addons.spatial2spin(l2r, orbspin)
imds = gccsd_lambda.make_intermediates(gcc1, t1r, t2r, eri1)
l1ref, l2ref = gccsd_lambda.update_lambda(gcc1, t1r, t2r, l1r, l2r, eri1, imds)
t1 = addons.spin2spatial(t1r, orbspin)
t2 = addons.spin2spatial(t2r, orbspin)
l1 = addons.spin2spatial(l1r, orbspin)
l2 = addons.spin2spatial(l2r, orbspin)
imds = uccsd_lambda.make_intermediates(mycc, t1, t2, eris)
l1, l2 = uccsd_lambda.update_lambda(mycc, t1, t2, l1, l2, eris, imds)
self.assertAlmostEqual(float(abs(addons.spatial2spin(l1, orbspin)-l1ref).max()), 0, 8)
self.assertAlmostEqual(float(abs(addons.spatial2spin(l2, orbspin)-l2ref).max()), 0, 8)
l1ref = addons.spin2spatial(l1ref, orbspin)
l2ref = addons.spin2spatial(l2ref, orbspin)
self.assertAlmostEqual(abs(l1[0]-l1ref[0]).max(), 0, 8)
self.assertAlmostEqual(abs(l1[1]-l1ref[1]).max(), 0, 8)
self.assertAlmostEqual(abs(l2[0]-l2ref[0]).max(), 0, 8)
self.assertAlmostEqual(abs(l2[1]-l2ref[1]).max(), 0, 8)
self.assertAlmostEqual(abs(l2[2]-l2ref[2]).max(), 0, 8)
def test_update_lambda_complex(self):
nocca, noccb = mol.nelec
nmo = mol.nao_nr()
nvira,nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.random.random((nocca,nvira))-.9,
numpy.random.random((noccb,nvirb))-.9]
l1 = [numpy.random.random((nocca,nvira))-.9,
numpy.random.random((noccb,nvirb))-.9]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
l2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
l2[0] = l2[0] - l2[0].transpose(1,0,2,3)
l2[0] = l2[0] - l2[0].transpose(0,1,3,2)
l2[2] = l2[2] - l2[2].transpose(1,0,2,3)
l2[2] = l2[2] - l2[2].transpose(0,1,3,2)
# eris = mycc.ao2mo()
# imds = make_intermediates(mycc, t1, t2, eris)
# l1new, l2new = update_lambda(mycc, t1, t2, l1, l2, eris, imds)
# print(lib.finger(l1new[0]) --104.55975252585894)
# print(lib.finger(l1new[1]) --241.12677819375281)
# print(lib.finger(l2new[0]) --0.4957533529669417)
# print(lib.finger(l2new[1]) - 15.46423057451851 )
# print(lib.finger(l2new[2]) - 5.8430776663704407)
nocca, noccb = mol.nelec
mo_a = mf.mo_coeff[0] + numpy.sin(mf.mo_coeff[0]) * .01j
mo_b = mf.mo_coeff[1] + numpy.sin(mf.mo_coeff[1]) * .01j
nao = mo_a.shape[0]
eri = ao2mo.restore(1, mf._eri, nao)
eri0aa = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_a.conj(), mo_a)
eri0ab = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_b.conj(), mo_b)
eri0bb = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_b.conj(), mo_b, mo_b.conj(), mo_b)
eri0ba = eri0ab.transpose(2,3,0,1)
nvira = nao - nocca
nvirb = nao - noccb
eris = uccsd._ChemistsERIs(mol)
eris.oooo = eri0aa[:nocca,:nocca,:nocca,:nocca].copy()
eris.ovoo = eri0aa[:nocca,nocca:,:nocca,:nocca].copy()
eris.oovv = eri0aa[:nocca,:nocca,nocca:,nocca:].copy()
eris.ovvo = eri0aa[:nocca,nocca:,nocca:,:nocca].copy()
eris.ovov = eri0aa[:nocca,nocca:,:nocca,nocca:].copy()
eris.ovvv = eri0aa[:nocca,nocca:,nocca:,nocca:].copy()
eris.vvvv = eri0aa[nocca:,nocca:,nocca:,nocca:].copy()
eris.OOOO = eri0bb[:noccb,:noccb,:noccb,:noccb].copy()
eris.OVOO = eri0bb[:noccb,noccb:,:noccb,:noccb].copy()
eris.OOVV = eri0bb[:noccb,:noccb,noccb:,noccb:].copy()
eris.OVVO = eri0bb[:noccb,noccb:,noccb:,:noccb].copy()
eris.OVOV = eri0bb[:noccb,noccb:,:noccb,noccb:].copy()
eris.OVVV = eri0bb[:noccb,noccb:,noccb:,noccb:].copy()
eris.VVVV = eri0bb[noccb:,noccb:,noccb:,noccb:].copy()
eris.ooOO = eri0ab[:nocca,:nocca,:noccb,:noccb].copy()
eris.ovOO = eri0ab[:nocca,nocca:,:noccb,:noccb].copy()
eris.ooVV = eri0ab[:nocca,:nocca,noccb:,noccb:].copy()
eris.ovVO = eri0ab[:nocca,nocca:,noccb:,:noccb].copy()
eris.ovOV = eri0ab[:nocca,nocca:,:noccb,noccb:].copy()
eris.ovVV = eri0ab[:nocca,nocca:,noccb:,noccb:].copy()
eris.vvVV = eri0ab[nocca:,nocca:,noccb:,noccb:].copy()
eris.OOoo = eri0ba[:noccb,:noccb,:nocca,:nocca].copy()
eris.OVoo = eri0ba[:noccb,noccb:,:nocca,:nocca].copy()
eris.OOvv = eri0ba[:noccb,:noccb,nocca:,nocca:].copy()
eris.OVvo = eri0ba[:noccb,noccb:,nocca:,:nocca].copy()
eris.OVov = eri0ba[:noccb,noccb:,:nocca,nocca:].copy()
eris.OVvv = eri0ba[:noccb,noccb:,nocca:,nocca:].copy()
eris.VVvv = eri0ba[noccb:,noccb:,nocca:,nocca:].copy()
eris.focka = numpy.diag(mf.mo_energy[0])
eris.fockb = numpy.diag(mf.mo_energy[1])
eris.mo_energy = mf.mo_energy
t1[0] = t1[0] + numpy.sin(t1[0]) * .05j
t1[1] = t1[1] + numpy.sin(t1[1]) * .05j
t2[0] = t2[0] + numpy.sin(t2[0]) * .05j
t2[1] = t2[1] + numpy.sin(t2[1]) * .05j
t2[2] = t2[2] + numpy.sin(t2[2]) * .05j
l1[0] = l1[0] + numpy.sin(l1[0]) * .05j
l1[1] = l1[1] + numpy.sin(l1[1]) * .05j
l2[0] = l2[0] + numpy.sin(l2[0]) * .05j
l2[1] = l2[1] + numpy.sin(l2[1]) * .05j
l2[2] = l2[2] + numpy.sin(l2[2]) * .05j
imds = uccsd_lambda.make_intermediates(mycc, t1, t2, eris)
l1new_ref, l2new_ref = uccsd_lambda.update_lambda(mycc, t1, t2, l1, l2, eris, imds)
nocc = nocca + noccb
orbspin = numpy.zeros(nao*2, dtype=int)
orbspin[1::2] = 1
orbspin[nocc-1] = 0
orbspin[nocc ] = 1
eri1 = numpy.zeros([nao*2]*4, dtype=numpy.complex128)
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
eri1[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa] = eri0aa
eri1[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb] = eri0ab
eri1[idxb[:,None,None,None],idxb[:,None,None],idxa[:,None],idxa] = eri0ba
eri1[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb] = eri0bb
eri1 = eri1.transpose(0,2,1,3) - eri1.transpose(0,2,3,1)
erig = gccsd._PhysicistsERIs()
erig.oooo = eri1[:nocc,:nocc,:nocc,:nocc].copy()
erig.ooov = eri1[:nocc,:nocc,:nocc,nocc:].copy()
erig.ovov = eri1[:nocc,nocc:,:nocc,nocc:].copy()
erig.ovvo = eri1[:nocc,nocc:,nocc:,:nocc].copy()
erig.oovv = eri1[:nocc,:nocc,nocc:,nocc:].copy()
erig.ovvv = eri1[:nocc,nocc:,nocc:,nocc:].copy()
erig.vvvv = eri1[nocc:,nocc:,nocc:,nocc:].copy()
mo_e = numpy.empty(nao*2)
mo_e[orbspin==0] = mf.mo_energy[0]
mo_e[orbspin==1] = mf.mo_energy[1]
erig.fock = numpy.diag(mo_e)
erig.mo_energy = mo_e.real
myccg = gccsd.GCCSD(scf.addons.convert_to_ghf(mf))
t1 = myccg.spatial2spin(t1, orbspin)
t2 = myccg.spatial2spin(t2, orbspin)
l1 = myccg.spatial2spin(l1, orbspin)
l2 = myccg.spatial2spin(l2, orbspin)
imds = gccsd_lambda.make_intermediates(myccg, t1, t2, erig)
l1new, l2new = gccsd_lambda.update_lambda(myccg, t1, t2, l1, l2, erig, imds)
l1new = myccg.spin2spatial(l1new, orbspin)
l2new = myccg.spin2spatial(l2new, orbspin)
self.assertAlmostEqual(abs(l1new[0] - l1new_ref[0]).max(), 0, 11)
self.assertAlmostEqual(abs(l1new[1] - l1new_ref[1]).max(), 0, 11)
self.assertAlmostEqual(abs(l2new[0] - l2new_ref[0]).max(), 0, 11)
self.assertAlmostEqual(abs(l2new[1] - l2new_ref[1]).max(), 0, 11)
self.assertAlmostEqual(abs(l2new[2] - l2new_ref[2]).max(), 0, 11)
if __name__ == "__main__":
print("Full Tests for UCCSD lambda")
unittest.main()
|
sunqm/pyscf
|
pyscf/cc/test/test_uccsd_lambda.py
|
Python
|
apache-2.0
| 10,539
|
[
"PySCF"
] |
6730675b24ce89cb9e0b3b436652c241db5fc5398d6b02b83551e8f0091241f9
|
"""Example using the operator of fixed-template linearized deformation.
The linearized deformation operator with fixed template (image) ``I`` maps
a given displacement field ``v`` to the function ``x --> I(x + v(x))``.
This example consider a 2D case, where the displacement field ``v``
is a Gaussian in each component, with positive sign in the first and
negative sign in the second component. Note that in the deformed image,
the value at ``x`` is **taken from** the original image at ``x + v(x)``,
hence the values are moved by ``-v(x)`` when comparing deformed and
original templates.
The derivative and its adjoint are based on the deformation of the
gradient of the template, hence the result is expected to be some kind of
edge image or "edge vector field", respectively.
"""
import numpy as np
import odl
# --- Create template and displacement field --- #
# Template space: discretized functions on the rectangle [-1, 1]^2 with
# 100 samples per dimension.
templ_space = odl.uniform_discr([-1, -1], [1, 1], (100, 100))
# The template is a rectangle of size 1.0 x 0.5
template = odl.phantom.cuboid(templ_space, [-0.5, -0.25], [0.5, 0.25])
# Create a product space for displacement field
disp_field_space = templ_space.tangent_bundle
# Define a displacement field that bends the template a bit towards the
# upper left. We use a list of 2 functions and discretize it using the
# disp_field_space.element() method.
sigma = 0.5
disp_func = [
lambda x: 0.4 * np.exp(-(x[0] ** 2 + x[1] ** 2) / (2 * sigma ** 2)),
lambda x: -0.3 * np.exp(-(x[0] ** 2 + x[1] ** 2) / (2 * sigma ** 2))]
disp_field = disp_field_space.element(disp_func)
# Show template and displacement field
template.show('Template')
disp_field.show('Displacement field')
# --- Apply LinDeformFixedTempl, derivative and its adjoint --- #
# Initialize the deformation operator with fixed template
deform_op = odl.deform.LinDeformFixedTempl(template)
# Apply the deformation operator to get the deformed template.
deformed_template = deform_op(disp_field)
# Initialize the derivative of the deformation operator at the
# given displacement field. The result is again an operator.
deform_op_deriv = deform_op.derivative(disp_field)
# Evaluate the derivative at the vector field that has value 1 everywhere,
# i.e. the global shift by (-1, -1).
deriv_result = deform_op_deriv(disp_field_space.one())
# Evaluate the adjoint of derivative at the image that is 1 everywhere.
deriv_adj_result = deform_op_deriv.adjoint(templ_space.one())
# Show results
deformed_template.show('Deformed template')
deriv_result.show('Operator derivative applied to one()')
deriv_adj_result.show('Adjoint of the derivative applied to one()',
force_show=True)
|
kohr-h/odl
|
examples/deform/linearized_fixed_template.py
|
Python
|
mpl-2.0
| 2,749
|
[
"Gaussian"
] |
843942aac519d9fb6c843727a1de87c577f8e17b6ab41359e4809f054989e4d7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import subprocess
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import z2pack
# Edit the paths to your Quantum Espresso and Wannier90 here
qedir = '/home/greschd/software/spack/opt/spack/linux-ubuntu20.04-skylake/gcc-9.3.0/quantum-espresso-6.6-hzv46p4wdlvw6rrgq3cqnz7fwwzd7um6/bin'
wandir = '/home/greschd/software/spack/opt/spack/linux-ubuntu20.04-skylake/gcc-9.3.0/wannier90-3.1.0-ldgbc7e5fmfkvjiyf2xiyzfopyr5haqa/bin'
# Commands to run pw, pw2wannier90, wannier90
mpirun = 'mpirun -np 4 '
pwcmd = mpirun + qedir + '/pw.x '
pw2wancmd = mpirun + qedir + '/pw2wannier90.x '
wancmd = wandir + '/wannier90.x'
z2cmd = (
wancmd + ' -pp bi;' + pwcmd + '< bi.nscf.in >& pw.log;' + pw2wancmd +
'< bi.pw2wan.in >& pw2wan.log;'
)
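# The chained command first runs Wannier90 in pre-processing mode (-pp) to
# produce bi.nnkp, then the non-self-consistent QE run, and finally
# pw2wannier90 to write the overlap file bi.mmn that Z2Pack reads.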
# creating the results folder, running the SCF calculation if needed
if not os.path.exists('./plots'):
os.mkdir('./plots')
if not os.path.exists('./results'):
os.mkdir('./results')
if not os.path.exists('./scf'):
os.makedirs('./scf')
print("Running the scf calculation")
shutil.copyfile('input/bi.scf.in', 'scf/bi.scf.in')
subprocess.call(pwcmd + ' < bi.scf.in > scf.out', shell=True, cwd='./scf')
# Copying the lattice parameters from scf/bi.xml into bi.win
cell = ET.parse('scf/bi.xml').find('output').find('atomic_structure'
).find('cell')
unit = cell.get('unit', 'Bohr')
lattice = '\n'.join([cell.find(vec).text for vec in ['a1', 'a2', 'a3']])
with open('input/tpl_bi.win', 'r') as f:
tpl_bi_win = f.read()
with open('input/bi.win', 'w') as f:
f.write(tpl_bi_win.format(unit=unit, lattice=lattice))
# Creating the System. Note that the SCF charge file does not need to be
# copied, but instead can be referenced in the .files file.
# The k-points input is appended to the .in file
input_files = [
'input/' + name for name in ["bi.nscf.in", "bi.pw2wan.in", "bi.win"]
]
system = z2pack.fp.System(
input_files=input_files,
kpt_fct=[z2pack.fp.kpoint.qe_explicit, z2pack.fp.kpoint.wannier90_full],
kpt_path=["bi.nscf.in", "bi.win"],
command=z2cmd,
executable='/bin/bash',
mmn_path='bi.mmn'
)
# Run the WCC calculations
result_0 = z2pack.surface.run(
system=system,
surface=lambda s, t: [0, s / 2, t],
save_file='./results/res_0.json',
min_neighbour_dist=1e-3,
load=True
)
result_1 = z2pack.surface.run(
system=system,
surface=lambda s, t: [0.5, s / 2, t],
save_file='./results/res_1.json',
min_neighbour_dist=1e-3,
load=True
)
# Combining the two plots
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(9, 5))
z2pack.plot.wcc(result_0, axis=ax[0])
z2pack.plot.wcc(result_1, axis=ax[1])
plt.savefig('plots/plot.pdf', bbox_inches='tight')
print(
'Z2 topological invariant at kx = 0: {0}'.format(
z2pack.invariant.z2(result_0)
)
)
print(
'Z2 topological invariant at kx = 0.5: {0}'.format(
z2pack.invariant.z2(result_1)
)
)
|
Z2PackDev/Z2Pack
|
examples/fp/espresso/6.2/Bi/run.py
|
Python
|
gpl-3.0
| 3,047
|
[
"ESPResSo",
"Quantum ESPRESSO",
"Wannier90"
] |
26a2072afb17a3846e764dc71283b2def6954b9c46c0ebb04a03c1848aa4292b
|
import LFPy
import neuron # new
import numpy as np
import pickle
import os
def simulate_cells_serially(stimolo,
cell_ids,
data_name,
population_parameters,
cell_parameters,
synapse_parameters,
synapse_position_parameters,
electrode_parameters):
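    # Simulate each cell in cell_ids one after the other: wire up its synaptic
    # input from the stored network spike trains and connectivity matrix, run
    # the LFPy/NEURON simulation, and collect the soma potential and the
    # extracellular LFP contribution of every cell.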
print('input' + str(stimolo))
# Load emitted spikes, 1st column: spike time, 2nd column: pre cell id
# print "Loading locally emitted spikes"
local_spikes_filename = population_parameters['input_dir'] + \
'spiketimes_' + str(stimolo) + '.1.out'
local_spikes = np.loadtxt(local_spikes_filename)
local_sp_times = local_spikes[:, 0]
local_sp_ids = local_spikes[:, 1]
# Load connectivity, 1st column post id, 2nd column pre id
connectivity_filename = population_parameters['input_dir'] \
+ 'Cmatrix_' + str(stimolo) + '.1.out'
connectivity_file = open(connectivity_filename, "r")
lines = connectivity_file.readlines()
incoming_connections = []
for line in lines:
incoming_connections.append(np.array(line.split(), dtype='int'))
connectivity_file.close()
pre_cells = {}
pre_cells['exc_exc'] = population_parameters['exc_ids']
pre_cells['exc_inh'] = population_parameters['exc_ids']
pre_cells['inh_exc'] = population_parameters['inh_ids']
pre_cells['inh_inh'] = population_parameters['inh_ids']
# n_thalamic_synapses = population_parameters['n_thalamic_synapses']
# n_external_synapses = population_parameters['n_external_synapses']
# setup data dictionary
output_data = {}
output_data['somav'] = {}
output_data['LFP'] = {}
output_data['somapos'] = {}
output_data['tot_isyn'] = {}
for i_cell, cell_id in enumerate(cell_ids):
if cell_id in population_parameters['exc_ids']:
print(str(cell_id))
cell_parameters.update({'morphology': 'pyr1.hoc'})
cell_parameters['passive_parameters'].update(
{'g_pas': 1. / 20000.})
elif cell_id in population_parameters['inh_ids']:
indin = int(cell_id - max(population_parameters['exc_ids']))
print(str(indin))
cell_parameters.update({'morphology': 'int1.hoc'})
cell_parameters['passive_parameters'].update(
{'g_pas': 1. / 10000.})
print("Setting up cell " + str(cell_id))
cell_seed = population_parameters['global_seed'] + cell_id
print("Setting random seed: " + str(cell_seed))
np.random.seed(cell_seed)
neuron.h('forall delete_section()') # new
cell = LFPy.Cell(**cell_parameters) # new
# load true position
if cell_id in population_parameters['exc_ids']:
cell_pos = np.loadtxt('PCsXYZ.txt')
x, y, z = cell_pos[cell_id]
elif cell_id in population_parameters['inh_ids']:
cell_pos = np.loadtxt('INTsXYZ.txt')
x, y, z = cell_pos[int(cell_id -
int(min(population_parameters['inh_ids'])))]
cell.set_pos(x=x, y=y, z=z)
if cell_id in population_parameters['exc_ids']:
local_synapse_types = ['exc_exc', 'inh_exc']
# thalamic_synapse_type = 'thalamic_exc'
# external_synapse_type = 'external_exc'
elif cell_id in population_parameters['inh_ids']:
local_synapse_types = ['exc_inh', 'inh_inh']
# thalamic_synapse_type = 'thalamic_inh'
# external_synapse_type = 'external_inh'
for synapse_type in local_synapse_types:
print("Setting up local synapses: ", synapse_type)
pre_ids = incoming_connections[cell_id]
# n_synapses = len(pre_ids)
for i_synapse, pre_id in enumerate(pre_ids):
if pre_id in pre_cells[synapse_type]:
syn_idx = int(cell.get_rand_idx_area_norm(
**synapse_position_parameters[synapse_type]))
synapse_parameters[synapse_type].update({'idx': syn_idx})
synapse = LFPy.Synapse(cell,
**synapse_parameters[synapse_type])
spike_times =\
local_sp_times[np.where(local_sp_ids == pre_id)[0]]
synapse.set_spike_times(spike_times)
print("Setting up thalamic synapses")
# Load thalamic input spike times, 1st column time,
# 2nd column post cell id
# print "Loading thalamic input spikes"
# thalamic_spikes_filename = population_parameters['input_dir'] \
# +'ths/th_'+str(stimolo)+'_'+str(cell_id)+'.out'
# print thalamic_spikes_filename
#
# thalamic_spike_times = np.loadtxt(thalamic_spikes_filename)
# synapse_ids =\
# np.random.randint(0,n_thalamic_synapses,len(thalamic_spike_times))
# for i_synapse in xrange(n_thalamic_synapses):
# syn_idx = int(cell.get_rand_idx_area_norm(\
# **synapse_position_parameters[thalamic_synapse_type]))
# synapse_parameters[thalamic_synapse_type].update({'idx':syn_idx})
# synapse = LFPy.Synapse(cell, \
# **synapse_parameters[thalamic_synapse_type])
# spike_times = \
# thalamic_spike_times[np.where(synapse_ids==i_synapse)[0]]
# synapse.set_spike_times(spike_times)
print("Setting up external synapses")
# Load external cortico-cortical input rate
# print "Loading external input spikes"
# external_spikes_filename = population_parameters['input_dir'] \
# + 'ccs/cc_'+str(stimolo)+'_'+str(cell_id)+'.out'
# external_spike_times = np.loadtxt(external_spikes_filename)
#
# synapse_ids =\
# np.random.randint(0,n_external_synapses,len(external_spike_times))
#
# for i_synapse in xrange(n_external_synapses):
# syn_idx = int(cell.get_rand_idx_area_norm(\
# **synapse_position_parameters[external_synapse_type]))
#
# synapse_parameters[external_synapse_type].update({'idx':syn_idx})
# synapse = LFPy.Synapse(cell,\
# **synapse_parameters[external_synapse_type])
# spike_times =\
# external_spike_times[np.where(synapse_ids==i_synapse)[0]]
# synapse.set_spike_times(spike_times)
# Run simulation
print("Running simulation...")
cell.simulate(rec_imem=True)
# Calculate LFP
print("Calculating LFP")
electrode = LFPy.RecExtElectrode(cell, **electrode_parameters)
electrode.calc_lfp()
# Store data
print("Storing data")
output_data['LFP'][cell_id] = electrode.LFP
output_data['somav'][cell_id] = cell.somav
output_data['somapos'][cell_id] = cell.somapos
output_data['tvec'] = cell.tvec
print("Saving data to file")
if not os.path.isdir(population_parameters['save_to_dir']):
os.mkdir(population_parameters['save_to_dir'])
print(output_data)
pickle.dump(output_data, open(
population_parameters['save_to_dir'] + data_name + str(stimolo), "wb"))
|
espenhgn/LFPy
|
examples/mazzoni_example/single_cell.py
|
Python
|
gpl-3.0
| 7,625
|
[
"NEURON"
] |
3bce907b0f726a62fbd349cd7c0c841876e9f4f656d30968216307d200e287c7
|
"""
Linter classes containing logic for checking various filetypes.
"""
import ast
import os
import re
import textwrap
from xsslint import visitors
from xsslint.reporting import ExpressionRuleViolation, FileResults, RuleViolation
from xsslint.rules import RuleSet
from xsslint.utils import Expression, ParseString, StringLines, is_skip_dir
class BaseLinter(object):
"""
BaseLinter provides some helper functions that are used by multiple linters.
"""
LINE_COMMENT_DELIM = None
def _is_valid_directory(self, skip_dirs, directory):
"""
Determines if the provided directory is a directory that could contain
a file that needs to be linted.
Arguments:
skip_dirs: The directories to be skipped.
directory: The directory to be linted.
Returns:
True if this directory should be linted for violations and False
otherwise.
"""
if is_skip_dir(skip_dirs, directory):
return False
return True
def _load_file(self, file_full_path):
"""
Loads a file into a string.
Arguments:
file_full_path: The full path of the file to be loaded.
Returns:
A string containing the files contents.
"""
with open(file_full_path, 'r') as input_file:
file_contents = input_file.read()
return file_contents.decode(encoding='utf-8')
def _load_and_check_file_is_safe(self, file_full_path, lint_function, results):
"""
Loads the Python file and checks if it is in violation.
Arguments:
file_full_path: The file to be loaded and linted.
lint_function: A function that will lint for violations. It must
take two arguments:
1) string contents of the file
2) results object
results: A FileResults to be used for this file
Returns:
The file results containing any violations.
"""
file_contents = self._load_file(file_full_path)
lint_function(file_contents, results)
return results
def _find_closing_char_index(
self, start_delim, open_char, close_char, template, start_index, num_open_chars=0, strings=None
):
"""
Finds the index of the closing char that matches the opening char.
For example, this could be used to find the end of a Mako expression,
where the open and close characters would be '{' and '}'.
Arguments:
start_delim: If provided (e.g. '${' for Mako expressions), the
closing character must be found before the next start_delim.
open_char: The opening character to be matched (e.g '{')
close_char: The closing character to be matched (e.g '}')
template: The template to be searched.
start_index: The start index of the last open char.
num_open_chars: The current number of open chars.
strings: A list of ParseStrings already parsed
Returns:
A dict containing the following, or None if unparseable:
close_char_index: The index of the closing character
strings: a list of ParseStrings
"""
strings = [] if strings is None else strings
# Find start index of an uncommented line.
start_index = self._uncommented_start_index(template, start_index)
# loop until we found something useful on an uncommented out line
while start_index is not None:
close_char_index = template.find(close_char, start_index)
if close_char_index < 0:
# If we can't find a close char, let's just quit.
return None
open_char_index = template.find(open_char, start_index, close_char_index)
parse_string = ParseString(template, start_index, close_char_index)
valid_index_list = [close_char_index]
if 0 <= open_char_index:
valid_index_list.append(open_char_index)
if parse_string.start_index is not None:
valid_index_list.append(parse_string.start_index)
min_valid_index = min(valid_index_list)
start_index = self._uncommented_start_index(template, min_valid_index)
if start_index == min_valid_index:
break
if start_index is None:
# No uncommented code to search.
return None
if parse_string.start_index == min_valid_index:
strings.append(parse_string)
if parse_string.end_index is None:
return None
else:
return self._find_closing_char_index(
start_delim, open_char, close_char, template, start_index=parse_string.end_index,
num_open_chars=num_open_chars, strings=strings
)
if open_char_index == min_valid_index:
if start_delim is not None:
# if we find another starting delim, consider this unparseable
start_delim_index = template.find(start_delim, start_index, close_char_index)
if 0 <= start_delim_index < open_char_index:
return None
return self._find_closing_char_index(
start_delim, open_char, close_char, template, start_index=open_char_index + 1,
num_open_chars=num_open_chars + 1, strings=strings
)
if num_open_chars == 0:
return {
'close_char_index': close_char_index,
'strings': strings,
}
else:
return self._find_closing_char_index(
start_delim, open_char, close_char, template, start_index=close_char_index + 1,
num_open_chars=num_open_chars - 1, strings=strings
)
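    # Illustrative sketch of how subclasses use this helper (kept in comments so it
    # never executes; the template value below is hypothetical). The Mako linter
    # calls it like this to find the "}" that closes a "${" expression:
    #
    #     result = self._find_closing_char_index(
    #         '${', '{', '}', '${ x | h }', start_index=2
    #     )
    #     # result['close_char_index'] is the index of the matching '}' and
    #     # result['strings'] lists any ParseStrings encountered on the way.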
def _uncommented_start_index(self, template, start_index):
"""
Finds the first start_index that is on an uncommented line.
Arguments:
template: The template to be searched.
start_index: The start index of the last open char.
Returns:
If start_index is on an uncommented out line, returns start_index.
Otherwise, returns the start_index of the first line that is
uncommented, if there is one. Otherwise, returns None.
"""
if self.LINE_COMMENT_DELIM is not None:
line_start_index = StringLines(template).index_to_line_start_index(start_index)
            uncommented_line_start_index_regex = re.compile(r"^(?!\s*{})".format(self.LINE_COMMENT_DELIM), re.MULTILINE)
# Finds the line start index of the first uncommented line, including the current line.
match = uncommented_line_start_index_regex.search(template, line_start_index)
if match is None:
# No uncommented lines.
return None
elif match.start() < start_index:
# Current line is uncommented, so return original start_index.
return start_index
else:
# Return start of first uncommented line.
return match.start()
else:
            # No line comment delimiter, so this acts as a no-op.
return start_index
class UnderscoreTemplateLinter(BaseLinter):
"""
The linter for Underscore.js template files.
"""
ruleset = RuleSet(
underscore_not_escaped='underscore-not-escaped',
)
def __init__(self, skip_dirs=None):
"""
Init method.
"""
super(UnderscoreTemplateLinter, self).__init__()
self._skip_underscore_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is an Underscore template file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential underscore file
Returns:
The file results containing any violations.
"""
full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(full_path)
if not self._is_valid_directory(self._skip_underscore_dirs, directory):
return results
if not file_name.lower().endswith('.underscore'):
return results
return self._load_and_check_file_is_safe(full_path, self.check_underscore_file_is_safe, results)
def check_underscore_file_is_safe(self, underscore_template, results):
"""
Checks for violations in an Underscore.js template.
Arguments:
underscore_template: The contents of the Underscore.js template.
results: A file results objects to which violations will be added.
"""
self._check_underscore_expressions(underscore_template, results)
results.prepare_results(underscore_template)
def _check_underscore_expressions(self, underscore_template, results):
"""
Searches for Underscore.js expressions that contain violations.
Arguments:
underscore_template: The contents of the Underscore.js template.
results: A list of results into which violations will be added.
"""
expressions = self._find_unescaped_expressions(underscore_template)
for expression in expressions:
if not self._is_safe_unescaped_expression(expression):
results.violations.append(ExpressionRuleViolation(
self.ruleset.underscore_not_escaped, expression
))
def _is_safe_unescaped_expression(self, expression):
"""
Determines whether an expression is safely escaped, even though it is
using the expression syntax that doesn't itself escape (i.e. <%= ).
In some cases it is ok to not use the Underscore.js template escape
(i.e. <%- ) because the escaping is happening inside the expression.
Safe examples::
<%= HtmlUtils.ensureHtml(message) %>
<%= _.escape(message) %>
Arguments:
expression: The Expression being checked.
Returns:
True if the Expression has been safely escaped, and False otherwise.
"""
if expression.expression_inner.startswith('HtmlUtils.'):
return True
if expression.expression_inner.startswith('_.escape('):
return True
return False
def _find_unescaped_expressions(self, underscore_template):
"""
Returns a list of unsafe expressions.
At this time all expressions that are unescaped are considered unsafe.
Arguments:
underscore_template: The contents of the Underscore.js template.
Returns:
A list of Expressions.
"""
unescaped_expression_regex = re.compile("<%=.*?%>", re.DOTALL)
expressions = []
for match in unescaped_expression_regex.finditer(underscore_template):
expression = Expression(
match.start(), match.end(), template=underscore_template, start_delim="<%=", end_delim="%>"
)
expressions.append(expression)
return expressions
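# Usage sketch for the UnderscoreTemplateLinter above (illustrative only; the
# directory, file name, and skip list are hypothetical):
#
#     linter = UnderscoreTemplateLinter(skip_dirs=('spec',))
#     results = linter.process_file('lms/templates', 'example.underscore')
#     for violation in results.violations:
#         print(violation.rule)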
class JavaScriptLinter(BaseLinter):
"""
The linter for JavaScript files.
"""
LINE_COMMENT_DELIM = "//"
ruleset = RuleSet(
javascript_jquery_append='javascript-jquery-append',
javascript_jquery_prepend='javascript-jquery-prepend',
javascript_jquery_insertion='javascript-jquery-insertion',
javascript_jquery_insert_into_target='javascript-jquery-insert-into-target',
javascript_jquery_html='javascript-jquery-html',
javascript_concat_html='javascript-concat-html',
javascript_escape='javascript-escape',
javascript_interpolate='javascript-interpolate',
)
def __init__(self, underscore_linter, javascript_skip_dirs=None):
"""
Init method.
"""
super(JavaScriptLinter, self).__init__()
self.underscore_linter = underscore_linter
self.ruleset = self.ruleset + self.underscore_linter.ruleset
self._skip_javascript_dirs = javascript_skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a JavaScript file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential JavaScript file
Returns:
The file results containing any violations.
"""
file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(file_full_path)
if not results.is_file:
return results
if file_name.lower().endswith('.js') and not file_name.lower().endswith('.min.js'):
skip_dirs = self._skip_javascript_dirs
else:
return results
if not self._is_valid_directory(skip_dirs, directory):
return results
return self._load_and_check_file_is_safe(file_full_path, self.check_javascript_file_is_safe, results)
def check_javascript_file_is_safe(self, file_contents, results):
"""
Checks for violations in a JavaScript file.
Arguments:
file_contents: The contents of the JavaScript file.
results: A file results objects to which violations will be added.
"""
no_caller_check = None
no_argument_check = None
self._check_jquery_function(
file_contents, "append", self.ruleset.javascript_jquery_append, no_caller_check,
self._is_jquery_argument_safe, results
)
self._check_jquery_function(
file_contents, "prepend", self.ruleset.javascript_jquery_prepend, no_caller_check,
self._is_jquery_argument_safe, results
)
self._check_jquery_function(
file_contents, "unwrap|wrap|wrapAll|wrapInner|after|before|replaceAll|replaceWith",
self.ruleset.javascript_jquery_insertion, no_caller_check, self._is_jquery_argument_safe, results
)
self._check_jquery_function(
file_contents, "appendTo|prependTo|insertAfter|insertBefore",
self.ruleset.javascript_jquery_insert_into_target, self._is_jquery_insert_caller_safe, no_argument_check, results
)
self._check_jquery_function(
file_contents, "html", self.ruleset.javascript_jquery_html, no_caller_check,
self._is_jquery_html_argument_safe, results
)
self._check_javascript_interpolate(file_contents, results)
self._check_javascript_escape(file_contents, results)
self._check_concat_with_html(file_contents, self.ruleset.javascript_concat_html, results)
self.underscore_linter.check_underscore_file_is_safe(file_contents, results)
results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)
def _get_expression_for_function(self, file_contents, function_start_match):
"""
Returns an expression that matches the function call opened with
function_start_match.
Arguments:
file_contents: The contents of the JavaScript file.
function_start_match: A regex match representing the start of the function
call (e.g. ".escape(").
Returns:
An Expression that best matches the function.
"""
start_index = function_start_match.start()
inner_start_index = function_start_match.end()
result = self._find_closing_char_index(
None, "(", ")", file_contents, start_index=inner_start_index
)
if result is not None:
end_index = result['close_char_index'] + 1
expression = Expression(
start_index, end_index, template=file_contents, start_delim=function_start_match.group(), end_delim=")"
)
else:
expression = Expression(start_index)
return expression
def _check_javascript_interpolate(self, file_contents, results):
"""
Checks that interpolate() calls are safe.
Only use of StringUtils.interpolate() or HtmlUtils.interpolateText()
are safe.
Arguments:
file_contents: The contents of the JavaScript file.
results: A file results objects to which violations will be added.
"""
# Ignores calls starting with "StringUtils.", because those are safe
regex = re.compile(r"(?<!StringUtils).interpolate\(")
for function_match in regex.finditer(file_contents):
expression = self._get_expression_for_function(file_contents, function_match)
results.violations.append(ExpressionRuleViolation(self.ruleset.javascript_interpolate, expression))
def _check_javascript_escape(self, file_contents, results):
"""
Checks that only necessary escape() are used.
Allows for _.escape(), although this shouldn't be the recommendation.
Arguments:
file_contents: The contents of the JavaScript file.
results: A file results objects to which violations will be added.
"""
# Ignores calls starting with "_.", because those are safe
        regex = re.compile(r"(?<!_).escape\(")
for function_match in regex.finditer(file_contents):
expression = self._get_expression_for_function(file_contents, function_match)
results.violations.append(ExpressionRuleViolation(self.ruleset.javascript_escape, expression))
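    # Examples of what the two checks above flag or allow (comments only; the
    # variable names are hypothetical):
    #
    #     value.interpolate(template, data)            # flagged: javascript-interpolate
    #     StringUtils.interpolate(template, data)      # allowed by the negative lookbehind
    #     custom.escape(message)                       # flagged: javascript-escape
    #     _.escape(message)                            # allowed by the negative lookbehind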
def _check_jquery_function(self, file_contents, function_names, rule, is_caller_safe, is_argument_safe, results):
"""
Checks that the JQuery function_names (e.g. append(), prepend()) calls
are safe.
Arguments:
file_contents: The contents of the JavaScript file.
function_names: A pipe delimited list of names of the functions
(e.g. "wrap|after|before").
rule: The name of the rule to use for validation errors (e.g.
self.ruleset.javascript_jquery_append).
is_caller_safe: A function to test if caller of the JQuery function
is safe.
is_argument_safe: A function to test if the argument passed to the
JQuery function is safe.
results: A file results objects to which violations will be added.
"""
# Ignores calls starting with "HtmlUtils.", because those are safe
regex = re.compile(r"(?<!HtmlUtils).(?:{})\(".format(function_names))
for function_match in regex.finditer(file_contents):
is_violation = True
expression = self._get_expression_for_function(file_contents, function_match)
if expression.end_index is not None:
start_index = expression.start_index
inner_start_index = function_match.end()
close_paren_index = expression.end_index - 1
function_argument = file_contents[inner_start_index:close_paren_index].strip()
if is_argument_safe is not None and is_caller_safe is None:
is_violation = is_argument_safe(function_argument) is False
elif is_caller_safe is not None and is_argument_safe is None:
line_start_index = StringLines(file_contents).index_to_line_start_index(start_index)
caller_line_start = file_contents[line_start_index:start_index]
is_violation = is_caller_safe(caller_line_start) is False
else:
raise ValueError("Must supply either is_argument_safe, or is_caller_safe, but not both.")
if is_violation:
results.violations.append(ExpressionRuleViolation(rule, expression))
def _is_jquery_argument_safe_html_utils_call(self, argument):
"""
Checks that the argument sent to a jQuery DOM insertion function is a
safe call to HtmlUtils.
A safe argument is of the form:
- HtmlUtils.xxx(anything).toString()
- edx.HtmlUtils.xxx(anything).toString()
Arguments:
argument: The argument sent to the jQuery function (e.g.
append(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
# match on HtmlUtils.xxx().toString() or edx.HtmlUtils
match = re.search(r"(?:edx\.)?HtmlUtils\.[a-zA-Z0-9]+\(.*\)\.toString\(\)", argument)
return match is not None and match.group() == argument
def _is_jquery_argument_safe(self, argument):
"""
Check the argument sent to a jQuery DOM insertion function (e.g.
append()) to check if it is safe.
Safe arguments include:
- the argument can end with ".el", ".$el" (with no concatenation)
- the argument can be a single variable ending in "El" or starting with
"$". For example, "testEl" or "$test".
- the argument can be a single string literal with no HTML tags
- the argument can be a call to $() with the first argument a string
literal with a single HTML tag. For example, ".append($('<br/>'))"
or ".append($('<br/>'))".
- the argument can be a call to HtmlUtils.xxx(html).toString()
Arguments:
argument: The argument sent to the jQuery function (e.g.
append(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
match_variable_name = re.search("[_$a-zA-Z]+[_$a-zA-Z0-9]*", argument)
if match_variable_name is not None and match_variable_name.group() == argument:
if argument.endswith('El') or argument.startswith('$'):
return True
elif argument.startswith('"') or argument.startswith("'"):
# a single literal string with no HTML is ok
# 1. it gets rid of false negatives for non-jquery calls (e.g. graph.append("g"))
# 2. JQuery will treat this as a plain text string and will escape any & if needed.
string = ParseString(argument, 0, len(argument))
if string.string == argument and "<" not in argument:
return True
elif argument.startswith('$('):
# match on JQuery calls with single string and single HTML tag
# Examples:
# $("<span>")
# $("<div/>")
# $("<div/>", {...})
match = re.search(r"""\$\(\s*['"]<[a-zA-Z0-9]+\s*[/]?>['"]\s*[,)]""", argument)
if match is not None:
return True
elif self._is_jquery_argument_safe_html_utils_call(argument):
return True
# check rules that shouldn't use concatenation
elif "+" not in argument:
if argument.endswith('.el') or argument.endswith('.$el'):
return True
return False
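    # Concrete examples of the rules above (comments only; the element and variable
    # names are hypothetical):
    #
    #     $('#parent').append(messageEl)                          # safe: variable ending in "El"
    #     $('#parent').append(HtmlUtils.HTML(text).toString())    # safe: HtmlUtils call
    #     $('#parent').append('<p>' + message + '</p>')           # violation: javascript-jquery-append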
def _is_jquery_html_argument_safe(self, argument):
"""
Check the argument sent to the jQuery html() function to check if it is
safe.
Safe arguments to html():
- no argument (i.e. getter rather than setter)
- empty string is safe
- the argument can be a call to HtmlUtils.xxx(html).toString()
Arguments:
argument: The argument sent to html() in code (i.e. html(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
if argument == "" or argument == "''" or argument == '""':
return True
elif self._is_jquery_argument_safe_html_utils_call(argument):
return True
return False
def _is_jquery_insert_caller_safe(self, caller_line_start):
"""
Check that the caller of a jQuery DOM insertion function that takes a
target is safe (e.g. thisEl.appendTo(target)).
If original line was::
draggableObj.iconEl.appendTo(draggableObj.containerEl);
Parameter caller_line_start would be:
draggableObj.iconEl
Safe callers include:
- the caller can be ".el", ".$el"
- the caller can be a single variable ending in "El" or starting with
"$". For example, "testEl" or "$test".
Arguments:
caller_line_start: The line leading up to the jQuery function call.
Returns:
True if the caller is safe, and False otherwise.
"""
# matches end of line for caller, which can't itself be a function
        caller_match = re.search(r"(?:\s*|[.])([_$a-zA-Z]+[_$a-zA-Z0-9]*)$", caller_line_start)
if caller_match is None:
return False
caller = caller_match.group(1)
if caller is None:
return False
elif caller.endswith('El') or caller.startswith('$'):
return True
elif caller == 'el' or caller == 'parentNode':
return True
return False
def _check_concat_with_html(self, file_contents, rule, results):
"""
Checks that strings with HTML are not concatenated
Arguments:
file_contents: The contents of the JavaScript file.
rule: The rule that was violated if this fails.
results: A file results objects to which violations will be added.
"""
lines = StringLines(file_contents)
last_expression = None
# Match quoted strings that starts with '<' or ends with '>'.
regex_string_with_html = r"""
{quote} # Opening quote.
(
\s*< # Starts with '<' (ignoring spaces)
([^{quote}]|[\\]{quote})* # followed by anything but a closing quote.
| # Or,
([^{quote}]|[\\]{quote})* # Anything but a closing quote
>\s* # ending with '>' (ignoring spaces)
)
{quote} # Closing quote.
"""
# Match single or double quote.
regex_string_with_html = "({}|{})".format(
regex_string_with_html.format(quote="'"),
regex_string_with_html.format(quote='"'),
)
# Match quoted HTML strings next to a '+'.
regex_concat_with_html = re.compile(
r"(\+\s*{string_with_html}|{string_with_html}\s*\+)".format(
string_with_html=regex_string_with_html,
),
re.VERBOSE
)
for match in regex_concat_with_html.finditer(file_contents):
found_new_violation = False
if last_expression is not None:
last_line = lines.index_to_line_number(last_expression.start_index)
# check if violation should be expanded to more of the same line
if last_line == lines.index_to_line_number(match.start()):
last_expression = Expression(
last_expression.start_index, match.end(), template=file_contents
)
else:
results.violations.append(ExpressionRuleViolation(
rule, last_expression
))
found_new_violation = True
else:
found_new_violation = True
if found_new_violation:
last_expression = Expression(
match.start(), match.end(), template=file_contents
)
# add final expression
if last_expression is not None:
results.violations.append(ExpressionRuleViolation(
rule, last_expression
))
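# Example of the pattern flagged above as javascript-concat-html (comment only;
# the variable name is hypothetical):
#
#     var html = '<div class="msg">' + message + '</div>';
#
# Both literals contain HTML next to a '+', and because the matches fall on the
# same line they are merged into a single violation by the loop above.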
class PythonLinter(BaseLinter):
"""
The linter for Python files.
The current implementation of the linter does naive Python parsing. It does
not use the parser. One known issue is that parsing errors found inside a
docstring need to be disabled, rather than being automatically skipped.
Skipping docstrings is an enhancement that could be added.
"""
LINE_COMMENT_DELIM = "#"
ruleset = RuleSet(
python_parse_error='python-parse-error',
python_custom_escape='python-custom-escape',
# The Visitor classes are python-specific and should be moved into the PythonLinter once they have
# been decoupled from the MakoTemplateLinter.
) + visitors.ruleset
def __init__(self, skip_dirs=None):
"""
Init method.
"""
super(PythonLinter, self).__init__()
self._skip_python_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a Python file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential Python file
Returns:
The file results containing any violations.
"""
file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(file_full_path)
if not results.is_file:
return results
if file_name.lower().endswith('.py') is False:
return results
# skip tests.py files
# TODO: Add configuration for files and paths
if file_name.lower().endswith('tests.py'):
return results
# skip this linter code (i.e. xss_linter.py)
if file_name == os.path.basename(__file__):
return results
if not self._is_valid_directory(self._skip_python_dirs, directory):
return results
return self._load_and_check_file_is_safe(file_full_path, self.check_python_file_is_safe, results)
def check_python_file_is_safe(self, file_contents, results):
"""
Checks for violations in a Python file.
Arguments:
file_contents: The contents of the Python file.
results: A file results objects to which violations will be added.
"""
root_node = self.parse_python_code(file_contents, results)
self.check_python_code_is_safe(file_contents, root_node, results)
# Check rules specific to .py files only
# Note that in template files, the scope is different, so you can make
# different assumptions.
if root_node is not None:
# check format() rules that can be run on outer-most format() calls
visitor = visitors.OuterFormatVisitor(file_contents, results)
visitor.visit(root_node)
results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)
def check_python_code_is_safe(self, python_code, root_node, results):
"""
Checks for violations in Python code snippet. This can also be used for
Python that appears in files other than .py files, like in templates.
Arguments:
python_code: The contents of the Python code.
root_node: The root node of the Python code parsed by AST.
results: A file results objects to which violations will be added.
"""
if root_node is not None:
# check illegal concatenation and interpolation
visitor = visitors.AllNodeVisitor(python_code, results)
visitor.visit(root_node)
# check rules parse with regex
self._check_custom_escape(python_code, results)
def parse_python_code(self, python_code, results):
"""
Parses Python code.
Arguments:
python_code: The Python code to be parsed.
Returns:
The root node that was parsed, or None for SyntaxError.
"""
python_code = self._strip_file_encoding(python_code)
try:
return ast.parse(python_code)
except SyntaxError as e:
if e.offset is None:
expression = Expression(0)
else:
lines = StringLines(python_code)
line_start_index = lines.line_number_to_start_index(e.lineno)
expression = Expression(line_start_index + e.offset)
results.violations.append(ExpressionRuleViolation(
self.ruleset.python_parse_error, expression
))
return None
def _strip_file_encoding(self, file_contents):
"""
Removes file encoding from file_contents because the file was already
read into Unicode, and the AST parser complains.
Arguments:
file_contents: The Python file contents.
Returns:
The Python file contents with the encoding stripped.
"""
# PEP-263 Provides Regex for Declaring Encoding
# Example: -*- coding: <encoding name> -*-
# This is only allowed on the first two lines, and it must be stripped
# before parsing, because we have already read into Unicode and the
# AST parser complains.
encoding_regex = re.compile(r"^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
encoding_match = encoding_regex.search(file_contents)
# If encoding comment not found on first line, search second line.
if encoding_match is None:
lines = StringLines(file_contents)
if lines.line_count() >= 2:
encoding_match = encoding_regex.search(lines.line_number_to_line(2))
# If encoding was found, strip it
if encoding_match is not None:
file_contents = file_contents.replace(encoding_match.group(), '#', 1)
return file_contents
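    # Example of what gets stripped (comment only): an encoding header on the first
    # or second line, such as
    #
    #     # -*- coding: utf-8 -*-
    #
    # is replaced with a bare "#" so that ast.parse() does not reject an encoding
    # declaration inside an already-decoded unicode string.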
def _check_custom_escape(self, file_contents, results):
"""
Checks for custom escaping calls, rather than using a standard escaping
method.
Arguments:
file_contents: The contents of the Python file
results: A list of results into which violations will be added.
"""
        for match in re.finditer("<.*<", file_contents):
expression = Expression(match.start(), match.end())
results.violations.append(ExpressionRuleViolation(
self.ruleset.python_custom_escape, expression
))
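# Usage sketch for the PythonLinter above (illustrative only; the directory and
# file name are hypothetical):
#
#     linter = PythonLinter(skip_dirs=('migrations',))
#     results = linter.process_file('openedx/core/lib', 'example.py')
#     for violation in results.violations:
#         print(violation.rule)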
class MakoTemplateLinter(BaseLinter):
"""
The linter for Mako template files.
"""
LINE_COMMENT_DELIM = "##"
ruleset = RuleSet(
mako_missing_default='mako-missing-default',
mako_multiple_page_tags='mako-multiple-page-tags',
mako_unparseable_expression='mako-unparseable-expression',
mako_unwanted_html_filter='mako-unwanted-html-filter',
mako_invalid_html_filter='mako-invalid-html-filter',
mako_invalid_js_filter='mako-invalid-js-filter',
mako_js_missing_quotes='mako-js-missing-quotes',
mako_js_html_string='mako-js-html-string',
mako_html_entities='mako-html-entities',
mako_unknown_context='mako-unknown-context',
# NOTE The MakoTemplateLinter directly checks for python_wrap_html and directly
# instantiates Visitor instances to check for python issues. This logic should
# be moved into the PythonLinter. The MakoTemplateLinter should only check for
# Mako-specific issues.
python_wrap_html='python-wrap-html',
) + visitors.ruleset
def __init__(self, javascript_linter, python_linter, skip_dirs=None):
"""
Init method.
"""
super(MakoTemplateLinter, self).__init__()
self.javascript_linter = javascript_linter
self.python_linter = python_linter
self.ruleset = self.ruleset + self.javascript_linter.ruleset + self.python_linter.ruleset
self._skip_mako_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a Mako template file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential Mako file
Returns:
The file results containing any violations.
"""
mako_file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(mako_file_full_path)
if not results.is_file:
return results
if not self._is_valid_directory(directory):
return results
# TODO: When safe-by-default is turned on at the platform level, will we:
# 1. Turn it on for .html only, or
# 2. Turn it on for all files, and have different rulesets that have
# different rules of .xml, .html, .js, .txt Mako templates (e.g. use
# the n filter to turn off h for some of these)?
# For now, we only check .html and .xml files
if not (file_name.lower().endswith('.html') or file_name.lower().endswith('.xml')):
return results
return self._load_and_check_file_is_safe(mako_file_full_path, self._check_mako_file_is_safe, results)
def _is_valid_directory(self, directory):
"""
Determines if the provided directory is a directory that could contain
Mako template files that need to be linted.
Arguments:
directory: The directory to be linted.
Returns:
True if this directory should be linted for Mako template violations
and False otherwise.
"""
if is_skip_dir(self._skip_mako_dirs, directory):
return False
# TODO: This is an imperfect guess concerning the Mako template
# directories. This needs to be reviewed before turning on safe by
# default at the platform level.
if ('/templates/' in directory) or directory.endswith('/templates'):
return True
return False
def _check_mako_file_is_safe(self, mako_template, results):
"""
Checks for violations in a Mako template.
Arguments:
mako_template: The contents of the Mako template.
results: A file results objects to which violations will be added.
"""
if self._is_django_template(mako_template):
return
has_page_default = self._has_page_default(mako_template, results)
self._check_mako_expressions(mako_template, has_page_default, results)
self._check_mako_python_blocks(mako_template, has_page_default, results)
results.prepare_results(mako_template, line_comment_delim=self.LINE_COMMENT_DELIM)
def _is_django_template(self, mako_template):
"""
Determines if the template is actually a Django template.
Arguments:
mako_template: The template code.
Returns:
True if this is really a Django template, and False otherwise.
"""
if re.search('({%.*%})|({{.*}})|({#.*#})', mako_template) is not None:
return True
return False
def _get_page_tag_count(self, mako_template):
"""
Determines the number of page expressions in the Mako template. Ignores
page expressions that are commented out.
Arguments:
mako_template: The contents of the Mako template.
Returns:
The number of page expressions
"""
count = len(re.findall('<%page ', mako_template, re.IGNORECASE))
count_commented = len(re.findall(r'##\s+<%page ', mako_template, re.IGNORECASE))
return max(0, count - count_commented)
def _has_page_default(self, mako_template, results):
"""
Checks if the Mako template contains the page expression marking it as
safe by default.
Arguments:
mako_template: The contents of the Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds violations regarding page default if necessary
Returns:
True if the template has the page default, and False otherwise.
"""
page_tag_count = self._get_page_tag_count(mako_template)
# check if there are too many page expressions
if 2 <= page_tag_count:
results.violations.append(RuleViolation(self.ruleset.mako_multiple_page_tags))
return False
# make sure there is exactly 1 page expression, excluding commented out
# page expressions, before proceeding
elif page_tag_count != 1:
results.violations.append(RuleViolation(self.ruleset.mako_missing_default))
return False
# check that safe by default (h filter) is turned on
page_h_filter_regex = re.compile('<%page[^>]*expression_filter=(?:"h"|\'h\')[^>]*/>')
page_match = page_h_filter_regex.search(mako_template)
if not page_match:
results.violations.append(RuleViolation(self.ruleset.mako_missing_default))
return page_match
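    # The page tag that satisfies the regex above looks like this (comment only):
    #
    #     <%page expression_filter="h"/>
    #
    # A template with no <%page> tag, or one whose tag lacks the "h" filter, gets
    # mako-missing-default; more than one <%page> tag gets mako-multiple-page-tags.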
def _check_mako_expressions(self, mako_template, has_page_default, results):
"""
Searches for Mako expressions and then checks if they contain
violations, including checking JavaScript contexts for JavaScript
violations.
Arguments:
mako_template: The contents of the Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
"""
expressions = self._find_mako_expressions(mako_template)
contexts = self._get_contexts(mako_template)
self._check_javascript_contexts(mako_template, contexts, results)
for expression in expressions:
if expression.end_index is None:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_unparseable_expression, expression
))
continue
context = self._get_context(contexts, expression.start_index)
self._check_expression_and_filters(mako_template, expression, context, has_page_default, results)
def _check_javascript_contexts(self, mako_template, contexts, results):
"""
Lint the JavaScript contexts for JavaScript violations inside a Mako
template.
Arguments:
mako_template: The contents of the Mako template.
contexts: A list of context dicts with 'type' and 'index'.
results: A list of results into which violations will be added.
Side effect:
Adds JavaScript violations to results.
"""
javascript_start_index = None
for context in contexts:
if context['type'] == 'javascript':
                if javascript_start_index is None:
javascript_start_index = context['index']
else:
if javascript_start_index is not None:
javascript_end_index = context['index']
javascript_code = mako_template[javascript_start_index:javascript_end_index]
self._check_javascript_context(javascript_code, javascript_start_index, results)
javascript_start_index = None
if javascript_start_index is not None:
javascript_code = mako_template[javascript_start_index:]
self._check_javascript_context(javascript_code, javascript_start_index, results)
def _check_javascript_context(self, javascript_code, start_offset, results):
"""
Lint a single JavaScript context for JavaScript violations inside a Mako
template.
Arguments:
javascript_code: The template contents of the JavaScript context.
start_offset: The offset of the JavaScript context inside the
original Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds JavaScript violations to results.
"""
javascript_results = FileResults("")
self.javascript_linter.check_javascript_file_is_safe(javascript_code, javascript_results)
self._shift_and_add_violations(javascript_results, start_offset, results)
def _check_mako_python_blocks(self, mako_template, has_page_default, results):
"""
Searches for Mako python blocks and checks if they contain
violations.
Arguments:
mako_template: The contents of the Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
"""
# Finds Python blocks such as <% ... %>, skipping other Mako start tags
# such as <%def> and <%page>.
python_block_regex = re.compile(r'<%\s(?P<code>.*?)%>', re.DOTALL)
for python_block_match in python_block_regex.finditer(mako_template):
self._check_expression_python(
python_code=python_block_match.group('code'),
start_offset=(python_block_match.start() + len('<% ')),
has_page_default=has_page_default,
results=results
)
def _check_expression_python(self, python_code, start_offset, has_page_default, results):
"""
Lint the Python inside a single Python expression in a Mako template.
Arguments:
python_code: The Python contents of an expression.
start_offset: The offset of the Python content inside the original
Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
Side effect:
Adds Python violations to results.
"""
python_results = FileResults("")
# Dedent expression internals so it is parseable.
# Note that the final columns reported could be off somewhat.
adjusted_python_code = textwrap.dedent(python_code)
        first_letter_match = re.search(r'\w', python_code)
        adjusted_first_letter_match = re.search(r'\w', adjusted_python_code)
if first_letter_match is not None and adjusted_first_letter_match is not None:
start_offset += (first_letter_match.start() - adjusted_first_letter_match.start())
python_code = adjusted_python_code
root_node = self.python_linter.parse_python_code(python_code, python_results)
self.python_linter.check_python_code_is_safe(python_code, root_node, python_results)
# Check mako expression specific Python rules.
if root_node is not None:
visitor = visitors.HtmlStringVisitor(python_code, python_results, True)
visitor.visit(root_node)
for unsafe_html_string_node in visitor.unsafe_html_string_nodes:
python_results.violations.append(ExpressionRuleViolation(
self.ruleset.python_wrap_html, visitor.node_to_expression(unsafe_html_string_node)
))
if has_page_default:
for over_escaped_entity_string_node in visitor.over_escaped_entity_string_nodes:
python_results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_html_entities, visitor.node_to_expression(over_escaped_entity_string_node)
))
python_results.prepare_results(python_code, line_comment_delim=self.LINE_COMMENT_DELIM)
self._shift_and_add_violations(python_results, start_offset, results)
def _shift_and_add_violations(self, other_linter_results, start_offset, results):
"""
Adds results from a different linter to the Mako results, after shifting
the offset into the original Mako template.
Arguments:
other_linter_results: Results from another linter.
start_offset: The offset of the linted code, a part of the template,
inside the original Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds violations to results.
"""
# translate the violations into the proper location within the original
# Mako template
for violation in other_linter_results.violations:
expression = violation.expression
expression.start_index += start_offset
if expression.end_index is not None:
expression.end_index += start_offset
results.violations.append(ExpressionRuleViolation(violation.rule, expression))
def _check_expression_and_filters(self, mako_template, expression, context, has_page_default, results):
"""
Checks that the filters used in the given Mako expression are valid
for the given context. Adds violation to results if there is a problem.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
context: The context of the page in which the expression was found
(e.g. javascript, html).
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
"""
if context == 'unknown':
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_unknown_context, expression
))
return
# Example: finds "| n, h}" when given "${x | n, h}"
filters_regex = re.compile(r'\|([.,\w\s]*)\}')
filters_match = filters_regex.search(expression.expression)
# Check Python code inside expression.
if filters_match is None:
python_code = expression.expression[2:-1]
else:
python_code = expression.expression[2:filters_match.start()]
self._check_expression_python(python_code, expression.start_index + 2, has_page_default, results)
# Check filters.
if filters_match is None:
if context == 'javascript':
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_invalid_js_filter, expression
))
return
filters = filters_match.group(1).replace(" ", "").split(",")
if filters == ['n', 'decode.utf8']:
# {x | n, decode.utf8} is valid in any context
pass
elif context == 'html':
if filters == ['h']:
if has_page_default:
# suppress this violation if the page default hasn't been set,
# otherwise the template might get less safe
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_unwanted_html_filter, expression
))
elif filters == ['n', 'strip_all_tags_but_br']:
# {x | n, strip_all_tags_but_br} is valid in html context
pass
else:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_invalid_html_filter, expression
))
elif context == 'javascript':
self._check_js_expression_not_with_html(mako_template, expression, results)
if filters == ['n', 'dump_js_escaped_json']:
# {x | n, dump_js_escaped_json} is valid
pass
elif filters == ['n', 'js_escaped_string']:
# {x | n, js_escaped_string} is valid, if surrounded by quotes
self._check_js_string_expression_in_quotes(mako_template, expression, results)
else:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_invalid_js_filter, expression
))
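    # Examples of how the filter rules above apply (comments only):
    #
    #   HTML context:
    #     ${message | h}                           # mako-unwanted-html-filter (redundant with the page default)
    #     ${message | n, decode.utf8}              # valid in any context
    #     ${message | n, unicode}                  # mako-invalid-html-filter
    #   JavaScript context:
    #     "${message | n, js_escaped_string}"      # valid, and must be wrapped in quotes
    #     ${data | n, dump_js_escaped_json}        # valid
    #     ${message}                               # mako-invalid-js-filter (no filters given)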
def _check_js_string_expression_in_quotes(self, mako_template, expression, results):
"""
Checks that a Mako expression using js_escaped_string is surrounded by
quotes.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
results: A list of results into which violations will be added.
"""
parse_string = self._find_string_wrapping_expression(mako_template, expression)
if parse_string is None:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_js_missing_quotes, expression
))
def _check_js_expression_not_with_html(self, mako_template, expression, results):
"""
Checks that a Mako expression in a JavaScript context does not appear in
a string that also contains HTML.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
results: A list of results into which violations will be added.
"""
parse_string = self._find_string_wrapping_expression(mako_template, expression)
if parse_string is not None and re.search('[<>]', parse_string.string) is not None:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_js_html_string, expression
))
def _find_string_wrapping_expression(self, mako_template, expression):
"""
Finds the string wrapping the Mako expression if there is one.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
Returns:
ParseString representing a scrubbed version of the wrapped string,
where the Mako expression was replaced with "${...}", if a wrapped
string was found. Otherwise, returns None if none found.
"""
lines = StringLines(mako_template)
start_index = lines.index_to_line_start_index(expression.start_index)
if expression.end_index is not None:
end_index = lines.index_to_line_end_index(expression.end_index)
else:
return None
# scrub out the actual expression so any code inside the expression
# doesn't interfere with rules applied to the surrounding code (i.e.
# checking JavaScript).
scrubbed_lines = "".join((
mako_template[start_index:expression.start_index],
"${...}",
mako_template[expression.end_index:end_index]
))
adjusted_start_index = expression.start_index - start_index
start_index = 0
while True:
parse_string = ParseString(scrubbed_lines, start_index, len(scrubbed_lines))
# check for validly parsed string
if 0 <= parse_string.start_index < parse_string.end_index:
# check if expression is contained in the given string
if parse_string.start_index < adjusted_start_index < parse_string.end_index:
return parse_string
else:
# move to check next string
start_index = parse_string.end_index
else:
break
return None
def _get_contexts(self, mako_template):
"""
Returns a data structure that represents the indices at which the
template changes from HTML context to JavaScript and back.
Return:
A list of dicts where each dict contains:
- index: the index of the context.
- type: the context type (e.g. 'html' or 'javascript').
"""
contexts_re = re.compile(
r"""
<script.*?(?<!/)> | # script tag start
</script> | # script tag end
<%static:require_module(_async)?.*?(?<!/)> | # require js script tag start (optionally the _async version)
</%static:require_module(_async)?> | # require js script tag end (optionally the _async version)
<%static:webpack.*(?<!/)> | # webpack script tag start
</%static:webpack> | # webpack script tag end
<%static:studiofrontend.*?(?<!/)> | # studiofrontend script tag start
</%static:studiofrontend> | # studiofrontend script tag end
<%block[ ]*name=['"]requirejs['"]\w*(?<!/)> | # require js tag start
</%block> # require js tag end
""",
re.VERBOSE | re.IGNORECASE
)
media_type_re = re.compile(r"""type=['"].*?['"]""", re.IGNORECASE)
contexts = [{'index': 0, 'type': 'html'}]
javascript_types = [
'text/javascript', 'text/ecmascript', 'application/ecmascript', 'application/javascript',
'text/x-mathjax-config', 'json/xblock-args', 'application/json',
]
html_types = ['text/template']
for context in contexts_re.finditer(mako_template):
match_string = context.group().lower()
if match_string.startswith("<script"):
match_type = media_type_re.search(match_string)
context_type = 'javascript'
if match_type is not None:
# get media type (e.g. get text/javascript from
# type="text/javascript")
match_type = match_type.group()[6:-1].lower()
if match_type in html_types:
context_type = 'html'
elif match_type not in javascript_types:
context_type = 'unknown'
contexts.append({'index': context.end(), 'type': context_type})
elif match_string.startswith("</"):
contexts.append({'index': context.start(), 'type': 'html'})
else:
contexts.append({'index': context.end(), 'type': 'javascript'})
return contexts
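    # Illustrative example of the returned structure (comment only). For a template
    # such as
    #
    #     <p>hi</p><script type="text/javascript">var x = 1;</script><p>bye</p>
    #
    # the method returns roughly:
    #
    #     [{'index': 0, 'type': 'html'},
    #      {'index': <end of the opening script tag>, 'type': 'javascript'},
    #      {'index': <start of the closing script tag>, 'type': 'html'}]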
def _get_context(self, contexts, index):
"""
Gets the context (e.g. javascript, html) of the template at the given
index.
Arguments:
contexts: A list of dicts where each dict contains the 'index' of the context
and the context 'type' (e.g. 'html' or 'javascript').
index: The index for which we want the context.
Returns:
The context (e.g. javascript or html) for the given index.
"""
current_context = contexts[0]['type']
for context in contexts:
if context['index'] <= index:
current_context = context['type']
else:
break
return current_context
def _find_mako_expressions(self, mako_template):
"""
Finds all the Mako expressions in a Mako template and creates a list
of dicts for each expression.
Arguments:
mako_template: The content of the Mako template.
Returns:
A list of Expressions.
"""
start_delim = '${'
start_index = 0
expressions = []
while True:
start_index = mako_template.find(start_delim, start_index)
if start_index < 0:
break
# If start of mako expression is commented out, skip it.
uncommented_start_index = self._uncommented_start_index(mako_template, start_index)
if uncommented_start_index != start_index:
start_index = uncommented_start_index
continue
result = self._find_closing_char_index(
start_delim, '{', '}', mako_template, start_index=start_index + len(start_delim)
)
if result is None:
expression = Expression(start_index)
# for parsing error, restart search right after the start of the
# current expression
start_index = start_index + len(start_delim)
else:
close_char_index = result['close_char_index']
expression = Expression(
start_index,
end_index=close_char_index + 1,
template=mako_template,
start_delim=start_delim,
end_delim='}',
strings=result['strings'],
)
# restart search after the current expression
start_index = expression.end_index
expressions.append(expression)
return expressions
|
gymnasium/edx-platform
|
scripts/xsslint/xsslint/linters.py
|
Python
|
agpl-3.0
| 61,116
|
[
"VisIt"
] |
1d3739f4056e128ef9585b94d718229e0031cdc5a5c38ab267ea452e81ac62ee
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-add-resources
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Add resources from the BDII database for a given VO
"""
__RCSID__ = "$Id$"
import signal
import re
import os
from urlparse import urlparse
from DIRAC.Core.Base import Script
def processScriptSwitches():
global vo, dry, doCEs, doSEs
Script.registerSwitch( "V:", "vo=", "Virtual Organization" )
Script.registerSwitch( "D", "dry", "Dry run" )
Script.registerSwitch( "C", "ce", "Process Computing Elements" )
Script.registerSwitch( "S", "se", "Process Storage Elements" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile]' % Script.scriptName ] ) )
Script.parseCommandLine( ignoreErrors = True )
vo = ''
dry = False
doCEs = False
doSEs = False
for sw in Script.getUnprocessedSwitches():
if sw[0] in ( "V", "vo" ):
vo = sw[1]
if sw[0] in ( "D", "dry" ):
dry = True
if sw[0] in ( "C", "ce" ):
doCEs = True
if sw[0] in ( "S", "se" ):
doSEs = True
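# Typical invocation (illustrative; the VO name is hypothetical and the long
# option forms follow the switches registered above):
#
#   dirac-admin-add-resources --vo=biomed --ce --dry
#
# -V/--vo selects the Virtual Organization, -C/--ce and -S/--se pick which
# resource types to process, and -D/--dry reports changes without committing.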
from DIRAC import gLogger, exit as DIRACExit, S_OK
from DIRAC.ConfigurationSystem.Client.Utilities import getGridCEs, getSiteUpdates, getCEsFromCS, \
getGridSRMs, getSRMUpdates
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
ceBdiiDict = None
def checkUnusedCEs():
global vo, dry, ceBdiiDict
gLogger.notice( 'looking for new computing resources in the BDII database...' )
result = getCEsFromCS()
if not result['OK']:
gLogger.error( 'ERROR: failed to get CEs from CS', result['Message'] )
DIRACExit( -1 )
knownCEs = result['Value']
result = getGridCEs( vo, ceBlackList = knownCEs )
if not result['OK']:
gLogger.error( 'ERROR: failed to get CEs from BDII', result['Message'] )
DIRACExit( -1 )
ceBdiiDict = result['BdiiInfo']
siteDict = result['Value']
if siteDict:
gLogger.notice( 'New resources available:\n' )
for site in siteDict:
diracSite = 'Unknown'
result = getDIRACSiteName( site )
if result['OK']:
diracSite = ','.join( result['Value'] )
ces = siteDict[site].keys()
if ces:
gLogger.notice( " %s, DIRAC site %s" % ( site, diracSite) )
for ce in ces:
gLogger.notice( ' '*4+ce )
gLogger.notice( ' %s, %s' % ( siteDict[site][ce]['CEType'], '%s_%s_%s' % siteDict[site][ce]['System'] ) )
else:
gLogger.notice( 'No new resources available, exiting' )
DIRACExit( 0 )
inp = raw_input( "\nDo you want to add sites ? [default=yes] [yes|no]: ")
inp = inp.strip()
  if inp and inp.lower().startswith( 'n' ):
gLogger.notice( 'Nothing else to be done, exiting' )
DIRACExit( 0 )
gLogger.notice( '\nAdding new sites/CEs interactively\n' )
sitesAdded = []
for site in siteDict:
# Get the country code:
country = ''
ces = siteDict[site].keys()
for ce in ces:
country = ce.strip().split('.')[-1].lower()
if len( country ) == 2:
break
if country == 'gov':
country = 'us'
break
if not country or len( country ) != 2:
country = 'xx'
result = getDIRACSiteName( site )
if not result['OK']:
gLogger.notice( '\nThe site %s is not yet in the CS, give it a name' % site )
diracSite = raw_input( '[help|skip|<domain>.<name>.%s]: ' % country )
if diracSite.lower() == "skip":
continue
if diracSite.lower() == "help":
gLogger.notice( '%s site details:' % site )
for k,v in ceBdiiDict[site].items():
if k != "CEs":
gLogger.notice( '%s\t%s' % (k,v) )
gLogger.notice( '\nEnter DIRAC site name in the form <domain>.<name>.%s\n' % country )
diracSite = raw_input( '[<domain>.<name>.%s]: ' % country )
try:
_, _, _ = diracSite.split( '.' )
except ValueError:
gLogger.error( 'ERROR: DIRAC site name does not follow convention: %s' % diracSite )
continue
diracSites = [diracSite]
else:
diracSites = result['Value']
if len( diracSites ) > 1:
gLogger.notice( 'Attention! GOC site %s corresponds to more than one DIRAC sites:' % site )
gLogger.notice( str( diracSites ) )
gLogger.notice( 'Please, pay attention which DIRAC site the new CEs will join\n' )
newCEs = {}
addedCEs = []
for ce in ces:
ceType = siteDict[site][ce]['CEType']
for diracSite in diracSites:
if ce in addedCEs:
continue
yn = raw_input( "Add CE %s of type %s to %s? [default yes] [yes|no]: " % ( ce, ceType, diracSite ) )
if yn == '' or yn.lower() == 'y':
newCEs.setdefault( diracSite, [] )
newCEs[diracSite].append( ce )
addedCEs.append( ce )
for diracSite in diracSites:
if diracSite in newCEs:
cmd = "dirac-admin-add-site %s %s %s" % ( diracSite, site, ' '.join( newCEs[diracSite] ) )
gLogger.notice( "\nNew site/CEs will be added with command:\n%s" % cmd )
yn = raw_input( "Add it ? [default yes] [yes|no]: " )
if not ( yn == '' or yn.lower() == 'y' ) :
continue
if dry:
gLogger.notice( "Command is skipped in the dry run" )
else:
result = shellCall( 0, cmd )
if not result['OK']:
gLogger.error( 'Error while executing dirac-admin-add-site command' )
yn = raw_input( "Do you want to continue ? [default no] [yes|no]: " )
if yn == '' or yn.lower().startswith( 'n' ):
if sitesAdded:
gLogger.notice( 'CEs were added at the following sites:' )
for site, diracSite in sitesAdded:
gLogger.notice( "%s\t%s" % ( site, diracSite ) )
DIRACExit( 0 )
else:
exitStatus, stdData, errData = result[ 'Value' ]
if exitStatus:
gLogger.error( 'Error while executing dirac-admin-add-site command\n', '\n'.join( [stdData, errData] ) )
yn = raw_input( "Do you want to continue ? [default no] [yes|no]: " )
if yn == '' or yn.lower().startswith( 'n' ):
if sitesAdded:
gLogger.notice( 'CEs were added at the following sites:' )
for site, diracSite in sitesAdded:
gLogger.notice( "%s\t%s" % ( site, diracSite ) )
DIRACExit( 0 )
else:
sitesAdded.append( ( site, diracSite ) )
gLogger.notice( stdData )
if sitesAdded:
gLogger.notice( 'CEs were added at the following sites:' )
for site, diracSite in sitesAdded:
gLogger.notice( "%s\t%s" % ( site, diracSite ) )
else:
gLogger.notice( 'No new CEs were added this time' )
def updateCS( changeSet ):
global vo, dry, ceBdiiDict
changeList = list( changeSet )
changeList.sort()
if dry:
gLogger.notice( 'The following needed changes are detected:\n' )
else:
gLogger.notice( 'We are about to make the following changes to CS:\n' )
for entry in changeList:
gLogger.notice( "%s/%s %s -> %s" % entry )
if not dry:
csAPI = CSAPI()
csAPI.initialize()
result = csAPI.downloadCSData()
if not result['OK']:
gLogger.error( 'Failed to initialize CSAPI object', result['Message'] )
DIRACExit( -1 )
for section, option, value, new_value in changeSet:
if value == 'Unknown' or not value:
csAPI.setOption( cfgPath( section, option ), new_value )
else:
csAPI.modifyValue( cfgPath( section, option ), new_value )
yn = raw_input( 'Do you want to commit changes to CS ? [default yes] [yes|no]: ' )
if yn == '' or yn.lower().startswith( 'y' ):
result = csAPI.commit()
if not result['OK']:
gLogger.error( "Error while commit to CS", result['Message'] )
else:
gLogger.notice( "Successfully committed %d changes to CS" % len( changeSet ) )
def updateSites():
global vo, dry, ceBdiiDict
result = getSiteUpdates( vo, bdiiInfo = ceBdiiDict )
if not result['OK']:
gLogger.error( 'Failed to get site updates', result['Message'] )
DIRACExit( -1 )
changeSet = result['Value']
updateCS( changeSet )
def checkUnusedSEs():
global vo, dry
result = getGridSRMs( vo, unUsed = True )
if not result['OK']:
    gLogger.error( 'Failed to look up SRMs in BDII', result['Message'] )
    DIRACExit( -1 )
siteSRMDict = result['Value']
# Evaluate VOs
result = getVOs()
if result['OK']:
csVOs = set( result['Value'] )
else:
csVOs = {vo}
changeSetFull = set()
for site in siteSRMDict:
for gridSE in siteSRMDict[site]:
changeSet = set()
seDict = siteSRMDict[site][gridSE]['SE']
srmDict = siteSRMDict[site][gridSE]['SRM']
# Check the SRM version
version = srmDict.get( 'GlueServiceVersion', '' )
if not ( version and version.startswith( '2' ) ):
gLogger.debug( 'Skipping SRM service with version %s' % version )
continue
result = getDIRACSiteName( site )
if not result['OK']:
gLogger.notice( 'Unused se %s is detected at unused site %s' % ( gridSE, site ) )
gLogger.notice( 'Consider adding site %s to the DIRAC CS' % site )
continue
diracSites = result['Value']
yn = raw_input( '\nDo you want to add new SRM SE %s at site(s) %s ? default yes [yes|no]: ' % ( gridSE, str( diracSites ) ) )
if not yn or yn.lower().startswith( 'y' ):
if len( diracSites ) > 1:
prompt = 'Which DIRAC site the new SE should be attached to ?'
for i, s in enumerate( diracSites ):
prompt += '\n[%d] %s' % ( i, s )
prompt += '\nEnter your choice number: '
inp = raw_input( prompt )
try:
ind = int( inp )
          except ValueError:
gLogger.notice( 'Can not interpret your choice: %s, try again later' % inp )
continue
diracSite = diracSites[ind]
else:
diracSite = diracSites[0]
domain, siteName, country = diracSite.split( '.' )
recName = '%s-disk' % siteName
inp = raw_input( 'Give a DIRAC name to the grid SE %s, default %s : ' % ( gridSE, recName ) )
diracSEName = inp
if not inp:
diracSEName = recName
gLogger.notice( 'Adding new SE %s at site %s' % ( diracSEName, diracSite ) )
seSection = cfgPath( '/Resources/StorageElements', diracSEName )
changeSet.add( ( seSection, 'BackendType', seDict.get( 'GlueSEImplementationName', 'Unknown' ) ) )
changeSet.add( ( seSection, 'Description', seDict.get( 'GlueSEName', 'Unknown' ) ) )
bdiiVOs = set( [ re.sub( '^VO:', '', rule ) for rule in srmDict.get( 'GlueServiceAccessControlBaseRule', [] ) ] )
seVOs = csVOs.intersection( bdiiVOs )
changeSet.add( ( seSection, 'VO', ','.join( seVOs ) ) )
accessSection = cfgPath( seSection, 'AccessProtocol.1' )
changeSet.add( ( accessSection, 'Protocol', 'srm' ) )
changeSet.add( ( accessSection, 'ProtocolName', 'SRM2' ) )
endPoint = srmDict.get( 'GlueServiceEndpoint', '' )
host = urlparse( endPoint ).hostname
        port = urlparse( endPoint ).port
changeSet.add( ( accessSection, 'Host', host ) )
changeSet.add( ( accessSection, 'Port', port ) )
changeSet.add( ( accessSection, 'Access', 'remote' ) )
voPathSection = cfgPath( accessSection, 'VOPath' )
if 'VOPath' in seDict:
path = seDict['VOPath']
voFromPath = os.path.basename( path )
if voFromPath != diracVO:
gLogger.notice( '\n!!! Warning: non-conventional VO path: %s\n' % path )
changeSet.add( ( voPathSection, diracVO, path ) )
path = os.path.dirname( path )
else:
# Try to guess the Path
domain = '.'.join( host.split( '.' )[-2:] )
path = '/dpm/%s/home' % domain
changeSet.add( ( accessSection, 'Path', path ) )
changeSet.add( ( accessSection, 'SpaceToken', '' ) )
changeSet.add( ( accessSection, 'WSUrl', '/srm/managerv2?SFN=' ) )
gLogger.notice( 'SE %s will be added with the following parameters' % diracSEName )
changeList = list( changeSet )
changeList.sort()
for entry in changeList:
gLogger.notice( entry )
yn = raw_input( 'Do you want to add new SE %s ? default yes [yes|no]: ' % diracSEName )
if not yn or yn.lower().startswith( 'y' ):
changeSetFull = changeSetFull.union( changeSet )
if dry:
if changeSetFull:
gLogger.notice( 'Skipping commit of the new SE data in a dry run' )
else:
gLogger.notice( "No new SE to be added" )
return S_OK()
if changeSetFull:
csAPI = CSAPI()
csAPI.initialize()
result = csAPI.downloadCSData()
if not result['OK']:
gLogger.error( 'Failed to initialize CSAPI object', result['Message'] )
DIRACExit( -1 )
changeList = list( changeSetFull )
changeList.sort()
for section, option, value in changeList:
csAPI.setOption( cfgPath( section, option ), value )
yn = raw_input( 'New SE data is accumulated\n Do you want to commit changes to CS ? default yes [yes|no]: ' )
if not yn or yn.lower().startswith( 'y' ):
result = csAPI.commit()
if not result['OK']:
gLogger.error( "Error while commit to CS", result['Message'] )
else:
gLogger.notice( "Successfully committed %d changes to CS" % len( changeSetFull ) )
else:
gLogger.notice( "No new SE to be added" )
return S_OK()
def updateSEs():
global vo, dry
result = getSRMUpdates( vo )
if not result['OK']:
gLogger.error( 'Failed to get SRM updates', result['Message'] )
DIRACExit( -1 )
changeSet = result['Value']
updateCS( changeSet )
def handler( signum, frame ):
gLogger.notice( '\nExit is forced, bye...' )
DIRACExit( -1 )
if __name__ == "__main__":
signal.signal( signal.SIGTERM, handler )
signal.signal( signal.SIGINT, handler )
vo = ''
dry = False
doCEs = False
doSEs = False
ceBdiiDict = None
processScriptSwitches()
if not vo:
gLogger.error( 'No VO specified' )
DIRACExit( -1 )
diracVO = vo
vo = getVOOption( vo, 'VOMSName', vo )
if doCEs:
yn = raw_input( 'Do you want to check/add new sites to CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
checkUnusedCEs()
yn = raw_input( 'Do you want to update CE details in the CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
updateSites()
if doSEs:
yn = raw_input( 'Do you want to check/add new storage elements to CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
result = checkUnusedSEs()
yn = raw_input( 'Do you want to update SE details in the CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
updateSEs()
|
coberger/DIRAC
|
ConfigurationSystem/scripts/dirac-admin-add-resources.py
|
Python
|
gpl-3.0
| 15,883
|
[
"DIRAC"
] |
c77e8cb225b1c8d2c5fc056e92ed3ca1b65d5dfc6d5798210c4239204e3b4e56
|
# -*- coding: utf-8 -*-
from datetime import (
datetime,
timedelta,
)
from inspect import isclass
import requests
from atomx.version import API_VERSION, VERSION
from atomx import models
from atomx.utils import (
get_model_name,
model_name_to_rest,
)
from atomx.exceptions import (
APIError,
ModelNotFoundError,
InvalidCredentials,
MissingArgumentError,
)
__title__ = 'atomx'
__version__ = VERSION
__author__ = 'Spot Media Solutions Sdn. Bhd.'
__copyright__ = 'Copyright 2015-2016 Spot Media Solutions Sdn. Bhd.'
API_ENDPOINT = 'https://api.atomx.com/{}'.format(API_VERSION)
class Atomx(object):
"""Interface for the api on api.atomx.com.
To learn more about the api visit the
`atomx wiki <https://wiki.atomx.com/api>`_
:param str email: email address of your atomx user
:param str password: password of your atomx user
:param str totp: 6 digit auth token if the account has 2-factor authentication enabled.
:param str api_endpoint: url for connections to the api
(defaults to `https://api.atomx.com/{API_VERSION}`)
:param bool save_response: If `True` save the last api response meta info
(without the resource payload) in :attr:`.Atomx.last_response`. (default: `True`)
:return: :class:`.Atomx` session to interact with the api
"""
def __init__(self, email, password, totp=None,
api_endpoint=API_ENDPOINT, save_response=True, expiration=None):
self.auth_token = None
self.user = None
self.api_endpoint = api_endpoint.rstrip('/') + '/'
self.save_response = save_response
#: Contains the response of the last api call, if `save_response` was set `True`
self.last_response = None
self.login(email, password, totp, expiration)
@property
def _auth_header(self):
if self.auth_token:
return {'Authorization': 'Bearer ' + self.auth_token}
def login(self, email, password, totp=None, expiration=None):
"""Gets new authentication token for user ``email``.
This method is automatically called in :meth:`__init__` so
you rarely have to call this method directly.
:param str email: Email to use for login.
:param str password: Password to use for login.
:param str totp: 6 digit auth token if the account has 2-factor authentication enabled.
:param int expiration: Number of seconds that the auth token should be valid. (optional)
:return: None
:raises: :class:`.exceptions.InvalidCredentials` if ``email``/``password`` is wrong
"""
json = {'email': email, 'password': password}
if totp:
json['totp'] = str(totp)
if expiration:
json['expiration'] = expiration
r = requests.post(self.api_endpoint + 'login', json=json)
if not r.ok:
if r.status_code == 401:
raise InvalidCredentials
raise APIError(r.json()['error'])
self.auth_token = r.json()['auth_token']
self.user = models.User(session=self, **r.json()['user'])
def logout(self):
"""Removes authentication token from session."""
self.auth_token = None
self.user = None
def search(self, query, index=None):
"""Search for ``query``.
Returns a `dict` with all found results for:
'Advertisers', 'Campaigns', 'Creatives', 'Placements', 'Publishers', 'Sites'.
The resulting :mod:`.models` have only `id` and `name` loaded since that's
what's returned from the api `/search` call, but attributes will be lazy loaded
        once you try to access them.
Or you can just fetch everything with one api call with :meth:`.AtomxModel.reload`.
Example::
>>> atomx = Atomx('apiuser@example.com', 'password')
>>> search_result = atomx.search('atomx')
>>> assert 'campaigns' in search_result
>>> campaign = search_result['campaigns'][0]
>>> assert isinstance(campaign, models.Campaign)
>>> # campaign has only `id` and `name` loaded but you
>>> # can still access (lazy load) all attributes
>>> assert isinstance(campaign.budget, float)
>>> # or reload all attributes with one api call
>>> campaign.reload()
:param str query: keyword to search for.
:param list index: :class:`str` or :class:`list` of the indexes you want to get returned.
E.g. ``index=['campaigns', 'domains']``.
:return: dict with list of :mod:`.models` as values
"""
params = {'q': query}
if index:
if isinstance(index, list):
index = ','.join(index)
params['index'] = index
r = requests.get(self.api_endpoint + 'search',
params=params,
headers=self._auth_header)
r_json = r.json()
if not r.ok:
raise APIError(r_json['error'])
search_result = r_json['search']
if self.save_response:
del r_json['search']
self.last_response = r_json
self.last_response['_headers'] = r.headers
# convert publisher, creative dicts etc from search result to Atomx.model
for m in search_result.keys():
model_name = get_model_name(m)
if model_name:
search_result[m] = [getattr(models, model_name)(session=self, **v)
for v in search_result[m]]
return search_result
def report(self, scope=None, groups=None, metrics=None, where=None,
from_=None, to=None, daterange=None, timezone='UTC',
emails=None, when=None, interval=None, name=None,
sort=None, limit=None, offset=None, save=True, editable=False):
"""Create a report.
See the `reporting atomx wiki <https://wiki.atomx.com/reporting>`_
for details about parameters and available groups, metrics.
:param str scope: Specifies the report type. Should be one of:
'advertiser', 'publisher', 'inventory', 'dsp',
'network_managed', 'network_buy', 'network_sell'.
If undefined it tries to determine the `scope` automatically based
on the access rights of the api user.
:param list groups: columns to group by.
:param list metrics: columns to sum on.
:param list where: is a list of expression lists.
An expression list is in the form of ``[column, op, value]``:
- ``column`` can be any of the ``groups`` or ``metrics`` parameter columns.
- ``op`` can be any of ``==``, ``!=``, ``<``, ``>``, ``in`` or ``not in`` as a string.
- ``value`` is either a number or in case of ``in``
and ``not in`` a list of numbers.
:param datetime.datetime from_: :class:`datetime.datetime` where the report
should start (inclusive). (Defaults to last week)
:param datetime.datetime to: :class:`datetime.datetime` where the report
should end (exclusive). (Defaults to `datetime.now()` if undefined)
        :param str daterange: Use `daterange` to automatically set the report's
            `from` and `to` parameters relative to the current date.
            Both `from_` and `to` have to be ``None`` for this to take effect.
Dateranges are: today, yesterday, last7days, last14days, last30days, monthtodate,
lastmonth, yeartodate, lifetime. (Defaults to ``None``)
:param str timezone: Timezone used for all times. (Defaults to `UTC`)
For a supported list see https://wiki.atomx.com/timezones
:param emails: One or multiple email addresses that should get
notified once the report is finished and ready to download.
:type emails: str or list
:param str when: When should the scheduled report run. (daily, monthly, monday-sunday)
:param str interval: Time period included in the scheduled report ('N days' or 'N month')
:param str name: Optional name for the report.
:param str or list sort: List of columns to sort by.
:param int limit: Number of rows to return
:param int offset: Number of rows to skip.
        :param bool save: Should the report appear in the user's report history (defaults to `True`).
:param bool editable: Should other users be able to change the date range of this report.
:return: A :class:`atomx.models.Report` model
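
        Example (illustrative only, assuming an :class:`Atomx` session named
        ``atomx``; the group, metric and advertiser id below are made-up
        placeholders, not values defined by the API)::

            >>> report = atomx.report(scope='advertiser',
            ...                       groups=['campaign_id'],
            ...                       metrics=['impressions', 'clicks'],
            ...                       where=[['advertiser_id', '==', 42]],
            ...                       daterange='last7days')
            >>> assert isinstance(report, models.Report)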
"""
report_json = {'timezone': timezone, 'save': save, 'editable': editable}
if name:
report_json['name'] = name
if groups:
report_json['groups'] = groups
if metrics:
report_json['metrics'] = metrics
elif not groups:
raise MissingArgumentError('Either `groups` or `metrics` have to be set.')
if scope is None:
user = self.user
if len(user.networks) > 0:
pass # user has network access so could be any report (leave scope as None)
elif len(user.publishers) > 0 and len(user.advertisers) == 0:
scope = 'publishers'
elif len(user.advertisers) > 0 and len(user.publishers) == 0:
scope = 'advertisers'
if scope is None:
raise MissingArgumentError('Unable to detect scope automatically. '
'Please set `scope` parameter.')
report_json['scope'] = scope
if where:
report_json['where'] = where
if when and interval: # scheduled report
report_json['when'] = when
report_json['interval'] = interval
elif not from_ and not to and daterange: # Rolling report
report_json['daterange'] = daterange
else: # Normal report
if from_ is None:
from_ = datetime.now() - timedelta(days=7)
if isinstance(from_, datetime):
report_json['from'] = from_.strftime("%Y-%m-%d %H:00:00")
else:
report_json['from'] = from_
if to is None:
to = datetime.now()
if isinstance(to, datetime):
report_json['to'] = to.strftime("%Y-%m-%d %H:00:00")
else:
report_json['to'] = to
if emails:
if not isinstance(emails, list):
emails = [emails]
report_json['emails'] = emails
params = {}
if limit:
params['limit'] = limit
if offset:
params['offset'] = offset
if sort:
if isinstance(sort, list):
sort = ','.join(sort)
params['sort'] = sort
r = requests.post(self.api_endpoint + 'report',
params=params, json=report_json, headers=self._auth_header)
r_json = r.json()
if not r.ok:
raise APIError(r_json['error'])
report = r_json['report']
if self.save_response:
del r_json['report']
self.last_response = r_json
self.last_response['_headers'] = r.headers
return models.Report(session=self, **report)
def get(self, resource, *args, **kwargs):
"""Returns a list of models from :mod:`.models` if you query for
multiple models or a single instance of a model from :mod:`.models`
if you query for a specific `id`
:param str resource: Specify the resource to get from the atomx api.
Examples:
Query all advertisers::
>>> atomx = Atomx('apiuser@example.com', 'password')
>>> advertisers = atomx.get('advertisers')
>>> assert isinstance(advertisers, list)
>>> assert isinstance(advertisers[0], atomx.models.Advertiser)
Get publisher with id 23::
>>> publisher = atomx.get('publisher/23')
            >>> # or get the same publisher using the id as parameter
>>> publisher = atomx.get('publisher', 23)
            >>> # or use an atomx model
>>> publisher = atomx.get(atomx.models.Publisher(23))
>>> assert publisher.id == 23
>>> assert isinstance(publisher, atomx.models.Publisher)
Get all profiles for advertiser 42::
>>> profiles = atomx.get('advertiser/42/profiles')
>>> assert isinstance(profiles, list)
>>> assert isinstance(profiles[0], atomx.models.Profile)
>>> assert profiles[0].advertiser.id == 42
:param args: All non-keyword arguments will get used to compute the ``resource``.
This makes it easier if you want to work with a variable resource path.
.. code-block:: python
advertiser_id = 42
attribute = 'profiles'
profiles = atomx.get('advertiser', advertiser_id, attribute)
# is equivalent to atomx.get('advertiser/42/profiles')
:param kwargs: Any argument is passed as URL parameter to the respective api endpoint.
See `API URL Parameters <https://wiki.atomx.com/api#url_parameters>`_
in the wiki.
Example:
Get the first 20 domains that contain ``atom``::
>>> atom_domains = atomx.get('domains', hostname='*atom*', limit=20)
>>> assert len(atom_domains) == 20
>>> assert 'atom' in atom_domains[1].hostname
:return: a class from :mod:`.models` or a list of models depending on param `resource`
"""
if isclass(resource) and issubclass(resource, models.AtomxModel):
resource = resource._resource_name
elif hasattr(resource, '_resource_name'):
resource_path = resource._resource_name
if hasattr(resource, 'id'):
resource_path += '/' + str(resource.id)
resource = resource_path
else:
resource = resource.strip('/')
for a in args:
resource += '/' + str(a)
r = requests.get(self.api_endpoint + resource, params=kwargs, headers=self._auth_header)
if not r.ok:
raise APIError(r.json()['error'])
r_json = r.json()
model_name = r_json['resource']
res = r_json[model_name]
if self.save_response:
del r_json[model_name]
self.last_response = r_json
self.last_response['_headers'] = r.headers
model = get_model_name(model_name)
if model and res:
if isinstance(res, list):
return [getattr(models, model)(session=self, **m) for m in res]
return getattr(models, model)(session=self, **res)
elif model_name == 'reporting': # special case for `/reports` status
return {
'reports': [models.Report(session=self, **m) for m in res['reports']],
'scheduled': [models.Report(session=self, **m) for m in res['scheduled']]
}
return res
def post(self, resource, json, **kwargs):
"""Send HTTP POST to ``resource`` with ``json`` content.
Used by :meth:`.models.AtomxModel.create`.
:param resource: Name of the resource to `POST` to.
:param json: Content of the `POST` request.
:param kwargs: URL Parameters of the request.
:return: :class:`dict` with the newly created resource.
"""
r = requests.post(self.api_endpoint + resource.strip('/'),
json=json, params=kwargs, headers=self._auth_header)
r_json = r.json()
if not r.ok:
raise APIError(r_json['error'])
model_name = r_json['resource']
res = r_json[model_name]
if self.save_response:
del r_json[model_name]
self.last_response = r_json
self.last_response['_headers'] = r.headers
model = get_model_name(model_name)
if model and isinstance(res, list):
return [getattr(models, model)(session=self, **m) for m in res]
return res
def put(self, resource, id, json, **kwargs):
"""Send HTTP PUT to ``resource``/``id`` with ``json`` content.
Used by :meth:`.models.AtomxModel.save`.
:param resource: Name of the resource to `PUT` to.
:param id: Id of the resource you want to modify
:param json: Content of the `PUT` request.
:param kwargs: URL Parameters of the request.
:return: :class:`dict` with the modified resource.
"""
r = requests.put(self.api_endpoint + resource.strip('/') + '/' + str(id),
json=json, params=kwargs, headers=self._auth_header)
r_json = r.json()
if not r.ok:
raise APIError(r_json['error'])
model_name = r_json['resource']
res = r_json[model_name]
if self.save_response:
del r_json[model_name]
self.last_response = r_json
self.last_response['_headers'] = r.headers
return res
def delete(self, resource, *args, **kwargs):
"""Send HTTP DELETE to ``resource``.
:param resource: Name of the resource to `DELETE`.
:param args: All non-keyword arguments will be used to compute the final ``resource``.
:param kwargs: Optional keyword arguments will be passed as query string to the
delete request.
:return: message or resource returned by the api.
"""
if hasattr(resource, '_resource_name') and hasattr(resource, 'id'):
resource = '{}/{}'.format(resource._resource_name, resource.id)
resource = resource.strip('/')
for a in args:
resource += '/' + str(a)
r = requests.delete(self.api_endpoint + resource, params=kwargs, headers=self._auth_header)
r_json = r.json()
if not r.ok:
raise APIError(r_json['error'])
model_name = r_json['resource']
res = r_json[model_name]
if self.save_response:
del r_json[model_name]
self.last_response = r_json
self.last_response['_headers'] = r.headers
return res
def save(self, model):
"""Alias for :meth:`.models.AtomxModel.save` with `session` argument."""
return model.save(self)
def create(self, model):
"""Alias for :meth:`.models.AtomxModel.create` with `session` argument."""
return model.create(self)
def remove(self, model):
"""Alias for :meth:`.models.AtomxModel.delete` with `session` argument."""
return model.delete(self)
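# Illustrative usage sketch (added for this write-up, not part of the package).
# It only exercises methods defined above; the credentials and the ids 23 and 42
# are placeholder values.
def _example_session():
    api = Atomx('apiuser@example.com', 'password')     # login happens in __init__
    publisher = api.get('publisher', 23)                # GET /publisher/23
    profiles = api.get('advertiser', 42, 'profiles')    # GET /advertiser/42/profiles
    found = api.search('atomx', index=['campaigns'])    # dict of lazily loaded models
    api.logout()
    return publisher, profiles, found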
|
atomx/atomx-api-python
|
atomx/__init__.py
|
Python
|
isc
| 18,863
|
[
"VisIt"
] |
c67dfaff3f62ba156263ebf78462c2cc050d681e353afcab538dfc96dcee5a35
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Class advice.
This module was adapted from 'protocols.advice', part of the Python
Enterprise Application Kit (PEAK). Please notify the PEAK authors
(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or
Zope-specific changes are required, so that the PEAK version of this module
can be kept in sync.
PEAK is a Python application framework that interoperates with (but does
not require) Zope 3 and Twisted. It provides tools for manipulating UML
models, object-relational persistence, aspect-oriented programming, and more.
Visit the PEAK home page at http://peak.telecommunity.com for more information.
$Id: advice.py 110699 2010-04-09 08:16:17Z regebro $
"""
from types import FunctionType
try:
from types import ClassType
__python3 = False
except ImportError:
__python3 = True
import sys
def getFrameInfo(frame):
"""Return (kind,module,locals,globals) for a frame
'kind' is one of "exec", "module", "class", "function call", or "unknown".
"""
f_locals = frame.f_locals
f_globals = frame.f_globals
sameNamespace = f_locals is f_globals
hasModule = '__module__' in f_locals
hasName = '__name__' in f_globals
sameName = hasModule and hasName
sameName = sameName and f_globals['__name__']==f_locals['__module__']
module = hasName and sys.modules.get(f_globals['__name__']) or None
namespaceIsModule = module and module.__dict__ is f_globals
if not namespaceIsModule:
# some kind of funky exec
kind = "exec"
elif sameNamespace and not hasModule:
kind = "module"
elif sameName and not sameNamespace:
kind = "class"
elif not sameNamespace:
kind = "function call"
else:
# How can you have f_locals is f_globals, and have '__module__' set?
# This is probably module-level code, but with a '__module__' variable.
kind = "unknown"
return kind, module, f_locals, f_globals
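# Small self-check (added for illustration, not part of the original module):
# called from inside a function body, getFrameInfo classifies the frame as a
# "function call", because f_locals and f_globals differ and no '__module__'
# is defined locally.
def _getFrameInfo_demo():
    kind, module, f_locals, f_globals = getFrameInfo(sys._getframe())
    assert kind == "function call"
    return kind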
def addClassAdvisor(callback, depth=2):
"""Set up 'callback' to be passed the containing class upon creation
This function is designed to be called by an "advising" function executed
in a class suite. The "advising" function supplies a callback that it
wishes to have executed when the containing class is created. The
callback will be given one argument: the newly created containing class.
The return value of the callback will be used in place of the class, so
the callback should return the input if it does not wish to replace the
class.
The optional 'depth' argument to this function determines the number of
frames between this function and the targeted class suite. 'depth'
defaults to 2, since this skips this function's frame and one calling
function frame. If you use this function from a function called directly
in the class suite, the default will be correct, otherwise you will need
to determine the correct depth yourself.
This function works by installing a special class factory function in
place of the '__metaclass__' of the containing class. Therefore, only
callbacks *after* the last '__metaclass__' assignment in the containing
class will be executed. Be sure that classes using "advising" functions
declare any '__metaclass__' *first*, to ensure all callbacks are run."""
frame = sys._getframe(depth)
kind, module, caller_locals, caller_globals = getFrameInfo(frame)
# This causes a problem when zope interfaces are used from doctest.
# In these cases, kind == "exec".
#
#if kind != "class":
# raise SyntaxError(
# "Advice must be in the body of a class statement"
# )
previousMetaclass = caller_locals.get('__metaclass__')
if __python3:
defaultMetaclass = caller_globals.get('__metaclass__', type)
else:
defaultMetaclass = caller_globals.get('__metaclass__', ClassType)
def advise(name, bases, cdict):
if '__metaclass__' in cdict:
del cdict['__metaclass__']
if previousMetaclass is None:
if bases:
# find best metaclass or use global __metaclass__ if no bases
meta = determineMetaclass(bases)
else:
meta = defaultMetaclass
elif isClassAdvisor(previousMetaclass):
# special case: we can't compute the "true" metaclass here,
# so we need to invoke the previous metaclass and let it
# figure it out for us (and apply its own advice in the process)
meta = previousMetaclass
else:
meta = determineMetaclass(bases, previousMetaclass)
newClass = meta(name,bases,cdict)
# this lets the callback replace the class completely, if it wants to
return callback(newClass)
# introspection data only, not used by inner function
advise.previousMetaclass = previousMetaclass
advise.callback = callback
# install the advisor
caller_locals['__metaclass__'] = advise
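# Illustrative sketch (added for this write-up, not part of zope.interface):
# a minimal "advising" function built on addClassAdvisor. The name _tag_class
# and the attribute __advice_tag__ are invented for this example. Under
# Python 2 the callback fires when the surrounding class suite finishes.
def _tag_class(tag):
    def callback(cls):
        cls.__advice_tag__ = tag   # annotate the freshly created class
        return cls                 # returning cls leaves the class in place
    addClassAdvisor(callback)      # default depth=2 targets the class suite
# Intended (Python 2) use inside a class body:
#
#     class Example(object):
#         _tag_class("demo")
#
#     assert Example.__advice_tag__ == "demo"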
def isClassAdvisor(ob):
"""True if 'ob' is a class advisor function"""
return isinstance(ob,FunctionType) and hasattr(ob,'previousMetaclass')
def determineMetaclass(bases, explicit_mc=None):
"""Determine metaclass from 1+ bases and optional explicit __metaclass__"""
meta = [getattr(b,'__class__',type(b)) for b in bases]
if explicit_mc is not None:
# The explicit metaclass needs to be verified for compatibility
# as well, and allowed to resolve the incompatible bases, if any
meta.append(explicit_mc)
if len(meta)==1:
# easy case
return meta[0]
candidates = minimalBases(meta) # minimal set of metaclasses
if not candidates:
# they're all "classic" classes
assert(not __python3) # This should not happen under Python 3
return ClassType
elif len(candidates)>1:
# We could auto-combine, but for now we won't...
raise TypeError("Incompatible metatypes",bases)
# Just one, return it
return candidates[0]
def minimalBases(classes):
"""Reduce a list of base classes to its ordered minimum equivalent"""
if not __python3:
classes = [c for c in classes if c is not ClassType]
candidates = []
for m in classes:
for n in classes:
if issubclass(n,m) and m is not n:
break
else:
# m has no subclasses in 'classes'
if m in candidates:
candidates.remove(m) # ensure that we're later in the list
candidates.append(m)
return candidates
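# Quick illustration (added for this write-up, not in the original module):
# minimalBases keeps only the most derived metaclasses, dropping redundant
# ancestors, and determineMetaclass then picks the single survivor.
def _minimalBases_demo():
    class A(object):
        pass
    class B(A):
        pass
    # A is an ancestor of B, so only B remains in the minimal set.
    assert minimalBases([A, B]) == [B]
    assert determineMetaclass([A, B]) is type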
|
c0defreak/python-for-android
|
python-modules/zope/zope/interface/advice.py
|
Python
|
apache-2.0
| 7,245
|
[
"VisIt"
] |
2fd76a01410f04bc11e363572a1e7c387f2a94f2dda88ab1a9501b68c5ada31b
|
"""
Tests for transformer objects.
"""
from __future__ import division
from __future__ import unicode_literals
from deepchem.molnet import load_delaney
from deepchem.trans.transformers import FeaturizationTransformer
from deepchem.trans.transformers import DataTransforms
from tensorflow.examples.tutorials.mnist import input_data
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import unittest
import numpy as np
import pandas as pd
import deepchem as dc
import scipy.ndimage
class TestTransformers(unittest.TestCase):
"""
Test top-level API for transformer objects.
"""
def setUp(self):
super(TestTransformers, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
'''
init to load the MNIST data for DataTransforms Tests
'''
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# extracting validation set of MNIST for testing the DataTransforms
valid = dc.data.NumpyDataset(mnist.validation.images,
mnist.validation.labels)
# extract only the images (no need of the labels)
data = (valid.X)[0]
# reshaping the vector to image
data = np.reshape(data, (28, 28))
self.d = data
def test_y_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t, np.log(y + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_transform_unlabelled(self):
ul_dataset = dc.data.tests.load_unlabelled_data()
# transforming y should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_y=True).transform(ul_dataset)
# transforming w should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_w=True).transform(ul_dataset)
# transforming X should be okay
dc.trans.NormalizationTransformer(
transform_X=True, dataset=ul_dataset).transform(ul_dataset)
def test_X_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
    # Check X is now a logarithmic version of itself
np.testing.assert_allclose(X_t, np.log(X + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_log_transformer_select(self):
"""Tests logarithmic data transformer with selection."""
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
tid = []
tasklist = ["task0", "task3", "task4", "task5"]
first_task = "task0"
for task in tasklist:
tiid = dfe.columns.get_loc(task) - dfe.columns.get_loc(first_task)
tid = np.concatenate((tid, np.array([tiid])))
tasks = tid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_y=True, tasks=tasks, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t[:, tasks], np.log(y[:, tasks] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_X_log_transformer_select(self):
# Tests logarithmic data transformer with selection.
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
fid = []
featurelist = ["feat0", "feat1", "feat2", "feat3", "feat5"]
first_feature = "feat0"
for feature in featurelist:
fiid = dfe.columns.get_loc(feature) - dfe.columns.get_loc(first_feature)
fid = np.concatenate((fid, np.array([fiid])))
features = fid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_X=True, features=features, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
    # Check X is now a logarithmic version of itself
np.testing.assert_allclose(X_t[:, features], np.log(X[:, features] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that y_t has zero mean, unit std.
assert np.isclose(y_t.mean(), 0.)
assert np.isclose(y_t.std(), 1.)
# Check that untransform does the right thing.
np.testing.assert_allclose(normalization_transformer.untransform(y_t), y)
def test_X_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that X_t has zero mean, unit std.
# np.set_printoptions(threshold='nan')
mean = X_t.mean(axis=0)
assert np.amax(np.abs(mean - np.zeros_like(mean))) < 1e-7
orig_std_array = X.std(axis=0)
std_array = X_t.std(axis=0)
# Entries with zero std are not normalized
for orig_std, std in zip(orig_std_array, std_array):
if not np.isclose(orig_std, 0):
assert np.isclose(std, 1)
# TODO(rbharath): Untransform doesn't work properly for binary feature
# vectors. Need to figure out what's wrong here. (low priority)
## Check that untransform does the right thing.
# np.testing.assert_allclose(normalization_transformer.untransform(X_t), X)
def test_cdf_X_transformer(self):
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_X=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
sorted = np.sort(X_t, axis=0)
np.testing.assert_allclose(sorted, target)
def test_cdf_y_transformer(self):
# Test CDF transformer on Gaussian normal dataset.
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_y=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is an y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is an y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
sorted = np.sort(y_t, axis=0)
np.testing.assert_allclose(sorted, target)
# Check that untransform does the right thing.
np.testing.assert_allclose(cdf_transformer.untransform(y_t), y)
def test_clipping_X_transformer(self):
"""Test clipping transformer on X of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.ones((n_samples, n_features))
target = 5. * X
X *= 6.
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_X=True, x_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
np.testing.assert_allclose(X_t, target)
def test_clipping_y_transformer(self):
"""Test clipping transformer on y of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.zeros((n_samples, n_features))
y = np.ones((n_samples, n_tasks))
target = 5. * y
y *= 6.
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_y=True, y_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
np.testing.assert_allclose(y_t, target)
def test_power_X_transformer(self):
"""Test Power transformer on Gaussian normal dataset."""
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(
transform_X=True, powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values in each column.
np.testing.assert_allclose(X_t.shape[1], len(powers) * X.shape[1])
np.testing.assert_allclose(X, X_t[:, :2])
np.testing.assert_allclose(np.power(X, 2), X_t[:, 2:4])
np.testing.assert_allclose(np.power(X, 0.5), X_t[:, 4:])
def test_power_y_transformer(self):
"""Test Power transformer on Gaussian normal dataset."""
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(
transform_y=True, powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
    # Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
    # Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values in each column.
np.testing.assert_allclose(y_t.shape[1], len(powers) * y.shape[1])
np.testing.assert_allclose(y, y_t[:, :2])
np.testing.assert_allclose(np.power(y, 2), y_t[:, 2:4])
np.testing.assert_allclose(np.power(y, 0.5), y_t[:, 4:])
# Check that untransform does the right thing.
np.testing.assert_allclose(power_transformer.untransform(y_t), y)
def test_singletask_balancing_transformer(self):
"""Test balancing transformer on single-task dataset."""
classification_dataset = dc.data.tests.load_classification_data()
balancing_transformer = dc.trans.BalancingTransformer(
transform_w=True, dataset=classification_dataset)
X, y, w, ids = (classification_dataset.X, classification_dataset.y,
classification_dataset.w, classification_dataset.ids)
classification_dataset = balancing_transformer.transform(
classification_dataset)
X_t, y_t, w_t, ids_t = (classification_dataset.X, classification_dataset.y,
classification_dataset.w,
classification_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(classification_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(
np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_multitask_balancing_transformer(self):
"""Test balancing transformer on multitask dataset."""
multitask_dataset = dc.data.tests.load_multitask_data()
balancing_transformer = dc.trans.BalancingTransformer(
transform_w=True, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = balancing_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(multitask_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(
np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_coulomb_fit_transformer(self):
"""Test coulomb fit transformer on singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformer = dc.trans.CoulombFitTransformer(dataset)
X_t = fit_transformer.X_transform(dataset.X)
assert len(X_t.shape) == 2
def test_IRV_transformer(self):
n_features = 128
n_samples = 20
test_samples = 5
n_tasks = 2
X = np.random.randint(2, size=(n_samples, n_features))
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids=None)
X_test = np.random.randint(2, size=(test_samples, n_features))
y_test = np.zeros((test_samples, n_tasks))
w_test = np.ones((test_samples, n_tasks))
test_dataset = dc.data.NumpyDataset(X_test, y_test, w_test, ids=None)
sims = np.sum(
X_test[0, :] * X, axis=1, dtype=float) / np.sum(
np.sign(X_test[0, :] + X), axis=1, dtype=float)
sims = sorted(sims, reverse=True)
IRV_transformer = dc.trans.IRVTransformer(10, n_tasks, dataset)
test_dataset_trans = IRV_transformer.transform(test_dataset)
dataset_trans = IRV_transformer.transform(dataset)
assert test_dataset_trans.X.shape == (test_samples, 20 * n_tasks)
assert np.allclose(test_dataset_trans.X[0, :10], sims[:10])
assert np.allclose(test_dataset_trans.X[0, 10:20], [0] * 10)
assert not np.isclose(dataset_trans.X[0, 0], 1.)
def test_featurization_transformer(self):
fp_size = 2048
tasks, all_dataset, transformers = load_delaney('Raw')
train = all_dataset[0]
transformer = FeaturizationTransformer(
transform_X=True,
dataset=train,
featurizer=dc.feat.CircularFingerprint(size=fp_size))
new_train = transformer.transform(train)
self.assertEqual(new_train.y.shape, train.y.shape)
self.assertEqual(new_train.X.shape[-1], fp_size)
def test_blurring(self):
# Check Blurring
dt = DataTransforms(self.d)
blurred = dt.gaussian_blur(sigma=1.5)
check_blur = scipy.ndimage.gaussian_filter(self.d, 1.5)
assert np.allclose(check_blur, blurred)
def test_rotation(self):
# Check rotation
dt = DataTransforms(self.d)
angles = [0, 5, 10, 90]
for ang in angles:
rotate = dt.rotate(ang)
check_rotate = scipy.ndimage.rotate(self.d, ang)
assert np.allclose(rotate, check_rotate)
# Some more test cases for flip
rotate = dt.rotate(-90)
check_rotate = scipy.ndimage.rotate(self.d, 270)
assert np.allclose(rotate, check_rotate)
def test_flipping(self):
# Check flip
dt = DataTransforms(self.d)
flip_lr = dt.flip(direction="lr")
flip_ud = dt.flip(direction="ud")
check_lr = np.fliplr(self.d)
check_ud = np.flipud(self.d)
assert np.allclose(flip_ud, check_ud)
assert np.allclose(flip_lr, check_lr)
def test_scaling(self):
# Check Scales
dt = DataTransforms(self.d)
h = 150
w = 150
scale = scipy.misc.imresize(self.d, (h, w))
check_scale = dt.scale(h, w)
    assert np.allclose(scale, check_scale)
def test_shift(self):
# Check shift
dt = DataTransforms(self.d)
height = 5
width = 5
if len(self.d.shape) == 2:
shift = scipy.ndimage.shift(self.d, [height, width])
if len(self.d.shape) == 3:
shift = scipy.ndimage.shift(self.d, [height, width, 0])
check_shift = dt.shift(width, height)
assert np.allclose(shift, check_shift)
def test_gaussian_noise(self):
# check gaussian noise
dt = DataTransforms(self.d)
np.random.seed(0)
random_noise = self.d
random_noise = random_noise + np.random.normal(
loc=0, scale=25.5, size=self.d.shape)
np.random.seed(0)
check_random_noise = dt.gaussian_noise(mean=0, std=25.5)
assert np.allclose(random_noise, check_random_noise)
def test_salt_pepper_noise(self):
# check salt and pepper noise
dt = DataTransforms(self.d)
np.random.seed(0)
prob = 0.05
random_noise = self.d
noise = np.random.random(size=self.d.shape)
random_noise[noise < (prob / 2)] = 0
random_noise[noise > (1 - prob / 2)] = 255
np.random.seed(0)
check_random_noise = dt.salt_pepper_noise(prob, salt=255, pepper=0)
assert np.allclose(random_noise, check_random_noise)
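# Minimal numpy sketch (added for illustration; not part of deepchem) of the
# property the two balancing-transformer tests above verify: per task, sample
# weights are rescaled so the total weight of the 0-class matches the total
# weight of the 1-class, while zero-weight entries stay at zero.
def _balance_weights_sketch(y, w):
  w = w.astype(float).copy()
  for task in range(y.shape[1]):
    zeros = (y[:, task] == 0) & (w[:, task] != 0)
    ones = (y[:, task] == 1) & (w[:, task] != 0)
    if zeros.any() and ones.any():
      w[zeros, task] *= w[ones, task].sum() / w[zeros, task].sum()
  return w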
|
ktaneishi/deepchem
|
deepchem/trans/tests/test_transformers.py
|
Python
|
mit
| 24,790
|
[
"Gaussian"
] |
f1fa5e63bd736fd4da39ac9747c1a99c51c970544f085628ba5a0ba47fc836d4
|
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import subprocess
import tarfile
from testrunner.local import testsuite
from testrunner.objects import testcase
class BenchmarksTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(BenchmarksTestSuite, self).__init__(name, root)
self.testroot = root
def ListTests(self, context):
tests = []
for test in [
"kraken/ai-astar",
"kraken/audio-beat-detection",
"kraken/audio-dft",
"kraken/audio-fft",
"kraken/audio-oscillator",
"kraken/imaging-darkroom",
"kraken/imaging-desaturate",
"kraken/imaging-gaussian-blur",
"kraken/json-parse-financial",
"kraken/json-stringify-tinderbox",
"kraken/stanford-crypto-aes",
"kraken/stanford-crypto-ccm",
"kraken/stanford-crypto-pbkdf2",
"kraken/stanford-crypto-sha256-iterative",
"octane/box2d",
"octane/code-load",
"octane/crypto",
"octane/deltablue",
"octane/earley-boyer",
"octane/gbemu",
"octane/mandreel",
"octane/navier-stokes",
"octane/pdfjs",
"octane/raytrace",
"octane/regexp",
"octane/richards",
"octane/splay",
"sunspider/3d-cube",
"sunspider/3d-morph",
"sunspider/3d-raytrace",
"sunspider/access-binary-trees",
"sunspider/access-fannkuch",
"sunspider/access-nbody",
"sunspider/access-nsieve",
"sunspider/bitops-3bit-bits-in-byte",
"sunspider/bitops-bits-in-byte",
"sunspider/bitops-bitwise-and",
"sunspider/bitops-nsieve-bits",
"sunspider/controlflow-recursive",
"sunspider/crypto-aes",
"sunspider/crypto-md5",
"sunspider/crypto-sha1",
"sunspider/date-format-tofte",
"sunspider/date-format-xparb",
"sunspider/math-cordic",
"sunspider/math-partial-sums",
"sunspider/math-spectral-norm",
"sunspider/regexp-dna",
"sunspider/string-base64",
"sunspider/string-fasta",
"sunspider/string-tagcloud",
"sunspider/string-unpack-code",
"sunspider/string-validate-input"]:
tests.append(testcase.TestCase(self, test))
return tests
def GetFlagsForTestCase(self, testcase, context):
result = []
result += context.mode_flags
if testcase.path.startswith("kraken"):
result.append(os.path.join(self.testroot, "%s-data.js" % testcase.path))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
elif testcase.path.startswith("octane"):
result.append(os.path.join(self.testroot, "octane/base.js"))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
result += ["-e", "BenchmarkSuite.RunSuites({});"]
elif testcase.path.startswith("sunspider"):
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
return testcase.flags + result
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
with open(filename) as f:
return f.read()
def _DownloadIfNecessary(self, url, revision, target_dir):
# Maybe we're still up to date?
revision_file = "CHECKED_OUT_%s" % target_dir
checked_out_revision = None
if os.path.exists(revision_file):
with open(revision_file) as f:
checked_out_revision = f.read()
if checked_out_revision == revision:
return
# If we have a local archive file with the test data, extract it.
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
archive_file = "downloaded_%s_%s.tar.gz" % (target_dir, revision)
if os.path.exists(archive_file):
with tarfile.open(archive_file, "r:gz") as tar:
tar.extractall()
with open(revision_file, "w") as f:
f.write(revision)
return
# No cached copy. Check out via SVN, and pack as .tar.gz for later use.
command = "svn co %s -r %s %s" % (url, revision, target_dir)
code = subprocess.call(command, shell=True)
if code != 0:
raise Exception("Error checking out %s benchmark" % target_dir)
with tarfile.open(archive_file, "w:gz") as tar:
tar.add("%s" % target_dir)
with open(revision_file, "w") as f:
f.write(revision)
def DownloadData(self):
old_cwd = os.getcwd()
os.chdir(os.path.abspath(self.root))
self._DownloadIfNecessary(
("http://svn.webkit.org/repository/webkit/trunk/PerformanceTests/"
"SunSpider/tests/sunspider-1.0/"),
"153700", "sunspider")
self._DownloadIfNecessary(
("http://kraken-mirror.googlecode.com/svn/trunk/kraken/tests/"
"kraken-1.1/"),
"8", "kraken")
self._DownloadIfNecessary(
"http://octane-benchmark.googlecode.com/svn/trunk/",
"22", "octane")
os.chdir(old_cwd)
def VariantFlags(self):
# Both --nocrankshaft and --stressopt are very slow.
return [[]]
def GetSuite(name, root):
return BenchmarksTestSuite(name, root)
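# Standalone sketch (added for illustration; not used by the test runner) of the
# caching rule _DownloadIfNecessary applies: a benchmark checkout is reused when
# its CHECKED_OUT_<dir> marker file already records the requested revision.
def _is_checkout_current(target_dir, revision):
  revision_file = "CHECKED_OUT_%s" % target_dir
  if not os.path.exists(revision_file):
    return False
  with open(revision_file) as f:
    return f.read() == revision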
|
x684867/nemesis
|
src/node/deps/v8/test/benchmarks/testcfg.py
|
Python
|
mit
| 6,609
|
[
"Gaussian"
] |
b27fe37598af4c908cace8dbbe204208eab962e2c24ce0aa2cd3261ac149ddeb
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Add Chain",
"author": "Brian Hinton (Nichod)",
"version": (0, 1, 1),
"blender": (2, 71, 0),
"location": "Toolshelf > Create Tab",
"description": "Adds Chain with curve guide for easy creation",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Object/Add_Chain",
"category": "Object",
}
import bpy
from bpy.types import Operator, Panel
def Add_Chain():
##Adds Empty to scene
bpy.ops.object.add(type='EMPTY',
view_align=False,
enter_editmode=False,
location=(0, 0, 0),
rotation=(0, 0, 0),
)
##Changes name of Empty to rot_link adds variable emp
emp = bpy.context.object
emp.name = "rot_link"
##Rotate emp ~ 90 degrees
emp.rotation_euler = [1.570796, 0, 0]
##Adds Curve Path to scene
bpy.ops.curve.primitive_nurbs_path_add(view_align=False,
enter_editmode=False,
location=(0, 0, 0),
rotation=(0, 0, 0),
)
##Change Curve name to deform adds variable curv
curv = bpy.context.object
curv.name = "deform"
##Inserts Torus primitive
bpy.ops.mesh.primitive_torus_add(major_radius=1,
minor_radius=0.25,
major_segments=12,
minor_segments=4,
abso_major_rad=1,
abso_minor_rad=0.5,
)
##Positions Torus primitive to center of scene
bpy.context.active_object.location = 0.0, 0.0, 0.0
    ##Resetting Torus rotation in case of 'Align to view' option enabled
bpy.context.active_object.rotation_euler = 0.0, 0.0, 0.0
##Changes Torus name to chain adds variable tor
tor = bpy.context.object
tor.name = "chain"
##Adds Array Modifier to tor
bpy.ops.object.modifier_add(type='ARRAY')
##Adds subsurf modifier tor
bpy.ops.object.modifier_add(type='SUBSURF')
##Smooths tor
bpy.ops.object.shade_smooth()
##Select curv
sce = bpy.context.scene
sce.objects.active = curv
##Toggle into editmode
bpy.ops.object.editmode_toggle()
## TODO, may be better to move objects directly.
##Translate curve object
bpy.ops.transform.translate(value=(2, 0, 0),
constraint_axis=(True, False, False),
constraint_orientation='GLOBAL',
mirror=False,
proportional='DISABLED',
proportional_edit_falloff='SMOOTH',
proportional_size=1,
snap=False,
snap_target='CLOSEST',
snap_point=(0, 0, 0),
snap_align=False,
snap_normal=(0, 0, 0),
release_confirm=False,
)
##Toggle into objectmode
bpy.ops.object.editmode_toggle()
##Select tor or chain
sce.objects.active = tor
##Selects Array Modifier for editing
array = tor.modifiers['Array']
##Change Array Modifier Parameters
array.fit_type = 'FIT_CURVE'
array.curve = curv
array.offset_object = emp
array.use_object_offset = True
array.relative_offset_displace = 0.549, 0.0, 0.0
##Add curve modifier
bpy.ops.object.modifier_add(type='CURVE')
##Selects Curve Modifier for editing
cur = tor.modifiers['Curve']
##Change Curve Modifier Parameters
cur.object = curv
class AddChain(bpy.types.Operator):
"""Add a Chain"""
bl_idname = "mesh.primitive_chain_add"
bl_label = "Add Chain"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
Add_Chain()
return {'FINISHED'}
class add_chain(Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'Create'
bl_label = "Add Chain"
bl_context = "objectmode"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
layout.operator(AddChain.bl_idname, text="Chain")
def register():
bpy.utils.register_module(__name__)
pass
def unregister():
bpy.utils.unregister_module(__name__)
pass
if __name__ == "__main__":
register()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons/object_add_chain.py
|
Python
|
gpl-3.0
| 5,456
|
[
"Brian"
] |
aca8d807e367e92183f029d83301c32a34eab1aba7521ee43b26bf1857b886c6
|
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************************
espressopp.integrator.LangevinThermostatOnGroup
***********************************************
Thermalize particles in the ParticleGroup only.
.. function:: espressopp.integrator.LangevinThermostatOnGroup(system, particle_group)
:param system: The system object.
:type system: espressopp.System
:param particle_group: The particle group.
:type particle_group: espressopp.ParticleGroup
Example
###########
>>> pg = espressopp.ParticleGroup(system.storage)
>>> for pid in range(10):
>>> pg.add(pid)
>>> thermostat = espressopp.integrator.LangevinThermostatOnGroup(system, pg)
>>> thermostat.temperature = 1.0
>>> thermostat.gamma = 1.0
>>> integrator.addExtension(thermostat)
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_LangevinThermostatOnGroup
class LangevinThermostatOnGroupLocal(ExtensionLocal, integrator_LangevinThermostatOnGroup):
def __init__(self, system, particle_group):
if pmi.workerIsActive():
cxxinit(self, integrator_LangevinThermostatOnGroup, system, particle_group)
if pmi.isController:
class LangevinThermostatOnGroup(Extension, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.integrator.LangevinThermostatOnGroupLocal',
pmiproperty = [ 'gamma', 'temperature']
)
|
espressopp/espressopp
|
src/integrator/LangevinThermostatOnGroup.py
|
Python
|
gpl-3.0
| 2,223
|
[
"ESPResSo"
] |
83c3e60fd33ffa696b237305cb3acbb78af08b50591a17380952967c5ee74616
|
#!/usr/bin/env python
import vtk
import numpy as np
from vmtk import vmtkscripts
from scipy.stats import skew
import argparse
import copy
# evaluate the probed surface to get average values mapped to original segmentation
def Execute(args):
print("get average along line probes")
reader_lines = vmtkscripts.vmtkSurfaceReader()
reader_lines.InputFileName = args.lines_file
reader_lines.Execute()
lines_surface = reader_lines.Surface
n_cells = lines_surface.GetNumberOfCells()
n_pts = lines_surface.GetCell(0).GetNumberOfPoints()
lines = np.empty((n_cells, n_pts))
pts = np.empty((n_cells, 3))
da = lines_surface.GetPointData().GetArray("NRRDImage")
    for i in range(n_cells):
        cellids = lines_surface.GetCell(i).GetPointIds()
        #n_pts = cell.GetNumberOfPoints()
        for j in range(n_pts):
            if(j == n_pts // 2):
                # remember the mid-point of each probe line to map results back onto the surface
                pts[i,:] = np.array(lines_surface.GetPoint(cellids.GetId(j)))
            lines[i, j] = da.GetTuple(cellids.GetId(j))[0]
ln_avg = np.average(lines, axis=1)
ln_std = np.std(lines, axis=1, ddof=1)
ln_skew = skew(lines, axis=1, bias=False)
avg_min = ln_avg.min()
ln_avg_norm = (ln_avg + avg_min) / (ln_avg.max() + avg_min)
    # weighted average: weight samples with a Gaussian centered at the line mid-point
    # (mean 0, sigma = half the slice-thickness parameter) so values near the surface dominate
x = np.linspace(-args.slice_thickness, args.slice_thickness, lines.shape[1])
std = args.slice_thickness/2.0
mean = 0.0
dist = 1.0/np.sqrt(2.0*np.pi*std**2)*np.exp(-(x-mean)**2/(2.0*std**2))
ln_avg_weight = np.average(lines, axis=1, weights = dist)
reader_surface = vmtkscripts.vmtkSurfaceReader()
reader_surface.InputFileName = args.surface_file
reader_surface.Execute()
Surface = reader_surface.Surface
#Create the tree
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(Surface)
pointLocator.BuildLocator()
array = vtk.vtkDoubleArray()
array.SetNumberOfComponents(n_pts)
array.SetName("rawImageSamples")
array.SetNumberOfTuples(n_cells)
avg = vtk.vtkDoubleArray()
avg.SetNumberOfComponents(1)
avg.SetName("avgSample")
avg.SetNumberOfTuples(n_cells)
avg_norm = vtk.vtkDoubleArray()
avg_norm.SetNumberOfComponents(1)
avg_norm.SetName("normalized")
avg_norm.SetNumberOfTuples(n_cells)
stddev = vtk.vtkDoubleArray()
stddev.SetNumberOfComponents(1)
stddev.SetName("stddev")
stddev.SetNumberOfTuples(n_cells)
skewness = vtk.vtkDoubleArray()
skewness.SetNumberOfComponents(1)
skewness.SetName("skewness")
skewness.SetNumberOfTuples(n_cells)
weighted_avg = vtk.vtkDoubleArray()
weighted_avg.SetNumberOfComponents(1)
weighted_avg.SetName("weighted_average")
weighted_avg.SetNumberOfTuples(n_cells)
for i in range(n_cells):
surf_id = pointLocator.FindClosestPoint(pts[i])
#print(ln_avg.shape)
avg.SetValue(surf_id, ln_avg[i])
array.SetTuple(surf_id, list(lines[i,:]))
avg_norm.SetValue(surf_id, ln_avg_norm[i])
stddev.SetValue(surf_id, ln_std[i])
skewness.SetValue(surf_id, ln_skew[i])
weighted_avg.SetValue(surf_id, ln_avg_weight[i])
Surface.GetPointData().AddArray(avg)
Surface.GetPointData().AddArray(array)
Surface.GetPointData().AddArray(avg_norm)
Surface.GetPointData().AddArray(stddev)
Surface.GetPointData().AddArray(skewness)
Surface.GetPointData().AddArray(weighted_avg)
writer = vmtkscripts.vmtkSurfaceWriter()
writer.OutputFileName = args.file_out
writer.Input = Surface
writer.Execute()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='average probed information along lines')
parser.add_argument("-i", dest="surface_file", required=True, help="input surface file", metavar="FILE")
parser.add_argument("-l", dest="lines_file", required=True, help="input file with probed lines", metavar="FILE")
parser.add_argument("-o", dest="file_out", required=True, help="output file with averages probed lines", metavar="FILE")
parser.add_argument("-t", '--thickness', dest="slice_thickness", type=float, help='half thickness of lines ', default=0.5625)
args = parser.parse_args()
#print(args)
Execute(args)
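# Example invocation (a sketch; the file names are placeholders, not files shipped
# with this script -- the flags come from the argparse definitions above):
#
#     python avg_probe_surface.py -i segmentation.vtp -l probed_lines.vtp \
#         -o segmentation_with_stats.vtp -t 0.5625
#
# The output surface carries the per-point arrays added above (avgSample, stddev,
# skewness, weighted_average, ...) for inspection in ParaView or similar tools.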
|
kayarre/Tools
|
vmtk/avg_probe_surface.py
|
Python
|
bsd-2-clause
| 4,337
|
[
"VTK"
] |
c45f922f81d0f2691853aa2eecb9f5bf412c7728011bcba4fc14532b39132583
|
from __future__ import print_function, division
import os
import numpy as np
try:
asstr = np.compat.asstr
except AttributeError: # For Numpy 1.4.1
import sys
if sys.version_info[0] >= 3:
def asstr(s):
if isinstance(s, bytes):
return s.decode('latin1')
return str(s)
else:
asstr = str
from .exceptions import TableException
from .decorators import auto_download_to_file, auto_decompress_to_fileobj, auto_fileobj_to_file
try:
import h5py
h5py_installed = True
except:
h5py_installed = False
STRING_TYPES = [bytes, np.string_, str]
try:
STRING_TYPES.append(np.bytes_)
except AttributeError:
pass
try:
    STRING_TYPES.append(unicode)
except NameError:
    pass
try:  # Python 3 compatibility: basestring was removed, fall back to str
    basestring
except NameError:
    basestring = str
def _check_h5py_installed():
if not h5py_installed:
raise Exception("Cannot read/write HDF5 files - h5py required")
def _get_group(filename, group="", append=False):
if append:
f = h5py.File(filename, 'a')
else:
f = h5py.File(filename, 'w')
if group:
if append:
if group in f.keys():
g = f[group]
else:
g = f.create_group(group)
else:
g = f.create_group(group)
else:
g = f
return f, g
def _create_required_groups(g, path):
'''
Given a file or group handle, and a path, make sure that the specified
path exists and create if necessary.
'''
for dirname in path.split('/'):
if not dirname in g:
g = g.create_group(dirname)
else:
g = g[dirname]
def _list_tables(file_handle):
list_of_names = []
file_handle.visit(list_of_names.append)
tables = {}
for item in list_of_names:
if isinstance(file_handle[item], h5py.highlevel.Dataset):
if file_handle[item].dtype.names:
tables[item] = item
return tables
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, table=None, verbose=True):
'''
Read a table from an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to read the table from
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to read the table from
Optional Keyword Arguments:
*table*: [ string ]
The name of the table to read from the HDF5 file (this is only
required if there are more than one table in the file)
'''
_check_h5py_installed()
self.reset()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
else:
if not os.path.exists(filename):
raise Exception("File not found: %s" % filename)
f = h5py.File(filename, 'r')
g = f['/']
# If no table is requested, check that there is only one table
if table is None:
tables = _list_tables(g)
if len(tables) == 1:
            table = list(tables.keys())[0]
else:
raise TableException(tables, 'table')
# Set the table name
self.table_name = str(table)
self._setup_table(len(g[table]), g[table].dtype)
# Add columns to table
for name in g[table].dtype.names:
self.data[name][:] = g[table][name][:]
for attribute in g[table].attrs:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8
if type(g[table].attrs[attribute]) in STRING_TYPES:
self.add_keyword(attribute, asstr(g[table].attrs[attribute]))
else:
self.add_keyword(attribute, g[table].attrs[attribute])
if f is not None:
f.close()
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read_set(self, filename, pedantic=False, verbose=True):
'''
Read all tables from an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to read the tables from
'''
_check_h5py_installed()
self.reset()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
else:
if not os.path.exists(filename):
raise Exception("File not found: %s" % filename)
f = h5py.File(filename, 'r')
g = f['/']
for keyword in g.attrs:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8
if type(g.attrs[keyword]) in STRING_TYPES:
self.keywords[keyword] = asstr(g.attrs[keyword])
else:
self.keywords[keyword] = g.attrs[keyword]
from .basetable import Table
for table in _list_tables(g):
t = Table()
read(t, filename, table=table, verbose=verbose)
self.append(t)
if f is not None:
f.close()
def write(self, filename, compression=False, group="", append=False,
overwrite=False, ignore_groups=False):
'''
Write the table to an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to write the table to
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to write the table to
Optional Keyword Arguments:
*compression*: [ True | False ]
Whether to compress the table inside the HDF5 file
*group*: [ string ]
The group to write the table to inside the HDF5 file
*append*: [ True | False ]
Whether to append the table to an existing HDF5 file
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
*ignore_groups*: [ True | False ]
With this option set to True, groups are removed from table names.
With this option set to False, tables are placed in groups that
are present in the table name, and the groups are created if
necessary.
'''
_check_h5py_installed()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
if group:
if group in g:
g = g[group]
else:
g = g.create_group(group)
else:
if os.path.exists(filename) and not append:
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
f, g = _get_group(filename, group=group, append=append)
if self.table_name:
name = self.table_name
else:
name = "Table"
if ignore_groups:
name = os.path.basename(name)
else:
path = os.path.dirname(name)
if path:
_create_required_groups(g, path)
if name in g.keys():
raise Exception("Table %s/%s already exists" % (group, name))
dset = g.create_dataset(name, data=self.data, compression=compression)
for keyword in self.keywords:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8. In addition, we have to use
# np.string_ to ensure that fixed-length attributes are used.
if isinstance(self.keywords[keyword], basestring):
dset.attrs[keyword] = np.string_(self.keywords[keyword])
else:
dset.attrs[keyword] = self.keywords[keyword]
if f is not None:
f.close()
def write_set(self, filename, compression=False, group="", append=False,
overwrite=False, ignore_groups=False, **kwargs):
'''
Write the tables to an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to write the tables to
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to write the tables to
Optional Keyword Arguments:
*compression*: [ True | False ]
Whether to compress the tables inside the HDF5 file
*group*: [ string ]
The group to write the table to inside the HDF5 file
*append*: [ True | False ]
Whether to append the tables to an existing HDF5 file
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
*ignore_groups*: [ True | False ]
With this option set to True, groups are removed from table names.
With this option set to False, tables are placed in groups that
are present in the table name, and the groups are created if
necessary.
'''
_check_h5py_installed()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
if group:
if group in g:
g = g[group]
else:
g = g.create_group(group)
else:
if os.path.exists(filename) and not append:
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
f, g = _get_group(filename, group=group, append=append)
for keyword in self.keywords:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8. In addition, we have to use
# np.string_ to ensure that fixed-length attributes are used.
if isinstance(self.keywords[keyword], basestring):
g.attrs[keyword] = np.string_(self.keywords[keyword])
else:
g.attrs[keyword] = self.keywords[keyword]
for i, table_key in enumerate(self.tables):
if self.tables[table_key].table_name:
name = self.tables[table_key].table_name
else:
name = "Table_%02i" % i
if ignore_groups:
name = os.path.basename(name)
else:
path = os.path.dirname(name)
if path:
_create_required_groups(g, path)
if name in g.keys():
raise Exception("Table %s/%s already exists" % (group, name))
dset = g.create_dataset(name, data=self.tables[table_key].data, compression=compression)
for keyword in self.tables[table_key].keywords:
# Due to a bug in HDF5, in order to get this to work in Python 3, we
# need to encode string values in utf-8. In addition, we have to use
# np.string_ to ensure that fixed-length attributes are used.
if isinstance(self.tables[table_key].keywords[keyword], basestring):
dset.attrs[keyword] = np.string_(self.tables[table_key].keywords[keyword])
else:
dset.attrs[keyword] = self.tables[table_key].keywords[keyword]
if f is not None:
f.close()
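# Minimal usage sketch (assumptions: atpy and h5py are installed and 'data.hdf5'
# exists; the file and table names are illustrative). atpy wires the functions above
# into Table/TableSet, so reading and writing looks like:
#
#     import atpy
#     t = atpy.Table('data.hdf5', table='observations')       # dispatches to read()
#     t.write('copy.hdf5', compression=True, overwrite=True)  # dispatches to write()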
|
atpy/atpy
|
atpy/hdf5table.py
|
Python
|
mit
| 11,069
|
[
"VisIt"
] |
83f5b3e9dcb7eff96196b7b4b6499df0e993bbf2a282ef30cce85b76fb8fb178
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
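# Illustrative access to the summary fields above (a sketch, assuming `model` is a
# fitted clustering model such as a KMeansModel with model.hasSummary == True):
#
#     summary = model.summary
#     print(summary.k, summary.clusterSizes, summary.numIter)
#     summary.predictions.select(summary.featuresCol, summary.predictionCol).show()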
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
        .. note:: Deprecated in 2.4.0. It will be removed in 3.0.0. Use ClusteringEvaluator instead.
You can also get the cost on the training dataset in the summary.
"""
warnings.warn("Deprecated in 2.4.0. It will be removed in 3.0.0. Use ClusteringEvaluator "
"instead. You can also get the cost on the training dataset in the summary.",
DeprecationWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class KMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol, HasMaxIter,
HasTol, HasSeed, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
2.000...
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure`
"""
return self.getOrDefault(self.distanceMeasure)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol,
HasMaxIter, HasSeed, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure` or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
pass
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction allows for different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
        :return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
Currently only support 'em' and 'online'.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
<a href=http://www.icml2010.org/papers/387.pdf>Lin and Cohen</a>. From the abstract:
PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. seealso:: `Wikipedia on Spectral clustering \
<http://en.wikipedia.org/wiki/Spectral_clustering>`_
>>> data = [(1, 0, 0.5), \
(2, 0, 0.5), (2, 1, 0.7), \
(3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9), \
(4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1), \
(5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight")
>>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |1 |
|1 |1 |
|2 |1 |
|3 |1 |
|4 |1 |
|5 |0 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
.. versionadded:: 2.4.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and returns a cluster assignment for each input vertex.
:param dataset:
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, the weight column value is similarity s_ij
            which must be nonnegative. This is a symmetric matrix and hence
            s_ij = s_ji. For any (i, j) with nonzero similarity, there should be
            either (i, j, s_ij) or (j, i, s_ji) in the input. Rows with i = j are
            ignored, because we assume s_ij = 0.0.
:return:
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
        # Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
rikima/spark
|
python/pyspark/ml/clustering.py
|
Python
|
apache-2.0
| 50,292
|
[
"Gaussian"
] |
22437810ccd0e2896959e12a4ba96e0747159990dee7d370654e891dd8d7ed85
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDeseq(RPackage):
"""Differential gene expression analysis based on the negative binomial
distribution
Estimate variance-mean dependence in count data from high-throughput
sequencing assays and test for differential expression based on a model
using the negative binomial distribution"""
homepage = "https://bioconductor.org/packages/DESeq"
git = "https://git.bioconductor.org/packages/DESeq.git"
version('1.42.0', commit='da76bc64e8c4073b58eaf1c93aa4e89bec5c4e50')
version('1.36.0', commit='db4af67b49d3bd8c321d19efbe9415cd2e4ddb7e')
version('1.34.1', commit='e86f1b03a30bc02de4bfd4a0759af2f65cb48c62')
version('1.32.0', commit='e3d623b815b53d79eae7cdd09d097cc6098d28c9')
version('1.30.0', commit='90c93d991dd980d538c13b0361d3345f9546794e')
version('1.28.0', commit='738371466e6ccf00179fd35b617c8ba0e1e91630')
depends_on('r-biocgenerics@0.7.5:', type=('build', 'run'))
depends_on('r-biobase@2.21.7:', type=('build', 'run'))
depends_on('r-locfit', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-genefilter', type=('build', 'run'))
depends_on('r-geneplotter', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
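# Hedged usage sketch (assumes a working Spack installation; the version pin is one
# of the versions declared above):
#
#     spack install r-deseq@1.42.0
#     spack load r-deseq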
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-deseq/package.py
|
Python
|
lgpl-2.1
| 1,546
|
[
"Bioconductor"
] |
643543456af0faf95a016b4bfd4f1cfc0c9e5d004b2cb59d0835dc11a6c711f1
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements abstract base classes for post-processing entries.
Any class which modifies entries should inherit these classes.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Oct 6, 2011"
import abc
class EntryPostProcessor(metaclass=abc.ABCMeta):
@abc.abstractmethod
def process_entry(self, entry):
"""
Process a single entry.
Args:
entry: An ComputedEntry object.
Returns:
            A processed entry. None if the entry is not compatible with the
processing scheme.
"""
return
@abc.abstractmethod
def process_entries(self, entries):
"""
Process a sequence of entries.
Args:
entries: A sequence of ComputedEntries.
Returns:
            A list of processed entries. ComputedEntries in the original list
which are not compatible with the processing scheme are excluded.
"""
return
@property
@abc.abstractmethod
def corrected_compound_formulas(self):
"""
List of compound formulas that are corrected.
"""
return
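# Hedged illustration (not part of the original module): a minimal concrete subclass
# might look like the sketch below; the class name and its no-op behaviour are
# assumptions made purely for demonstration.
class _IdentityPostProcessor(EntryPostProcessor):
    """Example processor that accepts every entry unchanged."""
    def process_entry(self, entry):
        # A real processor would apply corrections here; this one is a pass-through.
        return entry
    def process_entries(self, entries):
        # Keep every entry whose processed form is not None (all of them, here).
        return [e for e in entries if self.process_entry(e) is not None]
    @property
    def corrected_compound_formulas(self):
        # No corrections are applied, so no formulas are reported.
        return []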
|
dongsenfo/pymatgen
|
pymatgen/entries/post_processors_abc.py
|
Python
|
mit
| 1,391
|
[
"pymatgen"
] |
76c909446890af1fee0f2cbbd9eb14b384a48f492b7ee71b1854fef06d894479
|
"""
ElasticSearch wrapper
"""
import logging
from cmreslogging.handlers import CMRESHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
class ElasticSearchBackend(AbstractBackend):
"""
    ElasticSearchBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have a CMRESHandler which is part of an external library named 'cmreslogging' based on 'logging'.
CMRESHandler is a specific handler created to send log records to an ElasticSearch DB. It does not need a Formatter
object.
"""
def __init__(self, backendParams=None):
"""
CMRESHandler needs, at least, a hostname, a username, a password, a port and a specific index
from the ElasticSearch DB to send log records.
"""
# We give a format containing only asctime to add the field in elasticsearch
# asctime is not created at the initialization of the LogRecords but built in the format process
if not backendParams:
backendParams = {}
backendParams["Format"] = "%(asctime)s"
super(ElasticSearchBackend, self).__init__(CMRESHandler, logging.Formatter, backendParams)
def _setHandlerParameters(self, backendParams=None):
"""
Get the handler parameters from the backendParams.
The keys of handlerParams should correspond to the parameter names of the associated handler.
The method should be overridden in every backend that needs handler parameters.
The method should be called before creating the handler object.
        :param dict backendParams: parameters of the backend, e.g. {'FileName': 'file.log'}
"""
# fixed parameters
self._handlerParams["use_ssl"] = True
self._handlerParams["verify_ssl"] = True
self._handlerParams["auth_type"] = CMRESHandler.AuthType.NO_AUTH
# variable parameters
self._handlerParams["es_index_name"] = ""
self._handlerParams["buffer_size"] = 1000
self._handlerParams["flush_frequency_in_sec"] = 1
user = None
password = None
host = ""
port = 9203
if backendParams is not None:
self._handlerParams["es_index_name"] = backendParams.get("Index", self._handlerParams["es_index_name"])
self._handlerParams["buffer_size"] = backendParams.get("BufferSize", self._handlerParams["buffer_size"])
self._handlerParams["flush_frequency_in_sec"] = backendParams.get(
"FlushTime", self._handlerParams["flush_frequency_in_sec"]
)
user = backendParams.get("User", user)
password = backendParams.get("Password", password)
if user is not None and password is not None:
self._handlerParams["auth_type"] = CMRESHandler.AuthType.BASIC_AUTH
self._handlerParams["auth_details"] = (user, password)
host = backendParams.get("Host", host)
port = int(backendParams.get("Port", port))
self._handlerParams["hosts"] = [{"host": host, "port": port}]
|
DIRACGrid/DIRAC
|
src/DIRAC/Resources/LogBackends/ElasticSearchBackend.py
|
Python
|
gpl-3.0
| 3,113
|
[
"DIRAC"
] |
f661632aac3ed6781fe2bbc95573c710421e52bbf4493e1bf1132a8a1205ace4
|
# -*- coding:utf-8 -*-
# @author xupingmao
# @since 2016/12/09
# @modified 2021/12/05 11:23:34
"""xnote工具类总入口
xutils是暴露出去的统一接口,类似于windows.h一样
建议通过xutils暴露统一接口,其他的utils由xutils导入
"""
from __future__ import print_function
from __future__ import absolute_import
from threading import current_thread
from xutils.imports import *
# xnote utilities
import xutils.textutil as textutil
import xutils.ziputil as ziputil
import xutils.fsutil as fsutil
import xutils.logutil as logutil
import xutils.dateutil as dateutil
import xutils.htmlutil as htmlutil
from xutils.ziputil import *
from xutils.netutil import splithost, http_get, http_post
from xutils.textutil import *
from xutils.dateutil import *
from xutils.netutil import *
from xutils.fsutil import *
from xutils.cacheutil import cache, cache_get, cache_put, cache_del
from xutils.functions import History, MemTable, listremove
# TODO: xutils is the lowest-level library; the dependency on xconfig will be removed later, and xutils will expose configuration functions so that configuration happens in higher layers.
from xutils.base import Storage
from xutils.logutil import *
from xutils.webutil import *
from xutils.exeutil import *
from xutils.func_util import *
import shutil
import logging
import logging.handlers
FS_IMG_EXT_LIST = None
FS_TEXT_EXT_LIST = None
FS_AUDIO_EXT_LIST = None
FS_CODE_EXT_LIST = None
IS_TEST = False
#################################################################
wday_map = {
"no-repeat": "一次性",
"*": "每天",
"1": "周一",
"2": "周二",
"3": "周三",
"4": "周四",
"5": "周五",
"6": "周六",
"7": "周日"
}
def print_exc():
"""打印系统异常堆栈"""
ex_type, ex, tb = sys.exc_info()
exc_info = traceback.format_exc()
print(exc_info)
return exc_info
def print_stacktrace():
print_exc()
def print_table_row(row, max_length):
for item in row:
print(str(item)[:max_length].ljust(max_length), end='')
print('')
def print_table(data, max_length=20, headings = None, ignore_attrs = None):
    '''Print tabular data.'''
if len(data) == 0:
return
if headings is None:
headings = list(data[0].keys())
if ignore_attrs:
for key in ignore_attrs:
if key in headings:
headings.remove(key)
print_table_row(headings, max_length)
for item in data:
row = map(lambda key:item.get(key), headings)
print_table_row(row, max_length)
class SearchResult(dict):
def __init__(self, name=None, url='#', raw=None):
self.name = name
self.url = url
self.raw = raw
def __getattr__(self, key):
try:
return self[key]
except KeyError as k:
return None
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError as k:
raise AttributeError(k)
#################################################################
## File System Utilities
## @see fsutil
#################################################################
def do_check_file_type(filename, target_set):
"""根据文件后缀判断是否是图片"""
if filename.endswith(".x0"):
filename = fsutil.decode_name(filename)
name, ext = os.path.splitext(filename)
return ext.lower() in target_set
def is_img_file(filename):
"""根据文件后缀判断是否是图片"""
return do_check_file_type(filename, FS_IMG_EXT_LIST)
def is_text_file(filename):
"""根据文件后缀判断是否是文本文件"""
return do_check_file_type(filename, FS_TEXT_EXT_LIST)
def is_audio_file(filename):
return do_check_file_type(filename, FS_AUDIO_EXT_LIST)
def is_code_file(filename):
return do_check_file_type(filename, FS_CODE_EXT_LIST)
def get_text_ext():
return FS_TEXT_EXT_LIST
def is_editable(fpath):
return is_text_file(fpath) or is_code_file(fpath)
def attrget(obj, attr, default_value = None):
if hasattr(obj, attr):
return getattr(obj, attr, default_value)
else:
return default_value
### DB Utilities
def db_execute(path, sql, args = None):
from xutils.base import Storage
db = sqlite3.connect(path)
cursorobj = db.cursor()
kv_result = []
try:
# print(sql)
if args is None:
cursorobj.execute(sql)
else:
cursorobj.execute(sql, args)
result = cursorobj.fetchall()
# result.rowcount
db.commit()
for single in result:
resultMap = Storage()
for i, desc in enumerate(cursorobj.description):
name = desc[0]
resultMap[name] = single[i]
kv_result.append(resultMap)
except Exception as e:
raise e
finally:
db.close()
return kv_result
#################################################################
## Str Utilities
#################################################################
def json_str(**kw):
return json.dumps(kw)
def decode_bytes(bytes):
for encoding in ["utf-8", "gbk", "mbcs", "latin_1"]:
try:
return bytes.decode(encoding)
except:
pass
return None
def obj2dict(obj):
v = {}
for k in dir(obj):
if k[0] != '_':
v[k] = getattr(obj, k)
return v
def get_safe_file_name(filename):
"""处理文件名中的特殊符号"""
for c in " @$:#\\|":
filename = filename.replace(c, "_")
return filename
#################################################################
## Platform/OS Utilities, Python 2 do not have this file
#################################################################
def system(cmd, cwd = None):
p = subprocess.Popen(cmd, cwd=cwd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# out = p.stdout.read()
# err = p.stderr.read()
# if PY2:
# encoding = sys.getfilesystemencoding()
# os.system(cmd.encode(encoding))
# else:
# os.system(cmd)
def is_windows():
return os.name == "nt"
def is_mac():
return platform.system() == "Darwin"
def is_linux():
    # os.name is "posix" on Linux, so check the platform name instead
    return platform.system() == "Linux"
def mac_say(msg):
def escape(str):
new_str_list = ['"']
for c in str:
if c != '"':
new_str_list.append(c)
else:
new_str_list.append('\\"')
new_str_list.append('"')
return ''.join(new_str_list)
msglist = re.split(r"[,.;?!():,。?!;:\n\"'<>《》\[\]]", msg)
for m in msglist:
m = m.strip()
if m == "":
continue
cmd = u("say %s") % escape(m)
trace("MacSay", cmd)
os.system(cmd.encode("utf-8"))
def windows_say(msg):
try:
import comtypes.client as cc
        # dynamic=True avoids generating static Python wrapper code
voice = cc.CreateObject("SAPI.SpVoice", dynamic=True)
voice.Speak(msg)
except ImportError:
pass
except:
print_exc()
def say(msg):
if IS_TEST:
return
if is_windows():
windows_say(msg)
elif is_mac():
mac_say(msg)
else:
        # Sleep briefly so programs calling the speech API do not spin in a tight loop
time.sleep(0.5)
#################################################################
## Rule engine components
#################################################################
class BaseRule:
"""规则引擎基类"""
def __init__(self, pattern=None):
self.pattern = pattern
def match(self, ctx, input_str = None):
if input_str is not None:
return re.match(self.pattern, input_str)
return None
def match_execute(self, ctx, input_str = None):
try:
matched = self.match(ctx, input_str)
if matched:
self.execute(ctx, *matched.groups())
except Exception as e:
print_exc()
def execute(self, ctx, *argv):
raise NotImplementedError()
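# Hedged illustration (not part of the original module): a minimal concrete rule,
# with an invented pattern, showing how match()/execute() work together via
# match_execute().
class _EchoRule(BaseRule):
    """Example rule that echoes the word following the 'echo' keyword."""
    def __init__(self):
        BaseRule.__init__(self, r"echo\s+(\S+)")
    def execute(self, ctx, word):
        # ctx is whatever context object the caller threads through match_execute()
        print("echo rule matched:", word)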
class RecordInfo:
def __init__(self, name, url):
self.name = name
self.url = url
class RecordList:
"""访问记录,可以统计最近访问的,最多访问的记录"""
def __init__(self, max_size = 1000):
self.max_size = max_size
self.records = []
def visit(self, name, url=None):
self.records.append(RecordInfo(name, url))
if len(self.records) > self.max_size:
del self.records[0]
def recent(self, count=5):
records = self.records[-count:]
records.reverse()
return records
def most(self, count):
return []
def init(config):
global FS_IMG_EXT_LIST
global FS_TEXT_EXT_LIST
global FS_AUDIO_EXT_LIST
global FS_CODE_EXT_LIST
global IS_TEST
FS_TEXT_EXT_LIST = config.FS_TEXT_EXT_LIST
FS_IMG_EXT_LIST = config.FS_IMG_EXT_LIST
FS_AUDIO_EXT_LIST = config.FS_AUDIO_EXT_LIST
FS_CODE_EXT_LIST = config.FS_CODE_EXT_LIST
IS_TEST = config.IS_TEST
xutils.webutil.init_webutil_env(is_test = IS_TEST)
|
xupingmao/xnote
|
xutils/__init__.py
|
Python
|
gpl-3.0
| 9,260
|
[
"VisIt"
] |
cb17872eb5c6ecc577a93bef69040bf95ebb31525209f05fad1a5924f49ea7c3
|
import numpy as np
from numpy.polynomial import Polynomial, Chebyshev, Legendre,\
Laguerre, Hermite, HermiteE
from scipy.special import factorial  # scipy.misc.factorial was removed in SciPy 1.3+
import pdb
from .core import ParamFunction, EMPTY_VAR
from .utils import fsign, dtype_c2r, dtype_r2c
__all__ = ['PReLU', 'Poly', 'Mobius', 'Georgiou1992', 'Gaussian', 'PMul']
class PReLU(ParamFunction):
'''
Parametric ReLU,
.. math::
:nowrap:
\\begin{align}
f(x)=\\text{relu}(x)=\\begin{cases}
x, &\Re[x]>0\\\\
\\text{leak}\cdot x,&\Re[x]<0
\end{cases}
\\end{align}
where leak is a trainable parameter if var_mask[0] is True.
Args:
leak (float, default=0.1): leakage,
var_mask (1darray<bool>, default=[True]): variable mask
Attributes:
leak (float): leakage,
var_mask (1darray<bool>): variable mask
'''
__display_attrs__ = ['leak']
def __init__(self, input_shape, itype, leak=0.1, var_mask=[True]):
dtype = np.find_common_type(('float32', np.dtype(type(leak))), ()).name
otype = np.find_common_type((dtype, itype), ()).name
if leak > 1 or leak < 0:
raise ValueError('leak parameter should be 0-1!')
super(PReLU, self).__init__(input_shape, input_shape,
itype, otype=otype,
dtype=dtype, params=[leak],
var_mask=var_mask)
@property
def leak(self): return self.params[0]
def forward(self, x, **kwargs):
if self.leak == 0:
return np.maximum(x, 0)
else:
return np.maximum(x, self.leak * x)
def backward(self, xy, dy, **kwargs):
x, y = xy
dx = dy.copy(order='F')
xmask = x < 0
if self.leak == 0:
dx[xmask] = 0
else:
dx[xmask] = self.leak * dy[xmask]
if self.var_mask[0]:
da = np.sum(dy[xmask] * x[xmask].conj())
dw = np.array([da], dtype=self.dtype)
else:
dw = EMPTY_VAR
return dw, dx
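# Hedged usage sketch (comments only; the constructor arguments ultimately go to the
# ParamFunction base class, which is not shown here, so treat them as assumptions):
#
#     prelu = PReLU(input_shape=(4,), itype='float32', leak=0.1)
#     x = np.array([-2., -1., 1., 2.], dtype='float32')
#     y = prelu.forward(x)                      # -> [-0.2, -0.1, 1., 2.]
#     dw, dx = prelu.backward((x, y), np.ones_like(x))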
class Poly(ParamFunction):
'''
Ploynomial function layer.
e.g. for polynomial kernel, we have
* :math:`f(x) = \sum\limits_i \\text{params}[i]x^i/i!` \
(factorial_rescale is True)
* :math:`f(x) = \sum\limits_i \\text{params}[i]x^i` \
(factorial_rescale is False)
Args:
kernel (str, default='polynomial'): the kind of polynomial
serie expansion, see `Poly.kernel_dict for detail.`
factorial_rescale (bool, default=False): rescale
high order factors to avoid overflow.
var_mask (1darray<bool>, default=(True,True,...)): variable mask
Attributes:
kernel (str): the kind of polynomial serie expansion,
see `Poly.kernel_dict for detail.`
factorial_rescale (bool): rescale high order factors to avoid overflow.
var_mask (1darray<bool>): variable mask
'''
__display_attrs__ = ['kernel', 'max_order',
'var_mask', 'factorial_rescale']
kernel_dict = {'polynomial': Polynomial, 'chebyshev': Chebyshev,
'legendre': Legendre, 'laguerre': Laguerre,
'hermite': Hermite, 'hermiteE': HermiteE}
'''dict of available kernels, with values target functions.'''
def __init__(self, input_shape, itype, params,
kernel='polynomial', var_mask=None,
factorial_rescale=False):
# check input data
if kernel not in self.kernel_dict:
raise ValueError('Kernel %s not found, should be one of %s' % (
kernel, self.kernel_dict))
super(Poly, self).__init__(input_shape, input_shape,
itype, params=params, var_mask=var_mask)
self.kernel = kernel
self.factorial_rescale = factorial_rescale
@property
def max_order(self):
'''int: maximum order appeared.'''
return len(self.params) - 1
def forward(self, x, **kwargs):
factor = 1. / factorial(np.arange(len(self.params))
) if self.factorial_rescale else 1
p = self.kernel_dict[self.kernel](self.params * factor)
y = p(x)
return y
def backward(self, xy, dy, **kwargs):
factor = 1. / factorial(np.arange(len(self.params))
) if self.factorial_rescale\
else np.ones(len(self.params))
x, y = xy
p = self.kernel_dict[self.kernel](self.params * factor)
dp = p.deriv()
dx = dp(x) * dy
dw = []
for i, mask in enumerate(self.var_mask):
if mask:
basis_func = self.kernel_dict[self.kernel].basis(i)
dwi = (basis_func(x) * dy * factor[i]).sum()
dw.append(dwi)
return np.array(dw, dtype=self.dtype), dx
class Mobius(ParamFunction):
'''
Mobius transformation, :math:`f(x) = \\frac{(z-a)(b-c)}{(z-c)(b-a)}`
:math:`a, b, c` map to :math:`0, 1, \infty` respectively.
'''
__display_attrs__ = ['var_mask']
def __init__(self, input_shape, itype, params, var_mask=None):
# check input data
if len(params) != 3:
raise ValueError('Mobius take 3 params! but get %s' % len(params))
super(Mobius, self).__init__(input_shape, input_shape,
itype, params=params, var_mask=var_mask)
def forward(self, x, **kwargs):
a, b, c = self.params
return (b - c) / (b - a) * (x - a) / (x - c)
def backward(self, xy, dy, **kwargs):
x, y = xy
a, b, c = self.params
dx = (a - c) * (c - b) / (a - b) / (x - c)**2 * dy
dw = []
if self.var_mask[0]:
dw.append((b - c) / (a - b)**2 * ((x - b) / (x - c) * dy).sum())
if self.var_mask[1]:
dw.append(-(a - c) / (a - b)**2 * ((x - a) / (x - c) * dy).sum())
if self.var_mask[2]:
dw.append(((x - a) * (x - b) / (x - c)**2 / (a - b) * dy).sum())
return np.array(dw, dtype=self.dtype), dx
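# Hedged sanity check (comments only): with parameters a, b, c the transform maps
# a -> 0, b -> 1, c -> infinity. For example, with a, b, c = 0, 1, 2:
#
#     f(0) = (b - c)/(b - a) * (0 - a)/(0 - c) = (-1)/1 * 0/(-2) = 0
#     f(1) = (b - c)/(b - a) * (1 - a)/(1 - c) = (-1)/1 * 1/(-1) = 1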
class Georgiou1992(ParamFunction):
'''
Function :math:`f(x) = \\frac{x}{c+|x|/r}`
'''
__display_attrs__ = ['c', 'r', 'var_mask']
def __init__(self, input_shape, itype, params, var_mask=None):
params = np.asarray(params)
if np.iscomplexobj(params):
raise ValueError(
'Args c, r for %s should not be complex!'
% self.__class__.__name__)
if params[1] == 0:
raise ValueError('r = 0 get!')
super(Georgiou1992, self).__init__(input_shape, input_shape,
itype, params=params,
var_mask=var_mask,
tags={'analytical': 3})
@property
def c(self): return self.params[0]
@property
def r(self): return self.params[1]
def forward(self, x, **kwargs):
c, r = self.params
return x / (c + np.abs(x) / r)
def backward(self, xy, dy, **kwargs):
x, y = xy
c, r = self.params
deno = 1. / (c + np.abs(x) / r)**2
dw = []
if self.var_mask[0]:
dw.append((-x * deno * dy).real.sum())
if self.var_mask[1]:
dw.append((x * np.abs(x) / r**2 * deno * dy).real.sum())
dx = c * dy * deno
if self.otype[:7] == 'complex':
dx = dx + x.conj() / r * 1j * (fsign(x) * dy).imag * deno
return np.array(dw, dtype=self.dtype), dx
class Gaussian(ParamFunction):
'''
Function :math:`f(x) = \\frac{1}{\sqrt{2\pi}\sigma} \
\exp(-\\frac{\\|x-\mu\\|^2}{2\sigma^2})`,
where :math:`\mu,\sigma` are mean and variance respectively.
'''
__display_attrs__ = ['mean', 'variance', 'var_mask']
def __init__(self, input_shape, itype, params, var_mask=None):
dtype = itype
params = np.asarray(params, dtype=dtype)
otype = dtype_c2r(itype) if itype[:7] == 'complex' else itype
if params[1].imag != 0 or params[1] <= 0:
raise ValueError('non-positive variance get!')
super(Gaussian, self).__init__(input_shape, input_shape, itype,
dtype=dtype, otype=otype, params=params,
var_mask=var_mask,
tags={'analytical': 2})
@property
def mean(self): return self.params[0]
@property
def variance(self): return self.params[1]
def forward(self, x, **kwargs):
mu, sig = self.params
sig = np.real(sig)
xx = x - mu
return np.exp(-(xx * xx.conj()).real / (2 * sig**2.)) /\
np.sqrt(2 * np.pi) / sig
def backward(self, xy, dy, **kwargs):
x, y = xy
ydy = y * dy
mu, sig = self.params
xx = x - mu
sig = np.real(sig)
dw = []
if self.var_mask[0]:
dw.append((xx.real / sig**2 * ydy).sum())
if self.var_mask[1]:
dw.append(((xx * xx.conj() - sig**2).real / sig**3 * ydy).sum())
dx = -xx.conj() / sig**2 * ydy
return np.array(dw, dtype=self.dtype), dx
class PMul(ParamFunction):
'''
Function :math:`f(x) = cx`, where c is trainable if var_mask[0] is True.
Args:
c (number, default=1.0): multiplier.
Attributes:
c (number): multiplier.
'''
__display_attrs__ = ['c', 'var_mask']
def __init__(self, input_shape, itype, c=1., var_mask=None):
if var_mask is None:
var_mask = [True]
params = np.atleast_1d(c)
dtype = params.dtype.name
otype = np.find_common_type((dtype, itype), ()).name
super(PMul, self).__init__(input_shape, input_shape, itype,
dtype=dtype, otype=otype, params=params,
var_mask=np.atleast_1d(var_mask))
@property
def c(self): return self.params[0]
def forward(self, x, **kwargs):
return self.params[0] * x
def backward(self, xy, dy, **kwargs):
c = self.params[0]
dx = dy * c
dw = EMPTY_VAR if not self.var_mask[0] else np.array(
[(dy * xy[0]).sum()])
return dw, dx
|
GiggleLiu/poorman_nn
|
poornn/pfunctions.py
|
Python
|
mit
| 10,358
|
[
"Gaussian"
] |
f3c2ce53668eddf57da22fc6902005cebfb3dfd14196f5e990e6be3a231ad831
|
import os
import subprocess
import pysam
from TestUtils import BAM_DATADIR, force_str
def build_fetch_with_samtoolsshell(fn):
retval = os.popen("samtools view {} 2> /dev/null | wc -l".format(fn)).read()
return int(retval.strip())
def build_fetch_with_samtoolspipe(fn):
FNULL = open(os.devnull, 'w')
with subprocess.Popen(["samtools", "view", fn],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=FNULL) as proc:
return len(proc.stdout.readlines())
def build_fetch_with_pysam(*args, **kwargs):
with pysam.AlignmentFile(*args, **kwargs) as inf:
return len(list(inf.fetch()))
def build_query_sequences_with_samtoolsshell(fn):
retval = os.popen("samtools view {} 2> /dev/null | cut -f 11".format(fn)).read()
return force_str(retval).splitlines()
def build_query_sequences_with_samtoolspipe(fn):
FNULL = open(os.devnull, 'w')
with subprocess.Popen(["samtools", "view", fn],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=FNULL) as proc:
data = [force_str(x).split()[10] for x in proc.stdout.readlines()]
return data
def build_query_sequences_with_pysam(*args, **kwargs):
with pysam.AlignmentFile(*args, **kwargs) as inf:
data = [x.query_sequence for x in inf]
return data
def build_query_qualities_with_pysam(*args, **kwargs):
with pysam.AlignmentFile(*args, **kwargs) as inf:
data = [x.query_qualities for x in inf]
return data
def build_query_sequences_flagfilter_with_samtoolsshell(fn):
retval = os.popen("samtools view -f 2 {} 2> /dev/null | cut -f 11".format(fn)).read()
return force_str(retval).splitlines()
def build_query_sequences_flagfilter_with_samtoolspipe(fn):
FNULL = open(os.devnull, 'w')
with subprocess.Popen(["samtools", "view", "-f", "2", fn],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=FNULL) as proc:
data = [force_str(x).split()[10] for x in proc.stdout.readlines()]
return data
def build_query_sequences_flagfilter_with_pysam(*args, **kwargs):
with pysam.AlignmentFile(*args, **kwargs) as inf:
data = [x.query_sequence for x in inf if x.is_proper_pair]
return data
def build_query_sequences_directflagfilter_with_pysam(*args, **kwargs):
with pysam.AlignmentFile(*args, **kwargs) as inf:
data = [x.query_sequence for x in inf if x.flag & 2]
return data
def build_aligned_pairs_with_pysam(*args, **kwargs):
matches_only = kwargs.pop("matches_only", False)
with_seq = kwargs.pop("with_seq", False)
with pysam.AlignmentFile(*args, **kwargs) as inf:
data = [x.get_aligned_pairs(matches_only=matches_only, with_seq=with_seq)
for x in inf if not x.is_unmapped]
return data
|
kyleabeauchamp/pysam
|
tests/AlignmentFileFetchTestUtils.py
|
Python
|
mit
| 2,973
|
[
"pysam"
] |
26c8a84b6583991118c129fae0639b8e975a166c2e4966eb16bba9f403ab2ae1
|
import numpy as np
from ase.units import Bohr, Hartree
from ase.lattice import bulk
from gpaw import GPAW
from gpaw.eigensolvers.rmm_diis_old import RMM_DIIS
from gpaw.mixer import Mixer
from gpaw.response.df0 import DF
from gpaw.response.bse import BSE
GS = 1
df = 1
bse = 1
check_spectrum = 1
if GS:
a = 4.043
atoms = bulk('Al', 'fcc', a=a)
atoms.center()
calc = GPAW(h=0.2,
eigensolver=RMM_DIIS(),
mixer=Mixer(0.1,3),
kpts=(4,2,2),
xc='LDA',
nbands=4,
convergence={'bands':'all'})
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al.gpw','all')
if bse:
bse = BSE('Al.gpw',
w=np.linspace(0,24,241),
nv=[0,4],
nc=[0,4],
coupling=True,
mode='RPA',
q=np.array([0.25, 0, 0]),
ecut=50.,
eta=0.2)
bse.get_dielectric_function('Al_bse.dat')
if df:
# Excited state calculation
q = np.array([1/4.,0.,0.])
w = np.linspace(0, 24, 241)
df = DF(calc='Al.gpw',
q=q,
w=w,
eta=0.2,
ecut=50,
hilbert_trans=False)
df.get_EELS_spectrum(filename='Al_df.dat')
df.write('Al.pckl')
df.check_sum_rule()
if check_spectrum:
d = np.loadtxt('Al_bse.dat')[:,2]
wpeak = 16.4
Nw = 164
if d[Nw] > d[Nw-1] and d[Nw] > d[Nw+1]:
pass
else:
raise ValueError('Plasmon peak not correct ! ')
if np.abs(d[Nw] - 27.4958893542) > 1e-5:
        print(d[Nw])
raise ValueError('Please check spectrum strength ! ')
d2 = np.loadtxt('Al_df.dat')
if np.abs(d[:240] - d2[:240, 2]).sum() > 0.003:
raise ValueError('Please compare two spectrum')
|
robwarm/gpaw-symm
|
gpaw/test/bse_aluminum.py
|
Python
|
gpl-3.0
| 1,837
|
[
"ASE",
"GPAW"
] |
fad548500b10af1189468f0b098271751aab36881b942ca4b474e1ef752619a4
|
# pre_NAMD.py
# Creates the files used for NAMD based on the .pdb file downloaded from PDB bank
#
# Usage:
# python pre_NAMD.py $PDBID
#
# $PDBID=the 4 characters identification code of the .pdb file
#
# Input:
# $PDBID.pdb: .pdb file downloaded from PDB bank
#
# Output:
# $PDBID_p.pdb: .pdb file with water molecules removed
# $PDBID_p_h.pdb: .pdb file with water removed and hydrogen atoms added
# $PDBID_p_h.psf: .psf file of $PDBID_p_h.pdb
# $PDBID_p_h.log: Log file of adding hydrogen atoms
# $PDBID_wb.pdb: .pdb file of the water box model
# $PDBID_wb.psf: .psf file of $PDBID_wb.pdb
# $PDBID_wb.log: Log file of the water box model generation
# $PDBID_wb_i.pdb: .pdb file of the ionized water box model (For NAMD)
# $PDBID_wb_i.psf: .psf file of $PDBID_wb_i.pdb (For NAMD)
# $PDBID.log: Log file of the whole process (output of VMD)
# $PDBID_center.txt: File contains the grid and center information of
# the ionized water box model
#
# Author: Xiaofei Zhang
# Date: June 20 2016
from __future__ import print_function
import sys, os
def print_error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# main
if len(sys.argv) != 2:
print_error("Usage: python pre_NAMD.py $PDBID")
sys.exit(-1)
mypath = os.path.realpath(__file__)
tclpath = os.path.split(mypath)[0] + os.path.sep + 'tcl' + os.path.sep
pdbid = sys.argv[1]
logfile = pdbid+'.log'
# Using the right path of VMD
vmd = "/Volumes/VMD-1.9.2/VMD 1.9.2.app/Contents/vmd/vmd_MACOSXX86"
print("Input: "+pdbid+".pdb")
# Remove water
print("Remove water..")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'remove_water.tcl' + ' ' + '-args' + ' '+ pdbid +'> '+ logfile
os.system(cmdline)
# Create .psf
print("Create PSF file...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'create_psf.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
# Build water box
print("Build water box...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'build_water_box.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
# Add ions
print("Add ions...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'add_ion.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
# Calculate grid and center
print("Calculate center coordinates...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'get_center.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
print("Finish!")
# end main
|
Xiaofei-Zhang/NAMD_Docking_pipeline
|
pre_NAMD/pre_NAMD.py
|
Python
|
mit
| 2,553
|
[
"NAMD",
"VMD"
] |
093fdf8733f3fa018279e1b2e4efd7f4a4550a1de0df8c03ef04567ed51c5f01
|
#!/usr/bin/env python
import sys
import os
import subprocess
#Local imports
from chemistry import elementdata, io
'''Usage: python make_isa_files.py'''
description='''
This program takes, as input, a generic template file, and creates
input files using geometries from all .gxyz files that are
located in the pwd and match a user-identified prefix.
File prefix is user specified. Template file should always be of
the form 'template_file'_template.inp . Geometry files should be of the form
'geometry_file'_id.gxyz and should follow standard file format for .gxyz
files. id can either be an identification number or simply a string.
Output files will be of the form 'template_file'_id.com, where id
is an identification tag that matches the correspoinding .gxyz file.
'''
epilog='''
Last Updated: 07/31/14 by mvanvleet
'''
submit_template = '''#!/bin/bash
runcamcasp.py {0} --clt {0}.clt -d {0} --direct -q default --nproc 20 --ifexists delete
'''
maindir = os.getcwd().replace("/scripts",'')
templatesdir = maindir + '/templates/'
inputdir = maindir + '/input/'
geometriesdir = maindir + '/geometries/'
isadir = maindir + '/isa/'
ip_file = templatesdir + 'ips.dat'
template_file = templatesdir + 'isa_template.clt'
dimer_info_file = 'dimer_info.dat'
with open (inputdir + dimer_info_file,'r') as f:
data = [ line.split() for line in f.readlines()]
itag = [ i[0] if i else [] for i in data ].index('MonA_Name')
mona = data[itag][1]
itag = [ i[0] if i else [] for i in data ].index('MonB_Name')
monb = data[itag][1]
itag = [ i[0] if i else [] for i in data ].index('MonA_Charge')
q_mona = int(data[itag][1])
itag = [ i[0] if i else [] for i in data ].index('MonB_Charge')
q_monb = int(data[itag][1])
subprocess.call(['mkdir','-p',isadir])
if mona == monb:
mons = [mona]
else:
mons = [mona,monb]
for mon in mons:
atomtype_file = inputdir + mon + '.atomtypes'
geometry_file = templatesdir + mon + '.xyz'
##################### Begin Script #######################################
#read in data from input template file
    input_file = open(template_file, 'r')
prelines = []
postlines = []
before_geometry_block = True
for line in input_file:
if 'GEOMETRY_BLOCK_GOES_HERE' not in line and before_geometry_block:
prelines.append(line)
continue
elif 'GEOMETRY_BLOCK_GOES_HERE' in line:
before_geometry_block = False
elif not before_geometry_block:
postlines.append(line)
continue
input_file.close()
# read in geometry from corresponding .gxyz file and format for gamess
    print(geometry_file)
xyz = io.ReadCoordinates(geometry_file)[0]
# Read in atomtypes from atomtypes file
with open(atomtype_file,'r') as f:
atomtypes = [ line.split()[0] for line in f.readlines()[2:]] #skip title lines
# Ammend geometry_block to include atomtypes
geometry_block = []
for i,line in enumerate(xyz):
template = '{:2} {:>3} {:>14.6f} {:14.6f} {:>14.6f} \tType {:5} \n'
args1 = [line[0] + str(i), elementdata.AtomicNumber(line[0]), line[1], line[2], line[3], atomtypes[i] ]
geometry_block.append(template.format(*args1))
#actually write output file
output_file_name = isadir + mon + '.clt'
    output_file = open(output_file_name, 'w')
for line in prelines:
output_file.write(line)
for line in geometry_block:
output_file.write(line)
for line in postlines:
output_file.write(line)
output_file.close()
# Substitute in ionization potential and molecule name
#molecule = geometry_file.replace('.gxyz','')
molecule = mon
charge = q_mona if mon == mona else q_monb
ip = subprocess.check_output(['grep','-iw',molecule,ip_file])
ip = float(ip.split()[2]) # 2nd column contains i.p.
fill_items = ['FILL_IP', 'FILL_CHARGE', 'FILL_MOLECULE']
items = [ip, charge, molecule]
for [fill,item] in zip(fill_items,items):
subprocess.call(['sed','-i',"s/"+fill+'/'+str(item)+'/',output_file_name])
    print('Successfully wrote input file.')
# Make submit script
with open(isadir + 'submit_' + mon + '.sh','w') as f:
f.write(submit_template.format(mon))
#################### End Script ###########################################
|
mvanvleet/workflow-for-force-fields
|
scripts/make_isa_files.py
|
Python
|
gpl-3.0
| 4,328
|
[
"GAMESS"
] |
19bf501d11ff1eae8d44918e3bf5f1e97a7b47a33b6ccf64c9947448b63a1831
|
#
# Python Design Patterns: Visitor
# Author: Jakub Vojvoda [github.com/JakubVojvoda]
# 2016
#
# Source code is licensed under MIT License
# (for more details see LICENSE)
#
import sys
#
# Visitor
# declares a Visit operation for each class of ConcreteElement
# in the object structure
#
class Visitor:
def visitElementA(self, element):
pass
    def visitElementB(self, element):
pass
#
# Concrete Visitors
# implement each operation declared by Visitor, which implement
# a fragment of the algorithm defined for the corresponding class
# of object in the structure
#
class ConcreteVisitor1(Visitor):
def __init__(self):
Visitor.__init__(self)
def visitElementA(self, concreteElementA):
print("Concrete Visitor 1: Element A visited.")
def visitElementB(self, concreteElementB):
print("Concrete Visitor 1: Element B visited.")
class ConcreteVisitor2(Visitor):
def __init__(self):
Visitor.__init__(self)
def visitElementA(self, concreteElementA):
print("Concrete Visitor 2: Element A visited.")
def visitElementB(self, concreteElementB):
print("Concrete Visitor 2: Element B visited.")
#
# Element
# defines an accept operation that takes a visitor as an argument
#
class Element:
def accept(self, visitor):
pass
#
# Concrete Elements
# implement an accept operation that takes a visitor as an argument
#
class ConcreteElementA(Element):
def __init__(self):
Element.__init__(self)
def accept(self, visitor):
visitor.visitElementA(self)
class ConcreteElementB(Element):
def __init__(self):
Element.__init__(self)
def accept(self, visitor):
visitor.visitElementB(self)
if __name__ == "__main__":
elementA = ConcreteElementA()
elementB = ConcreteElementB()
visitor1 = ConcreteVisitor1()
visitor2 = ConcreteVisitor2()
elementA.accept(visitor1)
elementA.accept(visitor2)
elementB.accept(visitor1)
elementB.accept(visitor2)
|
JakubVojvoda/design-patterns-python
|
visitor/Visitor.py
|
Python
|
mit
| 1,965
|
[
"VisIt"
] |
edb6a9d057bbe80ea0976d7c0425cb48f0e7c28031656065a86e2165dd78e5bf
|
#!/usr/bin/env python3
import os
import subprocess
from apt import liteapt
from gi.repository import Gtk
class SystrayApp(object):
def __init__(self):
self.apt = liteapt
self.tray = Gtk.StatusIcon()
self.menu = Gtk.Menu()
self.menu_about = Gtk.MenuItem()
self.menu_check_updates = Gtk.MenuItem()
self.menu_log = Gtk.MenuItem()
self.menu_quit = Gtk.MenuItem()
self.about_dialog = ''
if os.path.isfile('/usr/share/pixmaps/updaten.png'):
self.tray.set_from_file('/usr/share/pixmaps/updaten.png')
else:
self.tray.set_from_stock(Gtk.STOCK_ABOUT)
self.tray.set_tooltip_text('Lite Updater')
self.tray.connect('popup-menu', self.on_right_click)
def on_right_click(self, icon, event_button, event_time):
self.make_menu(icon, event_button, event_time)
def make_menu(self, icon, event_button, event_time):
try:
for widget in self.menu.get_children():
self.menu.remove(widget)
except Exception as e:
print(e)
self.menu_about.set_label('About')
self.menu_about.connect('activate', self.show_about_dialog)
self.menu_check_updates.set_label('Check updates')
        self.menu_check_updates.connect('activate', self.check_updates)
        self.menu_log.set_label('View Log')
self.menu_log.connect('activate', self.show_log)
self.menu_quit.set_label('Quit')
self.menu_quit.connect('activate', Gtk.main_quit)
self.menu.append(self.menu_about)
self.menu.append(self.menu_check_updates)
self.menu.append(self.menu_log)
self.menu.append(self.menu_quit)
self.menu.show_all()
def pos(menu, icon):
return Gtk.StatusIcon.position_menu(menu, icon)
self.menu.popup(None, None, pos, icon, event_button, event_time)
def show_about_dialog(self, widget):
self.about_dialog = Gtk.AboutDialog()
self.about_dialog.set_destroy_with_parent(True)
self.about_dialog.set_icon_name('Lite Updater')
self.about_dialog.set_name('Lite Updater')
self.about_dialog.set_version('0.1')
self.about_dialog.set_comments('Check for Linux Lite updates')
self.about_dialog.set_authors(['Brian Tomlinson <brian.tomlinson@linux.com>'])
self.about_dialog.run()
self.about_dialog.destroy()
def check_updates(self, widget):
num_avail, pkgs = self.apt.check_updateables()
title = "You have %s updates available!" % num_avail
message = "%s" % pkgs
return self.apt.send_notification(title, message)
def show_log(self, widget):
subprocess.Popen('/usr/bin/leafpad /tmp/liteupdater.log', shell=True)
if __name__ == '__main__':
SystrayApp()
Gtk.main()
|
darthlukan/liteupdater
|
usr/lib/python3/dist-packages/liteupdater/liteupdater.py
|
Python
|
gpl-2.0
| 2,842
|
[
"Brian"
] |
9280841c64a8cd5b0eb906cf96492886738fca19ccdb8688de72a638c89583a4
|
class Visit:
visitList = {}
def __init__(self, date, cost, idNum=None):
if idNum == None:
self.idNum = len(Visit.visitList) + 1
else:
self.idNum = idNum
self.date = date
self.cost = cost
self.doctor = None
Visit.visitList[self.idNum] = self
def addDoctor(self, doctor):
self.doctor = doctor
class Doctor:
doctorList = {}
def __init__(self, name, startDate, endDate=None, idNum=None):
if idNum == None:
self.idNum = len(Doctor.doctorList.keys()) + 1
else:
self.idNum = idNum
self.name = name
self.startDate = startDate
self.endDate = endDate
self.diagnoses = []
self.testsRun = []
self.treatmentsRx = []
self.visits = []
Doctor.doctorList[self.idNum] = self
def addVisit(self, visit):
self.visits.append(visit)
def addCondition(self, condition):
self.diagnoses.append(condition)
def addTreatment(self, treatment):
self.treatmentsRx.append(treatment)
def addTest(self, test):
self.testsRun.append(test)
def getStartDate(self):
firstDate = None
if self.visits:
firstDate = self.visits[0].date
for each in range(1, len(self.visits)):
if firstDate > self.visits[each].date:
firstDate = self.visits[each].date
return firstDate
def getEndDate(self):
lastDate = None
if self.visits:
lastDate = self.visits[0].date
for each in range(1, len(self.visits)):
if lastDate < self.visits[each].date:
lastDate = self.visits[each].date
return lastDate
class Condition:
conditionList = {}
statusList = ("Confirmed", "Disconfirmed", "Preliminary", "Cured")
def __init__(self, name, status, startDate, endDate, notes, idNum=None):
if idNum == None:
self.idNum = len(Condition.conditionList.keys()) + 1
else:
self.idNum = idNum
self.name = name
if status not in Condition.statusList:
self.status = "Preliminary"
else:
self.status = status
self.startDate = startDate
self.endDate = endDate
self.notes = notes
self.diagnosingDr = None
self.symptoms = []
self.treatments = []
self.tests = []
Condition.conditionList[self.idNum] = self
def addDoctor(self, doctor):
self.diagnosingDr = doctor
def addSymptom(self, symptom):
self.symptoms.append(symptom)
def addTreatment(self, treatment):
self.treatments.append(treatment)
def addTest(self, test):
self.tests.append(test)
def getStartDate(self):
return self.startDate
def getEndDate(self):
return self.endDate
class Test:
testList = {}
def __init__(self, name, dateTaken, cost, notes, filePath, idNum=None):
if idNum == None:
self.idNum = len(Test.testList.keys()) + 1
else:
self.idNum = idNum
self.name = name
self.date = dateTaken
self.cost = cost
self.notes = notes
self.file = filePath
self.forCondition = None
self.doctor = None
Test.testList[self.idNum] = self
def addCondition(self, condition):
self.forCondition = condition
def addDoctor(self, doctor):
self.doctor = doctor
def getStartDate(self):
return self.date
def getEndDate(self):
return None
class Symptom:
symptomList = {}
def __init__(self, name, idNum=None):
if idNum == None:
self.idNum = len(Symptom.symptomList.keys()) + 1
else:
self.idNum = idNum
self.name = name
self.occurrences = []
Symptom.symptomList[self.idNum] = self
def addOccurrence(self, symptomOccurrence):
self.occurrences.append(symptomOccurrence)
def getStartDate(self):
firstDate = None
if self.occurrences:
firstDate = self.occurrences[0].date
for each in range(1, len(self.occurrences)):
if firstDate > self.occurrences[each].date:
firstDate = self.occurrences[each].date
return firstDate
def getEndDate(self):
lastDate = None
if self.occurrences:
lastDate = self.occurrences[0].date
for each in range(1, len(self.occurrences)):
if lastDate < self.occurrences[each].date:
lastDate = self.occurrences[each].date
return lastDate
class SymptomOccurrence:
symptomOccurrenceList = {}
def __init__(self, date, idNum=None):
if idNum == None:
self.idNum = len(SymptomOccurrence.symptomOccurrenceList.keys()) + 1
else:
self.idNum = idNum
        self.date = date
        self.occurrenceOf = None
        SymptomOccurrence.symptomOccurrenceList[self.idNum] = self
def addOccurrenceOf(self, symptom):
self.occurrenceOf = symptom
class Treatment:
treatmentList = {}
def __init__(self, name, dosageUnit, cost, idNum=None):
if idNum == None:
self.idNum = len(Treatment.treatmentList.keys()) + 1
else:
self.idNum = idNum
self.name = name
self.dosageUnit = dosageUnit
self.cost = cost
self.forCondition = None
self.doctor = None
self.treatmentDetails = []
Treatment.treatmentList[self.idNum] = self
def addCondition(self, condition):
self.forCondition = condition
def addDoctor(self, doctor):
self.doctor = doctor
def addDetail(self, treatmentDetail):
self.treatmentDetails.append(treatmentDetail)
def getStartDate(self):
firstDate = None
if self.treatmentDetails:
firstDate = self.treatmentDetails[0].date
for each in range(1, len(self.treatmentDetails)):
if firstDate > self.treatmentDetails[each].date:
firstDate = self.treatmentDetails[each].date
return firstDate
def getEndDate(self):
lastDate = None
if self.treatmentDetails:
lastDate = self.treatmentDetails[0].date
for each in range(1, len(self.treatmentDetails)):
if lastDate < self.treatmentDetails[each].date:
lastDate = self.treatmentDetails[each].date
return lastDate
class TreatmentDetail:
treatmentDetailList = {}
def __init__(self, dosage, date, idNum=None):
if idNum == None:
self.idNum = len(TreatmentDetail.treatmentDetailList.keys()) + 1
else:
self.idNum = idNum
self.dosage = dosage
self.date = date
self.detailOf = None
TreatmentDetail.treatmentDetailList[self.idNum] = self
def addDetailOf(self, treatment):
self.detailOf = treatment
class Patient:
patientList = {}
def __init__(self, name, idNum=None):
if idNum == None:
self.idNum = len(Patient.patientList.keys()) + 1
else:
self.idNum = idNum
self.name = name
self.conditions = []
self.symptoms = []
self.tests = []
self.treatments = []
Patient.patientList[self.idNum] = self
    def addCondition(self, condition):
        self.conditions.append(condition)
class Hierarchy:
def __init__(self):
self.doctors = {}
self.conditions = {}
self.treatments = {}
self.treatmentsDetails = {}
self.symptoms = {}
self.symptomsOccurrences = {}
self.tests = {}
self.visits = {}
|
zhaladshar/HealthView
|
classes.py
|
Python
|
gpl-3.0
| 7,792
|
[
"VisIt"
] |
d4fb45150bd6f492057063663a4f4975c608eee15c0524f7f9e9203eee657704
|
# -*- coding: utf-8 -*-
"""Functions for FIR filter design."""
from math import ceil, log
import operator
import warnings
import numpy as np
from numpy.fft import irfft, fft, ifft
from scipy.special import sinc
from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning,
lstsq)
from . import _sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase']
def _get_fs(fs, nyq):
"""
Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
"""
if nyq is None and fs is None:
fs = 2
elif nyq is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
fs = 2*nyq
return fs
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These are
# relative frequencies, expressed as a fraction of the Nyquist frequency.
# For example, if the Nyquist frequency is 2 kHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
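# Hedged illustration (not part of the original module): converting absolute
# quantities into the relative form used throughout this file.
#
#     fs = 4000.0            # sampling rate in Hz
#     nyq = 0.5 * fs         # Nyquist frequency: 2 kHz
#     width = 300.0 / nyq    # a 300 Hz transition band -> 0.15
#     numtaps = 101          # 101 taps corresponds to filter order 100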
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
Examples
--------
Suppose we want to design a lowpass filter, with 65 dB attenuation
in the stop band. The Kaiser window parameter to be used in the
window method is computed by `kaiser_beta(65)`:
>>> from scipy.signal import kaiser_beta
>>> kaiser_beta(65)
6.20426
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter,
expressed as a fraction of the Nyquist frequency.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
Examples
--------
Suppose we want to design a FIR filter using the Kaiser window method
that will have 211 taps and a transition width of 9 Hz for a signal that
is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency,
the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB)
is computed as follows:
>>> from scipy.signal import kaiser_atten
>>> kaiser_atten(211, 0.0375)
64.48099630593983
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Determine the filter window parameters for the Kaiser window method.
The parameters returned by this function are generally used to create
a finite impulse response filter using the window method, with either
`firwin` or `firwin2`.
Parameters
----------
ripple : float
Upper bound for the deviation (in dB) of the magnitude of the
filter's frequency response from that of the desired filter (not
including frequencies in any transition intervals). That is, if w
is the frequency expressed as a fraction of the Nyquist frequency,
A(w) is the actual frequency response of the filter and D(w) is the
desired frequency response, the design requirement is that::
            abs(A(w) - D(w)) < 10**(-ripple/20)
for 0 <= w <= 1 and w not in a transition interval.
width : float
Width of transition region, normalized so that 1 corresponds to pi
radians / sample. That is, the frequency is expressed as a fraction
of the Nyquist frequency.
Returns
-------
numtaps : int
The length of the Kaiser window.
beta : float
The beta parameter for the Kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.windows.kaiser(numtaps, beta, sym=True)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476.
Examples
--------
We will use the Kaiser window method to design a lowpass FIR filter
for a signal that is sampled at 1000 Hz.
We want at least 65 dB rejection in the stop band, and in the pass
band the gain should vary no more than 0.5%.
We want a cutoff frequency of 175 Hz, with a transition between the
pass band and the stop band of 24 Hz. That is, in the band [0, 163],
the gain varies no more than 0.5%, and in the band [187, 500], the
signal is attenuated by at least 65 dB.
>>> from scipy.signal import kaiserord, firwin, freqz
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0
>>> cutoff = 175
>>> width = 24
The Kaiser method accepts just a single parameter to control the pass
band ripple and the stop band rejection, so we use the more restrictive
of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
so we will use 65 dB as the design parameter.
Use `kaiserord` to determine the length of the filter and the
parameter for the Kaiser window.
>>> numtaps, beta = kaiserord(65, width/(0.5*fs))
>>> numtaps
167
>>> beta
6.20426
Use `firwin` to create the FIR filter.
>>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
... scale=False, nyq=0.5*fs)
Compute the frequency response of the filter. ``w`` is the array of
frequencies, and ``h`` is the corresponding complex array of frequency
responses.
>>> w, h = freqz(taps, worN=8000)
>>> w *= 0.5*fs/np.pi # Convert w to Hz.
Compute the deviation of the magnitude of the filter's response from
that of the ideal lowpass filter. Values in the transition region are
set to ``nan``, so they won't appear in the plot.
>>> ideal = w < cutoff # The "ideal" frequency response.
>>> deviation = np.abs(np.abs(h) - ideal)
>>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
Plot the deviation. A close look at the left end of the stop band shows
that the requirement for 65 dB attenuation is violated in the first lobe
by about 0.125 dB. This is not unusual for the Kaiser window method.
>>> plt.plot(w, 20*np.log10(np.abs(deviation)))
>>> plt.xlim(0, 0.5*fs)
>>> plt.ylim(-90, -60)
>>> plt.grid(alpha=0.25)
>>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Deviation from ideal (dB)')
>>> plt.title('Lowpass Filter Frequency Response')
>>> plt.show()
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=None, fs=None):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist frequency, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist frequency.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be odd if a passband includes the
Nyquist frequency.
cutoff : float or 1-D array_like
Cutoff frequency of filter (expressed in the same units as `fs`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `fs/2`. The values 0 and
`fs/2` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `fs`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
If True, the gain at the frequency 0 (i.e., the "DC gain") is 1.
If False, the DC gain is 0. Can also be a string argument for the
desired filter type (equivalent to ``btype`` in IIR design functions).
.. versionadded:: 1.3.0
Support for string arguments.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `fs/2` (the Nyquist frequency) if the first passband ends at
`fs/2` (i.e the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `cutoff` must be between 0 and `nyq`. Default
is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `cutoff`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to ``fs/2``, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See Also
--------
firwin2
firls
minimum_phase
remez
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
""" # noqa: E501
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
nyq = 0.5 * _get_fs(fs, nyq)
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than fs/2.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
if isinstance(pass_zero, str):
if pass_zero in ('bandstop', 'lowpass'):
if pass_zero == 'lowpass':
if cutoff.size != 1:
raise ValueError('cutoff must have one element if '
'pass_zero=="lowpass", got %s'
% (cutoff.shape,))
elif cutoff.size <= 1:
raise ValueError('cutoff must have at least two elements if '
'pass_zero=="bandstop", got %s'
% (cutoff.shape,))
pass_zero = True
elif pass_zero in ('bandpass', 'highpass'):
if pass_zero == 'highpass':
if cutoff.size != 1:
raise ValueError('cutoff must have one element if '
'pass_zero=="highpass", got %s'
% (cutoff.shape,))
elif cutoff.size <= 1:
raise ValueError('cutoff must have at least two elements if '
'pass_zero=="bandpass", got %s'
% (cutoff.shape,))
pass_zero = False
else:
raise ValueError('pass_zero must be True, False, "bandpass", '
'"lowpass", "highpass", or "bandstop", got '
'%s' % (pass_zero,))
pass_zero = bool(operator.index(pass_zero)) # ensure bool-like
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist frequency.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2-D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .windows import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=None,
antisymmetric=False, fs=None):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1-D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency is half `fs`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must
not be repeated.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `freq` must be between 0 and `nyq`. Default is 1.
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
fs : float, optional
        The sampling frequency of the signal. Each frequency in `freq`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
firls
firwin
minimum_phase
remez
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
    the value of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
    The magnitude response of all but type I filters is subject to the
    following constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
nyq = 0.5 * _get_fs(fs, nyq)
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with fs/2.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if freq[1] == 0:
raise ValueError('Value 0 must not be repeated in freq')
if freq[-2] == nyq:
raise ValueError('Value fs/2 must not be repeated in freq')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist frequency.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist frequencies.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero "
"frequency.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
if (d == 0).any():
# Tweak any repeated values in freq so that interp works.
freq = np.array(freq, copy=True)
eps = np.finfo(float).eps * nyq
for k in range(len(freq) - 1):
if freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Check if freq is strictly increasing after tweak
d = np.diff(freq)
if (d <= 0).any():
raise ValueError("freq cannot contain numbers that are too close "
"(within eps * (fs/2): "
"{}) to a repeated value".format(eps))
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .windows import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
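# Hedged illustration (not part of SciPy): the filter-type constraints listed
# in the firwin2 Notes, exercised directly. `_firwin2_type_demo` is a
# hypothetical helper name used only for this sketch.
def _firwin2_type_demo():
    # Type I (odd numtaps, symmetric): no constraint at 0 or Nyquist.
    type1 = firwin2(101, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
    # Type IV (even numtaps, antisymmetric): gain must be zero at zero frequency.
    type4 = firwin2(100, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True)
    return type1, type4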
def remez(numtaps, bands, desired, weight=None, Hz=None, type='bandpass',
maxiter=25, grid_density=16, fs=None):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges.
All elements must be non-negative and less than half the sampling
frequency as given by `fs`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
*Deprecated. Use `fs` instead.*
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
* 'bandpass' : flat response in bands. This is the default.
* 'differentiator' : frequency proportional response in bands.
* 'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
fs : float, optional
The sampling frequency of the signal. Default is 1.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
firls
firwin
firwin2
minimum_phase
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
    These examples use `remez` to design band-pass, band-stop, low-pass and
    high-pass filters. The parameters passed in are the filter order, an array
    of band-edge frequencies, the desired gain in each band, and the sampling
    frequency. `freqz` is then used to compute and plot the corresponding
    frequency response.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> def plot_response(fs, w, h, title):
... "Utility function to plot response functions"
... fig = plt.figure()
... ax = fig.add_subplot(111)
... ax.plot(0.5*fs*w/np.pi, 20*np.log10(np.abs(h)))
... ax.set_ylim(-40, 5)
... ax.set_xlim(0, 0.5*fs)
... ax.grid(True)
... ax.set_xlabel('Frequency (Hz)')
... ax.set_ylabel('Gain (dB)')
... ax.set_title(title)
    The first example designs a low-pass filter with a steep transition,
    achieved by a small transition width combined with a high filter order:
>>> fs = 22050.0 # Sample rate, Hz
>>> cutoff = 8000.0 # Desired cutoff frequency, Hz
>>> trans_width = 100 # Width of transition from pass band to stop band, Hz
>>> numtaps = 400 # Size of the FIR filter.
>>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], [1, 0], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Low-pass Filter")
This example shows a high pass filter:
>>> fs = 22050.0 # Sample rate, Hz
>>> cutoff = 2000.0 # Desired cutoff frequency, Hz
>>> trans_width = 250 # Width of transition from pass band to stop band, Hz
>>> numtaps = 125 # Size of the FIR filter.
>>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs],
... [0, 1], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "High-pass Filter")
    For a signal sampled at 22 kHz, a band-pass filter with a pass band of
    2-5 kHz is designed using the Remez algorithm. The transition width is
    260 Hz and the filter order is 10:
>>> fs = 22000.0 # Sample rate, Hz
>>> band = [2000, 5000] # Desired pass band, Hz
>>> trans_width = 260 # Width of transition from pass band to stop band, Hz
>>> numtaps = 10 # Size of the FIR filter.
>>> edges = [0, band[0] - trans_width, band[0], band[1],
... band[1] + trans_width, 0.5*fs]
>>> taps = signal.remez(numtaps, edges, [0, 1, 0], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Band-pass Filter")
It can be seen that for this bandpass filter, the low order leads to higher
ripple and less steep transitions. There is very low attenuation in the
stop band and little overshoot in the pass band. Of course the desired
gain can be better approximated with a higher filter order.
The next example shows a bandstop filter. Because of the high filter order
the transition is quite steep:
>>> fs = 20000.0 # Sample rate, Hz
>>> band = [6000, 8000] # Desired stop band, Hz
>>> trans_width = 200 # Width of transition from pass band to stop band, Hz
>>> numtaps = 175 # Size of the FIR filter.
>>> edges = [0, band[0] - trans_width, band[0], band[1], band[1] + trans_width, 0.5*fs]
>>> taps = signal.remez(numtaps, edges, [1, 0, 1], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Band-stop Filter")
>>> plt.show()
"""
if Hz is None and fs is None:
fs = 1.0
elif Hz is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'Hz' and 'fs'.")
fs = Hz
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError as e:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'") from e
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return _sigtools._remez(numtaps, bands, desired, weight, tnum, fs,
maxiter, grid_density)
def firls(numtaps, bands, desired, weight=None, nyq=None, fs=None):
"""
FIR filter design using least-squares error minimization.
Calculate the filter coefficients for the linear-phase finite
impulse response (FIR) filter which has the best approximation
to the desired frequency response described by `bands` and
`desired` in the least squares sense (i.e., the integral of the
weighted mean-squared error within the specified bands is
minimized).
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be odd.
bands : array_like
A monotonic nondecreasing sequence containing the band edges in
Hz. All elements must be non-negative and less than or equal to
the Nyquist frequency given by `nyq`.
desired : array_like
A sequence the same size as `bands` containing the desired gain
at the start and end point of each band.
weight : array_like, optional
A relative weighting to give to each band region when solving
the least squares problem. `weight` has to be half the size of
`bands`.
nyq : float, optional
*Deprecated. Use `fs` instead.*
Nyquist frequency. Each frequency in `bands` must be between 0
and `nyq` (inclusive). Default is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `bands`
must be between 0 and ``fs/2`` (inclusive). Default is 2.
Returns
-------
coeffs : ndarray
Coefficients of the optimal (in a least squares sense) FIR filter.
See also
--------
firwin
firwin2
minimum_phase
remez
Notes
-----
This implementation follows the algorithm given in [1]_.
As noted there, least squares design has multiple advantages:
1. Optimal in a least-squares sense.
2. Simple, non-iterative method.
        3. The general solution can be obtained by solving a linear
system of equations.
4. Allows the use of a frequency dependent weighting function.
This function constructs a Type I linear phase FIR filter, which
contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:
.. math:: coeffs(n) = coeffs(numtaps - 1 - n)
The odd number of coefficients and filter symmetry avoid boundary
conditions that could otherwise occur at the Nyquist and 0 frequencies
(e.g., for Type II, III, or IV variants).
.. versionadded:: 0.18
References
----------
.. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.
OpenStax CNX. Aug 9, 2005.
http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7
Examples
--------
We want to construct a band-pass filter. Note that the behavior in the
frequency ranges between our stop bands and pass bands is unspecified,
and thus may overshoot depending on the parameters of our filter:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(2)
>>> fs = 10.0 # Hz
>>> desired = (0, 0, 1, 1, 0, 0)
>>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
... fir_firls = signal.firls(73, bands, desired, fs=fs)
... fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
... hs = list()
... ax = axs[bi]
... for fir in (fir_firls, fir_remez, fir_firwin2):
... freq, response = signal.freqz(fir)
... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
... for band, gains in zip(zip(bands[::2], bands[1::2]),
... zip(desired[::2], desired[1::2])):
... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
... if bi == 0:
... ax.legend(hs, ('firls', 'remez', 'firwin2'),
... loc='lower center', frameon=False)
... else:
... ax.set_xlabel('Frequency (Hz)')
... ax.grid(True)
... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
...
>>> fig.tight_layout()
>>> plt.show()
""" # noqa
nyq = 0.5 * _get_fs(fs, nyq)
numtaps = int(numtaps)
if numtaps % 2 == 0 or numtaps < 1:
raise ValueError("numtaps must be odd and >= 1")
M = (numtaps-1) // 2
# normalize bands 0->1 and make it 2 columns
nyq = float(nyq)
if nyq <= 0:
raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
bands = np.asarray(bands).flatten() / nyq
if len(bands) % 2 != 0:
raise ValueError("bands must contain frequency pairs.")
if (bands < 0).any() or (bands > 1).any():
raise ValueError("bands must be between 0 and 1 relative to Nyquist")
bands.shape = (-1, 2)
# check remaining params
desired = np.asarray(desired).flatten()
if bands.size != desired.size:
raise ValueError("desired must have one entry per frequency, got %s "
"gains for %s frequencies."
% (desired.size, bands.size))
desired.shape = (-1, 2)
if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
raise ValueError("bands must be monotonically nondecreasing and have "
"width > 0.")
if (bands[:-1, 1] > bands[1:, 0]).any():
raise ValueError("bands must not overlap.")
if (desired < 0).any():
raise ValueError("desired must be non-negative.")
if weight is None:
weight = np.ones(len(desired))
weight = np.asarray(weight).flatten()
if len(weight) != len(desired):
raise ValueError("weight must be the same size as the number of "
"band pairs (%s)." % (len(bands),))
if (weight < 0).any():
raise ValueError("weight must be non-negative.")
# Set up the linear matrix equation to be solved, Qa = b
# We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
# where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.
# We omit the factor of 0.5 above, instead adding it during coefficient
# calculation.
# We also omit the 1/π from both Q and b equations, as they cancel
# during solving.
# We have that:
# q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
    # Using our normalization ω=πf and with a constant weight W over each
# interval f1->f2 we get:
# q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
# integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)
# Now we assemble our sum of Toeplitz and Hankel
Q1 = toeplitz(q[:M+1])
Q2 = hankel(q[:M+1], q[M:])
Q = Q1 + Q2
# Now for b(n) we have that:
# b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
# Using our normalization ω=πf and with a constant weight W over each
# interval and a linear term for D(ω) we get (over each f1->f2 interval):
# b(n) = W ∫ (mf+c)cos(πnf)df
# = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
# integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
n = n[:M + 1] # only need this many coefficients here
# Choose m and c such that we are at the start and end weights
m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))
c = desired[:, [0]] - bands[:, [0]] * m
b = bands * (m*bands + c) * np.sinc(bands * n)
# Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
b[0] -= m * bands * bands / 2.
b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2
b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)
# Now we can solve the equation
try: # try the fast way
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
a = solve(Q, b, sym_pos=True, check_finite=False)
for ww in w:
if (ww.category == LinAlgWarning and
str(ww.message).startswith('Ill-conditioned matrix')):
raise LinAlgError(str(ww.message))
except LinAlgError: # in case Q is rank deficient
# This is faster than pinvh, even though we don't explicitly use
# the symmetry here. gelsy was faster than gelsd and gelss in
# some non-exhaustive tests.
a = lstsq(Q, b, lapack_driver='gelsy')[0]
# make coefficients symmetric (linear phase)
coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))
return coeffs
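# Hedged illustration (not part of SciPy): the Type I symmetry stated in the
# firls Notes, coeffs[n] == coeffs[numtaps - 1 - n]. `_firls_symmetry_demo`
# is a hypothetical helper name.
def _firls_symmetry_demo():
    coeffs = firls(31, [0, 0.2, 0.3, 1.0], [1, 1, 0, 0], fs=2.0)
    assert np.allclose(coeffs, coeffs[::-1])  # linear phase: symmetric taps
    return coeffs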
def _dhtm(mag):
"""Compute the modified 1-D discrete Hilbert transform
Parameters
----------
mag : ndarray
The magnitude spectrum. Should be 1-D with an even length, and
preferably a fast length for FFT/IFFT.
"""
# Adapted based on code by Niranjan Damera-Venkata,
# Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
sig = np.zeros(len(mag))
# Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
midpt = len(mag) // 2
sig[1:midpt] = 1
sig[midpt+1:] = -1
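    # Added illustration: for len(mag) == 8 the mask built above is
    #   sig = [0, 1, 1, 1, 0, -1, -1, -1]
    # i.e. DC (index 0) and the Nyquist bin (index len(mag)//2) remain zero.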
# eventually if we want to support complex filters, we will need a
# np.abs() on the mag inside the log, and should remove the .real
recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
return recon
def minimum_phase(h, method='homomorphic', n_fft=None):
"""Convert a linear-phase FIR filter to minimum phase
Parameters
----------
h : array
Linear-phase FIR filter coefficients.
method : {'hilbert', 'homomorphic'}
The method to use:
'homomorphic' (default)
This method [4]_ [5]_ works best with filters with an
odd number of taps, and the resulting minimum phase filter
will have a magnitude response that approximates the square
            root of the original filter's magnitude response.
'hilbert'
This method [1]_ is designed to be used with equiripple
filters (e.g., from `remez`) with unity or zero gain
regions.
n_fft : int
The number of points to use for the FFT. Should be at least a
few times larger than the signal length (see Notes).
Returns
-------
h_minimum : array
The minimum-phase version of the filter, with length
``(length(h) + 1) // 2``.
See Also
--------
firwin
firwin2
remez
Notes
-----
Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection
of an FFT length to estimate the complex cepstrum of the filter.
In the case of the Hilbert method, the deviation from the ideal
spectrum ``epsilon`` is related to the number of stopband zeros
``n_stop`` and FFT length ``n_fft`` as::
epsilon = 2. * n_stop / n_fft
For example, with 100 stopband zeros and a FFT length of 2048,
``epsilon = 0.0976``. If we conservatively assume that the number of
stopband zeros is one less than the filter length, we can take the FFT
length to be the next power of 2 that satisfies ``epsilon=0.01`` as::
n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
This gives reasonable results for both the Hilbert and homomorphic
methods, and gives the value used when ``n_fft=None``.
Alternative implementations exist for creating minimum-phase filters,
including zero inversion [2]_ and spectral factorization [3]_ [4]_.
For more information, see:
http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters
Examples
--------
Create an optimal linear-phase filter, then convert it to minimum phase:
>>> from scipy.signal import remez, minimum_phase, freqz, group_delay
>>> import matplotlib.pyplot as plt
>>> freq = [0, 0.2, 0.3, 1.0]
>>> desired = [1, 0]
>>> h_linear = remez(151, freq, desired, Hz=2.)
Convert it to minimum phase:
>>> h_min_hom = minimum_phase(h_linear, method='homomorphic')
>>> h_min_hil = minimum_phase(h_linear, method='hilbert')
Compare the three filters:
>>> fig, axs = plt.subplots(4, figsize=(4, 8))
>>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil),
... ('-', '-', '--'), ('k', 'r', 'c')):
... w, H = freqz(h)
... w, gd = group_delay((h, 1))
... w /= np.pi
... axs[0].plot(h, color=color, linestyle=style)
... axs[1].plot(w, np.abs(H), color=color, linestyle=style)
... axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style)
... axs[3].plot(w, gd, color=color, linestyle=style)
>>> for ax in axs:
... ax.grid(True, color='0.5')
... ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1)
>>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples')
>>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase')
>>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])):
... ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency')
>>> axs[1].set(ylabel='Magnitude')
>>> axs[2].set(ylabel='Magnitude (dB)')
>>> axs[3].set(ylabel='Group delay')
>>> plt.tight_layout()
References
----------
.. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
complex minimum phase digital FIR filters," Acoustics, Speech,
and Signal Processing, 1999. Proceedings., 1999 IEEE International
Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
:doi:`10.1109/ICASSP.1999.756179`
.. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
filters by direct factorization," Signal Processing,
vol. 10, no. 4, pp. 369-383, Jun. 1986.
.. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
Handbook for Digital Signal Processing, chapter 4,
New York: Wiley-Interscience, 1993.
.. [4] J. S. Lim, Advanced Topics in Signal Processing.
Englewood Cliffs, N.J.: Prentice Hall, 1988.
.. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
"Discrete-Time Signal Processing," 2nd edition.
Upper Saddle River, N.J.: Prentice Hall, 1999.
""" # noqa
h = np.asarray(h)
if np.iscomplexobj(h):
raise ValueError('Complex filters not supported')
if h.ndim != 1 or h.size <= 2:
raise ValueError('h must be 1-D and at least 2 samples long')
n_half = len(h) // 2
if not np.allclose(h[-n_half:][::-1], h[:n_half]):
        warnings.warn('h does not appear to be symmetric, conversion may '
'fail', RuntimeWarning)
if not isinstance(method, str) or method not in \
('homomorphic', 'hilbert',):
raise ValueError('method must be "homomorphic" or "hilbert", got %r'
% (method,))
if n_fft is None:
n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
n_fft = int(n_fft)
if n_fft < len(h):
raise ValueError('n_fft must be at least len(h)==%s' % len(h))
if method == 'hilbert':
w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half)
H = np.real(fft(h, n_fft) * np.exp(1j * w))
dp = max(H) - 1
ds = 0 - min(H)
S = 4. / (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2
H += ds
H *= S
H = np.sqrt(H, out=H)
H += 1e-10 # ensure that the log does not explode
h_minimum = _dhtm(H)
else: # method == 'homomorphic'
# zero-pad; calculate the DFT
h_temp = np.abs(fft(h, n_fft))
# take 0.25*log(|H|**2) = 0.5*log(|H|)
h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up
np.log(h_temp, out=h_temp)
h_temp *= 0.5
# IDFT
h_temp = ifft(h_temp).real
# multiply pointwise by the homomorphic filter
# lmin[n] = 2u[n] - d[n]
win = np.zeros(n_fft)
win[0] = 1
stop = (len(h) + 1) // 2
win[1:stop] = 2
if len(h) % 2:
win[stop] = 1
h_temp *= win
h_temp = ifft(np.exp(fft(h_temp)))
h_minimum = h_temp.real
n_out = n_half + len(h) % 2
return h_minimum[:n_out]
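# Hedged illustration (not part of SciPy): choosing n_fft as described in the
# minimum_phase Notes, then converting an equiripple linear-phase design.
# `_minimum_phase_demo` is a hypothetical helper name.
def _minimum_phase_demo():
    h_linear = remez(151, [0, 0.2, 0.3, 1.0], [1, 0], fs=2.0)
    n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h_linear) - 1) / 0.01)))
    return minimum_phase(h_linear, n_fft=n_fft)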
|
scipy/scipy
|
scipy/signal/_fir_filter_design.py
|
Python
|
bsd-3-clause
| 47,821
|
[
"Brian"
] |
16236c68dd79598df0d04947c1a1682f20683069bd9ae74b4b2caf3884e0c092
|
import sys # Import sys to write to output files
print("#----------------------------------------------------------------------------------------------#")
print("#----------------------------------------------------------------------------------------------#")
print("# Script to Extract the Optimized Structure from a Gaussian or ORCA output File #")
print("#----------------------------------------------------------------------------------------------#")
print("#----------------------------------------------------------------------------------------------#")
# ASCII FONTS from: http://patorjk.com/software/taag/
# Font = "Big", "Ivrit"
def Stamp_Hashmi():
print('\n')
print(' _____ _ _ _')
print('| __ \ | | | | | |')
print('| | | | _____ _____| | ___ _ __ ___ __| | | |__ _ _')
print("| | | |/ _ \ \ / / _ \ |/ _ \| '_ \ / _ \/ _` | | '_ \| | | |")
print('| |__| | __/\ V / __/ | (_) | |_) | __/ (_| | | |_) | |_| |')
print('|_____/ \___| \_/ \___|_|\___/| .__/ \___|\__,_| |_.__/ \__, |')
print('| | | | | | | |(_) __/ |')
print('| |__| | __ _ ___| |__ _ __ _|_| _ |___/')
print("| __ |/ _` / __| '_ \| '_ ` _ \| |")
print('| | | | (_| \__ \ | | | | | | | | |')
print('|_| |_|\__,_|___/_| |_|_| |_| |_|_|')
print("Dated: November 01, 2016\n")
#############################################################################################################
# This section is for the definition of Dictionaries, Classes, Functions etc.
#############################################################################################################
# A Dictionary to Convert Atomic Symbols to Atomic Numbers
SymbolToNumber = {
"H" :1, "He" :2, "Li" :3, "Be" :4, "B" :5, "C" :6, "N" :7, "O" :8, "F" :9,
"Ne" :10, "Na" :11, "Mg" :12, "Al" :13, "Si" :14, "P" :15, "S" :16, "Cl" :17,
"Ar" :18, "K" :19, "Ca" :20, "Sc" :21, "Ti" :22, "V" :23, "Cr" :24,
"Mn" :25, "Fe" :26, "Co" :27, "Ni" :28, "Cu" :29, "Zn" :30, "Ga" :31,
"Ge" :32, "As" :33, "Se" :34, "Br" :35, "Kr" :36, "Rb" :37, "Sr" :38,
"Y" :39, "Zr" :40, "Nb" :41, "Mo" :42, "Tc" :43, "Ru" :44, "Rh" :45,
"Pd" :46, "Ag" :47, "Cd" :48, "In" :49, "Sn" :50, "Sb" :51, "Te" :52,
"I" :53, "Xe" :54, "Cs" :55, "Ba" :56, "La" :57, "Ce" :58, "Pr" :59,
"Nd" :60, "Pm" :61, "Sm" :62, "Eu" :63, "Gd" :64, "Tb" :65, "Dy" :66,
"Ho" :67, "Er" :68, "Tm" :69, "Yb" :70, "Lu" :71, "Hf" :72, "Ta" :73,
"W" :74, "Re" :75, "Os" :76, "Ir" :77, "Pt" :78, "Au" :79, "Hg" :80,
"Tl" :81, "Pb" :82, "Bi" :83, "Po" :84, "At" :85, "Rn" :86, "Fr" :87,
"Ra" :88, "Ac" :89, "Th" :90, "Pa" :91, "U" :92, "Np" :93, "Pu" :94,
"Am" :95, "Cm" :96, "Bk" :97, "Cf" :98, "Es" :99, "Fm" :100, "Md" :101,
"No" :102, "Lr" :103, "Rf" :104, "Db" :105, "Sg" :106, "Bh" :107,
"Hs" :108, "Mt" :109, "Ds" :110, "Rg" :111, "Cn" :112, "Uut":113,
"Fl" :114, "Uup":115, "Lv" :116, "Uus":117, "Uuo":118}
# Invert the Above: Atomic Numbers to Atomic Symbols
NumberToSymbol = {v: k for k, v in SymbolToNumber.items()}
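# Quick comment-only illustration of the round trip provided by the two maps:
#   SymbolToNumber["Fe"] -> 26   and   NumberToSymbol[26] -> "Fe"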
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+#
# A Class for Extracting the Optimized Coordinates from a Gaussian or ORCA output File #
#=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Start of Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
class ExtractCoords(object):
"""A Class for Extracting the Optimized Coordinates from a Gaussian or ORCA ouput File"""
def __init__(self, file):
self.file = file
#------------------------------------------------------------------------------#
# Define a function to read the input file and extract the optimized structure #
#------------------------------------------------------------------------------#
#----------------------------- Start of Function -----------------------------#
def extractCoordinates(self, file):
#print("\nExtracting the Molecule from the given output file")
f = open(file, 'r')
program = "N/A"
# Determine if we're dealing with Gaussian09 or ORCA
for line in f:
if line.find("Entering Gaussian System, Link 0=g09") != -1 or line.find("Copyright (c) 1988,1990,1992,1993,1995,1998,2003,2009, Gaussian, Inc.") != -1:
print("Reading Gaussian output file: ", file, '\n')
program = "g09"
break
elif line.find("* O R C A *") != -1:
print("Reading ORCA output file: ", file, '\n')
program = "orca"
break
f.close()
# GEOMETRY READING SECTION
geom = []
# Read through Gaussian file, read "Standard orientation"
if program == "g09":
f = open(file, 'r')
for line in f:
if line.find("Standard orientation:") != -1:
del geom[:] # Delete a previous orientation if any and store the current one
for i in range(0, 4):
readStructure = f.__next__() # Read the structure after 4th line from 'Standard Orientation'
while True:
readStructure = f.__next__()
if readStructure.find("-----------") == -1: # Keep reading unless find ------
readStructure = readStructure.split()
geom.append(readStructure) # To append the current orientation to the list 'geom'
#print(readStructure)
else:
break
            # Drop the center-number and atomic-type columns (items 1 and 3), convert the atomic number to its element symbol, and cast the coordinates to floats
for i in geom:
del i[0:3:2]
i[0] = NumberToSymbol[int(i[0])]
i[1] = float(i[1]) # To convert the x coordinates from a string to floating number
i[2] = float(i[2]) # To convert the y coordinates from a string to floating number
i[3] = float(i[3]) # To convert the z coordinates from a string to floating number
return geom
# Read through ORCA file and find the Cartesian coordinates
elif program == "orca":
f = open(file, 'r')
for line in f:
if line.find("CARTESIAN COORDINATES (ANGSTROEM)") != -1:
del geom[:]
readStructure = f.__next__()
while True:
readStructure = f.__next__()
if readStructure and readStructure.strip():
readStructure = readStructure.split()
geom.append(readStructure)
else:
break
# A Loop to convert the coordinates to floating point numbers
for i in geom:
i[1] = float(i[1])
i[2] = float(i[2])
i[3] = float(i[3])
return geom
#------------------------------ End of Function ------------------------------#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End of Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+#
# A Class for Writing the Script Output to an output file #
#=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Start of Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
class WriteOutputFile(object):
"""A Class for Writing the Script Output to an output file for Gaussian, ORCA, or an xyz file"""
def __init__(self, geometry, filename):
self.geometry = geometry
#self.filename = filename
self.filename = filename.split('\\').pop().split('/').pop().rsplit('.', 1)[0]
#----------------------------------------------------#
# To write the new coordinates into a Gaussian file #
#----------------------------------------------------#
def GaussianFile(self, geometry):
output_file = sys.stdout
sys.stdout = open(self.filename+'R'+'.gjf', 'w')
#print('%nprocshared=24') #shared processor line
#print('%mem=3000mb')#memory line
#print('%chk=checkpoint_file.chk') #Checkpoint File)
print('#p opt pbe1pbe def2svp empiricaldispersion=gd3bj scf=xqc integral=(grid=ultrafine)\n') #Route card
print('Title Card Required\n') #Title Card
print('0 1') #Charge and Multiplicity
#Below are the xyz coordinates
for i in geometry:
print(" {:<3} {: .8f} {: .8f} {: .8f}".format(i[0], i[1], i[2], i[3]))
print('\n')
sys.stdout.close()
sys.stdout = output_file
#=====================================================#
#------------------------------------------------------#
# To write the new coordinates into an ORCA input file #
#------------------------------------------------------#
def OrcaFile(self, geometry):
output_file2 = sys.stdout
sys.stdout = open(self.filename+'R'+'.inp', 'w')
print('%MaxCore 3000') # Amount of memory per core
print('% pal nprocs 16') #Number of Processors
print(' end') #End of Block
print('#Single Point Energy of test molecule') #Title Card
print('! PBE0 D3 RIJCOSX def2-SVP def2-SVP/J Grid5 GridX7 VERYTIGHTSCF\n') #Route Card
#print('! moread')
#print('%moinp "old_test.gbw"\n')
print('*xyz 0 1') #Charge and Multiplicity
#Below are the xyz coordinates
for i in geometry:
print("{:<3} {: .8f} {: .8f} {: .8f}".format(i[0], i[1], i[2], i[3]))
print('*')
sys.stdout.close()
sys.stdout = output_file2
#=====================================================#
#-------------------------------------------------#
# To write the new coordinates into an xyz file #
#-------------------------------------------------#
def xyzFile(self, geometry):
# Find the total number of atoms in the molecule
n_atoms = len(geometry)
output_file3 = sys.stdout
sys.stdout = open(self.filename+'R'+'.xyz', 'w')
print(n_atoms)
print('XYZ Coordinates of the given molecule')
#Below are the xyz coordinates
for i in geometry:
print("{:<3} {: .8f} {: .8f} {: .8f}".format(i[0], i[1], i[2], i[3]))
sys.stdout.close()
sys.stdout = output_file3
#=====================================================#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End of Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#==============================================================================#
###############################################################################
# #
# The Main Part of the Program Starts Here #
# #
###############################################################################
# Specify the output file
# Take the input file from the first command-line argument; comment this line out and use one of the hard-coded filenames below instead if preferred
output_file = sys.argv[1]
#output_file = 'file_opt.log'
#output_file = 'c60.out'
# Molecule is a variable that runs the class ExtractCoords
molecule = ExtractCoords(output_file)
# Define a list to store the extracted structure from the output file
geometry = molecule.extractCoordinates(output_file)
# Print the extracted coordinates
#print('The total number of atoms in the given molecule are:', n_atoms, '\n')
for i in geometry:
print("{:<3} {: .8f} {: .8f} {: .8f}".format(i[0], i[1], i[2], i[3]))
#========================================================================================#
write_output = WriteOutputFile(geometry, output_file) # Class to write the extracted geometry to a file
#write_output.GaussianFile(geometry) # Write to a Gaussian input file
#write_output.OrcaFile(geometry) # Write to an ORCA input file
write_output.xyzFile(geometry) # Write to an xyz file
#========================================================================================#
Stamp_Hashmi()
#-----------------------------------#
# A Script by Muhammad Ali Hashmi #
# muhammad.hashmi@vuw.ac.nz #
#-----------------------------------#
|
i4hashmi/Python_Scripts
|
Extract_Optimized_Molecule/Extract_Optimized_Molecule_Gaussian+ORCA.py
|
Python
|
gpl-3.0
| 13,194
|
[
"Gaussian",
"ORCA"
] |
e16f2e48c307a2dc2137d1c194678093f94074effff4916af23f06987b2eb86a
|
#
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2011-2019 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
'''
diamondd has a long-lived supervisor process responsible for accepting
connections. The supervisor process forks to produce a child process which
handles a particular search. This ensures that any resource leaks within
the search logic will not accumulate in a long-running process.
The supervisor diamondd process is single-threaded, and all of its network I/O
is performed non-blockingly. It is responsible for the following:
1. Listening for incoming control and blast channel connections and pairing
them via a nonce communicated when the connection is first established.
2. Establishing a temporary directory and forking a child process for every
connection pair.
3. Cleaning up after search processes which have exited by deleting their
temporary directories and killing all of their children (filters and helper
processes).
The child is responsible for handling the search. Initially it has only one
thread, which is responsible for handling the control connection back to the
client. All client RPCs, including search reexecution, are handled in this
thread. When the start() RPC is received, the child creates N worker
threads (configurable, defaulting to the number of processors on the
machine) to process objects for the search.
Several pieces of mutable state are shared between threads. The control
thread configures a ScopeListLoader which iterates over the in-scope Diamond
objects, returning a new object to each worker thread that asks for one.
The blast channel is also shared. There are also shared objects for logging
and for tracking of statistics and session variables. All of these objects
have locking to ensure consistency.
Each worker thread maintains a private TCP connection to the Redis server,
which is used for result and attribute caching. Each worker thread also
maintains one child process for each filter in the filter stack. These
children are the actual filter code, and communicate with the worker thread
via a pair of pipes. Because each worker thread has its own set of filter
processes, worker threads can process objects independently.
Each worker thread executes a loop:
1. Obtain a new object from the ScopeListLoader.
2. Retrieve result cache entries from Redis.
3. Walk the result cache entries to determine if a drop decision can be
made. If so, drop the object.
4. For each filter in the filter chain, determine whether we received a
valid result cache entry for the filter. If so, attempt to obtain attribute
cache entries from Redis. If successful, merge the cached attributes into
the object. Otherwise, execute the filter. If the filter produces a drop
decision, break.
5. Transmit new result cache entries, as well as attribute cache entries
for filters producing less than 2 MB/s of attribute values, to Redis.
6. If accepting the object, transmit it to the client via the blast
channel.
If a filter crashes while processing an object, the object is dropped and
the filter is restarted. If a worker thread or the control thread crashes,
the exception is logged and the entire search is terminated.
'''
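# Illustrative pseudocode (comment only, not part of OpenDiamond): the worker
# loop described in the docstring above, in rough outline. Every name below is
# a placeholder, not an actual API of this package.
#
#   while True:
#       obj = scope_list_loader.next_object()        # 1. obtain an object
#       entries = redis.result_cache_lookup(obj)     # 2. fetch result-cache entries
#       if drop_decision(entries):                   # 3. drop early when possible
#           continue
#       for filt in filter_stack:                    # 4. reuse cached attrs or run filter
#           if not run_or_reuse(filt, obj):
#               break
#       else:
#           push_cache_entries(obj)                  # 5. update the Redis caches
#           blast_channel.send(obj)                  # 6. transmit the accepted object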
from builtins import object
from datetime import datetime, timedelta
import logging
from logging.handlers import TimedRotatingFileHandler
import os
import re
import signal
import sys
from raven.conf import setup_logging
from raven.handlers.logging import SentryHandler
import opendiamond
from opendiamond.blobcache import ExecutableBlobCache
from opendiamond.helpers import daemonize, signalname
from opendiamond.rpc import RPCConnection, ConnectionFailure
from opendiamond.server.child import ChildManager
from opendiamond.server.listen import ConnListener
from opendiamond.server.search import Search
SEARCH_LOG_DATE_FORMAT = '%Y-%m-%d-%H:%M:%S'
SEARCH_LOG_FORMAT = 'search-%s-%d.log' # Args: date, pid
SEARCH_LOG_REGEX = r'search-(.+)-[0-9]+\.log$' # Match group: timestamp
_log = logging.getLogger(__name__)
class _Signalled(BaseException):
'''Exception indicating that a signal has been received.'''
def __init__(self, sig):
BaseException.__init__(self)
self.signal = sig
self.signame = signalname(sig)
class _TimestampedLogFormatter(logging.Formatter):
'''Format a log message with a timestamp including milliseconds delimited
by a decimal point.'''
def __init__(self):
logging.Formatter.__init__(self, '%(asctime)s %(message)s',
'%Y-%m-%d %H:%M:%S')
# We're overriding a method; we can't control its name
# pylint: disable=invalid-name
def formatTime(self, record, datefmt=None):
s = datetime.fromtimestamp(record.created).strftime(datefmt)
return s + '.%03d' % record.msecs
# pylint: enable=invalid-name
class DiamondServer(object):
caught_signals = (signal.SIGINT, signal.SIGTERM, signal.SIGUSR1)
def __init__(self, config):
# Daemonize before doing any other setup
if config.daemonize:
daemonize()
self.config = config
self._children = ChildManager(config.cgroupdir, not config.oneshot)
self._listener = ConnListener(config.diamondd_port)
self._last_log_prune = datetime.fromtimestamp(0)
self._last_cache_prune = datetime.fromtimestamp(0)
self._ignore_signals = False
# Configure signals
for sig in self.caught_signals:
signal.signal(sig, self._handle_signal)
# Configure logging
baselog = logging.getLogger()
baselog.setLevel(config.loglevel)
if not config.daemonize:
# In daemon mode, stderr goes to /dev/null, so don't bother
# logging there.
handler = logging.StreamHandler()
baselog.addHandler(handler)
self._logfile_handler = TimedRotatingFileHandler(
os.path.join(config.logdir, 'diamondd.log'), when='midnight',
backupCount=config.logdays)
self._logfile_handler.setFormatter(_TimestampedLogFormatter())
baselog.addHandler(self._logfile_handler)
# We intentionally catch all exceptions
# pylint doesn't understand the conditional return in ConnListener.accept()
# pylint: disable=broad-except,unpacking-non-sequence
def run(self):
try:
# Log startup of parent
_log.info('Starting supervisor %s, pid %d',
opendiamond.__version__, os.getpid())
_log.info('Server IDs: %s', ', '.join(self.config.serverids))
if self.config.cache_server:
_log.info('Cache: %s:%d', *self.config.cache_server)
while True:
# Check for search logs that need to be pruned
self._prune_child_logs()
# Check for blob cache objects that need to be pruned
self._prune_blob_cache()
# Accept a new connection pair
control, data = self._listener.accept()
# Fork a child for this connection pair. In the child, this
# does not return.
self._children.start(self._child, control, data)
# Close the connection pair in the parent
control.close()
data.close()
except _Signalled as s:
_log.info('Supervisor exiting on %s', s.signame)
# Stop listening for incoming connections
self._listener.shutdown()
# Kill our children and clean up after them
self._children.kill_all()
# Shut down logging
logging.shutdown()
# Ensure our exit status reflects that we died on the signal
signal.signal(s.signal, signal.SIG_DFL)
os.kill(os.getpid(), s.signal)
except Exception:
_log.exception('Supervisor exception')
# Don't attempt to shut down cleanly; just flush logging buffers
logging.shutdown()
sys.exit(1)
# pylint: enable=broad-except,unpacking-non-sequence
# We intentionally catch all exceptions
# pylint: disable=broad-except
def _child(self, control, data):
'''Main function for child process.'''
# Close supervisor log, open child log
baselog = logging.getLogger()
baselog.removeHandler(self._logfile_handler)
del self._logfile_handler
now = datetime.now().strftime(SEARCH_LOG_DATE_FORMAT)
logname = SEARCH_LOG_FORMAT % (now, os.getpid())
logpath = os.path.join(self.config.logdir, logname)
handler = logging.FileHandler(logpath)
handler.setFormatter(_TimestampedLogFormatter())
baselog.addHandler(handler)
if self.config.sentry_dsn:
sentry_handler = SentryHandler(self.config.sentry_dsn)
sentry_handler.setLevel(logging.ERROR)
setup_logging(sentry_handler)
# Okay, now we have logging
search = None
try:
try:
# Close listening socket and half-open connections
self._listener.shutdown()
# Log startup of child
_log.info('Starting search %s, pid %d',
opendiamond.__version__, os.getpid())
_log.info('Peer: %s', control.getpeername()[0])
_log.info('Worker threads: %d', self.config.threads)
# Set up connection wrappers and search object
control = RPCConnection(control)
search = Search(self.config, RPCConnection(data))
# Dispatch RPCs on the control connection until we die
while True:
control.dispatch(search)
finally:
# Ensure that further signals (particularly SIGUSR1 from
# worker threads) don't interfere with the shutdown process.
self._ignore_signals = True
except ConnectionFailure:
# Client closed connection
_log.info('Client closed connection')
except _Signalled as s:
# Worker threads raise SIGUSR1 when they've encountered a
# fatal error
if s.signal != signal.SIGUSR1:
_log.info('Search exiting on %s', s.signame)
except Exception:
_log.exception('Control thread exception')
finally:
if search is not None:
search.shutdown()
logging.shutdown()
# pylint: enable=broad-except
def _prune_child_logs(self):
'''Remove search logs older than the configured number of days.'''
# Do this check no more than once an hour
if datetime.now() - self._last_log_prune < timedelta(hours=1):
return
self._last_log_prune = datetime.now()
threshold = datetime.now() - timedelta(days=self.config.logdays)
pattern = re.compile(SEARCH_LOG_REGEX)
count = 0
for file in os.listdir(self.config.logdir):
# First check the timestamp in the file name. This prevents
# us from having to stat a bunch of log files that we aren't
            # interested in GCing anyway.
match = pattern.match(file)
if match is None:
continue
try:
start = datetime.strptime(match.group(1),
SEARCH_LOG_DATE_FORMAT)
except ValueError:
continue
if start >= threshold:
continue
# Now examine the file's mtime to ensure we're not deleting logs
# from long-running searches
path = os.path.join(self.config.logdir, file)
try:
if datetime.fromtimestamp(os.stat(path).st_mtime) < threshold:
os.unlink(path)
count += 1
except OSError:
pass
if count > 0:
_log.info('Pruned %d search logs', count)
def _prune_blob_cache(self):
'''Remove blob cache entries older than the configured number of
days.'''
# Do this check no more than once an hour
if datetime.now() - self._last_cache_prune < timedelta(hours=1):
return
self._last_cache_prune = datetime.now()
ExecutableBlobCache.prune(self.config.cachedir,
self.config.blob_cache_days)
def _handle_signal(self, sig, _frame):
'''Signal handler in the supervisor.'''
if not self._ignore_signals:
raise _Signalled(sig)
|
cmusatyalab/opendiamond
|
opendiamond/server/server.py
|
Python
|
epl-1.0
| 12,962
|
[
"BLAST"
] |
abd79741b91a8cd6db1b36181cf40409da2cdd0676a2652ba7e770e4958f6216
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os
import pytest
import numpy as np
from pyspark.sql.types import ArrayType, DoubleType
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.data import SparkXShards
from zoo.orca.data.image.utils import chunks
from zoo.orca.learn.utils import convert_predict_rdd_to_dataframe, _dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, convert_predict_rdd_to_xshard, update_predict_xshards
resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
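# Illustrative note (not part of the tests): an XShards partition in this file
# is simply a dict of numpy arrays, e.g. {"x": array of shape (5, 50)}, built
# by grouping an RDD with `chunks(iter, 5)` and stacking each group.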
class TestUtil(TestCase):
def setUp(self):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
self.sc = init_orca_context(cores=4)
def to_array_(v):
return v.toArray().tolist()
def flatten_(v):
result = []
for elem in v:
result.extend(elem.toArray().tolist())
return result
from pyspark.sql import SparkSession
spark = SparkSession(self.sc)
spark.udf.register("to_array", to_array_, ArrayType(DoubleType()))
spark.udf.register("flatten", flatten_, ArrayType(DoubleType()))
def tearDown(self):
""" teardown any state that was previously setup with a setup_method
call.
"""
stop_orca_context()
def test_convert_predict_rdd_to_dataframe(self):
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 50,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
pred_rdd = rdd.map(lambda x: np.array([float(x)] * 50))
result_df = convert_predict_rdd_to_dataframe(df, pred_rdd)
expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
assert result_df.selectExpr(expr).first()["error"] == 0
def test_convert_predict_rdd_to_dataframe_multi_output(self):
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 50,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
pred_rdd = rdd.map(lambda x: [np.array([float(x)] * 25), np.array([float(x)] * 25)])
result_df = convert_predict_rdd_to_dataframe(df, pred_rdd)
expr = "sum(cast(feature <> flatten(prediction) as int)) as error"
assert result_df.selectExpr(expr).first()["error"] == 0
def test_convert_predict_rdd_to_xshard(self):
rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(lambda x: {"x": np.stack(x)})
shards = SparkXShards(shards)
pred_rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
result_shards = convert_predict_rdd_to_xshard(shards, pred_rdd)
result = np.concatenate([shard["prediction"] for shard in result_shards.collect()])
expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
assert np.array_equal(result, expected_result)
def test_convert_predict_rdd_to_xshard_multi_output(self):
rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(lambda x: {"x": np.stack(x)})
shards = SparkXShards(shards)
pred_rdd = self.sc.range(0, 110).map(lambda x: [np.array([x]*24), np.array([x]*26)])
result_shards = convert_predict_rdd_to_xshard(shards, pred_rdd)
result = np.concatenate([np.concatenate(shard["prediction"], axis=1)
for shard in result_shards.collect()])
expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
assert np.array_equal(result, expected_result)
def test_update_predict_xshard(self):
def get_xshards(key):
rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
lambda x: {key: np.stack(x)})
shards = SparkXShards(shards)
return shards
data_shards = get_xshards("x")
pred_shards = get_xshards("prediction")
result_shards = update_predict_xshards(data_shards, pred_shards)
result = np.concatenate([shard["prediction"] for shard in result_shards.collect()])
expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
assert np.array_equal(result, expected_result)
def test_update_predict_xshard_multi_output(self):
def get_data_xshards(key):
rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
lambda x: {key: np.stack(x)})
shards = SparkXShards(shards)
return shards
def get_pred_xshards(key):
rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
lambda x: {key: np.stack(x)}).map(lambda x: {key: [x[key][:, :24], x[key][:, 24:]]})
shards = SparkXShards(shards)
return shards
data_shards = get_data_xshards("x")
pred_shards = get_pred_xshards("prediction")
result_shards = update_predict_xshards(data_shards, pred_shards)
result = np.concatenate([np.concatenate(shard["prediction"], axis=1)
for shard in result_shards.collect()])
expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
assert np.array_equal(result, expected_result)
def test_convert_predict_xshards_to_dataframe(self):
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 50,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
pred_shards = _dataframe_to_xshards(df, feature_cols=["feature"]).transform_shard(
lambda x: {"prediction": x["x"]})
result_df = convert_predict_xshards_to_dataframe(df, pred_shards)
expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
assert result_df.selectExpr(expr).first()["error"] == 0
def test_convert_predict_xshards_to_dataframe_multi_output(self):
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 50,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
pred_shards = _dataframe_to_xshards(df, feature_cols=["feature"]).transform_shard(
lambda x: {"prediction": [x["x"][:, :25], x["x"][:, 25:]]})
result_df = convert_predict_xshards_to_dataframe(df, pred_shards)
expr = "sum(cast(feature <> flatten(prediction) as int)) as error"
assert result_df.selectExpr(expr).first()["error"] == 0
def test_array2dict(self):
from zoo.orca.learn.utils import arrays2dict
record_num = 100
shard_size = 30
data = [(np.float32(np.random.randn(1, 50)), np.float32([np.random.randint(0, 2,)]))
for i in range(record_num)]
result = arrays2dict(data, feature_cols=["feature"], label_cols=["label"],
shard_size=shard_size)
for i, d in enumerate(result):
if (record_num % shard_size == 0) or (i != record_num // shard_size):
assert d['x'].shape[0] == shard_size
assert d['y'].shape[0] == shard_size
else:
assert d['x'].shape[0] == record_num % shard_size
assert d['y'].shape[0] == record_num % shard_size
def test_array2dict_shard_size_none(self):
from zoo.orca.learn.utils import arrays2dict
record_num = 100
data = [(np.float32(np.random.randn(1, 50)), np.float32([np.random.randint(0, 2,)]))
for i in range(record_num)]
result = arrays2dict(data, feature_cols=["feature"], label_cols=["label"], shard_size=None)
for i, d in enumerate(result):
assert d['x'].shape[0] == record_num
assert d['y'].shape[0] == record_num
def test_dataframe_to_xshards(self):
rdd = self.sc.range(0, 100)
df = rdd.map(lambda x: ([float(x)] * 50,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
num_partitions = df.rdd.getNumPartitions()
# test shard_size = None
shards = _dataframe_to_xshards(df, feature_cols=["feature"], label_cols=["label"])
num_shards = shards.rdd.count()
assert num_shards == num_partitions
from zoo.orca import OrcaContext
OrcaContext._shard_size = 1
shards = _dataframe_to_xshards(df, feature_cols=["feature"], label_cols=["label"])
num_shards = shards.rdd.count()
assert num_shards == df.rdd.count()
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/learn/test_utils.py
|
Python
|
apache-2.0
| 9,763
|
[
"ORCA"
] |
d0f6f780b3ef6bf3ef4a6392ff2f6a48740e0d6d49bda56f53581ac442181583
|
# Copyright 2015 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import parsimonious
from parsimonious import ParseError
class Grammar:
def __init__(self):
self.rules = []
def rule(self, r):
self.rules.append(r)
def decorate(action):
def decorator(self, node, children):
rule_name = action.__name__[len("visit_"):]
result = action(self, node, children)
if hasattr(result, "origin"):
result.origin(node)
result._rule = rule_name
return result
return decorator
return decorate
def parser(self, cls):
extra_rules = []
visitors = {}
for kw in getattr(cls, "keywords", ()):
extra_rules.append('%s = _ "%s" !~"[_a-zA-Z0-9]" _' % (kw.upper(), kw))
visitors["visit_%s" % kw.upper()] = lambda self, node, children, x=kw: x
for name, sym in getattr(cls, "symbols", {}).items():
extra_rules.append('%s = _ "%s" _' % (name, sym))
visitors["visit_%s" % name] = lambda self, node, children, x=sym: x
class Parser(cls, parsimonious.NodeVisitor):
rules = "\n".join(self.rules + extra_rules)
grammar = parsimonious.Grammar(rules)
def rule(self, name, text):
return self.visit(self.grammar[name].parse(text))
        for k, v in visitors.items():
cls.__dict__[k] = v
return Parser
|
bozzzzo/quark
|
quarkc/grammar.py
|
Python
|
apache-2.0
| 2,034
|
[
"VisIt"
] |
6973741bcffe414a619c77c30d4f301412e055110927b34c6e384e1ece4aa05f
|
# -*- coding: utf-8 -*-
"""
solace.application
~~~~~~~~~~~~~~~~~~
The WSGI application for Solace.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
from urlparse import urlparse, urlsplit, urljoin
from fnmatch import fnmatch
from functools import update_wrapper
from simplejson import dumps
from babel import UnknownLocaleError, Locale
from werkzeug import Request as RequestBase, Response, cached_property, \
import_string, redirect, SharedDataMiddleware, url_quote, \
url_decode
from werkzeug.exceptions import HTTPException, NotFound, BadRequest, Forbidden
from werkzeug.routing import BuildError, RequestRedirect
from werkzeug.contrib.securecookie import SecureCookie
from solace.utils.ctxlocal import local, LocalProperty
# already resolved and imported views
_resolved_views = {}
class Request(RequestBase):
"""The request class."""
in_api = False
csrf_protected = False
_locale = None
_pulled_flash_messages = None
#: each request might transmit up to four megs of payload that
#: is stored in memory. If more is transmitted, Werkzeug will
#: abort the request with an appropriate status code. This should
    #: not happen unless someone really tampers with the data.
max_form_memory_size = 4 * 1024 * 1024
def __init__(self, environ):
RequestBase.__init__(self, environ)
before_request_init.emit()
self.url_adapter = url_map.bind_to_environ(self.environ)
self.view_lang = self.match_exception = None
try:
self.endpoint, self.view_arguments = self.url_adapter.match()
view_lang = self.view_arguments.pop('lang_code', None)
if view_lang is not None:
try:
self.view_lang = Locale.parse(view_lang)
if not has_section(self.view_lang):
raise UnknownLocaleError(str(self.view_lang))
except UnknownLocaleError:
self.view_lang = None
self.match_exception = NotFound()
except HTTPException, e:
self.endpoint = self.view_arguments = None
self.match_exception = e
self.sql_queries = []
local.request = self
after_request_init.emit(request=self)
current = LocalProperty('request')
def dispatch(self):
"""Where do we want to go today?"""
before_request_dispatch.emit(request=self)
try:
if self.match_exception is not None:
raise self.match_exception
rv = self.view(self, **self.view_arguments)
except BadRequest, e:
rv = get_view('core.bad_request')(self)
except Forbidden, e:
rv = get_view('core.forbidden')(self)
except NotFound, e:
rv = get_view('core.not_found')(self)
rv = self.process_view_result(rv)
after_request_dispatch.emit(request=self, response=rv)
return rv
def process_view_result(self, rv):
"""Processes a view's return value and ensures it's a response
object. This is automatically called by the dispatch function
but is also handy for view decorators.
"""
if isinstance(rv, basestring):
rv = Response(rv, mimetype='text/html')
elif not isinstance(rv, Response):
rv = Response.force_type(rv, self.environ)
return rv
def _get_locale(self):
"""The locale of the incoming request. If a locale is unsupported, the
        default English locale is used. If the locale is assigned it will be
        stored in the session so that language changes are persistent.
"""
if self._locale is not None:
return self._locale
rv = self.session.get('locale')
if rv is not None:
rv = Locale.parse(rv)
# we could trust the cookie here because it's signed, but we do not
# because the configuration could have changed in the meantime.
if not has_section(rv):
rv = None
if rv is None:
rv = select_locale(self.accept_languages)
self._locale = rv
return rv
def _set_locale(self, locale):
self._locale = Locale.parse(locale)
self.__dict__.pop('translations', None)
self.session['locale'] = str(self._locale)
locale = property(_get_locale, _set_locale)
del _get_locale, _set_locale
@cached_property
def translations(self):
"""The translations for this request."""
return load_translations(self.locale)
@property
def timezone_known(self):
"""If the JavaScript on the client set the timezone already this returns
True, otherwise False.
"""
return self.session.get('timezone') is not None
@cached_property
def tzinfo(self):
"""The timezone information."""
offset = self.session.get('timezone')
if offset is not None:
return Timezone(offset)
@cached_property
def next_url(self):
"""Sometimes we want to redirect to different URLs back or forth.
For example the login function uses this attribute to find out
where it should go.
If there is a `next` parameter on the URL or in the form data, the
function will redirect there, if it's not there, it checks the
referrer.
It's usually better to use the get_redirect_target method.
"""
return self.get_redirect_target()
def get_localized_next_url(self, locale=None):
"""Like `next_url` but tries to go to the localized section."""
if locale is None:
locale = self.locale
next_url = self.get_redirect_target()
if next_url is None:
return
scheme, netloc, path, query = urlsplit(next_url)[:4]
path = path.decode('utf-8')
# aha. we're redirecting somewhere out of our control
if netloc != self.host or not path.startswith(self.script_root):
return next_url
path = path[len(self.script_root):]
try:
endpoint, values = self.url_adapter.match(path)
except NotFound, e:
return next_url
except RequestRedirect:
pass
if 'lang_code' not in values:
return next_url
values['lang_code'] = str(locale)
return self.url_adapter.build(endpoint, values) + \
(query and '?' + query or '')
def get_redirect_target(self, invalid_targets=()):
"""Check the request and get the redirect target if possible.
If not this function returns just `None`. The return value of this
function is suitable to be passed to `redirect`.
"""
check_target = self.values.get('_redirect_target') or \
self.values.get('next') or \
self.referrer
# if there is no information in either the form data
# or the wsgi environment about a jump target we have
# to use the target url
if not check_target:
return
# otherwise drop the leading slash
check_target = check_target.lstrip('/')
root_url = self.url_root
root_parts = urlparse(root_url)
check_parts = urlparse(urljoin(root_url, check_target))
check_query = url_decode(check_parts[4])
def url_equals(to_check):
if to_check[:4] != check_parts[:4]:
return False
args = url_decode(to_check[4])
for key, value in args.iteritems():
if check_query.get(key) != value:
return False
return True
# if the jump target is on a different server we probably have
# a security problem and better try to use the target url.
# except the host is whitelisted in the config
if root_parts[:2] != check_parts[:2]:
host = check_parts[1].split(':', 1)[0]
for rule in settings.ALLOWED_REDIRECTS:
if fnmatch(host, rule):
break
else:
return
# if the jump url is the same url as the current url we've had
        # a bad redirect before and use the target url to not create an
        # infinite redirect.
if url_equals(urlparse(self.url)):
return
# if the `check_target` is one of the invalid targets we also
# fall back.
for invalid in invalid_targets:
if url_equals(urlparse(urljoin(root_url, invalid))):
return
return check_target
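    # Illustrative sketch (not part of the original source; names are made up):
    # a view that falls back to the overview page when no safe target exists.
    #
    #     def some_view(request):
    #         target = request.get_redirect_target(invalid_targets=('login',))
    #         return redirect(target or url_for('kb.overview'))
    #
    # An off-site ?next=http://evil.example/ fails the host check above and
    # yields None unless the host is whitelisted in settings.ALLOWED_REDIRECTS.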
@cached_property
def user(self):
"""The current user."""
return get_auth_system().get_user(self)
@property
def is_logged_in(self):
"""Is the user logged in?"""
return self.user is not None
@cached_property
def view(self):
"""The view function."""
return get_view(self.endpoint)
@cached_property
def session(self):
"""The active session."""
return SecureCookie.load_cookie(self, settings.COOKIE_NAME,
settings.SECRET_KEY)
@property
def is_behind_proxy(self):
"""Are we behind a proxy? Accessed by Werkzeug when needed."""
return settings.IS_BEHIND_PROXY
def list_languages(self):
"""Lists all languages."""
return [dict(
name=locale.display_name,
key=key,
selected=self.locale == locale,
select_url=url_for('core.set_language', locale=key),
section_url=url_for('kb.overview', lang_code=key)
) for key, locale in list_languages()]
def flash(self, message, error=False):
"""Flashes a message."""
type = error and 'error' or 'info'
self.session.setdefault('flashes', []).append((type, message))
def pull_flash_messages(self):
"""Returns all flash messages. They will be removed from the
session at the same time. This also pulls the messages from
the database that are queued for the user.
"""
msgs = self._pulled_flash_messages or []
if self.user is not None:
to_delete = set()
for msg in UserMessage.query.filter_by(user=self.user).all():
msgs.append((msg.type, msg.text))
to_delete.add(msg.id)
if to_delete:
UserMessage.query.filter(UserMessage.id.in_(to_delete)).delete(synchronize_session='fetch')
session.commit()
if 'flashes' in self.session:
msgs += self.session.pop('flashes')
self._pulled_flash_messages = msgs
return msgs
def get_view(endpoint):
"""Returns the view for the endpoint. It will cache both positive and
negative hits, so never pass untrusted values to it. If a view does
not exist, `None` is returned.
"""
view = _resolved_views.get(endpoint)
if view is not None:
return view
try:
view = import_string('solace.views.' + endpoint)
except (ImportError, AttributeError):
view = import_string(endpoint, silent=True)
_resolved_views[endpoint] = view
return view
def json_response(message=None, html=None, error=False, login_could_fix=False,
**extra):
"""Returns a JSON response for the JavaScript code. The "wire protocoll"
is basically just a JSON object with some common attributes that are
checked by the success callback in the JavaScript code before the handler
processes it.
The `error` and `login_could_fix` keys are internally used by the flashing
system on the client.
"""
extra.update(message=message, html=html, error=error,
login_could_fix=login_could_fix)
for key, value in extra.iteritems():
extra[key] = remote_export_primitive(value)
return Response(dumps(extra), mimetype='application/json')
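# Illustrative sketch (not part of the original source): the payload shape the
# JavaScript success callback sees for the defaults documented above.
#
#     json_response(message=u'Saved.')
#
# serializes to roughly
#
#     {"message": "Saved.", "html": null, "error": false,
#      "login_could_fix": false}
#
# and is returned with mimetype 'application/json'.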
def not_logged_in_json_response():
"""Standard response that the user is not logged in."""
return json_response(message=_(u'You have to login in order to '
u'visit this page.'),
error=True, login_could_fix=True)
def require_admin(f):
"""Decorates a view function so that it requires a user that is
logged in.
"""
def decorated(request, **kwargs):
if not request.user.is_admin:
message = _(u'You cannot access this resource.')
if request.is_xhr:
return json_response(message=message, error=True)
raise Forbidden(message)
return f(request, **kwargs)
return require_login(update_wrapper(decorated, f))
def require_login(f):
"""Decorates a view function so that it requires a user that is
logged in.
"""
def decorated(request, **kwargs):
if not request.is_logged_in:
if request.is_xhr:
return not_logged_in_json_response()
request.flash(_(u'You have to login in order to visit this page.'))
return redirect(url_for('core.login', next=request.url))
return f(request, **kwargs)
return update_wrapper(decorated, f)
def iter_endpoint_choices(new, current=None):
"""Iterate over all possibilities for URL generation."""
yield new
if current is not None and '.' in current:
yield current.rsplit('.', 1)[0] + '.' + new
def inject_lang_code(request, endpoint, values):
"""Returns a dict with the values for the given endpoint. You must not alter
the dict because it might be shared. If the given endpoint does not exist
`None` is returned.
"""
rv = values
if 'lang_code' not in rv:
try:
if request.url_adapter.map.is_endpoint_expecting(
endpoint, 'lang_code'):
rv = values.copy()
rv['lang_code'] = request.view_lang or str(request.locale)
except KeyError:
return
return rv
def url_for(endpoint, **values):
"""Returns a URL for a given endpoint with some interpolation."""
external = values.pop('_external', False)
if hasattr(endpoint, 'get_url_values'):
endpoint, values = endpoint.get_url_values(**values)
request = Request.current
anchor = values.pop('_anchor', None)
assert request is not None, 'no active request'
for endpoint_choice in iter_endpoint_choices(endpoint, request.endpoint):
real_values = inject_lang_code(request, endpoint_choice, values)
if real_values is None:
continue
try:
url = request.url_adapter.build(endpoint_choice, real_values,
force_external=external)
except BuildError:
continue
view = get_view(endpoint)
if is_exchange_token_protected(view):
xt = get_exchange_token(request)
url = '%s%s_xt=%s' % (url, '?' in url and '&' or '?', xt)
if anchor is not None:
url += '#' + url_quote(anchor)
return url
raise BuildError(endpoint, values, 'GET')
def save_session(request, response):
"""Saves the session to the response. Called automatically at
the end of a request.
"""
if not request.in_api and request.session.should_save:
request.session.save_cookie(response, settings.COOKIE_NAME)
def finalize_response(request, response):
"""Finalizes the response. Applies common response processors."""
if not isinstance(response, Response):
response = Response.force_type(response, request.environ)
if response.status == 200:
response.add_etag()
response = response.make_conditional(request)
before_response_sent.emit(request=request, response=response)
return response
@Request.application
def application(request):
"""The WSGI application. The majority of the handling here happens
in the :meth:`Request.dispatch` method and the functions that are
connected to the request signals.
"""
try:
try:
response = request.dispatch()
except HTTPException, e:
response = e.get_response(request.environ)
return finalize_response(request, response)
finally:
after_request_shutdown.emit()
application = SharedDataMiddleware(application, {
'/_static': os.path.join(os.path.dirname(__file__), 'static')
})
# imported here because of possible circular dependencies
from solace import settings
from solace.urls import url_map
from solace.i18n import select_locale, load_translations, Timezone, _, \
list_languages, has_section
from solace.auth import get_auth_system
from solace.database import session
from solace.models import UserMessage
from solace.signals import before_request_init, after_request_init, \
before_request_dispatch, after_request_dispatch, \
after_request_shutdown, before_response_sent
from solace.utils.remoting import remote_export_primitive
from solace.utils.csrf import get_exchange_token, is_exchange_token_protected
# remember to save the session
before_response_sent.connect(save_session)
# important because of initialization code (such as signal subscriptions)
import solace.badges
|
mitsuhiko/solace
|
solace/application.py
|
Python
|
bsd-3-clause
| 17,413
|
[
"VisIt"
] |
b562bf7aea007ceefe38c230bc2f5980fac6cf05aedcf86eedd0e22c0c2a15e8
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
Spack allows very fine-grained control over how packages are installed and
over how they are built and configured. To make this easy, it has its own
syntax for declaring a dependence. We call a descriptor of a particular
package configuration a "spec".
The syntax looks like this:
.. code-block:: sh
    $ spack install mpileaks ^openmpi @1.2:1.4 +debug %intel @12.1 =bgqos_0
                    0        1        2        3      4      5     6
The first part of this is the command, 'spack install'. The rest of the
line is a spec for a particular installation of the mpileaks package.
0. The package to install
1. A dependency of the package, prefixed by ^
2. A version descriptor for the package. This can either be a specific
version, like "1.2", or it can be a range of versions, e.g. "1.2:1.4".
If multiple specific versions or multiple ranges are acceptable, they
can be separated by commas, e.g. if a package will only build with
   versions 1.0, 1.2-1.4, and 1.6-1.8 of mvapich, you could say:
depends_on("mvapich@1.0,1.2:1.4,1.6:1.8")
3. A compile-time variant of the package. If you need openmpi to be
built in debug mode for your package to work, you can require it by
adding +debug to the openmpi spec when you depend on it. If you do
NOT want the debug option to be enabled, then replace this with -debug.
4. The name of the compiler to build with.
5. The versions of the compiler to build with. Note that the identifier
for a compiler version is the same '@' that is used for a package version.
A version list denoted by '@' is associated with the compiler only if
   it comes immediately after the compiler name. Otherwise it will be
associated with the current package spec.
6. The architecture to build with. This is needed on machines where
cross-compilation is required
Here is the EBNF grammar for a spec::
spec-list = { spec [ dep-list ] }
dep_list = { ^ spec }
spec = id [ options ]
options = { @version-list | +variant | -variant | ~variant |
%compiler | arch=architecture | [ flag ]=value}
flag = { cflags | cxxflags | fcflags | fflags | cppflags |
ldflags | ldlibs }
variant = id
architecture = id
compiler = id [ version-list ]
version-list = version [ { , version } ]
version = id | id: | :id | id:id
id = [A-Za-z0-9_][A-Za-z0-9_.-]*
Identifiers using the <name>=<value> command, such as architectures and
compiler flags, require a space before the name.
There is one context-sensitive part: ids in versions may contain '.', while
other ids may not.
There is one ambiguity: since '-' is allowed in an id, you need to put
whitespace before -variant for it to be tokenized properly. You can
either use whitespace, or you can just use ~variant since it means the same
thing. Spack uses ~variant in directory names and in the canonical form of
specs to avoid ambiguity. Both are provided because ~ can cause shell
expansion when it is the first character in an id typed on the command line.
"""
import base64
import sys
import collections
import ctypes
import hashlib
import itertools
import os
from operator import attrgetter
from six import StringIO
from six import string_types
from six import iteritems
from llnl.util.filesystem import find_headers, find_libraries, is_exe
from llnl.util.lang import *
from llnl.util.tty.color import *
import spack
import spack.architecture
import spack.compilers as compilers
import spack.error
import spack.parse
import spack.store
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
from spack.dependency import *
from spack.util.module_cmd import get_path_from_module, load_module
from spack.error import SpecError, UnsatisfiableSpecError
from spack.provider_index import ProviderIndex
from spack.util.crypto import prefix_bits
from spack.util.executable import Executable
from spack.util.prefix import Prefix
from spack.util.spack_yaml import syaml_dict
from spack.util.string import *
from spack.variant import *
from spack.version import *
from yaml.error import MarkedYAMLError
__all__ = [
'Spec',
'parse',
'parse_anonymous_spec',
'SpecError',
'SpecParseError',
'DuplicateDependencyError',
'DuplicateVariantError',
'DuplicateCompilerSpecError',
'UnsupportedCompilerError',
'UnknownVariantError',
'DuplicateArchitectureError',
'InconsistentSpecError',
'InvalidDependencyError',
'NoProviderError',
'MultipleProviderError',
'UnsatisfiableSpecError',
'UnsatisfiableSpecNameError',
'UnsatisfiableVersionSpecError',
'UnsatisfiableCompilerSpecError',
'UnsatisfiableVariantSpecError',
'UnsatisfiableCompilerFlagSpecError',
'UnsatisfiableArchitectureSpecError',
'UnsatisfiableProviderSpecError',
'UnsatisfiableDependencySpecError',
'AmbiguousHashError',
'InvalidHashError',
'NoSuchHashError',
'RedundantSpecError']
#: Valid pattern for an identifier in Spack
identifier_re = r'\w[\w-]*'
compiler_color = '@g' #: color for highlighting compilers
version_color = '@c' #: color for highlighting versions
architecture_color = '@m' #: color for highlighting architectures
enabled_variant_color = '@B' #: color for highlighting enabled variants
disabled_variant_color = '@r'  #: color for highlighting disabled variants
dependency_color = '@.' #: color for highlighting dependencies
hash_color = '@K' #: color for highlighting package hashes
#: This map determines the coloring of specs when using color output.
#: We make the fields different colors to enhance readability.
#: See spack.color for descriptions of the color codes.
color_formats = {'%': compiler_color,
'@': version_color,
'=': architecture_color,
'+': enabled_variant_color,
'~': disabled_variant_color,
'^': dependency_color,
'#': hash_color}
#: Regex used for splitting by spec field separators.
#: These need to be escaped to avoid metacharacters in
#: ``color_formats.keys()``.
_separators = '[\\%s]' % '\\'.join(color_formats.keys())
#: Versionlist constant so we don't have to build a list
#: every time we call str()
_any_version = VersionList([':'])
#: Max integer helps avoid passing too large a value to cyaml.
maxint = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
def colorize_spec(spec):
"""Returns a spec colorized according to the colors specified in
color_formats."""
class insert_color:
def __init__(self):
self.last = None
def __call__(self, match):
# ignore compiler versions (color same as compiler)
sep = match.group(0)
if self.last == '%' and sep == '@':
return cescape(sep)
self.last = sep
return '%s%s' % (color_formats[sep], cescape(sep))
return colorize(re.sub(_separators, insert_color(), str(spec)) + '@.')
@key_ordering
class ArchSpec(object):
""" The ArchSpec class represents an abstract architecture specification
that a package should be built with. At its core, each ArchSpec is
comprised of three elements: a platform (e.g. Linux), an OS (e.g.
RHEL6), and a target (e.g. x86_64).
"""
# TODO: Formalize the specifications for architectures and then use
# the appropriate parser here to read these specifications.
def __init__(self, *args):
to_attr_string = lambda s: str(s) if s and s != "None" else None
self.platform, self.platform_os, self.target = (None, None, None)
if len(args) == 1:
spec_like = args[0]
if isinstance(spec_like, ArchSpec):
self._dup(spec_like)
elif isinstance(spec_like, string_types):
spec_fields = spec_like.split("-")
if len(spec_fields) == 3:
self.platform, self.platform_os, self.target = tuple(
to_attr_string(f) for f in spec_fields)
else:
raise ValueError("%s is an invalid arch spec" % spec_like)
elif len(args) == 3:
self.platform = to_attr_string(args[0])
self.platform_os = to_attr_string(args[1])
self.target = to_attr_string(args[2])
elif len(args) != 0:
raise TypeError("Can't make arch spec from %s" % args)
def _autospec(self, spec_like):
if isinstance(spec_like, ArchSpec):
return spec_like
return ArchSpec(spec_like)
def _cmp_key(self):
return (self.platform, self.platform_os, self.target)
def _dup(self, other):
self.platform = other.platform
self.platform_os = other.platform_os
self.target = other.target
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
""" The platform of the architecture spec will be verified as a
supported Spack platform before it's set to ensure all specs
refer to valid platforms.
"""
value = str(value) if value is not None else None
self._platform = value
@property
def platform_os(self):
return self._platform_os
@platform_os.setter
def platform_os(self, value):
""" The OS of the architecture spec will update the platform field
if the OS is set to one of the reserved OS types so that the
default OS type can be resolved. Since the reserved OS
information is only available for the host machine, the platform
        will be assumed to be the host machine's platform.
"""
value = str(value) if value is not None else None
if value in spack.architecture.Platform.reserved_oss:
curr_platform = str(spack.architecture.platform())
self.platform = self.platform or curr_platform
if self.platform != curr_platform:
raise ValueError(
"Can't set arch spec OS to reserved value '%s' when the "
"arch platform (%s) isn't the current platform (%s)" %
(value, self.platform, curr_platform))
spec_platform = spack.architecture.get_platform(self.platform)
value = str(spec_platform.operating_system(value))
self._platform_os = value
@property
def target(self):
return self._target
@target.setter
def target(self, value):
""" The target of the architecture spec will update the platform field
if the target is set to one of the reserved target types so that
the default target type can be resolved. Since the reserved target
information is only available for the host machine, the platform
        will be assumed to be the host machine's platform.
"""
value = str(value) if value is not None else None
if value in spack.architecture.Platform.reserved_targets:
curr_platform = str(spack.architecture.platform())
self.platform = self.platform or curr_platform
if self.platform != curr_platform:
raise ValueError(
"Can't set arch spec target to reserved value '%s' when "
"the arch platform (%s) isn't the current platform (%s)" %
(value, self.platform, curr_platform))
spec_platform = spack.architecture.get_platform(self.platform)
value = str(spec_platform.target(value))
self._target = value
def satisfies(self, other, strict=False):
other = self._autospec(other)
sdict, odict = self.to_cmp_dict(), other.to_cmp_dict()
if strict or self.concrete:
return all(getattr(self, attr) == getattr(other, attr)
for attr in odict if odict[attr])
else:
return all(getattr(self, attr) == getattr(other, attr)
for attr in odict if sdict[attr] and odict[attr])
def constrain(self, other):
""" Projects all architecture fields that are specified in the given
spec onto the instance spec if they're missing from the instance
spec. This will only work if the two specs are compatible.
"""
other = self._autospec(other)
if not self.satisfies(other):
raise UnsatisfiableArchitectureSpecError(self, other)
constrained = False
for attr, svalue in iteritems(self.to_cmp_dict()):
ovalue = getattr(other, attr)
if svalue is None and ovalue is not None:
setattr(self, attr, ovalue)
constrained = True
return constrained
def copy(self):
clone = ArchSpec.__new__(ArchSpec)
clone._dup(self)
return clone
@property
def concrete(self):
return all(v for k, v in iteritems(self.to_cmp_dict()))
def to_cmp_dict(self):
"""Returns a dictionary that can be used for field comparison."""
return dict([
('platform', self.platform),
('platform_os', self.platform_os),
('target', self.target)])
def to_dict(self):
d = syaml_dict([
('platform', self.platform),
('platform_os', self.platform_os),
('target', self.target)])
return syaml_dict([('arch', d)])
@staticmethod
def from_dict(d):
"""Import an ArchSpec from raw YAML/JSON data.
This routine implements a measure of compatibility with older
versions of Spack. Spack releases before 0.10 used a single
string with no OS or platform identifiers. We import old Spack
architectures with platform ``spack09``, OS ``unknown``, and the
old arch string as the target.
Specs from `0.10` or later have a more fleshed out architecture
descriptor with a platform, an OS, and a target.
"""
if not isinstance(d['arch'], dict):
return ArchSpec('spack09', 'unknown', d['arch'])
d = d['arch']
return ArchSpec(d['platform'], d['platform_os'], d['target'])
def __str__(self):
return "%s-%s-%s" % (self.platform, self.platform_os, self.target)
def __repr__(self):
return str(self)
def __contains__(self, string):
return string in str(self)
@key_ordering
class CompilerSpec(object):
"""The CompilerSpec field represents the compiler or range of compiler
versions that a package should be built with. CompilerSpecs have a
name and a version list. """
def __init__(self, *args):
nargs = len(args)
if nargs == 1:
arg = args[0]
# If there is one argument, it's either another CompilerSpec
# to copy or a string to parse
if isinstance(arg, string_types):
c = SpecParser().parse_compiler(arg)
self.name = c.name
self.versions = c.versions
elif isinstance(arg, CompilerSpec):
self.name = arg.name
self.versions = arg.versions.copy()
else:
raise TypeError(
"Can only build CompilerSpec from string or " +
"CompilerSpec. Found %s" % type(arg))
elif nargs == 2:
name, version = args
self.name = name
self.versions = VersionList()
self.versions.add(ver(version))
else:
raise TypeError(
"__init__ takes 1 or 2 arguments. (%d given)" % nargs)
def _add_version(self, version):
self.versions.add(version)
def _autospec(self, compiler_spec_like):
if isinstance(compiler_spec_like, CompilerSpec):
return compiler_spec_like
return CompilerSpec(compiler_spec_like)
def satisfies(self, other, strict=False):
other = self._autospec(other)
return (self.name == other.name and
self.versions.satisfies(other.versions, strict=strict))
def constrain(self, other):
"""Intersect self's versions with other.
Return whether the CompilerSpec changed.
"""
other = self._autospec(other)
# ensure that other will actually constrain this spec.
if not other.satisfies(self):
raise UnsatisfiableCompilerSpecError(other, self)
return self.versions.intersect(other.versions)
@property
def concrete(self):
"""A CompilerSpec is concrete if its versions are concrete and there
is an available compiler with the right version."""
return self.versions.concrete
@property
def version(self):
if not self.concrete:
raise SpecError("Spec is not concrete: " + str(self))
return self.versions[0]
def copy(self):
clone = CompilerSpec.__new__(CompilerSpec)
clone.name = self.name
clone.versions = self.versions.copy()
return clone
def _cmp_key(self):
return (self.name, self.versions)
def to_dict(self):
d = syaml_dict([('name', self.name)])
d.update(self.versions.to_dict())
return syaml_dict([('compiler', d)])
@staticmethod
def from_dict(d):
d = d['compiler']
return CompilerSpec(d['name'], VersionList.from_dict(d))
def __str__(self):
out = self.name
if self.versions and self.versions != _any_version:
vlist = ",".join(str(v) for v in self.versions)
out += "@%s" % vlist
return out
def __repr__(self):
return str(self)
@key_ordering
class DependencySpec(object):
"""DependencySpecs connect two nodes in the DAG, and contain deptypes.
Dependencies can be one (or more) of several types:
- build: needs to be in the PATH at build time.
- link: is linked to and added to compiler flags.
- run: needs to be in the PATH for the package to run.
Fields:
- spec: Spec depended on by parent.
- parent: Spec that depends on `spec`.
- deptypes: list of strings, representing dependency relationships.
"""
def __init__(self, parent, spec, deptypes):
self.parent = parent
self.spec = spec
self.deptypes = tuple(sorted(set(deptypes)))
def update_deptypes(self, deptypes):
deptypes = set(deptypes)
deptypes.update(self.deptypes)
deptypes = tuple(sorted(deptypes))
changed = self.deptypes != deptypes
self.deptypes = deptypes
return changed
def copy(self):
return DependencySpec(self.parent, self.spec, self.deptypes)
def _cmp_key(self):
return (self.parent.name if self.parent else None,
self.spec.name if self.spec else None,
self.deptypes)
def __str__(self):
return "%s %s--> %s" % (self.parent.name if self.parent else None,
self.deptypes,
self.spec.name if self.spec else None)
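# Illustrative sketch (not part of the original source): a DependencySpec is the
# labelled edge between two Spec nodes (parent_spec and child_spec are
# hypothetical, already-built Specs):
#
#     edge = DependencySpec(parent_spec, child_spec, ('link', 'build'))
#     edge.deptypes        # ('build', 'link') -- deduplicated and sorted
#     str(edge)            # "<parent name> ('build', 'link')--> <child name>"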
_valid_compiler_flags = [
'cflags', 'cxxflags', 'fflags', 'ldflags', 'ldlibs', 'cppflags']
class FlagMap(HashableMap):
def __init__(self, spec):
super(FlagMap, self).__init__()
self.spec = spec
def satisfies(self, other, strict=False):
if strict or (self.spec and self.spec._concrete):
return all(f in self and set(self[f]) == set(other[f])
for f in other)
else:
return all(set(self[f]) == set(other[f])
for f in other if (other[f] != [] and f in self))
def constrain(self, other):
"""Add all flags in other that aren't in self to self.
Return whether the spec changed.
"""
if other.spec and other.spec._concrete:
for k in self:
if k not in other:
raise UnsatisfiableCompilerFlagSpecError(
self[k], '<absent>')
changed = False
for k in other:
if k in self and not set(self[k]) <= set(other[k]):
raise UnsatisfiableCompilerFlagSpecError(
' '.join(f for f in self[k]),
' '.join(f for f in other[k]))
elif k not in self:
self[k] = other[k]
changed = True
return changed
@staticmethod
def valid_compiler_flags():
return _valid_compiler_flags
def copy(self):
clone = FlagMap(None)
for name, value in self.items():
clone[name] = value
return clone
def _cmp_key(self):
return tuple((k, tuple(v)) for k, v in sorted(iteritems(self)))
def __str__(self):
sorted_keys = [k for k in sorted(self.keys()) if self[k] != []]
cond_symbol = ' ' if len(sorted_keys) > 0 else ''
return cond_symbol + ' '.join(
str(key) + '=\"' + ' '.join(
str(f) for f in self[key]) + '\"'
for key in sorted_keys) + cond_symbol
class DependencyMap(HashableMap):
"""Each spec has a DependencyMap containing specs for its dependencies.
The DependencyMap is keyed by name. """
def __str__(self):
return "{deps: %s}" % ', '.join(str(d) for d in sorted(self.values()))
def _command_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'command' attribute.
Tries to search for ``spec.name`` in the ``spec.prefix.bin`` directory.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
Executable: An executable of the command
Raises:
RuntimeError: If the command is not found
"""
path = os.path.join(spec.prefix.bin, spec.name)
if is_exe(path):
return Executable(path)
else:
msg = 'Unable to locate {0} command in {1}'
raise RuntimeError(msg.format(spec.name, spec.prefix.bin))
def _headers_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'headers' attribute.
Tries to search for ``*.h`` files recursively starting from
``spec.prefix.include``.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
HeaderList: The headers in ``prefix.include``
Raises:
RuntimeError: If no headers are found
"""
headers = find_headers('*', root=spec.prefix.include, recurse=True)
if headers:
return headers
else:
msg = 'Unable to locate {0} headers in {1}'
raise RuntimeError(msg.format(spec.name, spec.prefix.include))
def _libs_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'libs' attribute.
Tries to search for ``lib{spec.name}`` recursively starting from
``spec.prefix``.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
LibraryList: The libraries found
Raises:
RuntimeError: If no libraries are found
"""
# Variable 'name' is passed to function 'find_libraries', which supports
# glob characters. For example, we have a package with a name 'abc-abc'.
# Now, we don't know if the original name of the package is 'abc_abc'
# (and it generates a library 'libabc_abc.so') or 'abc-abc' (and it
# generates a library 'libabc-abc.so'). So, we tell the function
# 'find_libraries' to give us anything that matches 'libabc?abc' and it
# gives us either 'libabc-abc.so' or 'libabc_abc.so' (or an error)
# depending on which one exists (there is a possibility, of course, to
    # get something like 'libabcXabc.so', but for now we consider this
# unlikely).
name = 'lib' + spec.name.replace('-', '?')
if '+shared' in spec:
libs = find_libraries(
name, root=spec.prefix, shared=True, recurse=True
)
elif '~shared' in spec:
libs = find_libraries(
name, root=spec.prefix, shared=False, recurse=True
)
else:
# Prefer shared
libs = find_libraries(
name, root=spec.prefix, shared=True, recurse=True
)
if libs:
return libs
libs = find_libraries(
name, root=spec.prefix, shared=False, recurse=True
)
if libs:
return libs
else:
msg = 'Unable to recursively locate {0} libraries in {1}'
raise RuntimeError(msg.format(spec.name, spec.prefix))
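# Illustrative note (not part of the original source): for a hypothetical
# package named 'abc-abc', the glob built above is
#
#     name = 'lib' + 'abc-abc'.replace('-', '?')   # 'libabc?abc'
#
# so find_libraries() matches either libabc-abc.so or libabc_abc.so, preferring
# shared libraries unless the spec carries '~shared'.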
class ForwardQueryToPackage(object):
"""Descriptor used to forward queries from Spec to Package"""
def __init__(self, attribute_name, default_handler=None):
"""Create a new descriptor.
Parameters:
attribute_name (str): name of the attribute to be
searched for in the Package instance
default_handler (callable, optional): default function to be
called if the attribute was not found in the Package
instance
"""
self.attribute_name = attribute_name
# Turn the default handler into a function with the right
# signature that always returns None
if default_handler is None:
default_handler = lambda descriptor, spec, cls: None
self.default = default_handler
def __get__(self, instance, cls):
"""Retrieves the property from Package using a well defined chain
of responsibility.
The order of call is:
1. if the query was through the name of a virtual package try to
search for the attribute `{virtual_name}_{attribute_name}`
in Package
2. try to search for attribute `{attribute_name}` in Package
3. try to call the default handler
The first call that produces a value will stop the chain.
If no call can handle the request or a None value is produced,
then AttributeError is raised.
"""
pkg = instance.package
try:
query = instance.last_query
except AttributeError:
# There has been no query yet: this means
# a spec is trying to access its own attributes
_ = instance[instance.name] # NOQA: ignore=F841
query = instance.last_query
callbacks_chain = []
# First in the chain : specialized attribute for virtual packages
if query.isvirtual:
specialized_name = '{0}_{1}'.format(
query.name, self.attribute_name
)
callbacks_chain.append(lambda: getattr(pkg, specialized_name))
# Try to get the generic method from Package
callbacks_chain.append(lambda: getattr(pkg, self.attribute_name))
# Final resort : default callback
callbacks_chain.append(lambda: self.default(self, instance, cls))
# Trigger the callbacks in order, the first one producing a
# value wins
value = None
for f in callbacks_chain:
try:
value = f()
break
except AttributeError:
pass
        # A None value raises AttributeError: this lets a particular package
        # 'disable' the call by returning None from the queried attribute,
        # and it also triggers an exception if the things searched for were
        # not found
if value is None:
fmt = '\'{name}\' package has no relevant attribute \'{query}\'\n' # NOQA: ignore=E501
fmt += '\tspec : \'{spec}\'\n'
fmt += '\tqueried as : \'{spec.last_query.name}\'\n'
fmt += '\textra parameters : \'{spec.last_query.extra_parameters}\'\n' # NOQA: ignore=E501
message = fmt.format(
name=pkg.name,
query=self.attribute_name,
spec=instance
)
raise AttributeError(message)
return value
def __set__(self, instance, value):
cls_name = type(instance).__name__
msg = "'{0}' object attribute '{1}' is read-only"
raise AttributeError(msg.format(cls_name, self.attribute_name))
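# Illustrative note (not part of the original source): when a spec is queried
# through a virtual name, e.g. a hypothetical spec['mpi'].libs, the descriptor
# above tries pkg.mpi_libs first, then pkg.libs, and finally the registered
# default handler (_libs_default_handler in the case of 'libs').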
class SpecBuildInterface(ObjectWrapper):
command = ForwardQueryToPackage(
'command',
default_handler=_command_default_handler
)
headers = ForwardQueryToPackage(
'headers',
default_handler=_headers_default_handler
)
libs = ForwardQueryToPackage(
'libs',
default_handler=_libs_default_handler
)
def __init__(self, spec, name, query_parameters):
super(SpecBuildInterface, self).__init__(spec)
# Represents a query state in a BuildInterface object
QueryState = collections.namedtuple(
'QueryState', ['name', 'extra_parameters', 'isvirtual']
)
is_virtual = Spec.is_virtual(name)
self.last_query = QueryState(
name=name,
extra_parameters=query_parameters,
isvirtual=is_virtual
)
@key_ordering
class Spec(object):
@staticmethod
def from_literal(spec_dict, normal=True):
"""Builds a Spec from a dictionary containing the spec literal.
The dictionary must have a single top level key, representing the root,
and as many secondary level keys as needed in the spec.
The keys can be either a string or a Spec or a tuple containing the
Spec and the dependency types.
Args:
spec_dict (dict): the dictionary containing the spec literal
normal (bool): if True the same key appearing at different levels
of the ``spec_dict`` will map to the same object in memory.
Examples:
A simple spec ``foo`` with no dependencies:
.. code-block:: python
{'foo': None}
A spec ``foo`` with a ``(build, link)`` dependency ``bar``:
.. code-block:: python
{'foo':
{'bar:build,link': None}}
A spec with a diamond dependency and various build types:
.. code-block:: python
{'dt-diamond': {
'dt-diamond-left:build,link': {
'dt-diamond-bottom:build': None
},
'dt-diamond-right:build,link': {
'dt-diamond-bottom:build,link,run': None
}
}}
The same spec with a double copy of ``dt-diamond-bottom`` and
no diamond structure:
.. code-block:: python
{'dt-diamond': {
'dt-diamond-left:build,link': {
'dt-diamond-bottom:build': None
},
'dt-diamond-right:build,link': {
'dt-diamond-bottom:build,link,run': None
}
}, normal=False}
Constructing a spec using a Spec object as key:
.. code-block:: python
mpich = Spec('mpich')
libelf = Spec('libelf@1.8.11')
expected_normalized = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {libelf: None},
libelf: None
},
mpich: None
},
mpich: None
},
})
"""
# Maps a literal to a Spec, to be sure we are reusing the same object
spec_cache = LazySpecCache()
def spec_builder(d):
# The invariant is that the top level dictionary must have
# only one key
assert len(d) == 1
# Construct the top-level spec
spec_like, dep_like = next(iter(d.items()))
            # If the requirement was for unique nodes (default)
# then re-use keys from the local cache. Otherwise build
# a new node every time.
if not isinstance(spec_like, Spec):
spec = spec_cache[spec_like] if normal else Spec(spec_like)
else:
spec = spec_like
if dep_like is None:
return spec
def name_and_dependency_types(s):
"""Given a key in the dictionary containing the literal,
extracts the name of the spec and its dependency types.
Args:
s (str): key in the dictionary containing the literal
"""
t = s.split(':')
if len(t) > 2:
msg = 'more than one ":" separator in key "{0}"'
raise KeyError(msg.format(s))
n = t[0]
if len(t) == 2:
dtypes = tuple(dt.strip() for dt in t[1].split(','))
else:
dtypes = ()
return n, dtypes
def spec_and_dependency_types(s):
"""Given a non-string key in the literal, extracts the spec
and its dependency types.
Args:
s (spec or tuple): either a Spec object or a tuple
composed of a Spec object and a string with the
dependency types
"""
if isinstance(s, Spec):
return s, ()
spec_obj, dtypes = s
return spec_obj, tuple(dt.strip() for dt in dtypes.split(','))
# Recurse on dependencies
for s, s_dependencies in dep_like.items():
if isinstance(s, string_types):
dag_node, dependency_types = name_and_dependency_types(s)
else:
dag_node, dependency_types = spec_and_dependency_types(s)
dependency_spec = spec_builder({dag_node: s_dependencies})
spec._add_dependency(dependency_spec, dependency_types)
return spec
return spec_builder(spec_dict)
def __init__(self, spec_like, **kwargs):
# Copy if spec_like is a Spec.
if isinstance(spec_like, Spec):
self._dup(spec_like)
return
# Parse if the spec_like is a string.
if not isinstance(spec_like, string_types):
raise TypeError("Can't make spec out of %s" % type(spec_like))
# parse string types *into* this spec
spec_list = SpecParser(self).parse(spec_like)
if len(spec_list) > 1:
raise ValueError("More than one spec in string: " + spec_like)
if len(spec_list) < 1:
raise ValueError("String contains no specs: " + spec_like)
# Specs are by default not assumed to be normal, but in some
        # cases we've read them from a file and want to assume normal.
# This allows us to manipulate specs that Spack doesn't have
# package.py files for.
self._normal = kwargs.get('normal', False)
self._concrete = kwargs.get('concrete', False)
# Allow a spec to be constructed with an external path.
self.external_path = kwargs.get('external_path', None)
self.external_module = kwargs.get('external_module', None)
@property
def external(self):
return bool(self.external_path) or bool(self.external_module)
def get_dependency(self, name):
dep = self._dependencies.get(name)
if dep is not None:
return dep
raise InvalidDependencyError(
self.name + " does not depend on " + comma_or(name))
def _find_deps(self, where, deptype):
deptype = canonical_deptype(deptype)
return [dep for dep in where.values()
if deptype and (not dep.deptypes or
any(d in deptype for d in dep.deptypes))]
def dependencies(self, deptype='all'):
return [d.spec
for d in self._find_deps(self._dependencies, deptype)]
def dependents(self, deptype='all'):
return [d.parent
for d in self._find_deps(self._dependents, deptype)]
def dependencies_dict(self, deptype='all'):
return dict((d.spec.name, d)
for d in self._find_deps(self._dependencies, deptype))
def dependents_dict(self, deptype='all'):
return dict((d.parent.name, d)
for d in self._find_deps(self._dependents, deptype))
#
# Private routines here are called by the parser when building a spec.
#
def _add_version(self, version):
"""Called by the parser to add an allowable version."""
self.versions.add(version)
def _add_flag(self, name, value):
"""Called by the parser to add a known flag.
Known flags currently include "arch"
"""
valid_flags = FlagMap.valid_compiler_flags()
if name == 'arch' or name == 'architecture':
parts = tuple(value.split('-'))
plat, os, tgt = parts if len(parts) == 3 else (None, None, value)
self._set_architecture(platform=plat, platform_os=os, target=tgt)
elif name == 'platform':
self._set_architecture(platform=value)
elif name == 'os' or name == 'operating_system':
self._set_architecture(platform_os=value)
elif name == 'target':
self._set_architecture(target=value)
elif name in valid_flags:
assert(self.compiler_flags is not None)
self.compiler_flags[name] = value.split()
else:
# FIXME:
# All other flags represent variants. 'foo=true' and 'foo=false'
# map to '+foo' and '~foo' respectively. As such they need a
# BoolValuedVariant instance.
if str(value).upper() == 'TRUE' or str(value).upper() == 'FALSE':
self.variants[name] = BoolValuedVariant(name, value)
else:
self.variants[name] = AbstractVariant(name, value)
def _set_architecture(self, **kwargs):
"""Called by the parser to set the architecture."""
arch_attrs = ['platform', 'platform_os', 'target']
if self.architecture and self.architecture.concrete:
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two architectures." % self.name)
if not self.architecture:
new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
self.architecture = ArchSpec(*new_vals)
else:
new_attrvals = [(a, v) for a, v in iteritems(kwargs)
if a in arch_attrs]
for new_attr, new_value in new_attrvals:
if getattr(self.architecture, new_attr):
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two '%s' specified "
"for its architecture" % (self.name, new_attr))
else:
setattr(self.architecture, new_attr, new_value)
def _set_compiler(self, compiler):
"""Called by the parser to set the compiler."""
if self.compiler:
raise DuplicateCompilerSpecError(
"Spec for '%s' cannot have two compilers." % self.name)
self.compiler = compiler
def _add_dependency(self, spec, deptypes):
"""Called by the parser to add another spec as a dependency."""
if spec.name in self._dependencies:
raise DuplicateDependencyError(
"Cannot depend on '%s' twice" % spec)
# create an edge and add to parent and child
dspec = DependencySpec(self, spec, deptypes)
self._dependencies[spec.name] = dspec
spec._dependents[self.name] = dspec
#
# Public interface
#
@property
def fullname(self):
return (
('%s.%s' % (self.namespace, self.name)) if self.namespace else
(self.name if self.name else ''))
@property
def root(self):
"""Follow dependent links and find the root of this spec's DAG.
In spack specs, there should be a single root (the package being
installed). This will throw an assertion error if that is not
the case.
"""
if not self._dependents:
return self
# If the spec has multiple dependents, ensure that they all
# lead to the same place. Spack shouldn't deal with any DAGs
# with multiple roots, so something's wrong if we find one.
depiter = iter(self._dependents.values())
first_root = next(depiter).parent.root
assert(all(first_root is d.parent.root for d in depiter))
return first_root
@property
def package(self):
return spack.repo.get(self)
@property
def package_class(self):
"""Internal package call gets only the class object for a package.
Use this to just get package metadata.
"""
return spack.repo.get_pkg_class(self.fullname)
@property
def virtual(self):
"""Right now, a spec is virtual if no package exists with its name.
TODO: revisit this -- might need to use a separate namespace and
be more explicit about this.
        Possible idea: just use convention and make virtual deps all
caps, e.g., MPI vs mpi.
"""
return Spec.is_virtual(self.name)
@staticmethod
def is_virtual(name):
"""Test if a name is virtual without requiring a Spec."""
return (name is not None) and (not spack.repo.exists(name))
@property
def concrete(self):
"""A spec is concrete if it describes a single build of a package.
More formally, a spec is concrete if concretize() has been called
on it and it has been marked `_concrete`.
Concrete specs either can be or have been built. All constraints
have been resolved, optional dependencies have been added or
removed, a compiler has been chosen, and all variants have
values.
"""
return self._concrete
def traverse(self, **kwargs):
direction = kwargs.get('direction', 'children')
depth = kwargs.get('depth', False)
get_spec = lambda s: s.spec
if direction == 'parents':
get_spec = lambda s: s.parent
if depth:
for d, dspec in self.traverse_edges(**kwargs):
yield d, get_spec(dspec)
else:
for dspec in self.traverse_edges(**kwargs):
yield get_spec(dspec)
def traverse_edges(self, visited=None, d=0, deptype='all',
deptype_query=default_deptype, dep_spec=None, **kwargs):
"""Generic traversal of the DAG represented by this spec.
This will yield each node in the spec. Options:
order [=pre|post]
Order to traverse spec nodes. Defaults to preorder traversal.
Options are:
'pre': Pre-order traversal; each node is yielded before its
children in the dependency DAG.
'post': Post-order traversal; each node is yielded after its
children in the dependency DAG.
cover [=nodes|edges|paths]
Determines how extensively to cover the dag. Possible values:
'nodes': Visit each node in the dag only once. Every node
yielded by this function will be unique.
'edges': If a node has been visited once but is reached along a
new path from the root, yield it but do not descend
into it. This traverses each 'edge' in the DAG once.
'paths': Explore every unique path reachable from the root.
This descends into visited subtrees and will yield
nodes twice if they're reachable by multiple paths.
depth [=False]
Defaults to False. When True, yields not just nodes in the
spec, but also their depth from the root in a (depth, node)
tuple.
key [=id]
Allow a custom key function to track the identity of nodes
in the traversal.
root [=True]
            If False, this won't yield the root node, just its descendants.
direction [=children|parents]
If 'children', does a traversal of this spec's children. If
'parents', traverses upwards in the DAG towards the root.
"""
# get initial values for kwargs
depth = kwargs.get('depth', False)
key_fun = kwargs.get('key', id)
if isinstance(key_fun, string_types):
key_fun = attrgetter(key_fun)
yield_root = kwargs.get('root', True)
cover = kwargs.get('cover', 'nodes')
direction = kwargs.get('direction', 'children')
order = kwargs.get('order', 'pre')
deptype = canonical_deptype(deptype)
deptype_query = canonical_deptype(deptype_query)
# Make sure kwargs have legal values; raise ValueError if not.
def validate(name, val, allowed_values):
if val not in allowed_values:
raise ValueError("Invalid value for %s: %s. Choices are %s"
% (name, val, ",".join(allowed_values)))
validate('cover', cover, ('nodes', 'edges', 'paths'))
validate('direction', direction, ('children', 'parents'))
validate('order', order, ('pre', 'post'))
if visited is None:
visited = set()
key = key_fun(self)
# Node traversal does not yield visited nodes.
if key in visited and cover == 'nodes':
return
def return_val(dspec):
if not dspec:
# make a fake dspec for the root.
if direction == 'parents':
dspec = DependencySpec(self, None, ())
else:
dspec = DependencySpec(None, self, ())
return (d, dspec) if depth else dspec
yield_me = yield_root or d > 0
# Preorder traversal yields before successors
if yield_me and order == 'pre':
yield return_val(dep_spec)
# Edge traversal yields but skips children of visited nodes
if not (key in visited and cover == 'edges'):
# This code determines direction and yields the children/parents
if direction == 'children':
successors = self.dependencies_dict(deptype)
succ = lambda s: s.spec
elif direction == 'parents':
successors = self.dependents_dict(deptype)
succ = lambda s: s.parent
else:
raise ValueError('Invalid traversal direction: %s' % direction)
visited.add(key)
for name, dspec in sorted(successors.items()):
child = successors[name]
children = succ(child).traverse_edges(
visited,
d=(d + 1),
deptype=deptype,
deptype_query=deptype_query,
dep_spec=dspec,
**kwargs)
for elt in children:
yield elt
# Postorder traversal yields after successors
if yield_me and order == 'post':
yield return_val(dep_spec)
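    # Illustrative sketch (not part of the original source): the options
    # documented in traverse_edges() are usually consumed through traverse(),
    # e.g. for a hypothetical spec `s`:
    #
    #     for depth, node in s.traverse(order='post', cover='nodes', depth=True):
    #         print(depth, node.name)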
@property
def short_spec(self):
"""Returns a version of the spec with the dependencies hashed
instead of completely enumerated."""
return self.format('$_$@$%@$+$=$/')
@property
def cshort_spec(self):
"""Returns an auto-colorized version of ``self.short_spec``."""
return self.cformat('$_$@$%@$+$=$/')
@property
def prefix(self):
return Prefix(spack.store.layout.path_for_spec(self))
def dag_hash(self, length=None):
"""Return a hash of the entire spec DAG, including connectivity."""
if self._hash:
return self._hash[:length]
else:
yaml_text = syaml.dump(
self.to_node_dict(), default_flow_style=True, width=maxint)
sha = hashlib.sha1(yaml_text.encode('utf-8'))
b32_hash = base64.b32encode(sha.digest()).lower()
if sys.version_info[0] >= 3:
b32_hash = b32_hash.decode('utf-8')
if self.concrete:
self._hash = b32_hash
return b32_hash[:length]
def dag_hash_bit_prefix(self, bits):
"""Get the first <bits> bits of the DAG hash as an integer type."""
return base32_prefix_bits(self.dag_hash(), bits)
def to_node_dict(self):
d = syaml_dict()
if self.versions:
d.update(self.versions.to_dict())
if self.architecture:
d.update(self.architecture.to_dict())
if self.compiler:
d.update(self.compiler.to_dict())
if self.namespace:
d['namespace'] = self.namespace
params = syaml_dict(
sorted(
v.yaml_entry() for _, v in self.variants.items()
)
)
params.update(sorted(self.compiler_flags.items()))
if params:
d['parameters'] = params
if self.external:
d['external'] = {
'path': self.external_path,
'module': bool(self.external_module)
}
# TODO: restore build dependencies here once we have less picky
# TODO: concretization.
deps = self.dependencies_dict(deptype=('link', 'run'))
if deps:
d['dependencies'] = syaml_dict([
(name,
syaml_dict([
('hash', dspec.spec.dag_hash()),
('type', sorted(str(s) for s in dspec.deptypes))])
) for name, dspec in sorted(deps.items())
])
return syaml_dict([(self.name, d)])
def to_dict(self):
node_list = []
for s in self.traverse(order='pre', deptype=('link', 'run')):
node = s.to_node_dict()
node[s.name]['hash'] = s.dag_hash()
node_list.append(node)
return syaml_dict([('spec', node_list)])
def to_yaml(self, stream=None):
return syaml.dump(
self.to_dict(), stream=stream, default_flow_style=False)
def to_json(self, stream=None):
return sjson.dump(self.to_dict(), stream)
@staticmethod
def from_node_dict(node):
name = next(iter(node))
node = node[name]
spec = Spec(name)
spec.namespace = node.get('namespace', None)
spec._hash = node.get('hash', None)
if 'version' in node or 'versions' in node:
spec.versions = VersionList.from_dict(node)
if 'arch' in node:
spec.architecture = ArchSpec.from_dict(node)
if 'compiler' in node:
spec.compiler = CompilerSpec.from_dict(node)
else:
spec.compiler = None
if 'parameters' in node:
for name, value in node['parameters'].items():
if name in _valid_compiler_flags:
spec.compiler_flags[name] = value
else:
spec.variants[name] = MultiValuedVariant.from_node_dict(
name, value)
elif 'variants' in node:
for name, value in node['variants'].items():
spec.variants[name] = MultiValuedVariant.from_node_dict(
name, value
)
for name in FlagMap.valid_compiler_flags():
spec.compiler_flags[name] = []
if 'external' in node:
spec.external_path = None
spec.external_module = None
# This conditional is needed because sometimes this function is
# called with a node already constructed that contains a 'versions'
# and 'external' field. Related to virtual packages provider
# indexes.
if node['external']:
spec.external_path = node['external']['path']
spec.external_module = node['external']['module']
if spec.external_module is False:
spec.external_module = None
else:
spec.external_path = None
spec.external_module = None
# Don't read dependencies here; from_node_dict() is used by
# from_yaml() to read the root *and* each dependency spec.
return spec
@staticmethod
def read_yaml_dep_specs(dependency_dict):
"""Read the DependencySpec portion of a YAML-formatted Spec.
This needs to be backward-compatible with older spack spec
formats so that reindex will work on old specs/databases.
"""
for dep_name, elt in dependency_dict.items():
if isinstance(elt, string_types):
# original format, elt is just the dependency hash.
dag_hash, deptypes = elt, ['build', 'link']
elif isinstance(elt, tuple):
# original deptypes format: (used tuples, not future-proof)
dag_hash, deptypes = elt
elif isinstance(elt, dict):
# new format: elements of dependency spec are keyed.
dag_hash, deptypes = elt['hash'], elt['type']
else:
raise SpecError("Couldn't parse dependency types in spec.")
yield dep_name, dag_hash, list(deptypes)
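# Illustrative input shapes accepted above (placeholder names and hashes):
#   {'mpich': 'abc123...'}                               # oldest: bare hash
#   {'mpich': ('abc123...', ['build', 'link'])}          # tuple format
#   {'mpich': {'hash': 'abc123...', 'type': ['link']}}   # keyed format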
@staticmethod
def from_dict(data):
"""Construct a spec from YAML.
Parameters:
data -- a nested dict/list data structure read from YAML or JSON.
"""
nodes = data['spec']
# Read nodes out of list. Root spec is the first element;
# dependencies are the following elements.
dep_list = [Spec.from_node_dict(node) for node in nodes]
if not dep_list:
raise SpecError("YAML spec contains no nodes.")
deps = dict((spec.name, spec) for spec in dep_list)
spec = dep_list[0]
for node in nodes:
# get dependency dict from the node.
name = next(iter(node))
if 'dependencies' not in node[name]:
continue
yaml_deps = node[name]['dependencies']
for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps):
# Fill in dependencies by looking them up by name in deps dict
deps[name]._dependencies[dname] = DependencySpec(
deps[name], deps[dname], dtypes)
return spec
@staticmethod
def from_yaml(stream):
"""Construct a spec from YAML.
Parameters:
stream -- string or file object to read from.
"""
try:
data = syaml.load(stream)
return Spec.from_dict(data)
except MarkedYAMLError as e:
raise syaml.SpackYAMLError("error parsing YAML spec:", str(e))
@staticmethod
def from_json(stream):
"""Construct a spec from JSON.
Parameters:
stream -- string or file object to read from.
"""
try:
data = sjson.load(stream)
return Spec.from_dict(data)
except Exception as e:
raise sjson.SpackJSONError("error parsing JSON spec:", str(e))
def _concretize_helper(self, presets=None, visited=None):
"""Recursive helper function for concretize().
This concretizes everything bottom-up. As things are
concretized, they're added to the presets, and ancestors
will prefer the settings of their children.
"""
if presets is None:
presets = {}
if visited is None:
visited = set()
if self.name in visited:
return False
changed = False
# Concretize deps first -- this is a bottom-up process.
for name in sorted(self._dependencies.keys()):
changed |= self._dependencies[
name].spec._concretize_helper(presets, visited)
if self.name in presets:
changed |= self.constrain(presets[self.name])
else:
# Concretize virtual dependencies last. Because they're added
# to presets below, their constraints will all be merged, but we'll
# still need to select a concrete package later.
if not self.virtual:
changed |= any(
(spack.concretizer.concretize_architecture(self),
spack.concretizer.concretize_compiler(self),
spack.concretizer.concretize_compiler_flags(
self), # has to be concretized after compiler
spack.concretizer.concretize_version(self),
spack.concretizer.concretize_variants(self)))
presets[self.name] = self
visited.add(self.name)
return changed
def _replace_with(self, concrete):
"""Replace this virtual spec with a concrete spec."""
assert(self.virtual)
for name, dep_spec in self._dependents.items():
dependent = dep_spec.parent
deptypes = dep_spec.deptypes
# remove self from all dependents, unless it is already removed
if self.name in dependent._dependencies:
del dependent._dependencies[self.name]
# add the replacement, unless it is already a dep of dependent.
if concrete.name not in dependent._dependencies:
dependent._add_dependency(concrete, deptypes)
def _expand_virtual_packages(self):
"""Find virtual packages in this spec, replace them with providers,
and normalize again to include the provider's (potentially virtual)
dependencies. Repeat until there are no virtual deps.
Precondition: spec is normalized.
.. todo::
If a provider depends on something that conflicts with
other dependencies in the spec being expanded, this can
produce a conflicting spec. For example, if mpich depends
on hwloc@:1.3 but something in the spec needs hwloc@1.4:,
then we should choose an MPI other than mpich. Cases like
this are infrequent, but we should handle them before they
become a problem.
"""
# Make an index of stuff this spec already provides
self_index = ProviderIndex(self.traverse(), restrict=True)
changed = False
done = False
while not done:
done = True
for spec in list(self.traverse()):
replacement = None
if spec.external:
continue
if spec.virtual:
replacement = self._find_provider(spec, self_index)
if replacement:
# TODO: may break if in-place on self but
# shouldn't happen if root is traversed first.
spec._replace_with(replacement)
done = False
break
if not replacement:
# Get a list of possible replacements in order of
# preference.
candidates = spack.concretizer.choose_virtual_or_external(
spec)
# Try the replacements in order, skipping any that cause
# satisfiability problems.
for replacement in candidates:
if replacement is spec:
break
# Replace spec with the candidate and normalize
copy = self.copy()
copy[spec.name]._dup(replacement, deps=False)
try:
# If there are duplicate providers or duplicate
# provider deps, consolidate them and merge
# constraints.
copy.normalize(force=True)
break
except SpecError:
# On error, we'll try the next replacement.
continue
# If replacement is external then trim the dependencies
if replacement.external:
if (spec._dependencies):
changed = True
spec._dependencies = DependencyMap()
replacement._dependencies = DependencyMap()
replacement.architecture = self.architecture
# TODO: could this and the stuff in _dup be cleaned up?
def feq(cfield, sfield):
return (not cfield) or (cfield == sfield)
if replacement is spec or (
feq(replacement.name, spec.name) and
feq(replacement.versions, spec.versions) and
feq(replacement.compiler, spec.compiler) and
feq(replacement.architecture, spec.architecture) and
feq(replacement._dependencies, spec._dependencies) and
feq(replacement.variants, spec.variants) and
feq(replacement.external_path,
spec.external_path) and
feq(replacement.external_module,
spec.external_module)):
continue
# Refine this spec to the candidate. This uses
# replace_with AND dup so that it can work in
# place. TODO: make this more efficient.
if spec.virtual:
spec._replace_with(replacement)
changed = True
if spec._dup(replacement, deps=False, cleardeps=False):
changed = True
spec._dependencies.owner = spec
self_index.update(spec)
done = False
break
return changed
def concretize(self):
"""A spec is concrete if it describes one build of a package uniquely.
This will ensure that this spec is concrete.
If this spec could describe more than one version, variant, or build
of a package, this will add constraints to make it concrete.
Some rigorous validation and checks are also performed on the spec.
Concretizing ensures that it is self-consistent and that it's
consistent with the requirements of its packages. See flatten() and
normalize() for more details on this.
It also ensures that:
.. code-block:: python
for x in self.traverse():
assert x.package.spec == x
which may not be true *during* the concretization step.
"""
if not self.name:
raise SpecError("Attempting to concretize anonymous spec")
if self._concrete:
return
changed = True
force = False
while changed:
changes = (self.normalize(force),
self._expand_virtual_packages(),
self._concretize_helper())
changed = any(changes)
force = True
for s in self.traverse(deptype_query=all):
# After concretizing, assign namespaces to anything left.
# Note that this doesn't count as a "change". The repository
# configuration is constant throughout a spack run, and
# normalize and concretize evaluate Packages using Repo.get(),
# which respects precedence. So, a namespace assignment isn't
# changing how a package name would have been interpreted and
# we can do it as late as possible to allow as much
# compatibility across repositories as possible.
if s.namespace is None:
s.namespace = spack.repo.repo_for_pkg(s.name).namespace
# Add any patches from the package to the spec.
patches = []
for cond, patch_list in s.package_class.patches.items():
if s.satisfies(cond):
for patch in patch_list:
patches.append(patch.sha256)
if patches:
mvar = s.variants.setdefault(
'patches', MultiValuedVariant('patches', ())
)
mvar.value = patches
# FIXME: Monkey patches mvar to store patches order
mvar._patches_in_order_of_appearance = patches
# Apply patches required on dependencies by depends_on(..., patch=...)
for dspec in self.traverse_edges(deptype=all,
cover='edges', root=False):
pkg_deps = dspec.parent.package_class.dependencies
if dspec.spec.name not in pkg_deps:
continue
patches = []
for cond, dependency in pkg_deps[dspec.spec.name].items():
if dspec.parent.satisfies(cond):
for pcond, patch_list in dependency.patches.items():
if dspec.spec.satisfies(pcond):
for patch in patch_list:
patches.append(patch.sha256)
if patches:
mvar = dspec.spec.variants.setdefault(
'patches', MultiValuedVariant('patches', ())
)
mvar.value = mvar.value + tuple(patches)
# FIXME: Monkey patches mvar to store patches order
l = getattr(mvar, '_patches_in_order_of_appearance', [])
mvar._patches_in_order_of_appearance = dedupe(l + patches)
for s in self.traverse():
if s.external_module:
compiler = spack.compilers.compiler_for_spec(
s.compiler, s.architecture)
for mod in compiler.modules:
load_module(mod)
s.external_path = get_path_from_module(s.external_module)
# Mark everything in the spec as concrete, as well.
self._mark_concrete()
# Now that the spec is concrete we should check if
# there are declared conflicts
matches = []
for x in self.traverse():
for conflict_spec, when_list in x.package.conflicts.items():
if x.satisfies(conflict_spec):
for when_spec, msg in when_list:
if x.satisfies(when_spec):
matches.append((x, conflict_spec, when_spec, msg))
if matches:
raise ConflictsInSpecError(self, matches)
# At this point the spec-package mutual references should
# be self-consistent
for x in self.traverse():
x.package.spec = x
def _mark_concrete(self, value=True):
"""Mark this spec and its dependencies as concrete.
Only for internal use -- client code should use "concretize"
unless there is a need to force a spec to be concrete.
"""
for s in self.traverse(deptype_query=all):
s._normal = value
s._concrete = value
def concretized(self):
"""This is a non-destructive version of concretize(). First clones,
then returns a concrete version of this package without modifying
this package. """
clone = self.copy()
clone.concretize()
return clone
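# Hedged example: concretize a copy, leaving the abstract spec untouched
# ('hdf5' and '+mpi' are illustrative only):
#   abstract = Spec('hdf5+mpi')
#   concrete = abstract.concretized()
#   assert concrete.concrete and not abstract.concrete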
def flat_dependencies(self, **kwargs):
"""Return a DependencyMap containing all of this spec's
dependencies with their constraints merged.
If copy is True, returns merged copies of its dependencies
without modifying the spec it's called on.
If copy is False, clears this spec's dependencies and
returns them.
"""
copy = kwargs.get('copy', True)
deptype_query = kwargs.get('deptype_query', 'all')
flat_deps = {}
try:
deptree = self.traverse(root=False, deptype_query=deptype_query)
for spec in deptree:
if spec.name not in flat_deps:
if copy:
spec = spec.copy(deps=False)
flat_deps[spec.name] = spec
else:
flat_deps[spec.name].constrain(spec)
if not copy:
for spec in flat_deps.values():
spec._dependencies.clear()
spec._dependents.clear()
self._dependencies.clear()
return flat_deps
except UnsatisfiableSpecError as e:
# Here, the DAG contains two instances of the same package
# with inconsistent constraints. Users cannot produce
# inconsistent specs like this on the command line: the
# parser doesn't allow it. Spack must be broken!
raise InconsistentSpecError("Invalid Spec DAG: %s" % e.message)
def index(self, deptype='all'):
"""Return DependencyMap that points to all the dependencies in this
spec."""
dm = DependencyMap()
for spec in self.traverse(deptype=deptype):
dm[spec.name] = spec
return dm
def _evaluate_dependency_conditions(self, name):
"""Evaluate all the conditions on a dependency with this name.
Args:
name (str): name of dependency to evaluate conditions on.
Returns:
(Dependency): new Dependency object combining all constraints.
If the package depends on <name> in the current spec
configuration, return the constrained dependency and
corresponding dependency types.
If no conditions are True (and we don't depend on it), return
``None``.
"""
pkg = spack.repo.get(self.fullname)
conditions = pkg.dependencies[name]
substitute_abstract_variants(self)
# evaluate when specs to figure out constraints on the dependency.
dep = None
for when_spec, dependency in conditions.items():
if self.satisfies(when_spec, strict=True):
if dep is None:
dep = Dependency(self.name, Spec(name), type=())
try:
dep.merge(dependency)
except UnsatisfiableSpecError as e:
e.message = ("Conflicting conditional dependencies on "
"package %s for spec %s" % (self.name, self))
raise e
return dep
def _find_provider(self, vdep, provider_index):
"""Find provider for a virtual spec in the provider index.
Raise an exception if there is a conflicting virtual
dependency already in this spec.
"""
assert(vdep.virtual)
# note that this defensively copies.
providers = provider_index.providers_for(vdep)
# If there is a provider for the vpkg, then use that instead of
# the virtual package.
if providers:
# Remove duplicate providers that can concretize to the same
# result.
for provider in providers:
for spec in providers:
if spec is not provider and provider.satisfies(spec):
providers.remove(spec)
# Can't have multiple providers for the same thing in one spec.
if len(providers) > 1:
raise MultipleProviderError(vdep, providers)
return providers[0]
else:
# The user might have required something insufficient for
# pkg_dep -- so we'll get a conflict. e.g., user asked for
# mpi@:1.1 but some package required mpi@2.1:.
required = provider_index.providers_for(vdep.name)
if len(required) > 1:
raise MultipleProviderError(vdep, required)
elif required:
raise UnsatisfiableProviderSpecError(required[0], vdep)
def _merge_dependency(
self, dependency, visited, spec_deps, provider_index):
"""Merge dependency information from a Package into this Spec.
Args:
dependency (Dependency): dependency metadata from a package;
this is typically the result of merging *all* matching
dependency constraints from the package.
visited (set): set of dependency nodes already visited by
``normalize()``.
spec_deps (dict): ``dict`` of all dependencies from the spec
being normalized.
provider_index (dict): ``provider_index`` of virtual dep
providers in the ``Spec`` as normalized so far.
NOTE: Caller should assume that this routine owns the
``dependency`` parameter, i.e., it needs to be a copy of any
internal structures.
This is the core of ``normalize()``. There are some basic steps:
* If dep is virtual, evaluate whether it corresponds to an
existing concrete dependency, and merge if so.
* If it's real and it provides some virtual dep, see if it provides
what some virtual dependency wants and merge if so.
* Finally, if none of the above, merge dependency and its
constraints into this spec.
This method returns True if the spec was changed, False otherwise.
"""
changed = False
dep = dependency.spec
# If it's a virtual dependency, try to find an existing
# provider in the spec, and merge that.
if dep.virtual:
visited.add(dep.name)
provider = self._find_provider(dep, provider_index)
if provider:
dep = provider
else:
index = ProviderIndex([dep], restrict=True)
items = list(spec_deps.items())
for name, vspec in items:
if index.providers_for(vspec):
vspec._replace_with(dep)
del spec_deps[vspec.name]
changed = True
else:
required = index.providers_for(vspec.name)
if required:
raise UnsatisfiableProviderSpecError(required[0], dep)
provider_index.update(dep)
# If the spec isn't already in the set of dependencies, add it.
# Note: dep is always owned by this method. If it's from the
# caller, it's a copy from _evaluate_dependency_conditions. If it
# comes from a vdep, it's a defensive copy from _find_provider.
if dep.name not in spec_deps:
spec_deps[dep.name] = dep
changed = True
else:
# merge package/vdep information into spec
try:
changed |= spec_deps[dep.name].constrain(dep)
except UnsatisfiableSpecError as e:
fmt = 'An unsatisfiable {0}'.format(e.constraint_type)
fmt += ' constraint has been detected for spec:'
fmt += '\n\n{0}\n\n'.format(spec_deps[dep.name].tree(indent=4))
fmt += 'while trying to concretize the partial spec:'
fmt += '\n\n{0}\n\n'.format(self.tree(indent=4))
fmt += '{0} requires {1} {2} {3}, but spec asked for {4}'
e.message = fmt.format(
self.name,
dep.name,
e.constraint_type,
e.required,
e.provided)
raise
# Add merged spec to my deps and recurse
spec_dependency = spec_deps[dep.name]
if dep.name not in self._dependencies:
self._add_dependency(spec_dependency, dependency.type)
changed |= spec_dependency._normalize_helper(
visited, spec_deps, provider_index)
return changed
def _normalize_helper(self, visited, spec_deps, provider_index):
"""Recursive helper function for _normalize."""
if self.name in visited:
return False
visited.add(self.name)
# if we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
if self.virtual or self.external:
return False
# Combine constraints from package deps with constraints from
# the spec, until nothing changes.
any_change = False
changed = True
pkg = spack.repo.get(self.fullname)
while changed:
changed = False
for dep_name in pkg.dependencies:
# Do we depend on dep_name? If so pkg_dep is not None.
dep = self._evaluate_dependency_conditions(dep_name)
# If dep is a needed dependency, merge it.
if dep and (spack.package_testing.check(self.name) or
set(dep.type) - set(['test'])):
changed |= self._merge_dependency(
dep, visited, spec_deps, provider_index)
any_change |= changed
return any_change
def normalize(self, force=False):
"""When specs are parsed, any dependencies specified are hanging off
the root, and ONLY the ones that were explicitly provided are there.
Normalization turns a partial flat spec into a DAG, where:
1. Known dependencies of the root package are in the DAG.
2. Each node's dependencies dict only contains its known direct
deps.
3. There is only ONE unique spec for each package in the DAG.
* This includes virtual packages. If there is a non-virtual
package that provides a virtual package that is in the spec,
then we replace the virtual package with the non-virtual one.
TODO: normalize should probably implement some form of cycle
detection, to ensure that the spec is actually a DAG.
"""
if not self.name:
raise SpecError("Attempting to normalize anonymous spec")
# Set _normal and _concrete to False when forced
if force:
self._mark_concrete(False)
if self._normal:
return False
# Ensure first that all packages & compilers in the DAG exist.
self.validate_or_raise()
# Get all the dependencies into one DependencyMap
spec_deps = self.flat_dependencies(copy=False, deptype_query=all)
# Initialize index of virtual dependency providers if
# concretize didn't pass us one already
provider_index = ProviderIndex(
[s for s in spec_deps.values()], restrict=True)
# traverse the package DAG and fill out dependencies according
# to package files & their 'when' specs
visited = set()
any_change = self._normalize_helper(visited, spec_deps, provider_index)
# If there are deps specified but not visited, they're not
# actually deps of this package. Raise an error.
extra = set(spec_deps.keys()).difference(visited)
if extra:
raise InvalidDependencyError(
self.name + " does not depend on " + comma_or(extra))
# Mark the spec as normal once done.
self._normal = True
return any_change
def normalized(self):
"""
Return a normalized copy of this spec without modifying this spec.
"""
clone = self.copy()
clone.normalize()
return clone
def validate_or_raise(self):
"""Checks that names and values in this spec are real. If they're not,
it will raise an appropriate exception.
"""
# FIXME: this function should be lazy, and collect all the errors
# FIXME: before raising the exceptions, instead of being greedy and
# FIXME: raise just the first one encountered
for spec in self.traverse():
# raise an UnknownPackageError if the spec's package isn't real.
if (not spec.virtual) and spec.name:
spack.repo.get(spec.fullname)
# validate compiler in addition to the package name.
if spec.compiler:
if not compilers.supported(spec.compiler):
raise UnsupportedCompilerError(spec.compiler.name)
# Ensure correctness of variants (if the spec is not virtual)
if not spec.virtual:
pkg_cls = spec.package_class
pkg_variants = pkg_cls.variants
not_existing = set(spec.variants) - set(pkg_variants)
if not_existing:
raise UnknownVariantError(spec.name, not_existing)
substitute_abstract_variants(spec)
def constrain(self, other, deps=True):
"""Merge the constraints of other with self.
Returns True if the spec changed as a result, False if not.
"""
# If we are trying to constrain a concrete spec, either the spec
# already satisfies the constraint (and the method returns False)
# or it raises an exception
if self.concrete:
if self.satisfies(other):
return False
else:
raise UnsatisfiableSpecError(
self, other, 'constrain a concrete spec'
)
other = self._autospec(other)
if not (self.name == other.name or
(not self.name) or
(not other.name)):
raise UnsatisfiableSpecNameError(self.name, other.name)
if (other.namespace is not None and
self.namespace is not None and
other.namespace != self.namespace):
raise UnsatisfiableSpecNameError(self.fullname, other.fullname)
if not self.versions.overlaps(other.versions):
raise UnsatisfiableVersionSpecError(self.versions, other.versions)
for v in [x for x in other.variants if x in self.variants]:
if not self.variants[v].compatible(other.variants[v]):
raise UnsatisfiableVariantSpecError(
self.variants[v], other.variants[v]
)
# TODO: Check out the logic here
sarch, oarch = self.architecture, other.architecture
if sarch is not None and oarch is not None:
if sarch.platform is not None and oarch.platform is not None:
if sarch.platform != oarch.platform:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
if sarch.platform_os is not None and oarch.platform_os is not None:
if sarch.platform_os != oarch.platform_os:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
if sarch.target is not None and oarch.target is not None:
if sarch.target != oarch.target:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
changed = False
if self.compiler is not None and other.compiler is not None:
changed |= self.compiler.constrain(other.compiler)
elif self.compiler is None:
changed |= (self.compiler != other.compiler)
self.compiler = other.compiler
changed |= self.versions.intersect(other.versions)
changed |= self.variants.constrain(other.variants)
changed |= self.compiler_flags.constrain(other.compiler_flags)
old = str(self.architecture)
sarch, oarch = self.architecture, other.architecture
if sarch is None or other.architecture is None:
self.architecture = sarch or oarch
else:
if sarch.platform is None or oarch.platform is None:
self.architecture.platform = sarch.platform or oarch.platform
if sarch.platform_os is None or oarch.platform_os is None:
sarch.platform_os = sarch.platform_os or oarch.platform_os
if sarch.target is None or oarch.target is None:
sarch.target = sarch.target or oarch.target
changed |= (str(self.architecture) != old)
if deps:
changed |= self._constrain_dependencies(other)
return changed
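# Hedged example of constrain(): merge another spec's constraints in place
# ('hdf5' is illustrative only):
#   s = Spec('hdf5@1.10:')
#   s.constrain('hdf5+mpi')   # returns True; s is now hdf5@1.10:+mpi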
def _constrain_dependencies(self, other):
"""Apply constraints of other spec's dependencies to this spec."""
other = self._autospec(other)
if not self._dependencies or not other._dependencies:
return False
# TODO: might want more detail than this, e.g. specific deps
# in violation. if this becomes a priority get rid of this
# check and be more specific about what's wrong.
if not other.satisfies_dependencies(self):
raise UnsatisfiableDependencySpecError(other, self)
# Handle common first-order constraints directly
changed = False
for name in self.common_dependencies(other):
changed |= self[name].constrain(other[name], deps=False)
if name in self._dependencies:
changed |= self._dependencies[name].update_deptypes(
other._dependencies[name].deptypes)
# Update with additional constraints from other spec
for name in other.dep_difference(self):
dep_spec_copy = other.get_dependency(name)
dep_copy = dep_spec_copy.spec
deptypes = dep_spec_copy.deptypes
self._add_dependency(dep_copy.copy(), deptypes)
changed = True
return changed
def common_dependencies(self, other):
"""Return names of dependencies that self and other have in common."""
# XXX(deptype): handle deptypes via deptype kwarg.
common = set(
s.name for s in self.traverse(root=False))
common.intersection_update(
s.name for s in other.traverse(root=False))
return common
def constrained(self, other, deps=True):
"""Return a constrained copy without modifying this spec."""
clone = self.copy(deps=deps)
clone.constrain(other, deps)
return clone
def dep_difference(self, other):
"""Returns dependencies in self that are not in other."""
mine = set(s.name for s in self.traverse(root=False))
mine.difference_update(
s.name for s in other.traverse(root=False))
return mine
def _autospec(self, spec_like):
"""
Used to convert arguments to specs. If spec_like is a spec, returns
it. If it's a string, tries to parse a string. If that fails, tries
to parse a local spec from it (i.e. name is assumed to be self's name).
"""
if isinstance(spec_like, spack.spec.Spec):
return spec_like
try:
spec = spack.spec.Spec(spec_like)
if not spec.name:
raise SpecError(
"anonymous package -- this will always be handled")
return spec
except SpecError:
return parse_anonymous_spec(spec_like, self.name)
def satisfies(self, other, deps=True, strict=False, strict_deps=False):
"""Determine if this spec satisfies all constraints of another.
There are two senses for satisfies:
* `loose` (default): the absence of a constraint in self
implies that it *could* be satisfied by other, so we only
check that there are no conflicts with other for
constraints that this spec actually has.
* `strict`: strict means that we *must* meet all the
constraints specified on other.
"""
other = self._autospec(other)
# The only way to satisfy a concrete spec is to match its hash exactly.
if other.concrete:
return self.concrete and self.dag_hash() == other.dag_hash()
# A concrete provider can satisfy a virtual dependency.
if not self.virtual and other.virtual:
try:
pkg = spack.repo.get(self.fullname)
except spack.repository.UnknownEntityError:
# If we can't get package info on this spec, don't treat
# it as a provider of this vdep.
return False
if pkg.provides(other.name):
for provided, when_specs in pkg.provided.items():
if any(self.satisfies(when_spec, deps=False, strict=strict)
for when_spec in when_specs):
if provided.satisfies(other):
return True
return False
# Otherwise, first thing we care about is whether the name matches
if self.name != other.name and self.name and other.name:
return False
# namespaces either match, or other doesn't require one.
if (other.namespace is not None and
self.namespace is not None and
self.namespace != other.namespace):
return False
if self.versions and other.versions:
if not self.versions.satisfies(other.versions, strict=strict):
return False
elif strict and (self.versions or other.versions):
return False
# None indicates no constraints when not strict.
if self.compiler and other.compiler:
if not self.compiler.satisfies(other.compiler, strict=strict):
return False
elif strict and (other.compiler and not self.compiler):
return False
var_strict = strict
if (not self.name) or (not other.name):
var_strict = True
if not self.variants.satisfies(other.variants, strict=var_strict):
return False
# Architecture satisfaction is currently just string equality.
# If not strict, None means unconstrained.
if self.architecture and other.architecture:
if not self.architecture.satisfies(other.architecture, strict):
return False
elif strict and (other.architecture and not self.architecture):
return False
if not self.compiler_flags.satisfies(
other.compiler_flags,
strict=strict):
return False
# If we need to descend into dependencies, do it, otherwise we're done.
if deps:
deps_strict = strict
if self._concrete and not other.name:
# We're dealing with existing specs
deps_strict = True
return self.satisfies_dependencies(other, strict=deps_strict)
else:
return True
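# Illustrative sketch of loose vs. strict satisfaction (package names are
# examples only):
#   Spec('mpich@3.2').satisfies('mpich@3:')            # True: 3.2 lies in 3:
#   Spec('mpich').satisfies('mpich@3:', strict=True)   # False: self has no
#                                                      # version constraint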
def satisfies_dependencies(self, other, strict=False):
"""
This checks constraints on common dependencies against each other.
"""
other = self._autospec(other)
if strict:
if other._dependencies and not self._dependencies:
return False
selfdeps = self.traverse(root=False)
otherdeps = other.traverse(root=False)
if not all(any(d.satisfies(dep) for d in selfdeps)
for dep in otherdeps):
return False
elif not self._dependencies or not other._dependencies:
# if either spec doesn't restrict dependencies then both are
# compatible.
return True
# Handle first-order constraints directly
for name in self.common_dependencies(other):
if not self[name].satisfies(other[name], deps=False):
return False
# For virtual dependencies, we need to dig a little deeper.
self_index = ProviderIndex(self.traverse(), restrict=True)
other_index = ProviderIndex(other.traverse(), restrict=True)
# This handles cases where there are already providers for both vpkgs
if not self_index.satisfies(other_index):
return False
# These two loops handle cases where there is an overly restrictive
# vpkg in one spec for a provider in the other (e.g., mpi@3: is not
# compatible with mpich2)
for spec in self.virtual_dependencies():
if (spec.name in other_index and
not other_index.providers_for(spec)):
return False
for spec in other.virtual_dependencies():
if spec.name in self_index and not self_index.providers_for(spec):
return False
return True
def virtual_dependencies(self):
"""Return list of any virtual deps in this spec."""
return [spec for spec in self.traverse() if spec.virtual]
@property
def patches(self):
"""Return patch objects for any patch sha256 sums on this Spec.
This is for use after concretization to iterate over any patches
associated with this spec.
TODO: this only checks in the package; it doesn't resurrect old
patches from install directories, but it probably should.
"""
if 'patches' not in self.variants:
return []
patches = []
# FIXME: The private attribute below is attached after
# FIXME: concretization to store the order of patches somewhere.
# FIXME: Needs to be refactored in a cleaner way.
for sha256 in self.variants['patches']._patches_in_order_of_appearance:
patch = self.package.lookup_patch(sha256)
if patch:
patches.append(patch)
continue
# if not found in this package, check immediate dependents
# for dependency patches
for dep_spec in self._dependents.values():
patch = dep_spec.parent.package.lookup_patch(sha256)
if patch:
patches.append(patch)
return patches
def _dup(self, other, deps=True, cleardeps=True, caches=None):
"""Copy the spec other into self. This is an overwriting
copy. It does not copy any dependents (parents), but by default
copies dependencies.
To duplicate an entire DAG, call _dup() on the root of the DAG.
Args:
other (Spec): spec to be copied onto ``self``
deps (bool or Sequence): if True copies all the dependencies. If
False copies None. If a sequence of dependency types copy
only those types.
cleardeps (bool): if True clears the dependencies of ``self``,
before possibly copying the dependencies of ``other`` onto
``self``
caches (bool or None): preserve cached fields such as
``_normal``, ``_concrete``, and ``_cmp_key_cache``. By
default this is ``False`` if DAG structure would be
changed by the copy, ``True`` if it's an exact copy.
Returns:
True if ``self`` changed because of the copy operation,
False otherwise.
"""
# We don't count dependencies as changes here
changed = True
if hasattr(self, 'name'):
changed = (self.name != other.name or
self.versions != other.versions or
self.architecture != other.architecture or
self.compiler != other.compiler or
self.variants != other.variants or
self._normal != other._normal or
self.concrete != other.concrete or
self.external_path != other.external_path or
self.external_module != other.external_module or
self.compiler_flags != other.compiler_flags)
# Local node attributes get copied first.
self.name = other.name
self.versions = other.versions.copy()
self.architecture = other.architecture.copy() if other.architecture \
else None
self.compiler = other.compiler.copy() if other.compiler else None
if cleardeps:
self._dependents = DependencyMap()
self._dependencies = DependencyMap()
self.compiler_flags = other.compiler_flags.copy()
self.compiler_flags.spec = self
self.variants = other.variants.copy()
self.variants.spec = self
self.external_path = other.external_path
self.external_module = other.external_module
self.namespace = other.namespace
# Cached fields are results of expensive operations.
# If we preserved the original structure, we can copy them
# safely. If not, they need to be recomputed.
if caches is None:
caches = (deps is True or deps == all_deptypes)
# If we copy dependencies, preserve DAG structure in the new spec
if deps:
# If caller restricted deptypes to be copied, adjust that here.
# By default, just copy all deptypes
deptypes = all_deptypes
if isinstance(deps, (tuple, list)):
deptypes = deps
self._dup_deps(other, deptypes, caches)
if caches:
self._hash = other._hash
self._cmp_key_cache = other._cmp_key_cache
self._normal = other._normal
self._concrete = other._concrete
else:
self._hash = None
self._cmp_key_cache = None
self._normal = False
self._concrete = False
return changed
def _dup_deps(self, other, deptypes, caches):
new_specs = {self.name: self}
for dspec in other.traverse_edges(cover='edges',
root=False):
if (dspec.deptypes and
not any(d in deptypes for d in dspec.deptypes)):
continue
if dspec.parent.name not in new_specs:
new_specs[dspec.parent.name] = dspec.parent.copy(
deps=False, caches=caches)
if dspec.spec.name not in new_specs:
new_specs[dspec.spec.name] = dspec.spec.copy(
deps=False, caches=caches)
new_specs[dspec.parent.name]._add_dependency(
new_specs[dspec.spec.name], dspec.deptypes)
def copy(self, deps=True, **kwargs):
"""Make a copy of this spec.
Args:
deps (bool or tuple): Defaults to True. If boolean, controls
whether dependencies are copied (copied if True). If a
tuple is provided, *only* dependencies of types matching
those in the tuple are copied.
kwargs: additional arguments for internal use (passed to ``_dup``).
Returns:
A copy of this spec.
Examples:
Deep copy with dependencies::
spec.copy()
spec.copy(deps=True)
Shallow copy (no dependencies)::
spec.copy(deps=False)
Only build and run dependencies::
deps=('build', 'run'):
"""
clone = Spec.__new__(Spec)
clone._dup(self, deps=deps, **kwargs)
return clone
@property
def version(self):
if not self.versions.concrete:
raise SpecError("Spec version is not concrete: " + str(self))
return self.versions[0]
def __getitem__(self, name):
"""Get a dependency from the spec by its name. This call implicitly
sets a query state in the package being retrieved. The behavior of
packages may be influenced by additional query parameters that are
passed after a colon symbol.
Note that if a virtual package is queried a copy of the Spec is
returned, while for a non-virtual package a reference is returned.
"""
query_parameters = name.split(':')
if len(query_parameters) > 2:
msg = 'key has more than one \':\' symbol.'
msg += ' At most one is admitted.'
raise KeyError(msg)
name, query_parameters = query_parameters[0], query_parameters[1:]
if query_parameters:
# We have extra query parameters, which are comma separated
# values
csv = query_parameters.pop().strip()
query_parameters = re.split(r'\s*,\s*', csv)
try:
value = next(
itertools.chain(
# Regular specs
(x for x in self.traverse() if x.name == name),
(x for x in self.traverse()
if (not x.virtual) and x.package.provides(name))
)
)
except StopIteration:
raise KeyError("No spec with name %s in %s" % (name, self))
if self._concrete:
return SpecBuildInterface(value, name, query_parameters)
return value
def __contains__(self, spec):
"""True if this spec satisfies the provided spec, or if any dependency
does. If the spec has no name, then we parse this one first.
"""
spec = self._autospec(spec)
for s in self.traverse():
if s.satisfies(spec, strict=True):
return True
return False
def sorted_deps(self):
"""Return a list of all dependencies sorted by name."""
deps = self.flat_dependencies()
return tuple(deps[name] for name in sorted(deps))
def _eq_dag(self, other, vs, vo, deptypes):
"""Recursive helper for eq_dag and ne_dag. Does the actual DAG
traversal."""
vs.add(id(self))
vo.add(id(other))
if self.ne_node(other):
return False
if len(self._dependencies) != len(other._dependencies):
return False
ssorted = [self._dependencies[name]
for name in sorted(self._dependencies)]
osorted = [other._dependencies[name]
for name in sorted(other._dependencies)]
for s_dspec, o_dspec in zip(ssorted, osorted):
if deptypes and s_dspec.deptypes != o_dspec.deptypes:
return False
s, o = s_dspec.spec, o_dspec.spec
visited_s = id(s) in vs
visited_o = id(o) in vo
# Check for duplicate or non-equal dependencies
if visited_s != visited_o:
return False
# Skip visited nodes
if visited_s or visited_o:
continue
# Recursive check for equality
if not s._eq_dag(o, vs, vo, deptypes):
return False
return True
def eq_dag(self, other, deptypes=True):
"""True if the full dependency DAGs of specs are equal."""
return self._eq_dag(other, set(), set(), deptypes)
def ne_dag(self, other, deptypes=True):
"""True if the full dependency DAGs of specs are not equal."""
return not self.eq_dag(other, deptypes)
def _cmp_node(self):
"""Comparison key for just *this node* and not its deps."""
return (self.name,
self.namespace,
self.versions,
self.variants,
self.architecture,
self.compiler,
self.compiler_flags)
def eq_node(self, other):
"""Equality with another spec, not including dependencies."""
return self._cmp_node() == other._cmp_node()
def ne_node(self, other):
"""Inequality with another spec, not including dependencies."""
return self._cmp_node() != other._cmp_node()
def _cmp_key(self):
"""This returns a key for the spec *including* DAG structure.
The key is the concatenation of:
1. A tuple describing this node in the DAG.
2. The hash of each of this node's dependencies' cmp_keys.
"""
if self._cmp_key_cache:
return self._cmp_key_cache
dep_tuple = tuple(
(d.spec.name, hash(d.spec), tuple(sorted(d.deptypes)))
for name, d in sorted(self._dependencies.items()))
key = (self._cmp_node(), dep_tuple)
if self._concrete:
self._cmp_key_cache = key
return key
def colorized(self):
return colorize_spec(self)
def format(self, format_string='$_$@$%@+$+$=', **kwargs):
"""Prints out particular pieces of a spec, depending on what is
in the format string.
The format strings you can provide are::
$_ Package name
$. Full package name (with namespace)
$@ Version with '@' prefix
$% Compiler with '%' prefix
$%@ Compiler with '%' prefix & compiler version with '@' prefix
$%+ Compiler with '%' prefix & compiler flags prefixed by name
$%@+ Compiler, compiler version, and compiler flags with same
prefixes as above
$+ Options
$= Architecture prefixed by 'arch='
$/ 7-char prefix of DAG hash with '/' prefix
$$ $
You can also use full-string versions, which elide the prefixes::
${PACKAGE} Package name
${VERSION} Version
${COMPILER} Full compiler string
${COMPILERNAME} Compiler name
${COMPILERVER} Compiler version
${COMPILERFLAGS} Compiler flags
${OPTIONS} Options
${ARCHITECTURE} Architecture
${SHA1} Dependencies 7-char sha1 prefix
${HASH:len} DAG hash with optional length specifier
${SPACK_ROOT} The spack root directory
${SPACK_INSTALL} The default spack install directory,
${SPACK_PREFIX}/opt
${PREFIX} The package prefix
Note these are case-insensitive: for example you can specify either
``${PACKAGE}`` or ``${package}``.
Optionally you can provide a width, e.g. ``$20_`` for a 20-wide name.
Like printf, you can provide '-' for left justification, e.g.
``$-20_`` for a left-justified name.
Anything else is copied verbatim into the output stream.
Args:
format_string (str): string containing the format to be expanded
**kwargs (dict): the following list of keywords is supported
- color (bool): True if returned string is colored
- transform (dict): maps full-string formats to a callable \
that accepts a string and returns another one
Examples:
The following line:
.. code-block:: python
s = spec.format('$_$@$+')
translates to the name, version, and options of the package, but no
dependencies, arch, or compiler.
TODO: allow, e.g., ``$6#`` to customize short hash length
TODO: allow, e.g., ``$//`` for full hash.
"""
color = kwargs.get('color', False)
# Dictionary of transformations for named tokens
token_transforms = {}
token_transforms.update(kwargs.get('transform', {}))
length = len(format_string)
out = StringIO()
named = escape = compiler = False
named_str = fmt = ''
def write(s, c):
f = color_formats[c] + cescape(s) + '@.'
cwrite(f, stream=out, color=color)
iterator = enumerate(format_string)
for i, c in iterator:
if escape:
fmt = '%'
if c == '-':
fmt += c
i, c = next(iterator)
while c in '0123456789':
fmt += c
i, c = next(iterator)
fmt += 's'
if c == '_':
name = self.name if self.name else ''
out.write(fmt % name)
elif c == '.':
out.write(fmt % self.fullname)
elif c == '@':
if self.versions and self.versions != _any_version:
write(fmt % (c + str(self.versions)), c)
elif c == '%':
if self.compiler:
write(fmt % (c + str(self.compiler.name)), c)
compiler = True
elif c == '+':
if self.variants:
write(fmt % str(self.variants), c)
elif c == '=':
if self.architecture and str(self.architecture):
a_str = ' arch' + c + str(self.architecture) + ' '
write(fmt % (a_str), c)
elif c == '/':
out.write('/' + fmt % (self.dag_hash(7)))
elif c == '$':
if fmt != '%s':
raise ValueError("Can't use format width with $$.")
out.write('$')
elif c == '{':
named = True
named_str = ''
escape = False
elif compiler:
if c == '@':
if (self.compiler and self.compiler.versions and
self.compiler.versions != _any_version):
write(c + str(self.compiler.versions), '%')
elif c == '+':
if self.compiler_flags:
write(fmt % str(self.compiler_flags), '%')
compiler = False
elif c == '$':
escape = True
compiler = False
else:
out.write(c)
compiler = False
elif named:
if not c == '}':
if i == length - 1:
raise ValueError("Error: unterminated ${ in format: "
"'%s'" % format_string)
named_str += c
continue
named_str = named_str.upper()
# Retrieve the token transformation from the dictionary.
#
# The default behavior is to leave the string unchanged
# (`lambda x: x` is the identity function)
token_transform = token_transforms.get(named_str, lambda x: x)
if named_str == 'PACKAGE':
name = self.name if self.name else ''
write(fmt % token_transform(name), '@')
elif named_str == 'VERSION':
if self.versions and self.versions != _any_version:
write(fmt % token_transform(str(self.versions)), '@')
elif named_str == 'COMPILER':
if self.compiler:
write(fmt % token_transform(self.compiler), '%')
elif named_str == 'COMPILERNAME':
if self.compiler:
write(fmt % token_transform(self.compiler.name), '%')
elif named_str in ['COMPILERVER', 'COMPILERVERSION']:
if self.compiler:
write(
fmt % token_transform(self.compiler.versions),
'%'
)
elif named_str == 'COMPILERFLAGS':
if self.compiler:
write(
fmt % token_transform(str(self.compiler_flags)),
'%'
)
elif named_str == 'OPTIONS':
if self.variants:
write(fmt % token_transform(str(self.variants)), '+')
elif named_str == 'ARCHITECTURE':
if self.architecture and str(self.architecture):
write(
fmt % token_transform(str(self.architecture)),
'='
)
elif named_str == 'SHA1':
if self.dependencies:
out.write(fmt % token_transform(str(self.dag_hash(7))))
elif named_str == 'SPACK_ROOT':
out.write(fmt % token_transform(spack.prefix))
elif named_str == 'SPACK_INSTALL':
out.write(fmt % token_transform(spack.store.root))
elif named_str == 'PREFIX':
out.write(fmt % token_transform(self.prefix))
elif named_str.startswith('HASH'):
if named_str.startswith('HASH:'):
_, hashlen = named_str.split(':')
hashlen = int(hashlen)
else:
hashlen = None
out.write(fmt % (self.dag_hash(hashlen)))
named = False
elif c == '$':
escape = True
if i == length - 1:
raise ValueError("Error: unterminated $ in format: '%s'"
% format_string)
else:
out.write(c)
result = out.getvalue()
return result
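# Hedged usage sketch for format(); the rendered value is only indicative:
#   spec.format('${PACKAGE}-${VERSION}-${HASH:7}')
#   # e.g. 'zlib-1.2.11-abcdefg' for a concrete spec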
def cformat(self, *args, **kwargs):
"""Same as format, but color defaults to auto instead of False."""
kwargs = kwargs.copy()
kwargs.setdefault('color', None)
return self.format(*args, **kwargs)
def dep_string(self):
return ''.join("^" + dep.format() for dep in self.sorted_deps())
def __str__(self):
ret = self.format() + self.dep_string()
return ret.strip()
def _install_status(self):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
record = spack.store.db.get_record(self)
return record.installed
except KeyError:
return None
def _installed_explicitly(self):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
record = spack.store.db.get_record(self)
return record.explicit
except KeyError:
return None
def tree(self, **kwargs):
"""Prints out this spec and its dependencies, tree-formatted
with indentation."""
color = kwargs.pop('color', get_color_when())
depth = kwargs.pop('depth', False)
hashes = kwargs.pop('hashes', False)
hlen = kwargs.pop('hashlen', None)
install_status = kwargs.pop('install_status', False)
cover = kwargs.pop('cover', 'nodes')
indent = kwargs.pop('indent', 0)
fmt = kwargs.pop('format', '$_$@$%@+$+$=')
prefix = kwargs.pop('prefix', None)
show_types = kwargs.pop('show_types', False)
deptypes = kwargs.pop('deptypes', ('build', 'link'))
check_kwargs(kwargs, self.tree)
out = ""
for d, dep_spec in self.traverse_edges(
order='pre', cover=cover, depth=True, deptypes=deptypes):
node = dep_spec.spec
if prefix is not None:
out += prefix(node)
out += " " * indent
if depth:
out += "%-4d" % d
if install_status:
status = node._install_status()
if status is None:
out += " " # Package isn't installed
elif status:
out += colorize("@g{[+]} ", color=color) # installed
else:
out += colorize("@r{[-]} ", color=color) # missing
if hashes:
out += colorize('@K{%s} ', color=color) % node.dag_hash(hlen)
if show_types:
out += '['
if dep_spec.deptypes:
for t in all_deptypes:
out += ''.join(t[0] if t in dep_spec.deptypes else ' ')
else:
out += ' ' * len(all_deptypes)
out += '] '
out += (" " * d)
if d > 0:
out += "^"
out += node.format(fmt, color=color) + "\n"
return out
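# Illustrative call (sketch): print an indented dependency tree with short
# hashes and install markers, as used by commands that display spec trees:
#   print(spec.tree(hashes=True, hashlen=7, install_status=True))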
def __repr__(self):
return str(self)
class LazySpecCache(collections.defaultdict):
"""Cache for Specs that uses a spec_like as key, and computes lazily
the corresponding value ``Spec(spec_like)``.
"""
def __init__(self):
super(LazySpecCache, self).__init__(Spec)
def __missing__(self, key):
value = self.default_factory(key)
self[key] = value
return value
#
# These are possible token types in the spec grammar.
#
HASH, DEP, AT, COLON, COMMA, ON, OFF, PCT, EQ, ID, VAL = range(11)
class SpecLexer(spack.parse.Lexer):
"""Parses tokens that make up spack specs."""
def __init__(self):
super(SpecLexer, self).__init__([
(r'/', lambda scanner, val: self.token(HASH, val)),
(r'\^', lambda scanner, val: self.token(DEP, val)),
(r'\@', lambda scanner, val: self.token(AT, val)),
(r'\:', lambda scanner, val: self.token(COLON, val)),
(r'\,', lambda scanner, val: self.token(COMMA, val)),
(r'\+', lambda scanner, val: self.token(ON, val)),
(r'\-', lambda scanner, val: self.token(OFF, val)),
(r'\~', lambda scanner, val: self.token(OFF, val)),
(r'\%', lambda scanner, val: self.token(PCT, val)),
(r'\=', lambda scanner, val: self.token(EQ, val)),
# This is more liberal than identifier_re (see above).
# Checked by check_identifier() for better error messages.
(r'\w[\w.-]*', lambda scanner, val: self.token(ID, val)),
(r'\s+', lambda scanner, val: None)],
[EQ],
[(r'[\S].*', lambda scanner, val: self.token(VAL, val)),
(r'\s+', lambda scanner, val: None)],
[VAL])
# Lexer is always the same for every parser.
_lexer = SpecLexer()
class SpecParser(spack.parse.Parser):
def __init__(self, initial_spec=None):
"""Construct a new SpecParser.
Args:
initial_spec (Spec, optional): provide a Spec that we'll parse
directly into. This is used to avoid construction of a
superfluous Spec object in the Spec constructor.
"""
super(SpecParser, self).__init__(_lexer)
self.previous = None
self._initial = initial_spec
def do_parse(self):
specs = []
try:
while self.next:
# TODO: clean this parsing up a bit
if self.accept(ID):
self.previous = self.token
if self.accept(EQ):
# We're parsing an anonymous spec beginning with a
# key-value pair.
if not specs:
self.push_tokens([self.previous, self.token])
self.previous = None
specs.append(self.spec(None))
else:
if specs[-1].concrete:
# Trying to add k-v pair to spec from hash
raise RedundantSpecError(specs[-1],
'key-value pair')
# We should never end up here: it would require starting a
# new spec with ID, EQ after another spec that is not
# concrete. If the previous spec is not concrete, that case
# is handled in the spec parsing loop; if it is concrete,
# see the if statement above. If there is no previous spec,
# we never reach this else case.
self.unexpected_token()
else:
# We're parsing a new spec by name
self.previous = None
specs.append(self.spec(self.token.value))
elif self.accept(HASH):
# We're finding a spec by hash
specs.append(self.spec_by_hash())
elif self.accept(DEP):
if not specs:
# We're parsing an anonymous spec beginning with a
# dependency. Push the token to recover after creating
# anonymous spec
self.push_tokens([self.token])
specs.append(self.spec(None))
else:
if self.accept(HASH):
# We're finding a dependency by hash for an
# anonymous spec
dep = self.spec_by_hash()
else:
# We're adding a dependency to the last spec
self.expect(ID)
dep = self.spec(self.token.value)
# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs[-1]._hash:
raise RedundantSpecError(specs[-1], 'dependency')
# command line deps get empty deptypes now.
# Real deptypes are assigned later per packages.
specs[-1]._add_dependency(dep, ())
else:
# If the next token can be part of a valid anonymous spec,
# create the anonymous spec
if self.next.type in (AT, ON, OFF, PCT):
# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs and specs[-1]._hash:
raise RedundantSpecError(specs[-1],
'compiler, version, '
'or variant')
specs.append(self.spec(None))
else:
self.unexpected_token()
except spack.parse.ParseError as e:
raise SpecParseError(e)
# If the spec has an os or a target and no platform, give it
# the default platform
platform_default = spack.architecture.platform().name
for spec in specs:
for s in spec.traverse():
if s.architecture and not s.architecture.platform and \
(s.architecture.platform_os or s.architecture.target):
s._set_architecture(platform=platform_default)
return specs
def parse_compiler(self, text):
self.setup(text)
return self.compiler()
def spec_by_hash(self):
self.expect(ID)
specs = spack.store.db.query()
matches = [spec for spec in specs if
spec.dag_hash()[:len(self.token.value)] == self.token.value]
if not matches:
raise NoSuchHashError(self.token.value)
if len(matches) != 1:
raise AmbiguousHashError(
"Multiple packages specify hash beginning '%s'."
% self.token.value, *matches)
return matches[0]
def spec(self, name):
"""Parse a spec out of the input. If a spec is supplied, initialize
and return it instead of creating a new one."""
if name:
spec_namespace, dot, spec_name = name.rpartition('.')
if not spec_namespace:
spec_namespace = None
self.check_identifier(spec_name)
else:
spec_namespace = None
spec_name = None
if self._initial is None:
# This will init the spec without calling Spec.__init__
spec = Spec.__new__(Spec)
else:
# this is used by Spec.__init__
spec = self._initial
self._initial = None
spec.name = spec_name
spec.versions = VersionList()
spec.variants = VariantMap(spec)
spec.architecture = None
spec.compiler = None
spec.external_path = None
spec.external_module = None
spec.compiler_flags = FlagMap(spec)
spec._dependents = DependencyMap()
spec._dependencies = DependencyMap()
spec.namespace = spec_namespace
spec._hash = None
spec._cmp_key_cache = None
spec._normal = False
spec._concrete = False
# record this so that we know whether version is
# unspecified or not.
added_version = False
while self.next:
if self.accept(AT):
vlist = self.version_list()
for version in vlist:
spec._add_version(version)
added_version = True
elif self.accept(ON):
name = self.variant()
spec.variants[name] = BoolValuedVariant(name, True)
elif self.accept(OFF):
name = self.variant()
spec.variants[name] = BoolValuedVariant(name, False)
elif self.accept(PCT):
spec._set_compiler(self.compiler())
elif self.accept(ID):
self.previous = self.token
if self.accept(EQ):
# We're adding a key-value pair to the spec
self.expect(VAL)
spec._add_flag(self.previous.value, self.token.value)
self.previous = None
else:
# We've found the start of a new spec. Go back to do_parse
# and read this token again.
self.push_tokens([self.token])
self.previous = None
break
elif self.accept(HASH):
# Get spec by hash and confirm it matches what we already have
hash_spec = self.spec_by_hash()
if hash_spec.satisfies(spec):
spec._dup(hash_spec)
break
else:
raise InvalidHashError(spec, hash_spec.dag_hash())
else:
break
# If there was no version in the spec, consider it an open range
if not added_version and not spec._hash:
spec.versions = VersionList(':')
return spec
def variant(self, name=None):
if name:
return name
else:
self.expect(ID)
self.check_identifier()
return self.token.value
def version(self):
start = None
end = None
if self.accept(ID):
start = self.token.value
if self.accept(COLON):
if self.accept(ID):
if self.next and self.next.type is EQ:
# This is a start: range followed by a key=value pair
self.push_tokens([self.token])
else:
end = self.token.value
elif start:
# No colon, but there was a version.
return Version(start)
else:
# No colon and no id: invalid version.
self.next_token_error("Invalid version specifier")
if start:
start = Version(start)
if end:
end = Version(end)
return VersionRange(start, end)
def version_list(self):
vlist = []
vlist.append(self.version())
while self.accept(COMMA):
vlist.append(self.version())
return vlist
def compiler(self):
self.expect(ID)
self.check_identifier()
compiler = CompilerSpec.__new__(CompilerSpec)
compiler.name = self.token.value
compiler.versions = VersionList()
if self.accept(AT):
vlist = self.version_list()
for version in vlist:
compiler._add_version(version)
else:
compiler.versions = VersionList(':')
return compiler
def check_identifier(self, id=None):
"""The only identifiers that can contain '.' are versions, but version
ids are context-sensitive so we have to check on a case-by-case
basis. Call this if we detect a version id where it shouldn't be.
"""
if not id:
id = self.token.value
if '.' in id:
self.last_token_error(
"{0}: Identifier cannot contain '.'".format(id))
def parse(string):
"""Returns a list of specs from an input string.
For creating one spec, see Spec() constructor.
"""
return SpecParser().parse(string)
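# Hedged example: parse a command-line style spec string (names are
# placeholders); returns a list with one root Spec carrying the mpich dep:
#   parse('hdf5@1.10: +mpi ^mpich')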
def parse_anonymous_spec(spec_like, pkg_name):
"""Allow the user to omit the package name part of a spec if they
know what it has to be already.
e.g., provides('mpi@2', when='@1.9:') says that this package
provides MPI-2 when its version is 1.9 or higher.
"""
if not isinstance(spec_like, (str, Spec)):
raise TypeError('spec must be Spec or spec string. Found %s'
% type(spec_like))
if isinstance(spec_like, str):
try:
anon_spec = Spec(spec_like)
if anon_spec.name != pkg_name:
raise SpecParseError(spack.parse.ParseError(
"",
"",
"Expected anonymous spec for package %s but found spec for "
"package %s" % (pkg_name, anon_spec.name)))
except SpecParseError:
anon_spec = Spec(pkg_name + ' ' + spec_like)
if anon_spec.name != pkg_name:
raise ValueError(
"Invalid spec for package %s: %s" % (pkg_name, spec_like))
else:
anon_spec = spec_like.copy()
if anon_spec.name != pkg_name:
raise ValueError("Spec name '%s' must match package name '%s'"
% (anon_spec.name, pkg_name))
return anon_spec
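# Minimal sketch (added for illustration, not in the original file): mirrors the
# docstring's provides('mpi@2', when='@1.9:') use case, where the package name is
# omitted from the constraint string and supplied separately.
def _example_parse_anonymous_spec():
    when_spec = parse_anonymous_spec('@1.9:', 'mpi')
    return when_spec.name, str(when_spec.versions)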
def base32_prefix_bits(hash_string, bits):
"""Return the first <bits> bits of a base32 string as an integer."""
if bits > len(hash_string) * 5:
raise ValueError("Too many bits! Requested %d bit prefix of '%s'."
% (bits, hash_string))
hash_bytes = base64.b32decode(hash_string, casefold=True)
return prefix_bits(hash_bytes, bits)
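# Illustrative sketch (not part of the original file): taking the leading bits of
# a base32-encoded hash. The 32-character string below is a made-up value from the
# base32 alphabet, not a real spec hash.
def _example_base32_prefix_bits():
    fake_hash = 'abcdefghijklmnopqrstuvwxyz234567'
    return base32_prefix_bits(fake_hash, 7)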
class SpecParseError(SpecError):
"""Wrapper for ParseError for when we're parsing specs."""
def __init__(self, parse_error):
super(SpecParseError, self).__init__(parse_error.message)
self.string = parse_error.string
self.pos = parse_error.pos
class DuplicateDependencyError(SpecError):
"""Raised when the same dependency occurs in a spec twice."""
class DuplicateCompilerSpecError(SpecError):
"""Raised when the same compiler occurs in a spec twice."""
class UnsupportedCompilerError(SpecError):
"""Raised when the user asks for a compiler spack doesn't know about."""
def __init__(self, compiler_name):
super(UnsupportedCompilerError, self).__init__(
"The '%s' compiler is not yet supported." % compiler_name)
class DuplicateArchitectureError(SpecError):
"""Raised when the same architecture occurs in a spec twice."""
class InconsistentSpecError(SpecError):
"""Raised when two nodes in the same spec DAG have inconsistent
constraints."""
class InvalidDependencyError(SpecError):
"""Raised when a dependency in a spec is not actually a dependency
of the package."""
class NoProviderError(SpecError):
"""Raised when there is no package that provides a particular
virtual dependency.
"""
def __init__(self, vpkg):
super(NoProviderError, self).__init__(
"No providers found for virtual package: '%s'" % vpkg)
self.vpkg = vpkg
class MultipleProviderError(SpecError):
"""Raised when there is no package that provides a particular
virtual dependency.
"""
def __init__(self, vpkg, providers):
"""Takes the name of the vpkg"""
super(MultipleProviderError, self).__init__(
"Multiple providers found for '%s': %s"
% (vpkg, [str(s) for s in providers]))
self.vpkg = vpkg
self.providers = providers
class UnsatisfiableSpecNameError(UnsatisfiableSpecError):
"""Raised when two specs aren't even for the same package."""
def __init__(self, provided, required):
super(UnsatisfiableSpecNameError, self).__init__(
provided, required, "name")
class UnsatisfiableVersionSpecError(UnsatisfiableSpecError):
"""Raised when a spec version conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableVersionSpecError, self).__init__(
provided, required, "version")
class UnsatisfiableCompilerSpecError(UnsatisfiableSpecError):
"""Raised when a spec comiler conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableCompilerSpecError, self).__init__(
provided, required, "compiler")
class UnsatisfiableCompilerFlagSpecError(UnsatisfiableSpecError):
"""Raised when a spec variant conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableCompilerFlagSpecError, self).__init__(
provided, required, "compiler_flags")
class UnsatisfiableArchitectureSpecError(UnsatisfiableSpecError):
"""Raised when a spec architecture conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableArchitectureSpecError, self).__init__(
provided, required, "architecture")
class UnsatisfiableProviderSpecError(UnsatisfiableSpecError):
"""Raised when a provider is supplied but constraints don't match
a vpkg requirement"""
def __init__(self, provided, required):
super(UnsatisfiableProviderSpecError, self).__init__(
provided, required, "provider")
# TODO: get rid of this and be more specific about particular incompatible
# dep constraints
class UnsatisfiableDependencySpecError(UnsatisfiableSpecError):
"""Raised when some dependency of constrained specs are incompatible"""
def __init__(self, provided, required):
super(UnsatisfiableDependencySpecError, self).__init__(
provided, required, "dependency")
class AmbiguousHashError(SpecError):
def __init__(self, msg, *specs):
specs_str = '\n ' + '\n '.join(spec.format('$.$@$%@+$+$=$/')
for spec in specs)
super(AmbiguousHashError, self).__init__(msg + specs_str)
class InvalidHashError(SpecError):
def __init__(self, spec, hash):
super(InvalidHashError, self).__init__(
"The spec specified by %s does not match provided spec %s"
% (hash, spec))
class NoSuchHashError(SpecError):
def __init__(self, hash):
super(NoSuchHashError, self).__init__(
"No installed spec matches the hash: '%s'"
% hash)
class RedundantSpecError(SpecError):
def __init__(self, spec, addition):
super(RedundantSpecError, self).__init__(
"Attempting to add %s to spec %s which is already concrete."
" This is likely the result of adding to a spec specified by hash."
% (addition, spec))
class ConflictsInSpecError(SpecError, RuntimeError):
def __init__(self, spec, matches):
message = 'Conflicts in concretized spec "{0}"\n'.format(
spec.short_spec
)
visited = set()
long_message = ''
match_fmt_default = '{0}. "{1}" conflicts with "{2}"\n'
match_fmt_custom = '{0}. "{1}" conflicts with "{2}" [{3}]\n'
for idx, (s, c, w, msg) in enumerate(matches):
if s not in visited:
visited.add(s)
long_message += 'List of matching conflicts for spec:\n\n'
long_message += s.tree(indent=4) + '\n'
if msg is None:
long_message += match_fmt_default.format(idx + 1, c, w)
else:
long_message += match_fmt_custom.format(idx + 1, c, w, msg)
super(ConflictsInSpecError, self).__init__(message, long_message)
|
wscullin/spack
|
lib/spack/spack/spec.py
|
Python
|
lgpl-2.1
| 139,193
|
[
"VisIt"
] |
a1d238a8b799e985e5f33571c0cf789ad18346557ebaadfea1e9a03a499c3c5b
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains some utility functions and classes that are used in the chemenv package.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import math
import numpy as np
from numpy.linalg import norm
from scipy.integrate import quad
from scipy.interpolate import UnivariateSpline
from scipy.spatial import ConvexHull
from pymatgen.analysis.chemenv.utils.chemenv_errors import SolidAngleError
def get_lower_and_upper_f(surface_calculation_options):
"""Get the lower and upper functions defining a surface in the distance-angle space of neighbors.
:param surface_calculation_options: Options for the surface.
:return: Dictionary containing the "lower" and "upper" functions for the surface.
"""
mindist = surface_calculation_options["distance_bounds"]["lower"]
maxdist = surface_calculation_options["distance_bounds"]["upper"]
minang = surface_calculation_options["angle_bounds"]["lower"]
maxang = surface_calculation_options["angle_bounds"]["upper"]
if surface_calculation_options["type"] == "standard_elliptic":
lower_and_upper_functions = quarter_ellipsis_functions(xx=(mindist, maxang), yy=(maxdist, minang))
elif surface_calculation_options["type"] == "standard_diamond":
deltadist = surface_calculation_options["distance_bounds"]["delta"]
deltaang = surface_calculation_options["angle_bounds"]["delta"]
lower_and_upper_functions = diamond_functions(
xx=(mindist, maxang), yy=(maxdist, minang), x_y0=deltadist, y_x0=deltaang
)
elif surface_calculation_options["type"] == "standard_spline":
lower_points = surface_calculation_options["lower_points"]
upper_points = surface_calculation_options["upper_points"]
degree = surface_calculation_options["degree"]
lower_and_upper_functions = spline_functions(
lower_points=lower_points, upper_points=upper_points, degree=degree
)
else:
raise ValueError(
'Surface calculation of type "{}" ' "is not implemented".format(surface_calculation_options["type"])
)
return lower_and_upper_functions
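# Illustrative sketch (added for clarity, not part of the original module): the
# option values below are arbitrary and only show the expected shape of
# surface_calculation_options for the "standard_elliptic" surface type.
def _example_standard_elliptic_surface():
    options = {
        "type": "standard_elliptic",
        "distance_bounds": {"lower": 1.2, "upper": 1.8},
        "angle_bounds": {"lower": 0.1, "upper": 0.8},
    }
    funcs = get_lower_and_upper_f(options)
    return funcs["lower"](1.5), funcs["upper"](1.5)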
def function_comparison(f1, f2, x1, x2, numpoints_check=500):
"""
Method that compares two functions
Args:
f1: First function to compare
f2: Second function to compare
x1: Lower bound of the interval to compare
x2: Upper bound of the interval to compare
numpoints_check: Number of points used to compare the functions
Returns:
        Whether the functions are equal ("="), f1 is always lower than f2 ("<"), f1 is always larger than f2 (">"),
        f1 is always lower than or equal to f2 ("<="), f1 is always larger than or equal to f2 (">=") on the
interval [x1, x2]. If the two functions cross, a RuntimeError is thrown (i.e. we expect to compare
functions that do not cross...)
"""
xx = np.linspace(x1, x2, num=numpoints_check)
y1 = f1(xx)
y2 = f2(xx)
if np.all(y1 < y2):
return "<"
if np.all(y1 > y2):
return ">"
if np.all(y1 == y2):
return "="
if np.all(y1 <= y2):
return "<="
if np.all(y1 >= y2):
return ">="
raise RuntimeError("Error in comparing functions f1 and f2 ...")
def quarter_ellipsis_functions(xx, yy):
"""
    Method that creates two quarter-ellipse functions based on points xx and yy. The ellipses are supposed to
    be aligned with the axes. The two ellipses pass through the two points xx and yy.
Args:
xx:
First point
yy:
Second point
Returns:
A dictionary with the lower and upper quarter ellipsis functions.
"""
npxx = np.array(xx)
npyy = np.array(yy)
if np.any(npxx == npyy):
raise RuntimeError("Invalid points for quarter_ellipsis_functions")
if np.all(npxx < npyy) or np.all(npxx > npyy):
if npxx[0] < npyy[0]:
p1 = npxx
p2 = npyy
else:
p1 = npyy
p2 = npxx
c_lower = np.array([p1[0], p2[1]])
c_upper = np.array([p2[0], p1[1]])
b2 = (p2[1] - p1[1]) ** 2
else:
if npxx[0] < npyy[0]:
p1 = npxx
p2 = npyy
else:
p1 = npyy
p2 = npxx
c_lower = np.array([p2[0], p1[1]])
c_upper = np.array([p1[0], p2[1]])
b2 = (p1[1] - p2[1]) ** 2
b2overa2 = b2 / (p2[0] - p1[0]) ** 2
def lower(x):
return c_lower[1] - np.sqrt(b2 - b2overa2 * (x - c_lower[0]) ** 2)
def upper(x):
return c_upper[1] + np.sqrt(b2 - b2overa2 * (x - c_upper[0]) ** 2)
return {"lower": lower, "upper": upper}
def spline_functions(lower_points, upper_points, degree=3):
"""
Method that creates two (upper and lower) spline functions based on points lower_points and upper_points.
Args:
lower_points:
Points defining the lower function.
upper_points:
Points defining the upper function.
degree:
Degree for the spline function
Returns:
A dictionary with the lower and upper spline functions.
"""
lower_xx = np.array([pp[0] for pp in lower_points])
lower_yy = np.array([pp[1] for pp in lower_points])
upper_xx = np.array([pp[0] for pp in upper_points])
upper_yy = np.array([pp[1] for pp in upper_points])
lower_spline = UnivariateSpline(lower_xx, lower_yy, k=degree, s=0)
upper_spline = UnivariateSpline(upper_xx, upper_yy, k=degree, s=0)
def lower(x):
return lower_spline(x)
def upper(x):
return upper_spline(x)
return {"lower": lower, "upper": upper}
def diamond_functions(xx, yy, y_x0, x_y0):
r"""
Method that creates two upper and lower functions based on points xx and yy
as well as intercepts defined by y_x0 and x_y0. The resulting functions
form kind of a distorted diamond-like structure aligned from
point xx to point yy.
Schematically :
xx is symbolized by x, yy is symbolized by y, y_x0 is equal to the distance
from x to a, x_y0 is equal to the distance from x to b, the lines a-p and
b-q are parallel to the line x-y such that points p and q are
obtained automatically.
In case of an increasing diamond the lower function is x-b-q and the upper
function is a-p-y while in case of a
decreasing diamond, the lower function is a-p-y and the upper function is
x-b-q.
Increasing diamond | Decreasing diamond
p--y x----b
/ /| |\ \
/ / | | \ q
/ / | a \ |
a / | \ \ |
| / q \ \ |
|/ / \ \|
x----b p--y
Args:
xx:
First point
yy:
Second point
Returns:
A dictionary with the lower and upper diamond functions.
"""
npxx = np.array(xx)
npyy = np.array(yy)
if np.any(npxx == npyy):
raise RuntimeError("Invalid points for diamond_functions")
if np.all(npxx < npyy) or np.all(npxx > npyy):
if npxx[0] < npyy[0]:
p1 = npxx
p2 = npyy
else:
p1 = npyy
p2 = npxx
else:
if npxx[0] < npyy[0]:
p1 = npxx
p2 = npyy
else:
p1 = npyy
p2 = npxx
slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
if slope > 0.0:
x_bpoint = p1[0] + x_y0
myy = p1[1]
bq_intercept = myy - slope * x_bpoint
myx = p1[0]
myy = p1[1] + y_x0
ap_intercept = myy - slope * myx
x_ppoint = (p2[1] - ap_intercept) / slope
def lower(x):
return np.where(x <= x_bpoint, p1[1] * np.ones_like(x), slope * x + bq_intercept)
def upper(x):
return np.where(x >= x_ppoint, p2[1] * np.ones_like(x), slope * x + ap_intercept)
else:
x_bpoint = p1[0] + x_y0
myy = p1[1]
bq_intercept = myy - slope * x_bpoint
myx = p1[0]
myy = p1[1] - y_x0
ap_intercept = myy - slope * myx
x_ppoint = (p2[1] - ap_intercept) / slope
def lower(x):
return np.where(x >= x_ppoint, p2[1] * np.ones_like(x), slope * x + ap_intercept)
def upper(x):
return np.where(x <= x_bpoint, p1[1] * np.ones_like(x), slope * x + bq_intercept)
return {"lower": lower, "upper": upper}
def rectangle_surface_intersection(
rectangle,
f_lower,
f_upper,
bounds_lower=None,
bounds_upper=None,
check=True,
numpoints_check=500,
):
"""
Method to calculate the surface of the intersection of a rectangle (aligned with axes) and another surface
defined by two functions f_lower and f_upper.
Args:
rectangle:
Rectangle defined as : ((x1, x2), (y1, y2)).
f_lower:
Function defining the lower bound of the surface.
f_upper:
Function defining the upper bound of the surface.
bounds_lower:
Interval in which the f_lower function is defined.
bounds_upper:
Interval in which the f_upper function is defined.
check:
Whether to check if f_lower is always lower than f_upper.
numpoints_check:
Number of points used to check whether f_lower is always lower than f_upper
Returns:
The surface of the intersection of the rectangle and the surface defined by f_lower and f_upper.
"""
x1 = np.min(rectangle[0])
x2 = np.max(rectangle[0])
y1 = np.min(rectangle[1])
y2 = np.max(rectangle[1])
    # Check that f_lower is always lower than f_upper between x1 and x2 if no bounds are given or between the bounds
# of the f_lower and f_upper functions if they are given.
if check:
if bounds_lower is not None:
if bounds_upper is not None:
if not all(np.array(bounds_lower) == np.array(bounds_upper)):
raise ValueError("Bounds should be identical for both f_lower and f_upper")
if "<" not in function_comparison(
f1=f_lower,
f2=f_upper,
x1=bounds_lower[0],
x2=bounds_lower[1],
numpoints_check=numpoints_check,
):
raise RuntimeError(
"Function f_lower is not allways lower or equal to function f_upper within "
"the domain defined by the functions bounds."
)
else:
raise ValueError("Bounds are given for f_lower but not for f_upper")
elif bounds_upper is not None:
if bounds_lower is None:
raise ValueError("Bounds are given for f_upper but not for f_lower")
if "<" not in function_comparison(
f1=f_lower,
f2=f_upper,
x1=bounds_lower[0],
x2=bounds_lower[1],
numpoints_check=numpoints_check,
):
raise RuntimeError(
"Function f_lower is not allways lower or equal to function f_upper within "
"the domain defined by the functions bounds."
)
else:
if "<" not in function_comparison(f1=f_lower, f2=f_upper, x1=x1, x2=x2, numpoints_check=numpoints_check):
raise RuntimeError(
"Function f_lower is not allways lower or equal to function f_upper within "
"the domain defined by x1 and x2."
)
if bounds_lower is None:
raise NotImplementedError("Bounds should be given right now ...")
if x2 < bounds_lower[0] or x1 > bounds_lower[1]:
return 0.0, 0.0
if x1 < bounds_lower[0]:
xmin = bounds_lower[0]
else:
xmin = x1
if x2 > bounds_lower[1]:
xmax = bounds_lower[1]
else:
xmax = x2
def diff(x):
flwx = f_lower(x)
fupx = f_upper(x)
minup = np.min([fupx, y2 * np.ones_like(fupx)], axis=0)
maxlw = np.max([flwx, y1 * np.ones_like(flwx)], axis=0)
zeros = np.zeros_like(fupx)
upper = np.where(y2 >= flwx, np.where(y1 <= fupx, minup, zeros), zeros)
lower = np.where(y1 <= fupx, np.where(y2 >= flwx, maxlw, zeros), zeros)
return upper - lower
return quad(diff, xmin, xmax)
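# Illustrative sketch (added for clarity, not in the original module): intersecting
# the unit square with the horizontal band between two constant functions; the
# function returns the (value, abserr) pair produced by scipy's quad.
def _example_rectangle_surface_intersection():
    return rectangle_surface_intersection(
        rectangle=((0.0, 1.0), (0.0, 1.0)),
        f_lower=lambda x: np.zeros_like(x) + 0.25,
        f_upper=lambda x: np.zeros_like(x) + 0.75,
        bounds_lower=[0.0, 1.0],
        bounds_upper=[0.0, 1.0],
    )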
def my_solid_angle(center, coords):
"""
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center:
Center to measure solid angle from.
coords:
List of coords to determine solid angle.
Returns:
The solid angle.
"""
o = np.array(center)
r = [np.array(c) - o for c in coords]
r.append(r[0])
n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
n.append(np.cross(r[1], r[0]))
phi = 0.0
for i in range(len(n) - 1):
try:
value = math.acos(-np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1])))
except ValueError:
mycos = -np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
if 0.999999999999 < mycos < 1.000000000001:
value = math.acos(1.0)
elif -0.999999999999 > mycos > -1.000000000001:
value = math.acos(-1.0)
else:
raise SolidAngleError(mycos)
phi += value
return phi + (3 - len(r)) * math.pi
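# Minimal sketch (not in the original module): solid angle subtended at the origin
# by one face of a cube of side 2 centered on the origin; by symmetry the six
# faces together cover the full 4*pi sphere.
def _example_my_solid_angle():
    center = [0.0, 0.0, 0.0]
    face = [[1.0, 1.0, 1.0], [-1.0, 1.0, 1.0], [-1.0, -1.0, 1.0], [1.0, -1.0, 1.0]]
    return my_solid_angle(center, face)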
def vectorsToMatrix(aa, bb):
"""
Performs the vector multiplication of the elements of two vectors, constructing the 3x3 matrix.
:param aa: One vector of size 3
:param bb: Another vector of size 3
:return: A 3x3 matrix M composed of the products of the elements of aa and bb :
M_ij = aa_i * bb_j
"""
MM = np.zeros([3, 3], np.float_)
for ii in range(3):
for jj in range(3):
MM[ii, jj] = aa[ii] * bb[jj]
return MM
def matrixTimesVector(MM, aa):
"""
:param MM: A matrix of size 3x3
:param aa: A vector of size 3
:return: A vector of size 3 which is the product of the matrix by the vector
"""
bb = np.zeros(3, np.float_)
for ii in range(3):
bb[ii] = np.sum(MM[ii, :] * aa)
return bb
def rotateCoords(coords, R):
"""
Rotate the list of points using rotation matrix R
:param coords: List of points to be rotated
:param R: Rotation matrix
:return: List of rotated points
"""
newlist = []
for pp in coords:
rpp = matrixTimesVector(R, pp)
newlist.append(rpp)
return newlist
def rotateCoordsOpt(coords, R):
"""
Rotate the list of points using rotation matrix R
:param coords: List of points to be rotated
:param R: Rotation matrix
:return: List of rotated points
"""
return [np.dot(R, pp) for pp in coords]
def changebasis(uu, vv, nn, pps):
"""
For a list of points given in standard coordinates (in terms of e1, e2 and e3), returns the same list
expressed in the basis (uu, vv, nn), which is supposed to be orthonormal.
:param uu: First vector of the basis
:param vv: Second vector of the basis
    :param nn: Third vector of the basis
:param pps: List of points in basis (e1, e2, e3)
:return: List of points in basis (uu, vv, nn)
"""
MM = np.zeros([3, 3], np.float_)
for ii in range(3):
MM[ii, 0] = uu[ii]
MM[ii, 1] = vv[ii]
MM[ii, 2] = nn[ii]
PP = np.linalg.inv(MM)
newpps = []
for pp in pps:
newpps.append(matrixTimesVector(PP, pp))
return newpps
def collinear(p1, p2, p3=None, tolerance=0.25):
"""
Checks if the three points p1, p2 and p3 are collinear or not within a given tolerance. The collinearity is
checked by computing the area of the triangle defined by the three points p1, p2 and p3. If the area of this
triangle is less than (tolerance x largest_triangle), then the three points are considered collinear. The
largest_triangle is defined as the right triangle whose legs are the two smallest distances between the three
    points, i.e., its area is : 0.5 x (min(|p2-p1|,|p3-p1|,|p3-p2|) x secondmin(|p2-p1|,|p3-p1|,|p3-p2|))
:param p1: First point
:param p2: Second point
    :param p3: Third point (origin [0.0, 0.0, 0.0] if not given)
:param tolerance: Area tolerance for the collinearity test (0.25 gives about 0.125 deviation from the line)
:return: True if the three points are considered as collinear within the given tolerance, False otherwise
"""
if p3 is None:
triangle_area = 0.5 * np.linalg.norm(np.cross(p1, p2))
dist = np.sort([np.linalg.norm(p2 - p1), np.linalg.norm(p1), np.linalg.norm(p2)])
else:
triangle_area = 0.5 * np.linalg.norm(np.cross(p1 - p3, p2 - p3))
dist = np.sort([np.linalg.norm(p2 - p1), np.linalg.norm(p3 - p1), np.linalg.norm(p3 - p2)])
largest_triangle_area = 0.5 * dist[0] * dist[1]
return triangle_area < tolerance * largest_triangle_area
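# Illustrative sketch (not part of the original module): three points on a line
# pass the collinearity test, while a clearly bent triplet does not.
def _example_collinear():
    on_line = collinear(np.array([0.0, 0.0, 0.0]), np.array([1.0, 1.0, 0.0]), np.array([2.0, 2.0, 0.0]))
    bent = collinear(np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
    return on_line, bent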
def anticlockwise_sort(pps):
"""
Sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Sorted list of points
"""
newpps = []
angles = np.zeros(len(pps), np.float_)
for ipp, pp in enumerate(pps):
angles[ipp] = np.arctan2(pp[1], pp[0])
iisorted = np.argsort(angles)
for ii in range(len(pps)):
newpps.append(pps[iisorted[ii]])
return newpps
def anticlockwise_sort_indices(pps):
"""
Returns the indices that would sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Indices of the sorted list of points
"""
angles = np.zeros(len(pps), np.float_)
for ipp, pp in enumerate(pps):
angles[ipp] = np.arctan2(pp[1], pp[0])
return np.argsort(angles)
def sort_separation(separation):
"""Sort a separation.
:param separation: Initial separation.
:return: Sorted list of separation.
"""
if len(separation[0]) > len(separation[2]):
return [sorted(separation[2]), sorted(separation[1]), sorted(separation[0])]
return [sorted(separation[0]), sorted(separation[1]), sorted(separation[2])]
def sort_separation_tuple(separation):
"""Sort a separation
:param separation: Initial separation
:return: Sorted tuple of separation
"""
if len(separation[0]) > len(separation[2]):
return (
tuple(sorted(separation[2])),
tuple(sorted(separation[1])),
tuple(sorted(separation[0])),
)
return (
tuple(sorted(separation[0])),
tuple(sorted(separation[1])),
tuple(sorted(separation[2])),
)
def separation_in_list(separation_indices, separation_indices_list):
"""
Checks if the separation indices of a plane are already in the list
:param separation_indices: list of separation indices (three arrays of integers)
:param separation_indices_list: list of the list of separation indices to be compared to
:return: True if the separation indices are already in the list, False otherwise
"""
sorted_separation = sort_separation(separation_indices)
for sep in separation_indices_list:
if len(sep[1]) == len(sorted_separation[1]) and np.allclose(sorted_separation[1], sep[1]):
return True
return False
def is_anion_cation_bond(valences, ii, jj):
"""
Checks if two given sites are an anion and a cation.
:param valences: list of site valences
:param ii: index of a site
:param jj: index of another site
:return: True if one site is an anion and the other is a cation (from the valences)
"""
if valences == "undefined":
return True
if valences[ii] == 0 or valences[jj] == 0:
return True
return (valences[ii] > 0 > valences[jj]) or (valences[jj] > 0 > valences[ii])
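# Minimal sketch (added for illustration): with hypothetical valences for a
# three-site structure, the cation at site 0 and the anion at site 1 form an
# anion-cation pair, while the two cations at sites 0 and 2 do not.
def _example_is_anion_cation_bond():
    valences = [2, -2, 2]
    return is_anion_cation_bond(valences, 0, 1), is_anion_cation_bond(valences, 0, 2)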
class Plane:
"""
Class used to describe a plane
"""
TEST_2D_POINTS = [
np.array([0, 0], np.float_),
np.array([1, 0], np.float_),
np.array([0, 1], np.float_),
np.array([-1, 0], np.float_),
np.array([0, -1], np.float_),
np.array([0, 2], np.float_),
np.array([2, 0], np.float_),
np.array([0, -2], np.float_),
np.array([-2, 0], np.float_),
np.array([1, 1], np.float_),
np.array([2, 2], np.float_),
np.array([-1, -1], np.float_),
np.array([-2, -2], np.float_),
np.array([1, 2], np.float_),
np.array([1, -2], np.float_),
np.array([-1, 2], np.float_),
np.array([-1, -2], np.float_),
np.array([2, 1], np.float_),
np.array([2, -1], np.float_),
np.array([-2, 1], np.float_),
np.array([-2, -1], np.float_),
]
def __init__(self, coefficients, p1=None, p2=None, p3=None):
"""
Initializes a plane from the 4 coefficients a, b, c and d of ax + by + cz + d = 0
:param coefficients: abcd coefficients of the plane
"""
# Initializes the normal vector
self.normal_vector = np.array([coefficients[0], coefficients[1], coefficients[2]], np.float_)
normv = np.linalg.norm(self.normal_vector)
self.normal_vector /= normv
nonzeros = np.argwhere(self.normal_vector != 0.0).flatten()
zeros = list(set(range(3)) - set(nonzeros))
if len(nonzeros) == 0:
raise ValueError("Normal vector is equal to 0.0")
if self.normal_vector[nonzeros[0]] < 0.0:
self.normal_vector = -self.normal_vector
dd = -np.float_(coefficients[3]) / normv
else:
dd = np.float_(coefficients[3]) / normv
self._coefficients = np.array(
[self.normal_vector[0], self.normal_vector[1], self.normal_vector[2], dd],
np.float_,
)
self._crosses_origin = np.isclose(dd, 0.0, atol=1e-7, rtol=0.0)
self.p1 = p1
self.p2 = p2
self.p3 = p3
# Initializes 3 points belonging to the plane (useful for some methods)
if self.p1 is None:
self.init_3points(nonzeros, zeros)
self.vector_to_origin = dd * self.normal_vector
self.e1 = None
self.e2 = None
self.e3 = self.normal_vector
def init_3points(self, nonzeros, zeros):
"""Initialialize three random points on this plane.
:param nonzeros: Indices of plane coefficients ([a, b, c]) that are not zero.
:param zeros: Indices of plane coefficients ([a, b, c]) that are equal to zero.
:return: None
"""
if len(nonzeros) == 3:
self.p1 = np.array([-self.d / self.a, 0.0, 0.0], np.float_)
self.p2 = np.array([0.0, -self.d / self.b, 0.0], np.float_)
self.p3 = np.array([0.0, 0.0, -self.d / self.c], np.float_)
elif len(nonzeros) == 2:
self.p1 = np.zeros(3, np.float_)
self.p1[nonzeros[1]] = -self.d / self.coefficients[nonzeros[1]]
self.p2 = np.array(self.p1)
self.p2[zeros[0]] = 1.0
self.p3 = np.zeros(3, np.float_)
self.p3[nonzeros[0]] = -self.d / self.coefficients[nonzeros[0]]
elif len(nonzeros) == 1:
self.p1 = np.zeros(3, np.float_)
self.p1[nonzeros[0]] = -self.d / self.coefficients[nonzeros[0]]
self.p2 = np.array(self.p1)
self.p2[zeros[0]] = 1.0
self.p3 = np.array(self.p1)
self.p3[zeros[1]] = 1.0
def __str__(self):
"""
String representation of the Plane object
:return: String representation of the Plane object
"""
outs = ["Plane object"]
outs.append(f" => Normal vector : {self.normal_vector}")
outs.append(" => Equation of the plane ax + by + cz + d = 0")
outs.append(f" with a = {self._coefficients[0]}")
outs.append(f" b = {self._coefficients[1]}")
outs.append(f" c = {self._coefficients[2]}")
outs.append(f" d = {self._coefficients[3]}")
return "\n".join(outs)
def is_in_plane(self, pp, dist_tolerance):
"""
Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise
"""
return np.abs(np.dot(self.normal_vector, pp) + self._coefficients[3]) <= dist_tolerance
def is_same_plane_as(self, plane):
"""
Checks whether the plane is identical to another Plane "plane"
:param plane: Plane to be compared to
:return: True if the two facets are identical, False otherwise
"""
return np.allclose(self._coefficients, plane.coefficients)
def is_in_list(self, plane_list):
"""
Checks whether the plane is identical to one of the Planes in the plane_list list of Planes
:param plane_list: List of Planes to be compared to
:return: True if the plane is in the list, False otherwise
"""
for plane in plane_list:
if self.is_same_plane_as(plane):
return True
return False
def indices_separate(self, points, dist_tolerance):
"""
Returns three lists containing the indices of the points lying on one side of the plane, on the plane
and on the other side of the plane. The dist_tolerance parameter controls the tolerance to which a point
is considered to lie on the plane or not (distance to the plane)
:param points: list of points
:param dist_tolerance: tolerance to which a point is considered to lie on the plane
or not (distance to the plane)
:return: The lists of indices of the points on one side of the plane, on the plane and
on the other side of the plane
"""
side1 = []
inplane = []
side2 = []
for ip, pp in enumerate(points):
if self.is_in_plane(pp, dist_tolerance):
inplane.append(ip)
else:
if np.dot(pp + self.vector_to_origin, self.normal_vector) < 0.0:
side1.append(ip)
else:
side2.append(ip)
return [side1, inplane, side2]
def distance_to_point(self, point):
"""
Computes the absolute distance from the plane to the point
:param point: Point for which distance is computed
:return: Distance between the plane and the point
"""
return np.abs(np.dot(self.normal_vector, point) + self.d)
def distances(self, points):
"""
Computes the distances from the plane to each of the points. Positive distances are on the side of the
normal of the plane while negative distances are on the other side
:param points: Points for which distances are computed
:return: Distances from the plane to the points (positive values on the side of the normal to the plane,
negative values on the other side)
"""
return [np.dot(self.normal_vector, pp) + self.d for pp in points]
def distances_indices_sorted(self, points, sign=False):
"""
Computes the distances from the plane to each of the points. Positive distances are on the side of the
normal of the plane while negative distances are on the other side. Indices sorting the points from closest
to furthest is also computed.
:param points: Points for which distances are computed
:param sign: Whether to add sign information in the indices sorting the points distances
:return: Distances from the plane to the points (positive values on the side of the normal to the plane,
negative values on the other side), as well as indices of the points from closest to furthest. For the
latter, when the sign parameter is True, items of the sorting list are given as tuples of (index, sign).
"""
distances = [np.dot(self.normal_vector, pp) + self.d for pp in points]
indices = sorted(range(len(distances)), key=lambda k: np.abs(distances[k]))
if sign:
indices = [(ii, int(np.sign(distances[ii]))) for ii in indices]
return distances, indices
def distances_indices_groups(self, points, delta=None, delta_factor=0.05, sign=False):
"""
Computes the distances from the plane to each of the points. Positive distances are on the side of the
normal of the plane while negative distances are on the other side. Indices sorting the points from closest
to furthest is also computed. Grouped indices are also given, for which indices of the distances that are
        separated by less than delta are grouped together. The delta parameter is either set explicitly or taken as
a fraction (using the delta_factor parameter) of the maximal point distance.
:param points: Points for which distances are computed
:param delta: Distance interval for which two points are considered in the same group.
:param delta_factor: If delta is None, the distance interval is taken as delta_factor times the maximal
point distance.
:param sign: Whether to add sign information in the indices sorting the points distances
:return: Distances from the plane to the points (positive values on the side of the normal to the plane,
negative values on the other side), as well as indices of the points from closest to furthest and
grouped indices of distances separated by less than delta. For the sorting list and the grouped
indices, when the sign parameter is True, items are given as tuples of (index, sign).
"""
distances, indices = self.distances_indices_sorted(points=points)
if delta is None:
delta = delta_factor * np.abs(distances[indices[-1]])
iends = [
ii
for ii, idist in enumerate(indices, start=1)
if ii == len(distances) or (np.abs(distances[indices[ii]]) - np.abs(distances[idist]) > delta)
]
if sign:
indices = [(ii, int(np.sign(distances[ii]))) for ii in indices]
grouped_indices = [indices[iends[ii - 1] : iend] if ii > 0 else indices[:iend] for ii, iend in enumerate(iends)]
return distances, indices, grouped_indices
def projectionpoints(self, pps):
"""
Projects each points in the point list pps on plane and returns the list of projected points
:param pps: List of points to project on plane
:return: List of projected point on plane
"""
return [pp - np.dot(pp - self.p1, self.normal_vector) * self.normal_vector for pp in pps]
def orthonormal_vectors(self):
"""
Returns a list of three orthogonal vectors, the two first being parallel to the plane and the
third one is the normal vector of the plane
:return: List of orthogonal vectors
:raise: ValueError if all the coefficients are zero or if there is some other strange error
"""
if self.e1 is None:
diff = self.p2 - self.p1
self.e1 = diff / norm(diff)
self.e2 = np.cross(self.e3, self.e1)
return [self.e1, self.e2, self.e3]
def orthonormal_vectors_old(self):
"""
Returns a list of three orthogonal vectors, the two first being parallel to the plane and the
third one is the normal vector of the plane
:return: List of orthogonal vectors
:raise: ValueError if all the coefficients are zero or if there is some other strange error
"""
if self.e1 is None:
imax = np.argmax(np.abs(self.normal_vector))
if imax == 0:
self.e1 = np.array([self.e3[1], -self.e3[0], 0.0]) / np.sqrt(self.e3[0] ** 2 + self.e3[1] ** 2)
elif imax == 1:
self.e1 = np.array([0.0, self.e3[2], -self.e3[1]]) / np.sqrt(self.e3[1] ** 2 + self.e3[2] ** 2)
elif imax == 2:
self.e1 = np.array([-self.e3[2], 0.0, self.e3[0]]) / np.sqrt(self.e3[0] ** 2 + self.e3[2] ** 2)
else:
raise ValueError("Only three values in the normal vector, should not be here ...")
self.e2 = np.cross(self.e3, self.e1)
return [self.e1, self.e2, self.e3]
def project_and_to2dim_ordered_indices(self, pps, plane_center="mean"):
"""
Projects each points in the point list pps on plane and returns the indices that would sort the
list of projected points in anticlockwise order
:param pps: List of points to project on plane
:return: List of indices that would sort the list of projected points
"""
pp2d = self.project_and_to2dim(pps, plane_center)
return anticlockwise_sort_indices(pp2d)
def project_and_to2dim(self, pps, plane_center):
"""
Projects the list of points pps to the plane and changes the basis from 3D to the 2D basis of the plane
:param pps: List of points to be projected
        :return: List of the 2D coordinates of the projected points, expressed in the basis of the plane
"""
proj = self.projectionpoints(pps)
[u1, u2, u3] = self.orthonormal_vectors()
PP = np.array([[u1[0], u2[0], u3[0]], [u1[1], u2[1], u3[1]], [u1[2], u2[2], u3[2]]])
xypps = []
for pp in proj:
xyzpp = np.dot(pp, PP)
xypps.append(xyzpp[0:2])
if str(plane_center) == "mean":
mean = np.zeros(2, np.float_)
for pp in xypps:
mean += pp
mean /= len(xypps)
xypps = [pp - mean for pp in xypps]
elif plane_center is not None:
projected_plane_center = self.projectionpoints([plane_center])[0]
xy_projected_plane_center = np.dot(projected_plane_center, PP)[0:2]
xypps = [pp - xy_projected_plane_center for pp in xypps]
return xypps
def fit_error(self, points, fit="least_square_distance"):
"""Evaluate the error for a list of points with respect to this plane.
:param points: List of points.
:param fit: Type of fit error.
:return: Error for a list of points with respect to this plane.
"""
if fit == "least_square_distance":
return self.fit_least_square_distance_error(points)
if fit == "maximum_distance":
return self.fit_maximum_distance_error(points)
return None
def fit_least_square_distance_error(self, points):
"""Evaluate the sum of squared distances error for a list of points with respect to this plane.
:param points: List of points.
:return: Sum of squared distances error for a list of points with respect to this plane.
"""
return np.sum([self.distance_to_point(pp) ** 2.0 for pp in points])
def fit_maximum_distance_error(self, points):
"""Evaluate the max distance error for a list of points with respect to this plane.
:param points: List of points.
:return: Max distance error for a list of points with respect to this plane.
"""
return np.max([self.distance_to_point(pp) for pp in points])
@property
def coefficients(self):
"""Return a copy of the plane coefficients.
:return: Plane coefficients as a numpy array.
"""
return np.copy(self._coefficients)
@property
def abcd(self):
"""Return a tuple with the plane coefficients.
:return: Tuple with the plane coefficients.
"""
return (
self._coefficients[0],
self._coefficients[1],
self._coefficients[2],
self._coefficients[3],
)
@property
def a(self):
"""Coefficient a of the plane."""
return self._coefficients[0]
@property
def b(self):
"""Coefficient b of the plane."""
return self._coefficients[1]
@property
def c(self):
"""Coefficient c of the plane."""
return self._coefficients[2]
@property
def d(self):
"""Coefficient d of the plane."""
return self._coefficients[3]
@property
def distance_to_origin(self):
"""Distance of the plane to the origin."""
return self._coefficients[3]
@property
def crosses_origin(self):
"""Whether this plane crosses the origin (i.e. coefficient d is 0.0)."""
return self._crosses_origin
@classmethod
def from_2points_and_origin(cls, p1, p2):
"""Initializes plane from two points and the origin.
:param p1: First point.
:param p2: Second point.
:return: Plane.
"""
return cls.from_3points(p1, p2, np.zeros(3))
@classmethod
def from_3points(cls, p1, p2, p3):
"""Initializes plane from three points.
:param p1: First point.
:param p2: Second point.
:param p3: Third point.
:return: Plane.
"""
nn = np.cross(p1 - p3, p2 - p3)
normal_vector = nn / norm(nn)
nonzeros = np.argwhere(normal_vector != 0.0)
if normal_vector[nonzeros[0, 0]] < 0.0:
normal_vector = -normal_vector
dd = -np.dot(normal_vector, p1)
coefficients = np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_)
return cls(coefficients, p1=p1, p2=p2, p3=p3)
@classmethod
def from_npoints(cls, points, best_fit="least_square_distance"):
"""Initializes plane from a list of points.
If the number of points is larger than 3, will use a least square fitting or max distance fitting.
:param points: List of points.
:param best_fit: Type of fitting procedure for more than 3 points.
:return: Plane
"""
if len(points) == 2:
return cls.from_2points_and_origin(points[0], points[1])
if len(points) == 3:
return cls.from_3points(points[0], points[1], points[2])
if best_fit == "least_square_distance":
return cls.from_npoints_least_square_distance(points)
if best_fit == "maximum_distance":
return cls.from_npoints_maximum_distance(points)
return None
@classmethod
def from_npoints_least_square_distance(cls, points):
"""Initializes plane from a list of points using a least square fitting procedure.
:param points: List of points.
:return: Plane.
"""
mean_point = np.array([sum(pp[ii] for pp in points) for ii in range(3)], np.float_)
mean_point /= len(points)
AA = np.zeros((len(points), 3), np.float_)
for ii, pp in enumerate(points):
for jj in range(3):
AA[ii, jj] = pp[jj] - mean_point[jj]
[UU, SS, Vt] = np.linalg.svd(AA)
imin = np.argmin(SS)
normal_vector = Vt[imin]
nonzeros = np.argwhere(normal_vector != 0.0)
if normal_vector[nonzeros[0, 0]] < 0.0:
normal_vector = -normal_vector
dd = -np.dot(normal_vector, mean_point)
coefficients = np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_)
return cls(coefficients)
@classmethod
def perpendicular_bisector(cls, p1, p2):
"""Initialize a plane from the perpendicular bisector of two points.
The perpendicular bisector of two points is the plane perpendicular to the vector joining these two points
and passing through the middle of the segment joining the two points.
:param p1: First point.
:param p2: Second point.
:return: Plane.
"""
middle_point = 0.5 * (p1 + p2)
normal_vector = p2 - p1
dd = -np.dot(normal_vector, middle_point)
return cls(np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_))
@classmethod
def from_npoints_maximum_distance(cls, points):
"""Initializes plane from a list of points using a max distance fitting procedure.
:param points: List of points.
:return: Plane.
"""
convex_hull = ConvexHull(points)
heights = []
ipoints_heights = []
for isimplex, simplex in enumerate(convex_hull.simplices):
cc = convex_hull.equations[isimplex]
plane = Plane.from_coefficients(cc[0], cc[1], cc[2], cc[3])
distances = [plane.distance_to_point(pp) for pp in points]
ipoint_height = np.argmax(distances)
heights.append(distances[ipoint_height])
ipoints_heights.append(ipoint_height)
imin_height = np.argmin(heights)
normal_vector = convex_hull.equations[imin_height, 0:3]
cc = convex_hull.equations[imin_height]
highest_point = points[ipoints_heights[imin_height]]
middle_point = (
Plane.from_coefficients(cc[0], cc[1], cc[2], cc[3]).projectionpoints([highest_point])[0] + highest_point
) / 2
dd = -np.dot(normal_vector, middle_point)
return cls(np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_))
@classmethod
def from_coefficients(cls, a, b, c, d):
"""Initialize plane from its coefficients.
:param a: a coefficient of the plane.
:param b: b coefficient of the plane.
:param c: c coefficient of the plane.
:param d: d coefficient of the plane.
:return: Plane.
"""
return cls(np.array([a, b, c, d], np.float_))
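# Illustrative sketch (not part of the original module): building the plane z = 1
# from three of its points; the point (0, 0, 3) then lies at distance 2 from it.
def _example_plane_usage():
    plane = Plane.from_3points(
        np.array([0.0, 0.0, 1.0]),
        np.array([1.0, 0.0, 1.0]),
        np.array([0.0, 1.0, 1.0]),
    )
    return plane.coefficients, plane.distance_to_point(np.array([0.0, 0.0, 3.0]))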
|
vorwerkc/pymatgen
|
pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py
|
Python
|
mit
| 42,284
|
[
"pymatgen"
] |
cbf468c6076989b8de8311f8b5a32a746180ff6f84a9a28f8b2a259e1d9e0a7b
|