text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# taken from http://www.piware.de/2011/01/creating-an-https-server-in-python/
# generate server.pem with the following command:
#    openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
# run as follows:
#    python simple-https-server.py
# then in your browser, visit:
#    https://localhost:8443
import http.server
import socketserver
import ssl

PORT = 8443
CERT = "/home/dpriedel/projects/github/CollectEDGARData_Test/https_server/server.pem"

Handler = http.server.SimpleHTTPRequestHandler

# ssl.wrap_socket() was deprecated in Python 3.7 and removed in 3.12.
# Use an SSLContext configured for server-side TLS instead; it negotiates
# the highest TLS version supported by both sides.
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile=CERT)

with socketserver.TCPServer(("localhost", PORT), Handler) as server:
    server.socket = context.wrap_socket(server.socket, server_side=True)
    print("serving at port", PORT)
    server.serve_forever()
|
dpriedel/CollectEDGARData_Test
|
https_server/simple-https-server.py
|
Python
|
gpl-3.0
| 736
|
[
"VisIt"
] |
e806fcf0945c15d2496ef2d6ba0647f4dd8bc5101c3f8397d343909a257bf2df
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.analysis.OrderParameter
**********************************
.. function:: espressopp.analysis.OrderParameter(system, cutoff, angular_momentum, do_cluster_analysis, include_surface_particles, ql_low, ql_high)
:param system:
:param cutoff:
:param angular_momentum: (default: 6)
:param do_cluster_analysis: (default: False)
:param include_surface_particles: (default: False)
:param ql_low: (default: -1.0)
:param ql_high: (default: 1.0)
:type system:
:type cutoff:
:type angular_momentum: int
:type do_cluster_analysis:
:type include_surface_particles:
:type ql_low:
:type ql_high: real
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.AnalysisBase import *
from _espressopp import analysis_OrderParameter
class OrderParameterLocal(AnalysisBaseLocal, analysis_OrderParameter):
    """Per-worker (local) implementation of the OrderParameter analysis."""

    def __init__(self, system, cutoff, angular_momentum=6,
                 do_cluster_analysis=False, include_surface_particles=False,
                 ql_low=-1.0, ql_high=1.0):
        # Only initialize the C++ backend on workers belonging to the active
        # PMI CPU group; when no PMI communicator is active, initialize
        # unconditionally.  (Short-circuit: getMPIcpugroup() is only reached
        # when _PMIComm exists and is active.)
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # print "coupled cluster analysis is currently broken"
            cxxinit(self, analysis_OrderParameter, system, cutoff,
                    angular_momentum, do_cluster_analysis,
                    include_surface_particles, ql_low, ql_high)
if pmi.isController:
    class OrderParameter(AnalysisBase):
        """Controller-side PMI proxy that forwards calls to the local workers."""
        __metaclass__ = pmi.Proxy
        pmiproxydefs = {'cls': 'espressopp.analysis.OrderParameterLocal'}
|
kkreis/espressopp
|
src/analysis/OrderParameter.py
|
Python
|
gpl-3.0
| 2,516
|
[
"ESPResSo"
] |
21e76fc94da5fefe7a6395461128217ec7cf1b1af3d05f065524d9fa197f2ea4
|
"""
This module defines input sets for CP2K and is a work in progress. The structure/philosophy
of this module is based on the Vasp input sets in Pymatgen. These sets are meant to contain
tested parameters that will result in successful, reproducible, consistent calculations without
need for intervention 99% of the time. 99% of the time, you only need to provide a pymatgen
structure object and let the defaults take over from there.
The sets are intended to be very general, e.g. a set for geometry relaxation, and so most of the
time, if you have specific needs, you can simply specify them via the keyword argument
override_default_params (see Section.update() method). If you have the need to create a new input
set (say for a standardized high throughput calculation) then you can create a new child of the
Cp2kInputSet class.
In order to implement a new Set within the current code structure, follow this 3 step flow:
(1) Inherit from Cp2kInputSet or one of its children and call the super() constructor
(2) Create the new sections and insert them into self and its subsections as needed
(3) Call self.update(override_default_params) in order to allow user settings.
"""
import warnings
from pathlib import Path
from typing import Dict, Union
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cp2k.inputs import (
LDOS,
PBE,
PDOS,
QS,
XC_FUNCTIONAL,
Cell,
Coord,
Cp2kInput,
Dft,
E_Density_Cube,
ForceEval,
Global,
Keyword,
KeywordList,
Kind,
Kpoints,
Mgrid,
MO_Cubes,
OrbitalTransformation,
Scf,
Section,
Smear,
Subsys,
V_Hartree_Cube,
)
from pymatgen.io.cp2k.utils import (
get_aux_basis,
get_basis_and_potential,
get_unique_site_indices,
)
__author__ = "Nicholas Winner"
__version__ = "0.2"
__email__ = "nwinner@berkeley.edu"
__date__ = "January 2019"
MODULE_DIR = Path(__file__).resolve().parent
class Cp2kInputSet(Cp2kInput):
    """
    The basic representation of a CP2K input set as a collection of "sections" defining the
    simulation connected to a structure object. At the most basic level, CP2K requires a &GLOBAL
    section and &FORCE_EVAL section. Global sets parameters like "RUN_TYPE" or the overall
    verbosity. FORCE_EVAL is the largest section usually, containing the cell and coordinates of
    atoms, the DFT settings, and more. This top level input set is meant to initialize GLOBAL and
    FORCE_EVAL based on a structure object and any sections that the user provides.

    Like everything that goes into a cp2k input file, this base input set is essentially a
    section object. These sets are distinguished by saving default settings for easy
    implementation of calculations such as relaxation and static calculations. This base set is
    here to transfer a pymatgen structure object into the input format for cp2k and associate
    the basis set and pseudopotential to use with each element in the structure.

    Generally, this class will not be used directly, and instead one of its child-classes will
    be used, which contain more predefined initializations of various sections, and, if
    modifications are required, the user can specify override_default_settings.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        potential_and_basis: Union[Dict, None] = None,
        multiplicity: int = 0,
        project_name: str = "CP2K",
        override_default_params: Union[Dict, None] = None,
        **kwargs,
    ):
        """
        Args:
            structure: (Structure or Molecule) pymatgen structure or molecule object used to
                define the lattice, coordinates, and elements. This structure object cannot
                contain "special" species like the Dummy species, e.g. X, or fractional
                occupations, e.g. Fe0.2, etc.
            potential_and_basis: (dict) Specifies what basis set and potential to use. Specify
                these as a dict of the form:
                    { element: {'cardinality': __, 'sr': __, 'q': __},
                      'cardinality': __, 'functional': __}
                Where cardinality and functional are overall specifications (for all elements),
                while <key='element'> specifies the overrides for a specific element. Currently
                the following conventions must be followed:
                    (a) All species of a particular element must have the same potential/basis
                Defaults to None, which is treated as an empty dict (use recommended defaults).
            multiplicity: (int) Specify the system's multiplicity if appropriate
            project_name: (str) Specify the project name. This will be used to name the output
                files from a CP2K calculation
            override_default_params: (dict) Specifies user-defined settings to override the
                settings of any input set (See Section.update()). Defaults to None (no overrides).
        """
        super().__init__(name="CP2K_INPUT", subsections={})

        # Normalize the container arguments per-call instead of using mutable
        # default arguments (which would be shared across all instances).
        potential_and_basis = {} if potential_and_basis is None else potential_and_basis
        override_default_params = {} if override_default_params is None else override_default_params

        # Important CP2K set parameters
        self.structure = structure
        self.charge = structure.charge
        self.potential_and_basis = potential_and_basis
        self.multiplicity = multiplicity  # spin multiplicity = 2s+1
        self.override_default_params = override_default_params
        self.project_name = project_name
        self.kwargs = kwargs

        # Reject "special" species (dummies, fractional occupations).
        # Raise instead of assert so the check survives `python -O`.
        for s in self.structure.species:
            if s not in Element:
                raise ValueError("Structure contains a non-element species: {}".format(s))

        self.insert(ForceEval())  # always present in cp2k

        self.basis_set_file_names = None  # need for dft
        self.potential_file_name = None  # need for dft
        # assemble structure with atom types and pseudopotentials assigned
        self.create_subsys(self.structure)

        if self.kwargs.get("print_forces", True):
            self.print_forces()
        if self.kwargs.get("print_motion", True):
            self.print_motion()

        self.update(override_default_params)

    def create_subsys(self, structure: Union[Structure, Molecule]):
        """
        Create the structure for the input: the cell (for periodic structures), one KIND
        section per unique site type with its basis set and pseudopotential, and the
        coordinates.
        """
        subsys = Subsys()
        if isinstance(structure, Structure):
            subsys.insert(Cell(structure.lattice))

        # Decide what basis sets/pseudopotentials to use
        basis_and_potential = get_basis_and_potential([str(s) for s in structure.species], self.potential_and_basis)

        # Insert atom kinds by identifying the unique sites (unique element and site properties)
        unique_kinds = get_unique_site_indices(structure)
        for k, v in unique_kinds.items():
            kind = k.split("_")[0]
            kwargs = {}
            # Site properties set on the structure override the looked-up defaults;
            # v[0] is the first site index of this kind.
            if "magmom" in self.structure.site_properties:
                kwargs["magnetization"] = self.structure.site_properties["magmom"][v[0]]
            if "ghost" in self.structure.site_properties:
                kwargs["ghost"] = self.structure.site_properties["ghost"][v[0]]
            if "basis_set" in self.structure.site_properties:
                basis_set = self.structure.site_properties["basis_set"][v[0]]
            else:
                basis_set = basis_and_potential[kind]["basis"]
            if "potential" in self.structure.site_properties:
                potential = self.structure.site_properties["potential"][v[0]]
            else:
                potential = basis_and_potential[kind]["potential"]
            if "aux_basis" in self.structure.site_properties:
                kwargs["aux_basis"] = self.structure.site_properties["aux_basis"][v[0]]
            subsys.insert(Kind(kind, alias=k, basis_set=basis_set, potential=potential, **kwargs))
        coord = Coord(structure, aliases=unique_kinds)
        subsys.insert(coord)
        self["FORCE_EVAL"].insert(subsys)
        self.basis_set_file_names = basis_and_potential["basis_filenames"]
        self.potential_file_name = basis_and_potential["potential_filename"]

    def print_forces(self):
        """
        Print out the forces and stress during calculation
        """
        self["FORCE_EVAL"].insert(Section("PRINT", subsections={}))
        self["FORCE_EVAL"]["PRINT"].insert(Section("FORCES", subsections={}))
        self["FORCE_EVAL"]["PRINT"].insert(Section("STRESS_TENSOR", subsections={}))

    def print_motion(self):
        """
        Print the motion info (trajectory, cell, forces, stress)
        """
        if not self.check("MOTION"):
            self.insert(Section("MOTION", subsections={}))
        self["MOTION"].insert(Section("PRINT", subsections={}))
        self["MOTION"]["PRINT"].insert(Section("TRAJECTORY", section_parameters=["ON"], subsections={}))
        self["MOTION"]["PRINT"].insert(Section("CELL", subsections={}))
        self["MOTION"]["PRINT"].insert(Section("FORCES", subsections={}))
        self["MOTION"]["PRINT"].insert(Section("STRESS", subsections={}))
class DftSet(Cp2kInputSet):
    """
    Base for an input set using the Quickstep module (i.e. a DFT calculation). The DFT section
    is pretty vast in CP2K, so this set hopes to make the DFT setup fairly simple. The provided
    parameters are pretty conservative, and so they should not need to be changed very often.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        ot: bool = True,
        band_gap: float = 0.01,
        eps_default: float = 1e-12,
        eps_scf: float = 1e-7,
        max_scf: Union[int, None] = None,
        minimizer: str = "DIIS",
        preconditioner: str = "FULL_ALL",
        algorithm: str = "STRICT",
        linesearch: str = "2PNT",
        cutoff: int = 1200,
        rel_cutoff: int = 80,
        ngrids: int = 5,
        progression_factor: int = 3,
        override_default_params: Union[Dict, None] = None,
        wfn_restart_file_name: Union[str, None] = None,
        kpoints: Union[Kpoints, None] = None,
        smearing: bool = False,
        **kwargs,
    ):
        """
        Args:
            structure: Pymatgen structure or molecule object
            ot (bool): Whether or not to use orbital transformation method for matrix
                diagonalization. OT is the flagship scf solver of CP2K, and will provide huge
                speed-ups for this part of the calculation, but the system must have a band gap
                for OT to be used (higher band-gap --> faster convergence). Band gap is also
                used by the preconditioner for OT, and should be set as a value SMALLER than the
                true band gap to get good efficiency. Generally, this parameter does not need to
                be changed from default of 0.01
            band_gap (float): The band gap can also be specified in order to determine if ot
                should be turned on.
            eps_default (float): Replaces all EPS_XX Keywords in the DFT section (NOT its
                subsections!) to have this value, ensuring an overall accuracy of at least this
                much.
            eps_scf (float): The convergence criteria for leaving the SCF loop in Hartrees.
                Default is 1e-7. Should ensure reasonable results for all properties. Smaller
                than 1e-7 is generally not needed unless you need very high precision. 1e-6 may
                be used for difficult systems, and should still give reasonable results for most
                properties.
            max_scf (int): The max number of SCF cycles before terminating the solver. NOTE:
                With the OT solver, this corresponds to the max number of INNER scf loops, and
                then the outer loops are set with outer_max_scf, while with diagonalization it
                corresponds to the overall (INNER*OUTER) number of SCF steps, with the inner
                loop limit set by
            minimizer (str): The minimization scheme. DIIS can be as much as 50% faster than the
                more robust conjugate gradient method, and so it is chosen as default. Switch to
                CG if dealing with a difficult system.
            preconditioner (str): Preconditioner for the OT method. FULL_ALL is the most
                reliable, and is the default. Though FULL_SINGLE_INVERSE has faster convergence
                according to our internal tests. Should only change from these two when
                simulation cell gets to be VERY large, in which case FULL_KINETIC might be
                preferred.
            cutoff (int): Cutoff energy (in Ry) for the finest level of the multigrid. A high
                cutoff will allow you to have very accurate calculations PROVIDED that
                REL_CUTOFF is appropriate.
            rel_cutoff (int): This cutoff decides how the Gaussians are mapped onto the
                different levels of the multigrid. From CP2K: A Gaussian is mapped onto the
                coarsest level of the multi-grid, on which the function will cover number of
                grid points greater than or equal to the number of grid points will cover on a
                reference grid defined by REL_CUTOFF.
            progression_factor (int): Divisor of CUTOFF to get the cutoff for the next level of
                the multigrid.

        Takeaway for the cutoffs: https://www.cp2k.org/howto:converging_cutoff
        If CUTOFF is too low, then all grids will be coarse and the calculation may become
        inaccurate; and if REL_CUTOFF is too low, then even if you have a high CUTOFF, all
        Gaussians will be mapped onto the coarsest level of the multi-grid, and thus the
        effective integration grid for the calculation may still be too coarse.
        """
        super().__init__(structure, **kwargs)

        self.structure = structure
        self.ot = ot
        self.band_gap = band_gap
        self.eps_default = eps_default
        self.eps_scf = eps_scf
        self.max_scf = max_scf
        self.minimizer = minimizer
        self.preconditioner = preconditioner
        self.algorithm = algorithm
        self.linesearch = linesearch
        self.cutoff = cutoff
        self.rel_cutoff = rel_cutoff
        self.ngrids = ngrids
        self.progression_factor = progression_factor
        # Normalize per-call instead of using a shared mutable default argument.
        self.override_default_params = {} if override_default_params is None else override_default_params
        self.wfn_restart_file_name = wfn_restart_file_name
        self.kpoints = kpoints
        self.smearing = smearing
        self.kwargs = kwargs

        # Build the QS Section
        qs = QS(eps_default=eps_default)
        max_scf = max_scf if max_scf else 20 if ot else 400  # If ot, max_scf is for inner loop
        scf = Scf(eps_scf=eps_scf, max_scf=max_scf, subsections={})

        # If there's a band gap, use OT, else use Davidson
        if ot:
            if band_gap <= 0:
                warnings.warn(
                    "Orbital Transformation method is being used for"
                    "a system without a bandgap. OT can have very poor"
                    "convergence for metallic systems, proceed with caution.",
                    UserWarning,
                )
            scf.insert(
                OrbitalTransformation(
                    minimizer=minimizer,
                    preconditioner=preconditioner,
                    energy_gap=band_gap,
                    algorithm=algorithm,
                    linesearch=linesearch,
                )
            )
            scf.insert(
                Section(
                    "OUTER_SCF",
                    subsections={},
                    keywords={
                        "MAX_SCF": Keyword("MAX_SCF", kwargs.get("outer_max_scf", 20)),
                        "EPS_SCF": Keyword("EPS_SCF", kwargs.get("outer_eps_scf", eps_scf)),
                    },
                )
            )
        else:
            scf.insert(Section("DIAGONALIZATION", subsections={}))
            mixing_kwds = {
                "METHOD": Keyword("METHOD", "BROYDEN_MIXING"),
                "ALPHA": Keyword("ALPHA", 0.2),
                "NBUFFER": Keyword("NBUFFER", 5),
            }
            mixing = Section("MIXING", keywords=mixing_kwds, subsections=None)
            scf.insert(mixing)
            davidson_kwds = {"PRECONDITIONER": Keyword("PRECONDITIONER", "FULL_ALL")}
            davidson = Section("DAVIDSON", keywords=davidson_kwds, subsections=None)
            scf["DIAGONALIZATION"].insert(davidson)

        # Create the multigrid for FFTs
        mgrid = Mgrid(
            cutoff=cutoff,
            rel_cutoff=rel_cutoff,
            ngrids=ngrids,
            progression_factor=progression_factor,
        )

        # Set the DFT calculation with global parameters
        dft = Dft(
            MULTIPLICITY=self.multiplicity,
            CHARGE=self.charge,
            basis_set_filenames=self.basis_set_file_names,
            potential_filename=self.potential_file_name,
            subsections={"QS": qs, "SCF": scf, "MGRID": mgrid},
            wfn_restart_file_name=wfn_restart_file_name,
        )
        if kpoints:
            dft.insert(Kpoints.from_kpoints(kpoints))
        if smearing or (band_gap <= 0.0):
            scf.kwargs["ADDED_MOS"] = 100
            scf["ADDED_MOS"] = 100  # TODO: how to grab the appropriate number?
            scf.insert(Smear())

        # Create subsections and insert into them
        self["FORCE_EVAL"].insert(dft)
        xc_functional = XC_FUNCTIONAL(functional=kwargs.get("functional", "PBE"))
        xc = Section("XC", subsections={"XC_FUNCTIONAL": xc_functional})
        self["FORCE_EVAL"]["DFT"].insert(xc)
        self["FORCE_EVAL"]["DFT"].insert(Section("PRINT", subsections={}))

        # Molecules get a non-periodic treatment
        if isinstance(structure, Molecule):
            self.activate_nonperiodic()

        if kwargs.get("print_pdos", True):
            self.print_pdos()
        if kwargs.get("print_ldos", False):
            self.print_ldos()
        if kwargs.get("print_mo_cubes", True):
            self.print_mo_cubes()
        if kwargs.get("print_hartree_potential", False):
            self.print_hartree_potential()
        if kwargs.get("print_e_density", False):
            self.print_e_density()

        self.update(self.override_default_params)

    def print_pdos(self, nlumo=-1):
        """
        Activate creation of the PDOS file.

        Args:
            nlumo (int): Number of virtual orbitals to be added to the MO set (-1=all).
                CAUTION: Setting this value to be higher than the number of states present may
                cause a Cholesky error.
        """
        if not self.check("FORCE_EVAL/DFT/PRINT/PDOS"):
            self["FORCE_EVAL"]["DFT"]["PRINT"].insert(PDOS(nlumo=nlumo))

    def print_ldos(self, nlumo=-1):
        """
        Activate the printing of LDOS files, printing one for each atom kind by default

        Args:
            nlumo (int): Number of virtual orbitals to be added to the MO set (-1=all).
                CAUTION: Setting this value to be higher than the number of states present may
                cause a Cholesky error.
        """
        if not self.check("FORCE_EVAL/DFT/PRINT/PDOS"):
            self["FORCE_EVAL"]["DFT"]["PRINT"].insert(PDOS(nlumo=nlumo))
        # One LDOS subsection per site (CP2K atom indices are 1-based).
        for i in range(self.structure.num_sites):
            self["FORCE_EVAL"]["DFT"]["PRINT"]["PDOS"].insert(LDOS(i + 1, alias="LDOS {}".format(i + 1), verbose=False))

    def print_mo_cubes(self, write_cube=False, nlumo=-1, nhomo=-1):
        """
        Activate printing of molecular orbitals.

        Args:
            write_cube (bool): whether to write cube file for the MOs (setting false will just
                print levels in out file)
            nlumo (int): Controls the number of lumos that are printed and dumped as a cube (-1=all)
            nhomo (int): Controls the number of homos that are printed and dumped as a cube (-1=all)
        """
        if not self.check("FORCE_EVAL/DFT/PRINT/MO_CUBES"):
            self["FORCE_EVAL"]["DFT"]["PRINT"].insert(MO_Cubes(write_cube=write_cube, nlumo=nlumo, nhomo=nhomo))

    def print_mo(self):
        """
        Print molecular orbitals when running non-OT diagonalization
        """
        raise NotImplementedError

    def print_hartree_potential(self, stride=(1, 1, 1)):
        """
        Controls the printing of a cube file with electrostatic potential generated by the total
        density (electrons+ions). It is valid only for QS with GPW formalism.
        Note that by convention the potential has opposite sign than the expected physical one.

        Args:
            stride: grid stride along x/y/z for the cube file. A tuple default avoids the
                mutable-default-argument pitfall; list arguments are still accepted.
        """
        if not self.check("FORCE_EVAL/DFT/PRINT/V_HARTREE_CUBE"):
            self["FORCE_EVAL"]["DFT"]["PRINT"].insert(V_Hartree_Cube(keywords={"STRIDE": Keyword("STRIDE", *stride)}))

    def print_e_density(self):
        """
        Controls the printing of cube files with the electronic density and, for LSD
        calculations, the spin density
        """
        if not self.check("FORCE_EVAL/DFT/PRINT/E_DENSITY_CUBE"):
            self["FORCE_EVAL"]["DFT"]["PRINT"].insert(E_Density_Cube())

    def set_charge(self, charge):
        """
        Set the overall charge of the simulation cell
        """
        self["FORCE_EVAL"]["DFT"]["CHARGE"] = Keyword("CHARGE", charge)

    def activate_hybrid(
        self,
        hybrid_functional: str = "PBE0",
        hf_fraction: float = 0.25,
        gga_x_fraction: float = 0.75,
        gga_c_fraction: float = 1,
        max_memory: int = 2000,
        cutoff_radius: float = 8.0,
        potential_type: str = None,
        omega: float = 0.2,
        aux_basis: Union[Dict, None] = None,
        admm: bool = True,
        eps_schwarz: float = 1e-6,
        eps_schwarz_forces: float = 1e-6,
        screen_on_initial_p: bool = True,
        screen_p_forces: bool = True,
    ):
        """
        Basic set for activating hybrid DFT calculation using Auxiliary Density Matrix Method.

        Note 1: When running ADMM with cp2k, memory is very important. If the memory
        requirements exceed what is available (see max_memory), then CP2K will have to calculate
        the 4-electron integrals for HFX during each step of the SCF cycle. ADMM provides a huge
        speed up by making the memory requirements *feasible* to fit into RAM, which means you
        only need to calculate the integrals once each SCF cycle. But, this only works if it
        fits into memory. When setting up ADMM calculations, we recommend doing whatever is
        possible to fit all the 4EI into memory.

        Note 2: This set is designed for reliable high-throughput calculations, NOT for extreme
        accuracy. Please review the in-line comments in this method if you want more control.

        Args:
            hybrid_functional (str): Type of hybrid functional. This set supports HSE (screened)
                and PBE0 (truncated). Default is PBE0, which converges easier in the GPW basis
                used by cp2k.
            hf_fraction (float): fraction of exact HF exchange energy to mix. Default: 0.25
            gga_x_fraction (float): fraction of gga exchange energy to retain. Default: 0.75
            gga_c_fraction (float): fraction of gga correlation energy to retain. Default: 1.0
            max_memory (int): Maximum memory available to each MPI process (in Mb) in the
                calculation. Most modern computing nodes will have ~2Gb per core, or 2048 Mb,
                but check for your specific system. This value should be as large as possible
                while still leaving some memory for the other parts of cp2k. Important: If this
                value is set larger than the memory limits, CP2K will likely seg-fault.
                Default: 2000
            cutoff_radius (float): for truncated hybrid functional (i.e. PBE0), this is the
                cutoff radius. The default is selected as that which generally gives
                convergence, but maybe too low (if you want very high accuracy) or too high (if
                you want a quick screening). Default: 8 angstroms
            potential_type (str): what interaction potential to use for HFX. Available in CP2K
                are COULOMB, GAUSSIAN, IDENTITY, LOGRANGE, MIX_CL, MIX_CL_TRUNC, MIX_LG,
                SHORTRANGE, and TRUNCATED. Default is None, and it will be set automatically
                depending on the named hybrid_functional that you use, but setting it to one of
                the acceptable values will constitute a user-override.
            omega (float): For HSE, this specifies the screening parameter. HSE06 sets this as
                0.2, which is the default.
            aux_basis (dict): If you want to specify the aux basis to use, specify it as a dict
                of the form {'specie_1': 'AUX_BASIS_1', 'specie_2': 'AUX_BASIS_2'}
            admm (bool): Whether or not to use the auxiliary density matrix method for the exact
                HF exchange contribution. Highly recommended. Speed ups between 10x and 1000x
                are possible when compared to non ADMM hybrid calculations. Default: True
            eps_schwarz (float): Screening threshold for HFX, in Ha. Contributions smaller than
                this will be screened. The smaller the value, the more accurate, but also the
                more costly. Default value is 1e-6, which is quite aggressive. Aggressive
                screening can also lead to convergence issues. 1e-7 should be a safe value if
                1e-6 is too aggressive.
            eps_schwarz_forces (float): Same as for eps_schwarz, but for screening contributions
                to forces. Convergence is not as sensitive with respect to eps_schwarz forces as
                compared to eps_schwarz, and so 1e-6 should be good default.
            screen_on_initial_p (bool): If an initial density matrix is provided, in the form of
                a CP2K wfn restart file, then this initial density will be used for screening.
                This is generally very computationally efficient, but, as with eps_schwarz, can
                lead to instabilities if the initial density matrix is poor.
            screen_p_forces (bool): Same as screen_on_initial_p, but for screening of forces.
        """
        if admm:
            aux_basis = aux_basis if aux_basis else {}
            aux_basis = {s: aux_basis[s] if s in aux_basis else None for s in self.structure.symbol_set}
            basis = get_aux_basis(basis_type=aux_basis)
            if isinstance(self["FORCE_EVAL"]["DFT"]["BASIS_SET_FILE_NAME"], KeywordList):
                self["FORCE_EVAL"]["DFT"]["BASIS_SET_FILE_NAME"].extend(
                    [Keyword("BASIS_SET_FILE_NAME", k) for k in ["BASIS_ADMM", "BASIS_ADMM_MOLOPT"]],
                )

            # Attach the AUX_FIT basis to every KIND in the subsystem
            for k, v in self["FORCE_EVAL"]["SUBSYS"].subsections.items():
                if v.name.upper() == "KIND":
                    kind = v["ELEMENT"].values[0]
                    v.keywords["BASIS_SET"] += Keyword("BASIS_SET", "AUX_FIT", basis[kind])

            # Don't change unless you know what you're doing
            # Use NONE for accurate eigenvalues (static calcs)
            aux_matrix_params = {
                "ADMM_PURIFICATION_METHOD": Keyword("ADMM_PURIFICATION_METHOD", "NONE"),
                "METHOD": Keyword("METHOD", "BASIS_PROJECTION"),
            }
            aux_matrix = Section(
                "AUXILIARY_DENSITY_MATRIX_METHOD",
                keywords=aux_matrix_params,
                subsections={},
            )
            self.subsections["FORCE_EVAL"]["DFT"].insert(aux_matrix)

        # Define the GGA functional as PBE
        pbe = PBE("ORIG", scale_c=gga_c_fraction, scale_x=gga_x_fraction)
        xc_functional = XC_FUNCTIONAL("PBE", subsections={"PBE": pbe})

        screening = Section(
            "SCREENING",
            subsections={},
            keywords={
                "EPS_SCHWARZ": Keyword("EPS_SCHWARZ", eps_schwarz),
                "EPS_SCHWARZ_FORCES": Keyword("EPS_SCHWARZ_FORCES", eps_schwarz_forces),
                "SCREEN_ON_INITIAL_P": Keyword("SCREEN_ON_INITIAL_P", screen_on_initial_p),
                "SCREEN_P_FORCES": Keyword("SCREEN_P_FORCES", screen_p_forces),
            },
        )

        ip_keywords = {}
        if hybrid_functional == "HSE06":
            potential_type = potential_type if potential_type else "SHORTRANGE"
            xc_functional.insert(
                Section(
                    "XWPBE",
                    subsections={},
                    keywords={
                        "SCALE_X0": Keyword("SCALE_X0", 1),
                        "SCALE_X": Keyword("SCALE_X", -hf_fraction),
                        "OMEGA": Keyword("OMEGA", omega),
                    },
                )
            )
            ip_keywords.update(
                {
                    "POTENTIAL_TYPE": Keyword("POTENTIAL_TYPE", potential_type),
                    "OMEGA": Keyword("OMEGA", omega),
                }
            )
        elif hybrid_functional == "PBE0":
            potential_type = potential_type if potential_type else "TRUNCATED"
            ip_keywords.update(
                {
                    "POTENTIAL_TYPE": Keyword("POTENTIAL_TYPE", potential_type),
                    "CUTOFF_RADIUS": Keyword("CUTOFF_RADIUS", cutoff_radius),
                    "T_C_G_DATA": Keyword("T_C_G_DATA", "t_c_g.dat"),
                }
            )
        interaction_potential = Section("INTERACTION_POTENTIAL", subsections={}, keywords=ip_keywords)

        # Unlikely for users to override
        load_balance = Section(
            "LOAD_BALANCE",
            keywords={"RANDOMIZE": Keyword("RANDOMIZE", True)},
            subsections={},
        )

        # EPS_STORAGE_SCALING squashes the integrals for efficient storage
        # Unlikely for users to override.
        memory = Section(
            "MEMORY",
            subsections={},
            keywords={
                "EPS_STORAGE_SCALING": Keyword("EPS_STORAGE_SCALING", 0.1),
                "MAX_MEMORY": Keyword("MAX_MEMORY", max_memory),
            },
        )
        hf = Section(
            "HF",
            keywords={"FRACTION": Keyword("FRACTION", hf_fraction)},
            subsections={
                "SCREENING": screening,
                "INTERACTION_POTENTIAL": interaction_potential,
                "LOAD_BALANCE": load_balance,
                "MEMORY": memory,
            },
        )
        xc = Section("XC", subsections={"XC_FUNCTIONAL": xc_functional, "HF": hf})

        self.subsections["FORCE_EVAL"]["DFT"].insert(xc)

    def activate_fast_minimization(self, on):
        """
        Method to modify the set to use fast SCF minimization.
        """
        if on:
            ot = OrbitalTransformation(
                minimizer="DIIS",
                preconditioner="FULL_ALL",
                algorithm="IRAC",
                energy_gap=0.01,
                linesearch="2PNT",
            )
            self.update({"FORCE_EVAL": {"DFT": {"SCF": {"OT": ot}}}})

    def activate_robust_minimization(self):
        """
        Method to modify the set to use more robust SCF minimization technique
        """
        ot = OrbitalTransformation(
            minimizer="CG",
            preconditioner="FULL_ALL",
            algorithm="STRICT",
            energy_gap=0.05,
            linesearch="3PNT",
        )
        self.update({"FORCE_EVAL": {"DFT": {"SCF": {"OT": ot}}}})

    def activate_very_strict_minimization(self):
        """
        Method to modify the set to use very strict SCF minimization scheme
        """
        ot = OrbitalTransformation(
            minimizer="CG",
            preconditioner="FULL_ALL",
            algorithm="STRICT",
            energy_gap=0.05,
            linesearch="GOLD",
        )
        self.update({"FORCE_EVAL": {"DFT": {"SCF": {"OT": ot}}}})

    def activate_nonperiodic(self):
        """
        Activates a calculation with non-periodic calculations by turning off PBC and
        changing the poisson solver. Still requires a CELL to put the atoms
        """
        kwds = {
            "POISSON_SOLVER": Keyword("POISSON_SOLVER", "MT"),
            "PERIODIC": Keyword("PERIODIC", "NONE"),
        }
        self["FORCE_EVAL"]["DFT"].insert(Section("POISSON", subsections={}, keywords=kwds))
        if not self.check("FORCE_EVAL/SUBSYS/CELL"):
            # Build a bounding-box cell from the maximum coordinates of the sites
            x = max([s.coords[0] for s in self.structure.sites])
            y = max([s.coords[1] for s in self.structure.sites])
            z = max([s.coords[2] for s in self.structure.sites])
            self["FORCE_EVAL"]["SUBSYS"].insert(Cell(lattice=Lattice([[x, 0, 0], [0, y, 0], [0, 0, z]])))
        self["FORCE_EVAL"]["SUBSYS"]["CELL"].add(Keyword("PERIODIC", "NONE"))

    def modify_dft_print_iters(self, iters, add_last="no"):
        """
        Modify all DFT print iterations at once. Common use is to set iters to the max
        number of iterations + 1 and then set add_last to numeric. This would have the
        effect of printing only the first and last iteration, which might be useful for
        speeding up/saving space on GEO_OPT or MD runs where you don't need the intermediate
        values.

        Args:
            iters (int): print each "iters" iterations.
            add_last (str): Whether to explicitly include the last iteration, and how to mark it.
                numeric: mark last iteration with the iteration number
                symbolic: mark last iteration with the letter "l"
                no: do not explicitly include the last iteration
        """
        assert add_last.lower() in ["no", "numeric", "symbolic"]
        if self.check("FORCE_EVAL/DFT/PRINT"):
            run_type = self["global"].get("run_type", Keyword("run_type", "energy")).values[0]
            for k, v in self["force_eval"]["dft"]["print"].subsections.items():
                # These sections do not support the EACH/ADD_LAST keywords
                if v.name.upper() in [
                    "ACTIVE_SPACE",
                    "BAND_STRUCTURE",
                    "GAPW",
                    "IMPLICIT_PSOLVER",
                    "SCCS",
                    "WFN_MIX",
                ]:
                    continue

                v.insert(
                    Section(
                        "EACH",
                        subsections=None,
                        keywords={run_type: Keyword(run_type, iters)},
                    )
                )
                v.keywords["ADD_LAST"] = Keyword("ADD_LAST", add_last)
class StaticSet(DftSet):
    """
    Basic static energy calculation. Turns on Quickstep module, sets the run_type in global,
    and uses structure object to build the subsystem.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        project_name: str = "Static",
        run_type: str = "ENERGY_FORCE",
        override_default_params: Union[Dict, None] = None,
        **kwargs,
    ):
        """
        Args:
            structure: Pymatgen structure object
            project_name (str): What to name this cp2k project (controls naming of files
                printed out)
            run_type (str): Run type. As a static set it should be one of the static aliases,
                like 'ENERGY_FORCE'
            override_default_params (dict): user settings applied on top of this set's defaults
                (see Section.update()). Defaults to None (no overrides).
        """
        super().__init__(structure, **kwargs)
        global_section = Global(project_name=project_name, run_type=run_type)
        self.structure = structure
        self.project_name = project_name
        self.run_type = run_type
        # Normalize per-call instead of using a shared mutable default argument.
        self.override_default_params = {} if override_default_params is None else override_default_params
        self.insert(global_section)
        self.update(self.override_default_params)
        self.kwargs = kwargs
class RelaxSet(DftSet):
    """
    CP2K input set containing the basic settings for performing geometry optimization. Values are all cp2k
    defaults, and should be good for most systems of interest.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        max_drift: float = 3e-3,
        max_force: float = 4.5e-3,
        max_iter: int = 200,
        project_name: str = "Relax",
        optimizer: str = "BFGS",
        override_default_params: Dict = None,
        **kwargs,
    ):
        """
        Args:
            structure: Pymatgen structure or molecule object
            max_drift: Convergence criterion for the maximum geometry change between the current and the
                last optimizer iteration. This keyword cannot be repeated and it expects precisely one real.
                Default value: 3.00000000E-003
                Default unit: [bohr]
            max_force (float): Convergence criterion for the maximum force component of the current configuration.
                This keyword cannot be repeated and it expects precisely one real.
                Default value: 4.50000000E-004
                Default unit: [bohr^-1*hartree]
            max_iter (int): Specifies the maximum number of geometry optimization steps.
                One step might imply several force evaluations for the CG and LBFGS optimizers.
                This keyword cannot be repeated and it expects precisely one integer.
                Default value: 200
            project_name (str): What to name this cp2k project (controls naming of files printed out)
            optimizer (str): Specify which method to use to perform a geometry optimization.
                This keyword cannot be repeated and it expects precisely one keyword. BFGS is a
                quasi-newtonian method, and will be best for "small" systems near the minimum. LBFGS
                is a limited memory version that can be used for "large" (>1000 atom) systems when
                efficiency outweighs robustness. CG is more robust, especially when you are far from
                the minimum, but it is slower.
                Default value: BFGS
            override_default_params (Dict): Section-style dict of settings that override the
                defaults produced by this input set. Defaults to no overrides.
        """
        super().__init__(structure, **kwargs)

        # Normalize here instead of using "= {}" in the signature, which would be a
        # shared mutable default across all instances.
        override_default_params = override_default_params if override_default_params is not None else {}

        self.structure = structure
        self.max_drift = max_drift
        self.max_force = max_force
        self.max_iter = max_iter
        self.project_name = project_name
        self.optimizer = optimizer
        self.override_default_params = override_default_params
        self.kwargs = kwargs

        global_section = Global(project_name=project_name, run_type="GEO_OPT")

        geo_opt_params = {
            "TYPE": Keyword("TYPE", "MINIMIZATION"),
            "MAX_DR": Keyword("MAX_DR", max_drift),
            "MAX_FORCE": Keyword("MAX_FORCE", max_force),
            "RMS_DR": Keyword("RMS_DR", 1.5e-3),
            "MAX_ITER": Keyword("MAX_ITER", max_iter),
            "OPTIMIZER": Keyword("OPTIMIZER", optimizer),
        }
        geo_opt = Section("GEO_OPT", subsections={}, keywords=geo_opt_params)

        if not self.check("MOTION"):
            self.insert(Section("MOTION", subsections={}))

        self["MOTION"].insert(geo_opt)
        self.insert(global_section)
        # Only print DFT output on the last step (max_iter + 1 guarantees the EACH
        # interval is never hit before the explicitly-added last iteration).
        self.modify_dft_print_iters(max_iter + 1, add_last="numeric")
        # User overrides are applied last so they win over the defaults.
        self.update(override_default_params)
class CellOptSet(DftSet):
    """
    CP2K input set containing the basic settings for performing a cell (lattice + geometry)
    optimization. Values are all cp2k defaults, and should be good for most systems of interest.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        project_name: str = "CellOpt",
        override_default_params: Dict = None,
        **kwargs,
    ):
        """
        Args:
            structure: Pymatgen structure or molecule object
            project_name (str): What to name this cp2k project (controls naming of files printed out)
            override_default_params (Dict): Section-style dict of settings that override the
                defaults produced by this input set. Defaults to no overrides.
        """
        super().__init__(structure, **kwargs)

        # Normalize here instead of using "= {}" in the signature, which would be a
        # shared mutable default across all instances.
        override_default_params = override_default_params if override_default_params is not None else {}

        self.structure = structure
        self.project_name = project_name
        self.override_default_params = override_default_params
        self.kwargs = kwargs
        global_section = Global(project_name=project_name, run_type="CELL_OPT")
        self.insert(global_section)
        # NOTE(review): self.get() performs a lookup on the input-file object, not on the
        # constructor arguments; no "max_iter" entry is inserted here, so this most likely
        # always falls back to 200. Possibly kwargs.get("max_iter", 200) was intended —
        # behavior preserved, confirm with the maintainers.
        self.modify_dft_print_iters(self.get("max_iter", 200) + 1, add_last="numeric")
        # User overrides are applied last so they win over the defaults.
        self.update(override_default_params)
class HybridStaticSet(StaticSet):
    """
    Static calculation using hybrid DFT with the ADMM formalism in Cp2k.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        hybrid_functional: str = "PBE0",
        hf_fraction: float = 0.25,
        project_name: str = "Hybrid-Static",
        gga_x_fraction: float = 0.75,
        gga_c_fraction: float = 1,
        override_default_params: Dict = None,
        max_memory: int = 2000,
        cutoff_radius: float = 8.0,
        omega: float = 0.2,
        aux_basis: Union[Dict, None] = None,
        admm: bool = True,
        eps_schwarz: float = 1e-6,
        eps_schwarz_forces: float = 1e-6,
        screen_on_initial_p: bool = True,
        screen_p_forces: bool = True,
        **kwargs,
    ):
        """
        Args:
            structure: pymatgen structure object
            hybrid_functional: hybrid dft functional to use (currently select between HSE06 and PBE0)
            hf_fraction: percentage of exact HF to mix-in
            project_name: what to call this project
            gga_x_fraction: percentage of gga exchange to use
            gga_c_fraction: percentage of gga correlation to use
            override_default_params: override settings (see above). Defaults to no overrides.

        The remaining arguments (max_memory, cutoff_radius, omega, aux_basis, admm,
        eps_schwarz, eps_schwarz_forces, screen_on_initial_p, screen_p_forces) are
        forwarded unchanged to DftSet.activate_hybrid; see that method for their meaning.
        """
        super().__init__(structure, project_name=project_name, **kwargs)

        # Normalize here instead of using "= {}" in the signature, which would be a
        # shared mutable default across all instances.
        override_default_params = override_default_params if override_default_params is not None else {}

        self.structure = structure
        self.hybrid_functional = hybrid_functional
        self.hf_fraction = hf_fraction
        self.project_name = project_name
        self.gga_x_fraction = gga_x_fraction
        self.gga_c_fraction = gga_c_fraction
        self.override_default_params = override_default_params
        self.max_memory = max_memory
        self.cutoff_radius = cutoff_radius
        self.omega = omega
        self.aux_basis = aux_basis
        self.admm = admm
        self.eps_schwarz = eps_schwarz
        self.eps_schwarz_forces = eps_schwarz_forces
        self.screen_on_initial_p = screen_on_initial_p
        self.screen_p_forces = screen_p_forces
        self.kwargs = kwargs
        self.activate_hybrid(
            hybrid_functional=hybrid_functional,
            hf_fraction=hf_fraction,
            gga_x_fraction=gga_x_fraction,
            gga_c_fraction=gga_c_fraction,
            max_memory=max_memory,
            cutoff_radius=cutoff_radius,
            omega=omega,
            aux_basis=aux_basis,
            admm=admm,
            eps_schwarz=eps_schwarz,
            eps_schwarz_forces=eps_schwarz_forces,
            screen_on_initial_p=screen_on_initial_p,
            screen_p_forces=screen_p_forces,
        )
        # User overrides are applied last so they win over the defaults.
        self.update(override_default_params)
class HybridRelaxSet(RelaxSet):
    """
    Geometry relaxation using hybrid DFT with the ADMM formalism in Cp2k.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        hybrid_functional: str = "PBE0",
        hf_fraction: float = 0.25,
        project_name: str = "Hybrid-Relax",
        gga_x_fraction: float = 0.75,
        gga_c_fraction: float = 1,
        override_default_params: Dict = None,
        max_memory: int = 2000,
        cutoff_radius: float = 8.0,
        omega: float = 0.2,
        aux_basis: Union[Dict, None] = None,
        admm: bool = True,
        eps_schwarz: float = 1e-6,
        eps_schwarz_forces: float = 1e-6,
        screen_on_initial_p: bool = True,
        screen_p_forces: bool = True,
        **kwargs,
    ):
        """
        Args:
            structure: pymatgen structure object
            hybrid_functional: hybrid dft functional to use (currently select between HSE06 and PBE0)
            hf_fraction: percentage of exact HF to mix-in
            project_name: what to call this project
            gga_x_fraction: percentage of gga exchange to use
            gga_c_fraction: percentage of gga correlation to use
            override_default_params: override settings (see above). Defaults to no overrides.

        The remaining arguments (max_memory, cutoff_radius, omega, aux_basis, admm,
        eps_schwarz, eps_schwarz_forces, screen_on_initial_p, screen_p_forces) are
        forwarded unchanged to DftSet.activate_hybrid; see that method for their meaning.
        """
        super().__init__(structure, project_name=project_name, **kwargs)

        # Normalize here instead of using "= {}" in the signature, which would be a
        # shared mutable default across all instances.
        override_default_params = override_default_params if override_default_params is not None else {}

        self.structure = structure
        self.hybrid_functional = hybrid_functional
        self.hf_fraction = hf_fraction
        self.project_name = project_name
        self.gga_x_fraction = gga_x_fraction
        self.gga_c_fraction = gga_c_fraction
        self.override_default_params = override_default_params
        self.max_memory = max_memory
        self.cutoff_radius = cutoff_radius
        self.omega = omega
        self.aux_basis = aux_basis
        self.admm = admm
        self.eps_schwarz = eps_schwarz
        self.eps_schwarz_forces = eps_schwarz_forces
        self.screen_on_initial_p = screen_on_initial_p
        self.screen_p_forces = screen_p_forces
        self.kwargs = kwargs
        self.activate_hybrid(
            hybrid_functional=hybrid_functional,
            hf_fraction=hf_fraction,
            gga_x_fraction=gga_x_fraction,
            gga_c_fraction=gga_c_fraction,
            max_memory=max_memory,
            cutoff_radius=cutoff_radius,
            omega=omega,
            aux_basis=aux_basis,
            admm=admm,
            eps_schwarz=eps_schwarz,
            eps_schwarz_forces=eps_schwarz_forces,
            screen_on_initial_p=screen_on_initial_p,
            screen_p_forces=screen_p_forces,
        )
        # User overrides are applied last so they win over the defaults.
        self.update(override_default_params)
class HybridCellOptSet(CellOptSet):
    """
    Cell optimization using hybrid DFT with the ADMM formalism in Cp2k.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        hybrid_functional: str = "PBE0",
        hf_fraction: float = 0.25,
        project_name: str = "Hybrid-CellOpt",
        gga_x_fraction: float = 0.75,
        gga_c_fraction: float = 1,
        override_default_params: Dict = None,
        max_memory: int = 2000,
        cutoff_radius: float = 8.0,
        omega: float = 0.2,
        aux_basis: Union[Dict, None] = None,
        admm: bool = True,
        eps_schwarz: float = 1e-6,
        eps_schwarz_forces: float = 1e-6,
        screen_on_initial_p: bool = True,
        screen_p_forces: bool = True,
        **kwargs,
    ):
        """
        Args:
            structure: pymatgen structure object
            hybrid_functional: hybrid dft functional to use (currently select between HSE06 and PBE0)
            hf_fraction: percentage of exact HF to mix-in
            project_name: what to call this project
            gga_x_fraction: percentage of gga exchange to use
            gga_c_fraction: percentage of gga correlation to use
            override_default_params: override settings (see above). Defaults to no overrides.

        The remaining arguments (max_memory, cutoff_radius, omega, aux_basis, admm,
        eps_schwarz, eps_schwarz_forces, screen_on_initial_p, screen_p_forces) are
        forwarded unchanged to DftSet.activate_hybrid; see that method for their meaning.
        """
        super().__init__(structure, project_name=project_name, **kwargs)

        # Normalize here instead of using "= {}" in the signature, which would be a
        # shared mutable default across all instances.
        override_default_params = override_default_params if override_default_params is not None else {}

        self.structure = structure
        self.hybrid_functional = hybrid_functional
        self.hf_fraction = hf_fraction
        self.project_name = project_name
        self.gga_x_fraction = gga_x_fraction
        self.gga_c_fraction = gga_c_fraction
        self.override_default_params = override_default_params
        self.max_memory = max_memory
        self.cutoff_radius = cutoff_radius
        self.omega = omega
        self.aux_basis = aux_basis
        self.admm = admm
        self.eps_schwarz = eps_schwarz
        self.eps_schwarz_forces = eps_schwarz_forces
        self.screen_on_initial_p = screen_on_initial_p
        self.screen_p_forces = screen_p_forces
        self.kwargs = kwargs
        self.activate_hybrid(
            hybrid_functional=hybrid_functional,
            hf_fraction=hf_fraction,
            gga_x_fraction=gga_x_fraction,
            gga_c_fraction=gga_c_fraction,
            max_memory=max_memory,
            cutoff_radius=cutoff_radius,
            omega=omega,
            aux_basis=aux_basis,
            admm=admm,
            eps_schwarz=eps_schwarz,
            eps_schwarz_forces=eps_schwarz_forces,
            screen_on_initial_p=screen_on_initial_p,
            screen_p_forces=screen_p_forces,
        )
        # User overrides are applied last so they win over the defaults.
        self.update(override_default_params)
|
gmatteo/pymatgen
|
pymatgen/io/cp2k/sets.py
|
Python
|
mit
| 48,957
|
[
"CP2K",
"Gaussian",
"VASP",
"pymatgen"
] |
e17344c993f5df00aa3a278c075ccd8d05f7c5e48936e354d898f8151e7d4c75
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fstrack(MakefilePackage):
    """Package with tools to analyze symmetry components of elastic tensors,
    predict synthetic waveforms and compute automated shear wave splitting
    along ray paths, and to track finite strain and predict LPO from mantle
    flow given on GMT/netcdf grds."""
    homepage = "http://www-udc.ig.utexas.edu/external/becker/data.html#fstrack"
    url = "http://www-udc.ig.utexas.edu/external/becker/software/fstrack-0.5.3.092918.tgz"
    version('0.5.3.092918', sha256='34b31687fdfa207b9659425238b805eaacf0b0209e7e3343c1a3cb4c9e62345d')
    variant('flow', default=True, description='Build the flow tracker')
    # GMT (4.x only) and netCDF are referenced solely by the flow tracker, so the
    # dependencies are conditional on the '+flow' variant.
    depends_on('gmt@4.0:4.999', when='+flow')
    depends_on('netcdf-c', when='+flow')
    # Disable parallel make for this package's build.
    parallel = False
    def setup_build_environment(self, env):
        """Export compiler and flag variables that the upstream makefiles read."""
        # Compilers
        env.set('F90', spack_fc)
        # Compiler flags (assumes GCC)
        env.set('CFLAGS', '-O2')
        env.set('FFLAGS', '-ffixed-line-length-132 -x f77-cpp-input -O2')
        env.set('FFLAGS_DEBUG', '-g -x f77-cpp-input')
        env.set('F90FLAGS', '-O2 -x f95-cpp-input')
        env.set('F90FLAGS_DEBUG', '-g -x f95-cpp-input')
        env.set('LDFLAGS', '-lm')
        if '+flow' in self.spec:
            # Point the makefiles at the Spack-provided GMT and netCDF installs.
            env.set('GMTHOME', self.spec['gmt'].prefix)
            env.set('NETCDFDIR', self.spec['netcdf-c'].prefix)
    def build(self, spec, prefix):
        """Run make in each sub-directory; eispack and d-rex are built before fstrack."""
        with working_dir('eispack'):
            make()
        with working_dir('d-rex'):
            make()
        with working_dir('fstrack'):
            if '+flow' in spec:
                # 'really_all' is the upstream target used when the flow tracker is enabled.
                make('really_all')
            else:
                make()
|
iulian787/spack
|
var/spack/repos/builtin/packages/fstrack/package.py
|
Python
|
lgpl-2.1
| 1,871
|
[
"NetCDF"
] |
284c163ba8528dcb159e253708b94e56a783bd9508301865c7e2e9782652247e
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import math
import os
import json
import collections
import itertools
from abc import ABCMeta, abstractmethod
import random
import warnings
from fnmatch import fnmatch
import re
import functools
from math import gcd
import numpy as np
from monty.dev import deprecated
from pymatgen.core.operations import SymmOp
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.json import MSONable
from pymatgen.core.sites import Site, PeriodicSite
from pymatgen.core.bonds import CovalentBond, get_bond_length
from pymatgen.core.composition import Composition
from pymatgen.util.coord import get_angle, all_distances, \
lattice_points_in_supercell
from pymatgen.core.units import Mass, Length
from monty.io import zopen
"""
This module provides classes used to define a non-periodic molecule and a
periodic structure.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
class SiteCollection(collections.abc.Sequence, metaclass=ABCMeta):
    """
    Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.
    This serves as a base class for Molecule (a collection of Site, i.e., no
    periodicity) and Structure (a collection of PeriodicSites, i.e.,
    periodicity). Not meant to be instantiated directly.
    """
    # Tolerance in Angstrom for determining if sites are too close.
    DISTANCE_TOLERANCE = 0.5
    @property
    @abstractmethod
    def sites(self):
        """
        Returns a tuple of sites.
        """
        return
    @abstractmethod
    def get_distance(self, i: int, j: int) -> float:
        """
        Returns distance between sites at index i and j.
        Args:
            i: Index of first site
            j: Index of second site
        Returns:
            Distance between sites at index i and index j.
        """
        return
    @property
    def distance_matrix(self):
        """
        Returns the distance matrix between all sites in the structure. For
        periodic structures, this is overwritten to return the nearest image
        distance.
        """
        return all_distances(self.cart_coords, self.cart_coords)
    @property
    def species(self):
        """
        Only works for ordered structures.
        Disordered structures will raise an AttributeError.
        Returns:
            ([Specie]) List of species at each site of the structure.
        """
        return [site.specie for site in self]
    @property
    def species_and_occu(self):
        """
        List of species and occupancies at each site of the structure.
        """
        return [site.species for site in self]
    @property
    def ntypesp(self):
        """Number of types of atoms."""
        return len(self.types_of_specie)
    @property
    def types_of_specie(self):
        """
        List of types of specie. Only works for ordered structures.
        Disordered structures will raise TypeError.
        """
        if not self.is_ordered:
            raise TypeError("""\
types_of_species cannot be used with disordered structures and partial occupancies.
Use OrderDisorderedStructureTransformation or EnumerateStructureTransformation
to build an appropriate supercell from partial occupancies.""")
        # Cannot use set since we want a deterministic algorithm.
        types = []
        for site in self:
            if site.specie not in types:
                types.append(site.specie)
        return types
    def group_by_types(self):
        """Iterate over species grouped by type"""
        # Yields every site of the first type, then every site of the second, etc.
        for t in self.types_of_specie:
            for site in self:
                if site.specie == t:
                    yield site
    def indices_from_symbol(self, symbol: str) -> tuple:
        """
        Returns a tuple with the sequential indices of the sites
        that contain an element with the given chemical symbol.
        """
        return tuple((i for i, specie in enumerate(self.species)
                      if specie.symbol == symbol))
    @property
    def symbol_set(self):
        """
        Tuple with the set of chemical symbols.
        Note that len(symbol_set) == len(types_of_specie)
        """
        return tuple((specie.symbol for specie in self.types_of_specie))
    @property
    def atomic_numbers(self):
        """List of atomic numbers."""
        return [site.specie.number for site in self]
    @property
    def site_properties(self):
        """
        Returns the site properties as a dict of sequences. E.g.,
        {"magmom": (5,-5), "charge": (-4,4)}.
        """
        props = {}
        prop_keys = set()
        # Collect the union of property keys over all sites so that every key
        # maps to a full-length list; sites missing a key contribute None.
        for site in self:
            prop_keys.update(site.properties.keys())
        for k in prop_keys:
            props[k] = [site.properties.get(k, None) for site in self]
        return props
    def __contains__(self, site):
        return site in self.sites
    def __iter__(self):
        return self.sites.__iter__()
    def __getitem__(self, ind):
        return self.sites[ind]
    def __len__(self):
        return len(self.sites)
    def __hash__(self):
        # for now, just use the composition hash code.
        return self.composition.__hash__()
    @property
    def num_sites(self):
        """
        Number of sites.
        """
        return len(self)
    @property
    def cart_coords(self):
        """
        Returns a np.array of the cartesian coordinates of sites in the
        structure.
        """
        return np.array([site.coords for site in self])
    @property
    def formula(self):
        """
        (str) Returns the formula.
        """
        return self.composition.formula
    @property
    def composition(self):
        """
        (Composition) Returns the composition
        """
        # Sum occupancies per species across all sites.
        elmap = collections.defaultdict(float)
        for site in self:
            for species, occu in site.species.items():
                elmap[species] += occu
        return Composition(elmap)
    @property
    def charge(self):
        """
        Returns the net charge of the structure based on oxidation states. If
        Elements are found, a charge of 0 is assumed.
        """
        charge = 0
        for site in self:
            for specie, amt in site.species.items():
                # Plain Element objects have no oxi_state attribute, hence the
                # getattr default of 0.
                charge += getattr(specie, "oxi_state", 0) * amt
        return charge
    @property
    def is_ordered(self):
        """
        Checks if structure is ordered, meaning no partial occupancies in any
        of the sites.
        """
        return all((site.is_ordered for site in self))
    def get_angle(self, i: int, j: int, k: int) -> float:
        """
        Returns angle specified by three sites.
        Args:
            i: Index of first site.
            j: Index of second site.
            k: Index of third site.
        Returns:
            Angle in degrees.
        """
        # Angle at vertex j, formed by the vectors j->i and j->k.
        v1 = self[i].coords - self[j].coords
        v2 = self[k].coords - self[j].coords
        return get_angle(v1, v2, units="degrees")
    def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
        """
        Returns dihedral angle specified by four sites.
        Args:
            i: Index of first site
            j: Index of second site
            k: Index of third site
            l: Index of fourth site
        Returns:
            Dihedral angle in degrees.
        """
        v1 = self[k].coords - self[l].coords
        v2 = self[j].coords - self[k].coords
        v3 = self[i].coords - self[j].coords
        # Standard atan2-based dihedral formula using the two plane normals.
        v23 = np.cross(v2, v3)
        v12 = np.cross(v1, v2)
        return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),
                                       np.dot(v12, v23)))
    def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:
        """
        True if SiteCollection does not contain atoms that are too close
        together. Note that the distance definition is based on type of
        SiteCollection. Cartesian distances are used for non-periodic
        Molecules, while PBC is taken into account for periodic structures.
        Args:
            tol (float): Distance tolerance. Default is 0.5A.
        Returns:
            (bool) True if SiteCollection does not contain atoms that are too
            close together.
        """
        if len(self.sites) == 1:
            return True
        # Strict upper triangle: excludes the zero diagonal and duplicate pairs.
        all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
        return bool(np.min(all_dists) > tol)
    @abstractmethod
    def to(self, fmt: str = None, filename: str = None):
        """
        Generates well-known string representations of SiteCollections (e.g.,
        molecules / structures). Should return a string type or write to a file.
        """
        pass
    @classmethod
    @abstractmethod
    def from_str(cls, input_string: str, fmt: str):
        """
        Reads in SiteCollection from a string.
        """
        pass
    @classmethod
    @abstractmethod
    def from_file(cls, filename: str):
        """
        Reads in SiteCollection from a filename.
        """
        pass
    def add_site_property(self, property_name, values):
        """
        Adds a property to a site.
        Args:
            property_name (str): The name of the property to add.
            values (list): A sequence of values. Must be same length as
                number of sites.
        """
        if len(values) != len(self.sites):
            raise ValueError("Values must be same length as sites.")
        for site, val in zip(self.sites, values):
            site.properties[property_name] = val
    def remove_site_property(self, property_name):
        """
        Removes a property from all sites.
        Args:
            property_name (str): The name of the property to remove.
        """
        for site in self.sites:
            del site.properties[property_name]
    def replace_species(self, species_mapping):
        """
        Swap species.
        Args:
            species_mapping (dict): dict of species to swap. Species can be
                elements too. E.g., {Element("Li"): Element("Na")} performs
                a Li for Na substitution. The second species can be a
                sp_and_occu dict. For example, a site with 0.5 Si that is
                passed the mapping {Element('Si): {Element('Ge'):0.75,
                Element('C'):0.25} } will have .375 Ge and .125 C.
        """
        species_mapping = {get_el_sp(k): v
                           for k, v in species_mapping.items()}
        sp_to_replace = set(species_mapping.keys())
        sp_in_structure = set(self.composition.keys())
        if not sp_in_structure.issuperset(sp_to_replace):
            warnings.warn(
                "Some species to be substituted are not present in "
                "structure. Pls check your input. Species to be "
                "substituted = %s; Species in structure = %s"
                % (sp_to_replace, sp_in_structure))
        for site in self._sites:
            if sp_to_replace.intersection(site.species):
                c = Composition()
                for sp, amt in site.species.items():
                    new_sp = species_mapping.get(sp, sp)
                    try:
                        # The mapped value may itself be a composition-like
                        # mapping (partial-occupancy substitution).
                        c += Composition(new_sp) * amt
                    except Exception:
                        c += {new_sp: amt}
                site.species = c
    def add_oxidation_state_by_element(self, oxidation_states):
        """
        Add oxidation states.
        Args:
            oxidation_states (dict): Dict of oxidation states.
                E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
        """
        try:
            for site in self.sites:
                new_sp = {}
                for el, occu in site.species.items():
                    sym = el.symbol
                    new_sp[Specie(sym, oxidation_states[sym])] = occu
                site.species = new_sp
        except KeyError:
            # An element in the structure is missing from the supplied dict.
            raise ValueError("Oxidation state of all elements must be "
                             "specified in the dictionary.")
    def add_oxidation_state_by_site(self, oxidation_states):
        """
        Add oxidation states to a structure by site.
        Args:
            oxidation_states (list): List of oxidation states.
                E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
        """
        if len(oxidation_states) != len(self.sites):
            raise ValueError("Oxidation states of all sites must be "
                             "specified.")
        for site, ox in zip(self.sites, oxidation_states):
            new_sp = {}
            for el, occu in site.species.items():
                sym = el.symbol
                new_sp[Specie(sym, ox)] = occu
            site.species = new_sp
    def remove_oxidation_states(self):
        """
        Removes oxidation states from a structure.
        """
        for site in self.sites:
            # defaultdict merges occupancies of species that collapse onto the
            # same plain Element once charge is dropped.
            new_sp = collections.defaultdict(float)
            for el, occu in site.species.items():
                sym = el.symbol
                new_sp[Element(sym)] += occu
            site.species = new_sp
    def add_oxidation_state_by_guess(self, **kwargs):
        """
        Decorates the structure with oxidation state, guessing
        using Composition.oxi_state_guesses()
        Args:
            **kwargs: parameters to pass into oxi_state_guesses()
        """
        oxid_guess = self.composition.oxi_state_guesses(**kwargs)
        # Fall back to all-zero oxidation states when no guess is found.
        oxid_guess = oxid_guess or \
            [dict([(e.symbol, 0) for e in self.composition])]
        self.add_oxidation_state_by_element(oxid_guess[0])
    def add_spin_by_element(self, spins):
        """
        Add spin states to a structure.
        Args:
            spins (dict): Dict of spins associated with
                elements or species, e.g. {"Ni":+5} or {"Ni2+":5}
        """
        for site in self.sites:
            new_sp = {}
            for sp, occu in site.species.items():
                sym = sp.symbol
                oxi_state = getattr(sp, "oxi_state", None)
                # Species keys (e.g. "Ni2+") take precedence over bare element
                # symbols (e.g. "Ni") when both are present in `spins`.
                new_sp[Specie(sym, oxidation_state=oxi_state,
                              properties={'spin': spins.get(str(sp), spins.get(sym, None))})] = occu
            site.species = new_sp
    def add_spin_by_site(self, spins):
        """
        Add spin states to a structure by site.
        Args:
            spins (list): List of spins
                E.g., [+5, -5, 0, 0]
        """
        if len(spins) != len(self.sites):
            raise ValueError("Spin of all sites must be "
                             "specified in the dictionary.")
        for site, spin in zip(self.sites, spins):
            new_sp = {}
            for sp, occu in site.species.items():
                sym = sp.symbol
                oxi_state = getattr(sp, "oxi_state", None)
                new_sp[Specie(sym, oxidation_state=oxi_state,
                              properties={'spin': spin})] = occu
            site.species = new_sp
    def remove_spin(self):
        """
        Removes spin states from a structure.
        """
        for site in self.sites:
            # defaultdict merges occupancies of species that become identical
            # once the spin property is dropped.
            new_sp = collections.defaultdict(float)
            for sp, occu in site.species.items():
                oxi_state = getattr(sp, "oxi_state", None)
                new_sp[Specie(sp.symbol, oxidation_state=oxi_state)] += occu
            site.species = new_sp
    def extract_cluster(self, target_sites, **kwargs):
        """
        Extracts a cluster of atoms based on bond lengths
        Args:
            target_sites ([Site]): List of initial sites to nucleate cluster.
            \\*\\*kwargs: kwargs passed through to CovalentBond.is_bonded.
        Returns:
            [Site/PeriodicSite] Cluster of atoms.
        """
        cluster = list(target_sites)
        others = [site for site in self if site not in cluster]
        size = 0
        # Grow the cluster iteratively: each pass absorbs every remaining site
        # that bonds to something already in the cluster; stop at a fixed point.
        while len(cluster) > size:
            size = len(cluster)
            new_others = []
            for site in others:
                for site2 in cluster:
                    if CovalentBond.is_bonded(site, site2, **kwargs):
                        cluster.append(site)
                        break
                else:
                    new_others.append(site)
            others = new_others
        return cluster
class IStructure(SiteCollection, MSONable):
"""
Basic immutable Structure object with periodicity. Essentially a sequence
of PeriodicSites having a common lattice. IStructure is made to be
(somewhat) immutable so that they can function as keys in a dict. To make
modifications, use the standard Structure object instead. Structure
extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a
structure is equivalent to going through the sites in sequence.
"""
def __init__(self, lattice: Lattice, species: list, coords: list,
charge: float = None, validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None):
"""
Create a periodic structure.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError("The list of atomic species must be of the"
" same length as the list of fractional"
" coordinates.")
if isinstance(lattice, Lattice):
self._lattice = lattice
else:
self._lattice = Lattice(lattice)
sites = []
for i in range(len(species)):
prop = None
if site_properties:
prop = {k: v[i]
for k, v in site_properties.items()}
sites.append(
PeriodicSite(species[i], coords[i], self._lattice,
to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError(("Structure contains sites that are ",
"less than 0.01 Angstrom apart!"))
self._charge = charge
@classmethod
def from_sites(cls, sites, charge=None, validate_proximity=False,
to_unit_cell=False):
"""
Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
(Structure) Note that missing properties are set as None.
"""
if len(sites) < 1:
raise ValueError("You need at least one site to construct a %s" %
cls)
prop_keys = []
props = {}
lattice = None
for i, site in enumerate(sites):
if not lattice:
lattice = site.lattice
elif site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any((vv is None for vv in v)):
warnings.warn("Not all sites have property %s. Missing values "
"are set to None." % k)
return cls(lattice, [site.species for site in sites],
[site.frac_coords for site in sites],
charge=charge,
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell)
@classmethod
def from_spacegroup(cls, sg, lattice, species, coords, site_properties=None,
coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
sgp = SpaceGroup.from_int_number(i)
except ValueError:
sgp = SpaceGroup(sg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not sgp.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
sgp.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
frac_coords = np.array(coords, dtype=np.float) \
if not coords_are_cartesian else \
lattice.get_fractional_coords(coords)
props = {} if site_properties is None else site_properties
all_sp = []
all_coords = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c) in enumerate(zip(species, frac_coords)):
cc = sgp.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
for k, v in props.items():
all_site_properties[k].extend([v[i]] * len(cc))
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties)
@classmethod
def from_magnetic_spacegroup(
cls, msg, lattice, species, coords, site_properties,
transform_setting=None, coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a magnetic spacegroup. Note that only
symmetrically distinct species, coords and magmoms should be provided.]
All equivalent sites are generated from the spacegroup operations.
Args:
msg (str/list/:class:`pymatgen.symmetry.maggroups.MagneticSpaceGroup`):
The magnetic spacegroup.
If a string, it will be interpreted as one of the notations
supported by MagneticSymmetryGroup, e.g., "R-3'c" or "Fm'-3'm".
If a list of two ints, it will be interpreted as the number of
the spacegroup in its Belov, Neronova and Smirnova (BNS) setting.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Unlike Structure.from_spacegroup(),
this argument is mandatory, since magnetic moment information
has to be included. Note that the *direction* of the supplied
magnetic moment relative to the crystal is important, even if
the resulting structure is used for collinear calculations.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
if 'magmom' not in site_properties:
raise ValueError('Magnetic moments have to be defined.')
else:
magmoms = [Magmom(m) for m in site_properties['magmom']]
if not isinstance(msg, MagneticSpaceGroup):
msg = MagneticSpaceGroup(msg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not msg.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
msg.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
if len(species) != len(magmoms):
raise ValueError(
"Supplied species and magmom lengths (%d vs %d) are "
"different!" % (len(species), len(magmoms))
)
frac_coords = coords if not coords_are_cartesian else \
lattice.get_fractional_coords(coords)
all_sp = []
all_coords = []
all_magmoms = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c, m) in enumerate(zip(species, frac_coords, magmoms)):
cc, mm = msg.get_orbit(c, m, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
all_magmoms.extend(mm)
for k, v in site_properties.items():
if k != 'magmom':
all_site_properties[k].extend([v[i]] * len(cc))
all_site_properties['magmom'] = all_magmoms
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties)
@property
def charge(self):
"""
Overall charge of the structure
"""
if self._charge is None:
return super().charge
else:
return self._charge
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this should return the nearest image distance.
"""
return self.lattice.get_all_distances(self.frac_coords,
self.frac_coords)
@property
def sites(self):
"""
Returns an iterator for the sites in the Structure.
"""
return self._sites
@property
def lattice(self):
"""
Lattice of the structure.
"""
return self._lattice
@property
def density(self):
"""
Returns the density in units of g/cc
"""
m = Mass(self.composition.weight, "amu")
return m.to("g") / (self.volume * Length(1, "ang").to("cm") ** 3)
def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):
"""
Convenience method to quickly get the spacegroup of a structure.
Args:
symprec (float): Same definition as in SpacegroupAnalyzer.
Defaults to 1e-2.
angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
Defaults to 5 degrees.
Returns:
spacegroup_symbol, international_number
"""
# Import within method needed to avoid cyclic dependency.
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
a = SpacegroupAnalyzer(self, symprec=symprec,
angle_tolerance=angle_tolerance)
return a.get_space_group_symbol(), a.get_space_group_number()
def matches(self, other, **kwargs):
"""
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching fitting.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
(bool) True is the structures are similar under some affine
transformation.
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher(**kwargs)
return m.fit(Structure.from_sites(self), Structure.from_sites(other))
def __eq__(self, other):
if other is self:
return True
if other is None:
return False
if len(self) != len(other):
return False
if self.lattice != other.lattice:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
    def __mul__(self, scaling_matrix):
        """
        Makes a supercell. Allowing to have sites outside the unit cell
        Args:
            scaling_matrix: A scaling matrix for transforming the lattice
                vectors. Has to be all integers. Several options are possible:
                a. A full 3x3 scaling matrix defining the linear combination
                   the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
                   1]] generates a new structure with lattice vectors a' =
                   2a + b, b' = 3b, c' = c where a, b, and c are the lattice
                   vectors of the original structure.
                b. An sequence of three scaling factors. E.g., [2, 1, 1]
                   specifies that the supercell should have dimensions 2a x b x
                   c.
                c. A number, which simply scales all lattice vectors by the
                   same factor.
        Returns:
            Supercell structure. Note that a Structure is always returned,
            even if the input structure is a subclass of Structure. This is
            to avoid different arguments signatures from causing problems. If
            you prefer a subclass to return its own type, you need to override
            this method in the subclass.
        """
        # Normalize input to a 3x3 integer matrix: a scalar or 3-vector is
        # promoted to a diagonal matrix via multiplication with np.eye(3).
        scale_matrix = np.array(scaling_matrix, np.int16)
        if scale_matrix.shape != (3, 3):
            scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
        # Rows of the new lattice are integer combinations of the old rows.
        new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))
        # Lattice points of the original cell contained in the supercell,
        # expressed in fractional coords of the new lattice.
        f_lat = lattice_points_in_supercell(scale_matrix)
        c_lat = new_lattice.get_cartesian_coords(f_lat)
        new_sites = []
        for site in self:
            # Replicate each site once per contained lattice point; sites are
            # deliberately NOT folded back into the unit cell
            # (to_unit_cell=False), per the docstring.
            for v in c_lat:
                s = PeriodicSite(site.species, site.coords + v,
                                 new_lattice, properties=site.properties,
                                 coords_are_cartesian=True, to_unit_cell=False)
                new_sites.append(s)
        # Total charge scales with the cell multiplicity (the determinant).
        new_charge = self._charge * np.linalg.det(scale_matrix) if self._charge else None
        return Structure.from_sites(new_sites, charge=new_charge)
def __rmul__(self, scaling_matrix):
"""
Similar to __mul__ to preserve commutativeness.
"""
return self.__mul__(scaling_matrix)
@property
def frac_coords(self):
"""
Fractional coordinates as a Nx3 numpy array.
"""
return np.array([site.frac_coords for site in self._sites])
@property
def volume(self):
"""
Returns the volume of the structure.
"""
return self._lattice.volume
def get_distance(self, i, j, jimage=None):
"""
Get distance between site i and j assuming periodic boundary
conditions. If the index jimage of two sites atom j is not specified it
selects the jimage nearest to the i atom and returns the distance and
jimage indices in terms of lattice vector translations if the index
jimage of atom j is specified it returns the distance between the i
atom and the specified jimage atom.
Args:
i (int): Index of first site
j (int): Index of second site
jimage: Number of lattice translations in each lattice direction.
Default is None for nearest image.
Returns:
distance
"""
return self[i].distance(self[j], jimage)
def get_sites_in_sphere(self, pt, r, include_index=False, include_image=False):
"""
Find all sites within a sphere from the point. This includes sites
in other periodic images.
Algorithm:
1. place sphere of radius r in crystal and determine minimum supercell
(parallelpiped) which would contain a sphere of radius r. for this
we need the projection of a_1 on a unit vector perpendicular
to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
determine how many a_1"s it will take to contain the sphere.
Nxmax = r * length_of_b_1 / (2 Pi)
2. keep points falling within r.
Args:
pt (3x1 array): cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
include_image (bool): Whether to include the supercell image
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
site_fcoords = np.mod(self.frac_coords, 1)
neighbors = []
for fcoord, dist, i, img in self._lattice.get_points_in_sphere(
site_fcoords, pt, r):
nnsite = PeriodicSite(self[i].species,
fcoord, self._lattice,
properties=self[i].properties)
# Get the neighbor data
nn_data = (nnsite, dist) if not include_index else (nnsite, dist, i)
if include_image:
nn_data += (img,)
neighbors.append(nn_data)
return neighbors
def get_neighbors(self, site, r, include_index=False, include_image=False):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Which is the center of the sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
include_image (bool): Whether to include the supercell image
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
If include_index == True, the tuple for each neighbor also includes
the index of the neighbor.
If include_supercell == True, the tuple for each neighbor also includes
the index of supercell.
"""
nn = self.get_sites_in_sphere(site.coords, r,
include_index=include_index,
include_image=include_image)
return [d for d in nn if site != d[0]]
    def get_all_neighbors(self, r, include_index=False, include_image=False,
                          include_site=True):
        """
        Get neighbors for each atom in the unit cell, out to a distance r
        Returns a list of list of neighbors for each site in structure.
        Use this method if you are planning on looping over all sites in the
        crystal. If you only want neighbors for a particular site, use the
        method get_neighbors as it may not have to build such a large supercell
        However if you are looping over all sites in the crystal, this method
        is more efficient since it only performs one pass over a large enough
        supercell to contain all possible atoms out to a distance r.
        The return type is a [(site, dist) ...] since most of the time,
        subsequent processing requires the distance.
        A note about periodic images: Before computing the neighbors, this
        operation translates all atoms to within the unit cell (having
        fractional coordinates within [0,1)). This means that the "image" of a
        site does not correspond to how much it has been translates from its
        current position, but which image of the unit cell it resides.
        Args:
            r (float): Radius of sphere.
            include_index (bool): Whether to include the non-supercell site
                in the returned data
            include_image (bool): Whether to include the supercell image
                in the returned data
            include_site (bool): Whether to include the site in the returned
                data. Defaults to True.
        Returns:
            A list of a list of nearest neighbors for each site, i.e.,
            [[(site, dist, index, image) ...], ..]
            Index only supplied if include_index = True.
            The index is the index of the site in the original (non-supercell)
            structure. This is needed for ewaldmatrix by keeping track of which
            sites contribute to the ewald sum.
            Image only supplied if include_image = True
            Site is supplied only if include_site = True (the default).
        """
        # Use same algorithm as get_sites_in_sphere to determine supercell but
        # loop over all atoms in crystal
        recp_len = np.array(self.lattice.reciprocal_lattice.abc)
        # Number of cell images needed along each direction to guarantee
        # coverage of radius r (0.15 is a small safety margin).
        maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))
        nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr
        nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr
        all_ranges = [np.arange(x, y) for x, y in zip(nmin, nmax)]
        latt = self._lattice
        matrix = latt.matrix
        # One neighbor list per site in the structure.
        neighbors = [list() for _ in range(len(self._sites))]
        # Fold all sites into the unit cell before imaging (see docstring).
        all_fcoords = np.mod(self.frac_coords, 1)
        coords_in_cell = np.dot(all_fcoords, matrix)
        site_coords = self.cart_coords
        indices = np.arange(len(self))
        for image in itertools.product(*all_ranges):
            # Cartesian coords of every (folded) site translated by `image`.
            coords = np.dot(image, matrix) + coords_in_cell
            all_dists = all_distances(coords, site_coords)
            # Within r, but exclude a site matching itself (dist ~ 0).
            all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8)
            for (j, d, within_r) in zip(indices, all_dists, all_within_r):
                if include_site:
                    # nnsite is the imaged copy of site j (the neighbor).
                    nnsite = PeriodicSite(self[j].species, coords[j],
                                          latt, properties=self[j].properties,
                                          coords_are_cartesian=True)
                # Every original site i within r of this image of j records
                # the imaged j as a neighbor.
                for i in indices[within_r]:
                    item = []
                    if include_site:
                        item.append(nnsite)
                    item.append(d[i])
                    if include_index:
                        # j is the neighbor's index in the original structure.
                        item.append(j)
                    # Add the image, if requested
                    if include_image:
                        item.append(image)
                    neighbors[i].append(item)
        return neighbors
def get_neighbors_in_shell(self, origin, r, dr, include_index=False, include_image=False):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
include_index (bool): Whether to include the non-supercell site
in the returned data
include_image (bool): Whether to include the supercell image
in the returned data
Returns:
[(site, dist, index) ...] since most of the time, subsequent
processing
requires the distance. Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
Image only supplied if include_image = True
"""
outer = self.get_sites_in_sphere(origin, r + dr,
include_index=include_index,
include_image=include_image)
inner = r - dr
return [t for t in outer if t[1] > inner]
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
return self.__class__.from_sites(sites, charge=self._charge)
def get_reduced_structure(self, reduction_algo="niggli"):
"""
Get a reduced structure.
Args:
reduction_algo (str): The lattice reduction algorithm to use.
Currently supported options are "niggli" or "LLL".
"""
if reduction_algo == "niggli":
reduced_latt = self._lattice.get_niggli_reduced_lattice()
elif reduction_algo == "LLL":
reduced_latt = self._lattice.get_lll_reduced_lattice()
else:
raise ValueError("Invalid reduction algo : {}"
.format(reduction_algo))
if reduced_latt != self.lattice:
return self.__class__(reduced_latt, self.species_and_occu,
self.cart_coords,
coords_are_cartesian=True, to_unit_cell=True,
site_properties=self.site_properties, charge=self._charge)
else:
return self.copy()
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
if not sanitize:
return self.__class__(self._lattice,
self.species_and_occu,
self.frac_coords,
charge=self._charge,
site_properties=props)
else:
reduced_latt = self._lattice.get_lll_reduced_lattice()
new_sites = []
for i, site in enumerate(self):
frac_coords = reduced_latt.get_fractional_coords(site.coords)
site_props = {}
for p in props:
site_props[p] = props[p][i]
new_sites.append(PeriodicSite(site.species,
frac_coords, reduced_latt,
to_unit_cell=True,
properties=site_props))
new_sites = sorted(new_sites)
return self.__class__.from_sites(new_sites, charge=self._charge)
    def interpolate(self, end_structure, nimages=10,
                    interpolate_lattices=False, pbc=True, autosort_tol=0):
        """
        Interpolate between this structure and end_structure. Useful for
        construction of NEB inputs.
        Args:
            end_structure (Structure): structure to interpolate between this
                structure and end.
            nimages (int,list): No. of interpolation images or a list of
                interpolation images. Defaults to 10 images.
            interpolate_lattices (bool): Whether to interpolate the lattices.
                Interpolates the lengths and angles (rather than the matrix)
                so orientation may be affected.
            pbc (bool): Whether to use periodic boundary conditions to find
                the shortest path between endpoints.
            autosort_tol (float): A distance tolerance in angstrom in
                which to automatically sort end_structure to match to the
                closest points in this particular structure. This is usually
                what you want in a NEB calculation. 0 implies no sorting.
                Otherwise, a 0.5 value usually works pretty well.
        Returns:
            List of interpolated structures. The starting and ending
            structures included as the first and last structures respectively.
            A total of (nimages + 1) structures are returned.
        """
        # Check length of structures
        if len(self) != len(end_structure):
            raise ValueError("Structures have different lengths!")
        if not (interpolate_lattices or self.lattice == end_structure.lattice):
            raise ValueError("Structures with different lattices!")
        # An integer nimages becomes evenly-spaced fractions 0, 1/n, ..., 1.
        if not isinstance(nimages, collections.abc.Iterable):
            nimages = np.arange(nimages + 1) / nimages
        # Check that both structures have the same species
        for i in range(len(self)):
            if self[i].species != end_structure[i].species:
                raise ValueError("Different species!\nStructure 1:\n" +
                                 str(self) + "\nStructure 2\n" +
                                 str(end_structure))
        start_coords = np.array(self.frac_coords)
        end_coords = np.array(end_structure.frac_coords)
        if autosort_tol:
            # Match each start site to end sites within autosort_tol; a site
            # maps only if exactly one end site is that close.
            dist_matrix = self.lattice.get_all_distances(start_coords,
                                                         end_coords)
            site_mappings = collections.defaultdict(list)
            unmapped_start_ind = []
            for i, row in enumerate(dist_matrix):
                ind = np.where(row < autosort_tol)[0]
                if len(ind) == 1:
                    site_mappings[i].append(ind[0])
                else:
                    unmapped_start_ind.append(i)
            # At most one ambiguous site can be resolved by elimination.
            if len(unmapped_start_ind) > 1:
                raise ValueError("Unable to reliably match structures "
                                 "with auto_sort_tol = %f. unmapped indices "
                                 "= %s" % (autosort_tol, unmapped_start_ind))
            sorted_end_coords = np.zeros_like(end_coords)
            matched = []
            for i, j in site_mappings.items():
                if len(j) > 1:
                    raise ValueError("Unable to reliably match structures "
                                     "with auto_sort_tol = %f. More than one "
                                     "site match!" % autosort_tol)
                sorted_end_coords[i] = end_coords[j[0]]
                matched.append(j[0])
            if len(unmapped_start_ind) == 1:
                # The single leftover start site takes the single leftover
                # end site.
                i = unmapped_start_ind[0]
                j = list(set(range(len(start_coords))).difference(matched))[0]
                sorted_end_coords[i] = end_coords[j]
            end_coords = sorted_end_coords
        vec = end_coords - start_coords
        if pbc:
            # Take the shortest fractional path across periodic boundaries.
            vec -= np.round(vec)
        sp = self.species_and_occu
        structs = []
        if interpolate_lattices:
            # interpolate lattice matrices using polar decomposition
            from scipy.linalg import polar
            # u is unitary (rotation), p is stretch
            u, p = polar(np.dot(end_structure.lattice.matrix.T,
                                np.linalg.inv(self.lattice.matrix.T)))
            lvec = p - np.identity(3)
            lstart = self.lattice.matrix.T
        for x in nimages:
            if interpolate_lattices:
                # Linearly interpolate the stretch component only.
                l_a = np.dot(np.identity(3) + x * lvec, lstart).T
                lat = Lattice(l_a)
            else:
                lat = self.lattice
            fcoords = start_coords + x * vec
            structs.append(self.__class__(lat, sp, fcoords,
                                          site_properties=self.site_properties))
        return structs
def get_miller_index_from_site_indexes(self, site_ids, round_dp=4,
verbose=True):
"""
Get the Miller index of a plane from a set of sites indexes.
A minimum of 3 sites are required. If more than 3 sites are given
the best plane that minimises the distance to all points will be
calculated.
Args:
site_ids (list of int): A list of site indexes to consider. A
minimum of three site indexes are required. If more than three
sites are provided, the best plane that minimises the distance
to all sites will be calculated.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index.
"""
return self.lattice.get_miller_index_from_coords(
self.frac_coords[site_ids], coords_are_cartesian=False,
round_dp=round_dp, verbose=verbose)
    def get_primitive_structure(self, tolerance=0.25, use_site_props=False,
                                constrain_latt=None):
        """
        This finds a smaller unit cell than the input. Sometimes it doesn"t
        find the smallest possible one, so this method is recursively called
        until it is unable to find a smaller cell.
        NOTE: if the tolerance is greater than 1/2 the minimum inter-site
        distance in the primitive cell, the algorithm will reject this lattice.
        Args:
            tolerance (float), Angstroms: Tolerance for each coordinate of a
                particular site. For example, [0.1, 0, 0.1] in cartesian
                coordinates will be considered to be on the same coordinates
                as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
            use_site_props (bool): Whether to account for site properties in
                differntiating sites.
            constrain_latt (list/dict): List of lattice parameters we want to
                preserve, e.g. ["alpha", "c"] or dict with the lattice
                parameter names as keys and values we want the parameters to
                be e.g. {"alpha": 90, "c": 2.5}.
        Returns:
            The most primitive structure found.
        """
        if constrain_latt is None:
            constrain_latt = []

        def site_label(site):
            # Label used to group "equivalent" sites: species string alone,
            # or species string plus sorted properties if requested.
            if not use_site_props:
                return site.species_string
            else:
                d = [site.species_string]
                for k in sorted(site.properties.keys()):
                    d.append(k + "=" + str(site.properties[k]))
                return ", ".join(d)

        # group sites by species string
        sites = sorted(self._sites, key=site_label)
        grouped_sites = [
            list(a[1])
            for a in itertools.groupby(sites, key=site_label)]
        grouped_fcoords = [np.array([s.frac_coords for s in g])
                           for g in grouped_sites]

        # min_vecs are approximate periodicities of the cell. The exact
        # periodicities from the supercell matrices are checked against these
        # first
        min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
        min_vecs = min_fcoords - min_fcoords[0]

        # fractional tolerance in the supercell
        super_ftol = np.divide(tolerance, self.lattice.abc)
        super_ftol_2 = super_ftol * 2

        def pbc_coord_intersection(fc1, fc2, tol):
            """
            Returns the fractional coords in fc1 that have coordinates
            within tolerance to some coordinate in fc2
            """
            d = fc1[:, None, :] - fc2[None, :, :]
            d -= np.round(d)
            np.abs(d, d)
            return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]

        # here we reduce the number of min_vecs by enforcing that every
        # vector in min_vecs approximately maps each site onto a similar site.
        # The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
        # reduction.
        # This reduction is O(n^3) so usually is an improvement. Using double
        # the tolerance because both vectors are approximate
        for g in sorted(grouped_fcoords, key=lambda x: len(x)):
            for f in g:
                min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)

        def get_hnf(fu):
            """
            Returns all possible distinct supercell matrices given a
            number of formula units in the supercell. Batches the matrices
            by the values in the diagonal (for less numpy overhead).
            Computational complexity is O(n^3), and difficult to improve.
            Might be able to do something smart with checking combinations of a
            and b first, though unlikely to reduce to O(n^2).
            """
            def factors(n):
                # Yield every divisor of n (including 1 and n).
                for i in range(1, n + 1):
                    if n % i == 0:
                        yield i

            for det in factors(fu):
                if det == 1:
                    continue
                for a in factors(det):
                    for e in factors(det // a):
                        g = det // a // e
                        # Upper-triangular Hermite normal form matrices with
                        # determinant a*e*g == det.
                        yield det, np.array(
                            [[[a, b, c], [0, e, f], [0, 0, g]]
                             for b, c, f in
                             itertools.product(range(a), range(a),
                                               range(e))])

        # we cant let sites match to their neighbors in the supercell
        grouped_non_nbrs = []
        for gfcoords in grouped_fcoords:
            fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
            fdist -= np.round(fdist)
            np.abs(fdist, fdist)
            non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
            # since we want sites to match to themselves
            np.fill_diagonal(non_nbrs, True)
            grouped_non_nbrs.append(non_nbrs)

        # Candidate cell sizes divide the gcd of the group multiplicities.
        num_fu = functools.reduce(gcd, map(len, grouped_sites))
        for size, ms in get_hnf(num_fu):
            inv_ms = np.linalg.inv(ms)

            # find sets of lattice vectors that are are present in min_vecs
            dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
            dist -= np.round(dist)
            np.abs(dist, dist)
            is_close = np.all(dist < super_ftol, axis=-1)
            any_close = np.any(is_close, axis=-1)
            inds = np.all(any_close, axis=-1)

            for inv_m, m in zip(inv_ms[inds], ms[inds]):
                new_m = np.dot(inv_m, self.lattice.matrix)
                ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))

                valid = True
                new_coords = []
                new_sp = []
                new_props = collections.defaultdict(list)
                for gsites, gfcoords, non_nbrs in zip(grouped_sites,
                                                      grouped_fcoords,
                                                      grouped_non_nbrs):
                    all_frac = np.dot(gfcoords, m)

                    # calculate grouping of equivalent sites, represented by
                    # adjacency matrix
                    fdist = all_frac[None, :, :] - all_frac[:, None, :]
                    fdist = np.abs(fdist - np.round(fdist))
                    close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
                    groups = np.logical_and(close_in_prim, non_nbrs)

                    # check that groups are correct
                    if not np.all(np.sum(groups, axis=0) == size):
                        valid = False
                        break

                    # check that groups are all cliques
                    for g in groups:
                        if not np.all(groups[g][:, g]):
                            valid = False
                            break
                    if not valid:
                        break

                    # add the new sites, averaging positions
                    added = np.zeros(len(gsites))
                    new_fcoords = all_frac % 1
                    for i, group in enumerate(groups):
                        if not added[i]:
                            added[group] = True
                            inds = np.where(group)[0]
                            coords = new_fcoords[inds[0]]
                            # Running PBC-aware average of the group members.
                            for n, j in enumerate(inds[1:]):
                                offset = new_fcoords[j] - coords
                                coords += (offset - np.round(offset)) / (n + 2)
                            new_sp.append(gsites[inds[0]].species)
                            for k in gsites[inds[0]].properties:
                                new_props[k].append(gsites[inds[0]].properties[k])
                            new_coords.append(coords)

                if valid:
                    inv_m = np.linalg.inv(m)
                    new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
                    s = Structure(new_l, new_sp, new_coords,
                                  site_properties=new_props,
                                  coords_are_cartesian=False)

                    # Default behavior
                    p = s.get_primitive_structure(
                        tolerance=tolerance, use_site_props=use_site_props,
                        constrain_latt=constrain_latt
                    ).get_reduced_structure()
                    if not constrain_latt:
                        return p

                    # Only return primitive structures that
                    # satisfy the restriction condition
                    p_latt, s_latt = p.lattice, self.lattice
                    if type(constrain_latt).__name__ == "list":
                        if all([getattr(p_latt, p) == getattr(s_latt, p) for p in constrain_latt]):
                            return p
                    elif type(constrain_latt).__name__ == "dict":
                        if all([getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()]):
                            return p

        return self.copy()
def __repr__(self):
outs = ["Structure Summary", repr(self.lattice)]
if self._charge:
if self._charge >= 0:
outs.append("Overall Charge: +{}".format(self._charge))
else:
outs.append("Overall Charge: -{}".format(self._charge))
for s in self:
outs.append(repr(s))
return "\n".join(outs)
def __str__(self):
outs = ["Full Formula ({s})".format(s=self.composition.formula),
"Reduced Formula: {}"
.format(self.composition.reduced_formula)]
to_s = lambda x: "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
if self._charge:
if self._charge >= 0:
outs.append("Overall Charge: +{}".format(self._charge))
else:
outs.append("Overall Charge: -{}".format(self._charge))
outs.append("Sites ({i})".format(i=len(self)))
data = []
props = self.site_properties
keys = sorted(props.keys())
for i, site in enumerate(self):
row = [str(i), site.species_string]
row.extend([to_s(j) for j in site.frac_coords])
for k in keys:
row.append(props[k][i])
data.append(row)
from tabulate import tabulate
outs.append(tabulate(data, headers=["#", "SP", "a", "b", "c"] + keys,
))
return "\n".join(outs)
def as_dict(self, verbosity=1, fmt=None, **kwargs):
"""
Dict representation of Structure.
Args:
verbosity (int): Verbosity level. Default of 1 includes both
direct and cartesian coordinates for all sites, lattice
parameters, etc. Useful for reading and for insertion into a
database. Set to 0 for an extremely lightweight version
that only includes sufficient information to reconstruct the
object.
fmt (str): Specifies a format for the dict. Defaults to None,
which is the default format used in pymatgen. Other options
include "abivars".
**kwargs: Allow passing of other kwargs needed for certain
formats, e.g., "abivars".
Returns:
JSON serializable dict representation.
"""
if fmt == "abivars":
"""Returns a dictionary with the ABINIT variables."""
from pymatgen.io.abinit.abiobjects import structure_to_abivars
return structure_to_abivars(self, **kwargs)
latt_dict = self._lattice.as_dict(verbosity=verbosity)
del latt_dict["@module"]
del latt_dict["@class"]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"lattice": latt_dict, "sites": []}
for site in self:
site_dict = site.as_dict(verbosity=verbosity)
del site_dict["lattice"]
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d, fmt=None):
"""
Reconstitute a Structure object from a dict representation of Structure
created using as_dict().
Args:
d (dict): Dict representation of structure.
Returns:
Structure object
"""
if fmt == "abivars":
from pymatgen.io.abinit.abiobjects import structure_from_abivars
return structure_from_abivars(cls=cls, **d)
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
charge = d.get("charge", None)
return cls.from_sites(sites, charge=charge)
def to(self, fmt=None, filename=None, **kwargs):
"""
Outputs the structure to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specifies, it overrides whatever the
filename is. Options include "cif", "poscar", "cssr", "json".
Non-case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Defaults is None, i.e. string output.
\\*\\*kwargs: Kwargs passthru to relevant methods. E.g., This allows
the passing of parameters like symprec to the
CifWriter.__init__ method for generation of symmetric cifs.
Returns:
(str) if filename is None. None otherwise.
"""
filename = filename or ""
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename)
if fmt == "cif" or fnmatch(fname.lower(), "*.cif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, **kwargs)
elif fmt == "mcif" or fnmatch(fname.lower(), "*.mcif*"):
from pymatgen.io.cif import CifWriter
writer = CifWriter(self, write_magmoms=True, **kwargs)
elif fmt == "poscar" or fnmatch(fname, "*POSCAR*"):
from pymatgen.io.vasp import Poscar
writer = Poscar(self, **kwargs)
elif fmt == "cssr" or fnmatch(fname.lower(), "*.cssr*"):
from pymatgen.io.cssr import Cssr
writer = Cssr(self, **kwargs)
elif fmt == "json" or fnmatch(fname.lower(), "*.json"):
s = json.dumps(self.as_dict())
if filename:
with zopen(filename, "wt") as f:
f.write("%s" % s)
return s
elif fmt == "xsf" or fnmatch(fname.lower(), "*.xsf*"):
from pymatgen.io.xcrysden import XSF
s = XSF(self).to_string()
if filename:
with zopen(fname, "wt", encoding='utf8') as f:
f.write(s)
return s
elif fmt == 'mcsqs' or fnmatch(fname, "*rndstr.in*") \
or fnmatch(fname, "*lat.in*") \
or fnmatch(fname, "*bestsqs*"):
from pymatgen.io.atat import Mcsqs
s = Mcsqs(self).to_string()
if filename:
with zopen(fname, "wt", encoding='ascii') as f:
f.write(s)
return s
else:
import ruamel.yaml as yaml
if filename:
with zopen(filename, "wt") as f:
yaml.safe_dump(self.as_dict(), f)
return
else:
return yaml.safe_dump(self.as_dict())
if filename:
writer.write_file(filename)
else:
return writer.__str__()
@classmethod
def from_str(cls, input_string, fmt, primitive=False, sort=False,
merge_tol=0.0):
"""
Reads a structure from a string.
Args:
input_string (str): String to parse.
fmt (str): A format specification.
primitive (bool): Whether to find a primitive cell. Defaults to
False.
sort (bool): Whether to sort the sites in accordance to the default
ordering criteria, i.e., electronegativity.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
IStructure / Structure
"""
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp import Poscar
from pymatgen.io.cssr import Cssr
from pymatgen.io.xcrysden import XSF
from pymatgen.io.atat import Mcsqs
fmt = fmt.lower()
if fmt == "cif":
parser = CifParser.from_string(input_string)
s = parser.get_structures(primitive=primitive)[0]
elif fmt == "poscar":
s = Poscar.from_string(input_string, False,
read_velocities=False).structure
elif fmt == "cssr":
cssr = Cssr.from_string(input_string)
s = cssr.structure
elif fmt == "json":
d = json.loads(input_string)
s = Structure.from_dict(d)
elif fmt == "yaml":
import ruamel.yaml as yaml
d = yaml.safe_load(input_string)
s = Structure.from_dict(d)
elif fmt == "xsf":
s = XSF.from_string(input_string).structure
elif fmt == "mcsqs":
s = Mcsqs.structure_from_string(input_string)
else:
raise ValueError("Unrecognized format `%s`!" % fmt)
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
return cls.from_sites(s)
@classmethod
def from_file(cls, filename, primitive=False, sort=False, merge_tol=0.0):
"""
Reads a structure from a file. For example, anything ending in
a "cif" is assumed to be a Crystallographic Information Format file.
Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
vasprun.xml, CSSR, Netcdf and pymatgen's JSON serialized structures.
Args:
filename (str): The filename to read from.
primitive (bool): Whether to convert to a primitive cell
Only available for cifs. Defaults to False.
sort (bool): Whether to sort sites. Default to False.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
Structure.
"""
filename = str(filename)
if filename.endswith(".nc"):
# Read Structure from a netcdf file.
from pymatgen.io.abinit.netcdf import structure_from_ncdata
s = structure_from_ncdata(filename, cls=cls)
if sort:
s = s.get_sorted_structure()
return s
from pymatgen.io.lmto import LMTOCtrl
from pymatgen.io.vasp import Vasprun, Chgcar
from pymatgen.io.exciting import ExcitingInput
from monty.io import zopen
fname = os.path.basename(filename)
with zopen(filename, "rt") as f:
contents = f.read()
if fnmatch(fname.lower(), "*.cif*") or fnmatch(fname.lower(), "*.mcif*"):
return cls.from_str(contents, fmt="cif",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or fnmatch(fname, "*.vasp"):
s = cls.from_str(contents, fmt="poscar",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
s = Chgcar.from_file(filename).structure
elif fnmatch(fname, "vasprun*.xml*"):
s = Vasprun(filename).final_structure
elif fnmatch(fname.lower(), "*.cssr*"):
return cls.from_str(contents, fmt="cssr",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.xsf"):
return cls.from_str(contents, fmt="xsf",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "input*.xml"):
return ExcitingInput.from_file(fname).structure
elif fnmatch(fname, "*rndstr.in*") \
or fnmatch(fname, "*lat.in*") \
or fnmatch(fname, "*bestsqs*"):
return cls.from_str(contents, fmt="mcsqs",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "CTRL*"):
return LMTOCtrl.from_file(filename=filename).structure
else:
raise ValueError("Unrecognized file extension!")
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
s.__class__ = cls
return s
class IMolecule(SiteCollection, MSONable):
"""
Basic immutable Molecule object without periodicity. Essentially a
sequence of sites. IMolecule is made to be immutable so that they can
function as keys in a dict. For a mutable molecule,
    use the :class:`Molecule`.
Molecule extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a molecule is
equivalent to going through the sites in sequence.
"""
def __init__(self, species: list, coords: list, charge: float = 0,
spin_multiplicity: float = None,
validate_proximity: bool = False,
site_properties: dict = None):
"""
Creates a Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Specie, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError(("The list of atomic species must be of the",
" same length as the list of fractional ",
"coordinates."))
sites = []
for i in range(len(species)):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
sites.append(Site(species[i], coords[i], properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError(("Molecule contains sites that are ",
"less than 0.01 Angstrom apart!"))
self._charge = charge
nelectrons = 0
for site in sites:
for sp, amt in site.species.items():
if not isinstance(sp, DummySpecie):
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of %d and spin multiplicity of %d is"
" not possible for this molecule" %
(self._charge, spin_multiplicity))
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
@property
def charge(self):
"""
Charge of molecule
"""
return self._charge
@property
def spin_multiplicity(self):
"""
Spin multiplicity of molecule.
"""
return self._spin_multiplicity
@property
def nelectrons(self):
"""
Number of electrons in the molecule.
"""
return self._nelectrons
@property
def center_of_mass(self):
"""
Center of mass of molecule.
"""
center = np.zeros(3)
total_weight = 0
for site in self:
wt = site.species.weight
center += site.coords * wt
total_weight += wt
return center / total_weight
@property
def sites(self):
"""
Returns a tuple of sites in the Molecule.
"""
return self._sites
@classmethod
def from_sites(cls, sites, charge=0, spin_multiplicity=None,
validate_proximity=False):
"""
Convenience constructor to make a Molecule from a list of sites.
Args:
sites ([Site]): Sequence of Sites.
charge (int): Charge of molecule. Defaults to 0.
spin_multiplicity (int): Spin multicipity. Defaults to None,
in which it is determined automatically.
validate_proximity (bool): Whether to check that atoms are too
close.
"""
props = collections.defaultdict(list)
for site in sites:
for k, v in site.properties.items():
props[k].append(v)
return cls([site.species for site in sites],
[site.coords for site in sites],
charge=charge, spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=props)
    def break_bond(self, ind1, ind2, tol=0.2):
        """
        Returns two molecules based on breaking the bond between atoms at index
        ind1 and ind2.

        Args:
            ind1 (int): Index of first site.
            ind2 (int): Index of second site.
            tol (float): Relative tolerance to test. Basically, the code
                checks if the distance between the sites is less than (1 +
                tol) * typical bond distances. Defaults to 0.2, i.e.,
                20% longer.

        Returns:
            Two Molecule objects representing the two clusters formed from
            breaking the bond.
            NOTE(review): this is actually a generator of two molecules, not
            a tuple — callers must unpack or iterate it; confirm intent.
        """
        sites = self._sites
        # Seed one cluster with each endpoint of the broken bond; every
        # remaining site will be assigned to whichever cluster it bonds to.
        clusters = [[sites[ind1]], [sites[ind2]]]
        sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]
        def belongs_to_cluster(site, cluster):
            # True if site is covalently bonded to any member of cluster.
            for test_site in cluster:
                if CovalentBond.is_bonded(site, test_site, tol=tol):
                    return True
            return False
        # Iteratively grow the clusters: each pass assigns every site that
        # bonds to an existing cluster member; sites may only become
        # assignable after their neighbors have been assigned.
        while len(sites) > 0:
            unmatched = []
            for site in sites:
                for cluster in clusters:
                    if belongs_to_cluster(site, cluster):
                        cluster.append(site)
                        break
                else:
                    # for/else: runs only if no cluster accepted the site.
                    unmatched.append(site)
            # No progress in a full pass means some sites bond to neither
            # cluster — the molecule was not split cleanly by this bond.
            if len(unmatched) == len(sites):
                raise ValueError("Not all sites are matched!")
            sites = unmatched
        return (self.__class__.from_sites(cluster)
                for cluster in clusters)
def get_covalent_bonds(self, tol=0.2):
"""
Determines the covalent bonds in a molecule.
Args:
tol (float): The tol to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
"""
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.charge != other.charge:
return False
if self.spin_multiplicity != other.spin_multiplicity:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
    def __hash__(self):
        # For now, just use the composition hash code. Molecules with the
        # same composition therefore collide; equality still discriminates.
        return self.composition.__hash__()
def __repr__(self):
outs = ["Molecule Summary"]
for s in self:
outs.append(s.__repr__())
return "\n".join(outs)
def __str__(self):
outs = ["Full Formula (%s)" % self.composition.formula,
"Reduced Formula: " + self.composition.reduced_formula,
"Charge = %s, Spin Mult = %s" % (
self._charge, self._spin_multiplicity),
"Sites (%d)" % len(self)]
for i, site in enumerate(self):
outs.append(" ".join([str(i), site.species_string,
" ".join([("%0.6f" % j).rjust(12)
for j in site.coords])]))
return "\n".join(outs)
def as_dict(self):
"""
Json-serializable dict representation of Molecule
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"spin_multiplicity": self._spin_multiplicity,
"sites": []}
for site in self:
site_dict = site.as_dict()
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object
"""
sites = [Site.from_dict(sd) for sd in d["sites"]]
charge = d.get("charge", 0)
spin_multiplicity = d.get("spin_multiplicity")
return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity)
def get_distance(self, i, j):
"""
Get distance between site i and j.
Args:
i (int): Index of first site
j (int): Index of second site
Returns:
Distance between the two sites.
"""
return self[i].distance(self[j])
def get_sites_in_sphere(self, pt, r):
"""
Find all sites within a sphere from a point.
Args:
pt (3x1 array): Cartesian coordinates of center of sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
neighbors = []
for site in self._sites:
dist = site.distance_from_point(pt)
if dist <= r:
neighbors.append((site, dist))
return neighbors
def get_neighbors(self, site, r):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Site at the center of the sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
nn = self.get_sites_in_sphere(site.coords, r)
return [(s, dist) for (s, dist) in nn if site != s]
def get_neighbors_in_shell(self, origin, r, dr):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
outer = self.get_sites_in_sphere(origin, r + dr)
inner = r - dr
return [(site, dist) for (site, dist) in outer if dist > inner]
    def get_boxed_structure(self, a, b, c, images=(1, 1, 1),
                            random_rotation=False, min_dist=1, cls=None,
                            offset=None, no_cross=False):
        """
        Creates a Structure from a Molecule by putting the Molecule in the
        center of a orthorhombic box. Useful for creating Structure for
        calculating molecules using periodic codes.

        Args:
            a (float): a-lattice parameter.
            b (float): b-lattice parameter.
            c (float): c-lattice parameter.
            images: No. of boxed images in each direction. Defaults to
                (1, 1, 1), meaning single molecule with 1 lattice parameter
                in each direction.
            random_rotation (bool): Whether to apply a random rotation to
                each molecule. This jumbles all the molecules so that they
                are not exact images of each other.
            min_dist (float): The minimum distance that atoms should be from
                each other. This is only used if random_rotation is True.
                The randomized rotations are searched such that no two atoms
                are less than min_dist from each other.
            cls: The Structure class to instantiate (defaults to pymatgen
                structure)
            offset: Translation to offset molecule from center of mass coords
            no_cross: Whether to forbid molecule coords from extending beyond
                boundary of box.

        Returns:
            Structure containing molecule in a box.
        """
        if offset is None:
            offset = np.array([0, 0, 0])
        # Extent of the molecule along each cartesian axis — the box must be
        # strictly larger in every direction.
        coords = np.array(self.cart_coords)
        x_range = max(coords[:, 0]) - min(coords[:, 0])
        y_range = max(coords[:, 1]) - min(coords[:, 1])
        z_range = max(coords[:, 2]) - min(coords[:, 2])
        if a <= x_range or b <= y_range or c <= z_range:
            raise ValueError("Box is not big enough to contain Molecule.")
        # Orthorhombic supercell lattice covering all requested images.
        lattice = Lattice.from_parameters(a * images[0], b * images[1],
                                          c * images[2],
                                          90, 90, 90)
        nimages = images[0] * images[1] * images[2]
        coords = []
        # Molecule coordinates relative to its center of mass, plus offset.
        centered_coords = self.cart_coords - self.center_of_mass + offset
        for i, j, k in itertools.product(list(range(images[0])),
                                        list(range(images[1])),
                                        list(range(images[2]))):
            box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c]
            if random_rotation:
                # Keep drawing random orientations until the inter-image
                # min_dist constraint is satisfied.
                while True:
                    op = SymmOp.from_origin_axis_angle(
                        (0, 0, 0), axis=np.random.rand(3),
                        angle=random.uniform(-180, 180))
                    m = op.rotation_matrix
                    new_coords = np.dot(m, centered_coords.T).T + box_center
                    if no_cross:
                        # NOTE(review): a rotation that crosses the boundary
                        # raises instead of retrying — confirm this is the
                        # intended behavior.
                        x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
                        y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
                        z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
                        if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
                            raise ValueError("Molecule crosses boundary of box.")
                    if len(coords) == 0:
                        # First image: nothing to collide with yet.
                        break
                    distances = lattice.get_all_distances(
                        lattice.get_fractional_coords(new_coords),
                        lattice.get_fractional_coords(coords))
                    if np.amin(distances) > min_dist:
                        break
            else:
                new_coords = centered_coords + box_center
                if no_cross:
                    x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
                    y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
                    z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
                    if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
                        raise ValueError("Molecule crosses boundary of box.")
            coords.extend(new_coords)
        # Replicate every per-site property once per image.
        sprops = {k: v * nimages for k, v in self.site_properties.items()}
        if cls is None:
            cls = Structure
        return cls(lattice, self.species * nimages, coords,
                   coords_are_cartesian=True,
                   site_properties=sprops).get_sorted_structure()
def get_centered_molecule(self):
"""
Returns a Molecule centered at the center of mass.
Returns:
Molecule centered with center of mass at origin.
"""
center = self.center_of_mass
new_coords = np.array(self.cart_coords) - center
return self.__class__(self.species_and_occu, new_coords,
charge=self._charge,
spin_multiplicity=self._spin_multiplicity,
site_properties=self.site_properties)
def to(self, fmt=None, filename=None):
"""
Outputs the molecule to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specifies, it overrides whatever the
filename is. Options include "xyz", "gjf", "g03", "json". If
you have OpenBabel installed, any of the formats supported by
OpenBabel. Non-case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Defaults is None, i.e. string output.
Returns:
(str) if filename is None. None otherwise.
"""
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput
from pymatgen.io.babel import BabelMolAdaptor
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename or "")
if fmt == "xyz" or fnmatch(fname.lower(), "*.xyz*"):
writer = XYZ(self)
elif any([fmt == r or fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
writer = GaussianInput(self)
elif fmt == "json" or fnmatch(fname, "*.json*") or fnmatch(fname,
"*.mson*"):
if filename:
with zopen(filename, "wt", encoding='utf8') as f:
return json.dump(self.as_dict(), f)
else:
return json.dumps(self.as_dict())
elif fmt == "yaml" or fnmatch(fname, "*.yaml*"):
import ruamel.yaml as yaml
if filename:
with zopen(fname, "wt", encoding='utf8') as f:
return yaml.safe_dump(self.as_dict(), f)
else:
return yaml.safe_dump(self.as_dict())
else:
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
fname.lower())
if (not fmt) and m:
fmt = m.group(1)
writer = BabelMolAdaptor(self)
return writer.write_file(filename, file_format=fmt)
if filename:
writer.write_file(filename)
else:
return str(writer)
@classmethod
def from_str(cls, input_string, fmt):
"""
Reads the molecule from a string.
Args:
input_string (str): String to parse.
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specifies, it overrides whatever the
filename is. Options include "xyz", "gjf", "g03", "json". If
you have OpenBabel installed, any of the formats supported by
OpenBabel. Non-case sensitive.
Returns:
IMolecule or Molecule.
"""
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput
if fmt.lower() == "xyz":
m = XYZ.from_string(input_string).molecule
elif fmt in ["gjf", "g03", "g09", "com", "inp"]:
m = GaussianInput.from_string(input_string).molecule
elif fmt == "json":
d = json.loads(input_string)
return cls.from_dict(d)
elif fmt == "yaml":
import ruamel.yaml as yaml
d = yaml.safe_load(input_string)
return cls.from_dict(d)
else:
from pymatgen.io.babel import BabelMolAdaptor
m = BabelMolAdaptor.from_string(input_string,
file_format=fmt).pymatgen_mol
return cls.from_sites(m)
@classmethod
def from_file(cls, filename):
"""
Reads a molecule from a file. Supported formats include xyz,
gaussian input (gjf|g03|g09|com|inp), Gaussian output (.out|and
pymatgen's JSON serialized molecules. Using openbabel,
many more extensions are supported but requires openbabel to be
installed.
Args:
filename (str): The filename to read from.
Returns:
Molecule
"""
filename = str(filename)
from pymatgen.io.gaussian import GaussianOutput
with zopen(filename) as f:
contents = f.read()
fname = filename.lower()
if fnmatch(fname, "*.xyz*"):
return cls.from_str(contents, fmt="xyz")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
return cls.from_str(contents, fmt="g09")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["out", "lis", "log"]]):
return GaussianOutput(filename).final_structure
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json")
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml")
else:
from pymatgen.io.babel import BabelMolAdaptor
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
filename.lower())
if m:
new = BabelMolAdaptor.from_file(filename,
m.group(1)).pymatgen_mol
new.__class__ = cls
return new
raise ValueError("Unrecognized file extension!")
class Structure(IStructure, collections.abc.MutableSequence):
"""
Mutable version of structure.
"""
__hash__ = None
def __init__(self, lattice: Lattice, species: list, coords: np.ndarray,
charge: float = None, validate_proximity: bool = False,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
site_properties: dict = None):
"""
Create a periodic structure.
Args:
lattice: The lattice, either as a pymatgen.core.lattice.Lattice or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species: List of species on each site. Can take in flexible input,
including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
charge (int): overall charge of the structure. Defaults to behavior
in SiteCollection where total charge is the sum of the oxidation
states.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to map all sites into the unit cell,
i.e., fractional coords between 0 and 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
super().__init__(
lattice, species, coords, charge=charge,
validate_proximity=validate_proximity, to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
self._sites = list(self._sites)
    def __setitem__(self, i, site):
        """
        Modify a site in the structure.

        Args:
            i (int, [int], slice, Specie-like): Indices to change. You can
                specify these as an int, a list of int, or a species-like
                string.
            site (PeriodicSite/Specie/Sequence): Three options exist. You
                can provide a PeriodicSite directly (lattice will be
                checked). Or more conveniently, you can provide a
                specie-like object or a tuple of up to length 3.

            Examples:
                s[0] = "Fe"
                s[0] = Element("Fe")
                both replaces the species only.
                s[0] = "Fe", [0.5, 0.5, 0.5]
                Replaces site and *fractional* coordinates. Any properties
                are inherited from current site.
                s[0] = "Fe", [0.5, 0.5, 0.5], {"spin": 2}
                Replaces site and *fractional* coordinates and properties.
                s[(0, 2, 3)] = "Fe"
                Replaces sites 0, 2 and 3 with Fe.
                s[0::2] = "Fe"
                Replaces all even index sites with Fe.
                s["Mn"] = "Fe"
                Replaces all Mn in the structure with Fe. This is
                a short form for the more complex replace_species.
                s["Mn"] = "Fe0.5Co0.5"
                Replaces all Mn in the structure with Fe: 0.5, Co: 0.5, i.e.,
                creates a disordered structure!
        """
        # Normalize the index argument into a list of integer indices.
        if isinstance(i, int):
            indices = [i]
        elif isinstance(i, (str, Element, Specie)):
            # Species-like key, e.g. s["Mn"] = "Fe": delegate entirely.
            self.replace_species({i: site})
            return
        elif isinstance(i, slice):
            # Resolve the slice to indices via membership of the selected
            # sites in the full site list.
            to_mod = self[i]
            indices = [ii for ii, s in enumerate(self._sites)
                       if s in to_mod]
        else:
            indices = list(i)
        for ii in indices:
            if isinstance(site, PeriodicSite):
                # A complete PeriodicSite replacement must share this
                # structure's lattice and may only target one index.
                if site.lattice != self._lattice:
                    raise ValueError("PeriodicSite added must have same lattice "
                                     "as Structure!")
                elif len(indices) != 1:
                    raise ValueError("Site assignments makes sense only for "
                                     "single int indices!")
                self._sites[ii] = site
            else:
                if isinstance(site, str) or (
                        not isinstance(site, collections.abc.Sequence)):
                    # Bare species-like value: replace the species in place,
                    # keeping coordinates and properties.
                    self._sites[ii].species = site
                else:
                    # (species, frac_coords[, properties]) tuple.
                    self._sites[ii].species = site[0]
                    if len(site) > 1:
                        self._sites[ii].frac_coords = site[1]
                    if len(site) > 2:
                        self._sites[ii].properties = site[2]
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
    @property
    def lattice(self):
        """Lattice associated with this structure."""
        return self._lattice
@lattice.setter
def lattice(self, lattice):
self._lattice = lattice
for site in self._sites:
site.lattice = lattice
def append(self, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Append a site to the structure.
Args:
species: Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties of the site.
Returns:
New structure with inserted site.
"""
return self.insert(len(self), species, coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=properties)
def insert(self, i, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Insert a site to the structure.
Args:
i (int): Index to insert site
species (species-like): Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties associated with the site.
Returns:
New structure with inserted site.
"""
if not coords_are_cartesian:
new_site = PeriodicSite(species, coords, self._lattice,
properties=properties)
else:
frac_coords = self._lattice.get_fractional_coords(coords)
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def replace(self, i, species, coords=None, coords_are_cartesian=False,
properties=None):
"""
Replace a single site. Takes either a species or a dict of species and
occupations.
Args:
i (int): Index of the site in the _sites list.
species (species-like): Species of replacement site
coords (3x1 array): Coordinates of replacement site. If None,
the current coordinates are assumed.
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
properties (dict): Properties associated with the site.
"""
if coords is None:
frac_coords = self[i].frac_coords
elif coords_are_cartesian:
frac_coords = self._lattice.get_fractional_coords(coords)
else:
frac_coords = coords
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
self._sites[i] = new_site
    def substitute(self, index, func_grp, bond_order=1):
        """
        Substitute atom at index with a functional group.

        Args:
            index (int): Index of atom to substitute.
            func_grp: Substituent molecule. There are two options:
                1. Providing an actual Molecule as the input. The first atom
                   must be a DummySpecie X, indicating the position of
                   nearest neighbor. The second atom must be the next
                   nearest atom. For example, for a methyl group
                   substitution, func_grp should be X-CH3, where X is the
                   first site and C is the second site. What the code will
                   do is to remove the index site, and connect the nearest
                   neighbor to the C atom in CH3. The X-C bond indicates the
                   directionality to connect the atoms.
                2. A string name. The molecule will be obtained from the
                   relevant template in func_groups.json.
            bond_order (int): A specified bond order to calculate the bond
                length between the attached functional group and the nearest
                neighbor site. Defaults to 1.
        """
        # Find the nearest neighbor that is not a terminal atom.
        all_non_terminal_nn = []
        for nn, dist in self.get_neighbors(self[index], 3):
            # Check that the nn has neighbors within a sensible distance but
            # is not the site being substituted.
            for inn, dist2 in self.get_neighbors(nn, 3):
                if inn != self[index] and \
                        dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
                    all_non_terminal_nn.append((nn, dist))
                    break
        if len(all_non_terminal_nn) == 0:
            raise RuntimeError("Can't find a non-terminal neighbor to attach"
                               " functional group to.")
        # Of all candidates, attach to the closest one.
        non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
        # Set the origin point to be the coordinates of the nearest
        # non-terminal neighbor.
        origin = non_terminal_nn.coords
        # Pass value of functional group--either from user-defined or from
        # functional.json
        if isinstance(func_grp, Molecule):
            func_grp = func_grp
        else:
            # Check to see whether the functional group is in database.
            if func_grp not in FunctionalGroups:
                raise RuntimeError("Can't find functional group in list. "
                                   "Provide explicit coordinate instead")
            else:
                func_grp = FunctionalGroups[func_grp]
        # If a bond length can be found, modify func_grp so that the X-group
        # bond length is equal to the bond length.
        try:
            bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,
                                 bond_order=bond_order)
        # Catches for case of incompatibility between Element(s) and Specie(s)
        except TypeError:
            bl = None
        if bl is not None:
            # Stretch/shrink the X site along the X-C axis to match bl.
            func_grp = func_grp.copy()
            vec = func_grp[0].coords - func_grp[1].coords
            vec /= np.linalg.norm(vec)
            func_grp[0] = "X", func_grp[1].coords + float(bl) * vec
        # Align X to the origin.
        x = func_grp[0]
        func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
        # Find angle between the attaching bond and the bond to be replaced.
        v1 = func_grp[1].coords - origin
        v2 = self[index].coords - origin
        angle = get_angle(v1, v2)
        if 1 < abs(angle % 180) < 179:
            # For angles which are not 0 or 180, we perform a rotation about
            # the origin along an axis perpendicular to both bonds to align
            # bonds.
            axis = np.cross(v1, v2)
            op = SymmOp.from_origin_axis_angle(origin, axis, angle)
            func_grp.apply_operation(op)
        elif abs(abs(angle) - 180) < 1:
            # We have a 180 degree angle. Simply do an inversion about the
            # origin
            for i in range(len(func_grp)):
                func_grp[i] = (func_grp[i].species,
                               origin - (func_grp[i].coords - origin))
        # Remove the atom to be replaced, and add the rest of the functional
        # group. The dummy X site (index 0) is not added.
        del self[index]
        for site in func_grp[1:]:
            s_new = PeriodicSite(site.species, site.coords,
                                 self.lattice, coords_are_cartesian=True)
            self._sites.append(s_new)
def remove_species(self, species):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(s) for s in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(PeriodicSite(
new_sp_occu, site.frac_coords, self._lattice,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
Delete sites with at indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [s for i, s in enumerate(self._sites)
if i not in indices]
    def apply_operation(self, symmop, fractional=False):
        """
        Apply a symmetry operation to the structure in place.

        The lattice is operated on by the rotation matrix only (translations
        cannot affect a basis). Coords are operated in full and then
        transformed to the new lattice.

        Args:
            symmop (SymmOp): Symmetry operation to apply.
            fractional (bool): Whether the symmetry operation is applied in
                fractional space. Defaults to False, i.e., symmetry operation
                is applied in cartesian coordinates.
        """
        if not fractional:
            # Rotate each lattice vector; build the new lattice first so
            # operate_site can express coordinates in it.
            self._lattice = Lattice([symmop.apply_rotation_only(row)
                                     for row in self._lattice.matrix])

            def operate_site(site):
                # Operate in cartesian space, then convert back into the
                # (already rotated) lattice's fractional coordinates.
                new_cart = symmop.operate(site.coords)
                new_frac = self._lattice.get_fractional_coords(new_cart)
                return PeriodicSite(site.species, new_frac,
                                    self._lattice,
                                    properties=site.properties)
        else:
            # Fractional mode: the rotation matrix acts directly on the
            # lattice matrix and on fractional coordinates.
            new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
            self._lattice = Lattice(new_latt)

            def operate_site(site):
                return PeriodicSite(site.species,
                                    symmop.operate(site.frac_coords),
                                    self._lattice,
                                    properties=site.properties)

        self._sites = [operate_site(s) for s in self._sites]
    @deprecated(message="Simply set using Structure.lattice = lattice. This will be removed in pymatgen v2020.")
    def modify_lattice(self, new_lattice):
        """
        Modify the lattice of the structure. Mainly used for changing the
        basis.

        Deprecated: assign to ``Structure.lattice`` instead.

        Args:
            new_lattice (Lattice): New lattice
        """
        self._lattice = new_lattice
        # Re-point every site at the new lattice so fractional coordinates
        # are interpreted against the new basis.
        for site in self._sites:
            site.lattice = new_lattice
def apply_strain(self, strain):
"""
Apply a strain to the lattice.
Args:
strain (float or list): Amount of strain to apply. Can be a float,
or a sequence of 3 numbers. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to calling
modify_lattice with a lattice with lattice parameters that
are 1% larger.
"""
s = (1 + np.array(strain)) * np.eye(3)
self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
    def sort(self, key=None, reverse=False):
        """
        Sort a structure in place. The parameters have the same meaning as in
        list.sort. By default, sites are sorted by the electronegativity of
        the species. The difference between this method and
        get_sorted_structure (which also works in IStructure) is that the
        latter returns a new Structure, while this just sorts the Structure
        in place.

        Args:
            key: Specifies a function of one argument that is used to extract
                a comparison key from each list element: key=str.lower. The
                default value is None (compare the elements directly).
            reverse (bool): If set to True, then the list elements are sorted
                as if each comparison were reversed.
        """
        # Delegates to list.sort (stable, in-place) on the underlying sites.
        self._sites.sort(key=key, reverse=reverse)
def translate_sites(self, indices, vector, frac_coords=True,
to_unit_cell=True):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices: Integer or List of site indices on which to perform the
translation.
vector: Translation vector for sites.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
if not isinstance(indices, collections.abc.Iterable):
indices = [indices]
for i in indices:
site = self._sites[i]
if frac_coords:
fcoords = site.frac_coords + vector
else:
fcoords = self._lattice.get_fractional_coords(
site.coords + vector)
if to_unit_cell:
fcoords = np.mod(fcoords, 1)
self._sites[i].frac_coords = fcoords
    def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None,
                     to_unit_cell=True):
        """
        Rotate specific sites by some angle around vector at anchor.

        Args:
            indices (list): List of site indices on which to perform the
                rotation. Defaults to all sites.
            theta (float): Angle in radians
            axis (3x1 array): Rotation axis vector. Defaults to the z axis.
            anchor (3x1 array): Point of rotation. Defaults to the origin.
            to_unit_cell (bool): Whether new sites are transformed to unit
                cell
        """
        from numpy.linalg import norm
        from numpy import cross, eye
        from scipy.linalg import expm

        if indices is None:
            indices = range(len(self))
        if axis is None:
            axis = [0, 0, 1]
        if anchor is None:
            anchor = [0, 0, 0]
        anchor = np.array(anchor)
        axis = np.array(axis)
        theta %= 2 * np.pi

        # Rotation matrix via the matrix exponential of the skew-symmetric
        # cross-product matrix of the normalized axis (Rodrigues' formula).
        rm = expm(cross(eye(3), axis / norm(axis)) * theta)
        for i in indices:
            site = self._sites[i]
            # Rotate about the anchor point in cartesian space.
            coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel()
            new_site = PeriodicSite(
                site.species, coords, self._lattice,
                to_unit_cell=to_unit_cell, coords_are_cartesian=True,
                properties=site.properties)
            self._sites[i] = new_site
def perturb(self, distance):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False)
    def make_supercell(self, scaling_matrix, to_unit_cell=True):
        """
        Create a supercell.

        Args:
            scaling_matrix: A scaling matrix for transforming the lattice
                vectors. Has to be all integers. Several options are possible:
                a. A full 3x3 scaling matrix defining the linear combination
                   the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
                   1]] generates a new structure with lattice vectors a' =
                   2a + b, b' = 3b, c' = c where a, b, and c are the lattice
                   vectors of the original structure.
                b. An sequence of three scaling factors. E.g., [2, 1, 1]
                   specifies that the supercell should have dimensions 2a x b x
                   c.
                c. A number, which simply scales all lattice vectors by the
                   same factor.
            to_unit_cell: Whether or not to fall back sites into the unit cell
        """
        # Multiplication delegates to __mul__, which returns the scaled
        # structure; adopt its sites and lattice in place.
        s = self * scaling_matrix
        if to_unit_cell:
            for site in s:
                site.to_unit_cell(in_place=True)
        self._sites = s.sites
        self._lattice = s.lattice
    def scale_lattice(self, volume):
        """
        Performs a scaling of the lattice vectors so that length proportions
        and angles are preserved.

        Args:
            volume (float): New volume of the unit cell in A^3.
        """
        # Assign through the ``lattice`` property (not ``_lattice``) so any
        # setter-side bookkeeping runs.
        self.lattice = self._lattice.scale(volume)
    def merge_sites(self, tol=0.01, mode="sum"):
        """
        Merges sites (adding occupancies) within tol of each other.
        Removes site properties.

        Args:
            tol (float): Tolerance for distance to merge sites.
            mode (str): Three modes supported. "delete" means duplicate sites are
                deleted. "sum" means the occupancies are summed for the sites.
                "average" means that the site is deleted but the properties are averaged
                Only first letter is considered.
        """
        mode = mode.lower()[0]
        from scipy.spatial.distance import squareform
        from scipy.cluster.hierarchy import fcluster, linkage

        # Single-linkage hierarchical clustering on the (symmetrized)
        # distance matrix; sites closer than ``tol`` end up in one cluster.
        d = self.distance_matrix
        np.fill_diagonal(d, 0)
        clusters = fcluster(linkage(squareform((d + d.T) / 2)),
                            tol, 'distance')
        sites = []
        for c in np.unique(clusters):
            inds = np.where(clusters == c)[0]
            # Seed the merged site with the cluster's first member.
            species = self[inds[0]].species
            coords = self[inds[0]].frac_coords
            props = self[inds[0]].properties
            for n, i in enumerate(inds[1:]):
                sp = self[i].species
                if mode == "s":
                    species += sp
                # Running mean of fractional coordinates; the round() term
                # maps each member to its nearest periodic image first.
                offset = self[i].frac_coords - coords
                coords = coords + ((offset - np.round(offset)) / (n + 2)).astype(
                    coords.dtype)
                for key in props.keys():
                    if props[key] is not None and self[i].properties[key] != props[key]:
                        if mode == 'a' and isinstance(props[key], float):
                            # update a running total
                            props[key] = props[key]*(n+1)/(n+2) + self[i].properties[key]/(n+2)
                        else:
                            # Conflicting non-averageable property: drop it.
                            props[key] = None
                            warnings.warn("Sites with different site property %s are merged. "
                                          "So property is set to none" % key)
            sites.append(PeriodicSite(species, coords, self.lattice, properties=props))

        self._sites = sites
    def set_charge(self, new_charge: float = 0.):
        """
        Sets the overall structure charge.

        Args:
            new_charge (float): new charge to set. Defaults to 0.
        """
        self._charge = new_charge
class Molecule(IMolecule, collections.abc.MutableSequence):
    """
    Mutable Molecule. It has all the methods in IMolecule, but in addition,
    it allows a user to perform edits on the molecule.
    """
    # Mutable containers must not be hashable.
    __hash__ = None

    def __init__(self, species: list, coords: list, charge: float = 0,
                 spin_multiplicity: float = None,
                 validate_proximity: bool = False,
                 site_properties: dict = None):
        """
        Creates a MutableMolecule.

        Args:
            species: list of atomic species. Possible kinds of input include a
                list of dict of elements/species and occupancies, a List of
                elements/specie specified as actual Element/Specie, Strings
                ("Fe", "Fe2+") or atomic numbers (1,56).
            coords (3x1 array): list of cartesian coordinates of each species.
            charge (float): Charge for the molecule. Defaults to 0.
            spin_multiplicity (int): Spin multiplicity for molecule.
                Defaults to None, which means that the spin multiplicity is
                set to 1 if the molecule has no unpaired electrons and to 2
                if there are unpaired electrons.
            validate_proximity (bool): Whether to check if there are sites
                that are less than 1 Ang apart. Defaults to False.
            site_properties (dict): Properties associated with the sites as
                a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
                sequences have to be the same length as the atomic species
                and fractional_coords. Defaults to None for no properties.
        """
        super().__init__(species, coords, charge=charge,
                         spin_multiplicity=spin_multiplicity,
                         validate_proximity=validate_proximity,
                         site_properties=site_properties)
        # The immutable parent presumably stores sites in an immutable
        # sequence; convert to a list so in-place edits work.
        self._sites = list(self._sites)
    def __setitem__(self, i, site):
        """
        Modify a site in the molecule.

        Args:
            i (int, [int], slice, Specie-like): Indices to change. You can
                specify these as an int, a list of int, or a species-like
                string.
            site (PeriodicSite/Specie/Sequence): Three options exist. You can
                provide a Site directly, or for convenience, you can provide
                simply a Specie-like string/object, or finally a (Specie,
                coords) sequence, e.g., ("Fe", [0.5, 0.5, 0.5]).
        """
        # Normalize the index argument to a list of integer indices.
        if isinstance(i, int):
            indices = [i]
        elif isinstance(i, (str, Element, Specie)):
            # Species-like key: replace that species everywhere and return.
            self.replace_species({i: site})
            return
        elif isinstance(i, slice):
            to_mod = self[i]
            indices = [ii for ii, s in enumerate(self._sites)
                       if s in to_mod]
        else:
            indices = list(i)

        for ii in indices:
            if isinstance(site, Site):
                # A full Site replaces the slot outright.
                self._sites[ii] = site
            else:
                if isinstance(site, str) or (
                        not isinstance(site, collections.abc.Sequence)):
                    # Bare species-like value: change species only.
                    self._sites[ii].species = site
                else:
                    # (species[, coords[, properties]]) sequence.
                    self._sites[ii].species = site[0]
                    if len(site) > 1:
                        self._sites[ii].coords = site[1]
                    if len(site) > 2:
                        self._sites[ii].properties = site[2]
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
    def append(self, species, coords, validate_proximity=True, properties=None):
        """
        Appends a site to the molecule.

        Args:
            species: Species of inserted site
            coords: Coordinates of inserted site
            validate_proximity (bool): Whether to check if inserted site is
                too close to an existing site. Defaults to True.
            properties (dict): A dict of properties for the Site.
        """
        # Appending is just inserting at the end; insert() mutates in place.
        return self.insert(len(self), species, coords,
                           validate_proximity=validate_proximity,
                           properties=properties)
    def set_charge_and_spin(self, charge, spin_multiplicity=None):
        """
        Set the charge and spin multiplicity.

        Args:
            charge (int): Charge for the molecule. Defaults to 0.
            spin_multiplicity (int): Spin multiplicity for molecule.
                Defaults to None, which means that the spin multiplicity is
                set to 1 if the molecule has no unpaired electrons and to 2
                if there are unpaired electrons.

        Raises:
            ValueError: if the requested multiplicity is inconsistent with
                the electron count implied by the charge.
        """
        self._charge = charge
        # Electron count = sum of atomic numbers weighted by occupancy,
        # minus the overall charge; dummy species carry no electrons.
        nelectrons = 0
        for site in self._sites:
            for sp, amt in site.species.items():
                if not isinstance(sp, DummySpecie):
                    nelectrons += sp.Z * amt
        nelectrons -= charge
        self._nelectrons = nelectrons
        if spin_multiplicity:
            # nelectrons + multiplicity must be odd for a consistent state
            # (multiplicity = 2S + 1).
            if (nelectrons + spin_multiplicity) % 2 != 1:
                raise ValueError(
                    "Charge of {} and spin multiplicity of {} is"
                    " not possible for this molecule".format(
                        self._charge, spin_multiplicity))
            self._spin_multiplicity = spin_multiplicity
        else:
            # Default: singlet for an even electron count, doublet for odd.
            self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
    def insert(self, i, species, coords, validate_proximity=False,
               properties=None):
        """
        Insert a site to the molecule.

        Args:
            i (int): Index to insert site
            species: species of inserted site
            coords (3x1 array): coordinates of inserted site
            validate_proximity (bool): Whether to check if inserted site is
                too close to an existing site. Defaults to False.
            properties (dict): Dict of properties for the Site.

        Raises:
            ValueError: if proximity validation is on and the new site is
                within DISTANCE_TOLERANCE of an existing site.
        """
        new_site = Site(species, coords, properties=properties)
        if validate_proximity:
            # Linear scan; molecules are small enough for an O(n) check.
            for site in self:
                if site.distance(new_site) < self.DISTANCE_TOLERANCE:
                    raise ValueError("New site is too close to an existing "
                                     "site!")
        self._sites.insert(i, new_site)
def remove_species(self, species):
"""
Remove all occurrences of a species from a molecule.
Args:
species: Species to remove.
"""
new_sites = []
species = [get_el_sp(sp) for sp in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(Site(new_sp_occu, site.coords,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
Delete sites with at indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites))
if i not in indices]
def translate_sites(self, indices=None, vector=None):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices (list): List of site indices on which to perform the
translation.
vector (3x1 array): Translation vector for sites.
"""
if indices is None:
indices = range(len(self))
if vector is None:
vector == [0, 0, 0]
for i in indices:
site = self._sites[i]
new_site = Site(site.species, site.coords + vector,
properties=site.properties)
self._sites[i] = new_site
    def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None):
        """
        Rotate specific sites by some angle around vector at anchor.

        Args:
            indices (list): List of site indices on which to perform the
                rotation. Defaults to all sites.
            theta (float): Angle in radians
            axis (3x1 array): Rotation axis vector. Defaults to the z axis.
            anchor (3x1 array): Point of rotation. Defaults to the origin.
        """
        from numpy.linalg import norm
        from numpy import cross, eye
        from scipy.linalg import expm

        if indices is None:
            indices = range(len(self))
        if axis is None:
            axis = [0, 0, 1]
        if anchor is None:
            anchor = [0, 0, 0]
        anchor = np.array(anchor)
        axis = np.array(axis)
        theta %= 2 * np.pi

        # Rotation matrix via the matrix exponential of the skew-symmetric
        # cross-product matrix of the normalized axis (Rodrigues' formula).
        rm = expm(cross(eye(3), axis / norm(axis)) * theta)
        for i in indices:
            site = self._sites[i]
            # Rotate about the anchor point in cartesian space.
            s = ((np.dot(rm, (site.coords - anchor).T)).T + anchor).ravel()
            new_site = Site(site.species, s,
                            properties=site.properties)
            self._sites[i] = new_site
def perturb(self, distance):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec())
def apply_operation(self, symmop):
"""
Apply a symmetry operation to the molecule.
Args:
symmop (SymmOp): Symmetry operation to apply.
"""
def operate_site(site):
new_cart = symmop.operate(site.coords)
return Site(site.species, new_cart,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
def copy(self):
"""
Convenience method to get a copy of the molecule.
Returns:
A copy of the Molecule.
"""
return self.__class__.from_sites(self)
def substitute(self, index, func_grp, bond_order=1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
func_grp: Substituent molecule. There are two options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
bond_order (int): A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn, dist in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for inn, dist2 in self.get_neighbors(nn, 3):
if inn != self[index] and \
dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
all_non_terminal_nn.append((nn, dist))
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach"
" functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
# Pass value of functional group--either from user-defined or from
# functional.json
if isinstance(func_grp, Molecule):
func_grp = func_grp
else:
# Check to see whether the functional group is in database.
if func_grp not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. "
"Provide explicit coordinate instead")
else:
func_grp = FunctionalGroups[func_grp]
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,
bond_order=bond_order)
if bl is not None:
func_grp = func_grp.copy()
vec = func_grp[0].coords - func_grp[1].coords
vec /= np.linalg.norm(vec)
func_grp[0] = "X", func_grp[1].coords + float(bl) * vec
# Align X to the origin.
x = func_grp[0]
func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = func_grp[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
func_grp.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i in range(len(func_grp)):
func_grp[i] = (func_grp[i].species,
origin - (func_grp[i].coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in func_grp[1:]:
self._sites.append(site)
class StructureError(Exception):
    """
    Exception class for Structure.

    Raised when the structure has problems, e.g., atoms that are too close.
    """
    pass
# Load the functional-group templates shipped alongside this module into a
# module-level registry of name -> Molecule, used by the substitute() methods.
with open(os.path.join(os.path.dirname(__file__),
                       "func_groups.json"), "rt") as f:
    FunctionalGroups = {k: Molecule(v["species"], v["coords"])
                        for k, v in json.load(f).items()}
|
dongsenfo/pymatgen
|
pymatgen/core/structure.py
|
Python
|
mit
| 141,785
|
[
"ABINIT",
"CRYSTAL",
"Gaussian",
"NetCDF",
"VASP",
"exciting",
"pymatgen"
] |
5a3c3ffa11863e69ad0c0fc63a708d174aeff414630f101997f03a1b075ac9f5
|
'''
Copyright (C) 2016 Travis DeWolf
Implemented from 'Control-limited differential dynamic programming'
by Yuval Tassa, Nicolas Mansard, and Emo Todorov (2014).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import lqr as lqr
import numpy as np
from copy import copy
class Control(lqr.Control):
    """
    A controller that implements iterative Linear Quadratic Gaussian control.
    Controls the (x,y) position of a robotic arm end-effector.
    """

    def __init__(self, n=50, max_iter=100, **kwargs):
        '''
        n int: length of the control sequence
        max_iter int: limit on number of optimization iterations
        '''
        super(Control, self).__init__(**kwargs)

        # Target from the previous call; used to detect target changes.
        self.old_target = [None, None]

        self.tN = n # number of timesteps
        self.max_iter = max_iter
        # Levenberg-Marquardt regularization knobs used in ilqr().
        self.lamb_factor = 10
        self.lamb_max = 1000
        self.eps_converge = 0.001 # exit if relative improvement below threshold

        # NOTE(review): write_to_file / task / additions come from the
        # lqr.Control base class -- confirm against that implementation.
        if self.write_to_file is True:
            from controllers.recorder import Recorder
            # set up recorders
            self.u_recorder = Recorder('control signal', self.task, 'ilqr')
            self.xy_recorder = Recorder('end-effector position', self.task, 'ilqr')
            self.dist_recorder = Recorder('distance from target', self.task, 'ilqr')
            self.recorders = [self.u_recorder,
                              self.xy_recorder,
                              self.dist_recorder]
    def control(self, arm, x_des=None):
        """Generates a control signal to move the
        arm to the specified target.

        arm Arm: the arm model being controlled
        x_des np.array: desired task-space force,
                        irrelevant here.
        """
        # if the target has changed, reset things and re-optimize
        # for this movement
        if self.old_target[0] != self.target[0] or \
                self.old_target[1] != self.target[1]:
            self.reset(arm, x_des)

        # Reset k if at the end of the sequence
        if self.t >= self.tN-1:
            self.t = 0

        # Compute the optimization
        # NOTE(review): ``t % 1 == 0`` is always true, so the trajectory is
        # re-optimized on every step; this looks like a leftover
        # re-planning-interval knob -- confirm intent.
        if self.t % 1 == 0:
            x0 = np.zeros(arm.DOF*2)
            # Optimize on a private copy so the real arm is untouched.
            self.arm, x0[:arm.DOF*2] = self.copy_arm(arm)
            U = np.copy(self.U[self.t:])
            self.X, self.U[self.t:], cost = \
                self.ilqr(x0, U)
        self.u = self.U[self.t]

        # move us a step forward in our control sequence
        self.t += 1

        if self.write_to_file is True:
            # feed recorders their signals
            self.u_recorder.record(0.0, self.U)
            self.xy_recorder.record(0.0, self.arm.x)
            self.dist_recorder.record(0.0, self.target - self.arm.x)

        # add in any additional signals (noise, external forces)
        for addition in self.additions:
            self.u += addition.generate(self.u, arm)

        return self.u
def copy_arm(self, real_arm):
""" make a copy of the arm model, to make sure that the
actual arm model isn't affected during the iLQR process
real_arm Arm: the arm model being controlled
"""
# need to make a copy of the arm for simulation
arm = real_arm.__class__()
arm.dt = real_arm.dt
# reset arm position to x_0
arm.reset(q = real_arm.q, dq = real_arm.dq)
return arm, np.hstack([real_arm.q, real_arm.dq])
def cost(self, x, u):
""" the immediate state cost function """
# compute cost
dof = u.shape[0]
num_states = x.shape[0]
l = np.sum(u**2)
# compute derivatives of cost
l_x = np.zeros(num_states)
l_xx = np.zeros((num_states, num_states))
l_u = 2 * u
l_uu = 2 * np.eye(dof)
l_ux = np.zeros((dof, num_states))
# returned in an array for easy multiplication by time step
return l, l_x, l_xx, l_u, l_uu, l_ux
    def cost_final(self, x):
        """The final-state cost and its first and second derivatives.

        Penalizes squared end-effector distance from the target and squared
        joint velocities at the end of the trajectory.

        x np.array: terminal state ([q, dq])

        Returns (l, l_x, l_xx).
        """
        num_states = x.shape[0]
        l_x = np.zeros((num_states))
        l_xx = np.zeros((num_states, num_states))

        wp = 1e4 # terminal position cost weight
        wv = 1e4 # terminal velocity cost weight

        # NOTE(review): the position error uses the arm's current pose
        # (self.arm.x), not the state ``x`` -- assumes the internal arm was
        # just stepped to ``x``; confirm against the callers.
        xy = self.arm.x
        xy_err = np.array([xy[0] - self.target[0], xy[1] - self.target[1]])
        l = (wp * np.sum(xy_err**2) +
             wv * np.sum(x[self.arm.DOF:self.arm.DOF*2]**2))

        l_x[0:self.arm.DOF] = wp * self.dif_end(x[0:self.arm.DOF])
        l_x[self.arm.DOF:self.arm.DOF*2] = (2 *
                wv * x[self.arm.DOF:self.arm.DOF*2])

        eps = 1e-4 # finite difference epsilon
        # calculate second derivative with finite differences
        for k in range(self.arm.DOF):
            veps = np.zeros(self.arm.DOF)
            veps[k] = eps
            d1 = wp * self.dif_end(x[0:self.arm.DOF] + veps)
            d2 = wp * self.dif_end(x[0:self.arm.DOF] - veps)
            l_xx[0:self.arm.DOF, k] = ((d1-d2) / 2.0 / eps).flatten()

        # The velocity block of the Hessian is exact (cost is quadratic).
        l_xx[self.arm.DOF:self.arm.DOF*2, self.arm.DOF:self.arm.DOF*2] = 2 * wv * np.eye(self.arm.DOF)

        # Final cost only requires these three values
        return l, l_x, l_xx
# Compute derivative of endpoint error
def dif_end(self, x):
xe = -self.target.copy()
for ii in range(self.arm.DOF):
xe[0] += self.arm.L[ii] * np.cos(np.sum(x[:ii+1]))
xe[1] += self.arm.L[ii] * np.sin(np.sum(x[:ii+1]))
edot = np.zeros((self.arm.DOF,1))
for ii in range(self.arm.DOF):
edot[ii,0] += (2 * self.arm.L[ii] *
(xe[0] * -np.sin(np.sum(x[:ii+1])) +
xe[1] * np.cos(np.sum(x[:ii+1]))))
edot = np.cumsum(edot[::-1])[::-1][:]
return edot
def finite_differences(self, x, u):
""" calculate gradient of plant dynamics using finite differences
x np.array: the state of the system
u np.array: the control signal
"""
dof = u.shape[0]
num_states = x.shape[0]
A = np.zeros((num_states, num_states))
B = np.zeros((num_states, dof))
eps = 1e-4 # finite differences epsilon
for ii in range(num_states):
# calculate partial differential w.r.t. x
inc_x = x.copy()
inc_x[ii] += eps
state_inc,_ = self.plant_dynamics(inc_x, u.copy())
dec_x = x.copy()
dec_x[ii] -= eps
state_dec,_ = self.plant_dynamics(dec_x, u.copy())
A[:, ii] = (state_inc - state_dec) / (2 * eps)
for ii in range(dof):
# calculate partial differential w.r.t. u
inc_u = u.copy()
inc_u[ii] += eps
state_inc,_ = self.plant_dynamics(x.copy(), inc_u)
dec_u = u.copy()
dec_u[ii] -= eps
state_dec,_ = self.plant_dynamics(x.copy(), dec_u)
B[:, ii] = (state_inc - state_dec) / (2 * eps)
return A, B
def gen_target(self, arm):
"""Generate a random target"""
gain = np.sum(arm.L) * .75
bias = -np.sum(arm.L) * 0
self.target = np.random.random(size=(2,)) * gain + bias
return self.target.tolist()
    def ilqr(self, x0, U=None):
        """ use iterative linear quadratic regulation to find a control
        sequence that minimizes the cost function

        x0 np.array: the initial state of the system
        U np.array: the initial control trajectory dimensions = [dof, time]

        Returns (X, U, cost): the optimized state trajectory, control
        sequence, and final trajectory cost.
        """
        U = self.U if U is None else U

        tN = U.shape[0] # number of time steps
        dof = self.arm.DOF # number of degrees of freedom of plant
        num_states = dof * 2 # number of states (position and velocity)
        dt = self.arm.dt # time step

        lamb = 1.0 # regularization parameter (Levenberg-Marquardt)
        sim_new_trajectory = True

        for ii in range(self.max_iter):

            if sim_new_trajectory == True:
                # simulate forward using the current control trajectory
                X, cost = self.simulate(x0, U)
                oldcost = np.copy(cost) # copy for exit condition check

                # now we linearly approximate the dynamics, and quadratically
                # approximate the cost function so we can use LQR methods

                # for storing linearized dynamics
                # x(t+1) = f(x(t), u(t))
                f_x = np.zeros((tN, num_states, num_states)) # df / dx
                f_u = np.zeros((tN, num_states, dof)) # df / du
                # for storing quadratized cost function
                l = np.zeros((tN,1)) # immediate state cost
                l_x = np.zeros((tN, num_states)) # dl / dx
                l_xx = np.zeros((tN, num_states, num_states)) # d^2 l / dx^2
                l_u = np.zeros((tN, dof)) # dl / du
                l_uu = np.zeros((tN, dof, dof)) # d^2 l / du^2
                l_ux = np.zeros((tN, dof, num_states)) # d^2 l / du / dx
                # for everything except final state
                for t in range(tN-1):
                    # x(t+1) = f(x(t), u(t)) = x(t) + dx(t) * dt
                    # linearized dx(t) = np.dot(A(t), x(t)) + np.dot(B(t), u(t))
                    # f_x = np.eye + A(t)
                    # f_u = B(t)
                    A, B = self.finite_differences(X[t], U[t])
                    f_x[t] = np.eye(num_states) + A * dt
                    f_u[t] = B * dt

                    (l[t], l_x[t], l_xx[t], l_u[t],
                        l_uu[t], l_ux[t]) = self.cost(X[t], U[t])
                    # running costs are integrated over the time step
                    l[t] *= dt
                    l_x[t] *= dt
                    l_xx[t] *= dt
                    l_u[t] *= dt
                    l_uu[t] *= dt
                    l_ux[t] *= dt
                # aaaand for final state
                l[-1], l_x[-1], l_xx[-1] = self.cost_final(X[-1])

                sim_new_trajectory = False

            # optimize things!
            # initialize Vs with final state cost and set up k, K
            V = l[-1].copy() # value function
            V_x = l_x[-1].copy() # dV / dx
            V_xx = l_xx[-1].copy() # d^2 V / dx^2
            k = np.zeros((tN, dof)) # feedforward modification
            K = np.zeros((tN, dof, num_states)) # feedback gain

            # NOTE: they use V' to denote the value at the next timestep,
            # they have this redundant in their notation making it a
            # function of f(x + dx, u + du) and using the ', but it makes for
            # convenient shorthand when you drop function dependencies

            # work backwards to solve for V, Q, k, and K
            # (step numbers 4a-7c refer to Tassa et al. 2014)
            for t in range(tN-2, -1, -1):

                # NOTE: we're working backwards, so V_x = V_x[t+1] = V'_x

                # 4a) Q_x = l_x + np.dot(f_x^T, V'_x)
                Q_x = l_x[t] + np.dot(f_x[t].T, V_x)
                # 4b) Q_u = l_u + np.dot(f_u^T, V'_x)
                Q_u = l_u[t] + np.dot(f_u[t].T, V_x)

                # NOTE: last term for Q_xx, Q_uu, and Q_ux is vector / tensor product
                # but also note f_xx = f_uu = f_ux = 0 so they're all 0 anyways.

                # 4c) Q_xx = l_xx + np.dot(f_x^T, np.dot(V'_xx, f_x)) + np.einsum(V'_x, f_xx)
                Q_xx = l_xx[t] + np.dot(f_x[t].T, np.dot(V_xx, f_x[t]))
                # 4d) Q_ux = l_ux + np.dot(f_u^T, np.dot(V'_xx, f_x)) + np.einsum(V'_x, f_ux)
                Q_ux = l_ux[t] + np.dot(f_u[t].T, np.dot(V_xx, f_x[t]))
                # 4e) Q_uu = l_uu + np.dot(f_u^T, np.dot(V'_xx, f_u)) + np.einsum(V'_x, f_uu)
                Q_uu = l_uu[t] + np.dot(f_u[t].T, np.dot(V_xx, f_u[t]))

                # Calculate Q_uu^-1 with regularization term set by
                # Levenberg-Marquardt heuristic (at end of this loop):
                # clamp negative eigenvalues to zero and add lambda before
                # inverting, so the inverse is always well-defined.
                Q_uu_evals, Q_uu_evecs = np.linalg.eig(Q_uu)
                Q_uu_evals[Q_uu_evals < 0] = 0.0
                Q_uu_evals += lamb
                Q_uu_inv = np.dot(Q_uu_evecs,
                        np.dot(np.diag(1.0/Q_uu_evals), Q_uu_evecs.T))

                # 5b) k = -np.dot(Q_uu^-1, Q_u)
                k[t] = -np.dot(Q_uu_inv, Q_u)
                # 5b) K = -np.dot(Q_uu^-1, Q_ux)
                K[t] = -np.dot(Q_uu_inv, Q_ux)

                # 6a) DV = -.5 np.dot(k^T, np.dot(Q_uu, k))
                # 6b) V_x = Q_x - np.dot(K^T, np.dot(Q_uu, k))
                V_x = Q_x - np.dot(K[t].T, np.dot(Q_uu, k[t]))
                # 6c) V_xx = Q_xx - np.dot(-K^T, np.dot(Q_uu, K))
                V_xx = Q_xx - np.dot(K[t].T, np.dot(Q_uu, K[t]))

            Unew = np.zeros((tN, dof))
            # calculate the optimal change to the control trajectory
            xnew = x0.copy() # 7a)
            for t in range(tN - 1):
                # use feedforward (k) and feedback (K) gain matrices
                # calculated from our value function approximation
                # to take a stab at the optimal control signal
                Unew[t] = U[t] + k[t] + np.dot(K[t], xnew - X[t]) # 7b)
                # given this u, find our next state
                _,xnew = self.plant_dynamics(xnew, Unew[t]) # 7c)

            # evaluate the new trajectory
            Xnew, costnew = self.simulate(x0, Unew)

            # Levenberg-Marquardt heuristic
            if costnew < cost:
                # decrease lambda (get closer to Newton's method)
                lamb /= self.lamb_factor

                X = np.copy(Xnew) # update trajectory
                U = np.copy(Unew) # update control signal
                oldcost = np.copy(cost)
                cost = np.copy(costnew)

                sim_new_trajectory = True # do another rollout

                # print("iteration = %d; Cost = %.4f;"%(ii, costnew) +
                #         " logLambda = %.1f"%np.log(lamb))
                # check to see if update is small enough to exit
                if ii > 0 and ((abs(oldcost-cost)/cost) < self.eps_converge):
                    print("Converged at iteration = %d; Cost = %.4f;"%(ii,costnew) +
                            " logLambda = %.1f"%np.log(lamb))
                    break
            else:
                # increase lambda (get closer to gradient descent)
                lamb *= self.lamb_factor
                # print("cost: %.4f, increasing lambda to %.4f")%(cost, lamb)
                if lamb > self.lamb_max:
                    print("lambda > max_lambda at iteration = %d;"%ii +
                        " Cost = %.4f; logLambda = %.1f"%(cost,
                                                          np.log(lamb)))
                    break

        return X, U, cost
def plant_dynamics(self, x, u):
""" simulate a single time step of the plant, from
initial state x and applying control signal u
x np.array: the state of the system
u np.array: the control signal
"""
# set the arm position to x
self.arm.reset(q=x[:self.arm.DOF],
dq=x[self.arm.DOF:self.arm.DOF*2])
# apply the control signal
self.arm.apply_torque(u, self.arm.dt)
# get the system state from the arm
xnext = np.hstack([np.copy(self.arm.q),
np.copy(self.arm.dq)])
# calculate the change in state
xdot = ((xnext - x) / self.arm.dt).squeeze()
return xdot, xnext
def reset(self, arm, q_des):
""" reset the state of the system """
# Index along current control sequence
self.t = 0
self.U = np.zeros((self.tN, arm.DOF))
self.old_target = self.target.copy()
def simulate(self, x0, U):
""" do a rollout of the system, starting at x0 and
applying the control sequence U
x0 np.array: the initial state of the system
U np.array: the control sequence to apply
"""
tN = U.shape[0]
num_states = x0.shape[0]
dt = self.arm.dt
X = np.zeros((tN, num_states))
X[0] = x0
cost = 0
# Run simulation with substeps
for t in range(tN-1):
_,X[t+1] = self.plant_dynamics(X[t], U[t])
l,_,_,_,_,_ = self.cost(X[t], U[t])
cost = cost + dt * l
# Adjust for final cost, subsample trajectory
l_f,_,_ = self.cost_final(X[-1])
cost = cost + l_f
return X, cost
|
studywolf/control
|
studywolf_control/controllers/ilqr.py
|
Python
|
gpl-3.0
| 17,013
|
[
"Gaussian"
] |
258eec645db94c2f4ec7d64f6f3c38dbc30aa321f6ec931b9283747f38df74de
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for data simulation, to test algorithms utilized in diagnostics.
Tong Zhang <zhangt@frib.msu.edu>
2017-03-27 11:22:25 AM EDT
"""
from __future__ import division
from __future__ import print_function
from phantasy.library.physics import Point
import numpy as np
class Distribution(object):
    """Particle distribution for transverse plane, i.e. ``x-o-y`` plane,
    default is Gaussian distribution.

    Parameters
    ----------
    x0 : float
        Mean value along ``x`` direction.
    y0 : float
        Mean value along ``y`` direction.
    sx : float
        Standard deviation along ``x`` direction.
    sy : float
        Standard deviation along ``y`` direction.
    N : int
        Total point number of particle distribution.

    Keyword Arguments
    -----------------
    mean : list
        Central point, ``[x0, y0]``, overrides *x0* and *y0*.
    cov : list
        Covariance matrix, overrides *sx* and *sy*.
    rho : float
        Correlation between ``x`` and ``y``, should be within ``[-1, 1]``;
        if omitted or out of range, zero correlation is used.
    distfile : string
        Name of data file to load distribution, contains x and y data,
        if *distfile* is valid, the internal data generation would be
        ignored.
    distdata : array
        Array with shape of ``(2,n)`` to initialize distribution.
    """

    def __init__(self, x0=0, y0=0, sx=0.1, sy=0.1, N=1000, **kws):
        self.distype = None
        distfile = kws.get('distfile', None)
        distdata = kws.get('distdata', None)
        if distdata is not None:
            # highest priority: initialize directly from a (2, n) array
            self.particles = distdata
        elif self.load_distfile(distfile):
            # second priority: load from an external data file
            print("Load distribution from '{}'".format(distfile))
        else:
            # fall back to internal Gaussian generation
            self._x, self._y = None, None
            mean = kws.get('mean', None)
            if mean is None:
                mean = [x0, y0]
            cov = kws.get('cov', None)
            if cov is None:
                rho = kws.get('rho', None)
                # BUGFIX: the original compared ``-1.0 <= rho`` without a
                # None check; on Python 3 that raises TypeError whenever
                # *rho* is omitted (i.e. on plain Distribution()).
                if rho is not None and -1.0 <= rho <= 1.0:
                    cxy = rho * sx * sy
                else:
                    cxy = 0
                cov = [[sx ** 2, cxy], [cxy, sy ** 2]]
            self.distype = 'gaussian'
            self.particles = Distribution.generate_gaussian_distrubution(
                mean, cov, N)

    def load_distfile(self, distfile):
        """Try to load x/y distribution data from *distfile*.

        Returns
        -------
        ret : bool
            True on success (``distype`` becomes 'external'),
            False if *distfile* is None, unreadable or malformed.
        """
        if distfile is None:
            return False
        try:
            data = np.loadtxt(distfile)
            if data.shape[0] == 2:
                self._x, self._y = data
            else:
                self._x, self._y = data.T
        except (IOError, OSError, ValueError, TypeError):
            # BUGFIX: narrowed from a bare ``except`` (which also swallowed
            # KeyboardInterrupt/SystemExit) to the I/O and parse errors that
            # np.loadtxt and the unpacking can actually raise here.
            return False
        self.distype = 'external'
        return True

    @property
    def particles(self):
        """tuple: Array of x, y distribution."""
        return self._x, self._y

    @particles.setter
    def particles(self, p):
        self._x, self._y = p

    @staticmethod
    def generate_gaussian_distrubution(mean, cov, N):
        """Generate random two-dimensional Gaussian distribution.

        (Name kept as-is, including the historical misspelling, for
        backward compatibility with existing callers.)
        """
        x, y = np.random.multivariate_normal(mean, cov, N).T
        return x, y

    def draw(self):
        """Draw particles with matplotlib (opens a blocking window)."""
        if self._x is None:
            print("Particle distribution is not ready yet.")
            return 1
        else:
            import matplotlib.pyplot as plt
            x, y = self.particles
            plt.plot(x, y, '.')
            plt.show()

    @staticmethod
    def get_covariance(xarr, yarr, **kws):
        """Get covariance matrix of 'x' and 'y' array.

        Parameters
        ----------
        xarr : array
            X array.
        yarr : array
            Y array.

        Keyword Arguments
        -----------------
        norm :
            If set, return normalized covariance (correlation matrix).

        Returns
        -------
        ret : array
            Covariance matrix.
        """
        if kws.get('norm', None) is not None:
            return np.corrcoef(xarr, yarr)
        else:
            return np.cov(xarr, yarr)

    def get_cov(self, **kws):
        """Return covariance of x and y of distribution,
        if *norm* keyword is set, return normalized one.
        """
        return Distribution.get_covariance(self._x, self._y, **kws)

    def resample(self):
        """Generate normal distribution by resampling.

        Returns
        -------
        ret : Distribution
            New Distribution instance with the same first/second moments.
        """
        mean = [np.mean(self._x), np.mean(self._y)]
        cov = np.cov(self._x, self._y)
        N = self._x.size
        return Distribution(mean=mean, cov=cov, N=N)

    def rotate(self, angle, p0=None):
        """Rotate particle distribution of *angle* w.r.t. *p0*.

        Parameters
        ----------
        angle : float
            Anti-clockwised rotating angle, degree.
        p0 : Point
            Rotating central point, ``(0,0)`` by default.

        Returns
        -------
        ret : Distribution
            New Distribution after rotation.
        """
        if p0 is None:
            p0 = Point(0, 0)
        data0 = np.array(self.particles)
        # replicate the pivot so it can be subtracted from every particle
        disp = np.tile(p0[:], [int(data0.size / 2), 1]).T
        theta = angle / 180.0 * np.pi
        # standard anti-clockwise 2D rotation matrix
        m = np.array([[np.cos(theta), -np.sin(theta)],
                      [np.sin(theta), np.cos(theta)]])
        data1 = np.dot(m, data0 - disp) + disp
        return Distribution(distdata=data1)

    def __repr__(self):
        x, y = self._x, self._y
        cov = np.cov(x, y)
        x0, y0 = x.mean(), y.mean()
        sx, sy = x.std(ddof=1), y.std(ddof=1)
        rho_xy = cov[0, 1] / cov[0, 0] ** 0.5 / cov[1, 1] ** 0.5
        ret = '(x_0, y_0) = ({0:.3f},{1:.3f})\n'.format(x0, y0)
        ret += 'sigma_x = {0:.3f}\n'.format(sx)
        ret += 'sigma_y = {0:.3f}\n'.format(sy)
        ret += '(x,y) correlation = {0:.3f}'.format(rho_xy)
        return ret
if __name__ == '__main__':
    # Ad-hoc demo / smoke tests; each ds.draw() opens a blocking
    # matplotlib window, so this is intended for interactive use only.
    # default
    print("{0}{1}{0}".format('-' * 10, 'default'))
    ds = Distribution()
    print(ds.get_cov())
    print(ds.get_cov(norm='True'))
    ds.draw()
    # internal gaussian w/o correlation
    print("{0}{1}{0}".format('-' * 10, 'gaussian/rho=0'))
    ds = Distribution(1, 1, 2, 3, 50000)
    print(ds.get_cov())
    print(ds.get_cov(norm='True'))
    ds.draw()
    # internal gaussian with correlation
    print("{0}{1}{0}".format('-' * 10, 'gaussian/rho=0.5'))
    ds = Distribution(1, 1, 2, 3, 50000, rho=0.5)
    print(ds.get_cov())
    print(ds.get_cov(norm='True'))
    ds.draw()
    # load external (path is relative; only works from the repo test tree)
    print("{0}{1}{0}".format('-' * 10, 'external file'))
    ds = Distribution(distfile='../../../tests/temp/dist.dat')
    print(ds.distype)
    print(ds.get_cov())
    print(ds.get_cov(norm='True'))
    ds.draw()
    # resample
    print("Resample external loaded dist")
    ds1 = ds.resample()
    ds1.draw()
|
archman/phantasy
|
phantasy/library/physics/particles.py
|
Python
|
bsd-3-clause
| 7,093
|
[
"Gaussian"
] |
ac784a6b0e444cae85bc8dd0f0aa5307b25684efd3dfe642cb913a794170d5a1
|
#!/usr/bin/python
'''
Modified by Bill Bushey <wbushey@acm.org> and Brian Young <byoung061@gmail.com> on August 10th, 2009
'''
import htmllib
import formatter
import string
import sys,urllib
import time
class HtmlTokenParser(htmllib.HTMLParser):
    # Tokenizes an HTML document (Python 2 ``htmllib``-based parser).
    # Builds two parallel lists while parsing:
    #   tokens: tag strings ("<tag>", "<\tag>") and individual text words
    #   binary_tokens: +1 for every tag token, -1 for every text token
    def __init__(self, verbose=0):
        self.tokens = []
        self.binary_tokens = []
        f = formatter.NullFormatter()
        htmllib.HTMLParser.__init__(self, f, verbose)
    def unknown_tag(self):
        # NOTE(review): not a standard htmllib callback signature -- confirm
        # this is ever invoked by the base parser.
        self.tokens.append("TAG")
        self.binary_tokens.append(1)
    def unknown_starttag(self,tag, attrs):
        self.tokens.append("<"+tag+">")
        self.binary_tokens.append(1)
    def unknown_endtag(self,tag):
        self.tokens.append("<\\"+tag+">")
        self.binary_tokens.append(1)
    def handle_data(self,data):
        # split free text into words; each word becomes one -1 token
        for t in string.split(data):
            self.tokens.append(t)
            self.binary_tokens.append(-1)
    def handle_starttag(self,tag, method, attrs):
        self.binary_tokens.append(1)
        self.tokens.append("<"+tag+">")
    def handle_endtag(self,tag, method):
        self.tokens.append("<\\"+tag+">")
        self.binary_tokens.append(1)
class HtmlBodyTextExtractor(HtmlTokenParser):
    ''' Modified to include the initialization of total_tokens_before'''
    def __init__(self):
        HtmlTokenParser.__init__(self)
        # run-length encoding of binary_tokens: positive entries count
        # consecutive tags, negative entries count consecutive text words
        self.encoded = [0]
        # total_tokens_before[i]: number of raw tokens before encoded run i
        self.total_tokens_before = [0]
        # prefix/suffix sums of tag counts (filled in by close())
        self.lookup0N = [0]
        self.lookupN0 = [0]
        self.body_txt = ""
    def close(self):
        # finish parsing, then build the encoding and lookup tables
        HtmlTokenParser.close(self)
        self._encode_binary_tokens()
        self._initialise_lookups()
    ''' Modified to set values in total_tokens_before'''
    def _encode_binary_tokens(self):
        i = 0
        for x in self.binary_tokens:
            # a sign change starts a new run in the encoding
            if(abs(x + self.encoded[i]) < abs(self.encoded[i])):
                self.encoded.append(0)
                self.total_tokens_before.append(self.total_tokens_before[-1])
                i = i + 1
            self.encoded[i] = self.encoded[i] + x
            self.total_tokens_before[i] = self.total_tokens_before[i] + 1
        # total_tokens_before works better in the rest of the class if we shift all values up one index
        self.total_tokens_before.insert(0,0)
    def _initialise_lookups(self):
        # lookup0N[i]: number of tag tokens in encoded runs [0, i)
        t = 0
        for x in self.encoded:
            if(x>0):
                t = t + x
            self.lookup0N.append(t)
        # lookupN0[j]: number of tag tokens in encoded runs [j, end)
        self.encoded.reverse()
        t = 0
        for x in self.encoded:
            if(x>0):
                t = t + x
            self.lookupN0.append(t)
        self.encoded.reverse()
        self.lookupN0.reverse()
        del(self.lookupN0[0]) #will never need these values
        del(self.lookup0N[-1])
    '''
    This method has been modified to be in O(1).
    This version of the method works with the assumption that all nodes are
    either text or tags. Since we can quickly find out the number of tags
    that have occured upto a given region, and the number of total tags up
    to that region, we can quickly calculate the number of text nodes that
    have occured upto that region.
    The original method is available as _objective_fcn_old
    '''
    def _objective_fcn(self,i,j):
        # objective = tags outside [i, j) + text words inside [i, j)
        tags_to_i = self.lookup0N[i]
        tags_after_j = self.lookupN0[j]
        text_to_i = self.total_tokens_before[i] - tags_to_i
        text_to_j = self.total_tokens_before[j] - self.lookup0N[j]
        text_between_i_j = text_to_j - text_to_i
        return_val = tags_to_i + tags_after_j + text_between_i_j
        return return_val
    '''
    The original method, which is in O(n)
    '''
    def _objective_fcn_old(self,i,j):
        return_val = self.lookup0N[i] + self.lookupN0[j]
        for x in self.encoded[i:j]:
            if(x<0):
                return_val = return_val - x
        return return_val
    def _is_tag(self,s):
        if(s[0]=='<' and s[-1]=='>'):
            return(1)
        else:
            return(0)
    '''
    Method which uses the modified version of _objective_fcn, this function is in O(n^2)
    This method has also been modified to improve the finding of the 'start' and 'end' variables
    Finally, body_text now uses the join method for building the output string
    '''
    def body_text(self):
        self.body_txt = ""
        obj_max = 0
        i_max = 0
        j_max = len(self.encoded)-1
        # search every (i, j) run pair for the window maximizing the objective
        for i in range(len(self.encoded)-1):
            if self.encoded[i] > 0:
                continue
            for j in range(i,len(self.encoded)):
                if self.encoded[j] > 0:
                    continue
                obj = self._objective_fcn(i,j)
                if(obj > obj_max):
                    obj_max = obj
                    i_max = i
                    j_max = j
        # convert run indices back to raw-token offsets
        start = self.total_tokens_before[i_max]
        end = self.total_tokens_before[j_max]
        self.body_txt = " ".join(x for x in self.tokens[start:end] if not self._is_tag(x))
        # This is added for testing purposes, so that the old and new versions produce the same string.
        self.body_txt = self.body_txt + " "
        return(self.body_txt)
    '''
    Method which uses _objective_fcn_old, this function is in O(n^3)
    '''
    def body_text_old(self):
        self.body_txt = ""
        obj_max = 0
        i_max = 0
        j_max = len(self.encoded)-1
        for i in range(len(self.encoded)-1):
            for j in range(i,len(self.encoded)):
                obj = self._objective_fcn_old(i,j)
                if(obj > obj_max):
                    obj_max = obj
                    i_max = i
                    j_max = j
        start = 0
        end = 0
        for x in self.encoded[:i_max]:
            start = start + abs(x)
        for x in self.encoded[j_max:]:
            end = end + abs(x)
        for x in self.tokens[start:-end]:
            if(not(self._is_tag(x))):
                self.body_txt = self.body_txt + x + " "
        return(self.body_txt)
    def summary(self, start=0, bytes=255):
        # returns a slice of the extracted body text, computing it on demand
        if(not(self.body_txt)):
            self.body_text()
        return(self.body_txt[start:(start+bytes)])
    '''
    Modified to use the more efficient join method for building the string
    '''
    def full_text(self):
        # all text words in the document, tags stripped
        ft = ""
        ft = " ".join(x for x in self.tokens if not self._is_tag(x))
        return ft
if __name__ == '__main__':
    # Python 2 benchmark driver: compares the O(n^2) body_text() against
    # the original O(n^3) body_text_old() on the HTML file given as argv[1].
    # (Uses print statements and time.clock, so it will not run on Python 3.)
    html = open(sys.argv[1]).read()
    t0 = time.clock()
    p = HtmlBodyTextExtractor()
    p.feed(html)
    p.close()
    r10 = range(10)
    t1 = time.clock()
    for r in r10:
        x = p.body_text()
    t2 = time.clock()
    for r in r10:
        z = p.body_text_old()
    t3 = time.clock()
    x = p.body_text()
    z = p.body_text_old()
    s = p.summary()
    t = p.full_text()
    # print "\nNew Bodytext:\n",x
    # print "\nOld Bodytext:\n",z
    # print "\nFull Text:\n",t
    if (x == z):
        print "The SAME!!!!!\n"
    print "Time to initialize: %f\nTime for new method: %f\nTime for old method: %f\n" % (t1-t0, t2-t1, t3-t2)
# (c) 2001 Aidan Finn
# Released under the terms of the GNU GPL
|
aidanf/BTE
|
BodyTextExtractor.py
|
Python
|
gpl-2.0
| 6,102
|
[
"Brian"
] |
ce849bfdbe36d92378c485d3d3928adabbe5fbf74d3abc7ff54fcbbd8c2269b2
|
#!/usr/bin/env python
########################################
#Globale Karte fuer tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
########################## for CMIP5 charactors
# Paths and metadata for the CMIP5 rsds (surface downwelling shortwave)
# ensemble over the SWIO region; Python 2 script.
DIR='~/climate/CMIP5/rcp85/SWIO'
VARIABLE='rsds'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
AbsTemp=273.15
RefTemp=5
CRUmean=8.148 #1900-2100 land
# Models to highlight individually in the plot; commented entries are
# deliberately excluded from highlighting.
TargetModel=[\
        #'CanESM2',\
        #'BCC-CSM1.1',\
        #'CCSM4',\
        #'CNRM-CM5',\
        #'CSIRO-Mk3.6.0',\
        #'EC-EARTH',\
        #'GFDL-ESM2G',\
        'GFDL-ESM2M',\
        #'GISS-E2-H',\
        #'GISS-E2-R',\
        #'HadGEM2-CC',\
        'HadGEM2-ES',\
        #'INM-CM4',\
        'IPSL-CM5A-LR',\
        #'IPSL-CM5A-MR',\
        #'MIROC-ESM-CHEM',\
        #'MIROC-ESM',\
        #'MIROC5',\
        #'MPI-ESM-LR',\
        #'MRI-CGCM3',\
        #'NorESM1-M',\
        #'MPI-ESM-LR',\
        ]
# Colour palettes: COLORtar indexes TargetModel, COLORall is a general pool.
COLORtar=['darkred','black','deeppink','orange',\
        'orangered','yellow','gold','brown','chocolate',\
        'green','yellowgreen','aqua','olive','teal',\
        'blue','purple','darkmagenta','fuchsia','indigo',\
        'dimgray','black','navy']
COLORall=['darkred','darkblue','darkgreen','deeppink',\
        'red','blue','green','pink','gold',\
        'lime','lightcyan','orchid','yellow','lightsalmon',\
        'brown','khaki','aquamarine','yellowgreen','blueviolet',\
        'snow','skyblue','slateblue','orangered','dimgray',\
        'chocolate','teal','mediumvioletred','gray','cadetblue',\
        'mediumorchid','bisque','tomato','hotpink','firebrick',\
        'Chartreuse','purple','goldenrod',\
        'black','orangered','cyan','magenta']
linestyles=['_', '_', '_', '-', '-',\
    '-', '--','--','--', '--',\
    '_', '_','_','_',\
    '_', '_','_','_',\
    '_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
#================================================ CMIP5 models
# for historical
modelist1=[ 'CCSM4',\
            'CESM1-BGC',\
            'CESM1-CAM5',\
            'CMCC-CESM',\
            'CMCC-CM',\
            'CMCC-CMS',\
            'CNRM-CM5',\
            'CanESM2',\
            'GFDL-CM3',\
            'GFDL-ESM2G',\
            'GFDL-ESM2M',\
            'GISS-E2-H',\
            'GISS-E2-H-CC',\
            'GISS-E2-R',\
            'GISS-E2-R-CC',\
            'HadGEM2-AO',\
            'HadGEM2-ES',\
            'IPSL-CM5A-LR',\
            'IPSL-CM5A-MR',\
            'IPSL-CM5B-LR',\
            'MPI-ESM-LR',\
            'MPI-ESM-MR',\
            'NorESM1-M',\
            'NorESM1-ME',\
            'inmcm4']
# for rcp8.5
#=================================================== define the Plot:
# Set up the shared figure/axes before the per-model curves are drawn.
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
plt.ylabel('Surface Downwelling shortwave flux Change(W m-2)',fontsize=16)
plt.title("Surface Downwelling shortwave flux Change (W m-2) in SWIO simulated by CMIP5 models",fontsize=18)
plt.ylim(-5,5)
plt.xlim(1980,2100)
plt.grid()
plt.xticks(np.arange(1960, 2100+10, 20))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
# vertical at 2005
plt.axvline(x=2005.5,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
#plt.plot(x,y,color="blue",linewidth=4)
########################## for rcp8.5:
########################## for rcp8.5:
# Main loop: read each model's NetCDF file, compute annual-mean anomalies
# relative to the first five years, plot individual target models, then
# the ensemble mean with a 5-95% spread band.
print "========== for hist ==============="
EXPERIMENT='rcp85'
TIME='200601-210012'
YEAR=range(2006,2101)
Nmonth=1140
SumTemp=np.zeros(Nmonth/12)
K=0
for Model in modelist1:
    #define the K-th model input file:
    K=K+1 # for average
    infile1=DIR+'/'\
            +VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_r1i1p1'+'_'+TIME+'.nc.SWIO.nc'
    #rsds_Amon_MPI-ESM-LR_rcp85_r1i1p1_200601-210012.nc.SWIO.nc
    print('the file is == ' +infile1)
    #open input files
    infile=IO.NetCDFFile(infile1,'r')
    # read the variable tas
    TAS=infile.variables[VARIABLE][:,:,:].copy()
    print 'the variable tas ===============: '
    print TAS
    # calculate the annual mean temp:
    TEMP=range(0,Nmonth,12)
    for j in range(0,Nmonth,12):
        # NOTE(review): TAS[j:j+11] covers only 11 months; j:j+12 would be a
        # full year -- confirm whether this off-by-one is intended.
        TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
    print " temp ======================== absolut"
    print TEMP
    # reference temp: mean of 1996-2005
    # NOTE(review): TEMP[0:5] is the first five years of this series
    # (2006-2010 of rcp85), not 1996-2005 as the comment above says -- verify.
    RefTemp=np.mean(TEMP[0:5])
    if K==1:
        ArrRefTemp=[RefTemp]
    else:
        ArrRefTemp=ArrRefTemp+[RefTemp]
    print 'ArrRefTemp ========== ',ArrRefTemp
    TEMP=[t-RefTemp for t in TEMP]
    print " temp ======================== relative to mean of 1986-2005"
    print TEMP
    # get array of temp K*TimeStep
    if K==1:
        ArrTemp=[TEMP]
    else:
        ArrTemp=ArrTemp+[TEMP]
    SumTemp=SumTemp+TEMP
    #print SumTemp
    #=================================================== to plot
    print "======== to plot =========="
    print len(TEMP)
    print 'NO. of year:',len(YEAR)
    #plot only target models
    if Model in TargetModel:
        plt.plot(YEAR,TEMP,label=Model,\
                #linestyles[TargetModel.index(Model)],\
                color=COLORtar[TargetModel.index(Model)],linewidth=2)
    #if Model=='CanESM2':
        #plt.plot(YEAR,TEMP,color="red",linewidth=1)
    #if Model=='MPI-ESM-LR':
        #plt.plot(YEAR,TEMP,color="blue",linewidth=1)
    #if Model=='MPI-ESM-MR':
        #plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print modelist1
plt.plot(YEAR,AveTemp,label=' mean',color="red",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(1980,-2,str(K)+' models',size=16,rotation=0.,
        ha="center",va="center",
        #bbox = dict(boxstyle="round",
        #ec=(1., 0.5, 0.5),
        #fc=(1., 0.8, 0.8),
        )
plt.legend(loc=2)
plt.show()
quit()
########################## for rcp8.5:
########################## for rcp8.5:
|
CopyChat/Plotting
|
GCM_changes/climatechange.rsds.py
|
Python
|
gpl-3.0
| 6,743
|
[
"NetCDF"
] |
ab2ae92a47e16510a0b5a36219a80ea917655922b4ec0adb0b58e027d91026e1
|
#
# Copyright (C) 2003-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" contains factory class for producing signatures
"""
import copy
import numpy
from rdkit.Chem.Pharm2D import Utils
from rdkit.DataStructs import SparseBitVect, IntSparseIntVect, LongSparseIntVect
_verbose = False
class SigFactory(object):
  """Factory for 2D-pharmacophore signatures.

  SigFactory's are used by creating one, setting the relevant
  parameters, then calling the GetSignature() method each time a
  signature is required.
  """

  def __init__(self, featFactory, useCounts=False, minPointCount=2, maxPointCount=3,
               shortestPathsOnly=True, includeBondOrder=False, skipFeats=None,
               trianglePruneBins=True):
    self.featFactory = featFactory
    self.useCounts = useCounts
    self.minPointCount = minPointCount
    self.maxPointCount = maxPointCount
    self.shortestPathsOnly = shortestPathsOnly
    self.includeBondOrder = includeBondOrder
    self.trianglePruneBins = trianglePruneBins
    if skipFeats is None:
      self.skipFeats = []
    else:
      self.skipFeats = skipFeats
    # _bins and sigKlass are filled in by SetBins()/Init()
    self._bins = None
    self.sigKlass = None

  def SetBins(self, bins):
    """ bins should be a list of 2-tuples """
    self._bins = copy.copy(bins)
    self.Init()

  def GetBins(self):
    return self._bins

  def GetNumBins(self):
    return len(self._bins)

  def GetSignature(self):
    # returns a fresh, empty signature vector of the configured size/type
    return self.sigKlass(self._sigSize)

  def _GetBitSummaryData(self, bitIdx):
    # decode a bit into (nPts, feature combo, scaffold, family labels,
    # symmetric distance-bin matrix)
    nPts, combo, scaffold = self.GetBitInfo(bitIdx)
    fams = self.GetFeatFamilies()
    labels = [fams[x] for x in combo]
    dMat = numpy.zeros((nPts, nPts), dtype=numpy.int64)
    dVect = Utils.nPointDistDict[nPts]
    for idx in range(len(dVect)):
      i, j = dVect[idx]
      dMat[i, j] = scaffold[idx]
      dMat[j, i] = scaffold[idx]
    return nPts, combo, scaffold, labels, dMat

  def GetBitDescriptionAsText(self, bitIdx, includeBins=0, fullPage=1):
    """ returns text with a description of the bit
    **Arguments**
      - bitIdx: an integer bit index
      - includeBins: (optional) if nonzero, information about the bins will be
        included as well
      - fullPage: (optional) if nonzero, html headers and footers will
        be included (so as to make the output a complete page)
    **Returns**
      a string with the HTML
    """
    raise NotImplementedError('Missing implementation')

  def GetBitDescription(self, bitIdx):
    """ returns a text description of the bit
    **Arguments**
      - bitIdx: an integer bit index
    **Returns**
      a string
    """
    _, _, _, labels, dMat = self._GetBitSummaryData(bitIdx)
    res = " ".join(labels) + " "
    for row in dMat:
      res += "|" + " ".join([str(x) for x in row])
    res += "|"
    return res

  def _findBinIdx(self, dists, bins, scaffolds):
    """ OBSOLETE: this has been rewritten in C++
    Internal use only
    Returns the index of a bin defined by a set of distances.
    **Arguments**
      - dists: a sequence of distances (not binned)
      - bins: a sorted sequence of distance bins (2-tuples)
      - scaffolds: a list of possible scaffolds (bin combinations)
    **Returns**
      an integer bin index
    **Note**
      the value returned here is not an index in the overall
      signature. It is, rather, an offset of a scaffold in the
      possible combinations of distance bins for a given
      proto-pharmacophore.
    """
    whichBins = [0] * len(dists)
    # This would be a ton easier if we had contiguous bins
    # i.e. if we could maintain the bins as a list of bounds)
    # because then we could use Python's bisect module.
    # Since we can't do that, we've got to do our own binary
    # search here.
    for i, dist in enumerate(dists):
      where = -1
      # do a simple binary search:
      startP, endP = 0, len(bins)
      while startP < endP:
        midP = (startP + endP) // 2
        begBin, endBin = bins[midP]
        if dist < begBin:
          endP = midP
        elif dist >= endBin:
          startP = midP + 1
        else:
          where = midP
          break
      if where < 0:
        # distance fell outside every bin
        return None
      whichBins[i] = where
    res = scaffolds.index(tuple(whichBins))
    if _verbose:
      print('----- _fBI -----------')
      print(' scaffolds:', scaffolds)
      print(' bins:', whichBins)
      print(' res:', res)
    return res

  def GetFeatFamilies(self):
    # sorted list of feature families, minus the explicitly skipped ones
    fams = [fam for fam in self.featFactory.GetFeatureFamilies() if fam not in self.skipFeats]
    fams.sort()
    return fams

  def GetMolFeats(self, mol):
    # per-family lists of atom-id tuples for the molecule's features,
    # ordered to match GetFeatFamilies()
    featFamilies = self.GetFeatFamilies()
    featMatches = {}
    for fam in featFamilies:
      feats = self.featFactory.GetFeaturesForMol(mol, includeOnly=fam)
      featMatches[fam] = [feat.GetAtomIds() for feat in feats]
    return [featMatches[x] for x in featFamilies]

  def GetBitIdx(self, featIndices, dists, sortIndices=True):
    """ returns the index for a pharmacophore described using a set of
      feature indices and distances
    **Arguments***
      - featIndices: a sequence of feature indices
      - dists: a sequence of distance between the features, only the
        unique distances should be included, and they should be in the
        order defined in Utils.
      - sortIndices : sort the indices
    **Returns**
      the integer bit index
    """
    nPoints = len(featIndices)
    if nPoints > 3:
      raise NotImplementedError('>3 points not supported')
    if nPoints < self.minPointCount:
      raise IndexError('bad number of points')
    if nPoints > self.maxPointCount:
      raise IndexError('bad number of points')
    # this is the start of the nPoint-point pharmacophores
    startIdx = self._starts[nPoints]
    #
    # now we need to map the pattern indices to an offset from startIdx
    #
    if sortIndices:
      tmp = list(featIndices)
      tmp.sort()
      featIndices = tmp
    if featIndices[0] < 0:
      raise IndexError('bad feature index')
    if max(featIndices) >= self._nFeats:
      raise IndexError('bad feature index')
    if nPoints == 3:
      # canonicalize the triangle so equivalent orderings map to one bit
      featIndices, dists = Utils.OrderTriangle(featIndices, dists)
    offset = Utils.CountUpTo(self._nFeats, nPoints, featIndices)
    if _verbose:
      print(f'offset for feature {str(featIndices)}: {offset}')
    # each proto-pharmacophore owns one slot per possible scaffold
    offset *= len(self._scaffolds[len(dists)])
    try:
      if _verbose:
        print('>>>>>>>>>>>>>>>>>>>>>>>')
        print('\tScaffolds:', repr(self._scaffolds[len(dists)]), type(
          self._scaffolds[len(dists)]))
        print('\tDists:', repr(dists), type(dists))
        print('\tbins:', repr(self._bins), type(self._bins))
      bin_ = self._findBinIdx(dists, self._bins, self._scaffolds[len(dists)])
    except ValueError:
      fams = self.GetFeatFamilies()
      fams = [fams[x] for x in featIndices]
      raise IndexError('distance bin not found: feats: %s; dists=%s; bins=%s; scaffolds: %s' %
                       (fams, dists, self._bins, self._scaffolds))
    return startIdx + offset + bin_

  def GetBitInfo(self, idx):
    """ returns information about the given bit
    **Arguments**
      - idx: the bit index to be considered
    **Returns**
      a 3-tuple:
        1) the number of points in the pharmacophore
        2) the proto-pharmacophore (tuple of pattern indices)
        3) the scaffold (tuple of distance indices)
    """
    if idx >= self._sigSize:
      raise IndexError(f'bad index ({idx}) queried. {self._sigSize} is the max')
    # first figure out how many points are in the p'cophore
    nPts = self.minPointCount
    while nPts < self.maxPointCount and self._starts[nPts + 1] <= idx:
      nPts += 1
    # how far are we in from the start point?
    offsetFromStart = idx - self._starts[nPts]
    if _verbose:
      print(f'\t {nPts} Points, {offsetFromStart} offset')
    # lookup the number of scaffolds
    nDists = len(Utils.nPointDistDict[nPts])
    scaffolds = self._scaffolds[nDists]
    nScaffolds = len(scaffolds)
    # figure out to which proto-pharmacophore we belong:
    protoIdx = offsetFromStart // nScaffolds
    indexCombos = Utils.GetIndexCombinations(self._nFeats, nPts)
    combo = tuple(indexCombos[protoIdx])
    if _verbose:
      print(f'\t combo: {str(combo)}')
    # and which scaffold:
    scaffoldIdx = offsetFromStart % nScaffolds
    scaffold = scaffolds[scaffoldIdx]
    if _verbose:
      print(f'\t scaffold: {str(scaffold)}')
    return nPts, combo, scaffold

  def Init(self):
    """ Initializes internal parameters.  This **must** be called after
      making any changes to the signature parameters
    """
    accum = 0
    self._scaffolds = [0] * (len(Utils.nPointDistDict[self.maxPointCount + 1]))
    self._starts = {}
    if not self.skipFeats:
      self._nFeats = len(self.featFactory.GetFeatureFamilies())
    else:
      self._nFeats = 0
      for fam in self.featFactory.GetFeatureFamilies():
        if fam not in self.skipFeats:
          self._nFeats += 1
    # accumulate the total signature size over all point counts
    for i in range(self.minPointCount, self.maxPointCount + 1):
      self._starts[i] = accum
      nDistsHere = len(Utils.nPointDistDict[i])
      scaffoldsHere = Utils.GetPossibleScaffolds(i, self._bins,
                                                 useTriangleInequality=self.trianglePruneBins)
      self._scaffolds[nDistsHere] = scaffoldsHere
      accum += (Utils.NumCombinations(self._nFeats, i) * len(scaffoldsHere))
    self._sigSize = accum
    # pick the narrowest container type that can hold the signature
    if not self.useCounts:
      self.sigKlass = SparseBitVect
    elif self._sigSize < 2**31:
      self.sigKlass = IntSparseIntVect
    else:
      self.sigKlass = LongSparseIntVect

  def GetSigSize(self):
    return self._sigSize
try:
  from rdkit.Chem.Pharmacophores import cUtils
except ImportError:
  # optional C++ extension not built; keep the pure-Python binary search
  pass
else:
  # replace the pure-Python _findBinIdx with the faster C++ implementation
  SigFactory._findBinIdx = cUtils.FindBinIdx
|
bp-kelley/rdkit
|
rdkit/Chem/Pharm2D/SigFactory.py
|
Python
|
bsd-3-clause
| 11,121
|
[
"RDKit"
] |
9f508c5266a35f5387e94f3b1b822edc7098a9ba51009103c1b5c45e0283048e
|
import os
from .. import utils
from ..qt import *
from .items import *
from ..globals import settings, app_launchers, file_handlers, current_tools
from pyqtconfig import ConfigManager
try:
import xml.etree.cElementTree as et
except ImportError:
import xml.etree.ElementTree as et
# Editing modes for the workspace scene: normal selection, or placing a
# text / region / arrow annotation with the mouse.
EDITOR_MODE_NORMAL = 0
EDITOR_MODE_TEXT = 1
EDITOR_MODE_REGION = 2
EDITOR_MODE_ARROW = 3
class QGraphicsSceneExtend(QGraphicsScene):
def __init__(self, parent, *args, **kwargs):
super(QGraphicsSceneExtend, self).__init__(parent, *args, **kwargs)
self.m = parent.m
self.config = ConfigManager()
# These config settings are transient (ie. not stored between sessions)
self.config.set_defaults({
'mode': EDITOR_MODE_NORMAL,
'font-family': 'Arial',
'font-size': '12',
'text-bold': False,
'text-italic': False,
'text-underline': False,
'text-color': '#000000',
'color-border': None, # '#000000',
'color-background': None,
})
# Pre-set these values (will be used by default)
self.config.set('color-background', '#5555ff')
self.background_image = QImage(os.path.join(utils.scriptdir, 'icons', 'grid100.png'))
if settings.get('Editor/Show_grid'):
self.showGrid()
else:
self.hideGrid()
self.mode = EDITOR_MODE_NORMAL
self.mode_current_object = None
self.annotations = []
def mousePressEvent(self, e):
if self.config.get('mode') != EDITOR_MODE_NORMAL:
for i in self.selectedItems():
i.setSelected(False)
if self.config.get('mode') == EDITOR_MODE_TEXT:
tw = AnnotationTextItem(position=e.scenePos())
elif self.config.get('mode') == EDITOR_MODE_REGION:
tw = AnnotationRegionItem(position=e.scenePos())
elif self.config.get('mode') == EDITOR_MODE_ARROW:
tw = AnnotationRegionItem(position=e.scenePos())
self.addItem(tw)
self.mode_current_object = tw
tw._createFromMousePressEvent(e)
tw.importStyleConfig(self.config)
self.annotations.append(tw)
else:
for i in self.selectedItems():
i.setSelected(False)
super(QGraphicsSceneExtend, self).mousePressEvent(e)
def mouseMoveEvent(self, e):
if self.config.get('mode') == EDITOR_MODE_TEXT and self.mode_current_object:
self.mode_current_object._resizeFromMouseMoveEvent(e)
elif self.config.get('mode') == EDITOR_MODE_REGION and self.mode_current_object:
self.mode_current_object._resizeFromMouseMoveEvent(e)
else:
super(QGraphicsSceneExtend, self).mouseMoveEvent(e)
def mouseReleaseEvent(self, e):
if self.config.get('mode'):
self.mode_current_object.setSelected(True)
self.mode_current_object.setFocus()
self.config.set('mode', EDITOR_MODE_NORMAL)
self.mode_current_object = None
super(QGraphicsSceneExtend, self).mouseReleaseEvent(e)
def showGrid(self):
self.setBackgroundBrush(QBrush(self.background_image))
def hideGrid(self):
self.setBackgroundBrush(QBrush(None))
def onSaveAsImage(self):
filename, _ = QFileDialog.getSaveFileName(self.m, 'Save current figure', '', "Tagged Image File Format (*.tif);;\
Portable Network Graphics (*.png)")
if filename:
self.saveAsImage(filename)
def saveAsImage(self, f):
self.image = QImage(self.sceneRect().size().toSize(), QImage.Format_ARGB32)
self.image.fill(Qt.white)
painter = QPainter(self.image)
self.render(painter)
self.image.save(f)
def addApp(self, app, position=None):
i = ToolItem(app, position=position)
self.addItem(i)
#i.onShow()
return i
def removeApp(self, app):
i = app.editorItem
self.removeItem(i)
app.editorItem = None
def dragEnterEvent(self, e):
if e.mimeData().hasFormat('application/x-pathomx-app') or e.mimeData().hasFormat('text/uri-list'):
e.accept()
else:
e.ignore()
def dragMoveEvent(self, e):
e.accept()
def dropEvent(self, e):
scenePos = e.scenePos() - QPointF(32, 32)
if e.mimeData().hasFormat('application/x-pathomx-app'):
try:
app_id = str(e.mimeData().data('application/x-pathomx-app'), 'utf-8') # Python 3
except:
app_id = str(e.mimeData().data('application/x-pathomx-app')) # Python 2
e.setDropAction(Qt.CopyAction)
a = app_launchers[app_id](self.m, position=scenePos, auto_focus=False)
#self.centerOn(a.editorItem)
e.accept()
elif e.mimeData().hasFormat('text/uri-list'):
for ufn in e.mimeData().urls():
fn = ufn.path()
fnn, ext = os.path.splitext(fn)
ext = ext.strip('.')
if ext in file_handlers:
a = file_handlers[ext](position=scenePos, auto_focus=False, filename=fn)
self.centerOn(a.editorItem)
e.accept()
def createApp(self, app_id):
# We have no position data, so auto-create at the furthest-right + 200
x, y = [], []
for a in current_tools:
i = a.editorItem
x.append(i.x())
y.append(i.y())
if len(x) > 0:
# Default position
target_x = max(x) + 200
target_y = sum(y) / len(y)
else:
target_x = 0
target_y = 0
a = app_launchers[app_id](self.m, position=QPointF(target_x, target_y), auto_focus=False)
    def getXMLAnnotations(self, root):
        """Serialise every scene annotation as an <Annotation> child of *root*.

        Each element records the annotation class name, position and size,
        optional text, plus the annotation's own config XML. Returns *root*.
        """
        # Iterate over the entire set (in order) creating a XML representation of the MatchDef and Style
        for annotation in self.annotations:
            ase = et.SubElement(root, "Annotation")
            ase.set('type', type(annotation).__name__)
            ase.set('x', str(annotation.x()))
            ase.set('y', str(annotation.y()))
            ase.set('width', str(annotation.rect().width()))
            ase.set('height', str(annotation.rect().height()))
            if hasattr(annotation, 'text'):
                text = et.SubElement(ase, "Text")
                text.text = annotation.text.toPlainText()
            # getXMLConfig appends the style/config data and returns the element
            ase = annotation.config.getXMLConfig(ase)
        return root
    def setXMLAnnotations(self, root):
        """Rebuild scene annotations from the <Annotation> children of *root*.

        Inverse of getXMLAnnotations: only whitelisted annotation classes are
        instantiated; geometry, text and per-annotation config are restored.
        """
        # Whitelist of constructible annotation classes (guards against
        # arbitrary type names appearing in the XML).
        ANNOTATION_TYPES = {
            'AnnotationTextItem': AnnotationTextItem,
            'AnnotationRegionItem': AnnotationRegionItem,
        }
        for ase in root.findall('Annotation'):
            # Validate the class definition before creating it
            if ase.get('type') in ANNOTATION_TYPES:
                pos = QPointF(float(ase.get('x')), float(ase.get('y')))
                aobj = ANNOTATION_TYPES[ase.get('type')](position=pos)
                aobj.setRect(QRectF(0, 0, float(ase.get('width')), float(ase.get('height'))))
                to = ase.find('Text')
                if to is not None:
                    aobj.text.setPlainText(to.text)
                self.addItem(aobj)
                self.annotations.append(aobj)
                aobj.config.setXMLConfig(ase)
                aobj.applyStyleConfig()
class WorkspaceEditorView(QGraphicsView):
    """Graphics view hosting the workspace editor scene.

    Owns a QGraphicsSceneExtend and keeps an invisible rectangle item in
    sync with the viewport so the scene rect always covers the visible area.
    """

    def __init__(self, parent=None):
        super(WorkspaceEditorView, self).__init__(parent)
        self.m = parent  # main-window reference, passed through to the scene

        self.setRenderHint(QPainter.Antialiasing, True)
        self.setRenderHint(QPainter.SmoothPixmapTransform, True)

        self.setAcceptDrops(True)

        self.scene = QGraphicsSceneExtend(self)
        # NoIndex: items move frequently, so skip the BSP-tree index.
        self.scene.setItemIndexMethod(QGraphicsScene.NoIndex)

        self.setScene(self.scene)
        # self.scale(1.0, 1.0)
        self.resetScene()

    def resetScene(self):
        """Clear the scene and re-add the viewport-sized anchor rectangle."""
        self.scene.clear()
        r = QRectF(self.mapToScene(QPoint(0, 0)), self.mapToScene(QPoint(self.width(), self.height())))
        # Invisible rect that pins the scene rect to the visible viewport.
        self._scene_extreme_rect = self.scene.addRect(r, pen=QPen(Qt.NoPen), brush=QBrush(Qt.NoBrush))

    def resizeEvent(self, e):
        # Track viewport resizes so the anchor rect always matches the view.
        self._scene_extreme_rect.setRect(QRectF(
            self.mapToScene(QPoint(0, 0)),
            self.mapToScene(QPoint(self.width(), self.height()))
        ))
|
pathomx/pathomx
|
pathomx/editor/editor.py
|
Python
|
gpl-3.0
| 8,611
|
[
"ASE"
] |
0705159fa84001ba0a2f297ce24a93a31d1fb1e15f60824e664eaffc6b09432a
|
# $Id$
#
# Copyright (C) 2002-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" tools for interacting with chemdraw
"""
from __future__ import print_function
import string, tempfile, os, time
try:
    # win32com/pythoncom exist only on Windows; generate and cache the
    # ChemDraw COM type library (the GUID below, major version 7).
    import pythoncom
    from win32com.client import gencache, Dispatch, constants
    import win32com.client.gencache
    cdxModule = win32com.client.gencache.EnsureModule("{5F646AAB-3B56-48D2-904C-A68D7989C251}", 0, 7, 0)
except:
    cdxModule = None
    _cdxVersion = 0
    raise ImportError("ChemDraw version (at least version 7) not found.")
else:
    _cdxVersion = 7

if cdxModule:
    from win32com.client import Dispatch
    import win32gui
import re

# Module-level COM state: the ChemDraw application object, the active
# document/object collection, and cached menu items. All are populated by
# StartChemDraw() and torn down by CloseChemdraw()/Exit().
cdApp = None
theDoc = None
theObjs = None
selectItem = None
cleanItem = None
centerItem = None
def StartChemDraw(visible=True, openDoc=False, showDoc=False):
    """ launches chemdraw """
    global cdApp, theDoc, theObjs, selectItem, cleanItem, centerItem
    if cdApp is not None:
        # if called more than once, do a restart
        holder = None
        selectItem = None
        cleanItem = None
        centerItem = None
        theObjs = None
        theDoc = None
        cdApp = None

    cdApp = Dispatch('ChemDraw.Application')
    if openDoc:
        theDoc = cdApp.Documents.Add()
        theObjs = theDoc.Objects
    else:
        theDoc = None
    # Cache menu items used by CDXClean/CDXDisplay. The menu layout moved
    # between ChemDraw versions, hence the _cdxVersion check below.
    selectItem = cdApp.MenuBars(1).Menus(2).MenuItems(8)
    cleanItem = cdApp.MenuBars(1).Menus(5).MenuItems(6)
    if _cdxVersion == 6:
        centerItem = cdApp.MenuBars(1).Menus(4).MenuItems(1)
    else:
        centerItem = cdApp.MenuBars(1).Menus(4).MenuItems(7)
    if visible:
        cdApp.Visible = 1
        if theDoc and showDoc:
            theDoc.Activate()


def ReactivateChemDraw(openDoc=True, showDoc=True):
    """Make ChemDraw visible again and (optionally) add/activate a document."""
    global cdApp, theDoc, theObjs
    cdApp.Visible = 1
    if openDoc:
        theDoc = cdApp.Documents.Add()
    if theDoc and showDoc:
        theDoc.Activate()
    theObjs = theDoc.Objects
# ------------------------------------------------------------------
# interactions with Chemdraw
# ------------------------------------------------------------------
def CDXConvert(inData, inFormat, outFormat):
    """converts the data passed in from one format to another

    inFormat should be one of the following:
       chemical/x-cdx                   chemical/cdx
       chemical/x-daylight-smiles       chemical/daylight-smiles
       chemical/x-mdl-isis              chemical/mdl-isis
       chemical/x-mdl-molfile           chemical/mdl-molfile
       chemical/x-mdl-rxn               chemical/mdl-rxn
       chemical/x-mdl-tgf               chemical/mdl-tgf
       chemical/x-questel-F1
       chemical/x-questel-F1-query

    outFormat should be one of the preceding or:
       image/x-png                      image/png
       image/x-wmf                      image/wmf
       image/tiff
       application/postscript
       image/gif
    """
    global theObjs, theDoc
    # Lazily start ChemDraw and create a scratch document/object collection.
    if cdApp is None:
        StartChemDraw()
    if theObjs is None:
        if theDoc is None:
            theDoc = cdApp.Documents.Add()
        theObjs = theDoc.Objects
    # Load the input, read it back in the target format, then clear the
    # scratch objects so the document can be reused for the next call.
    theObjs.SetData(inFormat, inData, pythoncom.Missing)
    outD = theObjs.GetData(outFormat)
    theObjs.Clear()
    return outD
def CDXClean(inData, inFormat, outFormat):
    """calls the CDXLib Clean function on the data passed in.

    CDXLib_Clean attempts to clean (prettify) the data before
    doing an output conversion.  It can be thought of as CDXConvert++.

    CDXClean supports the same input and output specifiers as CDXConvert
    (see above)
    """
    global cdApp, theDoc, theObjs, selectItem, cleanItem
    if cdApp is None:
        StartChemDraw()
    if theObjs is None:
        if theDoc is None:
            theDoc = cdApp.Documents.Add()
        theObjs = theDoc.Objects
    theObjs.SetData(inFormat, inData, pythoncom.Missing)
    # Select everything and invoke ChemDraw's Clean menu item (cached by
    # StartChemDraw) before reading the result back.
    theObjs.Select()
    cleanItem.Execute()
    outD = theObjs.GetData(outFormat)
    theObjs.Clear()
    return outD
def CDXDisplay(inData, inFormat='chemical/cdx', clear=1):
    """ displays the data in Chemdraw """
    global cdApp, theDoc, theObjs, selectItem, cleanItem, centerItem
    if cdApp is None:
        StartChemDraw()
    try:
        theDoc.Activate()
    except:
        # theDoc may be None, or the user may have closed the document;
        # re-open a fresh one.
        ReactivateChemDraw()
        theObjs = theDoc.Objects
    if clear:
        theObjs.Clear()
    theObjs.SetData(inFormat, inData, pythoncom.Missing)
    return


def CDXGrab(outFormat='chemical/x-mdl-molfile'):
    """ returns the contents of the active chemdraw document

    """
    global cdApp, theDoc
    if cdApp is None:
        res = ""
    else:
        cdApp.Visible = 1
        if not cdApp.ActiveDocument:
            ReactivateChemDraw()
        try:
            res = cdApp.ActiveDocument.Objects.GetData(outFormat)
        except:
            # COM call can fail if the document vanished mid-call; return
            # an empty result rather than propagating.
            res = ""
    return res
def CloseChemdraw():
    """ shuts down chemdraw

    """
    global cdApp
    try:
        cdApp.Quit()
    except:
        # already gone (or never started); nothing to do
        pass
    Exit()


def Exit():
    """ destroys our link to Chemdraw

    """
    global cdApp
    cdApp = None
def SaveChemDrawDoc(fileName='save.cdx'):
    """force chemdraw to save the active document

    NOTE: the extension of the filename will determine the format
      used to save the file.
    """
    d = cdApp.ActiveDocument
    d.SaveAs(fileName)


def CloseChemDrawDoc():
    """force chemdraw to close the active document

    """
    d = cdApp.ActiveDocument
    d.Close()
def RaiseWindowNamed(nameRe):
    """Bring the first top-level window whose title matches *nameRe* to front."""
    # Collect the handles of every top-level window.
    wins = []
    win32gui.EnumWindows(lambda hwnd, acc: acc.append(hwnd), wins)

    # Pick the first window whose title matches the regexp.
    tgtWin = -1
    for hwnd in wins:
        if nameRe.match(win32gui.GetWindowText(hwnd)):
            tgtWin = hwnd
            break

    if tgtWin >= 0:
        win32gui.ShowWindow(tgtWin, 1)
        win32gui.BringWindowToTop(tgtWin)


def RaiseChemDraw():
    """Raise the ChemDraw application window."""
    RaiseWindowNamed(re.compile('^ChemDraw'))
try:
    from PIL import Image
    from io import BytesIO

    def SmilesToPilImage(smilesStr):
        """takes a SMILES string and returns a PIL image using chemdraw
        """
        return MolToPilImage(smilesStr, inFormat='chemical/daylight-smiles', outFormat='image/gif')

    def MolToPilImage(dataStr, inFormat='chemical/daylight-smiles', outFormat='image/gif'):
        """takes a molecule string and returns a PIL image using chemdraw
        """
        # do the conversion...
        res = CDXConvert(dataStr, inFormat, outFormat)
        # The converted image data is binary, so wrap it in BytesIO:
        # StringIO(str(res)) would corrupt the byte stream (and fails under
        # Python 3, where PIL requires a binary file object).
        dataFile = BytesIO(res if isinstance(res, bytes) else bytes(res))
        img = Image.open(dataFile).convert('RGB')
        return img
except ImportError:
    def SmilesToPilImage(smilesStr):
        print('You need to have PIL installed to use this functionality')
        return None

    def MolToPilImage(dataStr, inFormat='chemical/daylight-smiles', outFormat='image/gif'):
        print('You need to have PIL installed to use this functionality')
        return None
# ------------------------------------------------------------------
# interactions with Chem3D
# ------------------------------------------------------------------
# Module-level Chem3D COM application object (populated by StartChem3D).
c3dApp = None


def StartChem3D(visible=0):
    """ launches Chem3D """
    global c3dApp
    c3dApp = Dispatch('Chem3D.Application')
    if not c3dApp.Visible:
        c3dApp.Visible = visible


def CloseChem3D():
    """ shuts down Chem3D """
    global c3dApp
    c3dApp.Quit()
    c3dApp = None


# MM2 result properties that Add3DCoordsToMol reads back from a Chem3D document.
availChem3DProps = ('DipoleMoment', 'BendEnergy', 'Non14VDWEnergy', 'StericEnergy',
                    'StretchBendEnergy', 'StretchEnergy', 'TorsionEnergy', 'VDW14Energy')
def Add3DCoordsToMol(data, format, props=None):
    """ adds 3D coordinates to the data passed in using Chem3D

    **Arguments**

      - data: the molecular data

      - format: the format of _data_.  Should be something accepted by
        _CDXConvert_

      - props: (optional) a dictionary used to return calculated properties

    """
    global c3dApp
    if props is None:
        # A fresh dict per call; the old mutable default ({}) was silently
        # shared (and mutated) across calls that omitted the argument.
        props = {}
    if c3dApp is None:
        StartChem3D()
    # Chem3D only reads molfiles; convert anything else first.
    if format != 'chemical/mdl-molfile':
        molData = CDXClean(data, format, 'chemical/mdl-molfile')
    else:
        molData = data

    molFName = tempfile.mktemp('.mol')
    # Close the handle promptly so Chem3D can open the file.
    with open(molFName, 'wb+') as molF:
        molF.write(molData)
    doc = c3dApp.Documents.Open(molFName)
    if not doc:
        print('cannot open molecule')
        raise ValueError('No Molecule')

    # set up the MM2 job
    job = Dispatch('Chem3D.MM2Job')
    job.Type = 1
    job.DisplayEveryIteration = 0
    job.RecordEveryIteration = 0

    # start the calculation...
    doc.MM2Compute(job)
    # and wait for it to finish (busy-wait on the COM status constants)
    while doc.ComputeStatus in [0x434f4d50, 0x50454e44]:
        pass
    #outFName = tempfile.mktemp('.mol')
    # this is horrible, but apparently Chem3D gets pissy with tempfiles:
    outFName = os.getcwd() + '/to3d.mol'
    doc.SaveAs(outFName)

    # generate the properties (plain attribute lookup; eval() was both
    # slower and an injection hazard)
    for prop in availChem3DProps:
        props[prop] = getattr(doc, prop)
    doc.Close(0)
    os.unlink(molFName)
    with open(outFName, 'r') as resF:
        c3dData = resF.read()
    # Chem3D may still hold the output file briefly; retry the delete.
    gone = 0
    while not gone:
        try:
            os.unlink(outFName)
        except:
            time.sleep(.5)
        else:
            gone = 1
    return c3dData
def OptimizeSDFile(inFileName, outFileName, problemFileName='problems.sdf',
                   restartEvery=20):
    """ optimizes the structure of every molecule in the input SD file

    **Arguments**

      - inFileName: name of the input SD file

      - outFileName: name of the output SD file

      - problemFileName: (optional) name of the SD file used to store molecules which
        fail during the optimization process

      - restartEvery: (optional) Chem3D will be shut down and restarted
        every _restartEvery_ molecules to try and keep core leaks under control

    """
    inFile = open(inFileName, 'r')
    outFile = open(outFileName, 'w+')
    problemFile = None
    props = {}
    lines = []
    nextLine = inFile.readline()
    skip = 0
    nDone = 0
    t1 = time.time()
    while nextLine != '':
        # 'M  END' (two spaces, per the MDL molfile spec) terminates the
        # connection table; optimize the accumulated block.
        if nextLine.find('M  END') != -1:
            lines.append(nextLine)
            # ''.join replaces the Python-2-only string.join(), which no
            # longer exists in Python 3.
            molBlock = ''.join(lines)
            try:
                newMolBlock = Add3DCoordsToMol(molBlock, 'chemical/mdl-molfile', props=props)
            except:
                badBlock = molBlock
                skip = 1
                lines = []
            else:
                skip = 0
                lines = [newMolBlock]
        elif nextLine.find('$$$$') != -1:
            # End of record: emit (or divert to the problem file) and
            # periodically restart Chem3D to limit its memory growth.
            t2 = time.time()
            nDone += 1
            print('finished molecule %d in %f seconds' % (nDone, time.time() - t1))
            t1 = time.time()
            if nDone % restartEvery == 0:
                CloseChem3D()
                StartChem3D()
                outFile.close()
                outFile = open(outFileName, 'a')
            if not skip:
                for prop in props.keys():
                    lines.append('>  <%s>\n%f\n\n' % (prop, props[prop]))
                lines.append(nextLine)
                outFile.write(''.join(lines))
                lines = []
            else:
                skip = 0
                lines.append(nextLine)
                if problemFile is None:
                    problemFile = open(problemFileName, 'w+')
                problemFile.write(badBlock)
                problemFile.write(''.join(lines))
                lines = []
        else:
            lines.append(nextLine)
        nextLine = inFile.readline()
    inFile.close()
    outFile.close()
    if problemFile is not None:
        problemFile.close()
if __name__ == '__main__':
    # Smoke test: round-trip a SMILES string through ChemDraw conversions
    # and render it to an image. Requires ChemDraw to be installed.
    inStr = 'CCC(C=O)CCC'
    img = SmilesToPilImage(inStr)
    img.save('foo.jpg')
    convStr = CDXClean(inStr, 'chemical/x-daylight-smiles', 'chemical/x-daylight-smiles')
    print('in:', inStr)
    print('out:', convStr)

    convStr = CDXConvert(inStr, 'chemical/x-daylight-smiles', 'chemical/x-mdl-molfile')
    print('in:', inStr)
    print('out:', convStr)
    convStr2 = CDXClean(convStr, 'chemical/x-mdl-molfile', 'chemical/x-mdl-molfile')
    print('out2:', convStr2)

    inStr = 'COc1ccc(c2onc(c2C(=O)NCCc3ccc(F)cc3)c4ccc(F)cc4)c(OC)c1'
    convStr = CDXConvert(inStr, 'chemical/x-daylight-smiles', 'chemical/x-mdl-molfile')
    out = open('test.mol', 'w+')
    out.write(convStr)
    out.close()
|
soerendip42/rdkit
|
rdkit/utils/chemdraw.py
|
Python
|
bsd-3-clause
| 11,651
|
[
"RDKit"
] |
9ac7d50016a6e5df916a97032f119ac8254034f2979deaf0e75cb487ee625b96
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
MetPy Declarative Syntax Tutorial
=================================
The declarative syntax that is a part of the MetPy package is designed to aid in simple
data exploration and analysis needs by simplifying the plotting context from typical verbose
Python code. The complexity of data wrangling and plotting are hidden behind the simplified
syntax to allow a lower barrier to investigating your data.
"""
#########################################################################
# Imports
# -------
#
# You'll note that the number of imports is smaller due to using the declarative syntax.
# There is no need to import Matplotlib or Cartopy to your code as all of that is done
# behind the scenes.
from datetime import datetime, timedelta
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.io import metar
from metpy.plots.declarative import (BarbPlot, ContourPlot, FilledContourPlot, MapPanel,
PanelContainer, PlotObs)
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# Depending on what kind of data you are wanting to plot you'll use either Xarray (for gridded
# data), Pandas (for CSV data), or the MetPy METAR parser (for METAR data).
#
# We'll start this tutorial by reading in a gridded dataset using Xarray.
# Open the netCDF file as a xarray Dataset and parse the full dataset
data = xr.open_dataset(get_test_data('GFS_test.nc', False)).metpy.parse_cf()
# View a summary of the Dataset
print(data)
#########################################################################
# Set Datetime
# ------------
#
# Set the date/time of that you desire to plot
plot_time = datetime(2010, 10, 26, 12)
#########################################################################
# Subsetting Data
# ---------------
#
# MetPy provides wrappers for the usual xarray indexing and selection routines that can handle
# quantities with units. For DataArrays, MetPy also allows using the coordinate axis types
# mentioned above as aliases for the coordinates. And so, if we wanted data to be just over
# the U.S. for plotting purposes
ds = data.metpy.sel(lat=slice(70, 10), lon=slice(360 - 150, 360 - 55))
#########################################################################
# For full details on xarray indexing/selection, see
# `xarray's documentation <http://xarray.pydata.org/en/stable/indexing.html>`_.
#########################################################################
# Calculations
# ------------
#
# In MetPy 1.0 and later, calculation functions accept Xarray DataArray's as input and the
# output a DataArray that can be easily added to an existing Dataset.
#
# As an example, we calculate wind speed from the wind components and add it as a new variable
# to our Dataset.
ds['wind_speed'] = mpcalc.wind_speed(ds['u-component_of_wind_isobaric'],
ds['v-component_of_wind_isobaric'])
#########################################################################
# Plotting
# --------
#
# With that minimal preparation, we are now ready to use the simplified plotting syntax to be
# able to plot our data and analyze the meteorological situation.
#
# General Structure
#
# 1. Set contour attributes
#
# 2. Set map characteristics and collect contours
#
# 3. Collect panels and plot
#
# 4. Show (or save) the results
#
# Valid Plotting Types for Gridded Data:
#
# - ``ContourPlot()``
#
# - ``FilledContourPlot()``
#
# - ``ImagePlot()``
#
# - ``PlotBarbs()``
#
# More complete descriptions of these and other plotting types, as well as the map panel and
# panel container classes are at the end of this tutorial.
#
# Let's plot a 300-hPa map with color-filled wind speed, which we calculated and added to
# our Dataset above, and geopotential heights over the CONUS.
#########################################################################
# We'll start by setting attributes for contours of Geopotential Heights at 300 hPa.
# We need to set at least the data, field, level, and time attributes. We'll set a few others
# to have greater control over how the data is plotted.
# Set attributes for contours of Geopotential Heights at 300 hPa
cntr2 = ContourPlot()
cntr2.data = ds
cntr2.field = 'Geopotential_height_isobaric'
cntr2.level = 300 * units.hPa
cntr2.time = plot_time
cntr2.contours = list(range(0, 10000, 120))
cntr2.linecolor = 'black'
cntr2.linestyle = 'solid'
cntr2.clabels = True
#########################################################################
# Now we'll set the attributes for plotting color-filled contours of wind speed at 300 hPa.
# Again, the attributes that must be set include data, field, level, and time. We'll also set
# a colormap and colorbar to be purposeful for wind speed. Additionally, we'll set the
# attribute to change the units from m/s to knots, which is the common plotting units for
# wind speed.
# Set attributes for plotting color-filled contours of wind speed at 300 hPa
cfill = FilledContourPlot()
cfill.data = ds
cfill.field = 'wind_speed'
cfill.level = 300 * units.hPa
cfill.time = plot_time
cfill.contours = list(range(10, 201, 20))
cfill.colormap = 'BuPu'
cfill.colorbar = 'horizontal'
cfill.plot_units = 'knot'
#########################################################################
# Once we have our contours (and any colorfill plots) set up, we will want to define the map
# panel that we'll plot the data on. This is the place where we can set the view extent,
# projection of our plot, add map lines like coastlines and states, set a plot title.
# One of the key elements is to add the data to the map panel as a list with the plots
# attribute.
# Set the attributes for the map and add our data to the map
panel = MapPanel()
panel.area = [-125, -74, 20, 55]
panel.projection = 'lcc'
panel.layers = ['states', 'coastline', 'borders']
panel.title = f'{cfill.level.m}-hPa Heights and Wind Speed at {plot_time}'
panel.plots = [cfill, cntr2]
#########################################################################
# Finally we'll collect all of the panels to plot on the figure, set the size of the figure,
# and ultimately show or save the figure.
# Set the attributes for the panel and put the panel in the figure
pc = PanelContainer()
pc.size = (15, 15)
pc.panels = [panel]
#########################################################################
# All of our setting now produce the following map!
# Show the image
pc.show()
#########################################################################
# That's it! What a nice looking map, with relatively simple set of code.
#########################################################################
# Adding Wind Barbs
# -----------------
#
# We can easily add wind barbs to the plot we generated above by adding another plot type
# and adding it to the panel. The plot type for wind barbs is ``PlotBarbs()`` and has its own
# set of attributes to control plotting a vector quantity.
#########################################################################
# We start with setting the attributes that we had before for our 300 hPa plot including,
# Geopotential Height contours, and color-filled wind speed.
# Set attributes for contours of Geopotential Heights at 300 hPa
cntr2 = ContourPlot()
cntr2.data = ds
cntr2.field = 'Geopotential_height_isobaric'
cntr2.level = 300 * units.hPa
cntr2.time = plot_time
cntr2.contours = list(range(0, 10000, 120))
cntr2.linecolor = 'black'
cntr2.linestyle = 'solid'
cntr2.clabels = True
# Set attributes for plotting color-filled contours of wind speed at 300 hPa
cfill = FilledContourPlot()
cfill.data = ds
cfill.field = 'wind_speed'
cfill.level = 300 * units.hPa
cfill.time = plot_time
cfill.contours = list(range(10, 201, 20))
cfill.colormap = 'BuPu'
cfill.colorbar = 'horizontal'
cfill.plot_units = 'knot'
#########################################################################
# Now we'll set the attributes for plotting wind barbs, with the required attributes of data,
# time, field, and level. The skip attribute is particularly useful for thinning the number of
# wind barbs that are plotted on the map and again we'll convert to units of knots.
# Set attributes for plotting wind barbs
barbs = BarbPlot()
barbs.data = ds
barbs.time = plot_time
barbs.field = ['u-component_of_wind_isobaric', 'v-component_of_wind_isobaric']
barbs.level = 300 * units.hPa
barbs.skip = (3, 3)
barbs.plot_units = 'knot'
#########################################################################
# Add all of our plot types to the panel, don't forget to add in the new wind barbs to our plot
# list!
# Set the attributes for the map and add our data to the map
panel = MapPanel()
panel.area = [-125, -74, 20, 55]
panel.projection = 'lcc'
panel.layers = ['states', 'coastline', 'borders']
panel.title = f'{cfill.level.m}-hPa Heights and Wind Speed at {plot_time}'
panel.plots = [cfill, cntr2, barbs]
# Set the attributes for the panel and put the panel in the figure
pc = PanelContainer()
pc.size = (15, 15)
pc.panels = [panel]
# Show the figure
pc.show()
#########################################################################
# Plot Surface Obs
# ----------------
#
# We can also plot surface (or upper-air) observations at point locations using the simplified
# syntax. Whether it is surface or upper-air data, the ``PlotObs()`` class is what you would
# want to use. Then you would add those observations to a map panel and then collect the panels
# to plot the figure; similar to what you would do for a gridded plot.
df = metar.parse_metar_file(get_test_data('metar_20190701_1200.txt', False), year=2019,
month=7)
# Let's take a look at the variables that we could plot coming from our METAR observations.
print(df.keys())
# Set the observation time
obs_time = datetime(2019, 7, 1, 12)
#########################################################################
# Setting of our attributes for plotting observations is pretty straightforward and just needs
# to be lists for the variables, and a comparable number of items for plot characteristics that
# are specific to the individual fields. For example, the locations around a station plot, the
# plot units, and any plotting formats would all need to have the same number of items as the
# fields attribute.
#
# Plotting wind barbs is done through the vector_field attribute and you can reduce the number
# of points plotted (especially important for surface observations) with the reduce points
# attribute.
#
# For a very basic plot of one field, the minimum required attributes are the data, time,
# fields, and location attributes.
# Plot desired data
obs = PlotObs()
obs.data = df
obs.time = obs_time
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['cloud_coverage', 'air_temperature', 'dew_point_temperature',
'air_pressure_at_sea_level', 'present_weather']
obs.plot_units = [None, 'degF', 'degF', None, None]
obs.locations = ['C', 'NW', 'SW', 'NE', 'W']
obs.formats = ['sky_cover', None, None, lambda v: format(v * 10, '.0f')[-3:],
'current_weather']
obs.reduce_points = 0.75
obs.vector_field = ['eastward_wind', 'northward_wind']
#########################################################################
# We use the same Classes for plotting our data on a map panel and collecting all of the
# panels on the figure. In this case we'll focus in on the state of Indiana for plotting.
# Panel for plot with Map features
panel = MapPanel()
panel.layout = (1, 1, 1)
panel.projection = 'lcc'
panel.area = 'in'
panel.layers = ['states']
panel.title = f'Surface plot for {obs_time}'
panel.plots = [obs]
# Bringing it all together
pc = PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
pc.show()
#########################################################################
# Detailed Attribute Descriptions
# -------------------------------
#
# This final section contains verbose descriptions of the attributes that can be set by the
# plot types used in this tutorial.
#########################################################################
# ContourPlot()
# -------------
#
# This class is designed to plot contours of gridded data, most commonly model output from the
# GFS, NAM, RAP, or other gridded dataset (e.g., NARR).
#
# Attributes:
#
# ``data``
#
# This attribute must be set with the variable name that contains the xarray dataset.
# (Typically this is the variable ds)
#
# ``field``
#
# This attribute must be set with the name of the variable that you want to contour.
# For example, to plot the heights of pressure surfaces from the GFS you would use the name
# ``‘Geopotential_height_isobaric’``
#
# ``level``
#
# This attribute sets the level of the data you wish to plot. If it is a pressure level,
# then it must be set to a unit bearing value (e.g., 500*units.hPa). If the variable does
# not have any vertical levels (e.g., mean sea-level pressure), then the level attribute must
# be set to None.
#
# ``time``
#
# This attribute must be set with a datetime object, just as with the ``PlotObs()`` class.
# To get a forecast hour, you can use the timedelta function from datetime to add the number of
# hours into the future you wish to plot. For example, if you wanted the six hour forecast from
# the 00 UTC 2 February 2020 model run, then you would set the attribute with:
#
# ``datetime(2020, 2, 2, 0) + timedelta(hours=6)``
#
# ``contours``
#
# This attribute sets the contour values to be plotted with a list. This can be set manually
# with a list of integers in square brackets (e.g., ``[5400, 5460, 5520, 5580, 5640, 5700]``)
# or programmatically (e.g., ``list(range(0, 10000, 60))``). The second method is a way to
# easily set a contour interval (in this case 60).
#
# ``clabel``
#
# This attribute can be set to ``True`` if you desire to have your contours labeled.
#
# ``linestyle``
#
# This attribute can be set to make the contours ``‘solid’``, ``‘dashed’``, ``‘dotted’``,
# or ``‘dashdot’``. Other linestyles are can be used and are found at:
# https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
#
# Default is ``‘solid’``.
#
# ``linewidth``
#
# This attribute alters the width of the contours (defaults to 1). Setting the value greater
# than 1 will yield a thicker contour line.
#
# ``linecolor``
#
# This attribute sets the color of the contour lines. Default is ``‘black’``. All colors from
# matplotlib are valid: https://matplotlib.org/3.1.0/_images/sphx_glr_named_colors_003.png
#
# ``plot_units``
#
# If you want to change the units for plotting purposes, add the string value of the units
# desired. For example, if you want to plot temperature in Celsius, then set this attribute
# to ``‘degC’``.
#########################################################################
# FilledContourPlot()
# -------------------
#
# Works very similarly to ``ContourPlot()``, except that contours are filled using a colormap
# between contour values. All attributes for ``ContourPlot()`` work for color-filled plots,
# except for linestyle, linecolor, and linewidth. Additionally, there are the following
# attributes that work for color-filling:
#
# Attributes:
#
# ``colormap``
#
# This attribute is used to set a valid colormap from either Matplotlib or MetPy:
# Matplotlib Colormaps: https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html
# MetPy Colormaps: https://unidata.github.io/MetPy/v1.0/api/generated/metpy.plots.ctables.html
#
# ``colorbar``
#
# This attribute can be set to ``‘vertical’`` or ``‘horizontal’``, which is the location the
# colorbar will be plotted on the panel.
#
# ``image_range``
#
# A set of values indicating the minimum and maximum for the data being plotted. This
# attribute should be set as ``(min_value, max_value)``, where min_value and max_value are
# numeric values.
#########################################################################
# PanelContainer()
# ----------------
#
# Attributes:
#
# ``size``
#
# The size of the figure in inches (e.g., (10, 8))
#
# ``panels``
#
# A list collecting the panels to be plotted in the figure.
#
# ``show``
#
# Show the plot
#
# ``save``
#
# Save the figure using the Matplotlib arguments/keyword arguments
#########################################################################
# MapPanel()
# ----------
#
# Attributes:
#
# ``layout``
#
# The Matplotlib layout of the figure. For a single panel figure the setting should be
# ``(1, 1, 1)``
#
# ``projection``
#
# The projection can be set with the name of a default projection (``‘lcc’``, ``‘mer’``, or
# ``‘ps’``) or it can be set to a Cartopy projection.
#
# ``layers``
#
# This attribute will add map layers to identify boundaries or features to plot on the map.
# Valid layers are ``'borders'``, ``'coastline'``, ``'states'``, ``'lakes'``, ``'land'``,
# ``'ocean'``, ``'rivers'``, ``'counties'``.
#
# ``area``
#
# This attribute sets the geographical area of the panel. This can be set with a predefined
# name of an area including all US state postal abbreviations (e.g., ``‘us’``, ``‘natl’``,
# ``‘in’``, ``‘il’``, ``‘wi’``, ``‘mi’``, etc.) or a tuple value that corresponds to
# longitude/latitude box based on the projection of the map with the format
# ``(west-most longitude, east-most longitude, south-most latitude, north-most latitude)``.
# This tuple defines a box from the lower-left to the upper-right corner.
#
# ``title``
#
# This attribute sets a title for the panel.
#
# ``plots``
#
# A list collecting the observations to be plotted in the panel.
#########################################################################
# BarbPlot()
# ----------
#
# This plot class is used to add wind barbs to the plot with the following
#
# Attributes:
#
# ``data``
#
# This attribute must be set to the variable that contains the vector components to be plotted.
#
# ``field``
#
# This attribute is a list of the vector components to be plotted. For the typical
# meteorological case it would be the ``[‘u-component’, ‘v-component’]``.
#
# ``time``
#
# This attribute should be set to a datetime object, the same as for all other declarative
# classes.
#
# ``barblength``
#
# This attribute sets the length of the wind barbs. The default value is based on the
# font size.
#
# ``color``
#
# This attribute sets the color of the wind barbs, which can be any Matplotlib color.
# Default color is ``‘black’``.
#
# ``earth_relative``
#
# This attribute can be set to False if the vector components are grid relative (e.g., for NAM
# or NARR output)
#
# ``pivot``
#
# This attribute can be set to a string value about where the wind barb will pivot relative to
# the grid point. Possible values include ``‘tip’`` or ``‘middle’``. Default is ``‘middle’``.
########################################################################
# PlotObs()
# ---------
#
# This class is used to plot point observations from the surface or upper-air.
#
# Attributes:
#
# ``data``
#
# This attribute needs to be set to the DataFrame variable containing the fields that you
# desire to plot.
#
# ``fields``
#
# This attribute is a list of variable names from your DataFrame that you desire to plot at the
# given locations around the station model.
#
# ``level``
#
# For a surface plot this needs to be set to None.
#
# ``time``
#
# This attribute needs to be set to subset your data attribute for the time of the observations
# to be plotted. This needs to be a datetime object.
#
# ``locations``
#
# This attribute sets the location of the fields to be plotted around the surface station
# model. The default location is center ``(‘C’)``
#
# ``time_range``
#
# This attribute allows you to define a window for valid observations (e.g., 15 minutes on
# either side of the datetime object setting. This is important for surface data since actual
# observed times are not all exactly on the hour. If multiple observations exist in the defined
# window, the most recent observations is retained for plotting purposes.
#
# ``formats``
#
# This attribute sets a formatter for text or plotting symbols around the station model. For
# example, plotting mean sea-level pressure is done in a three-digit code and a formatter can
# be used to achieve that on the station plot.
#
# MSLP Formatter: ``lambda v: format(10 * v, '.0f')[-3:]``
#
# For plotting symbols use the available MetPy options through their name. Valid symbol formats
# are ``'current_weather'``, ``'sky_cover'``, ``'low_clouds'``, ``'mid_clouds'``,
# ``'high_clouds'``, and ``'pressure_tendency'``.
#
# ``colors``
#
# This attribute can change the color of the plotted observation. Default is ``‘black’``.
# Acceptable colors are those available through Matplotlib:
# https://matplotlib.org/3.1.1/_images/sphx_glr_named_colors_003.png
#
# ``vector_field``
#
# This attribute can be set to a list of wind component values for plotting
# (e.g., ``[‘uwind’, ‘vwind’]``)
#
# ``vector_field_color``
#
# Same as colors except only controls the color of the wind barbs. Default is ``‘black’``.
#
# ``reduce_points``
#
# This attribute can be set to a real number to reduce the number of stations that are plotted.
# Default value is zero (e.g., no points are removed from the plot).
|
metpy/MetPy
|
v1.0/_downloads/900d76c7356d09e4ca20b90f94a0732e/declarative_tutorial.py
|
Python
|
bsd-3-clause
| 21,538
|
[
"NetCDF"
] |
3fddd2078c2df8bc1307170cbf63b973385f760d05b26b0591341cf4ad2b53bb
|
#!/usr/bin/env python
##############################################################################################
#
#
#  regrid_emissions_N96e.py
#
#
#  Requirements:
#  Iris 1.10, time, cf_units, numpy
#
#
#  This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
#  http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
#  Copyright (C) 2015  University of Cambridge
#
#  This is free software: you can redistribute it and/or modify it under the
#  terms of the GNU Lesser General Public License as published by the Free Software
#  Foundation, either version 3 of the License, or (at your option) any later
#  version.
#
#  It is distributed in the hope that it will be useful, but WITHOUT ANY
#  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
#  PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more details.
#
#  You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
#  Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
#  Modified by Marcus Koehler 2017-10-11 <mok21@cam.ac.uk>
#
#
##############################################################################################

# preamble
import time
import iris
import cf_units
import numpy

# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---

# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
#       grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960-2020/0.5x0.5/combined_sources_OC_biofuel_1960-2020_greg.nc'
#
# STASH code emissions are associated with
#  301-320: surface
#  m01s00i313: Organic carbon biofuel surface emissions
#
#  321-340: full atmosphere
#
stash='m01s00i313'

# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---

species_name='OC_biofuel'

# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()

# This is the original data
ems=iris.load_cube(emissions_file)

# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))

# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()

# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()

# now regrid, conserving area-weighted flux totals
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())

# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='OC biofuel fuel surf emissions expressed as carbon'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)

# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1'   # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_OC_biofuel_1960-2020_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of organic carbon from 1960 to 2020 (from selected anthropogenic biofuel sources only)'
ocube.attributes['File_version']='v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'

# remove the lower-case attributes inherited from the source file; we set
# the 'File_creation_date' spelling above instead
del ocube.attributes['file_creation_date']
del ocube.attributes['description']

# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# Mid-month points on a 360-day calendar: 61 years (1960-2020) of 12
# months each -> 732 values 15, 45, ..., 21945 (days since 1960-01-01).
# Computed here instead of the original ~70-line hard-coded literal array
# to remove duplication and transcription risk.
n_months = (2020 - 1960 + 1) * 12   # = 732
midmonth_days = numpy.arange(n_months, dtype='float64') * 30.0 + 15.0

ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=midmonth_days

# make z-direction: a single surface level
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
                           units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd (T, Z, Y, X ordering expected by the UM)
ocube.transpose([1,0,2,3])

# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')

# add forecast_period & forecast_reference_time
# forecast_reference_time: the same mid-month values as the time coordinate.
# A copy is used so the two coordinates do not share one numpy array.
frt_dims=iris.coords.AuxCoord(midmonth_days.copy(),standard_name = 'forecast_reference_time',
                              units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period: constant -360 hours (mid-point of the preceding month)
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
                             units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)

# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')

# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be cattable, as is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)

# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])

# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1960-2020/regrid_OC_biofuel_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 17,344
|
[
"NetCDF"
] |
f4106cd9eddd0e9010da3afe6ef24dd1e0d2d4d1544899f99af213cc884c5cd2
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
    """Exercise the TestHarness CPU/thread allocation logic.

    Each scenario runs the harness against small fixture specs and inspects
    the textual output for the expected caveats or skip messages.

    Fix: regex patterns are now raw strings; the originals relied on '\\['
    surviving inside a normal string literal, which raises a
    DeprecationWarning (invalid escape sequence) on Python 3. The pattern
    values are byte-identical.
    """

    def testSkippedAllocations(self):
        """
        Scenarios which trigger skipped tests due to insufficient
        resource allocation.
        """
        # Subject a normally passing test to impossible cpu allocations
        output = self.runTests('--no-color', '-i', 'always_ok', '-p', '2', '-j', '1')
        self.assertRegexpMatches(output.decode('utf-8'), r'tests/test_harness.always_ok.*? \[INSUFFICIENT SLOTS\] SKIP')

        # Subject a normally passing test to impossible thread allocations
        output = self.runTests('--no-color', '-i', 'always_ok', '--n-threads', '2', '-j', '1')
        self.assertRegexpMatches(output.decode('utf-8'), r'tests/test_harness.always_ok.*? \[INSUFFICIENT SLOTS\] SKIP')

        # A combination of threads*cpus with too low a hard limit (3*3= -j9)
        output = self.runTests('--no-color', '-i', 'allocation_test', '--n-threads', '3', '-p', '3', '-j', '8')
        self.assertRegexpMatches(output.decode('utf-8'), r'tests/test_harness.allocation_test.*? \[INSUFFICIENT SLOTS\] SKIP')

    def testOversizedCaveat(self):
        """
        Scenarios which trigger only the 'oversized' caveat.
        """
        # A test which has no min/max cpu parameters should print oversized
        # when subjected to -p 2
        output = self.runTests('-i', 'always_ok', '-p', '2').decode('utf-8')
        self.assertNotIn('CPUS', output)
        self.assertIn('OVERSIZED', output)

        # A test which has no min/max thread parameters should print oversized
        # when subjected to --n-threads 2
        output = self.runTests('-i', 'always_ok', '--n-threads', '2').decode('utf-8')
        self.assertNotIn('THREADS', output)
        self.assertIn('OVERSIZED', output)

    def testCpuCaveats(self):
        """
        Scenarios which trigger the min/max CPU caveat.

        Note: --n-threads is present to suppress the threading
        caveat for more accurate caveat detection.
        """
        # Test MIN CPUs / Oversized caveat using soft limit (no -j) on a test
        # having a minimum cpu parameter of 2.
        output = self.runTests('-i', 'allocation_test', '--n-threads', '2').decode('utf-8')
        self.assertNotIn('MIN_THREADS', output)
        self.assertIn('MIN_CPUS=2', output)
        self.assertIn('OVERSIZED', output)

        # Test MAX CPUs / Oversized caveat on a test having a maximum cpu
        # parameter of 3 (and we subjected it to 4).
        output = self.runTests('-i', 'allocation_test', '-p', '4', '--n-threads', '2').decode('utf-8')
        self.assertNotIn('MIN_THREADS', output)
        self.assertIn('MAX_CPUS=3', output)
        self.assertIn('OVERSIZED', output)

    def testThreadCaveats(self):
        """
        Scenarios which trigger the min/max threading caveat.

        Note: -j/p is present to suppress the min/max cpu oversize
        caveat for more accurate caveat detection.
        """
        # MIN Threads caveat
        # Note: 1*2 should be -j 2 but the test minimum is 2 threads, so we need
        # to use -j 4 to suppress any cpu caveats. Oversized will not trigger as
        # -j4 satisfies this test's requirements.
        output = self.runTests('-i', 'allocation_test', '-j', '4', '-p', '2', '--n-threads', '1').decode('utf-8')
        self.assertNotIn('CPUS', output)
        self.assertNotIn('OVERSIZED', output)
        self.assertIn('MIN_THREADS=2', output)

        # MAX Threads caveat
        # Note: 2*4 should be -j 8 but the test maximum is 3 threads, so we
        # are specifically testing that setting a lower j does _not_ trigger an
        # insufficient skipped test scenario. Oversized will not trigger as
        # -j6 satisfies this test's requirements.
        output = self.runTests('-i', 'allocation_test', '-j', '6', '-p', '2', '--n-threads', '4').decode('utf-8')
        self.assertNotIn('CPUS', output)
        self.assertNotIn('OVERSIZED', output)
        self.assertIn('MAX_THREADS=3', output)

    def testPerfectAllocation(self):
        """
        Scenario which trigger no caveats.
        """
        # Passing test triggering no caveats, as supplied allocations satisfies
        # the test's requirements
        output = self.runTests('-i', 'allocation_test', '-j', '4', '-p', '2', '--n-threads', '2').decode('utf-8')
        self.assertNotIn('MIN_THREADS', output)
        self.assertNotIn('MAX_THREADS', output)
        self.assertNotIn('MIN_CPUS', output)
        self.assertNotIn('MAX_CPUS', output)
        self.assertNotIn('OVERSIZED', output)
        self.assertRegexpMatches(output, r'tests/test_harness.allocation_test.*OK')
|
nuclear-wizard/moose
|
python/TestHarness/tests/test_Allocations.py
|
Python
|
lgpl-2.1
| 5,026
|
[
"MOOSE"
] |
b3194b17ac9661136ce9c33dddb85d04bfa520d86d5d9187c2deb75df1144fa2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
import random
import mxnet as mx
import numpy as np
from common import find_mxnet, dali, fit
from mlperf_compliance import constants as mlperf_constants
from mlperf_compliance import tags as mlperf_log
from mlperf_compliance.mlperf_log import LOGGER
from mlperf_log_utils import mx_resnet_print, mpiwrapper, mlperf_submission_log
def add_general_args(parser):
    """Register the general (non-fit, non-DALI) command-line options.

    Options are declared as (flag, type, default, help) records and added
    in order, so the generated --help output is unchanged.
    """
    option_specs = [
        ('--verbose', int, 0,
         'turn on reporting of chosen algos for convolution, etc.'),
        ('--seed', int, None,
         'set the seed for python, nd and mxnet rngs'),
        ('--custom-bn-off', int, 0,
         'disable use of custom batchnorm kernel'),
        ('--fuse-bn-relu', int, 0,
         'have batchnorm kernel perform activation relu'),
        ('--fuse-bn-add-relu', int, 0,
         'have batchnorm kernel perform add followed by activation relu'),
        ('--input-layout', str, 'NCHW',
         'the layout of the input data (e.g. NCHW)'),
        ('--conv-layout', str, 'NCHW',
         'the layout of the data assumed by the conv operation (e.g. NCHW)'),
        ('--conv-algo', int, -1,
         'set the convolution algos (fwd, dgrad, wgrad)'),
        ('--force-tensor-core', int, 0,
         'require conv algos to be tensor core'),
        ('--batchnorm-layout', str, 'NCHW',
         'the layout of the data assumed by the batchnorm operation (e.g. NCHW)'),
        ('--batchnorm-eps', float, 2e-5,
         'the amount added to the batchnorm variance to prevent output explosion.'),
        ('--batchnorm-mom', float, 0.9,
         'the leaky-integrator factor controling the batchnorm mean and variance.'),
        ('--pooling-layout', str, 'NCHW',
         'the layout of the data assumed by the pooling operation (e.g. NCHW)'),
        ('--kv-store', str, 'device',
         'key-value store type'),
    ]
    for flag, opt_type, opt_default, opt_help in option_specs:
        parser.add_argument(flag, type=opt_type, default=opt_default,
                            help=opt_help)
def _get_gpu(gpus):
idx = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
gpu = gpus.split(",")[idx]
return gpu
class MLPerfInit(mx.init.Xavier):
    """Xavier initializer with Normal(0, 0.01) init for fully-connected layers.

    Weights whose name starts with 'fc' are drawn from a narrow gaussian
    (as mandated by the MLPerf reference); all others use the standard
    Xavier scheme of the parent class.
    """
    def _init_weight(self, name, arg):
        if not name.startswith("fc"):
            # non-FC weights: fall back to the inherited Xavier init
            return super()._init_weight(name, arg)
        mx.ndarray.random.normal(0, 0.01, out=arg)
if __name__ == '__main__':
    # Detach the mlperf logger from the root logger so records configured by
    # basicConfig above are not emitted twice.
    LOGGER.propagate = False

    mx_resnet_print(key=mlperf_constants.INIT_START, uniq=False)

    # parse args
    parser = argparse.ArgumentParser(description="MLPerf RN50v1.5 training script",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_general_args(parser)
    fit.add_fit_args(parser)
    dali.add_dali_args(parser)

    # Defaults for ImageNet-1k ResNet-50 v1.5 training.
    parser.set_defaults(
        # network
        network          = 'resnet-v1b',
        num_layers       = 50,

        # data
        resize           = 256,
        num_classes      = 1000,
        num_examples     = 1281167,
        image_shape      = '3,224,224',

        # train
        num_epochs       = 100,
        lr_step_epochs   = '30,60,80',
        dtype            = 'float32'
    )

    args = parser.parse_args()

    # select gpu for horovod process
    if 'horovod' in args.kv_store:
        args.gpus = _get_gpu(args.gpus)

    # kvstore
    kv = mx.kvstore.create(args.kv_store)

    mlperf_submission_log('resnet', 'abci_{}xV100'.format(kv.num_workers))

    # load network
    from importlib import import_module
    net = import_module('symbols.'+args.network)

    mx_resnet_print(key=mlperf_log.EVAL_EPOCH_OFFSET,
                    val=args.eval_offset)

    # Seeding: with no user seed, each horovod rank draws one seed from a
    # shared random stream; a user-supplied seed is offset per rank so that
    # workers still differ.
    # NOTE(review): indentation reconstructed -- with seed=None and a
    # non-horovod kv-store the seed stays None; confirm intended.
    if args.seed is None:
        if 'horovod' in args.kv_store:
            all_seeds = np.random.randint(2**16, size=(int(os.environ['OMPI_COMM_WORLD_SIZE'])))
            args.seed = int(all_seeds[int(os.environ['OMPI_COMM_WORLD_RANK'])])
    else:
        args.seed = args.seed + int(os.environ['OMPI_COMM_WORLD_RANK'])

    mx_resnet_print(key=mlperf_log.RUN_SET_RANDOM_SEED, val=args.seed, uniq=True)
    # seed all three RNGs that training touches
    random.seed(args.seed)
    np.random.seed(args.seed)
    mx.random.seed(args.seed)

    # Devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus == "" else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]

    # Load symbol definiton and create model
    sym = net.get_symbol(**vars(args))
    model = mx.mod.Module(context=devs, symbol=sym)

    # Weights init
    initializer = MLPerfInit(
        rnd_type='gaussian', factor_type="in", magnitude=2, seed=args.weight_seed)

    # Start DALI pipeline
    lambda_fnc_dali_get_rec_iter=dali.build_input_pipeline(args, kv)

    arg_params, aux_params = None, None
    if 'horovod' in args.kv_store:
        # Create dummy data shapes and bind them to the model
        data_shapes  = [mx.io.DataDesc('data',(args.batch_size, 224, 224, 4),'float16')]
        label_shapes = [mx.io.DataDesc('softmax_label',(args.batch_size,),'float32')]
        model.bind(data_shapes=data_shapes, label_shapes=label_shapes)
        model.init_params(initializer, arg_params=arg_params, aux_params=aux_params)

    # block until all asynchronous (init) work has completed before training
    mx.ndarray.waitall()

    fit.fit(args, kv, model, initializer, lambda_fnc_dali_get_rec_iter, devs, arg_params, aux_params)

    mx_resnet_print(key=mlperf_log.RUN_FINAL)
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/JobScripts/image_classification/train_imagenet.py
|
Python
|
apache-2.0
| 6,645
|
[
"Gaussian"
] |
15977d15bc032899265e5db8610473bd63ce9bcb1eab8dec4805039c4fd28e09
|
#!/usr/bin/env python
#
# Copyright (C) 2010-2012 ABINIT Group (Yann Pouillon)
#
# This file is part of the ABINIT software package. For license information,
# please see the COPYING file in the top-level directory of the ABINIT source
# distribution.
#
# FIXME: detect duplicate definitions
from ConfigParser import ConfigParser
from time import gmtime,strftime
import commands
import os
import re
import sys
class MyConfigParser(ConfigParser):
    # ConfigParser lowercases option names by default; return them
    # unchanged (coerced to str) so mixed-case keys such as CC/FC survive.
    def optionxform(self,option):
        return str(option)
# ---------------------------------------------------------------------------- #
#
# Functions
#
# Keywords that are excluded from the comparison. Entries containing '*'
# are treated as regular expressions matched against the keyword start.
env_ignore = list()
opt_ignore = ["fcflags_opt_*","status"]

def is_ignored(keyword):
    """Return True when *keyword* matches any of the ignore patterns."""
    def _matches(pattern):
        if "*" in pattern:
            return re.match(pattern, keyword) is not None
        return pattern == keyword
    return any(_matches(pattern) for pattern in env_ignore + opt_ignore)
def key_is_ok(mode,key):
    """Return True unless *key* must be skipped for build mode *mode*.

    mode is one of 'mpi', 'raw' or 'serial'; each mode has its own set of
    keys that are not compared against the generated build examples.
    """
    # Init keys to ignore
    # BUG FIX: the original had cnf_ignore["raw"] = ("status") -- a plain
    # string, not a tuple -- so the membership test below degraded to a
    # substring check (e.g. key 'tat' was wrongly skipped). A one-element
    # tuple needs a trailing comma.
    cnf_ignore = dict()
    cnf_ignore["mpi"] = ("status","CC","CXX","FC")
    cnf_ignore["raw"] = ("status",)
    cnf_ignore["serial"] = ("status","with_mpi_prefix")

    if ( key in cnf_ignore[mode] ):
        return False
    else:
        return True
# ---------------------------------------------------------------------------- #
#
# Main program
#
# Check if we are in the top of the ABINIT source tree
# BUG FIX: my_name was used below but never defined, which raised a
# NameError whenever the check failed; it is now derived from argv[0].
# The prints are parenthesized so they work under Python 2 and 3 alike.
my_name = os.path.basename(sys.argv[0])
if ( not os.path.exists("configure.ac") or
     not os.path.exists("src/98_main/abinit.F90") ):
  print("%s: You must be in the top of an ABINIT source tree." % my_name)
  print("%s: Aborting now." % my_name)
  sys.exit(1)

# Init: environment variables are ALL_CAPS, options are lower_case
re_env = re.compile("^[A-Z][0-9A-Z_]*")
re_opt = re.compile("^[a-z][0-9a-z_]*")
# Extract environment variables from config file: keep only non-resettable
# ('reset' == no), non-ignored variables, sorted alphabetically.
cnf_env = MyConfigParser()
cnf_env.read("config/specs/environment.conf")

env_config = sorted(
  env for env in cnf_env.sections()
  if cnf_env.get(env,"reset") == "no" and not is_ignored(env)
)
# Extract options from config file, splitting them into the currently
# valid set (opt_config) and the set of removed/renamed names (opt_removed).
cnf_opt = MyConfigParser()
cnf_opt.read("config/specs/options.conf")

opt_config = list()
opt_removed = list()
for opt in cnf_opt.sections():
  status = cnf_opt.get(opt,"status")
  if status in ("removed","dropped"):
    # gone entirely: only track for the 'removed' report
    opt_removed.append(opt)
    continue
  if "renamed" in status:
    # status reads 'renamed <new_name>': the new name is the removed one
    opt_removed.append(status.split()[1])
  if not is_ignored(opt):
    opt_config.append(opt)
opt_config.sort()
opt_removed.sort()
# Extract information from build example config file: collect every
# non-ignored variable seen in any bot section, split into environment
# variables vs options, and remember which bots use each variable.
cnf_bex = MyConfigParser()
cnf_bex.read("config/specs/build-examples.conf")

env_examples = list()
opt_examples = list()
env_dict = dict()
opt_dict = dict()
for bot in cnf_bex.sections():
  for var in cnf_bex.options(bot):
    if is_ignored(var):
      continue
    if re_env.match(var):
      if var not in env_examples:
        env_examples.append(var)
      env_dict.setdefault(var,list()).append(bot)
    elif re_opt.match(var):
      if var not in opt_examples:
        opt_examples.append(var)
      opt_dict.setdefault(var,list()).append(bot)
env_examples.sort()
opt_examples.sort()
# Compare environment and options: anything used by the build examples but
# unknown to (or removed from) the current configuration is an error.
denv_examples = [env for env in env_examples if env not in env_config]
dopt_examples = [opt for opt in opt_examples
                 if opt not in opt_config and opt not in opt_removed]
dopt_removed = [opt for opt in opt_examples if opt in opt_removed]

# running error count, extended by the file comparison below
nerr = len(denv_examples) + len(dopt_examples) + len(dopt_removed)
# Compare build examples and generated files: parse every generated
# bb_*.ac file into a key/value dict, then cross-check it against the
# corresponding section of build-examples.conf in both directions.
bex_data = dict()
dbex_files = list()   # generated files with no matching config section
for acf in os.listdir("doc/config/build-examples"):
  if ( re.match("bb_",acf) ):
    acf_section = re.sub("\.ac","",acf)
    if ( cnf_bex.has_section(acf_section) ):
      # parse the .ac file: strip comments, split KEY="value" lines
      acf_data = file("doc/config/build-examples/"+acf,"r").readlines()
      acf_dict = dict()
      for line in acf_data:
        line = re.sub("#.*","",line)
        line = line.strip()
        if ( len(line) > 0 ):
          idx = line.find("=")
          key = line[:idx]
          val = re.sub("\"","",line[idx+1:])
          acf_dict[key] = val
      bex_data[acf_section] = acf_dict
    else:
      dbex_files.append(acf_section)

# config sections with no generated file on disk
dbex_sections = [bot for bot in cnf_bex.sections() \
  if ( re.match("bb_",bot) and not bot in bex_data.keys() )]

dbex_keys = dict()   # bot -> keys missing from the generated file
dbex_vals = dict()   # bot -> {key: "config_value != file_value"}
for bot in cnf_bex.sections():
  if ( re.match("bb_",bot) ):
    # Check for the presence of MPI options to pick the right ignore set
    cnf_mode = "raw"
    if ( "with_mpi_prefix" in cnf_bex.options(bot) ):
      cnf_mode = "mpi"
    if ( "enable_mpi" in cnf_bex.options(bot) ):
      if ( cnf_bex.get(bot,"enable_mpi") == "no" ):
        cnf_mode = "serial"
    for var in cnf_bex.options(bot):
      if ( (not is_ignored(var)) and key_is_ok(cnf_mode,var) ):
        if ( var not in bex_data[bot].keys() ):
          if ( bot not in dbex_keys ):
            dbex_keys[bot] = list()
          dbex_keys[bot].append(var)
        elif ( cnf_bex.get(bot,var) != bex_data[bot][var] ):
          if ( bot not in dbex_vals ):
            dbex_vals[bot] = dict()
          dbex_vals[bot][var] = "%s != %s" % \
            (cnf_bex.get(bot,var),bex_data[bot][var])

nerr += len(dbex_sections) + len(dbex_keys.keys()) + len(dbex_vals.keys())
# Report any mismatch as a fixed-width table on stderr and exit non-zero;
# exit 0 when everything is consistent.
if ( nerr > 0 ):
  sys.stderr.write("%s: reporting use of wrong options\n\n" % \
    (os.path.basename(sys.argv[0])))
  # legend for the one-letter status column
  sys.stderr.write("X: R=Removed / U=Undefined / F=Missing File / K=Missing Key / V=Value Mismatch\n")
  sys.stderr.write(" >=Continued from previous line\n\n")
  sys.stderr.write("%s %-24s %-48s\n" % \
    ("X","Wrong option","Bot"))
  sys.stderr.write("%s %s %s\n" % ("-","-" * 24,"-" * 48))

  # U: variables/options used by bots but not defined in the config
  for env in denv_examples:
    for bot in env_dict[env]:
      sys.stderr.write("%s %-24s %-48s\n" % ("U",env,bot))
  for opt in dopt_examples:
    for bot in opt_dict[opt]:
      sys.stderr.write("%s %-24s %-48s\n" % ("U",opt,bot))
  # R: options marked as removed but still used by bots
  for opt in dopt_removed:
    for bot in opt_dict[opt]:
      sys.stderr.write("%s %-24s %-48s\n" % ("R",opt,bot))
  # F: config sections with no generated file
  for bot in dbex_sections:
    sys.stderr.write("%s %-24s %-48s\n" % ("F","---",bot))
  # K: keys present in the config but absent from the generated file
  for bot in dbex_keys:
    for key in dbex_keys[bot]:
      sys.stderr.write("%s %-24s %-48s\n" % ("K",key,bot))
  # V: value mismatches, printed on two lines (key, then the diff)
  for bot in dbex_vals:
    for key in dbex_vals[bot].keys():
      val = "  " + dbex_vals[bot][key]
      sys.stderr.write("%s %-24s %-48s\n" % ("V",key,bot))
      sys.stderr.write("%s %-24s %-48s\n" % (">",val,""))
  sys.stderr.write("\n")

  sys.exit(1)
else:
  sys.exit(0)
|
qsnake/abinit
|
util/maintainers/check-build-examples.py
|
Python
|
gpl-3.0
| 6,595
|
[
"ABINIT"
] |
d2ae6a95584bcb459365f802b7500740adbaa5610513d749e350b7db24768f13
|
import time
from datetime import datetime
from abc import ABCMeta
from .api_entity import APIEntity
class Application(APIEntity):
    """
    Firefly application entity.

    Wraps an application record returned by the Firefly API; the
    ``created_at``/``updated_at`` ISO timestamps from the API are
    converted to Unix epoch seconds in ``created``/``updated``.
    """
    __metaclass__ = ABCMeta

    # Attribute names serialized by to_json().
    _export = [
        'id', 'eui', 'name', 'description', 'sink', 'created', 'updated'
    ]

    api = None          # API client instance this entity belongs to
    id = -1
    eui = None
    name = None
    description = None
    created = 0         # creation time as Unix timestamp (seconds)
    updated = 0         # last-update time as Unix timestamp (seconds)
    sink = None

    def __init__(self, api, **args):
        self.api = api
        if 'created_at' in args:
            self.created = self._parse_timestamp(args.pop('created_at'))
        if 'updated_at' in args:
            self.updated = self._parse_timestamp(args.pop('updated_at'))
        self._exists = True
        # Remaining keys become plain attributes of the entity.
        self.__dict__.update(args)

    @staticmethod
    def _parse_timestamp(value):
        # Convert a naive ISO-8601 string ('%Y-%m-%dT%H:%M:%S') to epoch
        # seconds; shared by created_at/updated_at handling.
        return int(time.mktime(
            datetime.strptime(value, '%Y-%m-%dT%H:%M:%S').timetuple()))

    def export(self):
        """Return the list of attribute names to serialize."""
        return self._export

    def to_json(self, target, transcript=None, exclude=None):
        # None sentinels avoid the mutable-default-argument pitfall while
        # keeping the effective defaults identical to the previous version.
        if transcript is None:
            transcript = {'created': 'inserted_at', 'updated': 'updated_at'}
        # NOTE(review): ``exclude`` (previously defaulting to ['api']) was
        # never forwarded to the base implementation; behaviour preserved.
        return super(Application, self).to_json(target, transcript)
|
digimondo/python-fireflyapi
|
fireflyapi/application.py
|
Python
|
mit
| 1,136
|
[
"Firefly"
] |
5090282da3f99a65ab6036c0f833680c233c6a30467856cd4da91c90bdfff8c5
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*******************************************************************************************
**CoulombKSpaceP3M** - Coulomb potential and interaction Objects (`K` space part)
*******************************************************************************************
This is the `K` space part of potential of Coulomb long range interaction according to the P3M
summation technique. Good explanation of P3M summation could be found here [Allen89]_,
[Deserno98]_.
Example:
>>> ewaldK_pot = espressopp.interaction.CoulombKSpaceP3M(system, coulomb_prefactor, alpha, kspacecutoff)
>>> ewaldK_int = espressopp.interaction.CellListCoulombKSpaceP3M(system.storage, ewaldK_pot)
>>> system.addInteraction(ewaldK_int)
**!IMPORTANT** Coulomb interaction needs `R` space part as well CoulombRSpace_.
.. _CoulombRSpace: espressopp.interaction.CoulombRSpace.html
Definition:
It provides potential object *CoulombKSpaceP3M* and interaction object *CellListCoulombKSpaceP3M* based on
all particles list.
The *potential* is based on the system information (System_) and parameters:
Coulomb prefactor (coulomb_prefactor), P3M parameter (alpha),
and the cutoff in K space (kspacecutoff).
.. _System: espressopp.System.html
>>> ewaldK_pot = espressopp.interaction.CoulombKSpaceP3M(system, coulomb_prefactor, alpha, kspacecutoff)
Potential Properties:
* *ewaldK_pot.prefactor*
The property 'prefactor' defines the Coulomb prefactor.
* *ewaldK_pot.alpha*
The property 'alpha' defines the P3M parameter :math:`\\alpha`.
* *ewaldK_pot.kmax*
The property 'kmax' defines the cutoff in `K` space.
The *interaction* is based on the all particles list. It needs the information from Storage_
and `K` space part of potential.
.. _Storage: espressopp.storage.Storage.html
>>> ewaldK_int = espressopp.interaction.CellListCoulombKSpaceP3M(system.storage, ewaldK_pot)
Interaction Methods:
* *getPotential()*
Access to the local potential.
Adding the interaction to the system:
>>> system.addInteraction(ewaldK_int)
.. function:: espressopp.interaction.CoulombKSpaceP3M(system, C_pref, alpha, M, P, rcut, interpolation)
:param system:
:param C_pref:
:param alpha:
:param M:
:param P:
:param rcut:
:param interpolation: (default: 200192)
:type system:
:type C_pref:
:type alpha:
:type M:
:type P:
:type rcut:
:type interpolation: int
.. function:: espressopp.interaction.CellListCoulombKSpaceP3M(storage, potential)
:param storage:
:param potential:
:type storage:
:type potential:
.. function:: espressopp.interaction.CellListCoulombKSpaceP3M.getPotential()
:rtype:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_CoulombKSpaceP3M, \
interaction_CellListCoulombKSpaceP3M
class CoulombKSpaceP3MLocal(PotentialLocal, interaction_CoulombKSpaceP3M):
    """Local (per-CPU) wrapper around the C++ P3M K-space potential."""

    def __init__(self, system, C_pref, alpha, M, P, rcut, interpolation = 200192):
        # Only construct the C++ object on ranks that belong to the active
        # PMI CPU group (or everywhere when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_CoulombKSpaceP3M, system, C_pref, alpha, M, P, rcut, interpolation)
class CellListCoulombKSpaceP3MLocal(InteractionLocal, interaction_CellListCoulombKSpaceP3M):
    """Local (per-CPU) cell-list interaction driving the P3M K-space potential."""

    def __init__(self, storage, potential):
        # Construct the C++ interaction only on active PMI worker ranks.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_CellListCoulombKSpaceP3M, storage, potential)

    def getPotential(self):
        # Return the local C++ potential on active worker ranks; on other
        # ranks the method implicitly returns None.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self)
if pmi.isController:
    # Controller-side proxy definitions; method/property calls are forwarded
    # to the *Local classes on the worker ranks via PMI.
    class CoulombKSpaceP3M(Potential):
        # NOTE(review): no explicit __metaclass__ here, unlike the
        # Interaction proxy below — presumably the Potential base class
        # already provides the PMI proxy machinery; confirm.
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.CoulombKSpaceP3MLocal',
            pmiproperty = ['prefactor'] #, 'alpha', 'kmax'
        )

    class CellListCoulombKSpaceP3M(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.CellListCoulombKSpaceP3MLocal',
            pmicall = ['getPotential']
        )
|
capoe/espressopp.soap
|
src/interaction/CoulombKSpaceP3M.py
|
Python
|
gpl-3.0
| 5,244
|
[
"ESPResSo"
] |
95255160bf6d87ed02bea684c7b6d08fee9fff23573ebb0fa4943ff59b45f8f8
|
from aces.materials import Material
from .atomic import get_unique_atoms
from ase import Atoms,Atom
from math import pi,sqrt
from ase.dft.kpoints import ibz_points
class structure(Material):
    """Builder for a T-graphene-like 2D carbon lattice (per the module name)."""

    def set_parameters(self):
        # No extra parameters beyond the Material defaults.
        pass

    def setup(self):
        pass

    def lmp_structure(self):
        # Build the repeated cell, apply periodicity flags, drop duplicate
        # atoms, then scale the lattice vectors by the bond length.
        # NOTE(review): laty/latx are passed swapped relative to the
        # unitcell(latx, laty) signature — confirm this is intentional.
        col=self.unitcell(self.laty,self.latx)
        col.set_pbc([self.xp,self.yp,self.zp])
        atoms=get_unique_atoms(col)
        cell=atoms.cell*self.bond
        atoms.set_cell(cell,scale_atoms=True)
        atoms.center()
        return atoms

    def unitcell(self,latx,laty):
        # Two-atom carbon motif placed on the diagonal of the cell.
        unit=Atoms('C2',[(0.5,0.5,0.0),(0.5+sqrt(2)/2,0.5+sqrt(2)/2,0)])
        atoms=Atoms()
        for i in range(4):
            a=unit.copy()
            # rotate() with a float angle here is the radians form of the
            # older ASE API.
            a.rotate('z',pi/2*i)
            atoms.extend(a)
        atoms.set_cell([2+sqrt(2),2+sqrt(2),10.0])
        # NOTE(review): `atoms` (the four rotated copies, with its cell set)
        # is built but never used — the repeat below acts on the 2-atom
        # `unit` instead. Possibly `atoms.repeat(...)` was intended; confirm.
        col=unit.repeat((latx,laty,1))
        return col
|
vanceeasleaf/aces
|
aces/materials/Tgraphene.py
|
Python
|
gpl-2.0
| 793
|
[
"ASE"
] |
db40b5c520c35406484a99276c4e947fcfd5b4b761b355963a94cdf6cb0a9b82
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from six import string_types
from .file import (AddPathAction, FileAction, FileTransaction, MakeDir,
Copy, Transfer, Link, Move, Remove, Touch)
import os
class ActionParser(object):
    """
    Base class for interpreting actions into scheduler-understandable commands.

    Subclasses override :meth:`parse`; the base implementation is the
    identity, so unknown actions pass through untouched.
    """

    def __init__(self):
        self.parent = None
        self.scheduler = None

    def parse(self, scheduler, action):
        """
        Parse a single action.

        Parameters
        ----------
        scheduler : `Scheduler`
            scheduler that knows environment specifics (e.g. file paths)
            needed during parsing
        action : `Action` or dict or list of str
            the action to interpret

        Returns
        -------
        list of `Action` or dict or str
        """
        return action

    def __call__(self, scheduler, actions):
        parsed = [self.parse(scheduler, entry) for entry in actions]
        return self._f(parsed)

    def __rshift__(self, other):
        # `a >> b` builds a parser that applies b first, then a.
        return ChainedParser(self, other)

    @staticmethod
    def _f(actions):
        """
        Flatten nested lists one level and drop falsy entries.

        Returns
        -------
        list of str or dict or `Action`
        """
        flattened = []
        for entry in actions:
            if isinstance(entry, list):
                flattened.extend(entry)
            else:
                flattened.append(entry)
        return [item for item in flattened if item]
class DictFilterParser(ActionParser):
    """Pass through only dict-typed actions; everything else is dropped."""

    def parse(self, scheduler, action):
        return action if isinstance(action, dict) else None
class StrFilterParser(ActionParser):
    """Pass through only string actions; everything else is dropped."""

    def parse(self, scheduler, action):
        return action if isinstance(action, string_types) else None
class ChainedParser(ActionParser):
    """
    Composition of two parsers: ``child`` runs first, ``parent`` second.
    """

    def __init__(self, parent, child):
        super(ChainedParser, self).__init__()
        self.parent = parent
        self.child = child

    def __call__(self, scheduler, actions):
        intermediate = self.child(scheduler, actions)
        return self.parent(scheduler, intermediate)
class StageInParser(ActionParser):
    """
    Special parser that interprets actions for the RP stage-in phase.
    """

    def parse(self, scheduler, action):
        if not isinstance(action, FileTransaction):
            return action
        # Describe the file transaction as an RP staging directive.
        return {
            'source': action.source.url,
            'target': action.target.url,
            'action': 'Transfer'  # rp.TRANSFER
        }
class BashParser(ActionParser):
    """Translate file actions into equivalent bash command strings."""

    def parse(self, scheduler, action):
        if isinstance(action, FileAction):
            sp = action.source.url
            sd = sp.split('://')[0]          # source drive (url scheme)
            if sd == 'worker':
                sp = sp.split('://')[1]      # worker paths are used bare
            if isinstance(action, Transfer):
                if sd == 'file':
                    sp = sp.split('://')[1]
            if isinstance(action, Remove):
                return ['rm %s %s' % (
                    '-r' if action.source.is_folder else '', sp)]
            elif isinstance(action, Touch):
                if action.source.is_folder:
                    return ['mkdir -p %s' % sp]
                else:
                    return ['touch %s' % sp]
            elif isinstance(action, MakeDir):
                # Only meaningful for folders; files fall through unchanged.
                if action.source.is_folder:
                    return ['mkdir -p %s' % sp]
            elif isinstance(action, FileTransaction):
                if action.target.is_folder == action.source.is_folder:
                    # file to file and folder to folder
                    rules = stage_rules[action.__class__]
                    if rules['bash_cmd']:
                        tp = action.target.url
                        td = action.target.drive
                        if isinstance(action, Move) and action.source.is_folder:
                            # we cannot just replace an existing folder using `mv`
                            # easiest way is to just move all source files
                            # this will create `mv source/* target/ and mv all files in
                            # source to target and overwrite the targets as we expect
                            return [
                                'mkdir -p %s' % tp,  # create target dir if not exist
                                'mv %s* %s' % (sp, tp),  # move all files
                                'rm -r %s' % sp]  # remove source dir
                        else:
                            if isinstance(action, Transfer):
                                if td == 'file':
                                    tp = tp.split('://')[1]
                            if isinstance(action, Link):
                                # links must not end in `/`
                                if action.target.is_folder:
                                    tp = tp[:-1]
                                    sp = sp[:-1]
                            return ['%s %s %s' % (rules['bash_cmd'], sp, tp)]
        else:
            if isinstance(action, AddPathAction):
                return ['export PATH=%s:$PATH' % action.path]

        # Anything not handled above passes through unchanged.
        return action
class StageParser(ActionParser):
    """
    Parse into possible RP Stage commands for ComputeUnits
    """

    def parse(self, scheduler, action):
        sa_location = scheduler.staging_area_location
        if isinstance(action, FileAction):
            sp = action.source.url
            # useful for RP only: rewrite absolute staging-area paths to
            # the staging:// scheme
            if sp.startswith(sa_location):
                sp = 'staging://' + sp.split(sa_location)[1]

            sd = sp.split('://')[0]          # source drive
            if sd == 'worker':
                sp = sp.split('://')[1]
            if isinstance(action, Transfer):
                if sd == 'file':
                    sp = sp.split('://')[1]

            if isinstance(action, FileTransaction):
                tp = action.target.url
                td = action.target.drive
                if td == 'worker':
                    tp = tp.split('://')[1]
                if isinstance(action, Transfer):
                    if td == 'file':
                        tp = tp.split('://')[1]

                # Look up how this (source drive, target drive) pair should
                # be handled for this transaction type.
                rules = stage_rules[action.__class__]
                signature = (sd, td)
                action_models = rules['folder' if action.source.is_folder else 'file']
                action_mode = action_models.get(signature)

                if action_mode == 'stage':
                    ret = {
                        'source': sp,
                        'target': tp,
                        'action': rules['rp_action']
                    }
                    return ret

        # Not stageable — pass through for another parser to handle.
        return action
class WorkerParser(ActionParser):
    """
    A parser that can interprete transactions from/to ``file://`` for workers

    This will write the files to the target location instead of a real
    transaction. It requires the file to be stored in the DB using ``load()``
    """

    def parse(self, scheduler, action):
        # all of this is to keep RP compatibility which works with files
        if isinstance(action, FileTransaction):
            source = action.source
            target = action.target
            # resolve adaptiveMD prefixes into concrete paths
            sp = scheduler.replace_prefix(source.url)
            tp = scheduler.replace_prefix(target.url)
            if source.drive == 'file' and target.drive != 'file':
                if source.has_file:
                    # materialize the DB-stored content at the target path
                    with open(tp, 'w') as f:
                        f.write(source.get_file())
                    return ['# write file `%s` from DB' % tp]
                elif os.path.exists(sp):
                    # in case someone already created the file we need, rename it
                    if sp != tp:
                        return ['ln %s %s' % (sp, tp)]
                else:
                    return ['# Could not write or rename file', action]
            elif target.drive == 'file' and source.drive != 'file':
                return ['ln -s %s %s' % (sp, tp)]

        return action
class PrefixParser(ActionParser):
    """
    Replace all adaptiveMD prefixes in bash command strings.

    Usually applied as the final step to convert remaining file paths.
    """

    def parse(self, scheduler, action):
        if not isinstance(action, string_types):
            return action
        # Bash command: resolve any prefixes it may contain.
        return [scheduler.replace_prefix(action)]
# a list of RP implementations and bash commands for each `Action`
# Per transaction type: 'file'/'folder' map (source drive, target drive)
# signatures to the handling mode ('stage' = RP staging directive,
# 'bash' = shell command built from 'bash_cmd').
stage_rules = {
    Copy: {
        'file': {
            ('staging', 'worker'): 'stage',
            ('worker', 'staging'): 'stage',
            ('sandbox', 'worker'): 'bash',
            ('shared', 'worker'): 'bash',
            ('worker', 'shared'): 'bash',
            ('shared', 'shared'): 'bash',
            ('shared', 'staging'): 'bash',
            ('staging', 'shared'): 'bash'
        },
        'folder': {
            ('staging', 'worker'): 'bash',
            ('worker', 'staging'): 'bash',
            ('sandbox', 'worker'): 'bash',
            ('shared', 'worker'): 'bash',
            ('worker', 'shared'): 'bash',
            ('shared', 'shared'): 'bash',
            ('shared', 'staging'): 'bash',
            ('staging', 'shared'): 'bash'
        },
        'bash_cmd': 'cp',
        'rp_action': 'Copy'  # rp.COPY
    },
    Transfer: {
        'file': {
            ('file', 'worker'): 'stage',
            ('file', 'staging'): 'stage',
            ('staging', 'worker'): 'stage',
            ('staging', 'file'): 'stage',
            ('worker', 'staging'): 'stage',
            ('worker', 'file'): 'stage'
        },
        # Transfers have no bash equivalent and no folder handling.
        'folder': {
        },
        'bash_cmd': None,
        'rp_action': 'Transfer'  # rp.TRANSFER
    },
    Move: {
        'file': {
            ('staging', 'worker'): 'stage',
            ('worker', 'staging'): 'stage',
            ('sandbox', 'worker'): 'bash',
            ('shared', 'worker'): 'bash',
            ('worker', 'shared'): 'bash',
            ('shared', 'shared'): 'bash',
            ('shared', 'staging'): 'bash',
            ('staging', 'shared'): 'bash'
        },
        'folder': {
            ('staging', 'worker'): 'bash',
            ('worker', 'staging'): 'bash',
            ('sandbox', 'worker'): 'bash',
            ('shared', 'worker'): 'bash',
            ('worker', 'shared'): 'bash',
            ('shared', 'shared'): 'bash',
            ('shared', 'staging'): 'bash',
            ('staging', 'shared'): 'bash'
        },
        'bash_cmd': 'mv',
        'rp_action': 'Move'  # rp.MOVE
    },
    Link: {
        'file': {
            ('staging', 'worker'): 'stage',
            ('sandbox', 'worker'): 'bash',
            ('shared', 'worker'): 'bash'
        },
        'folder': {
            ('staging', 'worker'): 'bash',
            ('sandbox', 'worker'): 'bash',
            ('shared', 'worker'): 'bash'
        },
        'bash_cmd': 'ln -s',
        'rp_action': 'Link'  # rp.LINK
    }
}
|
jrossyra/adaptivemd
|
adaptivemd/reducer.py
|
Python
|
lgpl-2.1
| 12,009
|
[
"MDTraj"
] |
6fd47c007a530ccd90ddeee06ea0ff396ab21c0e0c4661ee3aed1adeaf68689c
|
from __future__ import absolute_import
import logging
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urllib2 import Request
except ImportError:
from urllib.request import Request
try:
from galaxy import eggs
eggs.require("poster")
except ImportError:
pass
try:
import poster
except ImportError:
poster = None
# User-facing error raised when the optional `poster` dependency is missing.
POSTER_UNAVAILABLE_MESSAGE = "Pulsar configured to use poster module - but it is unavailable. Please install poster."

log = logging.getLogger(__name__)

# Register poster's streaming HTTP handlers with urllib once, at import time.
if poster is not None:
    poster.streaminghttp.register_openers()
def post_file(url, path):
    """Upload the file at ``path`` to ``url`` as a streaming multipart POST.

    Returns the raw response body. Raises ``ImportError`` when the
    ``poster`` module is unavailable; transport errors are logged and
    re-raised.
    """
    __ensure_poster()
    upload = open(path, "rb")
    try:
        # poster streams the file lazily, so it must stay open until
        # urlopen() has fully sent the request body.
        datagen, headers = poster.encode.multipart_encode({"file": upload})
        request = Request(url, datagen, headers)
        return urlopen(request).read()
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer intercepted just to be logged.
        log.exception("problem")
        raise
    finally:
        # The previous version leaked the file handle.
        upload.close()
def get_file(url, path):
    """Download ``url`` to the local file ``path`` in 1 KiB chunks.

    NOTE(review): like post_file(), this requires the poster module even
    though the download itself only needs urllib — kept for module
    contract consistency.
    """
    __ensure_poster()
    response = urlopen(Request(url=url))
    try:
        with open(path, 'wb') as output:
            while True:
                # `chunk` avoids shadowing the `buffer` builtin.
                chunk = response.read(1024)
                if not chunk:
                    break
                output.write(chunk)
    finally:
        # The previous version never closed the HTTP response; release
        # the underlying socket even if the write fails.
        response.close()
def __ensure_poster():
    # Fail fast with a clear message when the optional poster dependency
    # could not be imported at module load time.
    if poster is None:
        raise ImportError(POSTER_UNAVAILABLE_MESSAGE)
|
ssorgatem/pulsar
|
pulsar/client/transport/poster.py
|
Python
|
apache-2.0
| 1,308
|
[
"Galaxy"
] |
491dc7284787bb989c0504ac12559b1e9745c949492b79de1c2e3651591a738b
|
# This file is part of Merlin/Arthur.
# Merlin/Arthur is the Copyright (C)2009,2010 of Elliot Rosemarine.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
from django.conf.urls import include, url
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from Core.db import session
from Core.maps import Planet, Alliance, User, Intel
from Hooks.scans.parser import scanre, scangrpre, parse
from Arthur.loadable import loadable, load
@load
class lookup(loadable):
    """Site-wide search box handler: dispatch a free-form query to the
    matching page (scan links, coords, alliance, member or planet nick)."""

    # Matches "x:y" or "x:y:z" style coordinates with a single separator
    # character out of ". :-" (group 2 must repeat via the backreference).
    coord = re.compile(r"(\d+)([. :\-])(\d+)(\2(\d+))?")

    def execute(self, request, user):
        lookup = (request.POST.get("lookup") or "").strip()
        if not lookup:
            # Empty query: members go to their dashboard, others to home.
            if user.is_member():
                return HttpResponseRedirect(reverse("dashboard", kwargs={"username":user.name}))
            return HttpResponseRedirect("/")

        # Scan URLs pasted into the box are queued for background parsing.
        scans = scanre.findall(lookup)
        groups = scangrpre.findall(lookup)
        if len(scans) or len(groups):
            for url in scans:
                parse(user.id, "scan", url).start()
            for url in groups:
                parse(user.id, "group", url).start()
            return HttpResponseRedirect(reverse("requests"))

        m = self.coord.match(lookup)
        if m is None:
            # Not coordinates: try alliance name, then member name, then
            # planet nick from intel.
            alliance = Alliance.load(lookup) if lookup else None
            if alliance:
                return HttpResponseRedirect(reverse("alliance", kwargs={"name":alliance.name}))
            elif not user.is_member():
                return HttpResponseRedirect(reverse("alliance_ranks"))
            else:
                member = User.load(lookup, exact=False, access="member") if lookup else None
                if member:
                    return HttpResponseRedirect(reverse("dashboard", kwargs={"username":member.name}))
                else:
                    Q = session.query(Planet)
                    Q = Q.join(Planet.intel)
                    Q = Q.filter(Planet.active == True)
                    Q = Q.filter(Intel.nick.ilike(lookup+"%"))
                    planet = Q.first()
                    if planet:
                        return HttpResponseRedirect(reverse("planet", kwargs={"x":planet.x, "y":planet.y, "z":planet.z}))
                    else:
                        return HttpResponseRedirect(reverse("alliance_ranks"))
        elif m.group(5) is not None:
            # x:y:z — a full planet coordinate.
            return HttpResponseRedirect(reverse("planet", kwargs={"x":m.group(1), "y":m.group(3), "z":m.group(5)}))
        elif m.group(3) is not None:
            # x:y — a galaxy coordinate (group 3 is always set on a match).
            return HttpResponseRedirect(reverse("galaxy", kwargs={"x":m.group(1), "y":m.group(3)}))
# Route POST lookups submitted from the site-wide search box.
urlpatterns = [
    url(r'^lookup/$', lookup),
]
|
d7415/merlin
|
Arthur/views/lookup.py
|
Python
|
gpl-2.0
| 3,608
|
[
"Galaxy"
] |
12b9cc35619d5b7929e4d839a83c2f2bfcc18f58f07262036cfe5a27834124fc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import traceback
import datetime
import reversion
from django.utils.translation import ugettext as _
from snisi_core.models.common import get_temp_receipt
from snisi_core.models.Providers import Provider
from snisi_core.models.Entities import Entity
from snisi_core.models.Roles import Role
from snisi_core.models.Periods import MonthPeriod, FixedDaysPeriod
from snisi_core.models.Notifications import Notification
from snisi_trachoma.models import (TTBacklogVillageR,
TTBacklogMissionR)
from snisi_core.models.Reporting import (ReportClass, ExpectedReporting,
ExpectedValidation)
from snisi_core.integrity import ReportIntegrityChecker
from snisi_tools.datetime import parse_date_string
from snisi_trachoma import PROJECT_BRAND, get_domain
logger = logging.getLogger(__name__)

# Resolved once at import time; raises if these report classes / role are
# missing from the database.
reportcls_visit = ReportClass.objects.get(slug='ttbacklog_visit')
reportcls_mission = ReportClass.objects.get(slug='ttbacklog_mission')
validating_role = Role.objects.get(slug='charge_sis')
class TTBacklogMissionStartChecker(ReportIntegrityChecker):
    """Validates an SMS request to open a TT-backlog mission for a district."""

    DOMAIN = get_domain()

    def _check_completeness(self, **options):
        # All of these SMS fields are mandatory.
        fields = ['district', 'submitter', 'submit_time', 'started_on',
                  'operator_type', 'strategy']
        for field in fields:
            if not self.has(field):
                try:
                    fname = TTBacklogVillageR.field_name(field)
                except:
                    fname = field
                self.add_missing(_("Données manquantes pour {}").format(fname),
                                 blocking=True, field=field)

    def _check(self, **options):
        # period
        period = MonthPeriod.find_create_by_date(self.get('submit_time'))
        self.set('clean_period', period)

        # entity (district)
        entity = Entity.get_or_none(self.get('district'),
                                    type_slug='health_district')
        if entity is None:
            self.add_error("Aucun District ne correspond "
                           "au code {}".format(self.get('district')),
                           field='district', blocking=True)
        self.set('clean_entity', entity)

        # check auth for user at district
        user_district = self.get('submitter').location.get_health_district()
        if (user_district is None
                or not user_district == entity
                or self.get('submitter').role.slug
                not in ('tt_tso', 'tt_opt', 'tt_amo', 'charge_sis')):
            self.add_error("Vous n'êtes pas autorisé à créer un rapport de "
                           "mission pour ce district: {}".format(entity),
                           blocking=True, field='submitter')

        # expected reporting defines if report is expeted or not
        expected_reporting = ExpectedReporting.get_or_none(
            report_class__slug='ttbacklog_mission',
            period=period,
            within_period=True,
            entity=entity,
            within_entity=False)
        self.set('expected_reporting', expected_reporting)

        # should have already been checked in checker.
        # NOTE(review): if expected_reporting is None, the attribute access
        # below would raise unless add_error(blocking=True) aborts the
        # check — presumably it does; confirm.
        if expected_reporting is None:
            self.add_error("Aucune mission TT Backlog attendue à {} pour "
                           "la période de {}".format(entity, period),
                           blocking=True)
        if expected_reporting.completion_status \
                == ExpectedReporting.COMPLETION_COMPLETE:
            self.add_error("Aucune mission TT Backlog attendue à {} pour "
                           "la période de {}".format(entity, period),
                           blocking=True)

        # no creation if exist open
        open_missions = TTBacklogMissionR.objects.filter(
            entity=entity,
            created_by=self.get('submitter'),
        ).exclude(completion_status=TTBacklogMissionR.COMPLETE)
        if open_missions.count():
            self.add_error("Vous avez déjà une mission en cours à {}. "
                           "Cloturez la d'abord.".format(entity, period),
                           blocking=True)

        # started_on must be <= today
        today = datetime.date.today()
        started_on = parse_date_string(self.get('started_on'))
        if started_on is None:
            self.add_error("La date de démarrage est incorrecte: "
                           "{}.".format(self.get('started_on')),
                           blocking=True, field='started_on')
        if started_on.date() > today:
            self.add_error("La date de démarrage est dans "
                           "le futur: {}.".format(started_on),
                           blocking=True, field='started_on')
        self.set('clean_started_on', started_on)

        # operator type: map SMS keyword onto the model's constant.
        operator_type = {
            'amo': TTBacklogMissionR.AMO,
            'tso': TTBacklogMissionR.TSO,
            'opt': TTBacklogMissionR.OPT
        }.get(self.get('operator_type').lower())
        if operator_type not in TTBacklogMissionR.OPERATOR_TYPES.keys():
            self.add_error("Profil agent innatendu: "
                           "{}.".format(self.get('operator_type')),
                           blocking=True, field='operator')
        self.set('clean_operator_type', operator_type)

        # strategy: map SMS keyword onto the model's constant.
        strategy = {
            'fixed': TTBacklogMissionR.FIXED,
            'mobile': TTBacklogMissionR.MOBILE,
            'advanced': TTBacklogMissionR.ADVANCED
        }.get(self.get('strategy').lower())
        if strategy not in TTBacklogMissionR.STRATEGIES.keys():
            self.add_error("Strategie innatendue: "
                           "{}.".format(self.get('strategy')),
                           blocking=True, field='strategy')
        self.set('clean_strategy', strategy)
def create_mission_report(provider, expected_reporting, completed_on,
                          integrity_checker, data_source):
    """Create an INCOMPLETE TTBacklogMissionR from validated SMS data and
    notify the district's charge_sis providers.

    Returns (report, success_message) on success, or
    (False, error_message) when saving fails.
    """
    report = TTBacklogMissionR.start(
        period=expected_reporting.period,
        entity=expected_reporting.entity,
        created_by=provider,
        completion_status=TTBacklogMissionR.INCOMPLETE,
        arrival_status=TTBacklogMissionR.ON_TIME,
        validation_status=TTBacklogMissionR.NOT_VALIDATED)
    # Provisional receipt, truncated to 10 chars.
    report.receipt = get_temp_receipt(report)[:10]

    # fill the report from SMS data
    report.started_on = integrity_checker.get('clean_started_on')
    report.operator = provider
    report.operator_type = integrity_checker.get('clean_operator_type')
    report.strategy = integrity_checker.get('clean_strategy')

    try:
        with reversion.create_revision():
            report.save()
    except Exception as e:
        logger.error("Unable to save report to DB. Content: {} | Exp: {}"
                     .format(data_source, e))
        logger.debug("".join(traceback.format_exc()))
        return False, ("Une erreur technique s'est "
                       "produite. Réessayez plus tard et "
                       "contactez ANTIM si le problème persiste.")

    # No ExpectedReporting for VisitR. We rely on MissionR.

    # Warn Disctrict people that mission started
    for recipient in Provider.get_at(integrity_checker.get('clean_entity'),
                                     role_slug='charge_sis'):
        if recipient == provider:
            continue
        Notification.create(
            provider=recipient,
            deliver=Notification.TODAY,
            expirate_on=report.started_on + datetime.timedelta(days=3),
            category=PROJECT_BRAND,
            text="Une mission TT de {district} a été créée par {author}. "
                 "No reçu: #{receipt}."
                 .format(district=report.entity.display_full_name(),
                         author=report.operator,
                         receipt=report.receipt)
        )

    return report, ("Le rapport de mission TT au départ de {district} "
                    "a été enregistré. "
                    "Le No de reçu est #{receipt}."
                    .format(district=report.entity.display_full_name(),
                            receipt=report.receipt))
class TTBacklogVisitChecker(ReportIntegrityChecker):
    """Validates an SMS village-visit report attached to an open TT mission."""

    DOMAIN = get_domain()

    def _check_completeness(self, **options):
        # Numeric indicator fields plus the local routing fields must all
        # be present in the SMS payload.
        fields = ['consultation_male', 'consultation_female',
                  'surgery_male', 'surgery_female',
                  'refusal_male', 'refusal_female',
                  'recidivism_male', 'recidivism_female']
        local_fields = ['location', 'submitter', 'submit_time']
        for field in fields + local_fields:
            if not self.has(field):
                try:
                    fname = TTBacklogVillageR.field_name(field)
                except:
                    fname = field
                self.add_missing(_("Données manquantes pour {}").format(fname),
                                 blocking=True, field=field)

    def check_data(self, **options):
        """Cross-field coherence checks on the numeric indicators and dates."""
        genders = ('male', 'female')

        # convert data to int
        number_fields = ['consultation_male', 'consultation_female',
                         'surgery_male', 'surgery_female',
                         'refusal_male', 'refusal_female',
                         'recidivism_male', 'recidivism_female']
        for field in number_fields:
            try:
                self.set(field, int(self.get(field)))
            except:
                self.add_error(
                    "La valeur de {} n'est pas un entier.".format(field),
                    blocking=True, field=field)

        error_tmpl = ("Le nombre de {f1} ({f1n}) ne peut pas être supérieur "
                      "au nombre de {f2} ({f2n})")

        # consultés >= opérés + refus
        for gender in genders:
            sf = 'surgery_{}'.format(gender)
            rf = 'refusal_{}'.format(gender)
            cf = 'consultation_{}'.format(gender)
            cname = TTBacklogVillageR.field_name(cf)
            surgery_refusal = self.get(sf) + self.get(rf)
            if surgery_refusal > self.get(cf):
                srname = "{} + {}".format(
                    TTBacklogVillageR.field_name(sf),
                    TTBacklogVillageR.field_name(rf))
                self.add_error(error_tmpl.format(f1=srname,
                                                 f1n=surgery_refusal,
                                                 f2=cname,
                                                 f2n=self.get(cf)),
                               field=sf, blocking=True)

        # operés <= consultés
        for gender in genders:
            sf = 'surgery_{}'.format(gender)
            cf = 'consultation_{}'.format(gender)
            cname = TTBacklogVillageR.field_name(cf)
            if self.get(sf) > self.get(cf):
                rname = TTBacklogVillageR.field_name(sf)
                self.add_error(error_tmpl.format(f1=rname,
                                                 f1n=self.get(sf),
                                                 f2=cname,
                                                 f2n=self.get(cf)),
                               field=sf, blocking=True)

        # refus <= consultés
        for gender in genders:
            rf = 'refusal_{}'.format(gender)
            cf = 'consultation_{}'.format(gender)
            cname = TTBacklogVillageR.field_name(cf)
            if self.get(rf) > self.get(cf):
                rname = TTBacklogVillageR.field_name(rf)
                self.add_error(error_tmpl.format(f1=rname,
                                                 f1n=self.get(rf),
                                                 f2=cname,
                                                 f2n=self.get(cf)),
                               field=rf, blocking=True)

        # recidives <= opérés
        for gender in genders:
            rf = 'recidivism_{}'.format(gender)
            sf = 'surgery_{}'.format(gender)
            cname = TTBacklogVillageR.field_name(sf)
            if self.get(rf) > self.get(sf):
                rname = TTBacklogVillageR.field_name(rf)
                self.add_error(error_tmpl.format(f1=rname,
                                                 f1n=self.get(rf),
                                                 f2=cname,
                                                 f2n=self.get(sf)),
                               field=rf, blocking=True)

        # community_assistance: normalize any truthy value to True.
        if self.get('community_assistance'):
            self.set('community_assistance', True)

        # arrived_on is a date
        self.set('arrived_on', parse_date_string(self.get('arrived_on')))
        if not isinstance(self.get('arrived_on'), datetime.date):
            self.add_error("La date d'arrivée au village est "
                           "incompréhensible: {}"
                           .format(self.get('arrived_on')),
                           blocking=True, field='arrived_on')

        # left_on is a date
        self.set('left_on', parse_date_string(self.get('left_on')))
        if not isinstance(self.get('left_on'), datetime.date):
            self.add_error("La date de départ du village est "
                           "incompréhensible: {}".format(self.get('left_on')),
                           blocking=True, field='left_on')

        today = datetime.date.today()

        # arrived_on <= today
        if not self.get('arrived_on').date() <= today:
            self.add_error("La date d'arrivée au village est "
                           "dans le futur: {}".format(self.get('arrived_on')),
                           blocking=True, field='arrived_on')

        # left_on <= today
        if not self.get('left_on').date() <= today:
            self.add_error("La date de départ du village est "
                           "dans le futur: {}".format(self.get('left_on')),
                           blocking=True, field='left_on')

        # arrived_on <= left_on
        if not self.get('arrived_on') <= self.get('left_on'):
            self.add_error("La date de départ du village est "
                           "postérieure à la date d'arrivée {}"
                           .format(self.get('left_on')),
                           blocking=True, field='left_on')

    def _check(self, **options):
        self.check_data(**options)

        # Entity for Village
        village = Entity.get_or_none(self.get('location'), type_slug='vfq')
        if village is None:
            self.add_error("Aucun village ne correspond "
                           "au code {}".format(self.get('location')),
                           field='location', blocking=True)
        self.set('clean_village', village)

        # Entity for district
        district = village.get_health_district()
        if district is None:
            self.add_error("Impossible de retrouver le district correspondant "
                           "au village {}".format(village),
                           field='district', blocking=True)
        self.set('clean_district', district)

        # check auth for user at district
        user_district = self.get('submitter').location.get_health_district()
        if (user_district is None
                or not user_district == district
                or self.get('submitter').role.slug
                not in ('tt_tso', 'tt_opt', 'tt_amo', 'charge_sis')):
            self.add_error("Vous n'êtes pas autorisé à créer un rapport de "
                           "visite pour ce village: {}".format(village),
                           blocking=True, field='submitter')

        # No ExpectedReporting for VisitR ; open missionR instead
        open_missions = TTBacklogMissionR.objects.filter(
            entity=district,
            created_by=self.get('submitter'),
        ).exclude(completion_status=TTBacklogMissionR.COMPLETE)
        if not open_missions.count():
            self.add_error("Aucune mission TT en cours pour vous. "
                           "Commencez par envoyer le formulaire de "
                           "début de mission.", blocking=True)
        if open_missions.count() > 1:
            self.add_error("Vous avez plusieurs missions ouvertes. "
                           "Merci de contacter ANTIM.", blocking=True)

        # only one visit per village
        if open_missions[0].village_reports.filter(
                location__slug=village.slug):
            self.add_error("Cette mission possède déjà un rapport de "
                           "visite TT pour le village {}.".format(village),
                           blocking=True, field='location')

        missionR = open_missions[0]
        # set mission for use in create_report
        self.set('missionR', missionR)

        # arrived_on => mission.started_on
        if not self.get('arrived_on').date() >= missionR.started_on:
            self.add_error("La date d'arrivée au village est antérieure au "
                           "début de la mission: {}"
                           .format(self.get('arrived_on')),
                           blocking=True, field='arrived_on')
def create_visit_report(provider, expected_reporting, completed_on,
                        integrity_checker, data_source):
    # Build and persist a TTBacklogVillageR (TT village-visit report) from
    # the values collected by the integrity checker, attach it to the open
    # mission, and notify the district's charge_sis providers.
    #
    # provider: Provider who submitted the SMS (becomes report author).
    # expected_reporting: unused here -- VisitR has no ExpectedReporting;
    #     the report is attached to the open MissionR instead (see below).
    # completed_on: datetime at which the report was completed.
    # integrity_checker: checker exposing already-validated values
    #     ('missionR', 'clean_village', 'clean_district', counters, dates).
    # data_source: raw SMS text; only used for error logging.
    #
    # Returns (report, message) on success or (False, message) on DB error.
    report = TTBacklogVillageR.start(
        period=integrity_checker.get('missionR').period,
        entity=integrity_checker.get('clean_village'),
        created_by=provider,
        completion_status=TTBacklogVillageR.COMPLETE,
        completed_on=completed_on,
        integrity_status=TTBacklogVillageR.CORRECT,
        arrival_status=TTBacklogVillageR.ON_TIME,
        validation_status=TTBacklogVillageR.NOT_VALIDATED)
    # fill the report from SMS data
    report.location = integrity_checker.get('clean_village')
    # gendered counters are stored as consultation_male, consultation_female,
    # surgery_male, ... matching the checker's field names exactly.
    for field_part in ('consultation', 'surgery', 'refusal', 'recidivism'):
        for gender in ('male', 'female'):
            field = '{}_{}'.format(field_part, gender)
            setattr(report, field, integrity_checker.get(field))
    for field in ('community_assistance', 'arrived_on', 'left_on'):
        setattr(report, field, integrity_checker.get(field))
    try:
        # save inside a django-reversion revision so history is kept
        with reversion.create_revision():
            report.save()
    except Exception as e:
        logger.error("Unable to save report to DB. Content: {} | Exp: {}"
                     .format(data_source, e))
        logger.debug("".join(traceback.format_exc()))
        return False, ("Une erreur technique s'est "
                       "produite. Réessayez plus tard et "
                       "contactez ANTIM si le problème persiste.")
    # no expected reporting to acknowledge.
    # Add this report to MissionR
    integrity_checker.get('missionR').add_village(report)
    # No individual validation on VisitR.
    # Validation is handled Mission-wise once it is over.
    # Add alert to validation Entity?
    for recipient in Provider.get_at(integrity_checker.get('clean_district'),
                                     role_slug='charge_sis'):
        if recipient == provider:
            # do not notify the author about their own report
            continue
        Notification.create(
            provider=recipient,
            deliver=Notification.TODAY,
            # notification is pointless a few days after the visit ended
            expirate_on=report.left_on + datetime.timedelta(days=3),
            category=PROJECT_BRAND,
            text="La mission TT de {mission_author} a fini la visite de "
                 " {village}. No reçu: #{receipt}."
                 .format(
                    village=report.location.display_full_name(),
                    mission_author=integrity_checker.get('missionR').operator,
                    receipt=report.receipt)
        )
    return report, ("Le rapport de visite TT pour {village} "
                    "a été enregistré. "
                    "Le No de reçu est #{receipt}."
                    .format(village=report.entity.display_full_name(),
                            receipt=report.receipt))
class TTBacklogMissionEndChecker(ReportIntegrityChecker):
    # Integrity checker for the "end of TT mission" SMS: validates the
    # district code and the end date, ensures the submitter has exactly
    # one open mission, and exposes the cleaned values ('clean_entity',
    # 'clean_ended_on', 'missionR', 'expected_reporting') for the handler.

    DOMAIN = get_domain()

    def _check_completeness(self, **options):
        # every required SMS field must be present
        for field in ['district', 'ended_on', 'submit_time', 'submitter']:
            if not self.has(field):
                self.add_missing(_("Données manquantes pour {}").format(field),
                                 blocking=True, field=field)

    def _check(self, **options):
        # district
        entity = Entity.get_or_none(self.get('district'),
                                    type_slug='health_district')
        if entity is None:
            self.add_error("Aucun District ne correspond "
                           "au code {}".format(self.get('district')),
                           field='district', blocking=True)
        self.set('clean_entity', entity)
        # ended_on must parse and must be <= today
        today = datetime.date.today()
        try:
            self.set('clean_ended_on', parse_date_string(self.get('ended_on')))
        except Exception:
            # was a bare `except:` (also swallowed SystemExit and
            # KeyboardInterrupt) and formatted 'clean_ended_on', which was
            # never set when parsing failed; show the raw input instead.
            self.add_error("La date de fin de mission est incorrecte: "
                           "{}.".format(self.get('ended_on')),
                           blocking=True)
        if self.get('clean_ended_on').date() > today:
            self.add_error("La date de fin de mission est dans "
                           "le futur: {}".format(self.get('clean_ended_on')),
                           blocking=True, field='ended_on')
        # the submitter must have exactly one open (non-complete) mission
        open_missions = TTBacklogMissionR.objects.filter(
            entity=entity,
            created_by=self.get('submitter'),
        ).exclude(completion_status=TTBacklogMissionR.COMPLETE)
        if not open_missions.count():
            self.add_error("Aucune mission TT en cours pour vous. "
                           "Commencez par envoyer le formulaire de "
                           "début de mission.", blocking=True)
        if open_missions.count() > 1:
            self.add_error("Vous avez plusieurs missions ouvertes. "
                           "Merci de contacter ANTIM.", blocking=True)
        missionR = open_missions.all()[0]
        self.set('missionR', missionR)
        # end date can not precede the mission start date
        if self.get('clean_ended_on').date() < missionR.started_on:
            self.add_error("La date de fin de mission est antérieure "
                           "à la date de début: {}"
                           .format(self.get('clean_ended_on')),
                           blocking=True, field='ended_on')
        # ExpectedReporting for the mission report itself (may be None)
        expected_reporting = ExpectedReporting.get_or_none(
            report_class=reportcls_mission,
            period=missionR.period,
            entity=missionR.entity)
        self.set('expected_reporting', expected_reporting)
def close_mission_report(provider, expected_reporting, completed_on,
                         integrity_checker, data_source):
    # Mark the submitter's open TT mission report as complete, acknowledge
    # its ExpectedReporting, open a 10-day validation window for the
    # district, and notify the district's charge_sis providers.
    #
    # provider: Provider who sent the "end of mission" SMS.
    # expected_reporting: ExpectedReporting fetched by the checker.
    #     NOTE(review): the checker uses get_or_none, so this may be None;
    #     confirm acknowledge_report cannot be reached in that case.
    # completed_on: datetime at which the report was completed.
    # integrity_checker: holds 'missionR', 'clean_ended_on', 'clean_entity'.
    # data_source: raw SMS text; only used for error logging.
    #
    # Returns (report, message) on success or (False, message) on DB error.
    report = integrity_checker.get('missionR')
    report.ended_on = integrity_checker.get('clean_ended_on')
    report.integrity_status = TTBacklogMissionR.CORRECT
    report.completion_status = TTBacklogMissionR.COMPLETE
    report.completed_on = completed_on
    try:
        # save inside a django-reversion revision so history is kept
        with reversion.create_revision():
            report.save()
    except Exception as e:
        logger.error("Unable to save report to DB. Content: {} | Exp: {}"
                     .format(data_source, e))
        logger.debug("".join(traceback.format_exc()))
        return False, ("Une erreur technique s'est "
                       "produite. Réessayez plus tard et "
                       "contactez ANTIM si le problème persiste.")
    else:
        expected_reporting.acknowledge_report(report)
    # VP is District VP of next month
    today = datetime.date.today()
    validation_period = FixedDaysPeriod.find_create_with(
        start_on=today,
        end_on=today + datetime.timedelta(days=10))
    # VE is the district (CSCOM's parent)
    validating_entity = report.entity
    # created expected validation for district charge_sis
    # NOTE(review): `validating_role` is a module-level name not visible in
    # this chunk -- presumably the charge_sis Role; verify upstream.
    ExpectedValidation.objects.create(
        report=report,
        validation_period=validation_period,
        validating_entity=validating_entity,
        validating_role=validating_role)
    # Add alert to validation Entity?
    for recipient in Provider.get_at(integrity_checker.get('clean_entity'),
                                     role_slug='charge_sis'):
        if recipient == provider:
            # do not notify the author about their own report
            continue
        Notification.create(
            provider=recipient,
            deliver=Notification.TODAY,
            # notification is pointless a few days after the mission ended
            expirate_on=report.ended_on + datetime.timedelta(days=3),
            category=PROJECT_BRAND,
            text="La mission TT de {mission_author} est terminée. "
                 "No reçu: #{receipt}."
                 .format(mission_author=report.operator,
                         receipt=report.receipt)
        )
    return report, ("Le rapport de mission TT "
                    "a été enregistré. "
                    "Le No de reçu est #{receipt}."
                    .format(receipt=report.receipt))
|
yeleman/snisi
|
snisi_trachoma/integrity.py
|
Python
|
mit
| 25,172
|
[
"VisIt"
] |
17f2495095f76c81eccb403d72f16c7e45e44daf02e17ddf24fb94f0588566f8
|
# Seed Wikipedia URLs for math / ML / optimization topics.
# The raw listing below contains duplicates (e.g. Linear_algebra,
# Dot_product, Support_vector_machine, Criss-cross_algorithm appear twice);
# dict.fromkeys() drops them while preserving first-occurrence order, so
# consumers iterating MATH never process the same page twice.
MATH = list(dict.fromkeys([
    'https://en.wikipedia.org/wiki/Analysis_of_algorithms',
    'https://en.wikipedia.org/wiki/Linear_programming',
    'https://en.wikipedia.org/wiki/Mathematics',
    'https://en.wikipedia.org/wiki/Algorithm',
    'https://en.wikipedia.org/wiki/Scalar_multiplication',
    'https://en.wikipedia.org/wiki/Maxima_and_minima',
    'https://en.wikipedia.org/wiki/Machine_learning',
    'https://en.wikipedia.org/wiki/Mathematical_structure',
    'https://en.wikipedia.org/wiki/Statistics',
    'https://en.wikipedia.org/wiki/Randomized_algorithm',
    'https://en.wikipedia.org/wiki/Mathematical_optimization',
    'https://en.wikipedia.org/wiki/Dot_product',
    'https://en.wikipedia.org/wiki/Linear_algebra',
    'https://en.wikipedia.org/wiki/Criss-cross_algorithm',
    'https://en.wikipedia.org/wiki/Support_vector_machine',
    'https://en.wikipedia.org/wiki/Abstract_algebra',
    'https://en.wikipedia.org/wiki/Active_filter',
    'https://en.wikipedia.org/wiki/Adaptive_control',
    'https://en.wikipedia.org/wiki/Adversarial_machine_learning',
    'https://en.wikipedia.org/wiki/Algebra',
    'https://en.wikipedia.org/wiki/Algebraic_geometry',
    'https://en.wikipedia.org/wiki/Algorithm',
    'https://en.wikipedia.org/wiki/Angle',
    'https://en.wikipedia.org/wiki/Anomaly_detection',
    'https://en.wikipedia.org/wiki/Applied_mathematics',
    'https://en.wikipedia.org/wiki/Approximation_algorithm',
    'https://en.wikipedia.org/wiki/Approximation_theory',
    'https://en.wikipedia.org/wiki/Areas_of_mathematics',
    'https://en.wikipedia.org/wiki/Arg_max',
    'https://en.wikipedia.org/wiki/Argument_of_a_function',
    'https://en.wikipedia.org/wiki/Arithmetic',
    'https://en.wikipedia.org/wiki/Arithmetic_operation',
    'https://en.wikipedia.org/wiki/Artificial_neural_network',
    'https://en.wikipedia.org/wiki/Augmented_Lagrangian_method',
    'https://en.wikipedia.org/wiki/Average_case_complexity',
    'https://en.wikipedia.org/wiki/BFGS_method',
    'https://en.wikipedia.org/wiki/BIRCH_(data_clustering)',
    'https://en.wikipedia.org/wiki/Backpropagation',
    'https://en.wikipedia.org/wiki/Barrier_function',
    'https://en.wikipedia.org/wiki/Basis_(linear_algebra)',
    'https://en.wikipedia.org/wiki/Bayesian_network',
    'https://en.wikipedia.org/wiki/Bayesian_optimization',
    'https://en.wikipedia.org/wiki/Bellman_equation',
    'https://en.wikipedia.org/wiki/Bilinear_form',
    'https://en.wikipedia.org/wiki/Binary_classification',
    'https://en.wikipedia.org/wiki/Binary_classifier',
    'https://en.wikipedia.org/wiki/Bland%27s_rule',
    'https://en.wikipedia.org/wiki/Block_matrix',
    'https://en.wikipedia.org/wiki/Boosting_(machine_learning)',
    'https://en.wikipedia.org/wiki/Bootstrap_aggregating',
    'https://en.wikipedia.org/wiki/Bounded_set',
    'https://en.wikipedia.org/wiki/Brain-machine_interfaces',
    'https://en.wikipedia.org/wiki/Branch_and_cut',
    'https://en.wikipedia.org/wiki/Buchberger%27s_algorithm',
    'https://en.wikipedia.org/wiki/Calculus',
    'https://en.wikipedia.org/wiki/Calculus_of_variations',
    'https://en.wikipedia.org/wiki/Candidate_solution',
    'https://en.wikipedia.org/wiki/Canonical_correlation_analysis',
    'https://en.wikipedia.org/wiki/Cartesian_coordinate_system',
    'https://en.wikipedia.org/wiki/Cauchy%E2%80%93Schwarz_inequality',
    'https://en.wikipedia.org/wiki/Cluster_analysis',
    'https://en.wikipedia.org/wiki/Combinatorial_optimization',
    'https://en.wikipedia.org/wiki/Combinatorics',
    'https://en.wikipedia.org/wiki/Comparative_statics',
    'https://en.wikipedia.org/wiki/Complex_conjugate',
    'https://en.wikipedia.org/wiki/Complex_function',
    'https://en.wikipedia.org/wiki/Complex_number',
    'https://en.wikipedia.org/wiki/Computation',
    'https://en.wikipedia.org/wiki/Computational_finance',
    'https://en.wikipedia.org/wiki/Computational_geometry',
    'https://en.wikipedia.org/wiki/Computational_mathematics',
    'https://en.wikipedia.org/wiki/Computational_statistics',
    'https://en.wikipedia.org/wiki/Concave_function',
    'https://en.wikipedia.org/wiki/Conditional_independence',
    'https://en.wikipedia.org/wiki/Conditional_random_field',
    'https://en.wikipedia.org/wiki/Conformational_analysis',
    'https://en.wikipedia.org/wiki/Conjugate_gradient_method',
    'https://en.wikipedia.org/wiki/Conjugate_linear',
    'https://en.wikipedia.org/wiki/Constraint_(mathematics)',
    'https://en.wikipedia.org/wiki/Constraint_programming',
    'https://en.wikipedia.org/wiki/Constraint_satisfaction',
    'https://en.wikipedia.org/wiki/Continuous_function',
    'https://en.wikipedia.org/wiki/Control_theory',
    'https://en.wikipedia.org/wiki/Convex_analysis',
    'https://en.wikipedia.org/wiki/Convex_function',
    'https://en.wikipedia.org/wiki/Convex_hull',
    'https://en.wikipedia.org/wiki/Convex_minimization',
    'https://en.wikipedia.org/wiki/Convex_optimization',
    'https://en.wikipedia.org/wiki/Convex_programming',
    'https://en.wikipedia.org/wiki/Convex_set',
    'https://en.wikipedia.org/wiki/Coordinate_descent',
    'https://en.wikipedia.org/wiki/Coordinate_vector',
    'https://en.wikipedia.org/wiki/Cosine',
    'https://en.wikipedia.org/wiki/Criss-cross_algorithm',
    'https://en.wikipedia.org/wiki/Crisscross_method',
    'https://en.wikipedia.org/wiki/Critical_point_(mathematics)',
    'https://en.wikipedia.org/wiki/Cross-validation_(statistics)',
    'https://en.wikipedia.org/wiki/Cross_product',
    'https://en.wikipedia.org/wiki/Cubic_polynomial',
    'https://en.wikipedia.org/wiki/Curve_fitting',
    'https://en.wikipedia.org/wiki/Cutting-plane_method',
    'https://en.wikipedia.org/wiki/Data_mining',
    'https://en.wikipedia.org/wiki/Data_modeling',
    'https://en.wikipedia.org/wiki/Decision_tree',
    'https://en.wikipedia.org/wiki/Decision_tree_learning',
    'https://en.wikipedia.org/wiki/Degree_of_a_polynomial',
    'https://en.wikipedia.org/wiki/Density_estimation',
    'https://en.wikipedia.org/wiki/Differential_calculus',
    'https://en.wikipedia.org/wiki/Differential_equation',
    'https://en.wikipedia.org/wiki/Differential_evolution',
    'https://en.wikipedia.org/wiki/Differential_geometry',
    'https://en.wikipedia.org/wiki/Digital_signal_processing',
    'https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm',
    'https://en.wikipedia.org/wiki/Dimension',
    'https://en.wikipedia.org/wiki/Dimension_(vector_space)',
    'https://en.wikipedia.org/wiki/Dimensionality_reduction',
    'https://en.wikipedia.org/wiki/Dinic%27s_algorithm',
    'https://en.wikipedia.org/wiki/Directed_acyclic_graph',
    'https://en.wikipedia.org/wiki/Discrete_function',
    'https://en.wikipedia.org/wiki/Discrete_geometry',
    'https://en.wikipedia.org/wiki/Discrete_mathematics',
    'https://en.wikipedia.org/wiki/Displacement_(vector)',
    'https://en.wikipedia.org/wiki/Domain_(mathematics)',
    'https://en.wikipedia.org/wiki/Domain_of_a_function',
    'https://en.wikipedia.org/wiki/Dot_operator',
    'https://en.wikipedia.org/wiki/Dot_product',
    'https://en.wikipedia.org/wiki/Dual_space',
    'https://en.wikipedia.org/wiki/Dynamic_programming',
    'https://en.wikipedia.org/wiki/Dynamic_relaxation',
    'https://en.wikipedia.org/wiki/Dynamic_stochastic_general_equilibrium',
    'https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors',
    'https://en.wikipedia.org/wiki/Elementary_algebra',
    'https://en.wikipedia.org/wiki/Ellipsoid_method',
    'https://en.wikipedia.org/wiki/Ellipsoidal_algorithm',
    'https://en.wikipedia.org/wiki/Ensemble_Averaging',
    'https://en.wikipedia.org/wiki/Ensemble_learning',
    'https://en.wikipedia.org/wiki/Euclidean_geometry',
    'https://en.wikipedia.org/wiki/Euclidean_length',
    'https://en.wikipedia.org/wiki/Euclidean_metric',
    'https://en.wikipedia.org/wiki/Euclidean_space',
    'https://en.wikipedia.org/wiki/Euclidean_vector',
    'https://en.wikipedia.org/wiki/Expectation-maximization_algorithm',
    'https://en.wikipedia.org/wiki/Expenditure_minimization_problem',
    'https://en.wikipedia.org/wiki/Extreme_value_theorem',
    'https://en.wikipedia.org/wiki/Facet',
    'https://en.wikipedia.org/wiki/Factor_analysis',
    'https://en.wikipedia.org/wiki/Farkas_lemma',
    'https://en.wikipedia.org/wiki/Feasible_region',
    'https://en.wikipedia.org/wiki/Feature_learning',
    'https://en.wikipedia.org/wiki/Feature_space',
    'https://en.wikipedia.org/wiki/Fermat%27s_theorem_(stationary_points)',
    'https://en.wikipedia.org/wiki/Field_(mathematics)',
    'https://en.wikipedia.org/wiki/Finite_difference',
    'https://en.wikipedia.org/wiki/Finite_geometry',
    'https://en.wikipedia.org/wiki/First_derivative_test',
    'https://en.wikipedia.org/wiki/Fisher_kernel',
    'https://en.wikipedia.org/wiki/Floating_point',
    'https://en.wikipedia.org/wiki/Fractional_programming',
    'https://en.wikipedia.org/wiki/Frank%E2%80%93Wolfe_algorithm',
    'https://en.wikipedia.org/wiki/Frobenius_inner_product',
    'https://en.wikipedia.org/wiki/Function_(mathematics)',
    'https://en.wikipedia.org/wiki/Function_model',
    'https://en.wikipedia.org/wiki/Function_of_a_real_variable',
    'https://en.wikipedia.org/wiki/Functional_(mathematics)',
    'https://en.wikipedia.org/wiki/Functional_analysis',
    'https://en.wikipedia.org/wiki/Fuzzy_logic',
    'https://en.wikipedia.org/wiki/Game_theory',
    'https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm',
    'https://en.wikipedia.org/wiki/Gaussian',
    'https://en.wikipedia.org/wiki/Gaussian_elimination',
    'https://en.wikipedia.org/wiki/Generalization_error',
    'https://en.wikipedia.org/wiki/Generalized_linear_model',
    'https://en.wikipedia.org/wiki/Geometry',
    'https://en.wikipedia.org/wiki/Global_optimization',
    'https://en.wikipedia.org/wiki/Golden_section_search',
    'https://en.wikipedia.org/wiki/Gradient',
    'https://en.wikipedia.org/wiki/Gradient_descent',
    'https://en.wikipedia.org/wiki/Graph_algorithm',
    'https://en.wikipedia.org/wiki/Graph_theory',
    'https://en.wikipedia.org/wiki/Graphical_model',
    'https://en.wikipedia.org/wiki/Greedy_algorithm',
    'https://en.wikipedia.org/wiki/Grid_search',
    'https://en.wikipedia.org/wiki/Hermitian_form',
    'https://en.wikipedia.org/wiki/Hessian_matrix',
    'https://en.wikipedia.org/wiki/Heuristic_algorithm',
    'https://en.wikipedia.org/wiki/Hidden_Markov_model',
    'https://en.wikipedia.org/wiki/Hierarchical_clustering',
    'https://en.wikipedia.org/wiki/High-dimensional_space',
    'https://en.wikipedia.org/wiki/High-performance_computing',
    'https://en.wikipedia.org/wiki/Hilbert_space',
    'https://en.wikipedia.org/wiki/Hill_climbing',
    'https://en.wikipedia.org/wiki/Homogeneous_polynomial',
    'https://en.wikipedia.org/wiki/Hyperbolic_function',
    'https://en.wikipedia.org/wiki/Hyperplane',
    'https://en.wikipedia.org/wiki/Independent_component_analysis',
    'https://en.wikipedia.org/wiki/Inference',
    'https://en.wikipedia.org/wiki/Infinite-dimensional_optimization',
    'https://en.wikipedia.org/wiki/Infinity',
    'https://en.wikipedia.org/wiki/Information_theory',
    'https://en.wikipedia.org/wiki/Inner_product_space',
    'https://en.wikipedia.org/wiki/Integer',
    'https://en.wikipedia.org/wiki/Integer_programming',
    'https://en.wikipedia.org/wiki/Interior_point_method',
    'https://en.wikipedia.org/wiki/Interpolation',
    'https://en.wikipedia.org/wiki/Interval_(mathematics)',
    'https://en.wikipedia.org/wiki/Invertible_matrix',
    'https://en.wikipedia.org/wiki/Isotropic_quadratic_form',
    'https://en.wikipedia.org/wiki/Iterative_method',
    'https://en.wikipedia.org/wiki/Iterative_methods',
    'https://en.wikipedia.org/wiki/Johnson%27s_algorithm',
    'https://en.wikipedia.org/wiki/Joint_probability_distribution',
    'https://en.wikipedia.org/wiki/K-SVD',
    'https://en.wikipedia.org/wiki/K-means_clustering',
    'https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm',
    'https://en.wikipedia.org/wiki/K-nearest_neighbors_classification',
    'https://en.wikipedia.org/wiki/Karmarkar%27s_algorithm',
    'https://en.wikipedia.org/wiki/Kernel_(integral_operator)',
    'https://en.wikipedia.org/wiki/Kernel_(linear_algebra)',
    'https://en.wikipedia.org/wiki/Kernel_trick',
    'https://en.wikipedia.org/wiki/Kronecker_delta',
    'https://en.wikipedia.org/wiki/Kruskal%27s_algorithm',
    'https://en.wikipedia.org/wiki/Lagrange_multiplier',
    'https://en.wikipedia.org/wiki/Lagrangian_relaxation',
    'https://en.wikipedia.org/wiki/Law_of_cosines',
    'https://en.wikipedia.org/wiki/Least_squares',
    'https://en.wikipedia.org/wiki/Lemke%27s_algorithm',
    'https://en.wikipedia.org/wiki/Lie_theory',
    'https://en.wikipedia.org/wiki/Limited-memory_BFGS',
    'https://en.wikipedia.org/wiki/Line_search',
    'https://en.wikipedia.org/wiki/Linear-fractional_programming',
    'https://en.wikipedia.org/wiki/Linear_algebra',
    'https://en.wikipedia.org/wiki/Linear_classifier',
    'https://en.wikipedia.org/wiki/Linear_combination',
    'https://en.wikipedia.org/wiki/Linear_discriminant_analysis',
    'https://en.wikipedia.org/wiki/Linear_independence',
    'https://en.wikipedia.org/wiki/Linear_inequality',
    'https://en.wikipedia.org/wiki/Linear_map',
    'https://en.wikipedia.org/wiki/Linear_programming',
    'https://en.wikipedia.org/wiki/Linear_regression',
    'https://en.wikipedia.org/wiki/Linear_separability',
    'https://en.wikipedia.org/wiki/Linear_span',
    'https://en.wikipedia.org/wiki/Linear_system',
    'https://en.wikipedia.org/wiki/Linearly_separable',
    'https://en.wikipedia.org/wiki/Lipschitz_continuity',
    'https://en.wikipedia.org/wiki/Lipschitz_function',
    'https://en.wikipedia.org/wiki/Local_convergence',
    'https://en.wikipedia.org/wiki/Local_outlier_factor',
    'https://en.wikipedia.org/wiki/Local_search_(optimization)',
    'https://en.wikipedia.org/wiki/Logistic_regression',
    'https://en.wikipedia.org/wiki/Loss_function',
    'https://en.wikipedia.org/wiki/Manifold_learning',
    'https://en.wikipedia.org/wiki/Map_(mathematics)',
    'https://en.wikipedia.org/wiki/Margin_classifier',
    'https://en.wikipedia.org/wiki/Mathematical_Programming',
    'https://en.wikipedia.org/wiki/Mathematical_analysis',
    'https://en.wikipedia.org/wiki/Mathematical_logic',
    'https://en.wikipedia.org/wiki/Mathematical_model',
    'https://en.wikipedia.org/wiki/Mathematical_optimization',
    'https://en.wikipedia.org/wiki/Mathematical_physics',
    'https://en.wikipedia.org/wiki/Mathematical_statistics',
    'https://en.wikipedia.org/wiki/Mathematics',
    'https://en.wikipedia.org/wiki/Matrix_(mathematics)',
    'https://en.wikipedia.org/wiki/Matrix_decomposition',
    'https://en.wikipedia.org/wiki/Matrix_multiplication',
    'https://en.wikipedia.org/wiki/Matroid',
    'https://en.wikipedia.org/wiki/Max-flow_min-cut_theorem',
    'https://en.wikipedia.org/wiki/Maxima_and_minima',
    'https://en.wikipedia.org/wiki/Maximum-margin_hyperplane',
    'https://en.wikipedia.org/wiki/Maximum_(mathematics)',
    'https://en.wikipedia.org/wiki/Maximum_theorem',
    'https://en.wikipedia.org/wiki/Mean-shift',
    'https://en.wikipedia.org/wiki/Memetic_algorithm',
    'https://en.wikipedia.org/wiki/Metaheuristic',
    'https://en.wikipedia.org/wiki/Minimum_spanning_tree',
    'https://en.wikipedia.org/wiki/Minor_(linear_algebra)',
    'https://en.wikipedia.org/wiki/Mixed_complementarity_problem',
    'https://en.wikipedia.org/wiki/Mixed_linear_complementarity_problem',
    'https://en.wikipedia.org/wiki/Monomials',
    'https://en.wikipedia.org/wiki/Multi-label_classification',
    'https://en.wikipedia.org/wiki/Multi-objective_optimization',
    'https://en.wikipedia.org/wiki/Multiclass_problem',
    'https://en.wikipedia.org/wiki/Multilayer_perceptron',
    'https://en.wikipedia.org/wiki/Multilinear_algebra',
    'https://en.wikipedia.org/wiki/Multilinear_subspace_learning',
    'https://en.wikipedia.org/wiki/Multivariate_adaptive_regression_splines',
    'https://en.wikipedia.org/wiki/Multivariate_polynomial',
    'https://en.wikipedia.org/wiki/Naive_Bayes_classifier',
    'https://en.wikipedia.org/wiki/Nelder-Mead_method',
    'https://en.wikipedia.org/wiki/Neural_networks',
    'https://en.wikipedia.org/wiki/Newton%27s_method',
    'https://en.wikipedia.org/wiki/Newton%27s_method_in_optimization',
    'https://en.wikipedia.org/wiki/Non-linear',
    'https://en.wikipedia.org/wiki/Non-negative_matrix_factorization',
    'https://en.wikipedia.org/wiki/Nonlinear',
    'https://en.wikipedia.org/wiki/Nonlinear_complementarity_problem',
    'https://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method',
    'https://en.wikipedia.org/wiki/Nonlinear_programming',
    'https://en.wikipedia.org/wiki/Normal_(geometry)',
    'https://en.wikipedia.org/wiki/Normed_vector_space',
    'https://en.wikipedia.org/wiki/Number',
    'https://en.wikipedia.org/wiki/Number_theory',
    'https://en.wikipedia.org/wiki/Numerical_analysis',
    'https://en.wikipedia.org/wiki/Numerical_linear_algebra',
    'https://en.wikipedia.org/wiki/Numerical_stability',
    'https://en.wikipedia.org/wiki/Optimization_(mathematics)',
    'https://en.wikipedia.org/wiki/Optimization_algorithm',
    'https://en.wikipedia.org/wiki/Optimization_problem',
    'https://en.wikipedia.org/wiki/Ordinary_differential_equation',
    'https://en.wikipedia.org/wiki/Ordinary_least_squares',
    'https://en.wikipedia.org/wiki/Oriented_matroid',
    'https://en.wikipedia.org/wiki/Orthogonality',
    'https://en.wikipedia.org/wiki/Orthonormal_basis',
    'https://en.wikipedia.org/wiki/Outer_product',
    'https://en.wikipedia.org/wiki/P-matrix',
    'https://en.wikipedia.org/wiki/Paraboloid',
    'https://en.wikipedia.org/wiki/Pareto_frontier',
    'https://en.wikipedia.org/wiki/Pareto_set',
    'https://en.wikipedia.org/wiki/Pattern_recognition',
    'https://en.wikipedia.org/wiki/Pattern_search_(optimization)',
    'https://en.wikipedia.org/wiki/Perceptron',
    'https://en.wikipedia.org/wiki/Pi',
    'https://en.wikipedia.org/wiki/Polyhedron',
    'https://en.wikipedia.org/wiki/Polynomial_kernel',
    'https://en.wikipedia.org/wiki/Polytope',
    'https://en.wikipedia.org/wiki/Positive-definite_kernel',
    'https://en.wikipedia.org/wiki/Positive-definite_matrix',
    'https://en.wikipedia.org/wiki/Positive_definite_bilinear_form',
    'https://en.wikipedia.org/wiki/Positive_definite_matrix',
    'https://en.wikipedia.org/wiki/Posynomials',
    'https://en.wikipedia.org/wiki/Powell%27s_method',
    'https://en.wikipedia.org/wiki/Predictive_analytics',
    'https://en.wikipedia.org/wiki/Predictive_modelling',
    'https://en.wikipedia.org/wiki/Principal_component_analysis',
    'https://en.wikipedia.org/wiki/Principal_minor',
    'https://en.wikipedia.org/wiki/Probabilistic_classification',
    'https://en.wikipedia.org/wiki/Probability_distribution',
    'https://en.wikipedia.org/wiki/Probability_theory',
    'https://en.wikipedia.org/wiki/Product_(mathematics)',
    'https://en.wikipedia.org/wiki/Product_Rule',
    'https://en.wikipedia.org/wiki/Projection_(linear_algebra)',
    'https://en.wikipedia.org/wiki/Pseudovector',
    'https://en.wikipedia.org/wiki/Pure_mathematics',
    'https://en.wikipedia.org/wiki/Push%E2%80%93relabel_maximum_flow_algorithm',
    'https://en.wikipedia.org/wiki/Quadratic_programming',
    'https://en.wikipedia.org/wiki/Quasi-Newton_method',
    'https://en.wikipedia.org/wiki/Quasiconvex_function',
    'https://en.wikipedia.org/wiki/Rademacher%27s_theorem',
    'https://en.wikipedia.org/wiki/Radial_basis_function',
    'https://en.wikipedia.org/wiki/Radial_basis_function_kernel',
    'https://en.wikipedia.org/wiki/Random_forest',
    'https://en.wikipedia.org/wiki/Random_variable',
    'https://en.wikipedia.org/wiki/Rank_(linear_algebra)',
    'https://en.wikipedia.org/wiki/Rate_of_convergence',
    'https://en.wikipedia.org/wiki/Reactive_Search_Optimization',
    'https://en.wikipedia.org/wiki/Real_coordinate_space',
    'https://en.wikipedia.org/wiki/Real_number',
    'https://en.wikipedia.org/wiki/Regression_analysis',
    'https://en.wikipedia.org/wiki/Regularization_(mathematics)',
    'https://en.wikipedia.org/wiki/Reinforcement_learning',
    'https://en.wikipedia.org/wiki/Relaxation_(approximation)',
    'https://en.wikipedia.org/wiki/Relevance_vector_machine',
    'https://en.wikipedia.org/wiki/Representation_theory',
    'https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine',
    'https://en.wikipedia.org/wiki/Revised_simplex_algorithm',
    'https://en.wikipedia.org/wiki/Robust_optimization',
    'https://en.wikipedia.org/wiki/Saddle_point',
    'https://en.wikipedia.org/wiki/Satisfiability_problem',
    'https://en.wikipedia.org/wiki/Scalar_(mathematics)',
    'https://en.wikipedia.org/wiki/Scalar_multiplication',
    'https://en.wikipedia.org/wiki/Scalar_projection',
    'https://en.wikipedia.org/wiki/Search_algorithm',
    'https://en.wikipedia.org/wiki/Second_derivative_test',
    'https://en.wikipedia.org/wiki/Second_order_cone_programming',
    'https://en.wikipedia.org/wiki/Semidefinite',
    'https://en.wikipedia.org/wiki/Semidefinite_programming',
    'https://en.wikipedia.org/wiki/Sequential_minimal_optimization',
    'https://en.wikipedia.org/wiki/Sequential_quadratic_programming',
    'https://en.wikipedia.org/wiki/Set_(mathematics)',
    'https://en.wikipedia.org/wiki/Set_theory',
    'https://en.wikipedia.org/wiki/Sign_function',
    'https://en.wikipedia.org/wiki/Signal_analysis',
    'https://en.wikipedia.org/wiki/Similarity_learning',
    'https://en.wikipedia.org/wiki/Simplex_algorithm',
    'https://en.wikipedia.org/wiki/Simulated_annealing',
    'https://en.wikipedia.org/wiki/Simultaneous_perturbation_stochastic_approximation',
    'https://en.wikipedia.org/wiki/Slack_variable',
    'https://en.wikipedia.org/wiki/Space_complexity',
    'https://en.wikipedia.org/wiki/Sparse_coding',
    'https://en.wikipedia.org/wiki/Sparse_matrix',
    'https://en.wikipedia.org/wiki/Standard_basis',
    'https://en.wikipedia.org/wiki/Stationary_point',
    'https://en.wikipedia.org/wiki/Statistical',
    'https://en.wikipedia.org/wiki/Statistical_classification',
    'https://en.wikipedia.org/wiki/Statistical_inference',
    'https://en.wikipedia.org/wiki/Statistical_learning_theory',
    'https://en.wikipedia.org/wiki/Statistics',
    'https://en.wikipedia.org/wiki/Stochastic_gradient_descent',
    'https://en.wikipedia.org/wiki/Stochastic_optimization',
    'https://en.wikipedia.org/wiki/Stochastic_process',
    'https://en.wikipedia.org/wiki/Stochastic_programming',
    'https://en.wikipedia.org/wiki/Strongly_NP-hard',
    'https://en.wikipedia.org/wiki/Structured_SVM',
    'https://en.wikipedia.org/wiki/Structured_prediction',
    'https://en.wikipedia.org/wiki/Subgradient',
    'https://en.wikipedia.org/wiki/Subgradient_method',
    'https://en.wikipedia.org/wiki/Successive_linear_programming',
    'https://en.wikipedia.org/wiki/Successive_parabolic_interpolation',
    'https://en.wikipedia.org/wiki/Summation',
    'https://en.wikipedia.org/wiki/Supervised_learning',
    'https://en.wikipedia.org/wiki/Support_vector_machine',
    'https://en.wikipedia.org/wiki/Support_vector_machines',
    'https://en.wikipedia.org/wiki/Symmetric_rank-one',
    'https://en.wikipedia.org/wiki/System_of_linear_equations',
    'https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding',
    'https://en.wikipedia.org/wiki/Tabu_search',
    'https://en.wikipedia.org/wiki/Tensor',
    'https://en.wikipedia.org/wiki/Tensor_contraction',
    'https://en.wikipedia.org/wiki/Theory_of_computation',
    'https://en.wikipedia.org/wiki/Three-dimensional_space',
    'https://en.wikipedia.org/wiki/Tikhonov_regularization',
    'https://en.wikipedia.org/wiki/Time_complexity',
    'https://en.wikipedia.org/wiki/Topic_modeling',
    'https://en.wikipedia.org/wiki/Topology',
    'https://en.wikipedia.org/wiki/Transformation_matrix',
    'https://en.wikipedia.org/wiki/Transpose',
    'https://en.wikipedia.org/wiki/Trigonometry',
    'https://en.wikipedia.org/wiki/Triple_product',
    'https://en.wikipedia.org/wiki/Truncated_Newton_method',
    'https://en.wikipedia.org/wiki/Unit_cube',
    'https://en.wikipedia.org/wiki/Unit_sphere',
    'https://en.wikipedia.org/wiki/Unit_vector',
    'https://en.wikipedia.org/wiki/Unsupervised_learning',
    'https://en.wikipedia.org/wiki/Utility_maximization_problem',
    'https://en.wikipedia.org/wiki/Variational_calculus',
    'https://en.wikipedia.org/wiki/Vector_(geometry)',
    'https://en.wikipedia.org/wiki/Vector_area',
    'https://en.wikipedia.org/wiki/Vector_optimization',
    'https://en.wikipedia.org/wiki/Vector_projection',
    'https://en.wikipedia.org/wiki/Vector_space',
    'https://en.wikipedia.org/wiki/Vertex_(geometry)',
    'https://en.wikipedia.org/wiki/Vertex_enumeration_problem',
    'https://en.wikipedia.org/wiki/Weight_function',
    'https://en.wikipedia.org/wiki/Worst-case_complexity'
]))
POLITICS = [
'https://en.wikipedia.org/wiki/Sovereign_state',
'https://en.wikipedia.org/wiki/Executive_(government)',
'https://en.wikipedia.org/wiki/Government',
'https://en.wikipedia.org/wiki/Law',
'https://en.wikipedia.org/wiki/Public_capital',
'https://en.wikipedia.org/wiki/Politics',
'https://en.wikipedia.org/wiki/Federalism',
'https://en.wikipedia.org/wiki/Separation_of_powers',
'https://en.wikipedia.org/wiki/Autocracy',
'https://en.wikipedia.org/wiki/Democracy',
'https://en.wikipedia.org/wiki/Empire',
'https://en.wikipedia.org/wiki/Colonialism',
'https://en.wikipedia.org/wiki/Centralized_government',
'https://en.wikipedia.org/wiki/Federation',
'https://en.wikipedia.org/wiki/Means_of_production',
'https://en.wikipedia.org/wiki/Communism',
'https://en.wikipedia.org/wiki/Absolute_monarchy',
'https://en.wikipedia.org/wiki/Abstraction_principle_(law)',
'https://en.wikipedia.org/wiki/Act_of_Congress',
'https://en.wikipedia.org/wiki/Act_of_Parliament',
'https://en.wikipedia.org/wiki/Adjudication',
'https://en.wikipedia.org/wiki/Administration_of_justice',
'https://en.wikipedia.org/wiki/Administrative_law',
'https://en.wikipedia.org/wiki/Admission_to_the_bar',
'https://en.wikipedia.org/wiki/Age_of_Discovery',
'https://en.wikipedia.org/wiki/Age_of_Enlightenment',
'https://en.wikipedia.org/wiki/Age_of_discovery',
'https://en.wikipedia.org/wiki/Agency_(law)',
'https://en.wikipedia.org/wiki/Agricultural_law',
'https://en.wikipedia.org/wiki/Allies_of_World_War_I',
'https://en.wikipedia.org/wiki/American_Civil_War',
'https://en.wikipedia.org/wiki/American_Revolutionary_War',
'https://en.wikipedia.org/wiki/American_imperialism',
'https://en.wikipedia.org/wiki/Analytical_Marxism',
'https://en.wikipedia.org/wiki/Analytical_jurisprudence',
'https://en.wikipedia.org/wiki/Anarchism',
'https://en.wikipedia.org/wiki/Anarchism_and_Marxism',
'https://en.wikipedia.org/wiki/Anarcho-communism',
'https://en.wikipedia.org/wiki/Anarchy',
'https://en.wikipedia.org/wiki/Ancient_Greek_law',
'https://en.wikipedia.org/wiki/Anti-Federalism',
'https://en.wikipedia.org/wiki/Anti-Revisionism',
'https://en.wikipedia.org/wiki/Anti-authoritarian',
'https://en.wikipedia.org/wiki/Anti-authoritarianism',
'https://en.wikipedia.org/wiki/Anti-capitalism',
'https://en.wikipedia.org/wiki/Anti-communism',
'https://en.wikipedia.org/wiki/Anti-fascism',
'https://en.wikipedia.org/wiki/Anti-imperialism',
'https://en.wikipedia.org/wiki/Anti-revisionism',
'https://en.wikipedia.org/wiki/Anti-statism',
'https://en.wikipedia.org/wiki/Antitrust',
'https://en.wikipedia.org/wiki/Apportionment_(politics)',
'https://en.wikipedia.org/wiki/Aristocracy',
'https://en.wikipedia.org/wiki/Articles_of_Confederation',
'https://en.wikipedia.org/wiki/Assault_(tort)',
'https://en.wikipedia.org/wiki/Asymmetric_federalism',
'https://en.wikipedia.org/wiki/Athenian_democracy',
'https://en.wikipedia.org/wiki/Attorney_at_law',
'https://en.wikipedia.org/wiki/Authoritarianism',
'https://en.wikipedia.org/wiki/Autocracy',
'https://en.wikipedia.org/wiki/Autonomism',
'https://en.wikipedia.org/wiki/Autonomous_area',
'https://en.wikipedia.org/wiki/Ballot_measure',
'https://en.wikipedia.org/wiki/Bank_regulation',
'https://en.wikipedia.org/wiki/Banking_regulation',
'https://en.wikipedia.org/wiki/Bankruptcy_law',
'https://en.wikipedia.org/wiki/Bar_(law)',
'https://en.wikipedia.org/wiki/Battery_(tort)',
'https://en.wikipedia.org/wiki/Bench_(law)',
'https://en.wikipedia.org/wiki/Bicameral',
'https://en.wikipedia.org/wiki/Bicameralism',
'https://en.wikipedia.org/wiki/Bill_(proposed_law)',
'https://en.wikipedia.org/wiki/Bill_of_rights',
'https://en.wikipedia.org/wiki/Binding',
'https://en.wikipedia.org/wiki/Body_politic',
'https://en.wikipedia.org/wiki/Bourgeoisie',
'https://en.wikipedia.org/wiki/Breach_of_duty_in_English_law',
'https://en.wikipedia.org/wiki/Brief_(law)',
'https://en.wikipedia.org/wiki/British_Empire',
'https://en.wikipedia.org/wiki/British_North_America_Act',
'https://en.wikipedia.org/wiki/Brocard_(law)',
'https://en.wikipedia.org/wiki/Brown_v._Board_of_Education',
'https://en.wikipedia.org/wiki/Bureaucracy',
'https://en.wikipedia.org/wiki/Burma',
'https://en.wikipedia.org/wiki/Capital_(economics)',
'https://en.wikipedia.org/wiki/Capital_accumulation',
'https://en.wikipedia.org/wiki/Capitalism',
'https://en.wikipedia.org/wiki/Capitalist_mode_of_production_(Marxist_theory)',
'https://en.wikipedia.org/wiki/Case_law',
'https://en.wikipedia.org/wiki/Caste',
'https://en.wikipedia.org/wiki/Caucasus',
'https://en.wikipedia.org/wiki/Causation_(law)',
'https://en.wikipedia.org/wiki/Central_American_Free_Trade_Agreement',
'https://en.wikipedia.org/wiki/Central_government',
'https://en.wikipedia.org/wiki/Centralisation',
'https://en.wikipedia.org/wiki/Centralization',
'https://en.wikipedia.org/wiki/Centralized_government',
'https://en.wikipedia.org/wiki/Centrism',
'https://en.wikipedia.org/wiki/Chambers_(law)',
'https://en.wikipedia.org/wiki/Charter',
'https://en.wikipedia.org/wiki/Chinese_imperialism',
'https://en.wikipedia.org/wiki/Chinese_law',
'https://en.wikipedia.org/wiki/Chinese_nationalism',
'https://en.wikipedia.org/wiki/Chronology_of_European_exploration_of_Asia',
'https://en.wikipedia.org/wiki/Chronology_of_Western_colonialism',
'https://en.wikipedia.org/wiki/Chronology_of_colonialism',
'https://en.wikipedia.org/wiki/Citizenship',
'https://en.wikipedia.org/wiki/City-state',
'https://en.wikipedia.org/wiki/Civil_and_political_rights',
'https://en.wikipedia.org/wiki/Civil_law(legal_system)',
'https://en.wikipedia.org/wiki/Civil_law_(common_law)',
'https://en.wikipedia.org/wiki/Civil_law_(legal_system)',
'https://en.wikipedia.org/wiki/Civil_liberties',
'https://en.wikipedia.org/wiki/Civil_procedure',
'https://en.wikipedia.org/wiki/Civilisation',
'https://en.wikipedia.org/wiki/Class_conflict',
'https://en.wikipedia.org/wiki/Class_consciousness',
'https://en.wikipedia.org/wiki/Class_in_Marxist_theory',
'https://en.wikipedia.org/wiki/Class_struggle',
'https://en.wikipedia.org/wiki/Classical_Marxism',
'https://en.wikipedia.org/wiki/Classical_liberals',
'https://en.wikipedia.org/wiki/Classical_republicanism',
'https://en.wikipedia.org/wiki/Classless_society',
'https://en.wikipedia.org/wiki/Co-operative_economics',
'https://en.wikipedia.org/wiki/Code_of_Hammurabi',
'https://en.wikipedia.org/wiki/Codification_(law)',
'https://en.wikipedia.org/wiki/Collective_leadership',
'https://en.wikipedia.org/wiki/Colonial_empire',
'https://en.wikipedia.org/wiki/Colonialism',
'https://en.wikipedia.org/wiki/Colonies_in_antiquity',
'https://en.wikipedia.org/wiki/Colonization',
'https://en.wikipedia.org/wiki/Commerce_Clause',
'https://en.wikipedia.org/wiki/Commercial_Revolution',
'https://en.wikipedia.org/wiki/Commercial_law',
'https://en.wikipedia.org/wiki/Commodity_(Marxism)',
'https://en.wikipedia.org/wiki/Common_law',
'https://en.wikipedia.org/wiki/Common_ownership',
'https://en.wikipedia.org/wiki/Communalism',
'https://en.wikipedia.org/wiki/Commune_(socialism)',
'https://en.wikipedia.org/wiki/Communist_party',
'https://en.wikipedia.org/wiki/Communist_revolution',
'https://en.wikipedia.org/wiki/Communist_society',
'https://en.wikipedia.org/wiki/Communist_state',
'https://en.wikipedia.org/wiki/Communist_symbolism',
'https://en.wikipedia.org/wiki/Comparative_government',
'https://en.wikipedia.org/wiki/Comparative_law',
'https://en.wikipedia.org/wiki/Comparative_politics',
'https://en.wikipedia.org/wiki/Competition_law',
'https://en.wikipedia.org/wiki/Concession_(territory)',
'https://en.wikipedia.org/wiki/Confederalism',
'https://en.wikipedia.org/wiki/Confederation',
'https://en.wikipedia.org/wiki/Conflict_of_interest',
'https://en.wikipedia.org/wiki/Conflict_of_laws',
'https://en.wikipedia.org/wiki/Consensus_democracy',
'https://en.wikipedia.org/wiki/Conservatism',
'https://en.wikipedia.org/wiki/Constituency',
'https://en.wikipedia.org/wiki/Constituent_country',
'https://en.wikipedia.org/wiki/Constituent_state',
'https://en.wikipedia.org/wiki/Constitution',
'https://en.wikipedia.org/wiki/Constitutional_law',
'https://en.wikipedia.org/wiki/Constitutional_monarchy',
'https://en.wikipedia.org/wiki/Constitutionalism',
'https://en.wikipedia.org/wiki/Constitutionality',
'https://en.wikipedia.org/wiki/Construction_law',
'https://en.wikipedia.org/wiki/Consumer_protection',
'https://en.wikipedia.org/wiki/Consumer_welfare',
'https://en.wikipedia.org/wiki/Contract',
'https://en.wikipedia.org/wiki/Contract_law',
'https://en.wikipedia.org/wiki/Contractual_terms',
'https://en.wikipedia.org/wiki/Cooperative_Federalism',
'https://en.wikipedia.org/wiki/Copyright',
'https://en.wikipedia.org/wiki/Corporate_law',
'https://en.wikipedia.org/wiki/Corporate_tax',
'https://en.wikipedia.org/wiki/Court',
'https://en.wikipedia.org/wiki/Covenant_(law)',
'https://en.wikipedia.org/wiki/Crimes_against_humanity',
'https://en.wikipedia.org/wiki/Criminal_court',
'https://en.wikipedia.org/wiki/Criminal_justice',
'https://en.wikipedia.org/wiki/Criminal_law',
'https://en.wikipedia.org/wiki/Criminal_procedure',
'https://en.wikipedia.org/wiki/Critical_legal_studies',
'https://en.wikipedia.org/wiki/Criticism_of_communism',
'https://en.wikipedia.org/wiki/Cultural_Survival',
'https://en.wikipedia.org/wiki/Cultural_anthropology',
'https://en.wikipedia.org/wiki/Cultural_appropriation',
'https://en.wikipedia.org/wiki/Cultural_hegemony',
'https://en.wikipedia.org/wiki/Custom_(law)',
'https://en.wikipedia.org/wiki/Damages',
'https://en.wikipedia.org/wiki/De_facto',
'https://en.wikipedia.org/wiki/Decolonization',
'https://en.wikipedia.org/wiki/Decree',
'https://en.wikipedia.org/wiki/Defence_minister',
'https://en.wikipedia.org/wiki/Definition_of_law',
'https://en.wikipedia.org/wiki/Delegated_legislation',
'https://en.wikipedia.org/wiki/Deliberative_democracy',
'https://en.wikipedia.org/wiki/Democracy',
'https://en.wikipedia.org/wiki/Democratic_centralism',
'https://en.wikipedia.org/wiki/Democratic_socialism',
'https://en.wikipedia.org/wiki/Dependent_territory',
'https://en.wikipedia.org/wiki/Deregulation',
'https://en.wikipedia.org/wiki/Despotism',
'https://en.wikipedia.org/wiki/Dictatorship',
'https://en.wikipedia.org/wiki/Dictatorship_of_the_proletariat',
'https://en.wikipedia.org/wiki/Direct_democracy',
'https://en.wikipedia.org/wiki/Direct_rule',
'https://en.wikipedia.org/wiki/Doctrine_of_precedent',
'https://en.wikipedia.org/wiki/Dual_federalism',
'https://en.wikipedia.org/wiki/Duty_of_care',
'https://en.wikipedia.org/wiki/Early_customary_law',
'https://en.wikipedia.org/wiki/Ecclesiology',
'https://en.wikipedia.org/wiki/Economic_analysis_of_law',
'https://en.wikipedia.org/wiki/Economic_efficiency',
'https://en.wikipedia.org/wiki/Economic_surplus',
'https://en.wikipedia.org/wiki/Egalitarian_community',
'https://en.wikipedia.org/wiki/Election',
'https://en.wikipedia.org/wiki/Election_commission',
'https://en.wikipedia.org/wiki/Election_law',
'https://en.wikipedia.org/wiki/Electoral_commission',
'https://en.wikipedia.org/wiki/Electoral_district',
'https://en.wikipedia.org/wiki/Entertainment_law',
'https://en.wikipedia.org/wiki/Entick_v_Carrington',
'https://en.wikipedia.org/wiki/Environmental_determinism',
'https://en.wikipedia.org/wiki/Environmental_law',
'https://en.wikipedia.org/wiki/Environmentalism',
'https://en.wikipedia.org/wiki/Equality_before_the_law',
'https://en.wikipedia.org/wiki/Equity_(law)',
'https://en.wikipedia.org/wiki/Esquire',
'https://en.wikipedia.org/wiki/Estate_(law)',
'https://en.wikipedia.org/wiki/Law',
'https://en.wikipedia.org/wiki/Law_and_economics',
'https://en.wikipedia.org/wiki/Law_and_society',
'https://en.wikipedia.org/wiki/Law_enforcement',
'https://en.wikipedia.org/wiki/Law_firm',
'https://en.wikipedia.org/wiki/Lawsuit',
'https://en.wikipedia.org/wiki/Lawyer',
'https://en.wikipedia.org/wiki/Lease',
'https://en.wikipedia.org/wiki/Left-wing_politics',
'https://en.wikipedia.org/wiki/Legal_burden_of_proof',
'https://en.wikipedia.org/wiki/Legal_opinion',
'https://en.wikipedia.org/wiki/Legal_remedy',
'https://en.wikipedia.org/wiki/Legislation',
'https://en.wikipedia.org/wiki/Stalinism',
'https://en.wikipedia.org/wiki/Quasi-contract',
'https://en.wikipedia.org/wiki/Penal_damages',
'https://en.wikipedia.org/wiki/Legal_personality',
'https://en.wikipedia.org/wiki/Tort_of_deceit',
'https://en.wikipedia.org/wiki/Constitutional_republic',
'https://en.wikipedia.org/wiki/Libertarian_municipalism',
'https://en.wikipedia.org/wiki/Property_law',
'https://en.wikipedia.org/wiki/Political_party',
'https://en.wikipedia.org/wiki/Anocracy',
'https://en.wikipedia.org/wiki/Legal_realism',
'https://en.wikipedia.org/wiki/Popular_assembly',
'https://en.wikipedia.org/wiki/Stratocracy',
'https://en.wikipedia.org/wiki/Proxy_voting',
'https://en.wikipedia.org/wiki/Implied-in-law_contract',
'https://en.wikipedia.org/wiki/Magistrate',
'https://en.wikipedia.org/wiki/Judicial_review',
'https://en.wikipedia.org/wiki/Injunction',
'https://en.wikipedia.org/wiki/World_Wars',
'https://en.wikipedia.org/wiki/Autonomous_Action',
'https://en.wikipedia.org/wiki/Sortition',
'https://en.wikipedia.org/wiki/Autarchy',
'https://en.wikipedia.org/wiki/Single-party_state',
'https://en.wikipedia.org/wiki/Religious_democracy',
'https://en.wikipedia.org/wiki/Delegative_democracy',
'https://en.wikipedia.org/wiki/Legal_history',
'https://en.wikipedia.org/wiki/Jury',
'https://en.wikipedia.org/wiki/Tribunal',
'https://en.wikipedia.org/wiki/Roman_Republic',
'https://en.wikipedia.org/wiki/Privity_of_contract',
'https://en.wikipedia.org/wiki/Ruling_class',
'https://en.wikipedia.org/wiki/Unconscionability',
'https://en.wikipedia.org/wiki/Reliance_damages',
'https://en.wikipedia.org/wiki/Libertarian_socialism',
'https://en.wikipedia.org/wiki/Exclusion_clause',
'https://en.wikipedia.org/wiki/Law_of_war',
'https://en.wikipedia.org/wiki/Semi-authoritarian',
'https://en.wikipedia.org/wiki/Breach_of_contract',
'https://en.wikipedia.org/wiki/Restitution',
'https://en.wikipedia.org/wiki/Insurrection',
'https://en.wikipedia.org/wiki/Natural_person',
'https://en.wikipedia.org/wiki/Legal_theory',
'https://en.wikipedia.org/wiki/Voidable_contract',
'https://en.wikipedia.org/wiki/Justice',
'https://en.wikipedia.org/wiki/Void_(law)',
'https://en.wikipedia.org/wiki/Will_(law)',
'https://en.wikipedia.org/wiki/Intention_to_be_legally_bound',
'https://en.wikipedia.org/wiki/Solicitor',
'https://en.wikipedia.org/wiki/Legislature',
'https://en.wikipedia.org/wiki/Contract_(conflict)',
'https://en.wikipedia.org/wiki/Expectation_damages',
'https://en.wikipedia.org/wiki/Condition_precedent',
'https://en.wikipedia.org/wiki/Sociocracy',
'https://en.wikipedia.org/wiki/Ideology',
'https://en.wikipedia.org/wiki/Republic',
'https://en.wikipedia.org/wiki/Dispute_resolution',
'https://en.wikipedia.org/wiki/Judiciary',
'https://en.wikipedia.org/wiki/Indictment',
'https://en.wikipedia.org/wiki/Pleading',
'https://en.wikipedia.org/wiki/Prosecutor',
'https://en.wikipedia.org/wiki/Public_interest',
'https://en.wikipedia.org/wiki/Precedent',
'https://en.wikipedia.org/wiki/Civil_procedure_in_the_United_States',
'https://en.wikipedia.org/wiki/Knock-and-announce',
'https://en.wikipedia.org/wiki/Sources_of_law',
'https://en.wikipedia.org/wiki/Offence_(law)',
'https://en.wikipedia.org/wiki/Political_economy',
'https://en.wikipedia.org/wiki/Political_psychology',
'https://en.wikipedia.org/wiki/Laissez_faire',
'https://en.wikipedia.org/wiki/Republic',
'https://en.wikipedia.org/wiki/Externalities',
'https://en.wikipedia.org/wiki/Protectionism',
'https://en.wikipedia.org/wiki/Political_philosophy',
'https://en.wikipedia.org/wiki/Scientific_socialism',
'https://en.wikipedia.org/wiki/Neomercantilism',
'https://en.wikipedia.org/wiki/Anarchist_economics',
'https://en.wikipedia.org/wiki/Expropriation',
'https://en.wikipedia.org/wiki/Free_market',
'https://en.wikipedia.org/wiki/Appellant',
'https://en.wikipedia.org/wiki/Right-libertarianism',
'https://en.wikipedia.org/wiki/One-nation_conservatism',
'https://en.wikipedia.org/wiki/Norm_(social)',
'https://en.wikipedia.org/wiki/Quiet_Revolution',
'https://en.wikipedia.org/wiki/Liberal_conservative',
'https://en.wikipedia.org/wiki/Reformism',
'https://en.wikipedia.org/wiki/Modernism',
'https://en.wikipedia.org/wiki/Corporatism',
'https://en.wikipedia.org/wiki/Roman_law',
'https://en.wikipedia.org/wiki/Representative_democracy',
'https://en.wikipedia.org/wiki/Federal_republic',
'https://en.wikipedia.org/wiki/Statute_of_limitations',
'https://en.wikipedia.org/wiki/Monarchy',
'https://en.wikipedia.org/wiki/Liquidated_damages',
'https://en.wikipedia.org/wiki/Consensus',
'https://en.wikipedia.org/wiki/Western_world',
'https://en.wikipedia.org/wiki/Divine_right_of_kings',
'https://en.wikipedia.org/wiki/Succession_of_states'
]
# Presumably a negative/control set: URLs on topics unrelated to the
# preceding (law/politics) list — TODO confirm against the test code that
# consumes this module.
IRRELEVANT = [
    'https://en.wikipedia.org/wiki/Grape',
    'https://en.wikipedia.org/wiki/Cardiovascular_disease',
    'https://en.wikipedia.org/wiki/Goat',
    'https://en.wikipedia.org/wiki/Richard_Gere',
    'https://en.wikipedia.org/wiki/Mud',
    'https://en.wikipedia.org/wiki/Jazz',
    'https://en.wikipedia.org/wiki/Irish_Potato_Famine',
    'https://en.wikipedia.org/wiki/Charles_Baudelaire',
    'https://en.wikipedia.org/wiki/Troodos_Mountains',
    'https://en.wikipedia.org/wiki/Brandy',
    'https://en.wikipedia.org/wiki/Facial_hair',
    'https://en.wikipedia.org/wiki/Sephora',
    'https://en.wikipedia.org/wiki/Harper_Perennial',
    'https://en.wikipedia.org/wiki/The_Pink_Panther_(1963_film)',
    'https://en.wikipedia.org/wiki/YouTube',
    'https://en.wikipedia.org/wiki/Ganden_Monastery',
    'https://en.wikipedia.org/wiki/Late_Triassic',
    'https://en.wikipedia.org/wiki/Wrathful_deities',
    'https://en.wikipedia.org/wiki/CNN',
    'https://en.wikipedia.org/wiki/Widget',
    'https://en.wikipedia.org/wiki/Finland',
    'https://en.wikipedia.org/wiki/Ohio_Players',
    'https://en.wikipedia.org/wiki/Mark_Sanford_disappearance_and_extramarital_affair',
    'https://en.wikipedia.org/wiki/W00t',
    'https://en.wikipedia.org/wiki/Manchego_cheese',
    'https://en.wikipedia.org/wiki/Hyperbole',
    'https://en.wikipedia.org/wiki/Verizon_New_Jersey',
    'https://en.wikipedia.org/wiki/Ivory_tower',
    'https://en.wikipedia.org/wiki/International_Standard_Book_Number',
    'https://en.wikipedia.org/wiki/Odin',
    'https://en.wikipedia.org/wiki/Stereotype',
    'https://en.wikipedia.org/wiki/Family_name',
    'https://en.wikipedia.org/wiki/Sex_segregation',
    'https://en.wikipedia.org/wiki/Height_discrimination',
    'https://en.wikipedia.org/wiki/Bull_shark',
    'https://en.wikipedia.org/wiki/Estuary',
    'https://en.wikipedia.org/wiki/Hope',
    'https://en.wikipedia.org/wiki/Naval_artillery',
    'https://en.wikipedia.org/wiki/Endonym',
    'https://en.wikipedia.org/wiki/Happiness',
    'https://en.wikipedia.org/wiki/Agriculture',
    'https://en.wikipedia.org/wiki/Guilt_(emotion)',
    'https://en.wikipedia.org/wiki/Eulogy',
    'https://en.wikipedia.org/wiki/Cognitive_process',
    'https://en.wikipedia.org/wiki/Romanticism',
    'https://en.wikipedia.org/wiki/Insane_delusion',
    'https://en.wikipedia.org/wiki/Home_economics',
    'https://en.wikipedia.org/wiki/Predation',
    'https://en.wikipedia.org/wiki/Market_segment',
    'https://en.wikipedia.org/wiki/Meta-ethnicity',
    'https://en.wikipedia.org/wiki/Holograph',
    'https://en.wikipedia.org/wiki/Slav',
    'https://en.wikipedia.org/wiki/Museum_of_Death',
    'https://en.wikipedia.org/wiki/Memory',
    'https://en.wikipedia.org/wiki/Bronze',
    'https://en.wikipedia.org/wiki/34th_Golden_Globe_Awards',
    'https://en.wikipedia.org/wiki/MasterChef:_The_Professionals',
    'https://en.wikipedia.org/wiki/Christian_martyrs',
    'https://en.wikipedia.org/wiki/Carnal_Knowledge',
    'https://en.wikipedia.org/wiki/New_York_Giants',
    'https://en.wikipedia.org/wiki/Cultural_intelligence',
    'https://en.wikipedia.org/wiki/Pluto_Press',
    'https://en.wikipedia.org/wiki/Pat%27s_King_of_Steaks',
    'https://en.wikipedia.org/wiki/Indovision',
    'https://en.wikipedia.org/wiki/Michael_Lewis_(wide_receiver)',
    'https://en.wikipedia.org/wiki/Telecommunications_in_Uganda',
    'https://en.wikipedia.org/wiki/Wireless_broadband',
    'https://en.wikipedia.org/wiki/Squamous_metaplasia',
    'https://en.wikipedia.org/wiki/Disability-adjusted_life_year',
    'https://en.wikipedia.org/wiki/Avian_flu_outbreak_of_2009',
    'https://en.wikipedia.org/wiki/Writing_system',
    'https://en.wikipedia.org/wiki/Catalan_grammar',
    'https://en.wikipedia.org/wiki/Xploration_Station'
]
|
scivey/relevanced
|
testing/func_test_lib/urls.py
|
Python
|
mit
| 47,144
|
[
"Gaussian"
] |
54c21210c50e356879325a4ba9e3d215a069fb71124187557597ffe4dbab26d7
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Stephane Charette
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Find unused objects and remove with the user's permission."
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from __future__ import with_statement
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.constfunc import handle2internal
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".RemoveUnused")
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.db import DbTxn
from gramps.gen.errors import WindowActiveError
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.datehandler import displayer as _dd
from gramps.gen.updatecallback import UpdateCallback
from gramps.gui.plug import tool
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# runTool
#
#-------------------------------------------------------------------------
class RemoveUnused(tool.Tool, ManagedWindow, UpdateCallback):
    """Interactive tool that lists database objects with no backlinks and
    removes the ones the user marks.

    Mixes three roles: a Gramps tool (tool.Tool), a managed GTK dialog
    (ManagedWindow) and a progress reporter (UpdateCallback).
    """
    # Column indices of the backing Gtk.ListStore (self.real_model).
    MARK_COL = 0
    OBJ_ID_COL = 1
    OBJ_NAME_COL = 2
    OBJ_TYPE_COL = 3
    OBJ_HANDLE_COL = 4
    def __init__(self, dbstate, user, options_class, name, callback=None):
        """Build the per-object-type dispatch table and the GUI.

        Bails out silently on a read-only database, since removal would be
        impossible anyway.  ``callback`` is unused; it is kept to match the
        standard tool constructor signature.
        """
        uistate = user.uistate
        self.title = _('Unused Objects')
        tool.Tool.__init__(self, dbstate, options_class, name)
        if self.db.readonly:
            # Nothing can be removed from a read-only database.
            return
        ManagedWindow.__init__(self, uistate,[], self.__class__)
        UpdateCallback.__init__(self, self.uistate.pulse_progressbar)
        self.dbstate = dbstate
        self.uistate = uistate
        # Dispatch table keyed by object type string.  'name_ix' is the index
        # of a human-readable field inside the raw serialized data tuple; it
        # is used as a fallback label when 'get_text' is None.
        self.tables = {
            'events' : {'get_func': self.db.get_event_from_handle,
                        'remove'  : self.db.remove_event,
                        'get_text': self.get_event_text,
                        'editor'  : 'EditEvent',
                        'stock'   : 'gramps-event',
                        'name_ix' : 4},
            'sources' : {'get_func': self.db.get_source_from_handle,
                         'remove'  : self.db.remove_source,
                         'get_text': None,
                         'editor'  : 'EditSource',
                         'stock'   : 'gramps-source',
                         'name_ix' : 2},
            'places' : {'get_func': self.db.get_place_from_handle,
                        'remove'  : self.db.remove_place,
                        'get_text': None,
                        'editor'  : 'EditPlace',
                        'stock'   : 'gramps-place',
                        'name_ix' : 2},
            'media' : {'get_func': self.db.get_object_from_handle,
                       'remove'  : self.db.remove_object,
                       'get_text': None,
                       'editor'  : 'EditMedia',
                       'stock'   : 'gramps-media',
                       'name_ix' : 4},
            'repos' : {'get_func': self.db.get_repository_from_handle,
                       'remove'  : self.db.remove_repository,
                       'get_text': None,
                       'editor'  : 'EditRepository',
                       'stock'   : 'gramps-repository',
                       'name_ix' : 3},
            'notes' : {'get_func': self.db.get_note_from_handle,
                       'remove'  : self.db.remove_note,
                       'get_text': self.get_note_text,
                       'editor'  : 'EditNote',
                       'stock'   : 'gramps-notes',
                       'name_ix' : 2},
            }
        self.init_gui()
    def init_gui(self):
        """Load the Glade UI, wire up signals and build the result tree."""
        self.top = Glade()
        window = self.top.toplevel
        self.set_window(window, self.top.get_object('title'), self.title)
        self.events_box = self.top.get_object('events_box')
        self.sources_box = self.top.get_object('sources_box')
        self.places_box = self.top.get_object('places_box')
        self.media_box = self.top.get_object('media_box')
        self.repos_box = self.top.get_object('repos_box')
        self.notes_box = self.top.get_object('notes_box')
        self.find_button = self.top.get_object('find_button')
        self.remove_button = self.top.get_object('remove_button')
        # Seed the check boxes from the persisted options.
        self.events_box.set_active(self.options.handler.options_dict['events'])
        self.sources_box.set_active(
            self.options.handler.options_dict['sources'])
        self.places_box.set_active(
            self.options.handler.options_dict['places'])
        self.media_box.set_active(self.options.handler.options_dict['media'])
        self.repos_box.set_active(self.options.handler.options_dict['repos'])
        self.notes_box.set_active(self.options.handler.options_dict['notes'])
        self.warn_tree = self.top.get_object('warn_tree')
        self.warn_tree.connect('button_press_event', self.double_click)
        self.selection = self.warn_tree.get_selection()
        self.mark_button = self.top.get_object('mark_button')
        self.mark_button.connect('clicked', self.mark_clicked)
        self.unmark_button = self.top.get_object('unmark_button')
        self.unmark_button.connect('clicked', self.unmark_clicked)
        self.invert_button = self.top.get_object('invert_button')
        self.invert_button.connect('clicked', self.invert_clicked)
        # real_model holds the data; the tree view displays a sortable proxy
        # (sort_model), so view paths must be converted back to child paths
        # before indexing real_model (see selection_toggled / double_click).
        self.real_model = Gtk.ListStore(GObject.TYPE_BOOLEAN,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING)
        self.sort_model = self.real_model.sort_new_with_model()
        self.warn_tree.set_model(self.sort_model)
        self.renderer = Gtk.CellRendererText()
        self.img_renderer = Gtk.CellRendererPixbuf()
        self.bool_renderer = Gtk.CellRendererToggle()
        self.bool_renderer.connect('toggled', self.selection_toggled)
        # Add mark column
        mark_column = Gtk.TreeViewColumn(_('Mark'), self.bool_renderer,
                                         active=RemoveUnused.MARK_COL)
        mark_column.set_sort_column_id(RemoveUnused.MARK_COL)
        self.warn_tree.append_column(mark_column)
        # Add image column
        img_column = Gtk.TreeViewColumn(None, self.img_renderer )
        img_column.set_cell_data_func(self.img_renderer, self.get_image)
        self.warn_tree.append_column(img_column)
        # Add column with object gramps_id
        id_column = Gtk.TreeViewColumn(_('ID'), self.renderer,
                                       text=RemoveUnused.OBJ_ID_COL)
        id_column.set_sort_column_id(RemoveUnused.OBJ_ID_COL)
        self.warn_tree.append_column(id_column)
        # Add column with object name
        name_column = Gtk.TreeViewColumn(_('Name'), self.renderer,
                                         text=RemoveUnused.OBJ_NAME_COL)
        name_column.set_sort_column_id(RemoveUnused.OBJ_NAME_COL)
        self.warn_tree.append_column(name_column)
        self.top.connect_signals({
            "destroy_passed_object" : self.close,
            "on_remove_button_clicked": self.do_remove,
            "on_find_button_clicked" : self.find,
            "on_delete_event" : self.close,
            })
        self.dc_label = self.top.get_object('dc_label')
        # These widgets stay disabled until a search has produced results.
        self.sensitive_list = [self.warn_tree, self.mark_button,
                               self.unmark_button, self.invert_button,
                               self.dc_label, self.remove_button]
        for item in self.sensitive_list:
            item.set_sensitive(False)
        self.show()
    def build_menu_names(self, obj):
        """Return (window title, menu label) for ManagedWindow bookkeeping."""
        return (self.title, None)
    def find(self, obj):
        """Handler for the Find button: scan the database and fill the list."""
        # Persist the current check-box states into the options.
        self.options.handler.options_dict.update(
            events = self.events_box.get_active(),
            sources = self.sources_box.get_active(),
            places = self.places_box.get_active(),
            media = self.media_box.get_active(),
            repos = self.repos_box.get_active(),
            notes = self.notes_box.get_active(),
            )
        for item in self.sensitive_list:
            item.set_sensitive(True)
        # Show busy/progress feedback while scanning, restore afterwards.
        self.uistate.set_busy_cursor(True)
        self.uistate.progress.show()
        self.window.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
        self.real_model.clear()
        self.collect_unused()
        self.uistate.progress.hide()
        self.uistate.set_busy_cursor(False)
        self.window.get_window().set_cursor(None)
        self.reset()
        # Save options
        self.options.handler.save_options()
    def collect_unused(self):
        # Run through all requested tables and check all objects
        # for being referenced some place. If not, add_results on them.
        db = self.db
        tables = (
            ('events', db.get_event_cursor, db.get_number_of_events),
            ('sources', db.get_source_cursor, db.get_number_of_sources),
            ('places', db.get_place_cursor, db.get_number_of_places),
            ('media', db.get_media_cursor, db.get_number_of_media_objects),
            ('repos', db.get_repository_cursor, db.get_number_of_repositories),
            ('notes', db.get_note_cursor, db.get_number_of_notes),
            )
        for (the_type, cursor_func, total_func) in tables:
            if not self.options.handler.options_dict[the_type]:
                # This table was not requested. Skip it.
                continue
            with cursor_func() as cursor:
                self.set_total(total_func())
                fbh = db.find_backlink_handles
                for handle, data in cursor:
                    # An object with no backlink handles is unused.
                    if not any(h for h in fbh(handle)):
                        self.add_results((the_type, handle2internal(handle),
                                          data))
                    self.update()
            self.reset()
    def do_remove(self, obj):
        """Handler for the Remove button: delete every marked row in one
        transaction, with signals suppressed until the batch is done."""
        with DbTxn(_("Remove unused objects"), self.db, batch=False) as trans:
            self.db.disable_signals()
            # Iterate bottom-up so removing a row does not shift the paths
            # of the rows still to be visited.
            for row_num in range(len(self.real_model)-1, -1, -1):
                path = (row_num,)
                row = self.real_model[path]
                if not row[RemoveUnused.MARK_COL]:
                    continue
                the_type = row[RemoveUnused.OBJ_TYPE_COL]
                handle = row[RemoveUnused.OBJ_HANDLE_COL]
                remove_func = self.tables[the_type]['remove']
                remove_func(handle, trans)
                self.real_model.remove(row.iter)
        self.db.enable_signals()
        self.db.request_rebuild()
    def selection_toggled(self, cell, path_string):
        """Flip the Mark flag of the toggled row.

        ``path_string`` is a path into the *sorted* proxy model, so it must
        be converted to a child path before indexing real_model.
        """
        sort_path = tuple(map(int, path_string.split(':')))
        real_path = self.sort_model.convert_path_to_child_path(Gtk.TreePath(sort_path))
        row = self.real_model[real_path]
        row[RemoveUnused.MARK_COL] = not row[RemoveUnused.MARK_COL]
        self.real_model.row_changed(real_path, row.iter)
    def mark_clicked(self, mark_button):
        """Mark every row for removal."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = True
    def unmark_clicked(self, unmark_button):
        """Clear the Mark flag on every row."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = False
    def invert_clicked(self, invert_button):
        """Invert the Mark flag on every row."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = not row[RemoveUnused.MARK_COL]
    def double_click(self, obj, event):
        """On left double-click, open the editor for the selected object."""
        if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
            (model, node) = self.selection.get_selected()
            if not node:
                return
            # Translate the view (sorted) path back to the real model path.
            sort_path = self.sort_model.get_path(node)
            real_path = self.sort_model.convert_path_to_child_path(sort_path)
            row = self.real_model[real_path]
            the_type = row[RemoveUnused.OBJ_TYPE_COL]
            handle = row[RemoveUnused.OBJ_HANDLE_COL]
            self.call_editor(the_type, handle)
    def call_editor(self, the_type, handle):
        """Open the appropriate object editor for ``handle``.

        The editor class is imported dynamically by name via exec() into
        globals() — a hack to avoid importing every editor up front.  A
        WindowActiveError means the editor is already open; ignore it.
        """
        try:
            obj = self.tables[the_type]['get_func'](handle)
            editor_str = 'from gramps.gui.editors import %s as editor' % (
                self.tables[the_type]['editor']
                )
            exec(editor_str, globals())
            editor(self.dbstate, self.uistate, [], obj)
        except WindowActiveError:
            pass
    def get_image(self, column, cell, model, iter, user_data=None):
        """Cell data func: show the stock icon matching the row's type."""
        the_type = model.get_value(iter, RemoveUnused.OBJ_TYPE_COL)
        the_stock = self.tables[the_type]['stock']
        cell.set_property('stock-id', the_stock)
    def add_results(self, results):
        """Append one unused object (type, handle, raw data) to the model."""
        (the_type, handle, data) = results
        gramps_id = data[1]
        # if we have a function that will return to us some type
        # of text summary, then we should use it; otherwise we'll
        # use the generic field index provided in the tables above
        if self.tables[the_type]['get_text']:
            text = self.tables[the_type]['get_text'](the_type, handle, data)
        else:
            # grab the text field index we know about, and hope
            # it represents something useful to the user
            name_ix = self.tables[the_type]['name_ix']
            text = data[name_ix]
        # insert a new row into the table
        self.real_model.append(row=[False, gramps_id, text, the_type, handle])
    def get_event_text(self, the_type, handle, data):
        """
        Come up with a short line of text that we can use as
        a summary to represent this event.
        """
        # get the event:
        event = self.tables[the_type]['get_func'](handle)
        # first check to see if the event has a descriptive name
        text = event.get_description() # (this is rarely set for events)
        # if we don't have a description...
        if text == '':
            # ... then we merge together several fields
            # get the event type (marriage, birth, death, etc.)
            text = str(event.get_type())
            # see if there is a date
            date = _dd.display(event.get_date_object())
            if date != '':
                text += '; %s' % date
            # see if there is a place
            place_handle = event.get_place_handle()
            if place_handle:
                place = self.db.get_place_from_handle(place_handle)
                text += '; %s' % place.get_title()
        return text
    def get_note_text(self, the_type, handle, data):
        """
        We need just the first few words of a note as a summary.
        """
        # get the note object
        note = self.tables[the_type]['get_func'](handle)
        # get the note text; this ignores (discards) formatting
        text = note.get()
        # convert whitespace to a single space
        text = " ".join(text.split())
        # if the note is too long, truncate it
        if len(text) > 80:
            text = text[:80] + "..."
        return text
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class CheckOptions(tool.ToolOptions):
    """
    Defines options and provides handling interface.

    One boolean option per object category controls whether that category
    is scanned for unused objects; all categories are enabled by default.
    """
    # (option key, noun used inside the help strings)
    _CATEGORIES = (
        ('events', 'events'),
        ('sources', 'sources'),
        ('places', 'places'),
        ('media', 'media'),
        ('repos', 'repositories'),
        ('notes', 'notes'),
    )
    def __init__(self, name, person_id=None):
        tool.ToolOptions.__init__(self, name, person_id)
        # Options specific for this report: every category defaults to
        # enabled (1).
        self.options_dict = {key: 1 for key, _noun in self._CATEGORIES}
        # Help entries: (value syntax, description, labels for 0/1, public)
        self.options_help = {
            key: ("=0/1",
                  "Whether to use check for unused %s" % noun,
                  ["Do not check %s" % noun, "Check %s" % noun],
                  True)
            for key, noun in self._CATEGORIES
        }
|
pmghalvorsen/gramps_branch
|
gramps/plugins/tool/removeunused.py
|
Python
|
gpl-2.0
| 18,587
|
[
"Brian"
] |
6f6c56b2a546a959dcccc10b6f8a5532b03f68e618bc593a2208652a003ca115
|
import pdb
import numpy as np
import math
import time
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from util import gaussian_kl_divergence
from util import gaussian_logp
from util import gaussian_logp0
from util import bernoulli_logp
class VAE(chainer.Chain):
    """
    Variational autoencoder with a Householder-flow posterior.

    Encoder and decoder are MLPs with CReLU activations and batch
    normalization; hidden layers take ``2*dim_hidden`` inputs because CReLU
    doubles the feature dimension.  The observation model is Bernoulli
    (``plin_ber_prob`` produces logits), and the KL term is annealed via
    ``temperature`` (a dict with 'value' and 'increment' keys).
    """
    def __init__(self, dim_in, dim_hidden, dim_latent, num_layers, num_trans, temperature, num_zsamples=1):
        super(VAE, self).__init__()
        # initialise first encoder and decoder hidden layer separately because
        # the input and output dims differ from the other hidden layers
        self.qlin0 = L.Linear(dim_in, dim_hidden)
        self.plin0 = L.Linear(dim_latent, dim_hidden)
        self._children.append('qlin0')
        self._children.append('plin0')
        # batch normalization layers at the start of the encoder and start of the decoder
        self.qlin_batch_norm_0 = L.BatchNormalization(dim_hidden)
        self.plin_batch_norm_0 = L.BatchNormalization(dim_hidden)
        self._children.append('qlin_batch_norm_0')
        self._children.append('plin_batch_norm_0')
        # remaining hidden layers are created dynamically and registered by name
        for i in range(num_layers-1):
            # encoder
            layer_name = 'qlin' + str(i+1)
            setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
            self._children.append(layer_name)
            layer_name = 'qlin_batch_norm_' + str(i+1)
            setattr(self, layer_name, L.BatchNormalization(dim_hidden))
            self._children.append(layer_name)
            # decoder
            layer_name = 'plin' + str(i+1)
            setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
            self._children.append(layer_name)
            layer_name = 'plin_batch_norm_' + str(i+1)
            setattr(self, layer_name, L.BatchNormalization(dim_hidden))
            self._children.append(layer_name)
        # initialise the encoder and decoder output layer separately because
        # the input and output dims differ from the other hidden layers
        self.qlin_mu = L.Linear(2*dim_hidden, dim_latent)
        self.qlin_ln_var = L.Linear(2*dim_hidden, dim_latent)
        self.plin_ber_prob = L.Linear(2*dim_hidden, dim_in)
        self._children.append('qlin_mu')
        self._children.append('qlin_ln_var')
        self._children.append('plin_ber_prob')
        # v0 and linear layer required for v_t of Householder flow transformations
        self.qlin_h_vec_0 = L.Linear(2*dim_hidden, dim_latent)
        self.qlin_h_vec_t = L.Linear(dim_latent, dim_latent)
        self._children.append('qlin_h_vec_0')
        self._children.append('qlin_h_vec_t')
        self.num_layers = num_layers
        self.num_trans = num_trans
        self.temperature = temperature
        self.num_zsamples = num_zsamples
        self.epochs_seen = 0
        # pdb.set_trace()
    def encode(self, x):
        """
        Compute the posterior parameters q(z|x): returns (mu, ln_var, v0)
        where v0 seeds the Householder flow.  Results are also cached on
        ``self`` (qmu, qln_var, qh_vec_0).
        """
        h = self.qlin0(x)
        h = self.qlin_batch_norm_0(h)
        h = F.crelu(h)
        for i in range(self.num_layers-1):
            layer_name = 'qlin' + str(i+1)
            h = self[layer_name](h)
            layer_name = 'qlin_batch_norm_' + str(i+1)
            h = self[layer_name](h)
            h = F.crelu(h)
        self.qmu = self.qlin_mu(h)
        self.qln_var = self.qlin_ln_var(h)
        self.qh_vec_0 = self.qlin_h_vec_0(h)
        return self.qmu, self.qln_var, self.qh_vec_0
    def decode(self, z):
        """
        Compute Bernoulli logits for p(x|z); also cached on ``self``
        as p_ber_prob_logit.
        """
        h = self.plin0(z)
        h = self.plin_batch_norm_0(h)
        h = F.crelu(h)
        for i in range(self.num_layers-1):
            layer_name = 'plin' + str(i+1)
            h = self[layer_name](h)
            layer_name = 'plin_batch_norm_' + str(i+1)
            h = self[layer_name](h)
            h = F.crelu(h)
        self.p_ber_prob_logit = self.plin_ber_prob(h)
        return self.p_ber_prob_logit
    def house_transform(self,z):
        """
        Apply ``num_trans`` Householder-style reflections
        z <- z - 2 (v v^T) z / ||v||^2, starting from the encoder's v0.

        NOTE(review): ``vec_t_product`` is (batch x batch) here, so the
        reflection mixes samples across the batch -- confirm against the
        reference implementation.
        """
        vec_t = self.qh_vec_0
        for i in range(self.num_trans):
            vec_t = F.identity(self.qlin_h_vec_t(vec_t))
            vec_t_product = F.matmul(vec_t, vec_t, transb=True)
            vec_t_norm_sqr = F.tile(F.sum(F.square(vec_t)), (z.shape[0], z.shape[1]))
            z = z - 2*F.matmul(vec_t_product, z)/vec_t_norm_sqr
        return z
    def __call__(self, x):
        """
        Return the negative annealed ELBO, averaged over the batch and over
        ``num_zsamples`` posterior samples.  Also records per-pass encode and
        average decode timings in ``self.timing_info``.
        """
        # Obtain parameters for q(z|x)
        encoding_time = time.time()
        qmu, qln_var, qh_vec_0 = self.encode(x)
        encoding_time = float(time.time() - encoding_time)
        decoding_time_average = 0.
        self.kl = 0
        self.logp = 0
        for j in xrange(self.num_zsamples):
            # z_0 ~ q(z|x)
            z_0 = F.gaussian(qmu, qln_var)
            # Perform Householder flow transformation, Equation (8)
            decoding_time = time.time()
            z_T = self.house_transform(z_0)
            # Obtain parameters for p(x|z_T)
            p_ber_prob_logit = self.decode(z_T)
            decoding_time = time.time() - decoding_time
            decoding_time_average += decoding_time
            # Compute objective
            self.logp += bernoulli_logp(x, self.p_ber_prob_logit)
            self.kl += gaussian_kl_divergence(z_0, qmu, qln_var, z_T)
        decoding_time_average /= self.num_zsamples
        self.logp /= self.num_zsamples
        self.kl /= self.num_zsamples
        # KL annealing: the weight ramps up each call, capped at 1.0
        current_temperature = min(self.temperature['value'],1.0)
        self.obj_batch = self.logp - (current_temperature*self.kl)
        self.temperature['value'] += self.temperature['increment']
        self.timing_info = np.array([encoding_time,decoding_time_average])
        batch_size = self.obj_batch.shape[0]
        self.obj = -F.sum(self.obj_batch)/batch_size
        return self.obj
|
ashwindcruz/dgm
|
householder_celebA/model.py
|
Python
|
mit
| 5,777
|
[
"Gaussian"
] |
f1e76e964518d8b84afff92e92c79f5ac61f516c9703e275dcd32499fb7a9f37
|
"""
Test the Von-Mises-Fisher mixture model
Author : Bertrand Thirion, 2010
"""
from __future__ import absolute_import
import numpy as np
from ..von_mises_fisher_mixture import (VonMisesMixture,
sphere_density,
select_vmm,
select_vmm_cv)
from nose.tools import assert_true, assert_equal
def test_spherical_area():
    """Cell areas returned by sphere_density should sum to 4*pi."""
    _, cell_areas = sphere_density(100)
    total_area = cell_areas.sum()
    assert_true(np.abs(total_area - 4 * np.pi) < 1.e-2)
def test_von_mises_fisher_density():
    """Fitted VMF mixtures (k = 1, 3) should integrate to 1 on the sphere."""
    # random points projected onto the unit sphere
    samples = np.random.randn(100, 3)
    samples = (samples.T / np.sqrt(np.sum(samples ** 2, 1))).T
    grid, area = sphere_density(100)
    for n_components in (1, 3):
        for precision in [.1, 1., 10., 100.]:
            for null_class in (False, True):
                model = VonMisesMixture(n_components, precision,
                                        null_class=null_class)
                model.estimate(samples)
                # the density times the cell areas should sum to 1
                integral = (model.mixture_density(grid) * area).sum()
                assert_true(np.abs(integral - 1) < 1e-2)
def test_dimension_selection_bic():
    """BIC-based selection should recover the three planted clusters."""
    center1 = [0.6, 0.48, 0.64]
    center2 = [-0.8, 0.48, 0.36]
    center3 = [0.48, 0.64, -0.6]
    data = np.random.randn(200, 3) * .1
    data[:40] += center1
    data[40:150] += center2
    data[150:] += center3
    # project onto the unit sphere
    data = (data.T / np.sqrt(np.sum(data ** 2, 1))).T
    best_model = select_vmm(list(range(1, 8)), 100., False, data)
    assert_equal(best_model.k, 3)
def test_dimension_selection_cv():
    """Cross-validated selection should use 2 or 3 components, not 1 or 4+."""
    center1 = [1, 0, 0]
    center2 = [-1, 0, 0]
    data = np.random.randn(20, 3) * .1
    data[0::2] += center1
    data[1::2] += center2
    # project onto the unit sphere
    data = (data.T / np.sqrt(np.sum(data ** 2, 1))).T
    folds = np.repeat(np.arange(10), 2)
    best_model = select_vmm_cv(list(range(1, 8)), 50., data, cv_index=folds,
                               null_class=False, ninit=5)
    labels = np.argmax(best_model.responsibilities(data), 1)
    n_used = len(np.unique(labels))
    assert_true(n_used > 1)
    assert_true(n_used < 4)
if __name__ == '__main__':
    # allow running this test module directly via nose
    import nose
    nose.run(argv=['', __file__])
|
alexis-roche/nipy
|
nipy/algorithms/clustering/tests/test_vmm.py
|
Python
|
bsd-3-clause
| 2,288
|
[
"VMD"
] |
28960f74f58fabede4412dd296946db5d76690f2de59df4e88f96a448ba8ae66
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
This example plots several randomly generated classification datasets.
For easy visualization, all datasets have 2 features, plotted on the x and y
axis. The color of each point represents its class label.
The first 4 plots use the :func:`~sklearn.datasets.make_classification` with
different numbers of informative features, clusters per class and classes.
The final 2 plots use :func:`~sklearn.datasets.make_blobs` and
:func:`~sklearn.datasets.make_gaussian_quantiles`.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
# Lay out a 3x2 grid: one randomly generated 2-feature dataset per panel,
# points colored by class label.
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
# Panel 1: only one of the two features carries class information
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1,
            s=25, edgecolor='k')
# Panel 2: both features informative, a single cluster per class
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1,
            s=25, edgecolor='k')
# Panel 3: default two clusters per class
plt.subplot(323)
plt.title("Two informative features, two clusters per class",
          fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2,
            s=25, edgecolor='k')
# Panel 4: three classes instead of two
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
          fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1,
            s=25, edgecolor='k')
# Panel 5: isotropic Gaussian blobs
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1,
            s=25, edgecolor='k')
# Panel 6: a single Gaussian split into quantile shells, one per class
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1,
            s=25, edgecolor='k')
plt.show()
|
lesteve/scikit-learn
|
examples/datasets/plot_random_dataset.py
|
Python
|
bsd-3-clause
| 2,550
|
[
"Gaussian"
] |
936ce1a46d9876492448d367f81a1184dbece9d06dc383b70d21cac44cb894c3
|
"""
State Space Analysis using the Kalman Filter
References
-----------
Durbin., J and Koopman, S.J. `Time Series Analysis by State Space Methods`.
Oxford, 2001.
Hamilton, J.D. `Time Series Analysis`. Princeton, 1994.
Harvey, A.C. `Forecasting, Structural Time Series Models and the Kalman Filter`.
Cambridge, 1989.
Notes
-----
This file follows Hamilton's notation pretty closely.
The ARMA Model class follows Durbin and Koopman notation.
Harvey uses Durbin and Koopman notation.
"""
#Anderson and Moore `Optimal Filtering` provides a more efficient algorithm
# namely the information filter
# if the number of series is much greater than the number of states
# e.g., with a DSGE model. See also
# http://www.federalreserve.gov/pubs/oss/oss4/aimindex.html
# Harvey notes that the square root filter will keep P_t pos. def. but
# is not strictly needed outside of the engineering (long series)
import numpy as np
from numpy import dot, identity, kron, log, zeros, pi, exp, eye, issubdtype, ones
from numpy.linalg import inv, pinv
from scipy import optimize

from statsmodels.tools.tools import chain_dot

from . import kalman_loglike
#Fast filtering and smoothing for multivariate state space models
# and The Riksbank -- Strid and Walentin (2008)
# Block Kalman filtering for large-scale DSGE models
# but this is obviously macro model specific
def _init_diffuse(T,R):
m = T.shape[1] # number of states
r = R.shape[1] # should also be the number of states?
Q_0 = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F'))
return zeros((m,1)), Q_0.reshape(r,r,order='F')
def kalmansmooth(F, A, H, Q, R, y, X, xi10):
    # Placeholder: the backward (smoothing) pass is not implemented.
    pass
def kalmanfilter(F, A, H, Q, R, y, X, xi10, ntrain, history=False):
    """
    Returns the negative log-likelihood of y conditional on the information
    set.

    Assumes that the initial state and all innovations are multivariate
    Gaussian.

    Parameters
    -----------
    F : array-like
        The (r x r) array holding the transition matrix for the hidden state.
    A : array-like
        The (nobs x k) array relating the predetermined variables to the
        observed data.
    H : array-like
        The (nobs x r) array relating the hidden state vector to the
        observed data.
    Q : array-like
        (r x r) variance/covariance matrix on the error term in the hidden
        state transition.
    R : array-like
        (nobs x nobs) variance/covariance of the noise in the observation
        equation.
    y : array-like
        The (nobs x 1) array holding the observed data.
    X : array-like
        The (nobs x k) array holding the predetermined variables data.
    xi10 : array-like
        Is the (r x 1) initial prior on the initial state vector.
    ntrain : int
        The number of training periods for the filter.  This is the number
        of observations that do not affect the likelihood.
    history : bool
        If True, also return the history of state forecasts.

    Returns
    -------
    likelihood
        The negative of the log likelihood
    history or priors, history of posterior
        If history is True.

    Notes
    -----
    No input checking is done.  Equation numbers in the comments below
    refer to Hamilton (1994), `Time Series Analysis`.
    """
    # uses log of Hamilton 13.4.1
    F = np.asarray(F)
    H = np.atleast_2d(np.asarray(H))
    n = H.shape[1] # remember that H gets transposed
    y = np.asarray(y)
    A = np.asarray(A)
    X = np.asarray(X)
    if y.ndim == 1: # note that Y is in rows for now
        y = y[:,None]
    nobs = y.shape[0]
    xi10 = np.atleast_2d(np.asarray(xi10))
#    if xi10.ndim == 1:
#        xi10[:,None]
    if history:
        state_vector = [xi10]
    Q = np.asarray(Q)
    r = xi10.shape[0]
# Eq. 12.2.21, other version says P0 = Q
#    p10 = np.dot(np.linalg.inv(np.eye(r**2)-np.kron(F,F)),Q.ravel('F'))
#    p10 = np.reshape(P0, (r,r), order='F')
# Assume a fixed, known initial point and set P0 = Q
#TODO: this looks *slightly * different than Durbin-Koopman exact likelihood
# initialization p 112 unless I've misunderstood the notational translation.
    p10 = Q
    loglikelihood = 0
    for i in range(nobs):
        # innovation variance H'PH + R; squeezed so the scalar (univariate)
        # case can take the cheap reciprocal branch below
        HTPHR = np.atleast_1d(np.squeeze(chain_dot(H.T,p10,H)+R))
#        print HTPHR
#        print HTPHR.ndim
#        print HTPHR.shape
        if HTPHR.ndim == 1:
            HTPHRinv = 1./HTPHR
        else:
            HTPHRinv = np.linalg.inv(HTPHR) # correct
#        print A.T
#        print X
#        print H.T
#        print xi10
#        print y[i]
        # one-step-ahead forecast error (innovation)
        part1 = y[i] - np.dot(A.T,X) - np.dot(H.T,xi10) # correct
        if i >= ntrain: # zero-index, but ntrain isn't
            HTPHRdet = np.linalg.det(np.atleast_2d(HTPHR)) # correct
            part2 = -.5*chain_dot(part1.T,HTPHRinv,part1) # correct
#TODO: Need to test with ill-conditioned problem.
            loglike_interm = (-n/2.) * np.log(2*np.pi) - .5*\
                    np.log(HTPHRdet) + part2
            loglikelihood += loglike_interm
        # 13.2.15 Update current state xi_t based on y
        xi11 = xi10 + chain_dot(p10, H, HTPHRinv, part1)
        # 13.2.16 MSE of that state
        p11 = p10 - chain_dot(p10, H, HTPHRinv, H.T, p10)
        # 13.2.17 Update forecast about xi_{t+1} based on our F
        xi10 = np.dot(F,xi11)
        if history:
            state_vector.append(xi10)
        # 13.2.21 Update the MSE of the forecast
        p10 = chain_dot(F,p11,F.T) + Q
    if not history:
        return -loglikelihood
    else:
        return -loglikelihood, np.asarray(state_vector[:-1])
#TODO: this works if it gets refactored, but it's not quite as accurate
# as KalmanFilter
# def loglike_exact(self, params):
# """
# Exact likelihood for ARMA process.
#
# Notes
# -----
# Computes the exact likelihood for an ARMA process by modifying the
# conditional sum of squares likelihood as suggested by Shephard (1997)
# "The relationship between the conditional sum of squares and the exact
# likelihood for autoregressive moving average models."
# """
# p = self.p
# q = self.q
# k = self.k
# y = self.endog.copy()
# nobs = self.nobs
# if self.transparams:
# newparams = self._transparams(params)
# else:
# newparams = params
# if k > 0:
# y -= dot(self.exog, newparams[:k])
# if p != 0:
# arcoefs = newparams[k:k+p][::-1]
# T = KalmanFilter.T(arcoefs)
# else:
# arcoefs = 0
# if q != 0:
# macoefs = newparams[k+p:k+p+q][::-1]
# else:
# macoefs = 0
# errors = [0] * q # psuedo-errors
# rerrors = [1] * q # error correction term
# # create pseudo-error and error correction series iteratively
# for i in range(p,len(y)):
# errors.append(y[i]-sum(arcoefs*y[i-p:i])-\
# sum(macoefs*errors[i-q:i]))
# rerrors.append(-sum(macoefs*rerrors[i-q:i]))
# errors = np.asarray(errors)
# rerrors = np.asarray(rerrors)
#
# # compute bayesian expected mean and variance of initial errors
# one_sumrt2 = 1 + np.sum(rerrors**2)
# sum_errors2 = np.sum(errors**2)
# mup = -np.sum(errors * rerrors)/one_sumrt2
#
# # concentrating out the ML estimator of "true" sigma2 gives
# sigma2 = 1./(2*nobs) * (sum_errors2 - mup**2*(one_sumrt2))
#
# # which gives a variance of the initial errors of
# sigma2p = sigma2/one_sumrt2
#
# llf = -(nobs-p)/2. * np.log(2*pi*sigma2) - 1./(2*sigma2)*sum_errors2 \
# + 1./2*log(one_sumrt2) + 1./(2*sigma2) * mup**2*one_sumrt2
# Z_mat = KalmanFilter.Z(r)
# R_mat = KalmanFilter.R(newparams, r, k, q, p)
# T_mat = KalmanFilter.T(newparams, r, k, p)
# # initial state and its variance
# alpha = zeros((m,1))
# Q_0 = dot(inv(identity(m**2)-kron(T_mat,T_mat)),
# dot(R_mat,R_mat.T).ravel('F'))
# Q_0 = Q_0.reshape(r,r,order='F')
# P = Q_0
# v = zeros((nobs,1))
# F = zeros((nobs,1))
# B = array([T_mat, 0], dtype=object)
#
#
# for i in xrange(int(nobs)):
# v_mat = (y[i],0) - dot(z_mat,B)
#
# B_0 = (T,0)
# v_t = (y_t,0) - z*B_t
# llf = -nobs/2.*np.log(2*pi*sigma2) - 1/(2.*sigma2)*se_n - \
# 1/2.*logdet(Sigma_a) + 1/(2*sigma2)*s_n_prime*sigma_a*s_n
# return llf
#
class StateSpaceModel(object):
    """
    Generic StateSpaceModel class. Meant to be a base class.

    This class lays out the methods that are to be defined by any child
    class.

    Parameters
    ----------
    endog : array-like
        An `nobs` x `p` array of observations
    exog : array-like, optional
        An `nobs` x `k` array of exogenous variables.
    **kwargs
        Anything provided to the constructor will be attached as an
        attribute.

    Notes
    -----
    The state space model is assumed to be of the form

    y[t] = Z[t].dot(alpha[t]) + epsilon[t]
    alpha[t+1] = T[t].dot(alpha[t]) + R[t].dot(eta[t])

    where

    epsilon[t] ~ N(0, H[t])
    eta[t] ~ N(0, Q[t])
    alpha[0] ~ N(a[0], P[0])

    Where y is the `p` x 1 observations vector, and alpha is the `m` x 1
    state vector.

    References
    -----------
    Durbin, J. and S.J. Koopman. 2001. `Time Series Analysis by State Space
    Methods.` Oxford.
    """
    def __init__(self, endog, exog=None, **kwargs):
        # Attach any extra keyword arguments as attributes.  (The original
        # ``dict.__init__(self, kwargs)`` trick raised TypeError because
        # this class does not subclass dict.)
        self.__dict__.update(kwargs)
        endog = np.asarray(endog)
        if endog.ndim == 1:
            endog = endog[:, None]
        self.endog = endog
        # p: number of observed series; nobs: number of time periods.
        # (Fixes the original ``self.p = nobs`` which raised NameError.)
        self.p = endog.shape[1]
        self.nobs = endog.shape[0]
        # Always store exog (possibly None) so later code can safely test
        # ``self.exog is None``; ``if exog:`` is ambiguous for ndarrays.
        self.exog = exog

    def T(self, params):
        """Transition matrix; to be defined by child classes."""
        pass

    def R(self, params):
        """State-disturbance selection matrix; to be defined by child classes."""
        pass

    def Z(self, params):
        """Observation selector matrix; to be defined by child classes."""
        pass

    def H(self, params):
        """Observation-noise covariance; to be defined by child classes."""
        pass

    def Q(self, params):
        """State-noise covariance; to be defined by child classes."""
        pass

    def _univariatefilter(self, params, init_state, init_var):
        """
        Implements the Kalman Filter recursions. Optimized for univariate case.

        NOTE: unfinished upstream -- only the diffuse initialization is in
        place; the recursion body was never written.
        """
        y = self.endog
        nobs = self.nobs
        R = self.R
        T = self.T
        Z = self.Z
        H = self.H
        Q = self.Q
        if not init_state and not init_var:
            alpha, P = _init_diffuse(T, R)
        #NOTE: stopped here

    def _univariatefilter_update(self):
        # does the KF but calls _update after each loop to update the
        # matrices for time-varying coefficients
        pass

    def kalmanfilter(self, params, init_state=None, init_var=None):
        """
        Runs the Kalman Filter.
        """
        if self.p == 1:
            # the original call was missing both ``self`` and ``params``
            return self._univariatefilter(params, init_state, init_var)
        else:
            raise ValueError("No multivariate filter written yet")

    def _updateloglike(self, params, xi10, ntrain, penalty, upperbounds,
                       lowerbounds, F, A, H, Q, R, history):
        """
        Evaluate the (optionally penalized) negative log-likelihood at
        ``params``.

        Each of F, A, H, Q, R may be a callable of ``params``, a constant
        array, or None (treated as zero).
        """
        paramsorig = params
        # are the bounds binding?
        if penalty:
            params = np.min((np.max((lowerbounds, params), axis=0),
                             upperbounds), axis=0)
        # ``is (not) None`` instead of ``!= / == None``: the latter performs
        # an elementwise comparison on ndarrays and raises on truth-testing.
        if F is not None and callable(F):
            F = F(params)
        elif F is None:
            F = 0
        if A is not None and callable(A):
            A = A(params)
        elif A is None:
            A = 0
        if H is not None and callable(H):
            H = H(params)
        elif H is None:
            H = 0
        if Q is not None and callable(Q):
            Q = Q(params)
        elif Q is None:
            Q = 0
        if R is not None and callable(R):
            R = R(params)
        elif R is None:
            R = 0
        X = self.exog
        if X is None:
            X = 0
        y = self.endog
        loglike = kalmanfilter(F, A, H, Q, R, y, X, xi10, ntrain, history)
        # use a quadratic penalty function to move away from bounds
        if penalty:
            loglike += penalty * np.sum((paramsorig - params) ** 2)
        return loglike

    def fit_kalman(self, start_params, xi10, ntrain=1, F=None, A=None, H=None,
                   Q=None, R=None, method="bfgs", penalty=True,
                   upperbounds=None, lowerbounds=None):
        """
        Fit the model parameters by maximum likelihood via the Kalman filter.

        Parameters
        ----------
        method : str
            Only "bfgs" is currently accepted.
        start_params : array-like
            The first guess on all parameters to be estimated.  This can
            be in any order as long as the F,A,H,Q, and R functions handle
            the parameters appropriately.
        xi10 : array-like
            The (r x 1) vector of initial states.  See notes.
        F,A,H,Q,R : functions or array-like, optional
            If functions, they should take start_params (or the current
            value of params during iteration and return the F,A,H,Q,R
            matrices).  See notes.  If they are constant then can be given
            as array-like objects.  If not included in the state-space
            representation then can be left as None.  See example in class
            docstring.
        penalty : bool
            Whether or not to include a penalty for solutions that violate
            the bounds given by `lowerbounds` and `upperbounds`.
        lowerbounds : array-like
            Lower bounds on the parameter solutions.  Expected to be in the
            same order as `start_params`.
        upperbounds : array-like
            Upper bounds on the parameter solutions.  Expected to be in the
            same order as `start_params`.
        """
        if method.lower() != 'bfgs':
            # the original silently fell through and crashed on undefined
            # names; fail loudly instead
            raise ValueError("Only method='bfgs' is currently supported")
        (params, llf, score, cov_params, func_calls, grad_calls,
         warnflag) = optimize.fmin_bfgs(self._updateloglike, start_params,
                args=(xi10, ntrain, penalty, upperbounds, lowerbounds,
                      F, A, H, Q, R, False), gtol=1e-8, epsilon=1e-5,
                full_output=1)
        #TODO: provide more options to user for optimize
        # Getting history would require one more call to _updatelikelihood
        self.params = params
        self.llf = llf
        self.gradient = score
        self.cov_params = cov_params # how to interpret this?
        self.warnflag = warnflag
def updatematrices(params, y, xi10, ntrain, penalty, upperbound, lowerbound):
    """
    Penalized negative log-likelihood for Luca's two-state example model.

    TODO: change API, update names.
    This isn't general.  Copy of Luca's matlab example.
    """
    paramsorig = params
    # are the bounds binding?  clip params into [lowerbound, upperbound]
    clipped_low = np.max((lowerbound, params), axis=0)
    params = np.min((clipped_low, upperbound), axis=0)
    rho, sigma1, sigma2 = params[0], params[1], params[2]
    F = np.array([[rho, 0], [0, 0]])
    cholQ = np.array([[sigma1, 0], [0, sigma2]])
    H = np.ones((2, 1))
    q = np.dot(cholQ, cholQ.T)
    loglike = kalmanfilter(F, 0, H, q, 0, y, 0, xi10, ntrain)
    # quadratic penalty for parameters that hit the bounds
    return loglike + penalty * np.sum((paramsorig - params) ** 2)
class KalmanFilter(object):
    """
    Kalman Filter code intended for use with the ARMA model.

    Notes
    -----
    The notation for the state-space form follows Durbin and Koopman (2001).

    The observation equation is

    .. math:: y_{t} = Z_{t}\\alpha_{t} + \\epsilon_{t}

    The state equation is

    .. math:: \\alpha_{t+1} = T_{t}\\alpha_{t} + R_{t}\\eta_{t}

    For present purposes \\epsilon_{t} is assumed to always be zero.
    The heavy lifting is delegated to the compiled ``kalman_loglike``
    routines; the classmethods here only build the system matrices.
    """
    @classmethod
    def T(cls, params, r, k, p): # F in Hamilton
        """
        The coefficient matrix for the state vector in the state equation.

        Its dimension is r+k x r+k.

        Parameters
        ----------
        params : ndarray
            The full parameter vector; the p AR coefficients are taken
            from params[k:p+k].
        r : int
            In the context of the ARMA model r is max(p,q+1) where p is the
            AR order and q is the MA order.
        k : int
            The number of exogenous variables in the ARMA model, including
            the constant if appropriate.
        p : int
            The AR order in an ARMA model.

        References
        ----------
        Durbin and Koopman Section 3.7.
        """
        arr = zeros((r,r), dtype=params.dtype) # allows for complex-step
                                               # derivative
        params_padded = zeros(r, dtype=params.dtype) # handle zero coefficients if necessary
        #NOTE: squeeze added for cg optimizer
        params_padded[:p] = params[k:p+k]
        arr[:,0] = params_padded # first p params are AR coeffs w/ short params
        arr[:-1,1:] = eye(r-1)
        return arr
    @classmethod
    def R(cls, params, r, k, q, p): # R is H in Hamilton
        """
        The coefficient matrix for the state vector in the observation equation.

        Its dimension is r+k x 1.

        Parameters
        ----------
        params : ndarray
            The full parameter vector; the q MA coefficients are taken
            from params[p+k:p+k+q].
        r : int
            In the context of the ARMA model r is max(p,q+1) where p is the
            AR order and q is the MA order.
        k : int
            The number of exogenous variables in the ARMA model, including
            the constant if appropriate.
        q : int
            The MA order in an ARMA model.
        p : int
            The AR order in an ARMA model.

        References
        ----------
        Durbin and Koopman Section 3.7.
        """
        arr = zeros((r,1), dtype=params.dtype) # this allows zero coefficients
                                               # dtype allows for compl. der.
        arr[1:q+1,:] = params[p+k:p+k+q][:,None]
        arr[0] = 1.0
        return arr
    @classmethod
    def Z(cls, r):
        """
        Returns the Z selector matrix in the observation equation.

        Parameters
        ----------
        r : int
            In the context of the ARMA model r is max(p,q+1) where p is the
            AR order and q is the MA order.

        Notes
        -----
        Currently only returns a 1 x r vector [1,0,0,...0].  Will need to
        be generalized when the Kalman Filter becomes more flexible.
        """
        arr = zeros((1,r))
        arr[:,0] = 1.
        return arr
    @classmethod
    def geterrors(cls, y, k, k_ar, k_ma, k_lags, nobs, Z_mat, m, R_mat, T_mat,
                  paramsdtype):
        """
        Returns just the errors of the Kalman Filter.

        Dispatches on ``paramsdtype`` so that the complex-valued variant can
        be used for complex-step differentiation.
        """
        if issubdtype(paramsdtype, float):
            return kalman_loglike.kalman_filter_double(y, k, k_ar, k_ma,
                            k_lags, int(nobs), Z_mat, R_mat, T_mat)[0]
        elif issubdtype(paramsdtype, complex):
            return kalman_loglike.kalman_filter_complex(y, k, k_ar, k_ma,
                            k_lags, int(nobs), Z_mat, R_mat, T_mat)[0]
        else:
            raise TypeError("dtype %s is not supported "
                            "Please file a bug report" % paramsdtype)
    @classmethod
    def _init_kalman_state(cls, params, arma_model):
        """
        Returns the system matrices and other info needed for the
        Kalman Filter recursions.
        """
        paramsdtype = params.dtype
        y = arma_model.endog.copy().astype(paramsdtype)
        k = arma_model.k_exog + arma_model.k_trend
        nobs = arma_model.nobs
        k_ar = arma_model.k_ar
        k_ma = arma_model.k_ma
        k_lags = arma_model.k_lags
        if arma_model.transparams:
            newparams = arma_model._transparams(params)
        else:
            newparams = params  # don't need a copy if not modified.
        # demean by the exogenous/trend part before filtering
        if k > 0:
            y -= dot(arma_model.exog, newparams[:k])
        # system matrices
        Z_mat = cls.Z(k_lags)
        m = Z_mat.shape[1] # r
        R_mat = cls.R(newparams, k_lags, k, k_ma, k_ar)
        T_mat = cls.T(newparams, k_lags, k, k_ar)
        return (y, k, nobs, k_ar, k_ma, k_lags,
                newparams, Z_mat, m, R_mat, T_mat, paramsdtype)
    @classmethod
    def loglike(cls, params, arma_model):
        """
        The loglikelihood for an ARMA model using the Kalman Filter recursions.

        Parameters
        ----------
        params : array
            The coefficients of the ARMA model, assumed to be in the order of
            trend variables and `k` exogenous coefficients, the `p` AR
            coefficients, then the `q` MA coefficients.
        arma_model : `statsmodels.tsa.arima.ARMA` instance
            A reference to the ARMA model instance.  Its ``sigma2``
            attribute is updated as a side effect.

        Notes
        -----
        This works for both real valued and complex valued parameters.  The
        complex values being used to compute the numerical derivative.  If
        available will use a Cython version of the Kalman Filter.
        """
        #TODO: see section 3.4.6 in Harvey for computing the derivatives in the
        # recursion itself.
        #TODO: this won't work for time-varying parameters
        (y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat, T_mat,
         paramsdtype) = cls._init_kalman_state(params, arma_model)
        if issubdtype(paramsdtype, float):
            loglike, sigma2 = kalman_loglike.kalman_loglike_double(y, k,
                                  k_ar, k_ma, k_lags, int(nobs), Z_mat,
                                  R_mat, T_mat)
        elif issubdtype(paramsdtype, complex):
            loglike, sigma2 = kalman_loglike.kalman_loglike_complex(y, k,
                                  k_ar, k_ma, k_lags, int(nobs), Z_mat,
                                  R_mat, T_mat)
        else:
            raise TypeError("This dtype %s is not supported "
                            " Please files a bug report." % paramsdtype)
        arma_model.sigma2 = sigma2
        return loglike.item() # return a scalar not a 0d array
if __name__ == "__main__":
import numpy as np
from scipy.linalg import block_diag
import numpy as np
# Make our observations as in 13.1.13
np.random.seed(54321)
nobs = 600
y = np.zeros(nobs)
rho = [.5, -.25, .35, .25]
sigma = 2.0 # std dev. or noise
for i in range(4,nobs):
y[i] = np.dot(rho,y[i-4:i][::-1]) + np.random.normal(scale=sigma)
y = y[100:]
# make an MA(2) observation equation as in example 13.3
# y = mu + [1 theta][e_t e_t-1]'
mu = 2.
theta = .8
rho = np.array([1, theta])
np.random.randn(54321)
e = np.random.randn(101)
y = mu + rho[0]*e[1:]+rho[1]*e[:-1]
# might need to add an axis
r = len(rho)
x = np.ones_like(y)
# For now, assume that F,Q,A,H, and R are known
F = np.array([[0,0],[1,0]])
Q = np.array([[1,0],[0,0]])
A = np.array([mu])
H = rho[:,None]
R = 0
# remember that the goal is to solve recursively for the
# state vector, xi, given the data, y (in this case)
# we can also get a MSE matrix, P, associated with *each* observation
# given that our errors are ~ NID(0,variance)
# the starting E[e(1),e(0)] = [0,0]
xi0 = np.array([[0],[0]])
# with variance = 1 we know that
# P0 = np.eye(2) # really P_{1|0}
# Using the note below
P0 = np.dot(np.linalg.inv(np.eye(r**2)-np.kron(F,F)),Q.ravel('F'))
P0 = np.reshape(P0, (r,r), order='F')
# more generally, if the eigenvalues for F are in the unit circle
# (watch out for rounding error in LAPACK!) then
# the DGP of the state vector is var/cov stationary, we know that
# xi0 = 0
# Furthermore, we could start with
# vec(P0) = np.dot(np.linalg.inv(np.eye(r**2) - np.kron(F,F)),vec(Q))
# where vec(X) = np.ravel(X, order='F') with a possible [:,np.newaxis]
# if you really want a "2-d" array
# a fortran (row-) ordered raveled array
# If instead, some eigenvalues are on or outside the unit circle
# xi0 can be replaced with a best guess and then
# P0 is a positive definite matrix repr the confidence in the guess
# larger diagonal elements signify less confidence
# we also know that y1 = mu
# and MSE(y1) = variance*(1+theta**2) = np.dot(np.dot(H.T,P0),H)
state_vector = [xi0]
forecast_vector = [mu]
MSE_state = [P0] # will be a list of matrices
MSE_forecast = []
# must be numerical shortcuts for some of this...
# this should be general enough to be reused
for i in range(len(y)-1):
# update the state vector
sv = state_vector[i]
P = MSE_state[i]
HTPHR = np.dot(np.dot(H.T,P),H)+R
if np.ndim(HTPHR) < 2: # we have a scalar
HTPHRinv = 1./HTPHR
else:
HTPHRinv = np.linalg.inv(HTPHR)
FPH = np.dot(np.dot(F,P),H)
gain_matrix = np.dot(FPH,HTPHRinv) # correct
new_sv = np.dot(F,sv)
new_sv += np.dot(gain_matrix,y[i] - np.dot(A.T,x[i]) -
np.dot(H.T,sv))
state_vector.append(new_sv)
# update the MSE of the state vector forecast using 13.2.28
new_MSEf = np.dot(np.dot(F - np.dot(gain_matrix,H.T),P),F.T - np.dot(H,
gain_matrix.T)) + np.dot(np.dot(gain_matrix,R),gain_matrix.T) + Q
MSE_state.append(new_MSEf)
# update the in sample forecast of y
forecast_vector.append(np.dot(A.T,x[i+1]) + np.dot(H.T,new_sv))
# update the MSE of the forecast
MSE_forecast.append(np.dot(np.dot(H.T,new_MSEf),H) + R)
MSE_forecast = np.array(MSE_forecast).squeeze()
MSE_state = np.array(MSE_state)
forecast_vector = np.array(forecast_vector)
state_vector = np.array(state_vector).squeeze()
##########
# Luca's example
# choose parameters governing the signal extraction problem
rho = .9
sigma1 = 1
sigma2 = 1
nobs = 100
# get the state space representation (Hamilton's notation)\
F = np.array([[rho, 0],[0, 0]])
cholQ = np.array([[sigma1, 0],[0,sigma2]])
H = np.ones((2,1))
# generate random data
np.random.seed(12345)
xihistory = np.zeros((2,nobs))
for i in range(1,nobs):
xihistory[:,i] = np.dot(F,xihistory[:,i-1]) + \
np.dot(cholQ,np.random.randn(2,1)).squeeze()
# this makes an ARMA process?
# check notes, do the math
y = np.dot(H.T, xihistory)
y = y.T
params = np.array([rho, sigma1, sigma2])
penalty = 1e5
upperbounds = np.array([.999, 100, 100])
lowerbounds = np.array([-.999, .001, .001])
xi10 = xihistory[:,0]
ntrain = 1
bounds = zip(lowerbounds,upperbounds) # if you use fmin_l_bfgs_b
# results = optimize.fmin_bfgs(updatematrices, params,
# args=(y,xi10,ntrain,penalty,upperbounds,lowerbounds),
# gtol = 1e-8, epsilon=1e-10)
# array([ 0.83111567, 1.2695249 , 0.61436685])
F = lambda x : np.array([[x[0],0],[0,0]])
def Q(x):
cholQ = np.array([[x[1],0],[0,x[2]]])
return np.dot(cholQ,cholQ.T)
H = np.ones((2,1))
# ssm_model = StateSpaceModel(y) # need to pass in Xi10!
# ssm_model.fit_kalman(start_params=params, xi10=xi10, F=F, Q=Q, H=H,
# upperbounds=upperbounds, lowerbounds=lowerbounds)
# why does the above take 3 times as many iterations than direct max?
# compare directly to matlab output
from scipy import io
# y_matlab = io.loadmat('./kalman_y.mat')['y'].reshape(-1,1)
# ssm_model2 = StateSpaceModel(y_matlab)
# ssm_model2.fit_kalman(start_params=params, xi10=xi10, F=F, Q=Q, H=H,
# upperbounds=upperbounds, lowerbounds=lowerbounds)
# matlab output
# thetaunc = np.array([0.7833, 1.1688, 0.5584])
# np.testing.assert_almost_equal(ssm_model2.params, thetaunc, 4)
# maybe add a line search check to make sure we didn't get stuck in a local
# max for more complicated ssm?
# Examples from Durbin and Koopman
import zipfile
# NOTE(review): hard-coded developer path and a bare ``except:`` that masks
# the real error (including KeyboardInterrupt) -- consider
# ``except (IOError, OSError):`` and a configurable path.
try:
    dk = zipfile.ZipFile('/home/skipper/statsmodels/statsmodels-skipper/scikits/statsmodels/sandbox/tsa/DK-data.zip')
except:
    raise IOError("Install DK-data.zip from http://www.ssfpack.com/DKbook.html or specify its correct local path.")
# Nile river flow data: skip the header line, parse floats.
nile = dk.open('Nile.dat').readlines()
nile = [float(_.strip()) for _ in nile[1:]]
nile = np.asarray(nile)
# v = np.zeros_like(nile)
# a = np.zeros_like(nile)
# F = np.zeros_like(nile)
# P = np.zeros_like(nile)
# P[0] = 10.**7
# sigma2e = 15099.
# sigma2n = 1469.1
# for i in range(len(nile)):
#     v[i] = nile[i] - a[i] # Kalman filter residual
#     F[i] = P[i] + sigma2e # the variance of the Kalman filter residual
#     K = P[i]/F[i]
#     a[i+1] = a[i] + K*v[i]
#     P[i+1] = P[i]*(1.-K) + sigma2n
nile_ssm = StateSpaceModel(nile)
# Scalar measurement and state variances parameterised directly.
R = lambda params : np.array(params[0])
Q = lambda params : np.array(params[1])
# nile_ssm.fit_kalman(start_params=[1.0,1.0], xi10=0, F=[1.], H=[1.],
# Q=Q, R=R, penalty=False, ntrain=0)
# p. 162 univariate structural time series example
seatbelt = dk.open('Seatbelt.dat').readlines()
# NOTE(review): on Python 3 this builds a list of lazy ``map`` objects, not
# lists of floats -- wrap with ``list(map(...))`` if py3 support is needed.
seatbelt = [map(float,_.split()) for _ in seatbelt[2:]]
sb_ssm = StateSpaceModel(seatbelt)
s = 12 # monthly data
# s p.
H = np.zeros((s+1,1)) # Z in DK, H' in Hamilton
H[::2] = 1.
# Seasonal frequencies lambda_j = 2*pi*j/s for j = 1..6.
lambdaj = np.r_[1:6:6j]
lambdaj *= 2*np.pi/s
T = np.zeros((s+1,s+1))
# 2x2 rotation block for each seasonal frequency.
C = lambda j : np.array([[np.cos(j), np.sin(j)],[-np.sin(j), np.cos(j)]])
Cj = [C(j) for j in lambdaj] + [-1]
#NOTE: the above is for handling seasonality
#TODO: it is just a rotation matrix. See if Robert's link has a better way
#http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=5F5145BE25D61F87478B25AD1493C8F4?doi=10.1.1.110.5134&rep=rep1&type=pdf&ei=QcetSefqF4GEsQPnx4jSBA&sig2=HjJILSBPFgJTfuifbvKrxw&usg=AFQjCNFbABIxusr-NEbgrinhtR6buvjaYA
from scipy import linalg
F = linalg.block_diag(*Cj) # T in DK, F in Hamilton
R = np.eye(s-1)
sigma2_omega = 1.
Q = np.eye(s-1) * sigma2_omega
|
yarikoptic/pystatsmodels
|
statsmodels/tsa/kalmanf/kalmanfilter.py
|
Python
|
bsd-3-clause
| 30,496
|
[
"Gaussian"
] |
ea64fd8de6e3fdbbf02bd790643c5e354f84debf46791c967a056299197b43ab
|
# -*- coding: utf-8 -*-
"""Page and page Component classes
The keteparaha Page and Component classes represent web pages and components
of web pages.
Pages are identified by the URL of the browser, and components by the CSS
selector that is used to retrieve them.
If you perform an action that causes the browser to visit a new URL, and
you have defined a Page class with that URL, then the new page will
automatically be returned from that action.
Creating an instance of a page object will automatically cause the browser to
visit that page as well.
Example:
BASE_URL = 'http://my-site.com'
class Home(Page):
url = BASE_URL + '/'
class Dashboard(Page):
url = BASE_URL + '/dashboard/'
home = Home(driver) # driver is a WebDriver instance, browser would
# automatically visit the home page at this point
dashboard = home.click_link('Dashboard')
"""
from __future__ import unicode_literals
import collections
from inspect import isclass
import time
from selenium.common import exceptions
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import TimeoutException, WebDriverWait
from six import with_metaclass
from .expectations import (
_wait_for_condition,
component_to_be_clickable,
text_to_be_present_in_component
)
from . import flow
ELEMENT_TIMEOUT = 10
""" (int): The seconds that a component will wait to be visible, clickable, or
present before raising a TimeoutException
"""
__all__ = ['Component', 'Page']
# Workaround for backwards compatibility with Python 2.7
try:
    unicode = unicode  # Python 2: keep the builtin name
except NameError:
    # Python 3: the ``unicode``/``basestring`` builtins are gone.  The
    # original only aliased ``basestring`` here, which left ``unicode``
    # undefined and made enter_text() raise NameError on Python 3.
    unicode = str
    basestring = (str, bytes)
# ``collections.MutableMapping`` was removed in Python 3.10; fall back for
# Python 2, which has no ``collections.abc``.
try:
    _MutableMapping = collections.abc.MutableMapping
except AttributeError:  # Python 2
    _MutableMapping = collections.MutableMapping


class _Registry(_MutableMapping):
    """A singleton registry for pages and components.

    All instances share the single class-level ``store`` mapping url or
    selector strings to their Page/Component classes.
    """

    store = dict()  # shared across every _Registry instance

    def __delitem__(self, key):
        # Deliberately a no-op: registered classes are never removed.
        pass

    def __getitem__(self, key):
        return self.store[key]

    def __iter__(self):
        # Iterate keys, as the Mapping contract requires (the original
        # yielded values, which broke keys()/items() derived methods).
        return iter(self.store)

    def __setitem__(self, key, value):
        self.store[key] = value

    def __len__(self):
        return len(self.store)

    def __call__(self, selector):
        """Return the registered class for *selector*, creating one on miss."""
        try:
            return self.store[selector]
        except KeyError:
            return self.make_class(selector)

    def make_class(self, selector):
        """Create an anonymous Component subclass bound to *selector*."""
        try:
            return type(
                'DynamicComponent', (Component,), {'selector': selector})
        except TypeError:  # Python < 3 requires a byte-string class name
            return type(
                b'DynamicComponent', (Component,), {'selector': selector})
class _RegistryMeta(type):
    """Metaclass recording each Page (keyed by url) or Component (keyed by
    selector) in the shared registry at class-creation time."""

    def __init__(cls, name, bases, dct):
        url = dct.get('url')
        selector = dct.get('selector')
        if url:
            cls._registry[url] = cls
        elif selector:
            cls._registry[selector] = cls
        super(_RegistryMeta, cls).__init__(name, bases, dct)
class _SeleniumWrapper(object):
    """Mixin for page and component class that understands the WebDriver API
    """
    # Re-exported so callers can catch it from the page/component object.
    TimeoutException = TimeoutException

    class ComponentMissing(Exception):
        """Raised when a requested component cannot be found in the page."""
        pass

    def _get_component_class(self, component_or_selector):
        """Ensure we have a component class

        Either return argument if it's a component, get a registered component,
        or dynamically create a component.
        """
        if isclass(component_or_selector) and issubclass(
                component_or_selector, Component):
            return component_or_selector
        return self._registry(component_or_selector)

    def get_component(self, component_or_selector):
        """Return an initialised component present in page

        takes either a component class to find in the page or a css selector.
        If the selector is not present in the page raises a ComponentMissing
        error.
        """
        ComponentClass = self._get_component_class(component_or_selector)
        try:
            return ComponentClass(self)
        except TimeoutException:
            raise self.ComponentMissing(
                '"{0}" could not be found in page'.format(
                    ComponentClass.selector))

    def get_components(self, component_or_selector):
        """Return an list of initialised components present in page

        Returns an empty list if no components could be found
        """
        ComponentClass = self._get_component_class(component_or_selector)
        components = []
        try:
            elements = self.get_elements(ComponentClass.selector)
        except TimeoutException:
            return components
        # Each matching element becomes a component located by its index in
        # the match list rather than by the (shared) selector alone.
        for idx, element in enumerate(elements):
            comp_inst = self._get_component_class(
                component_or_selector)(self, find_by='index_position')
            comp_inst._index_position = idx
            components.append(comp_inst)
        return components

    def get_element(self, selector, driver=None):
        """Get the DOM element identified by the css selector"""
        return _wait_for_condition(
            ec.presence_of_element_located((By.CSS_SELECTOR, selector)),
            self,
            message='No element found with selector "{0}".'.format(selector),
            driver=driver
        )

    def get_clickable_element(self, selector, driver=None):
        """Return an element that can be clicked, or raise an error"""
        return _wait_for_condition(
            ec.element_to_be_clickable((By.CSS_SELECTOR, selector)),
            self,
            message='No clickable element found with selector "{0}".'.format(
                selector),
            driver=driver
        )

    def get_visible_element(self, selector):
        """Return an element that is visible, or raise an error"""
        return _wait_for_condition(
            ec.visibility_of_element_located((By.CSS_SELECTOR, selector)),
            self,
            message='No visible element found with selector "{0}".'.format(
                selector)
        )

    def get_element_by_link_text(self, link_text):
        """Get the DOM element identified by its visible link text"""
        return _wait_for_condition(
            ec.presence_of_element_located((By.LINK_TEXT, link_text)),
            self,
            message='No link with text "{0}".'.format(link_text)
        )

    def get_elements(self, selector):
        """Get a list of elements identified by the css selector"""
        return _wait_for_condition(
            ec.presence_of_all_elements_located((By.CSS_SELECTOR, selector)),
            self
        )

    def get_attribute(self, attribute):
        """Return the value of an attribute of the component"""
        return self._element.get_attribute(attribute)

    def wait_for_invisibility(self, selector):
        """Pause until the element identified by selector is invisible"""
        return _wait_for_condition(
            ec.invisibility_of_element_located((By.CSS_SELECTOR, selector)),
            self
        )

    def text_in_element(self, selector, text):
        """Return whether the text is in the element identified by selector"""
        return _wait_for_condition(
            ec.text_to_be_present_in_element(
                (By.CSS_SELECTOR, selector), text),
            self,
            message='"{0}" not found in "{1}".'.format(
                text, self.get_component(selector).text)
        )

    def has_text(self, text):
        """Return whether the text is in the component"""
        return _wait_for_condition(
            text_to_be_present_in_component(self, text),
            self,
            message='"{0}" not found in "{1}".'.format(
                text, self._element.text)
        )

    def _click(self, component, opens=None):
        """Click an element and return an appropriate component or page

        component -- a keteparaha.page.Component
        opens -- a keteparaha.page.Component to initialise and return

        returns -- either a new Page object if the url changes, the initialised
            Component passed in as opens, or itself
        """
        _wait_for_condition(
            component_to_be_clickable(component), component,
            message='"{0}" was never clickable'.format(self)
        )
        component._element.click()
        if opens and isinstance(opens, basestring):
            # open is a string look it up in registry
            return self._registry(opens)(self)
        if opens and issubclass(opens, Component) and isclass(opens):
            # open is an Component class, use it
            return opens(self)
        if opens and isinstance(opens, Component):
            # open is an initialised component, use it
            return opens
        # A url change to a registered page means navigation happened;
        # hand back the matching Page object instead of self.
        if self.url != self.location() and self.location() in self._registry:
            return self._registry(self.location())(driver=self._driver)
        return self

    def click(self, selector=None, opens=None):
        """Main method for interacting with a page or component

        Returns either self, a new page object based on browser url, or a
        page component based on the selector passed in as 'opens'.

        selector can be a css selector string, a Component class, or an
        initialised Component instance; with no selector the object clicks
        itself.
        """
        if isinstance(selector, basestring):
            # selector passed in, get component class from registry
            component = self._registry(selector)(self)
            return self._click(component, opens)
        elif isinstance(selector, Component) and isclass(selector):
            # We already have a component class, so just use it
            component = selector(self)
            return self._click(component, opens)
        elif isinstance(selector, Component) and not isclass(selector):
            # We already have an initalised component, so just use it
            component = selector
            return self._click(component, opens)
        elif selector is None:
            # We have no selector so click on yourself
            return self._click(self, opens)
        raise ValueError(
            'selector, "{0}", not a string or Component instance.'.format(
                selector))

    def click_link(self, link_text, opens=None):
        """Find the link with the given text and click it"""
        component = Component(self, find_by='link_text')
        component.selector = link_text
        return self._click(component, opens)

    def click_button(self, button_text, opens=None):
        """Find buttons on the page and click the first one with the text"""
        component = Component(self, find_by='button_text')
        component.selector = button_text
        return self._click(component, opens)

    def location(self):
        """The current page location without any query parameters"""
        return self.page._driver.current_url.split('?')[0]

    def select_option(self, selector, option_text):
        """Select option in dropdown identified by selector with given text"""
        def find_and_select(selector, option_text):
            # NOTE: a stray trailing comma here previously turned the
            # return value into the 1-tuple ``(None,)``.
            return Select(
                self.get_element(selector)
            ).select_by_visible_text(option_text)

        # Retry to paper over options that are rendered asynchronously.
        retryable_find_and_select = flow.retry(
            find_and_select, exceptions.NoSuchElementException
        )
        return retryable_find_and_select(selector, option_text)

    def scroll_into_view(self):
        """Scroll the window until the component is visible"""
        # Accessing this property triggers the scroll as a side effect.
        self._element.location_once_scrolled_into_view

    def clear(self, selector):
        """Clear text out of input identified by CSS selector"""
        try:
            self.get_visible_element(selector).clear()
        except (exceptions.InvalidElementStateException,
                exceptions.WebDriverException):
            raise exceptions.WebDriverException(
                'You cannot clear that element')

    def hover(self, selector, opens=None):
        """Hover over element identified by CSS selector"""
        ActionChains(self._driver).move_to_element(
            self.get_element(selector)).perform()
        if opens:
            return self._get_component_class(opens)(self)

    def enter_text(self, selector, text):
        """Enter text into DOM element identified by selector

        The function performs some error checking because as of Jan 2014
        send_keys on the element is unreliable at text entry.
        """
        element = self.get_visible_element(selector)
        for _ in range(5):
            element.send_keys(*text)
            try:
                value_in_place = element.get_attribute("value") or element.text
            except exceptions.StaleElementReferenceException:
                # Typing triggered a re-render; assume the text landed.
                return
            expected = "".join([unicode(v) for v in text])
            if value_in_place == expected:
                return
            try:
                element.clear()
            except (exceptions.InvalidElementStateException,
                    exceptions.WebDriverException):
                return  # Element is not user editable and can't be cleared
            time.sleep(0.2)
        raise AssertionError("Unable to correctly type {0}".format(text))
class _WebElementProxy(object):
    """A proxy to the Selenium WebElement identified by obj's selector"""

    def __init__(self):
        # Fallback selector used when the owning object defines none.
        self.selector = 'html'

    def __get__(self, obj, owner):
        """Resolve the live WebElement for *obj* according to obj._find_by."""
        # The owning object's own selector wins over the 'html' fallback.
        selector = obj.selector if hasattr(obj, 'selector') else self.selector
        if obj._find_by == 'selector':
            try:
                return obj._driver.find_element_by_css_selector(selector)
            except exceptions.NoSuchElementException:
                # Not present yet -- wait up to ELEMENT_TIMEOUT for it.
                return WebDriverWait(obj._driver, ELEMENT_TIMEOUT).until(
                    ec.presence_of_element_located(
                        (
                            By.CSS_SELECTOR,
                            selector
                        )
                    ),
                    'No element "{0}", waited {1} seconds'.format(
                        selector, ELEMENT_TIMEOUT
                    )
                )
        elif obj._find_by == 'button_text':
            # Buttons are matched by exact, visible text.
            for button in obj._driver.find_elements_by_tag_name("button"):
                if button.text == obj.selector and button.is_displayed():
                    return button
            raise AssertionError(
                "Could not find a button with the text '%s'" % (selector,)
            )
        elif obj._find_by == 'link_text':
            try:
                return obj._driver.find_element_by_link_text(selector)
            except exceptions.NoSuchElementException:
                return WebDriverWait(obj._driver, ELEMENT_TIMEOUT).until(
                    ec.presence_of_element_located(
                        (
                            By.LINK_TEXT,
                            selector
                        )
                    ),
                    'No link with text "{0}", waited {1} seconds'.format(
                        selector, ELEMENT_TIMEOUT
                    )
                )
        elif obj._find_by == 'index_position':
            # Used by get_components(): pick the nth match for the selector.
            idx = obj._index_position
            return obj._driver.find_elements_by_css_selector(selector)[idx]
        else:
            raise ValueError('Element proxy needs to know how to find element')

    def __set__(self, obj, value):
        # The proxied element is read-only by design.
        raise AttributeError()
class WebDriverOnly(object):
    """Descriptor that only accepts WebDriver instances.

    The driver is stored on the owning *instance*.  (The original stored
    it on the descriptor object itself, which meant every Page instance of
    a class silently shared the most recently assigned driver.)
    """

    def __set__(self, obj, value):
        if not isinstance(value, WebDriver):
            raise TypeError('driver must be an instance of WebDriver')
        # This is a data descriptor, so a plain attribute with the same
        # name would be shadowed; stash under a private per-instance key.
        obj.__dict__['_webdriver'] = value

    def __get__(self, obj, owner):
        if obj is None:
            return self
        try:
            return obj.__dict__['_webdriver']
        except KeyError:
            raise AttributeError('driver has not been set on this instance')
class _BaseComponent(object):
    # Descriptor resolving to the live WebElement for this object's selector.
    _element = _WebElementProxy()

    @property
    def text(self):
        """The visible text of the component"""
        return self._element.text
class Component(
        with_metaclass(_RegistryMeta, _BaseComponent, _SeleniumWrapper)):
    """Generic page component, intended to be subclassed

    Pages and Components are stored in a registry and switched to dynamically

    class ShoppingBasket(Component):
        selector = '#shopping-basket'

        def remove_item(self, name):
            contents = self.get_components('tr')
            for item in contents:
                if name in item.text:
                    item.click('.remove')
                    return
            raise AssertionError('No item in basket called "{0}"'.format(name))

    page = Page(driver)
    basket = page.click_link('Shopping Basket', opens=ShoppingBasket)
    # The following would also work identically:
    ## basket = page.click_link('Shopping Basket', opens='#shopping-basket')
    basket.remove_item('Buzz Lightyear')
    """
    _registry = _Registry()
    selector = None  # CSS selector locating the component; set by subclasses

    def __repr__(self):
        output = '{0}(selector="{1}")'.format(
            self.__class__.__name__, self.selector)
        if self._find_by == 'index_position':
            # Distinguish the nth match of a shared selector.
            output = output + '[{0}]'.format(self._index_position)
        return output

    def __init__(self, parent, driver=None, find_by='selector'):
        # parent -- the Page or Component this component lives inside.
        # NOTE(review): ``driver`` is accepted but never used here --
        # presumably kept for signature symmetry with Page; confirm.
        self._parent = parent
        self._find_by = find_by

    @property
    def _driver(self):
        # Element lookups are scoped to the parent's element, not the page.
        return self._parent._element

    @property
    def page(self):
        # Walk up the parent chain until the containing Page is reached.
        if isinstance(self._parent, Page):
            return self._parent
        return self._parent.page

    @property
    def url(self):
        """The url of the page which the component is inside"""
        return self.page.url
class Page(
        with_metaclass(_RegistryMeta, _BaseComponent, _SeleniumWrapper)):
    """Generic web page, intended to be subclassed

    Pages and Components are stored in a registry and switched to dynamically

    class LoginPage(Page):
        url = 'https://your-site.com/login'

        def login(username, password):
            self.enter_text("input[name=username]", username)
            self.enter_text("input[name=password]", password)
            return self.click("input[type=submit]")
    """
    _driver = WebDriverOnly()  # assignment is type-checked by the descriptor
    _registry = _Registry()

    def __init__(self, driver=None):
        self._find_by = 'selector'
        self.selector = 'html'  # a page's root element is the document
        try:
            self._driver = driver
        except TypeError:  # Driver was a WebElement, not WebDriver
            # A WebElement's .parent is the WebDriver that produced it.
            self._driver = driver.parent
        if self.location() != self.url:
            # Instantiating a page navigates the browser to it.
            self._driver.get(self.url)

    @property
    def page(self):
        """Unifies the api for pages and components slightly"""
        return self
|
tomdottom/keteparaha
|
keteparaha/page.py
|
Python
|
mit
| 18,789
|
[
"VisIt"
] |
41431ba98d0eca0bff0f5b2bfd24a3c53c0ecd76a5c5e55622fb530504ac9679
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
from pymatgen.optimization.linear_assignment import LinearAssignment
import numpy as np
class LinearAssignmentTest(unittest.TestCase):
    """Tests for pymatgen.optimization.linear_assignment.LinearAssignment."""

    def test(self):
        # Three fixed 10x10 cost matrices with known optimal assignment costs.
        w0 = np.array([[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
                       [26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
                       [48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
                       [47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
                       [1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
                       [81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
                       [42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
                       [14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
                       [11, 37, 24, 70, 62, 35, 64, 18, 99, 20],
                       [73, 11, 98, 50, 19, 96, 61, 73, 98, 14]])
        w1 = np.array([[95, 60, 89, 38, 36, 38, 58, 94, 66, 23],
                       [37, 0, 40, 58, 97, 85, 18, 54, 86, 21],
                       [9, 74, 11, 45, 65, 64, 27, 88, 24, 26],
                       [58, 90, 6, 36, 17, 21, 2, 12, 80, 90],
                       [33, 0, 74, 75, 11, 84, 34, 7, 39, 0],
                       [17, 61, 94, 68, 27, 41, 33, 86, 59, 2],
                       [61, 94, 36, 53, 66, 33, 15, 87, 97, 11],
                       [22, 20, 57, 69, 15, 9, 15, 8, 82, 68],
                       [40, 0, 13, 61, 67, 40, 29, 25, 72, 44],
                       [13, 97, 97, 54, 5, 30, 44, 75, 16, 0]])
        w2 = np.array([[34, 44, 72, 13, 10, 58, 16, 1, 10, 61],
                       [54, 70, 99, 4, 64, 0, 15, 94, 39, 46],
                       [49, 21, 80, 68, 96, 58, 24, 87, 79, 67],
                       [86, 46, 58, 83, 83, 56, 83, 65, 4, 96],
                       [48, 95, 64, 34, 75, 82, 64, 47, 35, 19],
                       [11, 49, 6, 57, 80, 26, 47, 63, 75, 75],
                       [74, 7, 15, 83, 64, 26, 78, 17, 67, 46],
                       [19, 13, 2, 26, 52, 16, 65, 24, 2, 98],
                       [36, 7, 93, 93, 11, 39, 94, 26, 46, 69],
                       [32, 95, 37, 50, 97, 96, 12, 70, 40, 93]])
        la0 = LinearAssignment(w0)
        self.assertEqual(la0.min_cost, 194, 'Incorrect cost')
        la1 = LinearAssignment(w1)
        # NOTE(review): this compares la0.min_cost with itself, so it only
        # verifies that repeated reads of the property agree -- possibly
        # ``la1.min_cost`` was intended.  Left as-is to preserve behavior.
        self.assertEqual(la0.min_cost, la0.min_cost, 'Property incorrect')
        self.assertEqual(la1.min_cost, 125, 'Incorrect cost')
        la2 = LinearAssignment(w2)
        self.assertEqual(la2.min_cost, 110, 'Incorrect cost')

    def test_rectangular(self):
        # 9x10 matrix: fewer rows than columns is allowed.
        w0 = np.array([[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
                       [26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
                       [48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
                       [47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
                       [1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
                       [81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
                       [42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
                       [14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
                       [11, 37, 24, 70, 62, 35, 64, 18, 99, 20]])
        la0 = LinearAssignment(w0)

        # Padding with an all-zero row must not change the optimal cost.
        w1 = np.array([[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
                       [26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
                       [48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
                       [47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
                       [1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
                       [81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
                       [42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
                       [14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
                       [11, 37, 24, 70, 62, 35, 64, 18, 99, 20],
                       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]])
        la1 = LinearAssignment(w1)

        self.assertEqual(len(la1.solution), 10)
        self.assertEqual(la0.min_cost, la1.min_cost)

        # Matrices with more rows than columns are rejected.
        self.assertRaises(ValueError, LinearAssignment, w0.T)

    def another_test_case(self):
        # NOTE(review): missing the ``test_`` prefix, so unittest never
        # discovers or runs this case -- rename to ``test_...`` if it is
        # meant to execute.
        w1 = np.array([[0.03900238875468465, 0.003202415721817453, 0.20107156847937024, 0.0, 0.5002116398420846,
                        0.11951326861160616, 0.0, 0.5469032363997579, 0.3243791041219123, 0.1119882291981289],
                       [0.6048342640688928, 0.3847629088356139, 0.0, 0.44358269535118944, 0.45925670625165016,
                        0.31416882324798145, 0.8065128182180494, 0.0, 0.26153475286065075, 0.6862799559241944],
                       [0.5597215814025246, 0.15133664165478322, 0.0, 0.6218101659263295, 0.15438455134183793,
                        0.17281467064043232, 0.8458127968475472, 0.020860721537078075, 0.1926886361228456, 0.0],
                       [0.0, 0.0, 0.6351848838666995, 0.21261247074659906, 0.4811603832432241, 0.6663733668270337,
                        0.63970145187428, 0.1415815172623256, 0.5294574133825874, 0.5576702829768786],
                       [0.25052904388309016, 0.2309392544588127, 0.0656162006684271, 0.0248922362001176, 0.0,
                        0.2101808638720748, 0.6529031699724193, 0.1503003886507902, 0.375576165698992,
                        0.7368328849560374],
                       [0.0, 0.042215873587668984, 0.10326920761908365, 0.3562551151517992, 0.9170343984958856,
                        0.818783531026254, 0.7896770426052844, 0.0, 0.6573135097946438, 0.17806189728574429],
                       [0.44992199118890386, 0.0, 0.38548898339412585, 0.6269193883601244, 1.0022861602564634, 0.0,
                        0.1869765500803764, 0.03474156273982543, 0.3715310534696664, 0.6197122486230232],
                       [0.37939853696836545, 0.2421427374018027, 0.5586150342727723, 0.0, 0.7171485794073893,
                        0.8021029235865014, 0.11213464903613135, 0.6497896761660467, 0.3274108706187846, 0.0],
                       [0.6674685746225324, 0.5347953626128863, 0.11461835366075113, 0.0, 0.8170639855163434,
                        0.7291931505979982, 0.3149153087053108, 0.1008681103294512, 0.0, 0.18751172321112997],
                       [0.6985944652913342, 0.6139921045056471, 0.0, 0.4393266955771965, 0.0, 0.47265399761400695,
                        0.3674241844351025, 0.04731761392352629, 0.21484886069716147, 0.16488710920126137]])
        la = LinearAssignment(w1)
        self.assertAlmostEqual(la.min_cost, 0)

    def test_boolean_inputs(self):
        # ``np.bool`` was deprecated and removed in NumPy >= 1.24; the
        # builtin ``bool`` is the equivalent dtype.
        w = np.ones((135, 135), dtype=bool)
        np.fill_diagonal(w, False)
        la = LinearAssignment(w)
        # if the input doesn't get converted to a float, the masking
        # doesn't work properly
        self.assertEqual(la.orig_c.dtype, np.float64)
if __name__ == "__main__":
    # Allow running this test module directly with ``python <file>``.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
yanikou19/pymatgen
|
pymatgen/optimization/tests/test_linear_assignment.py
|
Python
|
mit
| 6,889
|
[
"pymatgen"
] |
cd4cf865bff2d8996e61041d05c2e8801f74665bd34c3c48e1d7f1dcd8d4e87a
|
# Orca
#
# Copyright (C) 2015 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2015 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import orca.scripts.toolkits.WebKitGtk as WebKitGtk
class SpeechGenerator(WebKitGtk.SpeechGenerator):
    """Speech-generator customisations for Evolution's message list.

    Per-object predicate results are memoised in ``self._cache`` for the
    duration of a single ``generateSpeech`` call.
    """

    def __init__(self, script):
        super().__init__(script)
        # hash(obj) -> {predicate name: cached result}
        self._cache = {}

    def _getCached(self, obj, key, compute):
        """Return the memoised value of *key* for *obj*, calling *compute*
        (a zero-argument callable) and caching its result on a miss.

        Consolidates the caching boilerplate the original repeated in each
        predicate (which also used the non-idiomatic ``rv == None``)."""
        cached = self._cache.setdefault(hash(obj), {})
        rv = cached.get(key)
        if rv is None:
            rv = compute()
            cached[key] = rv
        return rv

    def _isTreeTableCell(self, obj):
        return self._getCached(
            obj, "isTreeTableCell",
            lambda: obj.parent
            and obj.parent.getRole() == pyatspi.ROLE_TREE_TABLE)

    def _isMessageListStatusCell(self, obj):
        return self._getCached(
            obj, "isMessageListStatusCell",
            lambda: self._script.utilities.isMessageListStatusCell(obj))

    def _isMessageListToggleCell(self, obj):
        return self._getCached(
            obj, "isMessageListToggleCell",
            lambda: self._script.utilities.isMessageListToggleCell(obj))

    def _isFocused(self, obj):
        return self._getCached(
            obj, "isFocused",
            lambda: obj.getState().contains(pyatspi.STATE_FOCUSED))

    def _isChecked(self, obj):
        return self._getCached(
            obj, "isChecked",
            lambda: obj.getState().contains(pyatspi.STATE_CHECKED))

    def _isInNewRow(self, obj):
        def compute():
            row, _column = self._script.utilities.coordinatesForCell(obj)
            return row != self._script.pointOfReference.get("lastRow")
        return self._getCached(obj, "isInNewRow", compute)

    def _generateCellCheckedState(self, obj, **args):
        # Status cells never speak a checked state; toggle cells only do
        # so when focused within the current row.
        if self._isMessageListStatusCell(obj):
            return []
        if self._isMessageListToggleCell(obj):
            if self._isInNewRow(obj) or not self._isFocused(obj):
                return []
        return super()._generateCellCheckedState(obj, **args)

    def _generateLabel(self, obj, **args):
        if self._isMessageListToggleCell(obj):
            return []
        return super()._generateLabel(obj, **args)

    def _generateName(self, obj, **args):
        if self._isMessageListToggleCell(obj) \
           and not self._isMessageListStatusCell(obj):
            return []
        return super()._generateName(obj, **args)

    def _generateLabelOrName(self, obj, **args):
        if self._isMessageListToggleCell(obj) \
           and not self._isMessageListStatusCell(obj):
            return []
        return super()._generateLabelOrName(obj, **args)

    def _generateRealActiveDescendantDisplayedText(self, obj, **args):
        if self._isMessageListToggleCell(obj) \
           and not self._isMessageListStatusCell(obj):
            if not self._isChecked(obj):
                return []
            if self._isFocused(obj) and not self._isInNewRow(obj):
                return []
        return super()._generateRealActiveDescendantDisplayedText(obj, **args)

    def _generateRoleName(self, obj, **args):
        if self._isMessageListToggleCell(obj) and not self._isFocused(obj):
            return []
        return super()._generateRoleName(obj, **args)

    def _generateUnselectedCell(self, obj, **args):
        if self._isMessageListToggleCell(obj) or self._isTreeTableCell(obj):
            return []
        return super()._generateUnselectedCell(obj, **args)

    def generateSpeech(self, obj, **args):
        # Reset the memo cache around each generation pass so stale state
        # from a previous object is never reused.
        self._cache = {}
        results = super().generateSpeech(obj, **args)
        self._cache = {}
        return results
|
pvagner/orca
|
src/orca/scripts/apps/evolution/speech_generator.py
|
Python
|
lgpl-2.1
| 5,188
|
[
"ORCA"
] |
dcd69fff8d45edd807e367031d520310d6332fb1c72f190e98fbd0b4658d3f5b
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=wildcard-import
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=unused-wildcard-import
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=unused-argument
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from logging import getLogger
logger = getLogger(__name__)
# --- Generic browser / navigation steps -------------------------------------

@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
    world.wait(seconds)


@step('I reload the page$')
def reload_the_page(step):
    # Let any outstanding AJAX settle before reloading, and wait for the
    # new page's javascript afterwards.
    world.wait_for_ajax_complete()
    world.browser.reload()
    world.wait_for_js_to_load()


@step('I press the browser back button$')
def browser_back(step):
    world.browser.driver.back()


@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
    world.visit('/')
    assert world.is_css_present('header.global')


@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
    world.visit('/dashboard')
    assert world.is_css_present('section.container.dashboard')


@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
    assert world.is_css_present('section.container.dashboard')
    assert 'Dashboard' in world.browser.title


@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
    world.visit('/courses')
    assert world.is_css_present('section.courses')


@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
    button_css = 'input[value="%s"]' % value
    world.css_click(button_css)


@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
    world.click_link(linktext)


@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
    # Scenario paths may contain a ``{0}`` placeholder for the course id.
    if 'COURSE' in world.scenario_dict:
        path = path.format(world.scenario_dict['COURSE'].id)
    assert world.url_equals(path), (
        "path should be {!r} but is {!r}".format(path, world.browser.url)
    )


@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
    assert_equals(world.browser.title, title)


@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
    assert(title in world.browser.title)


# --- Authentication / user-fixture steps ------------------------------------

@step('I log in$')
def i_log_in(step):
    world.log_in(username='robot', password='test')


@step('I am a logged in user$')
def i_am_logged_in_user(step):
    world.create_user('robot', 'test')
    world.log_in(username='robot', password='test')


@step('I am not logged in$')
def i_am_not_logged_in(step):
    world.visit('logout')


@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    world.register_by_course_key(course_key, True)


# --- Link assertions ---------------------------------------------------------

@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
    world.click_link(text)


@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
    assert_equals(world.browser.url, url)


@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
    assert len(world.browser.find_link_by_text(text)) > 0


@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
    link = world.browser.find_by_id(link_id)
    assert len(link) > 0
    assert_equals(link.text, text)


@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
    link = world.browser.find_link_by_text(text)
    assert len(link) > 0
    # django_url is read via the module because lettuce assigns it late.
    assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
    """Create and log in the default test user, then verify the dashboard loads."""
    world.create_user('robot', 'test')
    world.log_in(username='robot', password='test')
    world.browser.visit(lettuce.django.django_url('/'))
    # The dashboard container is the landing page for an authenticated user.
    dash_css = 'section.container.dashboard'
    assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
    """Create the default test account 'robot' without logging in."""
    world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
    """Assert that no modal dialogs remain open on the page."""
    assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
if 'COURSE' in world.scenario_dict:
url = url.format(world.scenario_dict['COURSE'].id)
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
    """Block until all in-flight AJAX requests on the page have completed."""
    wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
    """
    Please note: This method must be called RIGHT BEFORE an expected alert
    Window variables are page local and thus all changes are removed upon navigating to a new page
    In addition, this method changes the functionality of ONLY future alerts
    """
    # Override window.confirm to auto-accept and window.alert to be a no-op.
    world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
    """
    Please note: This method must be called RIGHT BEFORE an expected alert
    Window variables are page local and thus all changes are removed upon navigating to a new page
    In addition, this method changes the functionality of ONLY future alerts
    """
    # Override window.confirm to auto-dismiss and window.alert to be a no-op.
    world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
    """
    Please note: This method must be called RIGHT BEFORE an expected alert
    Window variables are page local and thus all changes are removed upon navigating to a new page
    In addition, this method changes the functionality of ONLY future alerts
    """
    # Bug fix: the original applied "%" to execute_script's *return value*
    # (so the script literally returned "%s" and the step then raised a
    # TypeError), and the substituted value was not quoted as a JS string.
    # Interpolate into the script, quoted, before executing it.
    world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
    """Run ipdb as step for easy debugging"""
    import ipdb
    ipdb.set_trace()
    # Keep the step "passing" after the interactive session ends.
    assert True
@step(u'(I am viewing|s?he views) the course team settings$')
def view_course_team_settings(_step, whom):
    """ navigates to course team settings page """
    # Open the course settings menu, then click the "team" entry.
    world.click_course_settings()
    link_css = 'li.nav-course-settings-team a'
    world.css_click(link_css)
|
eestay/edx-platform
|
common/djangoapps/terrain/steps.py
|
Python
|
agpl-3.0
| 7,478
|
[
"VisIt"
] |
8fbcf23b006a2914a5ca4007258503ee0ccb20f1a314e9c394e4563b41c5536c
|
"""
This module finds diffusion paths through a structure based on a given
potential field.
If you use PathFinder algorithm for your research, please consider citing the
following work::
Ziqin Rong, Daniil Kitchaev, Pieremanuele Canepa, Wenxuan Huang, Gerbrand
Ceder, The Journal of Chemical Physics 145 (7), 074112
"""
import logging
import math
from abc import ABCMeta
import warnings
import numpy as np
import numpy.linalg as la
import scipy.signal
import scipy.stats
from scipy.interpolate import interp1d
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Poscar
__author__ = "Daniil Kitchaev"
__version__ = "1.0"
__maintainer__ = "Daniil Kitchaev, Ziqin Rong"
__email__ = "dkitch@mit.edu, rongzq08@mit.edu"
__status__ = "Development"
__date__ = "March 17, 2015"
logger = logging.getLogger(__name__)
warnings.warn(
"This code has been superseded by pymatgen.analysis.neb in the separate add-on package"
"pymatgen-diffusion. This module here is retained for backwards compatibility. It will be removed from"
"2022.1.1.",
FutureWarning,
)
class NEBPathfinder:
"""
General pathfinder for interpolating between two structures, where the
interpolating path is calculated with the elastic band method with
respect to the given static potential for sites whose indices are given
in relax_sites, and is linear otherwise.
"""
def __init__(self, start_struct, end_struct, relax_sites, v, n_images=20, mid_struct=None):
"""
Args:
start_struct, end_struct: Endpoint structures to interpolate
relax_sites: List of site indices whose interpolation paths should
be relaxed
v: Static potential field to use for the elastic band relaxation
n_images: Number of interpolation images to generate
mid_struct: (optional) additional structure between the start and end structures to help
"""
self.__s1 = start_struct
self.__s2 = end_struct
self.__mid = mid_struct
self.__relax_sites = relax_sites
self.__v = v
self.__n_images = n_images
self.__images = None
self.interpolate()
def interpolate(self):
"""
Finds a set of n_images from self.s1 to self.s2, where all sites except
for the ones given in relax_sites, the interpolation is linear (as in
pymatgen.core.structure.interpolate), and for the site indices given
in relax_sites, the path is relaxed by the elastic band method within
the static potential V.
If a mid point is defined we will interpolate from s1--> mid -->s2
The final number of images will still be n_images.
"""
if self.__mid is not None:
# to make arithmatic easier we will do the interpolation in two parts with n images each
# then just take every other image at the end, this results in exactly n images
images_0 = self.__s1.interpolate(self.__mid, nimages=self.__n_images, interpolate_lattices=False)[:-1]
images_1 = self.__mid.interpolate(self.__s2, nimages=self.__n_images, interpolate_lattices=False)
images = images_0 + images_1
images = images[::2]
else:
images = self.__s1.interpolate(self.__s2, nimages=self.__n_images, interpolate_lattices=False)
for site_i in self.__relax_sites:
start_f = images[0].sites[site_i].frac_coords
end_f = images[-1].sites[site_i].frac_coords
path = NEBPathfinder.string_relax(
NEBPathfinder.__f2d(start_f, self.__v),
NEBPathfinder.__f2d(end_f, self.__v),
self.__v,
n_images=(self.__n_images + 1),
dr=[
self.__s1.lattice.a / self.__v.shape[0],
self.__s1.lattice.b / self.__v.shape[1],
self.__s1.lattice.c / self.__v.shape[2],
],
)
for image_i, image in enumerate(images):
image.translate_sites(
site_i,
NEBPathfinder.__d2f(path[image_i], self.__v) - image.sites[site_i].frac_coords,
frac_coords=True,
to_unit_cell=True,
)
self.__images = images
@property
def images(self):
"""
Returns a list of structures interpolating between the start and
endpoint structures.
"""
return self.__images
def plot_images(self, outfile):
"""
Generates a POSCAR with the calculated diffusion path with respect to the first endpoint.
:param outfile: Output file for the POSCAR
"""
sum_struct = self.__images[0].sites
for image in self.__images:
for site_i in self.__relax_sites:
sum_struct.append(
PeriodicSite(
image.sites[site_i].specie,
image.sites[site_i].frac_coords,
self.__images[0].lattice,
to_unit_cell=True,
coords_are_cartesian=False,
)
)
sum_struct = Structure.from_sites(sum_struct, validate_proximity=False)
p = Poscar(sum_struct)
p.write_file(outfile)
@staticmethod
def string_relax(
start,
end,
V,
n_images=25,
dr=None,
h=3.0,
k=0.17,
min_iter=100,
max_iter=10000,
max_tol=5e-6,
):
"""
Implements path relaxation via the elastic band method. In general, the
method is to define a path by a set of points (images) connected with
bands with some elasticity constant k. The images then relax along the
forces found in the potential field V, counterbalanced by the elastic
response of the elastic band. In general the endpoints of the band can
be allowed to relax also to their local minima, but in this calculation
they are kept fixed.
Args:
start, end: Endpoints of the path calculation given in discrete
coordinates with respect to the grid in V
V: potential field through which to calculate the path
n_images: number of images used to define the path. In general
anywhere from 20 to 40 seems to be good.
dr: Conversion ratio from discrete coordinates to real coordinates
for each of the three coordinate vectors
h: Step size for the relaxation. h = 0.1 works reliably, but is
slow. h=10 diverges with large gradients but for the types of
gradients seen in CHGCARs, works pretty reliably
k: Elastic constant for the band (in real units, not discrete)
min_iter, max_iter: Number of optimization steps the string will
take before exiting (even if unconverged)
max_tol: Convergence threshold such that if the string moves by
less than max_tol in a step, and at least min_iter steps have
passed, the algorithm will terminate. Depends strongly on the
size of the gradients in V, but 5e-6 works reasonably well for
CHGCARs.
"""
#
# This code is based on the MATLAB example provided by
# Prof. Eric Vanden-Eijnden of NYU
# (http://www.cims.nyu.edu/~eve2/main.htm)
#
# logger.debug("Getting path from {} to {} (coords wrt V grid)".format(start, end))
# Set parameters
if not dr:
dr = np.array([1.0 / V.shape[0], 1.0 / V.shape[1], 1.0 / V.shape[2]])
else:
dr = np.array(dr, dtype=float)
keff = k * dr * n_images
h0 = h
# Initialize string
g1 = np.linspace(0, 1, n_images)
s0 = start
s1 = end
s = np.array([g * (s1 - s0) for g in g1]) + s0
ds = s - np.roll(s, 1, axis=0)
ds[0] = ds[0] - ds[0]
ls = np.cumsum(la.norm(ds, axis=1))
ls = ls / ls[-1]
fi = interp1d(ls, s, axis=0)
s = fi(g1)
# Evaluate initial distances (for elastic equilibrium)
ds0_plus = s - np.roll(s, 1, axis=0)
ds0_minus = s - np.roll(s, -1, axis=0)
ds0_plus[0] = ds0_plus[0] - ds0_plus[0]
ds0_minus[-1] = ds0_minus[-1] - ds0_minus[-1]
# Evaluate potential gradient outside the loop, as potential does not
# change per step in this approximation.
dV = np.gradient(V)
# Evolve string
for step in range(0, max_iter):
if step > min_iter:
# Gradually decay step size to prevent oscillations
h = h0 * np.exp(-2.0 * (step - min_iter) / max_iter)
else:
h = h0
# Calculate forces acting on string
d = V.shape
s0 = s
edV = np.array(
[
[
dV[0][int(pt[0]) % d[0]][int(pt[1]) % d[1]][int(pt[2]) % d[2]] / dr[0],
dV[1][int(pt[0]) % d[0]][int(pt[1]) % d[1]][int(pt[2]) % d[2]] / dr[0],
dV[2][int(pt[0]) % d[0]][int(pt[1]) % d[1]][int(pt[2]) % d[2]] / dr[0],
]
for pt in s
]
)
# if(step % 100 == 0):
# logger.debug(edV)
# Update according to force due to potential and string elasticity
ds_plus = s - np.roll(s, 1, axis=0)
ds_minus = s - np.roll(s, -1, axis=0)
ds_plus[0] = ds_plus[0] - ds_plus[0]
ds_minus[-1] = ds_minus[-1] - ds_minus[-1]
Fpot = edV
Fel = keff * (la.norm(ds_plus) - la.norm(ds0_plus)) * (ds_plus / la.norm(ds_plus))
Fel += keff * (la.norm(ds_minus) - la.norm(ds0_minus)) * (ds_minus / la.norm(ds_minus))
s -= h * (Fpot + Fel)
# Fix endpoints
s[0] = s0[0]
s[-1] = s0[-1]
# Reparametrize string
ds = s - np.roll(s, 1, axis=0)
ds[0] = ds[0] - ds[0]
ls = np.cumsum(la.norm(ds, axis=1))
ls = ls / ls[-1]
fi = interp1d(ls, s, axis=0)
s = fi(g1)
tol = la.norm((s - s0) * dr) / n_images / h
if tol > 1e10:
raise ValueError("Pathfinding failed, path diverged! Consider reducing h to " "avoid divergence.")
if step > min_iter and tol < max_tol:
logger.debug("Converged at step {}".format(step))
break
if step % 100 == 0:
logger.debug("Step {} - ds = {}".format(step, tol))
return s
@staticmethod
def __f2d(frac_coords, v):
"""
Converts fractional coordinates to discrete coordinates with respect to
the grid size of v
"""
# frac_coords = frac_coords % 1
return np.array(
[
int(frac_coords[0] * v.shape[0]),
int(frac_coords[1] * v.shape[1]),
int(frac_coords[2] * v.shape[2]),
]
)
@staticmethod
def __d2f(disc_coords, v):
"""
Converts a point given in discrete coordinates withe respect to the
grid in v to fractional coordinates.
"""
return np.array(
[
disc_coords[0] / v.shape[0],
disc_coords[1] / v.shape[1],
disc_coords[2] / v.shape[2],
]
)
class StaticPotential(metaclass=ABCMeta):
    """
    Defines a general static potential for diffusion calculations. Implements
    grid-rescaling and smearing for the potential grid. Also provides a
    function to normalize the potential from 0 to 1 (recommended).
    """

    def __init__(self, struct, pot):
        """
        :param struct: atomic structure of the potential
        :param pot: volumetric data to be used as a potential
        """
        self.__v = pot
        self.__s = struct

    def get_v(self):
        """
        Returns the potential
        """
        return self.__v

    def normalize(self):
        """
        Sets the potential range 0 to 1.
        """
        self.__v = self.__v - np.amin(self.__v)
        self.__v = self.__v / np.amax(self.__v)

    def rescale_field(self, new_dim):
        """
        Changes the discretization of the potential field by linear
        interpolation. This is necessary if the potential field
        obtained from DFT is strangely skewed, or is too fine or coarse. Obeys
        periodic boundary conditions at the edges of
        the cell. Alternatively useful for mixing potentials that originally
        are on different grids.

        :param new_dim: tuple giving the numpy shape of the new grid
        """
        v_dim = self.__v.shape
        # Pad one plane on each axis with wrapped values so interpolation at
        # the cell edge sees the periodic image. (np.lib.pad is a deprecated
        # alias; use np.pad, consistent with gaussian_smear below.)
        padded_v = np.pad(self.__v, ((0, 1), (0, 1), (0, 1)), mode="wrap")
        ogrid_list = np.array([list(c) for c in list(np.ndindex(v_dim[0] + 1, v_dim[1] + 1, v_dim[2] + 1))])
        v_ogrid = padded_v.reshape(((v_dim[0] + 1) * (v_dim[1] + 1) * (v_dim[2] + 1), -1))
        ngrid_a, ngrid_b, ngrid_c = np.mgrid[
            0 : v_dim[0] : v_dim[0] / new_dim[0],
            0 : v_dim[1] : v_dim[1] / new_dim[1],
            0 : v_dim[2] : v_dim[2] / new_dim[2],
        ]
        v_ngrid = scipy.interpolate.griddata(ogrid_list, v_ogrid, (ngrid_a, ngrid_b, ngrid_c), method="linear").reshape(
            (new_dim[0], new_dim[1], new_dim[2])
        )
        self.__v = v_ngrid

    def gaussian_smear(self, r):
        """
        Applies an isotropic Gaussian smear of width (standard deviation) r to
        the potential field. This is necessary to avoid finding paths through
        narrow minima or nodes that may exist in the field (although any
        potential or charge distribution generated from GGA should be
        relatively smooth anyway). The smearing obeys periodic
        boundary conditions at the edges of the cell.

        :param r - Smearing width in cartesian coordinates, in the same units
            as the structure lattice vectors
        """
        # Since scaling factor in fractional coords is not isotropic, have to
        # have different radii in 3 directions
        a_lat = self.__s.lattice.a
        b_lat = self.__s.lattice.b
        c_lat = self.__s.lattice.c
        # Conversion factors for discretization of v
        v_dim = self.__v.shape
        r_frac = (r / a_lat, r / b_lat, r / c_lat)
        r_disc = (
            int(math.ceil(r_frac[0] * v_dim[0])),
            int(math.ceil(r_frac[1] * v_dim[1])),
            int(math.ceil(r_frac[2] * v_dim[2])),
        )
        # Build a Gaussian kernel spanning +/- 2 sigma on each axis; entries
        # are distances (in units of r) from the kernel center, measured in
        # real space via the lattice matrix.
        gauss_dist = np.zeros((r_disc[0] * 4 + 1, r_disc[1] * 4 + 1, r_disc[2] * 4 + 1))
        for g_a in np.arange(-2.0 * r_disc[0], 2.0 * r_disc[0] + 1, 1.0):
            for g_b in np.arange(-2.0 * r_disc[1], 2.0 * r_disc[1] + 1, 1.0):
                for g_c in np.arange(-2.0 * r_disc[2], 2.0 * r_disc[2] + 1, 1.0):
                    g = np.array([g_a / v_dim[0], g_b / v_dim[1], g_c / v_dim[2]]).T
                    gauss_dist[int(g_a + r_disc[0])][int(g_b + r_disc[1])][int(g_c + r_disc[2])] = (
                        la.norm(np.dot(self.__s.lattice.matrix, g)) / r
                    )
        gauss = scipy.stats.norm.pdf(gauss_dist)
        # Normalize the kernel so the smear conserves the field's total sum.
        gauss = gauss / np.sum(gauss, dtype=float)
        # Wrap-pad so the convolution respects periodic boundary conditions.
        padded_v = np.pad(
            self.__v,
            ((r_disc[0], r_disc[0]), (r_disc[1], r_disc[1]), (r_disc[2], r_disc[2])),
            mode="wrap",
        )
        smeared_v = scipy.signal.convolve(padded_v, gauss, mode="valid")
        self.__v = smeared_v
class ChgcarPotential(StaticPotential):
    """
    Implements a potential field based on the charge density output from VASP.
    """
    def __init__(self, chgcar, smear=False, normalize=True):
        """
        :param chgcar: Chgcar object based on a VASP run of the structure of
            interest (Chgcar.from_file("CHGCAR"))
        :param smear: Whether or not to apply a Gaussian smearing to the
            potential
        :param normalize: Whether or not to normalize the potential to range
            from 0 to 1
        """
        v = chgcar.data["total"]
        # Scale the raw grid data by the total number of grid points.
        v = v / (v.shape[0] * v.shape[1] * v.shape[2])
        StaticPotential.__init__(self, chgcar.structure, v)
        if smear:
            # 2.0 is in the same (cartesian) units as the lattice vectors,
            # per StaticPotential.gaussian_smear.
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
class FreeVolumePotential(StaticPotential):
    """
    Implements a potential field based on geometric distances from atoms in the
    structure - basically, the potential
    is lower at points farther away from any atoms in the structure.
    """
    def __init__(self, struct, dim, smear=False, normalize=True):
        """
        :param struct: Unit cell on which to base the potential
        :param dim: Grid size for the potential
        :param smear: Whether or not to apply a Gaussian smearing to the
            potential
        :param normalize: Whether or not to normalize the potential to range
            from 0 to 1
        """
        # NOTE(review): this attribute is name-mangled to
        # _FreeVolumePotential__s and is never read; StaticPotential.__init__
        # below stores its own copy of the structure.
        self.__s = struct
        v = FreeVolumePotential.__add_gaussians(struct, dim)
        StaticPotential.__init__(self, struct, v)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
    @staticmethod
    def __add_gaussians(s, dim, r=1.5):
        # Build a dim-shaped grid of (nearest-atom distance / r), then map it
        # through a normal pdf so far-from-atom points get low values.
        gauss_dist = np.zeros(dim)
        for a_d in np.arange(0.0, dim[0], 1.0):
            for b_d in np.arange(0.0, dim[1], 1.0):
                for c_d in np.arange(0.0, dim[2], 1.0):
                    coords_f = np.array([a_d / dim[0], b_d / dim[1], c_d / dim[2]])
                    # NOTE(review): coords_f is fractional here — confirm that
                    # Structure.get_sites_in_sphere accepts fractional centers
                    # (the pymatgen API documents cartesian coordinates).
                    d_f = sorted(s.get_sites_in_sphere(coords_f, s.lattice.a), key=lambda x: x[1])[0][1]
                    gauss_dist[int(a_d)][int(b_d)][int(c_d)] = d_f / r
        v = scipy.stats.norm.pdf(gauss_dist)
        return v
class MixedPotential(StaticPotential):
    """
    Implements a potential that is a weighted sum of some other potentials
    """

    def __init__(self, potentials, coefficients, smear=False, normalize=True):
        """
        Args:
            potentials: List of objects extending the StaticPotential superclass
            coefficients: Mixing weights for the elements of the potentials list
            smear: Whether or not to apply a Gaussian smearing to the potential
            normalize: Whether or not to normalize the potential to range from
                0 to 1
        """
        v = potentials[0].get_v() * coefficients[0]
        # Bug fix: "potentials[0].__s" is name-mangled *in this class* to
        # "_MixedPotential__s", an attribute nothing defines — the structure
        # is stored by StaticPotential.__init__ as "_StaticPotential__s" —
        # so the original line always raised AttributeError. Access the
        # correctly mangled name explicitly.
        s = potentials[0]._StaticPotential__s
        for i in range(1, len(potentials)):
            v += potentials[i].get_v() * coefficients[i]
        StaticPotential.__init__(self, s, v)
        if smear:
            self.gaussian_smear(2.0)
        if normalize:
            self.normalize()
|
gmatteo/pymatgen
|
pymatgen/analysis/path_finder.py
|
Python
|
mit
| 19,178
|
[
"Gaussian",
"VASP",
"pymatgen"
] |
7843812635cd561d046956b2264b351e5b119f4b9b1aebf4098b1c3038a3bc9f
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import errno
import fnmatch
import locale
import logging
import os
import platform
# Cached once at import time; every platform-specific helper below branches
# on this value ('Linux', 'Windows', or 'Darwin').
_system = platform.system()
log = logging.getLogger(__name__)
def _get_xdg_dir(envname, default):
default = os.path.expanduser(default)
filename = os.path.expanduser("~/.config/user-dirs.dirs")
try:
f = open(filename)
except IOError as e:
if e.errno == errno.ENOENT:
return default
for line in f:
if line.startswith(envname):
return os.path.expandvars(line[len(envname) + 2:-2])
return default
def get_application_dir(appname="stoq"):
"""Fetches a application specific directory,
this can be used to save temporary files and other state.
This also creates the directory if it doesn't exist
:returns: the application directory
"""
if _system == 'Linux':
appdir = os.path.join(os.environ['HOME'], '.' + appname)
elif _system == 'Windows':
appdir = os.path.join(os.environ['APPDATA'], appname)
elif _system == 'Darwin':
appdir = os.path.join(os.environ['HOME'], 'Library',
'Application Support', 'Stoq')
else:
raise SystemExit("unknown system: %s" % (_system, ))
if not os.path.exists(appdir):
os.makedirs(appdir)
return appdir
def get_documents_dir():
    """:returns: the documents dir for the current user"""
    if _system == 'Linux':
        # Respect the XDG configuration if present; fall back to ~/Documents.
        return _get_xdg_dir("XDG_DOCUMENTS_DIR", "~/Documents")
    elif _system == 'Windows':
        from win32com.shell import shell
        # Shell namespace GUID for the "My Documents" virtual folder.
        MY_DOCUMENTS = "::{450d8fba-ad25-11d0-98a8-0800361b1103}"
        folder = shell.SHGetDesktopFolder()
        # Resolve the GUID to a PIDL, then to a filesystem path.
        pidl = folder.ParseDisplayName(0, None, MY_DOCUMENTS)[1]
        return shell.SHGetPathFromIDList(pidl)
    elif _system == 'Darwin':
        return os.path.join(os.environ['HOME'], 'Documents')
    else:
        raise SystemExit("unknown system: %s" % (_system, ))
def get_username():
    """Return the login name of the current user.

    On Linux/Darwin this may be None if $USER is unset.
    """
    if _system == 'Windows':
        return os.environ['USERNAME']
    if _system in ('Linux', 'Darwin'):
        return os.environ.get('USER')
    raise SystemExit("unknown system: %s" % (_system, ))
def read_registry_key(root, key, value):
    """Reads a registry key and return it's value.
    None is returned if the value couldn't be read
    """
    # No-op everywhere except Windows.
    if platform.system() != 'Windows':
        return None
    # NOTE(review): `exceptions` and `_winreg` are Python 2-only modules
    # (Python 3 renamed `_winreg` to `winreg` and made WindowsError a
    # builtin alias of OSError) — confirm the target runtime for this path.
    import exceptions
    import _winreg
    if root == 'HKCC':
        root = _winreg.HKEY_CURRENT_USER
    elif root == 'HKLM':
        root = _winreg.HKEY_LOCAL_MACHINE
    else:
        raise ValueError(root)
    try:
        k = _winreg.OpenKey(root, key)
        reg_value, key_type = _winreg.QueryValueEx(k, value)
    except exceptions.WindowsError:
        # log.info('Error while reading %s/%s/%s: %r' % (root, k, value, e))
        return None
    return reg_value
def list_recursively(directory, pattern):
    """Return all file paths under *directory* whose names match *pattern*.

    Backup files (names starting with ".#" or ending with "~") are skipped.

    :param directory: directory to walk recursively
    :param pattern: glob pattern to match file names against
    """
    matches = []
    for root, _dirs, filenames in os.walk(directory):
        wanted = fnmatch.filter(filenames, pattern)
        matches.extend(
            os.path.join(root, name)
            for name in wanted
            if not name.startswith('.#') and not name.endswith('~'))
    return matches
def find_program(program):
    """Looks for program in PATH.

    :param program: name of the program to find
    :returns: the complete path of the program or None if it couldn't be found
    """
    # Bug fix: split on os.pathsep (':' on POSIX, ';' on Windows) instead of
    # a hardcoded ':' — this module explicitly supports Windows elsewhere.
    for path in os.environ['PATH'].split(os.pathsep):
        fullpath = os.path.join(path, program)
        if os.path.exists(fullpath):
            return fullpath
    return None
def get_system_locale():
    """Fetches the current locale according to the system.

    :returns: the current locale
    """
    # Locale comes in a tuple like ('en_US', 'UTF-8')
    if _system == 'Linux':
        lang = locale.getlocale(locale.LC_MESSAGES)
    elif _system == 'Windows':
        # NOTE(review): CPython's locale.getlocale() documents that LC_ALL is
        # not a supported category and raises TypeError — confirm these two
        # branches actually work on the targeted interpreter.
        lang = locale.getlocale(locale.LC_ALL)
    elif _system == 'Darwin':
        lang = locale.getlocale(locale.LC_ALL)
    else:
        raise SystemExit("unknown system: %s" % (_system, ))
    # Drop the encoding part, returning only the language code (or None).
    return lang[0]
def get_product_key():
    """Fetches the product key

    :returns: the product key, or None if it is not configured
    """
    if _system == 'Linux':
        app_dir = get_application_dir()
        filename = os.path.join(app_dir, "product_key")
        try:
            # Bug fix: use a context manager so the file handle is closed
            # (the original leaked it via open(filename).read()).
            with open(filename) as f:
                return f.read()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None
    elif _system == 'Windows':
        product_key = read_registry_key('HKCC', r'Software\Stoq',
                                        'ProductKey')
    elif _system == 'Darwin':
        product_key = None
    else:
        raise SystemExit("unknown system: %s" % (_system, ))
    return product_key
|
tiagocardosos/stoq
|
stoqlib/lib/osutils.py
|
Python
|
gpl-2.0
| 5,881
|
[
"VisIt"
] |
e1e0a55dfe7bc265dbd7dc75706fcf461f452f7124dfc7326788e1bc2e63bde1
|
import requests
import h2o
import h2o_test_utils
from h2o_test_utils import ModelSpec
from h2o_test_utils import GridSpec
def build_and_test(a_node, pp, datasets, algos, algo_additional_default_params):
    """Build models and grids against *datasets* and validate the results.

    :param a_node: H2O node connection used for all REST calls
    :param pp: pretty-printer used to dump validation failures
    :param datasets: dict mapping dataset names to frame keys
    :param algos: list of algo names this test suite knows about
    :param algo_additional_default_params: per-algo extra parameters merged
        into the defaults when validating each model builder
    """
    ####################################################################################################
    # Build and do basic validation checks on models
    ####################################################################################################
    models_to_build = [
        ModelSpec.for_dataset('kmeans_prostate', 'kmeans', datasets['prostate_clustering'], { 'k': 2 } ),
        ModelSpec.for_dataset('glm_prostate_regression', 'glm', datasets['prostate_regression'], {'family': 'gaussian'} ),
        ModelSpec.for_dataset('glm_prostate_binomial', 'glm', datasets['prostate_binomial'], {'family': 'binomial'} ),
        ModelSpec.for_dataset('glm_airlines_binomial', 'glm', datasets['airlines_binomial'], {'response_column': 'IsDepDelayed', 'family': 'binomial' } ),
        ModelSpec.for_dataset('glm_iris_multinomial', 'glm', datasets['iris_multinomial'], {'response_column': 'class', 'family': 'multinomial' } ),
        ModelSpec.for_dataset('deeplearning_prostate_regression', 'deeplearning', datasets['prostate_regression'], { 'epochs': 1, 'loss': 'Quadratic' } ),
        ModelSpec.for_dataset('deeplearning_prostate_binomial', 'deeplearning', datasets['prostate_binomial'], { 'epochs': 1, 'hidden': [20, 20], 'loss': 'CrossEntropy' } ),
        ModelSpec.for_dataset('deeplearning_airlines_binomial', 'deeplearning', datasets['airlines_binomial'], { 'epochs': 1, 'hidden': [10, 10], 'loss': 'CrossEntropy' } ),
        ModelSpec.for_dataset('deeplearning_iris_multinomial', 'deeplearning', datasets['iris_multinomial'], { 'epochs': 1, 'loss': 'CrossEntropy' } ),
        ModelSpec.for_dataset('gbm_prostate_regression', 'gbm', datasets['prostate_regression'], { 'ntrees': 5, 'distribution': 'gaussian' } ),
        ModelSpec.for_dataset('gbm_prostate_binomial', 'gbm', datasets['prostate_binomial'], { 'ntrees': 5, 'distribution': 'multinomial' } ),
        ModelSpec.for_dataset('gbm_airlines_binomial', 'gbm', datasets['airlines_binomial'], { 'ntrees': 5, 'distribution': 'multinomial' } ),
        ModelSpec.for_dataset('gbm_iris_multinomial', 'gbm', datasets['iris_multinomial'], { 'ntrees': 5, 'distribution': 'multinomial' } ),
    ]
    # For grid testing, don't build any non-grid models:
    # models_to_build = []
    # Keep every built model keyed by destination key for later lookups.
    built_models = {}
    for model_spec in models_to_build:
        model = model_spec.build_and_validate_model(a_node)
        built_models[model_spec['dest_key']] = model
    grids_to_build = [
        # setting a hyperparameter in both places:
        # GridSpec.for_dataset('kmeans_prostate_grid', 'kmeans', datasets['prostate_clustering'], { 'k': 6 }, { 'k': [2, 3, 4] } ),
        GridSpec.for_dataset('kmeans_prostate_grid', 'kmeans', datasets['prostate_clustering'], { }, { 'k': [2, 3, 4] } ),
        GridSpec.for_dataset('glm_prostate_regression_grid', 'glm', datasets['prostate_regression'], {'family': 'gaussian'}, { 'lambda': [0.0001, 0.001, 0.01, 0.1] } ),
        GridSpec.for_dataset('glm_prostate_binomial_grid', 'glm', datasets['prostate_binomial'], {'family': 'binomial'}, { 'lambda': [0.0001, 0.001, 0.01, 0.1] } ),
        GridSpec.for_dataset('glm_airlines_binomial_grid', 'glm', datasets['airlines_binomial'], {'response_column': 'IsDepDelayed', 'family': 'binomial'}, { 'lambda': [0.0001, 0.001, 0.01, 0.025] } ),
        GridSpec.for_dataset('glm_iris_multinomial_grid', 'glm', datasets['iris_multinomial'], {'response_column': 'class', 'family': 'multinomial'}, { 'lambda': [0.0001, 0.001, 0.01, 0.025] } ),
        GridSpec.for_dataset('deeplearning_prostate_regression_grid', 'deeplearning', datasets['prostate_regression'], { 'loss': 'Quadratic' }, { 'epochs': [0.1, 0.5, 1] } ),
        GridSpec.for_dataset('deeplearning_prostate_binomial_grid', 'deeplearning', datasets['prostate_binomial'], { 'hidden': [20, 20], 'loss': 'CrossEntropy' }, { 'epochs': [0.1, 0.5, 1] } ),
        GridSpec.for_dataset('deeplearning_airlines_binomial_grid', 'deeplearning', datasets['airlines_binomial'], { 'hidden': [10, 10], 'loss': 'CrossEntropy' }, { 'epochs': [0.1, 0.5, 1] } ),
        GridSpec.for_dataset('deeplearning_iris_multinomial_grid', 'deeplearning', datasets['iris_multinomial'], { 'loss': 'CrossEntropy' }, { 'epochs': [0.1, 0.5, 1] } ),
        GridSpec.for_dataset('gbm_prostate_regression_grid', 'gbm', datasets['prostate_regression'], { 'max_depth': 3 }, { 'ntrees': [1, 5, 10], 'distribution': ["gaussian", "poisson", "gamma", "tweedie"] } ),
        GridSpec.for_dataset('gbm_prostate_binomial_grid', 'gbm', datasets['prostate_binomial'], { }, { 'ntrees': [5, 7], 'max_depth': [1, 3, 5] } ),
        GridSpec.for_dataset('gbm_airlines_binomial_grid', 'gbm', datasets['airlines_binomial'], { 'distribution': 'multinomial' }, { 'ntrees': [1, 5, 10], 'max_depth': [1, 3, 5] } ),
        GridSpec.for_dataset('gbm_iris_multinomial_grid', 'gbm', datasets['iris_multinomial'], { 'distribution': 'multinomial' }, { 'ntrees': [1, 5, 10], 'max_depth': [1, 3, 5] } ),
        # TODO: this should trigger a parameter validation error, but instead the non-grid ntrees silently overrides the grid values: GridSpec.for_dataset('gbm_iris_multinomial_grid', 'gbm', datasets['iris_multinomial'], { 'ntrees': 5, 'distribution': 'multinomial' }, { 'ntrees': [1, 5, 10], 'max_depth': [1, 3, 5] } ),
        # Test stopping criteria:
        GridSpec.for_dataset('gbm_prostate_regression_grid_max_3', 'gbm', datasets['prostate_regression'], { 'max_depth': 3 }, { 'ntrees': [1, 2, 4], 'distribution': ["gaussian", "poisson", "gamma", "tweedie"] }, { 'strategy': "RandomDiscrete", 'max_models': 3 } ),
        GridSpec.for_dataset('gbm_prostate_regression_grid_max_20mS', 'gbm', datasets['prostate_regression'], { 'max_depth': 3 }, { 'ntrees': [1, 2, 4], 'distribution': ["gaussian", "poisson", "gamma", "tweedie"] }, { 'strategy': "RandomDiscrete", 'max_runtime_secs': 0.020 } ),
        GridSpec.for_dataset('gbm_prostate_regression_grid_stopping_deviance', 'gbm', datasets['prostate_regression'], { }, { 'max_depth': [1, 2, 3, 4, 5, 6, 7], 'ntrees': [1, 2, 3, 4, 5, 6], 'distribution': ["gaussian", "poisson", "gamma"] }, { 'strategy': "RandomDiscrete", 'seed': 42, 'stopping_metric': 'deviance', 'stopping_tolerance': 0.00001, 'stopping_rounds': 5 } ),
        GridSpec.for_dataset('gbm_prostate_regression_grid_stopping_auto', 'gbm', datasets['prostate_regression'], { }, { 'max_depth': [1, 2, 3, 4, 5, 6, 7], 'ntrees': [1, 2, 3, 4, 5, 6], 'distribution': ["gaussian", "poisson", "gamma"] }, { 'strategy': "RandomDiscrete", 'seed': 42, 'stopping_metric': 'AUTO', 'stopping_tolerance': 0.00001, 'stopping_rounds': 5 } ),
    ]
    for grid_spec in grids_to_build:
        grid = grid_spec.build_and_validate_grid(a_node)
        # Record every model produced by the grid search as well.
        for model_key in grid['model_ids']:
            model_key = model_key['name']
            built_models[model_key] = a_node.models(key=model_key)
    # test search limits: max_models
    grid = a_node.grid(key='gbm_prostate_regression_grid_max_3')
    assert len(grid['model_ids']) == 3, "FAIL: using max_models, expected a max of 3 models, got: " + str(len(grid['model_ids']))
    # test search limits: max_runtime_secs
    grid = a_node.grid(key='gbm_prostate_regression_grid_max_20mS')
    assert len(grid['model_ids']) < 12, "FAIL: using max_runtime_secs, expected less than 12 models, got: " + str(len(grid['model_ids']))
    # test search limits: stopping_deviance
    grid = a_node.grid(key='gbm_prostate_regression_grid_stopping_deviance')
    deviance_model_count = len(grid['model_ids'])
    assert len(grid['model_ids']) < 126, "FAIL: using asymptotic deviance stopping criterion, expected less than 126 models, got: " + str(len(grid['model_ids']))
    # test search limits: stopping_auto
    grid = a_node.grid(key='gbm_prostate_regression_grid_stopping_auto')
    auto_model_count = len(grid['model_ids'])
    assert len(grid['model_ids']) < 126, "FAIL: using asymptotic auto stopping criterion, expected less than 126 models, got: " + str(len(grid['model_ids']))
    # test that AUTO gave the same answer as deviance
    assert deviance_model_count == auto_model_count, "FAIL: using asymptotic auto stopping criterion, expected the same number of models as deviance, got: " + str(auto_model_count) + " instead of: " + str(deviance_model_count)
    # grid = a_node.grid(key='kmeans_prostate_grid', sort_by='', decreasing=True)
    # Verify the grid endpoint sorts correctly on each metric, both directions.
    h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='totss', decreasing=True)
    h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='tot_withinss', decreasing=True)
    h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='betweenss', decreasing=True)
    h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='totss', decreasing=False)
    h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='tot_withinss', decreasing=False)
    h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='betweenss', decreasing=False)
    # import sys
    # sys.exit(0)
    #######################################
    # Test default parameters validation for each model builder
    #
    if h2o_test_utils.isVerbose(): print('Testing ModelBuilder default parameters. . .')
    model_builders = a_node.model_builders(timeoutSecs=240)['model_builders']
    # Do we know about all of them?
    server_algos = model_builders.keys()
    assert len(set(server_algos) - set(algos)) == 0, "FAIL: Our set of algos doesn't match what the server knows about. Ours: " + repr(algos) + "; server's: " + repr(server_algos)
    for algo, model_builder in model_builders.iteritems():
        parameters_list = model_builder['parameters']
        test_parameters = { value['name'] : value['default_value'] for value in parameters_list } # collect default parameters
        if algo in algo_additional_default_params:
            test_parameters.update(algo_additional_default_params[algo])
        if h2o_test_utils.isVerboser(): print('Testing ' + algo + ' with params: ' + repr(test_parameters))
        parameters_validation = a_node.validate_model_parameters(algo=algo, training_frame=None, parameters=test_parameters, timeoutSecs=240) # synchronous
        assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in good-parameters parameters validation result."
        h2o.H2O.verboseprint("Bad params validation messages: ", repr(parameters_validation))
        expected_count = 0
        if expected_count != parameters_validation['error_count']:
            print("validation errors: ")
            pp.pprint(parameters_validation)
        assert expected_count == parameters_validation['error_count'], "FAIL: " + str(expected_count) + " != error_count in good-parameters parameters validation result."
    #######################################
    # Test DeepLearning parameters validation
    #
    # Default parameters:
    if h2o_test_utils.isVerbose(): print('Testing DeepLearning default parameters. . .')
    model_builder = a_node.model_builders(algo='deeplearning', timeoutSecs=240)['model_builders']['deeplearning']
    dl_test_parameters_list = model_builder['parameters']
    dl_test_parameters = {value['name'] : value['default_value'] for value in dl_test_parameters_list}
    parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame=None, parameters=dl_test_parameters, timeoutSecs=240) # synchronous
    assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in good-parameters parameters validation result."
    h2o.H2O.verboseprint("Bad params validation messages: ", repr(parameters_validation))
    if 0 != parameters_validation['error_count']:
        print("validation errors: ")
        pp.pprint(parameters_validation)
    assert 0 == parameters_validation['error_count'], "FAIL: 0 != error_count in good-parameters parameters validation result."
    # Good parameters (note: testing with null training_frame):
    if h2o_test_utils.isVerbose(): print('Testing DeepLearning good parameters. . .')
    dl_test_parameters = {'response_column': 'CAPSULE', 'hidden': "[10, 20, 10]" }
    parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame=None, parameters=dl_test_parameters, timeoutSecs=240) # synchronous
    assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in good-parameters parameters validation result."
    h2o.H2O.verboseprint("Bad params validation messages: ", repr(parameters_validation))
    if 0 != parameters_validation['error_count']:
        print("validation errors: ")
        pp.pprint(parameters_validation)
    assert 0 == parameters_validation['error_count'], "FAIL: 0 != error_count in good-parameters parameters validation result."
    # Bad parameters (hidden is null):
    # (note: testing with null training_frame)
    if h2o_test_utils.isVerbose(): print('Testing DeepLearning bad parameters, null training_frame. . .')
    dl_test_parameters = {'response_column': 'CAPSULE', 'hidden': "[10, 20, 10]", 'input_dropout_ratio': 27 }
    parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame=None, parameters=dl_test_parameters, timeoutSecs=240) # synchronous
    assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in bad-parameters parameters validation result (input_dropout_ratio)."
    h2o.H2O.verboseprint("Good params validation messages: ", repr(parameters_validation))
    assert 0 != parameters_validation['error_count'], "FAIL: 0 == error_count in bad-parameters parameters validation result: " + repr(parameters_validation)
    found_expected_error = False
    for validation_message in parameters_validation['messages']:
        if validation_message['message_type'] == 'ERRR' and validation_message['field_name'] == 'input_dropout_ratio':
            found_expected_error = True
    assert found_expected_error, "FAIL: Failed to find error message about input_dropout_ratio in the validation messages."
    # Bad parameters (no response_column):
    if h2o_test_utils.isVerbose(): print('Testing DeepLearning bad parameters, null response_column. . .')
    dl_test_parameters = {'hidden': "[10, 20, 10]" }
    parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame='prostate_binomial', parameters=dl_test_parameters, timeoutSecs=240) # synchronous
    assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in bad-parameters parameters validation result (response_column)."
    h2o.H2O.verboseprint("Good params validation messages: ", repr(parameters_validation))
    assert 0 != parameters_validation['error_count'], "FAIL: 0 == error_count in bad-parameters parameters validation result: " + repr(parameters_validation)
    #######################################
    # Try to build DeepLearning model for Prostate but with bad parameters; we should get a ModelParametersSchema with the error.
    if h2o_test_utils.isVerbose(): print('About to try to build a DeepLearning model with bad parameters. . .')
    dl_prostate_bad_parameters = {'response_column': 'CAPSULE', 'hidden': "[10, 20, 10]", 'input_dropout_ratio': 27 }
    parameters_validation = a_node.build_model(algo='deeplearning', model_id='deeplearning_prostate_binomial_bad', training_frame='prostate_binomial', parameters=dl_prostate_bad_parameters, timeoutSecs=240) # synchronous
    h2o_test_utils.validate_validation_messages(parameters_validation, ['input_dropout_ratio'])
    assert parameters_validation['__http_response']['status_code'] == requests.codes.precondition_failed, "FAIL: expected 412 Precondition Failed from a bad build request, got: " + str(parameters_validation['__http_response']['status_code'])
    if h2o_test_utils.isVerbose(): print('Done trying to build DeepLearning model with bad parameters.')
    #####################################
    # Early test of predict()
    # TODO: remove after we remove the early exit
    p = a_node.predict(model='deeplearning_airlines_binomial', frame='airlines_binomial', predictions_frame='deeplearning_airlines_binomial_predictions')
    h2o_test_utils.validate_predictions(a_node, p, 'deeplearning_airlines_binomial', 'airlines_binomial', 43978, predictions_frame='deeplearning_airlines_binomial_predictions')
    h2o_test_utils.validate_frame_exists(a_node, 'deeplearning_airlines_binomial_predictions')
    h2o.H2O.verboseprint("Predictions for scoring: ", 'deeplearning_airlines_binomial', " on: ", 'airlines_binomial', ": ", repr(p))
    # print(h2o_test_utils.dump_json(p))
|
h2oai/h2o-dev
|
py/rest_tests/test_models.py
|
Python
|
apache-2.0
| 16,970
|
[
"Gaussian"
] |
05d4e460f3fcb4dc7cdc51a6d455a0318caa7a75e97eaba6f91fc8b9e4c850c3
|
#
# unit tests for edgegrid. runs tests from testdata.json
#
# Original author: Jonathan Landis <jlandis@akamai.com>
#
# For more information visit https://developer.akamai.com
# Copyright 2014 Akamai Technologies, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import requests
import sys
import traceback
import unittest
# Major Python version (2 or 3); used to pick the assertRegex spelling below.
PY_VER = sys.version_info[0]
if sys.version_info[0] == 3:
    # python3
    from urllib.parse import urljoin
else:
    # python2.7
    from urlparse import urljoin
from akamai.edgegrid import EdgeGridAuth, EdgeRc
import akamai.edgegrid.edgegrid as eg
# Directory containing this test file and its fixtures (sample_edgerc etc.)
mydir=os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
class EdgeGridTest(unittest.TestCase):
    """Data-driven test case: one instance per entry in testdata.json.

    Builds a request from the testcase description and checks that the
    computed EdgeGrid Authorization header (or the failure message) matches
    the expected value recorded in the test data.
    """
    def __init__(self, testdata=None, testcase=None):
        # shared fixture values (tokens, timestamp, nonce, base_url)
        super(EdgeGridTest, self).__init__()
        self.testdata = testdata
        self.testcase = testcase
        self.maxDiff = None

    def runTest(self):
        auth = EdgeGridAuth(
            client_token=self.testdata['client_token'],
            client_secret=self.testdata['client_secret'],
            access_token=self.testdata['access_token'],
            headers_to_sign=self.testdata['headers_to_sign'],
            max_body=self.testdata['max_body']
        )

        # Flatten the list-of-dicts header spec from the test data.
        headers = { }
        if 'headers' in self.testcase['request']:
            for h in self.testcase['request']['headers']:
                for k,v in h.items():
                    headers[k] = v

        request = requests.Request(
            method=self.testcase['request']['method'],
            url=urljoin(self.testdata['base_url'],self.testcase['request']['path']),
            headers=headers,
            data=self.testcase['request'].get('data') if self.testcase['request'].get('data') \
                else None
        )

        try:
            # Fixed timestamp/nonce make the signature deterministic.
            auth_header = auth.make_auth_header(
                request.prepare(), self.testdata['timestamp'], self.testdata['nonce']
            )
        except Exception as e:
            logger.debug('Got exception from make_auth_header', exc_info=True)
            # Negative testcases record the expected failure message instead.
            self.assertEqual(str(e), self.testcase['failsWithMessage'])
            return

        self.assertEqual(auth_header, self.testcase['expectedAuthorization'])
class EGSimpleTest(unittest.TestCase):
    """Unit tests for the helper functions and EdgeRc-based construction."""

    def test_nonce(self):
        """Generated nonces must be unique across many calls."""
        count = 100
        nonces = set()
        while count > 0:
            n = eg.new_nonce()
            self.assertNotIn(n, nonces)
            # BUGFIX: the nonce was never recorded, so the assertNotIn
            # check above was vacuous -- every nonce was compared against
            # an empty set.
            nonces.add(n)
            count -= 1

    def test_timestamp(self):
        """Timestamps must match the EdgeGrid yyyymmddThh:mm:ss+0000 format."""
        valid_timestamp = re.compile(r"""
        ^
            \d{4} # year
            [0-1][0-9] # month
            [0-3][0-9] # day
            T
            [0-2][0-9] # hour
            :
            [0-5][0-9] # minute
            :
            [0-5][0-9] # second
            \+0000 # timezone
        $
        """, re.VERBOSE)
        if PY_VER >= 3:
            self.assertRegex(eg.eg_timestamp(), valid_timestamp)
        else:
            # assertRegexpMatches was renamed in Python 3
            self.assertRegexpMatches(eg.eg_timestamp(), valid_timestamp)

    def test_defaults(self):
        """Constructor defaults: 128k max body, no extra signed headers."""
        auth = EdgeGridAuth(
            client_token='xxx', client_secret='xxx', access_token='xxx'
        )
        self.assertEqual(auth.max_body, 131072)
        self.assertEqual(auth.headers_to_sign, [])

    def test_edgerc_default(self):
        """The [default] section of sample_edgerc populates all fields."""
        auth = EdgeGridAuth.from_edgerc(os.path.join(mydir, 'sample_edgerc'))
        self.assertEqual(auth.client_token, 'xxxx-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx')
        self.assertEqual(auth.client_secret, 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=')
        self.assertEqual(auth.access_token, 'xxxx-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx')
        self.assertEqual(auth.max_body, 131072)
        self.assertEqual(auth.headers_to_sign, [])

    def test_edgerc_broken(self):
        """A section with missing values still yields sensible settings."""
        auth = EdgeGridAuth.from_edgerc(os.path.join(mydir, 'sample_edgerc'), 'broken')
        self.assertEqual(auth.client_secret, 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=')
        self.assertEqual(auth.access_token, 'xxxx-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx')
        self.assertEqual(auth.max_body, 128*1024)
        self.assertEqual(auth.headers_to_sign, [])

    def test_edgerc_unparseable(self):
        """An unparseable edgerc file must raise."""
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed.
        try:
            EdgeGridAuth.from_edgerc(os.path.join(mydir, 'edgerc_that_doesnt_parse'))
            self.fail("should have thrown an exception")
        except Exception:
            pass

    def test_edgerc_headers(self):
        """headers_to_sign is read from the [headers] section."""
        auth = EdgeGridAuth.from_edgerc(os.path.join(mydir, 'sample_edgerc'), 'headers')
        self.assertEqual(auth.headers_to_sign, ['x-mything1', 'x-mything2'])

    def test_get_header_versions(self):
        """User-Agent is augmented from the AKAMAI_CLI* environment vars."""
        auth = EdgeGridAuth.from_edgerc(os.path.join(mydir, 'sample_edgerc'), 'headers')
        # No env vars and no existing header: nothing is added.
        header = auth.get_header_versions()
        self.assertFalse('user-agent' in header)
        header = auth.get_header_versions({'User-Agent': 'testvalue'})
        self.assertTrue('User-Agent' in header)
        # CLI env vars append an AkamaiCLI/<version> token.
        os.environ["AKAMAI_CLI"] = '1.0.0'
        os.environ["AKAMAI_CLI_VERSION"] = '1.0.0'
        header = auth.get_header_versions()
        self.assertTrue('User-Agent' in header)
        self.assertEqual(header['User-Agent'], ' AkamaiCLI/1.0.0')
        os.environ["AKAMAI_CLI_COMMAND"] = '1.0.0'
        os.environ["AKAMAI_CLI_COMMAND_VERSION"] = '1.0.0'
        header = auth.get_header_versions()
        self.assertTrue('User-Agent' in header)
        self.assertEqual(header['User-Agent'], ' AkamaiCLI/1.0.0 AkamaiCLI-1.0.0/1.0.0')
        header = auth.get_header_versions({'User-Agent': 'testvalue'})
        self.assertTrue('User-Agent' in header)
        self.assertEqual(header['User-Agent'], 'testvalue AkamaiCLI/1.0.0 AkamaiCLI-1.0.0/1.0.0')
        # Clean up so later tests are not affected by the env vars.
        del os.environ['AKAMAI_CLI']
        del os.environ['AKAMAI_CLI_VERSION']
        del os.environ['AKAMAI_CLI_COMMAND']
        del os.environ['AKAMAI_CLI_COMMAND_VERSION']
        self.assertFalse('AKAMAI_CLI' in os.environ)
        self.assertFalse('AKAMAI_CLI_VERSION' in os.environ)
        self.assertFalse('AKAMAI_CLI_COMMAND' in os.environ)
        self.assertFalse('AKAMAI_CLI_COMMAND_VERSION' in os.environ)

    def test_edgerc_from_object(self):
        """from_edgerc also accepts an already-constructed EdgeRc object."""
        auth = EdgeGridAuth.from_edgerc(EdgeRc(os.path.join(mydir, 'sample_edgerc')))
        self.assertEqual(auth.client_token, 'xxxx-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx')
        self.assertEqual(auth.client_secret, 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=')
        self.assertEqual(auth.access_token, 'xxxx-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx')
        self.assertEqual(auth.max_body, 131072)
        self.assertEqual(auth.headers_to_sign, [])

    def test_edgerc_dashes(self):
        """Option names with dashes (max-body) are normalized."""
        auth = EdgeGridAuth.from_edgerc(os.path.join(mydir, 'sample_edgerc'), 'dashes')
        self.assertEqual(auth.max_body, 128*1024)
class JsonTest(unittest.TestCase):
    """Checks that a request built with the `json=` kwarg signs correctly.

    The expected hash is recorded in testdata.json as 'jsontest_hash'.
    """
    def __init__(self, testdata=None, testcase=None):
        super(JsonTest, self).__init__()
        self.testdata = testdata
        self.testcase = testcase
        self.maxDiff = None

    def runTest(self):
        auth = EdgeGridAuth(
            client_token=self.testdata['client_token'],
            client_secret=self.testdata['client_secret'],
            access_token=self.testdata['access_token'],
        )

        params = {
            'extended': 'true',
        }
        data = {
            'key':'value',
        }
        # json= makes requests serialize the body and set the content type;
        # the signature must cover that serialized body.
        request = requests.Request(
            method='POST',
            url=urljoin(self.testdata['base_url'],'/testapi/v1/t3'),
            params=params,
            json=data,
        )

        auth_header = auth.make_auth_header(
            request.prepare(), self.testdata['timestamp'], self.testdata['nonce']
        )
        self.assertEqual(auth_header, self.testdata['jsontest_hash'])
def suite():
    """Assemble the full suite: data-driven cases plus the simple tests."""
    result = unittest.TestSuite()
    with open("%s/testdata.json" % mydir) as fp:
        testdata = json.load(fp)
    # Split the per-case entries out of the shared fixture data.
    cases = testdata.pop('tests')
    for case in cases:
        result.addTest(EdgeGridTest(testdata, case))
    result.addTest(JsonTest(testdata))
    # NOTE: test_edgerc_dashes is deliberately not in this list (matching
    # the original suite contents).
    for name in ('test_nonce',
                 'test_timestamp',
                 'test_defaults',
                 'test_edgerc_default',
                 'test_edgerc_broken',
                 'test_edgerc_unparseable',
                 'test_edgerc_headers',
                 'test_get_header_versions',
                 'test_edgerc_from_object'):
        result.addTest(EGSimpleTest(name))
    return result
def load_tests(loader=None, tests=None, pattern=None):
    """unittest load_tests protocol hook; delegates to suite()."""
    return suite()
if __name__ == '__main__':
    # Run the full suite directly when executed as a script.
    runner = unittest.TextTestRunner().run(suite())
|
akamai-open/AkamaiOPEN-edgegrid-python
|
akamai/edgegrid/test/test_edgegrid.py
|
Python
|
apache-2.0
| 9,332
|
[
"VisIt"
] |
c60881ee8909215949fd97b3e56dbb5df5d9a5cdfbe9259387be0f2bdb0278d8
|
"""Visual effects"""
import pygame
try:
_arraytype = pygame.surfarray.get_arraytype()
if _arraytype == 'numeric':
from Numeric import UInt8 as uint8, minimum, array, Float32 as float32, Int32 as int32
elif _arraytype == 'numpy':
from numpy import uint8, minimum, array, float32, int32
Numeric = True
except (ImportError, NotImplementedError):
Numeric = None
import serge.visual
import serge.render
log = serge.common.getLogger('visual')
if not Numeric:
ShadowLayer = lambda a, b, c, d: serge.render.Layer(a, b)
else:
class Shadow(serge.visual.SurfaceDrawing):
"""Creates a shadow from an image"""
def __init__(self, source, colour):
"""Initialise the Shadow"""
super(Shadow, self).__init__(*source.get_size())
self._source = source
self._colour = colour
self.createShadow()
def createShadow(self):
"""Create the shadow now
Most of the logic here from http://pygame.org/wiki/ShadowEffects
"""
#
# Create alpha of main image
image = self._source
ambience = float(255-self._colour[3])/255
if image.get_masks()[3] != 0:
image_alpha = pygame.surfarray.pixels_alpha(image)
if ambience > 0.0:
shadow_alpha = (image_alpha *
(1.0 - ambience)).astype(uint8)
else:
shadow_alpha = image_alpha
elif image.get_colorkey() is not None:
image_alpha = pygame.surfarray.array_colorkey(image)
image.unlock(); image.unlock() # pygame 1.7 bug (fixed in 1.8).
surface_alpha = image.get_alpha()
if surface_alpha is not None:
# Do what array_colorkey should have done: use surface alpha!
Numeric.minimum(image_alpha, surface_alpha, image_alpha)
if ambience > 0.0:
shadow_alpha = (image_alpha *
(1.0 - ambience)).astype(uint)
else:
shadow_alpha = image_alpha
else:
image_alpha = image.get_alpha()
if image_alpha is None:
image_alpha = 255
shadow_alpha = int(image_alpha * (1.0 - ambience))
#
# Make the shadow
shadow = image.convert_alpha()
shading = self.getSurface()
shading.fill(self._colour)
pygame.surfarray.pixels_alpha(shading)[...] = image_alpha
shadow.blit(shading, (0, 0))
pygame.surfarray.pixels_alpha(shadow)[...] = shadow_alpha
#
self.surface = shadow
    class ShadowLayer(serge.render.Layer):
        """A layer that renders with a shadow beneath it"""

        def __init__(self, name, order, colour, offset):
            """Initialise the ShadowLayer

            :param name: layer name
            :param order: rendering order
            :param colour: RGBA shadow colour
            :param offset: (x, y) offset at which the shadow is blitted
            """
            super(ShadowLayer, self).__init__(name, order)
            self._offset = offset
            self._colour = colour

        def initSurface(self, renderer):
            """Initialise the surface"""
            super(ShadowLayer, self).initSurface(renderer)
            # The shadow is built from our own surface so it tracks contents
            self._shadow = Shadow(self.getSurface(), self._colour)

        def render(self, surface):
            """Render to a surface

            When rendering to the surface we first create our shadow then
            render this to the surface followed by our normal rendering.
            """
            self._shadow.createShadow()
            surface.blit(self._shadow.surface, self._offset)
            super(ShadowLayer, self).render(surface)
class FadingLayer(serge.render.Layer):
    """A layer that you can fade in and out"""

    def __init__(self, name, order):
        """Initialise the layer"""
        super(FadingLayer, self).__init__(name, order)
        # 255 = fully visible, 0 = fully faded out
        self.visibility = 255

    def postRender(self):
        """After rendering the surface"""
        v = 255-self.visibility
        if v:
            # Subtract a uniform RGBA amount to fade the whole surface
            self.getSurface().fill((v, v, v, v), special_flags=pygame.BLEND_RGBA_SUB)
class FadingScreen(object):
    """Fade in and out everything"""

    def __init__(self):
        """Initialise the layer"""
        # 255 = fully visible, 0 = fully faded out
        self.visibility = 255
        self.renderer = serge.engine.CurrentEngine().getRenderer()
        # Hook the fade in after every render pass
        self.renderer.linkEvent(serge.events.E_AFTER_RENDER, self.postRender)

    def postRender(self, obj, arg):
        """After rendering the surface"""
        v = 255-self.visibility
        if v:
            # Subtract a uniform RGBA amount from the whole screen surface
            self.renderer.getSurface().fill((v, v, v, v), special_flags=pygame.BLEND_RGBA_SUB)

    def deleteFade(self):
        """Remove the fade"""
        self.renderer.unlinkEvent(serge.events.E_AFTER_RENDER, self.postRender)
def darkenSurf2(img, amount):
    """Darken the given surface by the given amount

    :param img: pygame surface with per-pixel alpha
    :param amount: 0-255, how much to darken towards black
    :returns: a new darkened surface (alpha preserved), or *img* unchanged
        if the conversion repeatedly fails
    """
    import numpy
    alpha = pygame.surfarray.pixels_alpha(img)
    rgbarray = pygame.surfarray.array3d(img)
    src = numpy.array(rgbarray)
    dest = numpy.zeros(rgbarray.shape)
    # Use the cross-fade technique (found in pygame documentation) to
    # darken the image.
    dest[:] = (0, 0, 0)
    diff = (dest - src) * (amount/255.0)
    new = src + diff.astype(numpy.uint8)
    try:
        newsurf = pygame.surfarray.make_surface(new).convert_alpha()
    except Exception, err:
        # For some reason this occasionally fails - give up trying to darken
        # the image. We will end up with a bright person! Seems to be an error in pygame 1.9.1
        # http://archives.seul.org/pygame/users/Apr-2011/msg00072.html
        log.error('Convert Alpha issue on %s: %s' % (img, err))
        #
        # Try again?!
        #import pdb; a = pdb.Pdb()
        try:
            newsurf = pygame.surfarray.make_surface(new).convert_alpha()
        except Exception, err:
            return img
        log.error('Retrying seemed to succeed')
    #
    # Restore the original per-pixel alpha on the darkened copy.
    pygame.surfarray.pixels_alpha(newsurf)[:] = alpha
    return newsurf
def darkenSurf(img, amount):
    """Return a copy of *img* darkened by *amount* (0-255)."""
    # Multiplying by a uniform grey scales every colour channel down.
    shade = 255 - amount
    mask = pygame.surface.Surface((img.get_width(), img.get_height()))
    mask.fill((shade, shade, shade, shade))
    #
    darkened = img.copy()
    darkened.blit(mask, (0, 0), special_flags=pygame.BLEND_RGB_MULT)
    #
    return darkened
def fadeSurface(surface, v):
    """Fade the given suface by an amount 0 to 255 - 0 is completely faded"""
    # Subtracting a uniform RGBA amount fades the surface in place.
    fade = (v, v, v, v)
    surface.fill(fade, special_flags=pygame.BLEND_RGBA_SUB)
    return surface
def gaussianBlur(surface, sigma):
    """This function takes a pygame surface, converts it to a numpy array
    carries out gaussian blur, converts back then returns the pygame surface.

    :param surface: pygame surface with per-pixel alpha
    :param sigma: gaussian blur standard deviation (pixels)
    :returns: a new blurred surface (colour and alpha both blurred)
    """
    from scipy import signal, ndimage
    # Convert to a NumPy array.
    # In theory this should be able to be surfarray.pixels3d fro direct access.
    np_array = pygame.surfarray.array3d(surface)
    alpha = pygame.surfarray.pixels_alpha(surface)
    # Filter the image - sigma 0 on the last axis keeps channels independent
    result = ndimage.filters.gaussian_filter(np_array,
                        sigma=(sigma, sigma, 0),
                        order=0,
                        mode='reflect'
                        )
    #import pdb; pdb.set_trace()
    # Blur the alpha channel separately so edges soften too
    new_alpha = ndimage.filters.gaussian_filter(alpha, sigma=(sigma, sigma), order=0, mode='reflect')
    # Convert back to a surface. ... seems to periodically fail
    try:
        surf = pygame.surfarray.make_surface(result).convert_alpha()
    except:
        # NOTE(review): bare retry mirrors darkenSurf2's pygame 1.9.1
        # workaround -- presumably the second attempt usually succeeds
        surf = pygame.surfarray.make_surface(result).convert_alpha()
    #
    pygame.surfarray.pixels_alpha(surf)[:] = new_alpha
    return surf
|
smmosquera/serge
|
blocks/visualeffects.py
|
Python
|
lgpl-3.0
| 7,901
|
[
"Gaussian"
] |
e5e7f5b8861b53c2d37f612e97b116664f0776c0e4b0854cdcf7bbc745a3129f
|
#!/usr/bin/env python2
'''
This script collects PDBs corresponding to certain TCIDs (of any level) and assigns TMSs based on STRIDE and PDBTM (and a pinch of geometry)
'''
from __future__ import print_function, division
import xml.etree.ElementTree as ET
import os, re, subprocess, sys
import numpy as np
import yaml
import Bio.PDB
DEBUG = 1  # nonzero enables debugging behaviour
VERBOSITY = 0  # verbosity level for progress reporting
MAX_POSS_HELIX = 50  # upper bound on candidate helices -- TODO confirm exact meaning
def status(line='done!'):
    '''
    prints 'done!' or anything else, really. Improves greppability when
    debugging with print statements
    '''
    print(line, file=sys.stderr)
def info(*lines):
    '''
    prints INFO text to stderr
    '''
    # One tagged line per argument.
    for message in lines:
        print('[INFO]:', message, file=sys.stderr)
def warn(*lines):
    '''
    prints WARNING text to stderr
    '''
    # One tagged line per argument.
    for message in lines:
        print('[WARNING]:', message, file=sys.stderr)
def error(*lines):
    '''
    prints error text to stderr and exits

    :raises SystemExit: always, after printing
    '''
    for l in lines: print('[ERROR]:', l, file=sys.stderr)
    # sys.exit() instead of the interactive-only exit() builtin, which is
    # injected by the site module and not guaranteed to exist.
    sys.exit()
def progress(*lines):
    '''
    prints INFO text to stderr without a trailing newline
    '''
    # end=' ' keeps subsequent output on the same line.
    for message in lines:
        print('[INFO]:', message, end=' ', file=sys.stderr)
def prompt(line, default=None):
    '''
    does y/n prompts

    :param line: question text shown to the user
    :param default: None (keep asking until y/n), True (anything but 'n'
        means yes) or False (anything but 'y' means no)
    :returns: True or False
    '''
    # Loop until an answer can be interpreted (only possible with
    # default=None; the other branches always return).
    while 1:
        if default is None:
            x = raw_input('%s [y/n] ' % str(line))
            if x.lower().strip().startswith('y'): return True
            elif x.lower().strip().startswith('n'): return False
        elif default is True:
            x = raw_input('%s [Y/n] ' % str(line))
            if x.lower().strip().startswith('n'): return False
            else: return True
        elif default is False:
            x = raw_input('%s [y/N] ' % str(line))
            if x.lower().strip().startswith('y'): return True
            else: return False
class Chain(object):
    '''
    container for chain objects
    '''

    def __init__(self, id):
        '''Store the identifier and start with no sequence or TM regions'''
        self.id = id
        self.seq = ''   # amino acid sequence
        self.tmh = []   # helical/coil TM regions as (start, end) pairs
        self.tms = []   # beta-strand TM regions as (start, end) pairs
class PDB:
    '''
    container for PDB objects

    Parses a PDBTM XML entry and exposes the transmembrane chains as
    Chain objects keyed by chain id.
    '''
    def __init__(self, fn):
        # :param fn: path to the PDBTM XML file for one structure
        self.chains = {}
        self.parse_xml(fn)

    def parse_xml(self, fn):
        '''
        parse pdbtm entries built by pdbtmtop/dbtool.py (which cleans up the anomalous XML in places)

        :param fn: path to the XML file; populates self.id and self.chains
        '''
        self.tree = ET.parse(fn)
        self.root = self.tree.getroot()
        self.id = self.root.attrib['ID']
        ###<BIOMATRIX> <APPLY_TO_CHAIN_CHAINID="A" NEW_CHAINID="D"> for example
        # Chains generated/deleted by the biomatrix are collected here and
        # dropped at the end so only the asymmetric unit chains remain.
        removeme = set()
        for x in self.root:
            if x.tag.endswith('CHAIN'):
                chainid = x.attrib['CHAINID']
                # Only transmembrane chains are of interest.
                if x.attrib['TYPE'] == 'non_tm': continue
                for y in x:
                    if y.tag.endswith('SEQ'):
                        # Selenocysteine (U) becomes X; strip everything
                        # that is not an uppercase letter or newline.
                        seq = y.text.replace('U', 'X')
                        seq = re.sub('[^\nA-Z]', '', seq).strip()
                        # All-X sequences carry no information: skip chain.
                        if not seq.replace('X', '').strip(): break

                        self.chains[chainid] = Chain(self.id + '_' + chainid)
                        self.chains[chainid].seq = seq
                    elif y.tag.endswith('REGION'):
                        # H = alpha helix, C = coil: helical TM regions;
                        # B = beta strand. Coordinates converted to 0-based.
                        if y.attrib['type'] == 'H' or y.attrib['type'] == 'C':
                            self.chains[chainid].tmh.append((int(y.attrib['seq_beg'])-1, int(y.attrib['seq_end'])-1))
                        if y.attrib['type'] == 'B':
                            self.chains[chainid].tms.append((int(y.attrib['seq_beg'])-1, int(y.attrib['seq_end'])-1))
                        #print(dir(y))
            elif x.tag.endswith('BIOMATRIX'):
                for y in x:
                    if y.tag.endswith('DELETE'): removeme.add(y.attrib['CHAINID'])
                    elif y.tag.endswith('MATRIX'):
                        for z in y:
                            if z.tag.endswith('APPLY_TO_CHAIN'):
                                removeme.add(z.attrib['NEW_CHAINID'])
        for c in list(removeme):
            try: self.chains.pop(c)
            except KeyError: continue

    def cat(self):
        '''
        dumps the info

        :returns: FASTA-formatted string of all retained chains
        '''
        out = ''
        for chain in self.chains:
            out += '>%s\n%s\n' % (self.chains[chain].id, self.chains[chain].seq)
        return out.strip()
class BLAST:
    """Parse and query blastp tabular (outfmt 7) results.

    self.hits maps query -> {target: record}, where record is the tuple
    (pident, length, mismatch, gapopen, qstart, qend, sstart, send,
    evalue, bitscore).
    """
    def __init__(self):
        self.hits = {}
    #def blast(self, query): pass
    #    #blastp -db tcdb -comp_based_stats no -outfmt 7 -max_target_seqs 3 < pdbtm.fa
    def parse7(self, results, minl=60, evalue=1e-5):
        """Parse outfmt-7 text (string or line iterable) into self.hits.

        Alignments shorter than `minl` or weaker than `evalue` are dropped.
        Once a query produces a >=95%-identity hit, its remaining lines are
        skipped, and at the end each query keeps only its single best target
        (largest pident*length product).
        May be deprecated in favor of Biopython's BLAST results parser.
        """
        lines = iter(results.split('\n')) if type(results) is str else results
        skip_query = ''
        for raw in lines:
            stripped = raw.strip()
            if not stripped or stripped.startswith('#'):
                continue
            fields = stripped.split()
            if fields[0] == skip_query:
                continue
            record = (float(fields[2]),)
            record += tuple(int(x) for x in fields[3:10])
            record += (float(fields[10]), float(fields[11]))
            if record[1] < minl:
                continue
            if record[8] > evalue:
                continue
            if record[0] >= 95:
                # near-identical hit found: ignore this query's later lines
                skip_query = fields[0]
            self.hits.setdefault(fields[0], {})[fields[1]] = record
        # keep only the best target per query
        for query in self.hits:
            if len(self.hits[query]) > 1:
                ranked = sorted(
                    ((self.hits[query][t][0] * self.hits[query][t][1], t) for t in self.hits[query]),
                    reverse=True)
                best = ranked[0][1]
                self.hits[query] = {best: self.hits[query][best]}
    def by_target(self, namestart):
        """Return the subset of hits whose target name starts with `namestart`."""
        out = {}
        for query in self.hits:
            for target in self.hits[query]:
                if target.startswith(namestart):
                    out.setdefault(query, {})[target] = self.hits[query][target]
        return out
class Protocol1:
    '''
    wrapper for doing its Protocol1-like task

    Drives the whole pipeline: parse the PDBTM database, BLAST its sequences
    against TCDB, fetch the matching PDB structures, assign TMSs and cut
    loop-reduced PDBs.  State is cached under <outdir> (deuterocol1.yaml,
    blastp.tbl, pdbs_raw/, derp/, pdbs_loopless/).
    '''
    def __init__(self, pdbtmdir, outdir, force=False, offline=False):
        '''
        sets up the workspace

        :param pdbtmdir: directory containing the PDBTM XML entries
        :param outdir: working/output directory (created if missing)
        :param force: re-run BLAST even if a cached blastp.tbl exists
        :param offline: never touch the network; rely on cached files only
        '''
        self.pdbs = []          # PDB objects parsed from pdbtmdir
        self.outdir = outdir
        self.pdbtmdir = pdbtmdir
        self.force = force
        self.helices = {}       # pdb_chain -> list of TMS intervals
        if not os.path.isdir(outdir): os.mkdir(outdir)
        self.ntmss = {}         # pdb_chain -> number of assigned TMSs
        self.offline = offline
        self.blast = BLAST()
        self.hits = []          # pdb_chain IDs with acceptable TCDB hits
        self.lengths = {}       # pdb_chain -> sequence length
        self.fams = set()       # TCID prefixes requested so far
        # restore cached state from a previous run, if the YAML exists
        try:
            with open('%s/deuterocol1.yaml' % self.outdir) as f:
                d1 = yaml.safe_load(f)
                self.lengths = d1['lengths']
                self.fams = set(d1['fams'])
        except IOError: pass
    def blast_pdbs(self):
        '''
        blasts all PDBs in PDBTM against TCDB
        TODO: optimize away identical sequences when possible, e.g. by collapsing them by BLASTing them against PDB to resolve to a single sequence and copying the results with modified query fields
        '''
        fastas = ''
        # decide whether to actually run blastp or reuse the cached table
        if self.offline: blasting = 0
        elif self.force: blasting = 1
        else:
            if os.path.isfile('%s/blastp.tbl' % self.outdir) and os.path.getsize('%s/blastp.tbl' % self.outdir):
                #blasting = prompt('[WARNING]: Found an existing blastp table. Overwrite?', default=False)
                blasting = 0
            else: blasting = 1
        if VERBOSITY: progress('Checking PDBTM database...')
        pdb = ''
        # parse every XML entry and record per-chain sequence lengths
        for basename in os.listdir(self.pdbtmdir):
            if basename.lower().endswith('xml'):
                self.pdbs.append(PDB(self.pdbtmdir + '/' + basename))
                seqs = self.pdbs[-1].cat().split('\n')
                for seg in seqs:
                    if seg.startswith('>'):
                        pdb = seg[1:]
                        self.lengths[pdb] = 0
                    else: self.lengths[pdb] += len(seg)
        if VERBOSITY: status()
        self.dump_inputs()
        if blasting:
            # pipe all sequences through blastp and cache the tabular output
            for pdb in self.pdbs:
                fastas += pdb.cat() + '\n'
            if VERBOSITY: progress('BLASTing %d sequences...' % len(self.pdbs))
            p = subprocess.Popen(['blastp', '-db', 'tcdb', '-comp_based_stats', 'no', '-outfmt', '7'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            out, err = p.communicate(input=fastas)
            with open('%s/blastp.tbl' % self.outdir, 'w') as f: f.write(out)
            if VERBOSITY: status()
        else:
            # reuse cached lengths where possible, otherwise persist fresh ones
            try:
                with open('%s/deuterocol1.yaml' % self.outdir) as f: self.lengths = yaml.safe_load(f)['lengths']
            except IOError: self.dump_inputs()
        with open('%s/blastp.tbl' % self.outdir) as f: self.blast.parse7(f)
    def get_queries(self, startswith):
        '''
        download the PDBs with decent correspondences to TCDB sequences
        TODO (low): skip using wget by figuring out why urllib2 misbehaves with HTTPS URLs on the Macs

        :param startswith: TCID prefix to select targets (e.g. '1.A.24.')
        :returns: self.hits, updated in place
        '''
        self.fams.add(startswith)
        self.dump_inputs()
        # collect queries matching the family; '_'-suffixed IDs are artifacts
        for q in self.blast.by_target(startswith):
            if q not in self.hits and not q.endswith('_'):
                self.hits.append(q)
        #if self.force: write = 1
        #elif os.path.isfile('%s/pdblist.wget' % self.outdir): write = 0
        #else: write = 1
        write = 1
        # NOTE(review): `write` is a dead leftover of the commented logic above
        pdbs = []
        for chain in self.hits:
            if chain[:4] not in pdbs: pdbs.append(chain[:4])
        with open('%s/pdblist.wget' % self.outdir, 'w') as f:
            for pdb in pdbs:
                f.write('https://files.rcsb.org/view/%s.pdb\n' % pdb[:4])
        # -nc makes wget skip files that are already cached in pdbs_raw/
        if not self.offline: subprocess.check_output(['wget', '--no-check-certificate', '-nc', '-i', '%s/pdblist.wget' % self.outdir, '-P', '%s/pdbs_raw' % self.outdir])
        #if offline:
        #
        #	cache = os.listdir('%s/pdbs_raw' % self.outdir)
        #	missing = []
        #	for fn in cache:
        #		if fn.endswith('pdb') and not os.path.getsize('%s/pdbs_raw/%s' % (self.outdir, fn)): missing.append(fn)
        #	#if missing: raise IOError('Cannot run offline: Could not find %s' % missing)
        #	for pdb in pdbs:
        #		if not os.path.getsize('%s/pdbs_raw/%s.pdb' % (self.outdir, pdb)): missing.append(pdb)
        #	#if missing: raise IOError('Cannot run offline: Could not find PDBs for %s' % missing)
        # drop theoretical models: they carry no trustworthy coordinates
        removeme = []
        for pdb in pdbs:
            with open('%s/pdbs_raw/%s.pdb' % (self.outdir, pdb)) as f:
                for l in f:
                    if 'THEORETICAL MODEL' in l:
                        removeme.append(pdb)
                        break
        removemefinal = []
        for pdb in removeme:
            for x in self.hits:
                if x.startswith(pdb):
                    removemefinal.append(x)
        for x in removemefinal:
            self.hits.remove(x)
        return self.hits
    def assign_helices(self):
        '''
        integrate STRIDE (assigns many small helices) and PDBTM (assigns correct but incomplete TMSs) definitions to get full TMSs

        Fills self.helices and self.ntmss per hit; hits whose STRIDE run
        fails are removed from self.hits.
        '''
        if not os.path.isdir('%s/derp' % self.outdir): os.mkdir('%s/derp' % self.outdir)
        if VERBOSITY: progress('Computing helices...')
        removeme = []
        for pdb in self.hits:
            # NOTE(review): fn is computed but unused below
            fn = '%s/pdbs_raw/%s.pdb' % (self.outdir, pdb[:4])
            try:
                d = DERP(pdb, self.outdir, self.pdbtmdir)
                self.helices[pdb] = d.get_helices()
                self.ntmss[pdb] = d.get_ntmss()
            except subprocess.CalledProcessError: removeme.append(pdb)
        for x in removeme: self.hits.remove(x)
        if VERBOSITY: status()
        self.dump_inputs()
    def generate_loopless(self, extend=2):
        '''
        generate loopless PDBs (or loop-reduced PDBs, if extend is non-zero)

        For each hit, copies the records of that chain from the raw PDB,
        keeping only ATOM/HETATM/TER records whose residue number falls
        within a TMS interval extended by `extend` residues on each side.
        The fixed-column slices below are the chain-ID fields of each
        PDB record type.
        '''
        for pdb in self.hits:
            out = ''
            chain = pdb[-1]
            with open('%s/pdbs_raw/%s.pdb' % (self.outdir, pdb[:4])) as f:
                for l in f:
                    if l.startswith('DBREF'):
                        if l[11:13].strip() == chain: out += l
                    elif l.startswith('SEQADV'):
                        if l[15:17].strip() == chain: out += l
                    elif l.startswith('SEQRES'):
                        if l[10:12].strip() == chain: out += l
                    elif l.startswith('MODRES'):
                        if l[15:17].strip() == chain: out += l
                    elif l.startswith('HET '):
                        if l[11:13].strip() == chain: out += l
                    elif l.startswith('HELIX'):
                        if l[18:20].strip() == chain: out += l
                    elif l.startswith('SHEET'):
                        if l[12:14].strip() == chain: out += l
                    elif l.startswith('SSBOND'):
                        # SSBOND names two residues: keep if either is on this chain
                        if l[14:16].strip() == chain: out += l
                        elif l[28:30].strip() == chain: out += l
                    elif l.startswith('SITE '):
                        if l[10:12].strip() == chain: out += l
                        elif l[21:23].strip() == chain: out += l
                    elif l.startswith('CISPEP'):
                        if l[14:16].strip() == chain: out += l
                    elif l.startswith('LINK '):
                        # LINK also names two atoms; keep if either is on this chain
                        if l[20:22].strip() == chain: out += l
                        elif l[50:52].strip() == chain: out += l
                    elif l.startswith('ANISOU'):
                        # anisotropic B-factors are dropped entirely
                        continue#if l[20:22].strip() == chain: out += l
                    elif (l.startswith('ATOM ') or l.startswith('HETATM') or l.startswith('TER ')) and (l[20:22].strip() == chain):
                        # keep the coordinate line only if its residue number
                        # lies within an (extended) TMS interval
                        for h in self.helices[pdb]:
                            if (h[0] - extend) <= int(l[22:26].strip()) <= (h[1] + extend):
                                out += l
                                break
                    elif l[:6] not in ('DBREF ', 'SEQADV', 'SEQRES', 'HET ', 'HELIX ', 'SHEET ', 'SSBOND', 'SITE ', 'ATOM ', 'HETATM', 'TER ', 'CISPEP', 'ANISOU', 'LINK ', 'MODRES'):
                        # pass every other record type through unchanged
                        out += l
            if not os.path.isdir('%s/pdbs_loopless' % self.outdir): os.mkdir('%s/pdbs_loopless' % self.outdir)
            with open('%s/pdbs_loopless/%s.pdb' % (self.outdir, pdb), 'w') as f: f.write(out)
    def dump_inputs(self):
        '''
        dump inputs to the deuterocol1 configuration file

        Persists lengths, requested families and TMS counts to
        <outdir>/deuterocol1.yaml so later (offline) runs can restore them.
        '''
        with open('%s/deuterocol1.yaml' % self.outdir, 'w') as f:
            yaml.safe_dump({'lengths':self.lengths, 'fams':list(self.fams), 'ntmss':self.ntmss}, f)
def parse_pdbtm(fn):
    '''
    another PDBTM parser for some reason
    TODO: remove one of them (PDB.parse_xml covers the same format)

    Returns a dict mapping each CHAINID to a list of (pdb_beg, pdb_end)
    tuples, one per REGION of type 'H' (helix) or 'C' (coil).
    '''
    root = ET.parse(fn).getroot()
    helices = {}
    for chain_node in root:
        if not chain_node.tag.endswith('CHAIN'):
            continue
        intervals = []
        for region in chain_node:
            if region.tag.endswith('REGION') and region.attrib['type'] in ('H', 'C'):
                intervals.append((int(region.attrib['pdb_beg']), int(region.attrib['pdb_end'])))
        helices[chain_node.attrib['CHAINID']] = intervals
    return helices
class DERP:
    '''
    Determine Egregious Rods in Proteins
    This code does the actual integration between STRIDE and PDBTM

    Results are cached as <outdir>/derp/<pdb_chain>.derp (one TMS per line:
    index, start, end, tab-separated).
    '''
    def __init__(self, pdb_c, outdir, pdbtmdir):
        '''
        :param pdb_c: chain identifier of the form <pdbid>_<chain>, e.g. '1abc_A'
        :param outdir: working directory containing pdbs_raw/ and derp/
        :param pdbtmdir: directory with the PDBTM XML entries
        '''
        self.pdb_c = pdb_c
        self.pdb = pdb_c[:4]  # bare 4-character PDB accession
        self.outdir = outdir
        self.pdbtmdir = pdbtmdir
    def get_tangent(self, c, interval):
        '''
        averages the Can - Can+1 - Can+2 normals to obtain an axis angle for the segment
        this works best with sufficiently long helices

        :param c: chain ID within self.structure (set by get_helices)
        :param interval: inclusive (start, end) residue-number range
        :returns: unit-length numpy vector for the segment axis
        '''
        coords = []
        for model in self.structure:
            for chain in model:
                if chain.id == c:
                    for residue in chain:
                        if interval[0] <= residue.id[1] <= interval[1]:
                            # first three atoms per residue — presumably the
                            # backbone N/CA/C in PDB order (TODO confirm)
                            coords.append([atom.coord for atom in list(residue.get_iterator())[:3]])
        normal = np.zeros(3)
        for i in range(1, len(coords)-1):
            for j in range(3):
                normal += np.cross(coords[i][j]-coords[i-1][j], coords[i+1][j]-coords[i][j])
        return normal/np.linalg.norm(normal)
    def get_ntmss(self):
        '''
        gets the number of TMSs assigned to a PDB chain from DERP output

        Counts the non-blank, non-comment lines of the cached .derp file.
        (A dead `n = ""` assignment that was immediately overwritten has
        been removed.)
        '''
        n = 0
        with open('%s/derp/%s.derp' % (self.outdir, self.pdb_c)) as f:
            for l in f:
                if not l.strip(): continue
                elif l.lstrip().startswith('#'): continue
                else: n += 1
        return n
    def get_helices(self, angle=45):
        '''
        attempts to get helix ranges from DERP output if possible and generates it if not

        When no cache exists: runs STRIDE (cached as .stride), merges each
        PDBTM TM helix with overlapping STRIDE helices whose axis deviates
        by less than `angle` degrees, writes the .derp cache and returns
        the merged intervals.
        '''
        if os.path.isfile('%s/derp/%s.derp' % (self.outdir, self.pdb_c)) and os.path.getsize('%s/derp/%s.derp' % (self.outdir, self.pdb_c)):
            # cached result: each line is "<index>\t<start>\t<end>"
            helices = []
            with open('%s/derp/%s.derp' % (self.outdir, self.pdb_c)) as f:
                for l in f:
                    if not l.strip(): continue
                    else: helices.append([int(x) for x in l.split()[1:]])
            return helices
        elif not (os.path.isfile('%s/derp/%s.stride' % (self.outdir, self.pdb)) and os.path.getsize('%s/derp/%s.stride' % (self.outdir, self.pdb))):
            # generate and cache the STRIDE secondary-structure assignment
            strideout = subprocess.check_output(['stride', '%s/pdbs_raw/%s.pdb' % (self.outdir, self.pdb)])
            with open('%s/derp/%s.stride' % (self.outdir, self.pdb), 'w') as f: f.write(strideout)
        stridehelices = {}
        parser = Bio.PDB.PDBParser()
        self.structure = parser.get_structure(self.pdb, '%s/pdbs_raw/%s.pdb' % (self.outdir, self.pdb))
        # collect STRIDE alpha-helix intervals per chain (fixed-column output)
        with open('%s/derp/%s.stride' % (self.outdir, self.pdb)) as f:
            for l in f:
                if l.startswith('LOC AlphaHelix'):
                    chain = l[27:29].strip()
                    start = int(l[21:27].strip())
                    end = int(l[38:45].strip())
                    try: stridehelices[chain].append((start,end))
                    except KeyError: stridehelices[chain] = [(start,end)]
        pdbtmhelices = parse_pdbtm('%s/%s.xml' % (self.pdbtmdir, self.pdb))
        truetmhelices = []
        # NOTE(review): self.pdb_c[-1] assumes single-character chain IDs
        for ph in pdbtmhelices[self.pdb_c[-1]]:
            phcandidate = ph
            try:
                for sh in stridehelices[self.pdb_c[-1]]:
                    if set(range(*sh)).intersection(set(range(*phcandidate))):
                        # skip absurdly long "helices"; otherwise extend the
                        # PDBTM interval with co-axial STRIDE helices
                        if sh[1] - sh[0] > MAX_POSS_HELIX: pass
                        elif np.dot(self.get_tangent(self.pdb_c[-1], phcandidate), self.get_tangent(self.pdb_c[-1], sh)) > np.cos(angle*np.pi/180):
                            phcandidate = (min(phcandidate[0], sh[0]), max(phcandidate[1], sh[1]))
            except KeyError: warn('Could not find chain %s of %s' % (self.pdb_c[-1], self.pdb))
            truetmhelices.append(phcandidate)
        with open('%s/derp/%s.derp' % (self.outdir, self.pdb_c), 'w') as f:
            for i, h in enumerate(truetmhelices):
                f.write('%d\t%d\t%d\n' % ((i+1,)+h))
        return truetmhelices
def protocol1(fams, pdbtmdir='pdbtm', outdir='ubi_out', overwrite=False, extend=2):
    '''
    the highest-level wrapper contained in Deuterocol1
    does everything needed in a single line

    Runs the full pipeline (BLAST, fetch, TMS assignment, loopless cuts)
    for every TCID prefix in `fams` and returns (hit chain IDs, their
    BLAST hit records).
    '''
    pipeline = Protocol1(pdbtmdir, outdir, force=overwrite)
    pipeline.blast_pdbs()
    for family in fams:
        pipeline.get_queries(family)
    pipeline.assign_helices()
    pipeline.generate_loopless(extend=extend)
    relevant_hits = {chain: pipeline.blast.hits[chain] for chain in pipeline.hits}
    return pipeline.hits, relevant_hits
if __name__ == '__main__':
    '''
    finally, the interface for those running this directly from the command line or a non-Python script
    '''
    import argparse
    parser = argparse.ArgumentParser()
    # -l: residues added on either side of each TMS when cutting loopless PDBs
    parser.add_argument('-l', type=int, default=2, help='how many residues to extend TMSs by (for loopless cuts) {default:2}')
    parser.add_argument('-v', action='store_true', help='verbose output')
    parser.add_argument('-d', default='pdbtm', help='PDBTM database {default:./pdbtm}')
    parser.add_argument('-o', '--outdir', default='ubi_out', help='where to put everything')
    # nargs='+' makes argparse itself reject an empty family list, so the
    # manual check below is defensive dead code in practice
    parser.add_argument('families', nargs='+', help='prefixes for family 1, e.g. 1.A.24.1. or 1.H.1. or 8.A.16')
    # argparse exposes --force-overwrite as args.force_overwrite
    parser.add_argument('-F', '--force-overwrite', action='store_true', help='force overwrites/regenerations')
    args = parser.parse_args()
    if args.v: VERBOSITY = 1
    if not args.families:
        print('[ERROR]: Family/ies must be specified!', file=sys.stderr)
        parser.print_usage()
        exit()
    protocol1(args.families, args.d, args.outdir, overwrite=args.force_overwrite, extend=args.l)
|
khendarg/ubi
|
Deuterocol1.py
|
Python
|
bsd-3-clause
| 17,513
|
[
"BLAST",
"Biopython"
] |
adc53c5d5f962a38ce233e55c5ecbc86a1b93fcad5ffea336bf45527583fafe7
|
import os
import re
import sys
import glob
import gzip
import json
import time
import errno
import shutil
import urllib
import logging
import tarfile
import urllib2
import requests
import feedparser # For LOVD atom data
import subprocess # For transvar
from distutils.spawn import find_executable # https://docs.python.org/release/2.4/dist/module-distutils.spawn.html
from Bio import Entrez, SeqIO
from appdirs import *
# Importing: https://bitbucket.org/biocommons/hgvs
try:
import hgvs as hgvs_biocommons
except ImportError as e:
if 'cannot import name ExtendedInterpolation' in str(e):
print str(e)
print 'This is a known issue.'
print 'Please refer to https://github.com/kantale/MutationInfo/issues/9 in order to resolve it'
raise e
import hgvs.parser as hgvs_biocommons_parser
def check_libssl(e):
    '''
    Detect the macOS-specific "libssl.1.0.0.dylib not loaded" failure that
    occurs when importing psycopg2, print remediation instructions and abort.

    :param e: the ImportError (or its message); matched against str(e)
    :returns: False when the message does not match; when it matches, the
        process exits with status 1 instead of returning
    '''
    if 'Library not loaded: libssl.1.0.0.dylib' in str(e):
        print '='*10 + '==========' + '='*10
        print ' '*10 + 'IMPORTANT:'
        print '='*10 + '==========' + '='*10
        print 'Module psycopg2 although installed cannot be imported properly. Error message:'
        print '=' * 20
        print str(e)
        print '=' * 20
        print 'To resolve this, before running MutationInfo set the following environment variable:'
        # derive the interpreter's prefix lib/ directory from the stdlib location
        lib_path = os.path.split(os.path.split(os.__file__)[0])[0]
        DYLD_FALLBACK_LIBRARY_PATH = os.environ.get('DYLD_FALLBACK_LIBRARY_PATH', '') # Not used..
        command = "export DYLD_FALLBACK_LIBRARY_PATH={}:$DYLD_FALLBACK_LIBRARY_PATH".format(lib_path)
        print command
        print 'For more please check: http://stackoverflow.com/questions/27264574/import-psycopg2-library-not-loaded-libssl-1-0-0-dylib'
        sys.exit(1)
        #raise e
    return False
try:
import psycopg2 # In order to catch psycopg2.OperationalError
import hgvs.dataproviders.uta as hgvs_biocommons_uta # http://hgvs.readthedocs.org/en/latest/examples/manuscript-example.html#project-genomic-variant-to-a-new-transcript
#import hgvs.variantmapper as hgvs_biocommons_variantmapper
import hgvs.assemblymapper as hgvs_biocommons_assemblymapper
except ImportError as e:
if 'sphinx' in sys.modules:
# Ignore this. These packages are not required for doc building.
# Credits to http://stackoverflow.com/questions/20843737/check-if-sphinx-doc-called-the-script for the tip.
pass
elif check_libssl(str(e)):
pass
else:
# We do not know what caused this
raise e
# Importing https://github.com/counsyl/hgvs
# How to setup data files : https://github.com/counsyl/hgvs/blob/master/examples/example1.py
import pyhgvs as hgvs_counsyl
import pyhgvs.utils as hgvs_counsyl_utils
from pygr.seqdb import SequenceFileDB
# Use this package to retrieve genomic position for known refSeq entries.
# MutationInfo comes to the rescue when pyhgvs fails
# Github's User lennax has created a useful mapper from converting from c to g coordinates with biopython.
# This library has not been incorporated in official biopython release (at least not to my knowledge)
# So we include it manually here.
# The mapper is available here: https://github.com/lennax/biopython/tree/f_loc5/Bio/SeqUtils/Mapper
# I have changed the name from mapper to biopython_mapper
# An example of how to use this mapper is here: https://gist.github.com/lennax/10600113
from biopython_mapper import CoordinateMapper
from biopython_mapper.MapPositions import GenomePositionError as biopython_GenomePositionError
from cruzdb import Genome as UCSC_genome # To Access UCSC https://pypi.python.org/pypi/cruzdb/
from pyVEP import VEP # Variant Effect Predictor https://github.com/kantale/pyVEP
from bs4 import BeautifulSoup
# For progress bar..
try:
from IPython.core.display import clear_output
have_ipython = True
except ImportError:
have_ipython = False
__docformat__ = 'reStructuredText'
__version__ = '1.3.0'
"""
TODO:
* More documentation http://thomas-cokelaer.info/tutorials/sphinx/docstring_python.html
* Fix setup.py http://stackoverflow.com/questions/3472430/how-can-i-make-setuptools-install-a-package-thats-not-on-pypi
* Add hgvs_counsyl installation and automate these steps: https://github.com/counsyl/hgvs/blob/master/examples/example1.py
* Automate steps: Done
* Maybe import inline for performance reasons? http://stackoverflow.com/questions/477096/python-import-coding-style
Notes:
* This link: http://www.ncbi.nlm.nih.gov/books/NBK21091/table/ch18.T.refseq_accession_numbers_and_mole/?report=objectonly
Contains a list of all accession codes of NCBI
* Interesting: M61857.1 Crashes mutalyzer.nl
* None of the three methods of VariantMapper can convert from c. to g.
* http://hgvs.readthedocs.org/en/latest/modules/mapping.html#module-hgvs.variantmapper
* Clinvar : http://www.ncbi.nlm.nih.gov/clinvar/?term=M61857.1%3Ac.121A%3EG Could not identify variant M61857.1:c.121A>G
* Interesting: NT_005120.15:g.-1126G>T is the same as NT_005120.15:g.1126G>T in mutalyzer
* https://mutalyzer.nl/name-checker?description=NT_005120.15%3Ag.-1126G%3ET
"""
class MutationInfoException(Exception):
    """Base exception for errors raised by the MutationInfo package."""
    pass
class MutationInfo(object):
"""The MutationInfo class handles all necessary connections to various sources in order to assess the chromosomal position of a variant.
The first time that this class is instantiated it downloads the reference genome in fasta format and splits it per chromosome.
This might take approximately 13GB of disc space.
MutationInfo offers a single method for accessing the complete functionality of the module: :py:func:`get_info`.
This class does not have any required arguments for initialization. Nevertheless the following optional arguments are supported:
:param local_directory: The local directory where the fasta files will be stored. By default MutationInfo uses \
the `appdirs <https://pypi.python.org/pypi/appdirs>`_ module in order to create a platform specifc local directory. \
This directory is also used as a cache. Whenever there is a succesful attempt to access an external service, the acquired object is saved to \
local_directory for future reference.
:param email: An email is required to connect with Entrez through biopython (see also this: http://biopython.org/DIST/docs/api/Bio.Entrez-module.html). \
If not set, MutationInfo looks for an email entry in the file stored in ``<local_directory>/properties.json``. \
If this file does not exist (for example when the class is instantiated for the first time), \
then it requests one email from the user and stores it in the ``properties.json`` file.
:param genome: The version of the **preferred** human genome assembly that will be used for reporting chromosomal positions. \
Accepted values should have the ``hgXX`` format. Default value is *hg19*.
.. warning::
MutationInfo does not guarantee that the returned position is aligned according to the ``genome`` parameter \
since certain tools work only with specific genome assemblies. For this reason always check the ``genome`` key of the returned \
item after calling the :py:func:`get_info` method.
:param ucsc_genome: Set the version of human genome assembly explicitly for the CruzDB tool (UCSC). \
Default: Same as the ``genome`` parameter.
:param dbsnp_version: The version of dbsnp for rs variants. Default value is *snp146*.
"""
_properties_file = 'properties.json'
biocommons_parser = hgvs_biocommons_parser.Parser() # https://bitbucket.org/biocommons/hgvs
# This is the size of the sequence, left and right to the variant
# position that we will attempt to perform a blat search on the
# Reference genome
# Accordint to this: https://genome.ucsc.edu/goldenPath/help/hgTracksHelp.html
# "DNA input sequences are limited to a maximum length of 25,000 bases"
# Nevertheless the real maximum is 70.000 bases. In order not to "push"
# UCSC's blat service we define a value of 2*20.000 = 40.0000
# (20.000 to the left and 20.000 to the right)
blat_margin = 20000
ucsc_blat_url = 'https://genome.ucsc.edu/cgi-bin/hgBlat'
GrCh_genomes = {
'hg18' : 'GrCh36',
'hg19' : 'GRCh37',
'hg38' : 'GRCh38',
}
# Link taken from: http://www.lovd.nl/3.0/docs/LOVD_manual_3.0.pdf page 71
lovd_genes_url = 'http://databases.lovd.nl/shared/api/rest.php/genes'
lovd_variants_url = 'http://databases.lovd.nl/shared/api/rest.php/variants/{gene}'
mutalyzer_url = 'https://mutalyzer.nl/name-checker?description={variant}'
hgnc_url = 'ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/json/hgnc_complete_set.json'
    def __init__(self, local_directory=None, email=None, genome='hg19', dbsnp_version='snp146', **kwargs):
    #def __init__(self, local_directory=None, email=None, genome='hg38', dbsnp_version='snp146'):
        '''
        Validates genome/dbsnp parameters, sets up the local cache directory
        layout (properties.json, transcripts/, blat/, mutalyzer/, hgnc/) and
        connects to the external data providers (Entrez, biocommons uta,
        counsyl hgvs, LOVD, UCSC/cruzdb).

        Current dbSNP version by default is 146:
        #http://genome.ucsc.edu/goldenPath/newsarch.html
        #11 February 2015 - dbSNP 142 Available for hg19 and hg38
        15 April 2016 - dbSNP 146 Available for hg19 and hg38
        '''
        #Check genome value
        match = re.match(r'hg[\d]+', genome)
        if not match:
            raise ValueError('genome parameter should be hgDD (for example hg18, hg19, hg38, ...)')
        self.genome = genome
        if not self.genome in self.GrCh_genomes:
            raise KeyError('genome parameter: %s does not have an GrCh equivalent..' % (self.genome))
        self.genome_GrCh = self.GrCh_genomes[self.genome]
        #Do a simple check in dbsnp_version
        match = re.match(r'snp[\w]+', dbsnp_version)
        if not match:
            raise ValueError('dbsnp_version should be snpDDD (for example snp142)')
        self.dbsnp_version = dbsnp_version
        #Get local directory (platform-specific appdirs location by default)
        if local_directory is None:
            self.local_directory = Utils.get_application_dir('MutationInfo')
            logging.info('Using local directory: %s' % (self.local_directory))
        else:
            if not Utils.directory_exists(local_directory):
                raise EnvironmentError('Local Directory %s does not exist' % (str(local_directory)))
            self.local_directory = local_directory
        self._properties_file = os.path.join(self.local_directory, self._properties_file)
        if not Utils.file_exists(self._properties_file):
            #Create property file (empty JSON object)
            with open(self._properties_file, 'w') as f:
                f.write('{}\n')
        #Read property file
        self.properties = Utils.load_json_filename(self._properties_file)
        #Get email: explicit argument wins, then the stored value,
        #otherwise ask interactively (blocks on stdin)
        if not email is None:
            self.properties['email'] = email
        elif not 'email' in self.properties:
            self.properties['email'] = raw_input('I need an email to query Entrez. Please insert one (it will be stored for future access): ')
        Entrez.email = self.properties['email']
        logging.info('Using email for accessing Entrez: %s' % (str(Entrez.email)))
        #Create transcripts directory
        self.transcripts_directory = os.path.join(self.local_directory, 'transcripts')
        logging.info('transcripts Directory: %s' % self.transcripts_directory)
        Utils.mkdir_p(self.transcripts_directory)
        #Create blat directory
        self.blat_directory = os.path.join(self.local_directory, 'blat')
        logging.info('blat Directory: %s' % (self.blat_directory))
        Utils.mkdir_p(self.blat_directory)
        self.counsyl_hgvs = Counsyl_HGVS(
            local_directory = self.local_directory,
            genome = self.genome,
        )
        self.biocommons_connect()
        # Set up LOVD data
        self._lovd_setup()
        # Set up mutalizer
        self.mutalyzer_directory = os.path.join(self.local_directory, 'mutalyzer')
        Utils.mkdir_p(self.mutalyzer_directory)
        logging.info('Mutalyzer directory: %s' % (self.mutalyzer_directory))
        # Set up cruzdb (UCSC); an optional ucsc_genome kwarg overrides self.genome
        self.ucsc_options = {}
        if 'ucsc_genome' in kwargs:
            self.ucsc_options['ucsc_genome'] = kwargs['ucsc_genome']
        self._setup_UCSC(**self.ucsc_options)
        # Save HGNC file
        # Create a set of all HGNC gene names and all HGNC gene names aliases. Convert everything to lower case
        self.hgnc_directory = os.path.join(self.local_directory, 'hgnc')
        logging.info('HGNC Directory: %s' % self.hgnc_directory)
        self.hgnc_filename = os.path.join(self.hgnc_directory, 'hgnc_complete_set.json') # https://www.genenames.org/download/statistics-and-files/
        self.hgnc_genes_json_filename = os.path.join(self.hgnc_directory, 'hgnc_genes.json')
        logging.info('HGNC Filename: %s' % self.hgnc_filename)
        Utils.mkdir_p(self.hgnc_directory)
        if not Utils.file_exists(self.hgnc_filename):
            Utils.download(self.hgnc_url, self.hgnc_filename)
        if not Utils.file_exists(self.hgnc_genes_json_filename):
            # build and cache the lower-cased symbol+alias set once
            with open(self.hgnc_filename) as f:
                data = json.load(f)
            set_1 = {x['symbol'].lower() for x in data['response']['docs']}
            set_2 = {y.lower() for x in data['response']['docs'] for y in x.get('alias_symbol', [])}
            hgnc_genes = list(set_1.union(set_2))
            with open(self.hgnc_genes_json_filename, 'w') as f2:
                json.dump(hgnc_genes, f2)
        with open(self.hgnc_genes_json_filename) as f:
            self.hgnc_genes = set(json.load(f))
        #Save properties file
        # NOTE(review): 'save_json_filenane' looks like a typo, but it must
        # match the helper's actual name in Utils — verify before renaming.
        Utils.save_json_filenane(self._properties_file, self.properties)
        #Stores what went wrong during a conversion
        self.current_fatal_error = []
    def _setup_UCSC(self, **kwargs):
        '''
        Initialize the cruzdb (UCSC genome browser) connection and dbSNP table.

        Uses kwargs['ucsc_genome'] when given, otherwise self.genome.
        Sets self.ucsc, self.ucsc_assembly and self.ucsc_dbsnp; re-raises any
        ImportError after printing a hint for known environment problems.
        '''
        # Set up cruzdb (UCSC)
        logging.info('Setting up UCSC access..')
        try:
            if 'ucsc_genome' in kwargs:
                logging.info('Using UCSC GENOME: %s' % (kwargs['ucsc_genome']))
                self.ucsc = UCSC_genome(kwargs['ucsc_genome'])
                self.ucsc_assembly = kwargs['ucsc_genome']
            else:
                self.ucsc = UCSC_genome(self.genome)
                self.ucsc_assembly = self.genome
            # e.g. self.ucsc.snp146 — the dbSNP table for the configured version
            self.ucsc_dbsnp = getattr(self.ucsc, self.dbsnp_version)
        except ImportError as e:
            if 'No module named MySQLdb' in str(e):
                logging.error('Please refer to https://github.com/kantale/MutationInfo/issues/7 in order to resolve this issue')
            elif check_libssl(str(e)):
                pass
            # NOTE(review): check_libssl() either exits the process or returns
            # False, so this raise is always reached when control gets here.
            raise e
    def biocommons_connect(self,):
        '''
        Establish a PostgreSQL connection.
        This connection might be timed out, so we might have to call this again when this happens.
        See also issue #10

        Creates self.biocommons_hdp (the uta data provider) plus one
        AssemblyMapper per alignment method — splign, blat and genewise —
        for the configured assembly (self.genome_GrCh).
        '''
        logging.info('Connecting to biocommons uta..')
        self.biocommons_hdp = hgvs_biocommons_uta.connect()
        # http://hgvs.readthedocs.org/en/latest/examples/manuscript-example.html#project-genomic-variant-to-a-new-transcript
        #self.biocommons_vm_splign = hgvs_biocommons_variantmapper.EasyVariantMapper(self.biocommons_hdp, primary_assembly=self.genome_GrCh, alt_aln_method='splign') # for biocommons hgvs < 0.5
        self.biocommons_vm_splign = hgvs_biocommons_assemblymapper.AssemblyMapper(self.biocommons_hdp, assembly_name=self.genome_GrCh, alt_aln_method='splign')
        self.biocommons_vm_blat = hgvs_biocommons_assemblymapper.AssemblyMapper(self.biocommons_hdp, assembly_name=self.genome_GrCh, alt_aln_method='blat')
        self.biocommons_vm_genewise = hgvs_biocommons_assemblymapper.AssemblyMapper(self.biocommons_hdp, assembly_name=self.genome_GrCh, alt_aln_method='genewise')
    @staticmethod
    def biocommons_parse(variant):
        """
        Parse a variant with the biocommons parser

        :param variant: The hgvs name of the variant
        :returns: the parsed variant object, or None when the name is not
            valid HGVS (the failure is logged as a warning, not raised)
        """
        try:
            return MutationInfo.biocommons_parser.parse_hgvs_variant(variant)
        except hgvs_biocommons.exceptions.HGVSParseError as e:
            logging.warning('Biocommons could not parse variant: %s . Error: %s' % (str(variant), str(e)))
            return None
    @staticmethod
    def fuzzy_hgvs_corrector(variant, transcript=None, ref_type=None, **kwargs):
        """
        Try to correct a wrong HGVS-ish variant by checking if it matches some patterns with common mistakes.
        Following directions from here: http://www.hgvs.org/mutnomen/recs-DNA.html#sub
        This is by far not exhaustive..

        :param variant: The name of the variant (example: 1234A>G)
        :param transcript: In case the variant does not have a transcript part then use this.
        :param ref_type: In case the variant does not include a reference type indicator (c or g) then define it here
        :returns: the corrected variant string, a list of two corrected
            strings (when the input encodes two alternatives with '/'),
            or None when required information is missing
        """
        if ref_type not in [None, 'c', 'g']:
            raise ValueError('Available values for ref_type: None, "c" and "g" . Found: %s' % (str(ref_type)))
        #Exclude variants in unicode
        new_variant = str(variant)
        #Check if we have all necessary information
        if not ':' in new_variant:
            if transcript is None:
                logging.error('Variant: %s does not include a transcript part (":") and the transcript argument is None. Returning None ' % (new_variant))
                return None
            search = re.search(r'[cg]\.', new_variant)
            if search is None:
                if ref_type is None:
                    logging.error('Variant: %s does not include a reference type part (c or g) and the ref_type argument is None. Returning None ' % (new_variant))
                    return None
            # NOTE(review): when the variant already contains 'c.'/'g.' and
            # ref_type is None this concatenation raises TypeError — confirm
            # whether this line was meant to be inside the `search is None` branch.
            new_variant = str(transcript) + ':' + ref_type + '.' + new_variant
        #Case 1
        #Instead if ">" the input is: "->". For example:
        if '->' in new_variant:
            logging.warning('Variant: %s . "->" found. Substituting it with ">"' % (new_variant))
            new_variant = new_variant.replace('->', '>')
        # Case 2
        # The variant contains / in order to declare two possible substitutions
        search = re.search(r'([ACGT])>([ACGT])/([ACGT])', new_variant)
        if search:
            logging.warning('Variant: %s . "/" found suggesting that it contains 2 variants' % (new_variant))
            # split into the two alternatives and correct each one recursively
            new_variant_1 = re.sub(r'([ACGT])>([ACGT])/([ACGT])', r'\1>\2', new_variant)
            new_variant_2 = re.sub(r'([ACGT])>([ACGT])/([ACGT])', r'\1>\3', new_variant)
            return [
                MutationInfo.fuzzy_hgvs_corrector(new_variant_1),
                MutationInfo.fuzzy_hgvs_corrector(new_variant_2)]
        # Case 3
        # -1126(C>T)
        # The variant contains parenthesis in the substitition
        # NT_005120.15:c.-1126(C>T) --> NT_005120.15:c.-1126C>T
        search = re.search(r'[\d]+\([ACGT]+>[ACGT]+\)', new_variant)
        if search:
            logging.warning('Variant: %s . Contains parenthesis around substitition. Removing the parenthesis' % (new_variant))
            new_variant = re.sub(r'([\d]+)\(([ACGT]+)>([ACGT]+)\)', r'\1\2>\3', new_variant)
        #Case 4
        # NT_005120.15:c.1160CC>GT --> NT_005120.15(UGT1A1):c.1160_1161delinsGT
        # multi-base substitutions must be expressed as delins per HGVS rules
        search =re.search(r'([\-\d]+)([ACGT]+)>([ACGT]+)', new_variant)
        if search:
            if len(search.group(2)) > 1 or len(search.group(3)) > 1:
                logging.warning('Variant: %s . Improper substitition please see: http://www.hgvs.org/mutnomen/recs-DNA.html#sub' % (new_variant))
                to_substitute = str(int(search.group(1))) + '_' + str(int(search.group(1)) + len(search.group(2)) -1 ) + 'delins' + search.group(3)
                new_variant = re.sub(r'([\-\d]+)([ACGT]+)>([ACGT]+)', to_substitute, new_variant)
        return new_variant
def _get_info_rs(self, variant):
    """Resolve an rs (dbSNP) variant by delegating to the UCSC (CruzDB) lookup."""
    ucsc_result = self._search_ucsc(variant)
    return ucsc_result
def get_info_ucsc(self, variant):
    """Public entry point for the UCSC (CruzDB) lookup of a variant."""
    ucsc_lookup = self._search_ucsc(variant)
    return ucsc_lookup
def get_info_vep(self, variant, **kwargs):
    """Query the Variant Effect Predictor (VEP) for a variant.

    Only the optional 'vep_assembly' keyword is forwarded to the VEP
    search.  On failure a dict is returned whose 'notes' entry joins
    the fatal errors collected so far.
    """
    vep_kwargs = {}
    if 'vep_assembly' in kwargs:
        vep_kwargs['vep_assembly'] = kwargs['vep_assembly']
    vep_ret = self._search_VEP(variant, **vep_kwargs)
    if vep_ret is not None:
        return vep_ret
    return {'notes': ' / '.join(self.current_fatal_error)}
def get_info_myvariantinfo(self, variant):
    """Look a variant up on MyVariant.info and map the returned HGVS id.

    According to http://myvariant.info/faq/ the service returns
    hg19-based HGVS ids in the '_id' field, so the result dict is
    tagged with genome 'hg19'.  Returns None when the service yields
    no usable hit.
    """
    # NOTE: appending '&hg38=true' did not work as of 1 June 2016;
    # the service kept returning hg19 coordinates.
    url = 'http://myvariant.info/v1/query?q={variant}'.format(variant=variant)
    logging.info('TRYING MyVariant INFO for: %s' % (variant))
    logging.debug('MyVariantInfo URL: %s' % (url))
    response = requests.get(url)
    payload = response.text
    logging.debug('Variant: %s . Recieved from MyVariant.info:' % (variant))
    logging.debug(payload)
    result = json.loads(payload)
    # Guard clauses: bail out early on every shape of "no useful answer".
    if 'hits' not in result:
        message = 'MyVariantInfo return No hits'
        logging.warning(message)
        self.current_fatal_error.append(message)
        return None
    if len(result['hits']) == 0:
        message = 'MyVariantInfo Returned empty hits'
        logging.warning(message)
        self.current_fatal_error.append(message)
        return None
    if '_id' not in result['hits'][0]:
        message = 'MyVariant Info returned No _id entry in hits'
        logging.warning(message)
        self.current_fatal_error.append(message)
        return None
    _id = result['hits'][0]['_id']
    logging.debug('MYVARIANT INFO for variant %s returned: %s' % (variant, _id))
    hgvs = MutationInfo.biocommons_parse(_id)
    hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs)
    # http://myvariant.info/faq/ By default, MyVariant.info supports hg19-based HGVS ids as the "_id" field for each variant object.
    return self._build_ret_dict(hgvs_transcript, hgvs_position, hgvs_reference, hgvs_alternative, 'hg19', 'MyVariantInfo', ' , '.join(self.current_fatal_error))
def get_info_biocommons(self, variant):
'''
TODO: Replace "print" with logging.info
'''
hgvs = MutationInfo.biocommons_parse(variant)
if hgvs is None:
print 'COULD NOT PARSE VARIANT WITH BIOCOMMONS'
return {}
ret = {}
hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs)
if hgvs_type == 'g':
#Get all transcripts
t_splign = self.biocommons_vm_splign.relevant_transcripts(hgvs)
t_blat = self.biocommons_vm_blat.relevant_transcripts(hgvs)
t_genewise = self.biocommons_vm_genewise.relevant_transcripts(hgvs)
if t_splign + t_blat + t_genewise == []:
print 'Could not find relevant_transcripts'
return {}
else:
if len(t_splign):
var_c = self.biocommons_vm_splign.g_to_c(hgvs, t_splign[0])
elif len(t_blat):
var_c = self.biocommons_vm_blat.g_to_c(hgvs, t_blat[0])
elif len(t_genewise):
var_c = self.biocommons_vm_genewise.g_to_c(hgvs, t_blat)
var_c_str = str(var_c)
print 'BIOCOMMONS CONVERTED FROM G to C:', var_c_str
return self.get_info_biocommons(var_c_str)
if hgvs_type != 'c':
print 'INVESTIGATE MORE... 5681'
assert False
for method_name, method in [('bc_splign', self.biocommons_vm_splign), ('bc_blat', self.biocommons_vm_blat), ('bc_genewise', self.biocommons_vm_genewise)]:
print 'Trying BIOCOMMONS METHOD:', method_name
hgvs_notes = ''
try:
hgvs_reference_assembly = method.c_to_g(hgvs)
hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs_reference_assembly)
print 'BIOCOMMONS METHOD: %s SUCCEEDED: ' % method_name, hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative
print 'FETCHING TRANSCRIPT %s FROM ENTREZ' % (hgvs_transcript)
ncbi_info = self._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='asn.1')
search = re.search(r'Homo sapiens chromosome ([\w]+), ([\w\.]+) Primary Assembly', ncbi_info)
if search is None:
print 'INVESTIGATE MORE.... 9834'
assert False
entrez_chromosome = search.group(1)
entrez_genome = search.group(2)
except hgvs_biocommons.exceptions.HGVSDataNotAvailableError as e:
print 'BIOCOMMONS METHOD: %s FAILED' % method_name
print 'REASON:', str(e)
hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = ['UNKNOWN'] * 5
entrez_chromosome, entrez_genome = ['UNKNOWN'] * 2
hgvs_notes = str(e)
except hgvs_biocommons.exceptions.HGVSError as e:
print 'BIOCOMMONS METHOD: %s FAILED' % method_name
print 'REASON:', str(e)
hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = ['UNKNOWN'] * 5
entrez_chromosome, entrez_genome = ['UNKNOWN'] * 2
hgvs_notes = str(e)
ret['%s_hgvs_transcript' % method_name] = hgvs_transcript
ret['%s_hgvs_type' % method_name] = hgvs_type
ret['%s_hgvs_position' % method_name] = hgvs_position
ret['%s_hgvs_reference' % method_name] = hgvs_reference
ret['%s_hgvs_alternative' % method_name] = hgvs_alternative
ret['%s_entrez_chromosome' % method_name] = entrez_chromosome
ret['%s_entrez_genome' % method_name] = entrez_genome
ret['%s_hgvs_notes' % method_name] = hgvs_notes
return ret
def get_info_counsyl(self, variant):
    """Convert an HGVS variant to VCF coordinates with the Counsyl hgvs library.

    Returns a position dict on success, or {} when Counsyl cannot
    parse or convert the variant.
    """
    try:
        chrom, offset, ref, alt = self.counsyl_hgvs.hgvs_to_vcf(variant)
    except (KeyError, ValueError) as e:
        # Both error types get the same report, so handle them together.
        logging.error('COUNSYL REPORTED: %s' % (str(e)))
        return {}
    except hgvs_counsyl.InvalidHGVSName as e:
        logging.error('COULD NOT PARSE VARIANT WITH COUNSYL: %s' % (str(e)))
        return {}
    return self._build_ret_dict(chrom, offset, ref, alt, self.genome, 'counsyl_hgvs_to_vcf')
def get_info_mutalyzer(self, variant, gene=None):
    """Resolve a variant through the Mutalyzer web services.

    First tries the Mutalyzer position converter.  If that fails, falls
    back to the main Mutalyzer name checker (optionally scoped to
    *gene*) and maps its genomic description to chromosome coordinates
    with the BLAT pipeline.  Returns a position dict, a dict with only
    'notes' on Mutalyzer failure, or None when the BLAT fallback fails.
    """
    #print self._search_mutalyzer(variant)
    logging.debug('Variant: %s TRYING MUTALYZER POSITION CONVERTER..' % (str(variant)))
    new_variant = self.search_mutalyzer_position_converter(variant)
    if new_variant is None:
        # Position converter failed -- fall back to the main Mutalyzer checker.
        logging.debug('Variant: %s MUTALYZER POSITION CONVERTER FAILED' % (str(variant)))
        logging.debug('Variant: %s TRYING MAIN MUTALYZER.. GENE=%s' % (str(variant), str(gene)))
        search_mutalyzer_ret = self._search_mutalyzer(variant, gene=gene)
        if search_mutalyzer_ret is None:
            logging.error('Variant: %s MAIN MUTALYZER FAILED' % (str(variant)))
            return {'notes': ' / '.join(self.current_fatal_error)}
        new_variant_mutalyzer, mutalyzer_reference, mutalyzer_alternative = search_mutalyzer_ret
        logging.debug('Variant: %s MAIN MUTALYZER REPORTED NEW VARIANT: %s' % (str(variant), str(new_variant_mutalyzer)))
        if new_variant_mutalyzer is None:
            logging.error('Variant: %s MAIN MUTALYZER FAILED' % (str(variant)))
            return {'notes': ' / '.join(self.current_fatal_error)}
        # Sometimes Main mutalyzer + Position converter return the same variant without the reference.
        # for example in NG_008377.1:g.6502_6507delCTCTCT --> NG_008377.1:g.6502_6504del
        if variant.startswith(new_variant_mutalyzer) and len(new_variant_mutalyzer) < len(variant):
            # We lost information!
            logging.debug('Variant: %s . Switching back to %s' % (str(variant), str(variant)))
            new_variant_mutalyzer = variant
        # Special check for cases like:
        # AY545216.1:g.8326_8334dupGTGCCCACT --> AY545216.1:g.8327_8335dup
        if re.match(r'[\w]+\.[\w]+:g.[\d]+_[\d]+dup[ACGT]+', variant) and re.match(r'[\w]+\.[\w]+:g.[\d]+_[\d]+dup', new_variant_mutalyzer):
            logging.debug('Variant: %s . Switching back to %s' % (str(variant), str(variant)))
            new_variant_mutalyzer = variant
        # Map the Mutalyzer-produced description to the genome via BLAT.
        logging.debug('RUNNING BLAT for %s' % (str(new_variant_mutalyzer)))
        new_variant_snp_info = self.get_info_BLAT(new_variant_mutalyzer, mutalyzer_reference=mutalyzer_reference, mutalyzer_alternative=mutalyzer_alternative)
        if new_variant_snp_info is None:
            return None
        ret =self._build_ret_dict(
            new_variant_snp_info['chrom'],
            new_variant_snp_info['offset'],
            new_variant_snp_info['ref'],
            new_variant_snp_info['alt'],
            new_variant_snp_info['genome'],
            'Mutalyzer',
            new_variant_snp_info['notes'] + ' / Mutalyzer did c_g conversion, INFO BY BLAT',
        )
        return ret
        # print 'INVESTIGATE MORE.. 4912'
        # assert False
    # Position converter succeeded: parse the new description with biocommons.
    new_variant = str(new_variant)
    #Using BioCommons to Parse the new variant
    hgvs = MutationInfo.biocommons_parse(new_variant)
    hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs)
    #print hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative
    logging.debug('SEARCHING NCBI FOR TRANSCRIPT %s GENERATED FROM MUTALYZER' % (hgvs_transcript))
    ncbi_info = self._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='asn.1')
    # Chromosome / assembly are extracted from the free-text NCBI record.
    search = re.search(r'Homo sapiens chromosome ([\w]+), ([\w\.]+) Primary Assembly', ncbi_info)
    if search is None:
        print 'INVESTIGATE MORE.. 5910'
        assert False
    ret = self._build_ret_dict(search.group(1), hgvs_position, hgvs_reference, hgvs_alternative, search.group(2), 'Mutalyzer', ' , '.join(self.current_fatal_error))
    return ret
def get_info_LOVD(self, variant):
#Parse with biocommons
hgvs = MutationInfo.biocommons_parse(variant)
if hgvs is None:
print 'LOVD USES BIOCOMMONS TO PARSE VARIANT and BIOCOMMONS FAILED'
return None
hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs)
try:
lovd_chrom, lovd_pos_1, lovd_pos_2, lovd_genome = self._search_lovd(hgvs_transcript, 'c.' + str(hgvs.posedit))
except urllib2.HTTPError as e:
print 'urllib2.HTTPError exception:', str(e)
return None
if not lovd_chrom is None:
logging.warning('***SERIOUS*** strand of variant has not been checked!')
return self._build_ret_dict(lovd_chrom, lovd_pos_1, hgvs_reference, hgvs_alternative, lovd_genome, 'LOVD')
logging.error( 'LOVD ???? 4498')
return None
def get_info_VARIATION_REPORTER(self, variant):
    """Resolve a variant through the NCBI Variation Reporter service."""
    reporter_result = self._search_variation_reporter(variant)
    return reporter_result
def get_info_TRANSVAR(self, variant):
    """Resolve a variant through the TransVar command line tool."""
    transvar_result = self._search_transvar(variant)
    return transvar_result
def get_info_gene_name(self, variant):
    """Resolve a variant whose reference part is a gene name (e.g. 'UGT1A1:c.xxx').

    Tries both TransVar and the VEP POST endpoint.  When both succeed
    their answers are cross-checked and the TransVar result is
    preferred; when only one succeeds, that one is returned.  Returns
    None when both tools fail.
    """
    ret_transvar = self._search_transvar(variant)
    ret_vep_post = self._search_vep_post(variant)
    if not ret_transvar and ret_vep_post:
        return ret_vep_post
    if ret_transvar and not ret_vep_post:
        return ret_transvar
    if ret_transvar and ret_vep_post:
        # BUGFIX: the two positions disagree when the chromosome OR the
        # offset differ; the original used 'and', which silently accepted
        # a mismatching offset on the same chromosome.
        if ret_transvar['chrom'] != ret_vep_post['chrom'] or ret_transvar['offset'] != ret_vep_post['offset']:
            logging.warning('SERIOUS! Transvar and VEP returned different postions')
        return ret_transvar
    logging.debug('Variant: %s Both TransVar and VEP failed' % variant)
    return None
def get_info_BLAT(self, variant=None, hgvs_transcript=None, hgvs_type=None, hgvs_position=None, hgvs_reference=None, hgvs_alternative=None, **kwargs):
    """Locate a variant on the genome by BLAT-ing its transcript sequence.

    Either *variant* (an HGVS string) or the pre-split hgvs_* elements
    must be provided.  A c. position is first converted to a g.
    position via the genbank record of the transcript; a chunk of the
    transcript fasta around the position is then submitted to the UCSC
    BLAT tool and the alignment is parsed to obtain the chromosomal
    position.  BLAT results and alignments are cached on disk.

    Recognized kwargs:
        gene : gene name considered for the genbank c->g mapping
            (currently collected but not forwarded -- see below).
        mutalyzer_reference / mutalyzer_alternative : ref/alt values to
            use when the variant name itself carries none (e.g. 'del').

    Returns a position dict, a dict with only 'notes' when no g.
    position could be inferred, or None on other failures.
    """
    #Parse with biocommons
    notes = ''
    if hgvs_transcript is None:
        print 'PARSING VARIANT: %s WITH BIOCOMMONS' % str(variant)
        hgvs = MutationInfo.biocommons_parse(variant)
        if not hgvs is None:
            hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs)
            print 'BIOCOMMONS REPORTED:', hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative
        else:
            # Fallback parser: transcript is the part before ':', type is c or g.
            print 'BIOCOMMONS FAILED TO PARSE VARIANT.. Applying our own simple parser.. Experimental'
            hgvs_transcript = variant.split(':')[0]
            print 'Using hgvs_transcript:', hgvs_transcript
            s = re.search(r'[\w\.]+\:([cg]).', variant)
            if not s:
                print 'OUR OWN PARSER FAILED..'
                return None
            hgvs_type = s.group(1)
            print 'Using hgvs_type:', hgvs_type
    logging.info('Fetching fasta sequence for trascript: %s' % (hgvs_transcript))
    #fasta = self._get_fasta_from_nucleotide_entrez(hgvs_transcript)
    fasta = self._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='fasta')
    if fasta is None:
        logging.error('BLAT method failed')
        return None
    # Check variant type
    if hgvs_type == 'c':
        # Map the c. (coding) position to a g. position using the genbank record.
        logging.warning('Variant: %s . This is a c (coding DNA) variant. Trying to infer g position..' % (variant))
        #logging.info('Variant: %s . Fetching NCBI XML for transcript: %s' % (variant, hgvs_transcript))
        logging.info('Variant: %s . Fetching genbank entry for transcript: %s' % (variant, hgvs_transcript))
        #ncbi_xml = self._get_xml_from_nucleotide_entrez(hgvs_transcript)
        #ncbi_xml = self._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='xml')
        #genbank = self._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='gb')
        genbank = self._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='gbwithparts')
        if genbank is None:
            logging.error('Variant: %s . Could not get data from Entrez' % (variant))
            return None
        genbank_filename = self._ncbi_filename(hgvs_transcript, 'gbwithparts')
        logging.info('Variant: %s . Genbank filename: %s' % (variant, genbank_filename))
        if 'gene' in kwargs:
            genbank_gene = kwargs['gene']
        else:
            genbank_gene = None
        # NOTE(review): genbank_gene is collected but the active mapper below
        # does not receive it (only the commented-out alternative did).
        #genbank_c_to_g_mapper = self._get_sequence_features_from_genbank(genbank_filename, gene=genbank_gene)
        genbank_c_to_g_mapper = self._biopython_c2g_mapper(genbank_filename)
        if genbank_c_to_g_mapper is None:
            logging.error('Variant: %s . Could not infer a g. position' % (variant))
            return None
        new_hgvs_position = genbank_c_to_g_mapper(int(hgvs_position))
        if new_hgvs_position is None:
            self.current_fatal_error += ['Variant: %s . Could not infer a g. position' % (variant)]
            logging.error(self.current_fatal_error[-1])
            return {'notes': ' / '.join(self.current_fatal_error)}
        new_hgvs_position = int(new_hgvs_position)
        logging.info('Variant: %s . New hgvs g. position: %i Old c. position: %i' % (variant, new_hgvs_position, hgvs_position))
        hgvs_position = new_hgvs_position
        hgvs_type = 'g'
    elif hgvs_type == 'g':
        #This should be fine
        pass
    else:
        logging.error('Variant: %s Sorry.. only c (coding DNA) and g (genomic) variants are supported so far.' % (variant))
        return None
    # Cross-check the reference sequence stated in the variant name
    # against the actual fasta content at that position.
    fasta_reference = fasta[hgvs_position-1:hgvs_position-1 + (0 if hgvs_reference is None else len(hgvs_reference)) ]
    logging.info('Variant: %s . Reference on fasta: %s Reference on variant: %s' % (variant, fasta_reference, hgvs_reference))
    if fasta_reference != hgvs_reference:
        if fasta_reference == '' and hgvs_reference is None:
            pass
        else:
            notes = 'Variant: %s . ***SERIOUS*** Reference on fasta (%s) and Reference on variant name (%s) are different!' % (variant, fasta_reference, hgvs_reference)
            logging.error(notes)
            self.current_fatal_error.append(notes)
    logging.info('Variant: %s . Fasta length: %i' % (variant, len(fasta)))
    logging.info('Variant: %s . Variant position: %i' % (variant, hgvs_position))
    #relatve_pos is the relative position in the 2*blat_margin sample of the variant
    relative_pos = hgvs_position
    #Take an as much as possible chunk of the fasta
    if hgvs_position - self.blat_margin < 0:
        chunk_start = 0
    else:
        chunk_start = hgvs_position - self.blat_margin
        relative_pos = self.blat_margin
    if hgvs_position + self.blat_margin > len(fasta):
        chunk_end = len(fasta)
    else:
        chunk_end = hgvs_position + self.blat_margin
    fasta_chunk = fasta[chunk_start:chunk_end]
    logging.info('Variant: %s . Chunk position [start, end] = [%i, %i]' % (variant, chunk_start, chunk_end))
    logging.info('Variant: %s . Position of variant in chunk: %i ' % (variant, relative_pos))
    logging.info('Variant: %s . Reference on chunk: %s Reference on fasta: %s Reference at variant position +/- 1: %s' % (variant, fasta_chunk[relative_pos-1], fasta[hgvs_position-1], fasta_chunk[relative_pos-2:relative_pos+1]))
    assert fasta_chunk[relative_pos-1] == fasta[hgvs_position-1]
    reference_on_fasta = fasta[hgvs_position-1]
    #Now that we have a fair sample of the sample
    # We can blat it!
    blat_filename = self._create_blat_filename(hgvs_transcript, chunk_start, chunk_end)
    logging.info('Variant: %s . Blat results filename: %s' % (variant, blat_filename) )
    if not Utils.file_exists(blat_filename):
        logging.info('Variant: %s . Blat filename does not exist. Requesting it from UCSC..' % (variant) )
        self._perform_blat(fasta_chunk, blat_filename)
    logging.info('Variant: %s . Blat results filename exists (or created). Parsing it..' % (variant))
    blat = self._parse_blat_results_filename(blat_filename)
    #Log some details regarding the blat results
    logging.info('Variant: %s . Blat identity: %s' % (variant, blat[0][u'IDENTITY']))
    logging.info('Variant: %s . Blat Span: %s' % (variant, blat[0][u'SPAN']))
    chrom = blat[0][u'CHRO']
    logging.info('Variant: %s . Chromosome: %s' % (variant, chrom))
    blat_details_url = blat[0]['details_url']
    logging.info('Variant: %s . Details URL: %s' % (variant, blat_details_url))
    # The alignment page has to be fetched in two hops (frameset -> frame)
    # and is cached as plain text.
    blat_alignment_filename = self._create_blat_alignment_filename(hgvs_transcript, chunk_start, chunk_end)
    logging.info('Variant: %s . Blat alignment filename: %s' % (variant, blat_alignment_filename))
    if not Utils.file_exists(blat_alignment_filename):
        logging.info('Variant: %s . Blat alignment filename does not exist. Creating it..' % (variant))
        blat_temp_alignment_filename = blat_alignment_filename + '.tmp'
        logging.info('Variant: %s . Temporary blat alignment filename: %s' % (variant, blat_temp_alignment_filename))
        logging.info('Variant: %s . Downloading Details url in Temporary blat alignment filename' % (variant))
        Utils.download(blat_details_url, blat_temp_alignment_filename)
        logging.info('Variant: %s . Parsing temporary blat alignment filename' % (variant))
        with open(blat_temp_alignment_filename) as blat_temp_alignment_file:
            blat_temp_alignment_soup = BeautifulSoup(blat_temp_alignment_file)
        blat_real_alignment_url = 'https://genome.ucsc.edu/' + blat_temp_alignment_soup.find_all('frame')[1]['src'].replace('../', '')
        logging.info('Variant: %s . Real blat alignment URL: %s' % (variant, blat_real_alignment_url))
        blat_real_alignment_filename = blat_alignment_filename + '.html'
        logging.info('Variant: %s . Real blat alignment filename: %s' % (variant, blat_real_alignment_url))
        logging.info('Variant: %s . Downloading real blat alignment filename..' % (variant))
        Utils.download(blat_real_alignment_url, blat_real_alignment_filename)
        logging.info('Variant: %s . Reading content from real alignment filename' % (variant))
        with open(blat_real_alignment_filename) as blat_real_alignment_file:
            # We have to set html.parser otherwise parsing is incomplete
            blat_real_alignment_soup = BeautifulSoup(blat_real_alignment_file, 'html.parser')
        #Take the complete text
        blat_real_alignment_text = blat_real_alignment_soup.text
        logging.info('Variant: %s . Saving content to blat alignment filename: %s' % (variant, blat_alignment_filename))
        with open(blat_alignment_filename, 'w') as blat_alignment_file:
            blat_alignment_file.write(blat_real_alignment_text)
    logging.info('Variant: %s . Blat alignment filename exists (or created)' % (variant))
    human_genome_position, direction = self._find_alignment_position_in_blat_result(blat_alignment_filename, relative_pos, verbose=True)
    if human_genome_position is None:
        return None
    logging.info('Variant: %s . Blat alignment position: %i, direction: %s' % (variant, human_genome_position, direction))
    if (hgvs_reference == '' or hgvs_reference is None) and (hgvs_alternative == '' or hgvs_alternative is None):
        # Assume one deletion (NT_005120.15:g.609721del)
        if 'mutalyzer_reference' in kwargs:
            mutalyzer_reference = kwargs['mutalyzer_reference']
            mutalyzer_alternative = kwargs['mutalyzer_alternative']
            logging.info('Variant: %s . The reference/aternative is not present on the variant! Using ref/alt info from mutalyzer: %s/%s' % (variant, mutalyzer_reference, mutalyzer_alternative))
            hgvs_reference = mutalyzer_reference
            hgvs_alternative = mutalyzer_alternative
    #Invert reference / alternative if sequence was located in negative strand
    if direction == '-':
        # TODO : Reverse also sequence for deletions / additions
        hgvs_reference = self.inverse(hgvs_reference)
        hgvs_alternative = self.inverse(hgvs_alternative)
    ret = self._build_ret_dict(chrom, human_genome_position, hgvs_reference, hgvs_alternative, self.genome, 'BLAT', ' / '.join(self.current_fatal_error))
    return ret
def get_elements_from_hgvs(self, hgvs):
    """Split a parsed biocommons hgvs object into its basic elements.

    Returns a 5-tuple: (transcript accession, variant type, start
    position, reference sequence, alternative sequence).  The
    alternative is None for edit types that do not carry an 'alt'
    attribute (e.g. deletions).
    """
    edit = hgvs.posedit.edit
    # Not every edit type exposes 'alt'; fall back to None.
    alternative = getattr(edit, 'alt', None)
    return (
        hgvs.ac,
        hgvs.type,
        hgvs.posedit.pos.start.base,
        edit.ref,
        alternative,
    )
def get_info(self, variant, empty_current_fatal_error=True, **kwargs):
    """
    Gets the chromosome, position, reference and alternative of a `dbsnp <http://www.ncbi.nlm.nih.gov/SNP/>`_ or `HGVS <http://varnomen.hgvs.org/>`_ variant. \
    If the ``method`` parameter is not specified, by default it will go through the following pipeline:
    .. image:: http://i.imgur.com/BAak2rE.png
    :param variant: A variant (in str or unicode) or list of variants. \
    Both rs (i.e. ``rs56404215``) or HGVS (i.e. ``NM_006446.4:c.1198T>G``) are accepted.
    Optional arguments:
    :param method: Instead of the default pipeline, use a specific tool. Accepted values are:
    - ``UCSC`` : Use `CruzDB <https://github.com/brentp/cruzdb>`_ (only for dbsnp variants)
    - ``VEP`` : Use `Variant Effect Predictor <http://www.ensembl.org/info/docs/tools/vep/index.html>`_ (only for dbsnp variants)
    - ``MYVARIANTINFO`` : Use `MyVariant.info <http://myvariant.info/>`_ (only for dbsnp variants)
    - ``BIOCOMMONS`` : Use `Biocommons HGVS <https://bitbucket.org/biocommons/hgvs>`_ (only for HGVS variants)
    - ``COUNSYL`` : Use `Counsyl HGVS <https://github.com/counsyl/hgvs>`_ (only for HGVS variants)
    - ``MUTALYZER`` : Use `Mutalyzer <https://mutalyzer.nl/>`_ (only for HGVS variants)
    - ``BLAT`` : Perform a BLAT search (only for HGVS variants)
    - ``LOVD`` Search `LOVD <http://databases.lovd.nl/shared/genes>`_ database (only for HGVS variants)
    - ``VARIATION_REPORTER`` Search `Variation Reported <https://www.ncbi.nlm.nih.gov/variation/tools/reporter/>`_
    - ``TRANSVAR`` Search `Transvar <http://bioinformatics.mdanderson.org/main/Transvar>`_ (Experimental, requires installation of TRANSVAR CLI)
    :return: If the pipeline or the selected method fails then the return value is ``None``. \
    Otherwise it returns a dictionary with the following keys:
    - ``chrom`` : The chromosome where this variant is located. \
    The type of this value is *str* in order to have a universal type for all possible chromosome values (including X and Y).
    - ``offset`` : The nucleotide position of the variant.
    - ``ref`` : The reference sequence of the variant. In case of insertions this value is an empty string.
    - ``alt`` : The alternative sequence of the variant. In case of deletions this value is an empty string.
    - ``genome`` : The version of the human genome assembly for this position.
    - ``source`` : The name of the tool that was used to locate the position.
    - ``notes`` : Possible warnings, errors and notes that the tools generated during the conversion.
    An example of output is the following:
    :Example:
    >>> from MutationInfo import MutationInfo
    >>> mi = MutationInfo()
    >>> info = mi.get_info('NM_000367.2:c.-178C>T')
    >>> print info
    {'chrom': '6', 'notes': '', 'source': 'counsyl_hgvs_to_vcf', 'genome': 'hg19', 'offset': 18155397, 'alt': 'A', 'ref': 'G'}
    """
    if empty_current_fatal_error:
        self.current_fatal_error = []
    #Check if a preferred info is in parameters:
    if 'method' in kwargs:
        if kwargs['method'] == 'UCSC':
            return self.get_info_ucsc(variant)
        elif kwargs['method'] == 'VEP':
            return self.get_info_vep(variant, **kwargs)
        elif kwargs['method'] == 'MYVARIANTINFO':
            return self.get_info_myvariantinfo(variant)
        elif kwargs['method'] == 'BIOCOMMONS':
            return self.get_info_biocommons(variant)
        elif kwargs['method'] == 'COUNSYL':
            return self.get_info_counsyl(variant)
        elif kwargs['method'] == 'MUTALYZER':
            return self.get_info_mutalyzer(variant)
        elif kwargs['method'] == 'LOVD':
            return self.get_info_LOVD(variant)
        elif kwargs['method'] == 'BLAT':
            return self.get_info_BLAT(variant=variant)
        elif kwargs['method'] == 'VARIATION_REPORTER':
            return self.get_info_VARIATION_REPORTER(variant)
        elif kwargs['method'] == 'TRANSVAR':
            return self.get_info_TRANSVAR(variant)
        else:
            raise MutationInfoException('Unknown method: %s ' % (str(kwargs['method'])))
    #Check the type of variant
    if type(variant) is list:
        ret = [self.get_info(v) for v in variant]
        return ret
    elif type(variant) is unicode:
        # Python 2: normalize unicode input to str and rerun.
        logging.info('Converting variant: %s from unicode to str and rerunning..' % (variant))
        ret = self.get_info(str(variant.strip()), **kwargs)
        return ret
    elif type(variant) is str:
        #This is expected
        pass
    else:
        logging.error('Unknown type of variant parameter: %s (Accepted str and list)' % (type(variant).__name__))
        return None
    #Is this an rs variant?
    match = re.match(r'rs[\d]+', variant)
    if match:
        # This is an rs variant
        logging.info('Variant %s is an rs variant' % (variant))
        # Variant Effect Predictor
        logging.info('Variant: %s . Trying VEP..' % (variant))
        ret = self.get_info_vep(variant, **kwargs)
        if ret and 'chrom' in ret: # The fact that ret is not null does not mean that it has the info that we want!
            return ret
        else:
            logging.warning('Variant: %s . VEP Failed' % (variant))
        # MyVariant.info
        logging.info('Variant: %s . Trying MyVariant.info' % (variant))
        ret = self.get_info_myvariantinfo(variant)
        if ret:
            return ret
        else:
            logging.warning('Variant: %s . MyVariant.info failed' % (variant))
        # CruzDB
        logging.info('Variant: %s . Trying CruzDB (UCSC)..' % (variant))
        ret = self._get_info_rs(variant)
        if not ret:
            logging.warning('Variant: %s CruzDB (UCSC) failed..'% (variant))
            return ret
        elif ret['alt'] == 'lengthTooLong':
            logging.warning('Variant: %s . CruzDB (UCSC) Returned "lengthTooLong"' % (variant))
            return ret
        return ret
    #Does it contain a gene name?
    match = re.match(r'(.+?):', variant) # XYZ:...
    if match:
        variant_gene_name = match.group(1).strip()
        if variant_gene_name.lower() in self.hgnc_genes:
            logging.debug('Located HGNC gene name: %s in variant: %s' % (variant_gene_name, variant))
            ret = self.get_info_gene_name(variant.strip())
            if ret:
                return ret
        # Is this an Ensembl transcript?
        # Example: ENST00000375549.7:c.204C>T
        match = re.search(r'ENST[\d\.]+', variant_gene_name)
        if match:
            logging.debug('Located an Ensembl transcript: %s in variant: %s' % (variant_gene_name, variant))
            ret = self._search_vep_post(variant)
            if ret:
                return ret
    #Is this an hgvs variant?
    hgvs = MutationInfo.biocommons_parse(variant)
    if hgvs is None:
        logging.warning('Variant: %s . Biocommons parsing failed. Trying to fix possible problems..' % (str(variant)))
        new_variant = MutationInfo.fuzzy_hgvs_corrector(variant, **kwargs)
        if type(new_variant) is list:
            return [self.get_info(v) for v in new_variant]
        elif type(new_variant) is str:
            hgvs = MutationInfo.biocommons_parse(new_variant)
            variant = new_variant
    if hgvs is None:
        #Parsing failed again..
        logging.warning('Biocommons failed to parse variant: %s .' % (variant))
        logging.info('Variant: %s . Trying to reparse with Mutalyzer and get the genomic description' % (variant))
        search_mutalyzer_ret = self._search_mutalyzer(variant, **kwargs)
        # BUGFIX: _search_mutalyzer returns None (not a 3-tuple) on failure
        # (see the None check in get_info_mutalyzer); unpacking it directly
        # raised TypeError instead of reaching the failure branch below.
        if search_mutalyzer_ret is None:
            new_variant = None
        else:
            new_variant, mutalyzer_reference, mutalyzer_alternative = search_mutalyzer_ret
        if new_variant is None:
            logging.error('Variant: %s . Mutalyzer failed. Nothing left to do.. (could not parse variant)' % (variant))
            #print self._search_VEP(variant)
            return None
        logging.info('Variant: %s . rerunning get_info with variant=%s' % (variant, new_variant))
        return self.get_info(new_variant, **kwargs)
    #Up to here we have managed to parse the variant
    hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs)
    #Try to map the variant in the reference assembly with biocommons
    if hgvs_type == 'c':
        logging.info('Variant: %s . Trying to map variant in the reference assembly with biocommons' % (variant))
        success = False
        for biocommons_vm_name, biocommons_vm_method in [
            ('splign', self.biocommons_vm_splign),
            ('blat', self.biocommons_vm_blat),
            ('genewise', self.biocommons_vm_genewise),
        ]:
            retry = 3
            while retry:
                try:
                    logging.info('Trying biocommon method: %s' % (biocommons_vm_name))
                    hgvs_reference_assembly = biocommons_vm_method.c_to_g(hgvs)
                    hgvs_transcript, hgvs_type, hgvs_position, hgvs_reference, hgvs_alternative = self.get_elements_from_hgvs(hgvs_reference_assembly)
                    success = True
                    retry = 0
                except hgvs_biocommons.exceptions.HGVSDataNotAvailableError as e:
                    error_message = 'Variant: %s . biocommons method %s method failed: %s' % (variant, biocommons_vm_name, str(e))
                    logging.warning(error_message)
                    self.current_fatal_error.append(error_message)
                    retry = 0
                except hgvs_biocommons.exceptions.HGVSError as e:
                    error_message = 'Variant: %s . biocommons method %s reported error: %s' % (variant, biocommons_vm_name, str(e))
                    logging.error(error_message)
                    self.current_fatal_error.append(error_message)
                    retry = 0
                except psycopg2.OperationalError as e: # Issue #10
                    logging.warning('Caught: %s' % (str(e)))
                    if retry == 1:
                        logging.error('Maximum connection tries reached')
                    else:
                        logging.info('Re-establishing connection.. %i' % (4-retry))
                        self.biocommons_connect()
                    retry -= 1
            if success:
                break
    #Is this a reference assembly?
    if self._get_ncbi_accession_type(hgvs_transcript) == 'NC':
        logging.info('Variant: %s . is a Complete genomic molecule, reference assembly' % (variant))
        #ncbi_info = self._get_info_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='asn.1')
        ncbi_info = self._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='asn.1')
        search = re.search(r'Homo sapiens chromosome ([\w]+), ([\w\.]+) Primary Assembly', ncbi_info)
        if search is None:
            logging.error('Variant: %s . Although this variant is a reference assembly, could not locate the chromosome and assembly name in the NCBI entry' % (variant))
            return None
        ret = self._build_ret_dict(search.group(1), hgvs_position, hgvs_reference, hgvs_alternative, search.group(2), 'NC_transcript', ' / '.join(self.current_fatal_error))
        return ret
    logging.info('Biocommons Failed')
    logging.info('Variant: %s Converting to VCF with pyhgvs..' % (variant))
    try:
        chrom, offset, ref, alt = self.counsyl_hgvs.hgvs_to_vcf(variant)
        return self._build_ret_dict(chrom, offset, ref, alt, self.genome, 'counsyl_hgvs_to_vcf', ' / '.join(self.current_fatal_error))
    except KeyError as e:
        logging.warning('Variant: %s . pyhgvs KeyError: %s' % (variant, str(e)))
    except ValueError as e:
        logging.warning('Variant: %s . pyhgvs ValueError: %s' % (variant, str(e)))
    except IndexError as e:
        logging.warning('Variant: %s . pyhgvs IndexError: %s' % (variant, str(e)))
    logging.info('Variant: %s counsyl pyhgvs failed...' % (str(variant)))
    logging.info('Variant: %s Trying Mutalyzer..' % (str(variant)) )
    ret = self.get_info_mutalyzer(variant, **kwargs)
    if ret and len(ret) > 2:
        return ret
    logging.warning('Variant: %s Mutalyzer failed..' % (str(variant)))
    logging.info('Variant: %s . Trying BLAT search..' % (str(variant)))
    ret = self.get_info_BLAT(variant=variant, hgvs_transcript=hgvs_transcript, hgvs_type=hgvs_type, hgvs_position=hgvs_position, hgvs_reference=hgvs_reference, hgvs_alternative=hgvs_alternative, **kwargs)
    if ret and len(ret) > 2:
        return ret
    logging.warning('Variant: %s BLAT search failed.' % (str(variant)))
    # As a measure of last resort, try LOVD...
    logging.info('Variant: %s Trying LOVD..' % (str(variant)))
    lovd_chrom, lovd_pos_1, lovd_pos_2, lovd_genome = self._search_lovd(hgvs_transcript, 'c.' + str(hgvs.posedit))
    if not lovd_chrom is None:
        warning = '***SERIOUS*** strand of variant has not been checked!'
        logging.warning(warning)
        return self._build_ret_dict(lovd_chrom, lovd_pos_1, hgvs_reference, hgvs_alternative, lovd_genome, 'LOVD', warning)
    logging.info('Variant: %s . LOVD failed..' % (str(variant)))
    # Pipeline failed
    logging.error('Variant: %s . ALL METHODS FAILED!' % (str(variant)))
    return None
@staticmethod
def inverse(nucleotide):
inverter = {
'A' : 'T',
'T' : 'A',
'C' : 'G',
'G' : 'C',
}
if nucleotide is None:
return None
return ''.join([inverter[x] for x in nucleotide.upper()])
@staticmethod
def reverse_inverse(nucleotide):
    """Return the reverse complement of a nucleotide sequence (None-safe)."""
    if nucleotide is None:
        return None
    complemented = MutationInfo.inverse(nucleotide)
    return complemented[::-1]
def _create_blat_filename(self, transcript, chunk_start, chunk_end):
return os.path.join(self.blat_directory,
transcript + '_' + str(chunk_start) + '_' + str(chunk_end) + '.blat.results.html')
def _create_blat_alignment_filename(self, transcript, chunk_start, chunk_end):
return os.path.join(self.blat_directory,
transcript + '_' + str(chunk_start) + '_' + str(chunk_end) + '.blat')
def _entrez_request(self, ncbi_access_id, retmode, rettype):
    '''
    Fetch a record for ncbi_access_id from the Entrez "nuccore" database.

    Valid retmode/rettype combinations:
    http://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly

    Returns the raw response text, or None on HTTP failure.
    '''
    try:
        handle = Entrez.efetch(db='nuccore', id=ncbi_access_id, retmode=retmode, rettype=rettype)
    except urllib2.HTTPError as e:
        logging.error('Entrez request failed: %s' % (str(e)))
        return None
    # BUGFIX: close the handle even if read() raises, so the underlying
    # network resource is never leaked.
    try:
        data = handle.read()
    finally:
        handle.close()
    return data
def _get_data_from_nucleotide_entrez(self, ncbi_access_id, retmode, rettype):
    '''
    Return NCBI nucleotide data for ncbi_access_id, using the local
    file cache. On a cache miss the record is fetched through Entrez
    and saved for next time. fasta payloads are stripped of headers
    and newlines before being returned.

    Returns None when the Entrez request fails.
    '''
    filename = self._ncbi_filename(ncbi_access_id, rettype)
    logging.info('NCBI %s %s filename: %s' % (retmode, rettype, filename))
    if Utils.file_exists(filename):
        logging.info('NCBI Filename: %s exists.' % (filename))
        payload = self._load_ncbi_filename(ncbi_access_id, rettype)
    else:
        logging.info('Filename: %s does not exist. Querying ncbi through Entrez..' % (filename))
        payload = self._entrez_request(ncbi_access_id, retmode, rettype)
        if payload is None:
            return None
        self._save_ncbi_filename(ncbi_access_id, rettype, payload)
        logging.info('NCBI Filename: %s created.' % (filename))
    # Only fasta payloads need post-processing.
    return self.strip_fasta(payload) if rettype == 'fasta' else payload
@staticmethod
def strip_fasta(fasta):
'''
Strips comments and newline characters from fasta data
'''
return ''.join([x for x in fasta.split('\n') if '>' not in x])
def _ncbi_filename(self, ncbi_access_id, rettype):
'''
Create filename that contains NCBI fasta file
rettype : fasta , xml , gb (genbank)
'''
return os.path.join(self.transcripts_directory, ncbi_access_id + '.' + rettype)
def _save_ncbi_filename(self, ncbi_access_id, rettype, data):
    '''
    Save NCBI fasta to file
    (path is derived from the access id and rettype).
    '''
    with open(self._ncbi_filename(ncbi_access_id, rettype), 'w') as f:
        f.write(data)
def _load_ncbi_filename(self, ncbi_access_id, rettype):
    '''
    Load NCBI fasta file
    (path is derived from the access id and rettype).
    '''
    with open(self._ncbi_filename(ncbi_access_id, rettype)) as f:
        return f.read()
def _perform_blat(self, fasta, output_filename):
    '''
    Perform a blat request at UCSC
    Saves results in output_filename

    TODO:
        * Support organisms other than Human
        * Error check on request.post
    '''
    payload = {
        'org': 'Human',
        'db': self.genome,
        'sort': 'query,score',
        'output': 'hyperlink',
        'userSeq': fasta,
        'type': "BLAT's guess",
    }
    logging.info('Requesting data from UCSC\'s blat..')
    response = requests.post(self.ucsc_blat_url, data=payload)
    logging.info(' ... Request is done')
    with open(output_filename, 'w') as f:
        f.write(response.text)
    return True
@staticmethod
def _parse_blat_results_filename(input_filename):
    '''
    Parse the html blat results filename.

    Returns a list of dictionaries, one per BLAT hit, keyed by the
    result-table columns (START/END renamed to RELATIVE_START /
    RELATIVE_END to avoid clashing with genomic coordinates) plus
    'browse_url' and 'details_url'.
    '''
    with open(input_filename) as f:
        soup = BeautifulSoup(f, 'html.parser')
    # The whole results table lives in the first <pre> element; hoist it
    # instead of re-querying the soup three times.
    pre = soup.find_all('pre')[0]
    header = pre.text.split('\n')[0].split()[1:]
    header[header.index('START')] = 'RELATIVE_START'
    header[header.index('END')] = 'RELATIVE_END'
    all_urls = [x.get('href') for x in pre.find_all('a')]
    # Each hit contributes two consecutive links: (browser, details).
    # BUGFIX: list() is required — under Python 3 zip() returns an
    # iterator which cannot be indexed with all_urls_pairs[i] below.
    all_urls_pairs = list(zip(all_urls[::2], all_urls[1::2]))
    ret = [{k: v for k, v in zip(header, x.split()[2:])} for x in pre.text.split('\n') if 'details' in x]
    for i, x in enumerate(ret):
        ret[i]['browse_url'] = 'https://genome.ucsc.edu/cgi-bin' + all_urls_pairs[i][0].replace('../cgi-bin', '')
        ret[i]['details_url'] = 'https://genome.ucsc.edu/cgi-bin' + all_urls_pairs[i][1].replace('../cgi-bin', '')
    return ret
@staticmethod
def _find_alignment_position_in_blat_result(blat_filename, pos, verbose=True):
    '''
    Locate transcript position `pos` inside a saved BLAT pairwise
    alignment file and map it to the corresponding genomic coordinate.

    Returns (genomic_position, strand) where strand is '+' or '-',
    or (None, None) when pos does not fall inside any aligned record.
    '''
    print 'Position:', pos
    # Each BLAT record is three lines:
    #   <fasta_start> <fasta_seq> <fasta_end>
    #   >>>>>>>> ||| |  ... >>>>>>>>     (direction arrows + match bars)
    #   <genome_start> <genome_seq> <genome_end>
    def get_pos(record, index):
        # index-th integer in the record: 0/1 = fasta start/end,
        # 2/3 = genomic (alignment) start/end.
        #print record
        return int(re.findall(r'[\d]+', record)[index])
    def get_sequence(record, index):
        # index-th lowercase sequence: 0 = query (fasta), 1 = reference.
        return re.findall(r'[acgt\.]+', record)[index]
    def get_matching(record):
        # The '|'/' ' match line sandwiched between the direction arrows.
        match = re.search(r'[\<\>]+ ([\|\ ]*) [\<\>]+', record)
        return match.group(1)
    with open(blat_filename) as f:
        blat_results = f.read()
    blat_records = re.findall(r'[\d]* [acgt\.]* [\d]*\n[\<\>]+ [\|\ ]* [\<\>]+\n[\d]* [acgt\.]* [\d]*', blat_results)
    # Find the record whose fasta coordinate range contains pos.
    found = False
    for blat_index, blat_record in enumerate(blat_records):
        fasta_start = get_pos(blat_record, 0)
        fasta_end = get_pos(blat_record, 1)
        if fasta_start <= pos <= fasta_end:
            found = True
            break
    if not found:
        logging.error("BLAT: The position {} was not found (did not match anywhere) in BLAT results.".format(pos))
        return None, None
    if verbose:
        print blat_record
    fasta_sequence = get_sequence(blat_record, 0)
    reference_sequence = get_sequence(blat_record, 1)
    alignment_start = get_pos(blat_record, 2)
    alignment_end = get_pos(blat_record, 3)
    # Determine strand from the ordering of the genomic coordinates;
    # when they are equal, fall back to the direction arrows.
    if alignment_start < alignment_end:
        alignment_step = 1
        direction = '+'
    elif alignment_start > alignment_end:
        alignment_step = -1
        direction = '-'
    else:
        # These are the same. Check the direction elsewhere
        if '>>>>>>>>>' in blat_record:
            alignment_step = 1
            direction = '+'
        elif '<<<<<<<<' in blat_record:
            alignment_step = -1
            direction = '-'
        else:
            raise Exception('WTF!')
    matching = get_matching(blat_record)
    #Find position in fasta sequence
    # Walk fasta and genomic coordinates in lockstep; '.' characters in
    # the fasta line are alignment gaps and do not advance the real
    # (ungapped) fasta index.
    #fasta_real_index = fasta_start
    fasta_real_index = None
    for fasta_absolute_index, (fasta_index, alignment_index) in enumerate(zip(range(fasta_start, fasta_end+1), range(alignment_start, alignment_end+alignment_step, alignment_step))):
        if fasta_sequence[fasta_absolute_index] != '.':
            if fasta_real_index is None:
                fasta_real_index = fasta_index
            else:
                fasta_real_index += 1
        last_sequence = fasta_sequence[fasta_absolute_index]
        if verbose:
            print 'Seq: %s Alignment: %i Fasta: %i Real_fasta: %i Match: %s' % (fasta_sequence[fasta_absolute_index], alignment_index, fasta_index, fasta_real_index, matching[fasta_absolute_index])
        if fasta_real_index == pos:
            break
    #assert fasta_real_index != fasta_start
    # A mismatch here means the query base at pos is not aligned to the
    # reference base — warn but still report the coordinate.
    if matching[fasta_absolute_index] != '|':
        print '***** WARNING: This position does not have a match with aligned sequence'
        #raise Exception('This position does not have a match with aligned sequence')
    print last_sequence
    return alignment_index, direction
@staticmethod
def _get_ncbi_accession_type(transcript):
'''
Get the accession type of a transcript
List of all accessions
http://www.ncbi.nlm.nih.gov/books/NBK21091/table/ch18.T.refseq_accession_numbers_and_mole/?report=objectonly
Also: http://www.ncbi.nlm.nih.gov/books/NBK21091/table/ch18.T.entrez_queries_to_retrieve_sets_o/
'''
# Headers
# Accession prefix Molecule type Comment
accession_types = {
'AC_': ['Genomic', 'Complete genomic molecule, usually alternate assembly'],
'NC_': ['Genomic', 'Complete genomic molecule, usually reference assembly'],
'NG_': ['Genomic', 'Incomplete genomic region'],
'NT_': ['Genomic', 'Contig or scaffold, clone-based or WGSa'],
'NW_': ['Genomic', 'Contig or scaffold, primarily WGSa'],
'NS_': ['Genomic', 'Environmental sequence'],
'NZ_': ['Genomic', 'Unfinished WGS'],
'NM_': ['mRNA', ''],
'NR_': ['RNA', ''],
'XM_': ['mRNA', 'Predicted model'],
'XR_': ['RNA', 'Predicted model'],
'AP_': ['Protein', 'Annotated on AC_ alternate assembly'],
'NP_': ['Protein', 'Associated with an NM_ or NC_ accession'],
'YP_': ['Protein', ''],
'XP_': ['Protein', 'Predicted model, associated with an XM_ accession'],
'ZP_': ['Protein', 'Predicted model, annotated on NZ_ genomic records'],
}
search = re.search(r'^\w\w_', transcript)
if search is None:
logging.warning('Transcript: %s does not follow a WW_ pattern' % (transcript))
return None
if not search.group() in accession_types:
logging.warning('Accesion type: %s of transcript: %s does not belong to known accesion types' % (search.group(), transcript))
return None
ret = search.group()[0:-1] # Remove '_'
return ret
#@staticmethod
def _biopython_c2g_mapper(self, filename):
    '''
    See comments at top!
    Using this biopython mapper: https://github.com/lennax/biopython/tree/f_loc5/Bio/SeqUtils/Mapper
    This code is adapted from: https://gist.github.com/lennax/10600113

    Returns a function that maps 1-based c. positions to 1-based g.
    positions, or None when no usable exon information can be found
    in the genbank file.
    '''
    def first_feature(feat_type='CDS', min_location_parts=1):
        # First feature of the requested type whose location has
        # strictly more than min_location_parts parts.
        for rec in SeqIO.parse(filename, "genbank"):
            for feat in rec.features:
                if feat.type == feat_type and len(feat.location.parts) > min_location_parts:
                    return feat
        return None
    def build_converter(cm):
        def convert(c_pos):
            try:
                # Important: We assume that c_pos is 1-based
                # Both input and output of the method are 0-based
                return int(cm.c2g(c_pos - 1)) + 1
            except IndexError as e:
                self.current_fatal_error += ['Could not convert from c. to g. Could not find position in exons. Error message: %s' % (str(e))]
                logging.error(self.current_fatal_error[-1])
                return None
            except biopython_GenomePositionError as e:
                self.current_fatal_error += [str(e)]
                logging.error(self.current_fatal_error[-1])
                return None
        return convert
    # Fallback chain: multi-part CDS -> mRNA -> any single-part CDS.
    exons = first_feature()
    if exons is None:
        logging.warning('Could not find any CDS (exons) information in %s . Looking for mRNA..' % (filename))
        exons = first_feature(feat_type='mRNA', min_location_parts=0)
    if exons is None:
        logging.error('Could not find mRNA information in %s . Trying a 1 size exon..' % (filename))
        exons = first_feature(min_location_parts=0)
    if exons is None:
        logging.error('Could not find a 1 size exon. Returning None')
        return None
    logging.info('Exons found: %s' % (str(exons)))
    return build_converter(CoordinateMapper(exons))
@staticmethod
def _get_sequence_features_from_genbank(filename, gene=None):
    '''
    DEPRECATED use _biopython_c2g_mapper instead
    Get sequence features from genbank file.

    Returns a function mapping c. positions onto the record's
    coordinates, or None when the file / gene cannot be processed.
    '''
    def get_feature_genes(features):
        # All distinct gene names annotated on the given features.
        feature_genes = [feature.qualifiers['gene'] for feature in features]
        feature_genes_flat_set = list(set([y for x in feature_genes for y in x]))
        return feature_genes_flat_set
    def select_gene(features, genes):
        # Pick the gene to work on; None signals an unrecoverable error.
        if len(genes) == 1:
            logging.info('Genbank filename: %s . Selecting unique gene: %s' % (filename, genes[0]))
            selected_gene = genes[0]
        else:
            if gene is None:
                logging.error('Genbank filename: %s . gene parameter is None . please select one of the following genes: %s' % (filename, genes))
                return None
            else:
                if gene in genes:
                    selected_gene = gene
                else:
                    logging.error('Genbank filename: %s . gene: %s is not present in genbank file' % (filename, gene))
                    # BUGFIX: previously fell through with selected_gene
                    # unbound, raising NameError instead of failing cleanly.
                    return None
        return [feature for feature in features if selected_gene in feature.qualifiers['gene']]
    def get_positions(features):
        # (start, end, strand) tuples, one per sub_feature (exon), or a
        # single tuple when the feature has no sub_features.
        ret = []
        assert len(features) == 1
        feature = features[0]
        if len(feature.sub_features) == 0:
            ret.append((
                feature.location.start.position,
                feature.location.end.position,
                feature.location.strand
            ))
        else:
            for sub_feature in feature.sub_features:
                ret.append((
                    sub_feature.location.start.position,
                    sub_feature.location.end.position,
                    sub_feature.location.strand
                ))
        return ret
    def select_features(record, f_type):
        # Positions for the selected gene's features of type f_type.
        # [] when the record has none; None when gene selection failed.
        all_f = [x for x in record.features if x.type == f_type]
        if len(all_f) == 0:
            return []
        all_f_genes = get_feature_genes(all_f)
        all_f_gene_features = select_gene(all_f, all_f_genes)
        if all_f_gene_features is None:
            return None
        all_f_positions = get_positions(all_f_gene_features)
        return all_f_positions
    def make_ret_function(CDS):
        # Map a c. position onto record coordinates by walking the
        # cumulative exon lengths; positions < 1 count back from the start.
        start = CDS[0][0] + 1
        def ret_f(c_pos):
            if c_pos < 1:
                return start + c_pos
            previous_dif = 0
            for CDS_position in CDS:
                current_dif = CDS_position[1] - CDS_position[0]
                if previous_dif <= c_pos <= previous_dif + current_dif:
                    return CDS_position[0] + c_pos - previous_dif
                previous_dif += current_dif
            return CDS[-1][1] + c_pos - previous_dif
        return ret_f
    with open(filename) as f:
        records = list(SeqIO.parse(f, 'genbank'))
    if len(records) != 1:
        logging.error('Genbank file: %s . Found more than one genbank record.' % (filename))
        return None
    #Get CDS
    CDS_positions = select_features(records[0], 'CDS')
    mRNA_positions = select_features(records[0], 'mRNA')
    if not CDS_positions is None and len(CDS_positions) == 0:
        logging.warning('Genbank file %s . No CDS features found.' % (filename))
    logging.info('CDS: %s' % str(CDS_positions))
    logging.info('mRNA: %s' % str(mRNA_positions))
    if CDS_positions:
        return make_ret_function(CDS_positions)
    if not mRNA_positions:
        # BUGFIX: without this guard an empty or None mRNA feature list
        # crashed inside make_ret_function (IndexError/TypeError).
        logging.error('Genbank file %s . No usable CDS or mRNA features found.' % (filename))
        return None
    return make_ret_function(mRNA_positions)
@staticmethod
def _get_sequence_features_from_XML_NCBI(filename):
    '''
    DEPRECATED!!
    Use _get_sequence_features_from_genbank instead
    Return all sequence features from XML NCBI file.
    returns a dictionary with start and end positions of each feature
    Data for debugging:
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_seq'][u'Bioseq'][u'Bioseq_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][i][u'Seq-feat_data'][u'SeqFeatData'][u'SeqFeatData_rna'][u'RNA-ref'][u'RNA-ref_type'].attributes[u'value']
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_seq'][u'Bioseq'][u'Bioseq_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][i][u'Seq-feat_location'][u'Seq-loc'][u'Seq-loc_int'][u'Seq-interval'][u'Seq-interval_from']
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_seq'][u'Bioseq'][u'Bioseq_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][i][u'Seq-feat_location'][u'Seq-loc'][u'Seq-loc_int'][u'Seq-interval'][u'Seq-interval_to']
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_seq'][u'Bioseq'][u'Bioseq_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][1][u'Seq-feat_location'][u'Seq-loc'].keys() --> [u'Seq-loc_pnt']
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_seq'][u'Bioseq'][u'Bioseq_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][3][u'Seq-feat_data'][u'SeqFeatData'].keys() --> [u'SeqFeatData_gene']
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_seq'][u'Bioseq'][u'Bioseq_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][3][u'Seq-feat_data'][u'SeqFeatData'][u'SeqFeatData_gene'][u'Gene-ref'][u'Gene-ref_locus'] --> CYP2C9
    # ----------------------------------------
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_set'][u'Bioseq-set'][u'Bioseq-set_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][0][u'Seq-feat_location']
    # ----------------------------------------
    # record[u'Bioseq-set_seq-set'][0][u'Seq-entry_seq'][u'Bioseq'][u'Bioseq_annot'][0][u'Seq-annot_data'][u'Seq-annot_data_ftable'][0][u'Seq-feat_location'][u'Seq-loc'][u'Seq-loc_mix'][u'Seq-loc-mix'][10]
    Test with
    #Check XML parsers
    hgvs_transcripts = ['NM_052896.3', 'M61857.1']
    for hgvs_transcript in hgvs_transcripts:
        ncbi_xml = mi._get_data_from_nucleotide_entrez(hgvs_transcript, retmode='text', rettype='xml')
        ncbi_xml_filename = mi._ncbi_filename(hgvs_transcript, 'xml')
        print 'Filename: ', ncbi_xml_filename
        ncbi_xml_features = mi._get_sequence_features_from_XML_NCBI(ncbi_xml_filename)
        print 'Features:', ncbi_xml_features
    '''
    # Path from the XML root down to the feature table (ftable).
    # Tuples list alternative element names: single-sequence records use
    # the first name, sequence-set records the second.
    fields_1 = [
        u'Bioseq-set_seq-set',
        0,
        (u'Seq-entry_seq', u'Seq-entry_set'),
        (u'Bioseq', u'Bioseq-set'),
        (u'Bioseq_annot', u'Bioseq-set_annot'),
        0,
        u'Seq-annot_data',
        u'Seq-annot_data_ftable',
    ]
    def apply_field(record, fields, starting_path):
        # Descend `fields` one element at a time, keeping a readable
        # breadcrumb (current_path) for error messages.
        # NOTE(review): on failure this returns a bare None, while callers
        # unpack two values — a failed lookup raises TypeError at the call
        # site rather than being handled. Left unchanged.
        current_record = record
        current_path = starting_path
        for field in fields:
            if type(field) is tuple:
                # Alternative element names: pick the first one present.
                found = False
                for tuple_field in field:
                    if tuple_field in current_record:
                        current_field = tuple_field
                        found = True
                        break
                if not found:
                    logging.error('Could not find record: %s in path: %s in XML Entrez filename: %s' % (str(field), current_path, filename))
                    return None
            else:
                current_field = field
            current_path += u' --> ' + str(current_field)
            #print current_path
            if type(current_record).__name__ == 'DictionaryElement':
                if current_field in current_record:
                    current_record = current_record[current_field]
                else:
                    logging.error('Could not find record: %s in XML Entrez filename: %s' % (current_path, filename))
                    logging.error('Available keys: %s' % (str(current_record.keys())))
                    return None
            elif type(current_record).__name__ == 'ListElement':
                # NOTE(review): boundary check looks off — an index equal to
                # len(current_record) passes this test and raises IndexError
                # below; '<' probably should be '<='. Left unchanged.
                if len(current_record) < current_field:
                    logging.error('Could not find record: %s in XML Entrex filename: %s' % (current_path, filename))
                    return None
                else:
                    current_record = current_record[current_field]
            else:
                raise Exception('Unknown XML field type: %s' % type(current_record).__name__)
        return current_record, current_path
    with open(filename) as f:
        record = Entrez.read(f)
    path = 'START'
    current_record, path = apply_field(record, fields_1, path)
    ret = {}
    # Keep only data entries of the feature table
    for location_entry_index, location_entry in enumerate(current_record):
        current_path = path + u' --> ' + str(location_entry_index)
        loc_current_entry, loc_path = apply_field(location_entry, [u'Seq-feat_data', u'SeqFeatData'], current_path)
        location_keys = loc_current_entry.keys()
        if len(location_keys) != 1:
            logging.error('Entrez XML file: %s , path: %s has more than one record: %s' % (filename, loc_path, str(location_keys)))
            return None
        location_key = location_keys[0]
        # Feature kind is encoded in the element name, e.g. SeqFeatData_rna.
        search = re.search(r'_([\w]+)$', location_key)
        if search is None:
            logging.error('Entrez XML file: %s , path: %s . Cannot process key: %s (expected YYY_ZZZ name (for example: SeqFeatData_rna))' % (filename, loc_path, location_key))
            return None
        key = search.group(1)
        loc_current_entry, loc_path = apply_field(loc_current_entry, ['SeqFeatData_%s' % (key)], loc_path)
        ref_keys = loc_current_entry.keys()
        if len(ref_keys) != 1:
            logging.error('Entrez XML file: %s , path: %s has more than one record: %s' % (filename, loc_path, str(ref_keys)))
            return None
        ref_key = ref_keys[0]
        # Cdregion entries have no '-ref' wrapper; everything else must be
        # a matching '<Kind>-ref' element or the entry is skipped.
        if not ref_key in [u'Cdregion']:
            search = re.search(r'([\w]+)-([\w]+)', ref_key)
            if search is None:
                logging.error('Entrex XML file: %s , path: %s . Cannot process key: %s (expected YYY-ZZZ name (for example: Gene-ref))' % (filename, loc_path, ref_key))
                return None
            if search.group(2) != u'ref':
                #Ignore these entries
                continue
            if search.group(1).lower() != key.lower():
                logging.error('Entrez XML file: %s , path: %s . Could not find expected key: %s' % (filename, loc_path, key + '-ref'))
                return None
            loc_current_entry, loc_path = apply_field(loc_current_entry, [ref_key], loc_path)
        #We do not traverse any further. We keep the key as the element name
        #We continue to seek the interval positions
        # loc_current_entry, loc_path = apply_field(location_entry, [u'Seq-feat_location', u'Seq-loc', u'Seq-loc_int', u'Seq-interval', u'Seq-interval_from'], current_path)
        loc_current_entry, loc_path = apply_field(location_entry, [u'Seq-feat_location', u'Seq-loc'], current_path)
        if u'Seq-loc_int' in loc_current_entry:
            # Single-interval feature: [from, to].
            loc_current_entry, loc_path = apply_field(loc_current_entry, [u'Seq-loc_int', u'Seq-interval'], loc_path)
            sequence_from, _ = apply_field(loc_current_entry, [u'Seq-interval_from'], loc_path)
            sequence_to, _ = apply_field(loc_current_entry, [u'Seq-interval_to'], loc_path)
            ret[key] = [sequence_from, sequence_to]
        elif u'Seq-loc_mix' in loc_current_entry:
            # Multi-interval feature (e.g. exons): list of [from, to].
            loc_current_entry, loc_path = apply_field(loc_current_entry, [u'Seq-loc_mix', u'Seq-loc-mix'], loc_path)
            ret[key] = []
            for seq_loc_mix_index, seq_loc_mix in enumerate(loc_current_entry):
                seq_loc_path = loc_path + ' --> %i ' % (seq_loc_mix_index)
                # NOTE(review): loc_current_entry / loc_path are rebound here
                # while loc_current_entry is being iterated; Python keeps the
                # original iterator alive, but loc_path compounds across
                # iterations, so later breadcrumbs are misleading. Left as-is.
                loc_current_entry, loc_path = apply_field(seq_loc_mix, [u'Seq-loc_int', u'Seq-interval'], seq_loc_path)
                sequence_from, _ = apply_field(loc_current_entry, [u'Seq-interval_from'], loc_path)
                sequence_to, _ = apply_field(loc_current_entry, [u'Seq-interval_to'], loc_path)
                ret[key].append([sequence_from, sequence_to])
        elif u'Seq-loc_packed-int' in loc_current_entry:
            # Packed interval list: same output shape as Seq-loc_mix.
            loc_current_entry, loc_path = apply_field(loc_current_entry, [u'Seq-loc_packed-int', u'Packed-seqint'], loc_path)
            ret[key] = []
            for seq_loc_mix_index, seq_loc_mix in enumerate(loc_current_entry):
                seq_loc_path = loc_path + ' --> %i ' % (seq_loc_mix_index)
                #loc_current_entry, loc_path = apply_field(seq_loc_mix, [u'Seq-loc_int', u'Seq-interval'], seq_loc_path)
                sequence_from, _ = apply_field(seq_loc_mix, [u'Seq-interval_from'], seq_loc_path)
                sequence_to, _ = apply_field(seq_loc_mix, [u'Seq-interval_to'], seq_loc_path)
                ret[key].append([sequence_from, sequence_to])
        else:
            if hasattr(loc_current_entry, 'keys'):
                logging.error('Entrez XML file: %s , path: %s . Could not find Seq-loc_int OR Seq-loc_mix . Existing keys: %s' % (filename, loc_path, str(loc_current_entry.keys())))
                return None
            else:
                assert False
        # sequence_from = loc_current_entry
        # loc_current_entry, loc_path = apply_field(location_entry, [u'Seq-feat_location', u'Seq-loc', u'Seq-loc_int', u'Seq-interval', u'Seq-interval_to'], current_path)
        # sequence_to = loc_current_entry
        # ret[key] = [sequence_from, sequence_to]
    return ret
def _lovd_setup(self):
    '''
    Prepare the local LOVD cache: download the LOVD genes atom feed
    if absent, then build (or load from json) the transcript ->
    [gene_id, refseq_build] dictionary in self.lovd_transcript_dict.
    '''
    self.lovd_directory = os.path.join(self.local_directory, 'LOVD')
    logging.info('LOVD directory: %s' % (self.lovd_directory))
    Utils.mkdir_p(self.lovd_directory)
    self.lovd_genes_atom = os.path.join(self.lovd_directory, 'genes.atom')
    logging.info('LOVD genes atom filename: %s' % (self.lovd_genes_atom))
    #Check if genes_atom file exists
    if not Utils.file_exists(self.lovd_genes_atom):
        logging.info('File %s does not exist. Downloading from: %s' % (self.lovd_genes_atom, self.lovd_genes_url))
        Utils.download(self.lovd_genes_url, self.lovd_genes_atom)
    self.lovd_genes_json = os.path.join(self.lovd_directory, 'genes.json')
    logging.info('LOVD gene json filename: %s' % (self.lovd_genes_json))
    #Check if it exists
    if Utils.file_exists(self.lovd_genes_json):
        logging.info('LOVD gene json filename %s exists. Loading..' % (self.lovd_genes_json))
        self.lovd_transcript_dict = Utils.load_json_filename(self.lovd_genes_json)
        return
    logging.info('LOVD gene json filename does not exist. Creating it..')
    logging.info('Parsing LOVD genes file: %s ..' % (self.lovd_genes_atom))
    data = feedparser.parse(self.lovd_genes_atom)
    logging.info('Parsed LOVD genes file with %s entries' % (len(data['entries'])))
    ret = {}
    for entry_index, entry in enumerate(data['entries']):
        summary = entry['summary']
        # id:A1BG
        search = re.search(r'id:([\w]+)', summary)
        if search is None:
            message = 'Could not find ID in LOVD entry: %s' % (summary)
            logging.error(message)
            #This shouldn't happen..
            raise MutationInfoException(message)
        _id = search.group(1)
        # refseq_build:hg19
        search = re.search(r'refseq_build:([\w]+)', summary)
        if search is None:
            message = 'Could not find refseq_build in LOVD entry: %s' % (summary)
            logging.error(message)
            refseq_build = None
        else:
            refseq_build = search.group(1)
        # refseq_mrna:NM_130786.3
        search = re.search(r'refseq_mrna:([\w_\.]+)', summary)
        if search is None:
            # Some entries legitimately lack a refseq_mrna field.
            refseq_mrna = None
        else:
            refseq_mrna = search.group(1)
        if refseq_mrna in ret:
            if _id != ret[refseq_mrna][0]:
                message = 'mRNA Refseq entry %s is present in more than one genes: %s, %s' % (refseq_mrna, ret[refseq_mrna], _id)
                logging.error(message)
                # BUGFIX: previously raised with the truncated literal 'Entr';
                # raise with the descriptive message that was just logged.
                raise MutationInfoException(message)
        ret[refseq_mrna] = [_id, refseq_build]
    self.lovd_transcript_dict = ret
    logging.info('Built LOVD trascript dictionary')
    logging.info('Saving to json file: %s' % (self.lovd_genes_json))
    # NOTE(review): 'save_json_filenane' mirrors the (misspelled) method
    # name in Utils — do not "fix" it here without renaming it there.
    Utils.save_json_filenane(self.lovd_genes_json, self.lovd_transcript_dict)
def _search_lovd(self, transcript, variation):
    '''
    Look up a c. variation of `transcript` in the per-gene LOVD atom feed.

    Returns (chrom, pos_1, pos_2, genome_build); pos_2 is None for
    single-position variants, and all four values are None when the
    transcript/variation cannot be found.
    '''
    if not transcript in self.lovd_transcript_dict:
        logging.warning('Transcript %s does not appear to be in LOVD' % (transcript))
        return None, None, None, None
    gene, genome = self.lovd_transcript_dict[transcript]
    lovd_gene_url = self.lovd_variants_url.format(gene=gene)
    lovd_gene_filename = os.path.join(self.lovd_directory, gene + '.atom')
    logging.info('LOVD entry for trascript %s is gene %s ' % (transcript, gene))
    logging.info('Looking for LOVD file: %s' % (lovd_gene_filename))
    if not Utils.file_exists(lovd_gene_filename):
        logging.info('Filename: %s does not exist . Downloading from: %s' % (lovd_gene_filename, lovd_gene_url))
        Utils.download(lovd_gene_url, lovd_gene_filename)
    else:
        logging.info('Filename: %s exists' % (lovd_gene_filename))
    logging.info('Parsing XML atom file: %s' % (lovd_gene_filename))
    data = feedparser.parse(lovd_gene_filename)
    for entry_index, entry in enumerate(data['entries']):
        entry_value = entry['content'][0]['value']
        # Example: position_mRNA:NM_000367.2:c.*2240
        position_mRNA = [''.join(x.split(':')[1:]) for x in entry_value.split('\n') if 'position_mRNA' in x][0]
        # Variant/DNA:c.*2240A>T
        variant_DNA = [x.split(':')[1] for x in entry_value.split('\n') if 'Variant/DNA' in x][0]
        # Match:
        # position_genomic:chr6:18155397
        # position_genomic:chr6:18155437_18155384
        # BUGFIX: '_' is part of \w, so a single [\w\?]+ group captures both
        # single positions and START_END ranges; the old two-branch regex's
        # second alternative was unreachable and its "len(groups()) == 4"
        # check was always false (groups() was always length 5), so ranged
        # positions crashed int() instead of yielding pos_2.
        search = re.search(r'position_genomic:chr([\w]+):([\w\?]+)', entry_value)
        if search is None:
            logging.warning('Filename: %s Could not find position_genomic in entry: %s' % (lovd_gene_filename, entry))
            continue
        if variant_DNA == variation:
            logging.info('Found LOVD entry: \n%s' % entry_value)
            chrom = search.group(1)
            raw_position = search.group(2)
            if raw_position == '?':
                pos_1 = None
                pos_2 = None
            elif '_' in raw_position:
                # Ranged genomic position: START_END.
                pos_1_str, pos_2_str = raw_position.split('_', 1)
                pos_1 = int(pos_1_str)
                pos_2 = int(pos_2_str)
            else:
                pos_1 = int(raw_position)
                pos_2 = None
            logging.info('Found: Chrom: %s pos_1: %s pos_2: %s Genome: %s' % (str(chrom), str(pos_1), str(pos_2), genome))
            return chrom, pos_1, pos_2, genome
    logging.error('Could not find %s:%s in file: %s' % (transcript, variation, lovd_gene_filename))
    return None, None, None, None
def _search_mutalyzer(self, variant, gene=None, **kwargs):
    '''
    Query the Mutalyzer name checker for `variant` (caching the HTML
    response locally) and return a tuple:
        (genomic_description, reference_sequence, alternative_sequence)
    or None on any failure (errors are appended to
    self.current_fatal_error).

    Warning: It removes reference information in indels
    For NG_008377.1:g.6502_6507delCTCTCT it returns NG_008377.1:g.6502_6507del .
    https://mutalyzer.nl/name-checker?description=NG_008377.1%3Ag.6502_6507delCTCTCT
    EDIT: RESOLVED IN https://github.com/kantale/MutationInfo/issues/20
    '''
    #Check if gene is defined
    if not gene is None:
        if not gene in variant:
            #we need to change the name of the variant.
            # Mutalyzer accepts "ACCESSION(GENE):c.xxx" — inject the gene.
            variant_splitted = variant.split(':')
            if len(variant_splitted) != 2:
                logging.error('More than one (or none) ":" characters detected in variant: %s' % (variant))
                return None
            new_variant = variant_splitted[0] + '(' + str(gene) + '):' + variant_splitted[1]
            logging.info('Mutalyzer. Changed variant name from %s to %s' % (variant, new_variant))
            variant = new_variant
    variant_url_encode = urllib.quote(variant)
    # '/' would break the cache filename below.
    if '/' in variant_url_encode:
        logging.error('Variant: %s . Variant contains character: "/" . Aborting.. ' % (str(variant_url_encode)) )
        return None
    variant_filename = os.path.join(self.mutalyzer_directory, variant_url_encode + '.html')
    logging.info('Variant: %s . Mutalyzer variant filename: %s' % (variant, variant_filename))
    if not Utils.file_exists(variant_filename):
        logging.info('Variant: %s . Mutalyzer variant filename: %s does not exist. Creating it..' % (variant, variant_filename))
        variant_url = self.mutalyzer_url.format(variant=variant_url_encode)
        logging.info('Variant: %s . Variant Mutalyzer url: %s' % (variant, variant_url))
        try:
            Utils.download(variant_url, variant_filename)
        except urllib2.HTTPError as e:
            error_message = 'Variant: %s . MUTALYZER CRASHED? : %s' % (str(variant), str(e))
            logging.error(error_message)
            self.current_fatal_error += [error_message]
            return None
        #Check for errors
        # Only freshly-downloaded files are checked; an error page is
        # deleted so the next call retries the download.
        with open(variant_filename) as f:
            soup = BeautifulSoup(f, features="html.parser")
        alert_danger = soup.find_all(class_="alert alert-danger")
        if len(alert_danger) > 0:
            error_message = 'Variant: %s . Mutalyzer returned the following critical error: %s' % (variant, alert_danger[0].text)
            logging.error(error_message)
            self.current_fatal_error += [error_message]
            logging.error('Variant: %s . Variant file will not be saved' % (variant))
            os.remove(variant_filename)
            return None
    logging.info('Variant: %s . Mutalyzer file: %s exists (or created). Parsing..' % (variant, variant_filename))
    with open(variant_filename) as f:
        soup = BeautifulSoup(f, "html.parser")
    description = soup.find_all(class_='name-checker-left-column')[0].find_all('p')[0].text
    logging.info('Variant: %s . Found description: %s' % (variant, description))
    #Get reference and alternative from mutalyzer
    # The sequence diagram lives in a <pre>; <br> variants are replaced
    # with a sentinel so the two sequence lines can be split apart.
    mutalyzer_seqs = str(soup.find_all(class_='name-checker-left-column')[0].find_all('pre')[0]).replace('<pre>', '').replace('</pre>', '').replace('<br>', '!@#').replace('</br>', '!@#').replace('<br/>', '!@#').split('!@#')
    mutalyzer_seqs = [x for x in mutalyzer_seqs if x]
    mutalyzer_reference = re.search(r' ([ACGT-]+) ', mutalyzer_seqs[0]).group(1).replace('-', '').strip()
    mutalyzer_alternative = re.search(r' ([ACGT-]+) ', mutalyzer_seqs[1]).group(1).replace('-', '').strip()
    logging.info('Variant: %s Mutalyzer reference: %s Mutalyzer alternative: %s' % (variant, mutalyzer_reference, mutalyzer_alternative))
    #new_variant_url = soup.find_all(class_='name-checker-left-column')[0].find_all('p')[1].code.a.get('href')
    bs_results = soup.find_all(class_='name-checker-left-column')[0].find_all('p')[1].code
    if bs_results is None:
        self.current_fatal_error += ['MUTALYZER COULD NOT FIND GENOMIC LOCATION']
        logging.error(self.current_fatal_error[-1])
        return None
    else:
        new_variant_url = bs_results.a.get('href')
    logging.info('Variant: %s . Found new variant url: %s' % (variant, new_variant_url))
    # The genomic description is the value of the URL's query parameter.
    new_variant = new_variant_url.split('=')[1]
    new_variant = urllib.unquote(new_variant)
    logging.info('Variant: %s . Found Genomic description: %s' % (variant, new_variant))
    return new_variant, mutalyzer_reference, mutalyzer_alternative
def search_mutalyzer_position_converter(self, variant):
    '''
    Convert a transcript-based variant description to a chromosomal one
    through the Mutalyzer position converter, trying GRCh38 first and
    falling back to GRCh37. Responses are cached as HTML files.

    https://mutalyzer.nl/position-converter?assembly_name_or_alias=GRCh38&description=NM_017781.2%3Ac.166C%3ET

    Returns the converted description, or None when both assemblies fail.
    '''
    encoded_variant = urllib.quote(variant)
    new_variant = None
    for mutalyzer_assembly in ['GRCh38', 'GRCh37']:
        new_variant = None
        logging.debug('RUNNING MUTALYZER POSITION CONVERTER FOR ASSEMBLY: %s' % str(mutalyzer_assembly))
        variant_url = 'https://mutalyzer.nl/position-converter?assembly_name_or_alias={}&description={}'.format(mutalyzer_assembly, encoded_variant)
        logging.debug('MUTALYZER URL: %s' % variant_url)
        variant_filename = os.path.join(self.mutalyzer_directory, encoded_variant + '_{}_position_converter.html'.format(mutalyzer_assembly))
        logging.debug('MUTALYZER FILENAME: %s' % variant_filename )
        if not Utils.file_exists(variant_filename):
            #logging.debug('DOWNLOADING MUTALYZER URL')
            Utils.download(variant_url, variant_filename)
        with open(variant_filename) as f:
            soup = BeautifulSoup(f, features="html.parser")
        #Check for errors
        danger_alerts = soup.find_all(class_ = 'alert-danger')
        if danger_alerts:
            error_message = 'MUTALYZER POSITION CONVERTER REPORTED ERROR: %s' % danger_alerts[0].text
            logging.warning(error_message)
            self.current_fatal_error.append(error_message)
        else:
            new_variant = soup.find_all('code')[4].text
            break
    if new_variant is None:
        logging.warning('MUTALYZER POSITION CONVERTER FAILED')
        return None
    logging.debug('MUTALYZER POSITION CONVERTER REPORTED: %s' % str(new_variant))
    return new_variant
def _search_ucsc(self, variant):
    '''
    Look up an rs variant in the UCSC dbSNP table and return a result
    dictionary (or a list of them when dbSNP has multiple records),
    or None on failure.

    Adapted from: https://www.biostars.org/p/59249/
    Variant should be an rs variant
    '''
    # Trying three times to query UCSC..
    ucsc_query_efforts = 0
    ucsc_query_efforts_MAX = 3
    while ucsc_query_efforts < ucsc_query_efforts_MAX:
        ucsc_query_efforts += 1
        success = False
        try:
            results = list(self.ucsc_dbsnp.filter_by(name=variant))
            success = True
        except Exception as e:
            message = "Could not query UCSC. Error: {}".format(str(e))
            logging.error(message)
            message = "This was effort {} from {}".format(ucsc_query_efforts, ucsc_query_efforts_MAX)
            logging.error(message)
            if ucsc_query_efforts < ucsc_query_efforts_MAX:
                # Reconnect before the next attempt.
                logging.info("Resetting UCSC connection...")
                self._setup_UCSC(**self.ucsc_options)
            else:
                logging.error("Maximum UCSC connection efforts reached. Aborting..")
                return None
        if success:
            break
    logging.info('Variant: %s . Returned from UCSC filter_by: %s' % (str(variant), str(results)))
    if not results:
        message = 'UCSC returned an empty result list'
        logging.warning(message)
        self.current_fatal_error.append(message)
        return None
    ret = []
    for result in results:
        chrom = result.chrom
        # NOTE(review): chromStart is read but never used below — confirm
        # whether chromEnd alone is intended for all variant types.
        start = result.chromStart
        offset = result.chromEnd # This is the position reported from dbSNP
        refNCBI = result.refNCBI
        refUCSC = result.refUCSC
        reference = refNCBI
        if refNCBI != refUCSC:
            logging.warning('Variant: %s has different reference in NCBI (%s) and UCSC (%s)' % (variant, refNCBI, refUCSC))
            logging.warning('Keeping NCBI reference')
        observed = result.observed
        observed_s = observed.split('/')
        # dbSNP reports 'observed' alleles on the record's strand; for '-'
        # strand records, reverse-complement each allele. '-' (deletion)
        # alleles become empty strings in either case.
        if result.strand == u'-':
            #observed_s = list([MutationInfo.inverse(x) if not x in ['-'] else '-' for x in ''.join(observed_s)]) # Do not invert '-'
            observed_s = [MutationInfo.reverse_inverse(x) if not x in ['-'] else '' for x in observed_s] # Do not invert '-'
        else:
            observed_s = [x if not x in ['-'] else '' for x in observed_s]
        if reference == '-':
            reference = ''
        # The alternative allele(s): every observed allele that is not
        # the reference.
        alternative = [x for x in observed_s if x != reference]
        logging.info('Variant: %s . observed: %s alternate: %s' % (variant, observed, str(alternative)))
        if len(alternative) == 1:
            alternative = alternative[0]
        #In case of a deletion we need to make this correction in order to report the same position as in HGVS
        #For example: rs113993960
        if alternative == '':
            offset = offset - len(reference) + 1
        ret.append(self._build_ret_dict(chrom, offset, reference, alternative, self.ucsc_assembly, 'UCSC'))
    # Unwrap a single-record result for convenience.
    if len(ret) == 1:
        return ret[0]
    return ret
def _search_VEP(self, variant, vep_assembly='grch38'):
    '''
    Variant Effect Predictor

    Query Ensembl VEP for `variant` and convert the first returned record
    to chromosomal coordinates.  `vep_assembly` selects the assembly of
    the VEP endpoint (e.g. 'grch38' or 'grch37').

    Returns a position dictionary (see _build_ret_dict) or None on failure.
    '''
    #vep_assembly = 'grch38'
    #vep_assembly = 'grch37'
    v = VEP(variant, assembly=vep_assembly)
    logging.debug('VEP for for variant %s returned: %s' % (variant, str(v)))
    if not type(v) is list:
        self.current_fatal_error += ['Variant: %s . VEP did not return a list: %s' % (variant, str(v))]
        logging.error(self.current_fatal_error[-1])
        return None
    if len(v) == 0:
        self.current_fatal_error += ['Variant: %s . VEP returned an empty list' % (variant)]
        logging.error(self.current_fatal_error[-1])
        return None
    logging.info('Variant: %s . VEP returned %i results. Getting the info from the first' % (variant, len(v)))
    allele_string = v[0]['allele_string']
    logging.info('Variant: %s . Allele string: %s' % (variant, allele_string))
    allele_string_s = allele_string.split('/')
    # Looking for 'transcript_consequences'
    variant_alleles = []
    if 'transcript_consequences' in v[0]:
        # Getting all variant alleles
        for t_c in v[0]['transcript_consequences']:
            if 'variant_allele' in t_c:
                variant_alleles.append(t_c['variant_allele'])
    #Get all different variant alleles
    variant_alleles = list(set(variant_alleles))
    if len(variant_alleles) > 1:
        logging.warning('Variant: %s . More than one variant alleles found' % (variant))
    if len(variant_alleles) == 0:
        logging.warning('Variant: %s . No variant alleles found' % (variant))
    # The reference is whatever part of the allele string is neither a
    # variant allele nor the gap symbol '-'
    reference = [x for x in allele_string_s if x not in variant_alleles + [u'-']]
    if len(reference) == 1:
        reference = reference[0]
    elif len(reference) == 0:
        reference = u''
    if len(variant_alleles) == 1:
        variant_alleles = variant_alleles[0]
    elif len(variant_alleles) == 0:
        variant_alleles = u''
    #In case of a deletion we need to make this correction in order to report the same position as in HGVS
    #For example: rs113993960
    if variant_alleles in [u'', u'-']:
        offset = v[0]['start']
        variant_alleles = u'';
    else:
        offset = v[0]['end']
    arguments = [
        v[0]['seq_region_name'], # chrom
        offset, # offset
        reference, # ref
        variant_alleles, # alt
        v[0]['assembly_name'], # genome
        'VEP', # source
    ]
    return self._build_ret_dict(*arguments)
def _search_vep_post(self, hgvs):
    '''
    Query the Ensembl REST VEP endpoint via POST for a single HGVS
    notation and convert the result to chromosomal coordinates (hg38).

    Returns a position dictionary (see _build_ret_dict) or None on failure.
    '''
    server = "https://rest.ensembl.org"
    ext = "/vep/human/hgvs"
    headers={ "Content-Type" : "application/json", "Accept" : "application/json"}
    data = json.dumps({"hgvs_notations": [hgvs]})
    logging.debug('Variant: %s . Accessing VEP via POST..' % hgvs)
    r = requests.post(server+ext, headers=headers, data=data)
    logging.debug('Variant: %s . VEP-POST returned' % hgvs)
    if not r.ok:
        try:
            r.raise_for_status()
        except Exception as e:
            logging.error('Library requests failed: %s' % (str(e)))
            return None
    vep = r.json()
    if not type(vep) is list:
        logging.error('Variants: %s . VEP did not return a list' % hgvs)
        return None
    if not len(vep) == 1:
        logging.error('Variant: %s . VEP returned more than one items' % hgvs)
        return None
    #transcripts = [x['gene_id'] for x in vep[0]['transcript_consequences'] if 'gene_id' in x]
    #transcripts = set(transcripts)
    #print (transcripts)
    # allele_string is 'REF/ALT'; '-' marks a gap (ins/del) and is dropped
    ref_alt = vep[0]['allele_string'].split('/')
    ref = ref_alt[0].replace('-', '')
    alt = ref_alt[1].replace('-', '')
    strand = vep[0]['strand']
    if strand == -1:
        # Minus-strand record: complement the alleles
        ref = self.inverse(ref)
        alt = self.inverse(alt)
    ret2 = [
        vep[0]['seq_region_name'], # Chromosome
        vep[0]['start'], # Offset
        ref, # ref
        alt, # alt,
        'hg38',
        'VEP',
    ]
    #print (ret2)
    return self._build_ret_dict(*ret2)
def _search_variation_reporter(self, variant):
    '''
    Convert a variant to genomic (g.) coordinates through the NCBI
    Variation Reporter web service and recurse into get_info() with the
    most common returned Hgvs_g notation.

    https://www.ncbi.nlm.nih.gov/variation/tools/reporter/docs/api/webservice
    source-assembly - The accession and version of the reference assembly (e.g. the accession and version for human GRCh38.2
    is GCF_000001405.28). Visit the Variation Reporter web service to see available supported assemblies and the Assembly
    database for the corresponding assembly accession and version numbers. When not provided, the value defaults to GCF_000001405.25 (GRCh37.p13).
    # Example requests.post("https://www.ncbi.nlm.nih.gov/projects/SNP/VariantAnalyzer/var_rep.cgi", data={"annot1":"NM_017781.2:c.166C>T", "api_protocol_version":"1.0"})
    https://www.ncbi.nlm.nih.gov/variation/tools/reporter/docs/help

    Returns the result of get_info() on the converted notation, or None
    on any failure (the reason is logged and appended to
    self.current_fatal_error).
    '''
    url = 'https://www.ncbi.nlm.nih.gov/projects/SNP/VariantAnalyzer/var_rep.cgi'
    data = {
        'annot1': variant,
        'api_protocol_version': '1.0',
    }
    try:
        r = requests.post(url, data=data)
    except Exception as e:
        message = 'Variation Reporter. Variant: %s Could not access www.ncbi.nlm.nih.gov. Exception: %s' % (str(variant), str(e))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    if not hasattr(r, 'text'):
        message = 'Variation Reporter. Variant: %s POST request on %s Failed' % (str(variant), url)
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    text = r.text
    # Python-2-only sanity check (`unicode` does not exist on Python 3)
    assert type(text) is unicode
    logging.debug('Variant: %s . Variation Reporter returned:' % (str(variant)))
    logging.debug(text)
    if "Failed" in text:
        message = 'Variation Reporter. Variant: %s . Returned:\n%s\n' % (str(variant), text)
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    # Get the assembly
    # ## Assembly: GRCh37.p13
    s = re.search(r'Assembly:[\s]+([\w\.]+)', text)
    if not s:
        message = 'Variation Reporter. Variant: %s. Could not find assembly' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    assembly = s.group(1)
    # Get headers (the tab-separated '# ...' line of the report)
    s = re.search(r"^\# (.+)$", text, re.MULTILINE)
    if not s:
        message = 'Variation Reporter. Variant: %s. Could not find header' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    headers = s.group(1).split('\t')
    if not 'Hgvs_g' in headers:
        message = 'Variation Reporter. Variant: %s. Could not find field "Hgvs_g" in header' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    #Hgvs_g_index = headers.index('Hgvs_g')
    Hgvs_g_index = headers.index('Hgvs_g (RefSeqGene)')
    # Data rows: long enough, not the echo of the submission, not comments
    all_records = [x.split('\t') for x in text.split('\n') if (len(x) > 10) and (not 'Submitted:' in x) and (x[0] != '#')]
    all_hgvs_g = [x[Hgvs_g_index] for x in all_records]
    # Check if g. is indeed present
    all_hgvs_g = [x for x in all_hgvs_g if 'g.' in x]
    if not (len(all_hgvs_g)):
        message = 'Variation Reporter. Variant: %s. Could not convert to genomic coordinates' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    # Parse the remaining with biocommons
    all_hgvs_g_report = all_hgvs_g
    all_hgvs_g = [y for y in [MutationInfo.biocommons_parse(x) for x in all_hgvs_g] if y]
    if not len(all_hgvs_g):
        message = 'Variation Reporter. Variant: %s. Could not parse any of the hgvs_g variants: %s with biocommons' % (str(variant), str(all_hgvs_g_report))
        # BUGFIX: previously the failure fell through without logging or
        # returning, and max() below raised ValueError on the empty dict.
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    # Count
    all_hgvs_g_count = {}
    for current_hgvs in all_hgvs_g:
        all_hgvs_g_count[str(current_hgvs)] = all_hgvs_g_count.get(str(current_hgvs), 0) + 1
    # Take the most common:
    most_common_hgvs = max([(v, k) for k, v in all_hgvs_g_count.iteritems()])[1]
    logging.debug('Variation Reporter. Variant: %s . Returning value for: %s' % (str(variant), most_common_hgvs))
    self.current_fatal_error.append('Variation Reporter converted %s to %s' % (str(variant), most_common_hgvs))
    return self.get_info(most_common_hgvs, empty_current_fatal_error=False)
def _search_transvar(self, variant):
    '''
    Convert an HGVS variant to chromosomal coordinates (hg38) by running
    the external `transvar` command line tool and parsing its output.

    RHAG:c.236G>A --> ['6', 49619284, 'C', 'T', 'hg38', 'transvar']
    RHAG:c.236_237insA --> ['6', 49619283, '', 'T', 'hg38', 'transvar']
    RHAG:c.236delG --> ['6', 49619284, 'C', '', 'hg38', 'transvar']
    RHAG:c.236_237delGT --> ['6', 49619283, 'AC', '', 'hg38', 'transvar']

    Resources:
    * http://www.transvar.info/transvar_user/annotations/
    * http://seqanswers.com/forums/showthread.php?t=32557 samtools
    * https://bitbucket.org/wanding/transvar

    TODO: By default returns genome reference = hg19
    NM_017781.2:c.166C>T NM_017781 (protein_coding) CYP2W1 + chr7:g.1023013C>T/c.166C>T/p.L56L cds_in_exon_1 synonymous;reference_codon=CTG;alternative_codon=TTG;source=UCSCRefGene
    NM_017781.2:c.166C>T NM_017781 (protein_coding) CYP2W1 + chr7:g.1023013C>T/c.166C>T/p.L56L cds_in_exon_1 synonymous;reference_codon=CTG;alternative_codon=TTG;dbxref=GeneID:54905,HGNC:20243;aliases=NP_060251;source=RefSeq

    Returns a position dictionary (see _build_ret_dict) or None on failure.
    '''
    transvar_exec = find_executable('transvar')
    if not transvar_exec:
        message = 'Transvar . Variant: %s . Could not find transvar in the system. Is it installeD?' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    # Choose the transvar sub-command from the HGVS coordinate type
    if "c." in variant:
        t_anno = "canno"
    elif "g." in variant:
        t_anno = "ganno"
    elif "p." in variant:
        t_anno = "panno"
    else:
        message = 'Transvar . Variant: %s . Variant does not have a "c." , "g." or "p." part' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    command = "transvar %s -i %s --ccds --ucsc --ensembl --refseq --aceview --gencode --refversion hg38" % (t_anno, variant)
    logging.debug('Transvar Command: %s' % command)
    p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if len(err):
        logging.warning('Variant: %s . Transvar returned this error message: %s' % (str(variant), err))
    logging.debug('Transvar returned:\n%s\n' % (out))
    # Keep only data rows: drop the column-header line and short/empty lines
    out_raw = [x for x in out.split('\n') if not "coordinates(gDNA/cDNA/protein)" in x and len(x)>10]
    if not len(out_raw):
        message = 'Variant: %s . Transvar did not return any output' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    out_raw = [x.split() for x in out_raw]
    # Count how many annotation sources report each g. notation
    # (column 6 has the form chrN:g....../c....../p......)
    hgvs_dict = {}
    for item in out_raw:
        if len(item) < 6:
            continue
        hgvs_item = item[5]
        if not '/c.' in hgvs_item:
            continue
        hgvs_item_index = hgvs_item.index('/c.')
        hgvs_item = hgvs_item[:hgvs_item_index]
        hgvs_dict[hgvs_item] = hgvs_dict.get(hgvs_item, 0) + 1
    if len(hgvs_dict) == 0:
        message = 'Variant: %s . Transvar did not convert to g. ' % (str(variant))
        logging.error(message)
        self.current_fatal_error.append(message)
        return None
    # Keep the most frequently reported g. notation
    to_ret = max([(v, k) for k, v in hgvs_dict.iteritems()])[1]
    message = 'Transvar converted %s to %s' % (str(variant), to_ret)
    logging.debug(message)
    self.current_fatal_error.append(message)
    # chr6:g.49619284C>T <-- RHAG:c.236G>A
    # chr6:g.49619283_49619284insT <-- RHAG:c.236_237insA
    # chr6:g.49619284delC <-- RHAG:c.236delG
    # chr6:g.49619283_49619284delAC <-- RHAG:c.236_237delGT
    s = re.search(r'chr(?P<chromosome>.+?):g\.(?P<start>[\d]+)(?P<end>_[\d]+)?((?P<SNV>[ACGT]+>[ACGT]+)|(?P<INS>ins[ACGT]+)|(?P<DEL>del[ACGT]+))', to_ret)
    if not s:
        logging.debug('Could not extract location from Transvar output: %s' % (to_ret))
        return None
    s_dict = s.groupdict()
    #print (s_dict)
    chromosome = s_dict['chromosome']
    offset = int(s_dict['start'])
    if s_dict['SNV']:
        ref, alt = s_dict['SNV'].split('>')
    elif s_dict['INS']:
        ref = ''
        alt = s_dict['INS'].replace('ins', '')
    elif s_dict['DEL']:
        ref = s_dict['DEL'].replace('del', '')
        alt = ''
    else:
        logging.error('error 619')
        return None
    elements = [chromosome, offset, ref, alt, 'hg38', 'transvar']
    #print ('%s --> %s' % (variant, elements))
    return self._build_ret_dict(*elements)
#chrom, offset, ref, alt = self.counsyl_hgvs.hgvs_to_vcf(variant)
#return self._build_ret_dict(chrom, offset, ref, alt,'hg19', 'counsyl_hgvs_to_vcf', ' / '.join(self.current_fatal_error))
def _build_ret_dict(self, *args):
return {
'chrom' : str(args[0]).lower().replace('chr', ''),
'offset' : args[1],
'ref' : args[2] if not args[2] is None else '',
'alt' : args[3] if not args[3] is None else '',
'genome' : args[4],
'source' : args[5],
'notes' : args[6] if len(args)>6 else '',
}
class Counsyl_HGVS(object):
    '''
    Wrapper class for pyhgvs https://github.com/counsyl/hgvs

    Downloads (on first use) the UCSC reference fasta and the refGene
    transcript table for the requested genome and uses them to convert
    HGVS names to VCF-style coordinates.
    '''
    # http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.chromFa.tar.gz
    # fasta_url_pattern = 'http://hgdownload.cse.ucsc.edu/goldenPath/{genome}/bigZips/chromFa.tar.gz'
    fasta_url_hg19 = 'http://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/chromFa.tar.gz'
    fasta_url_hg38 = 'http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.chromFa.tar.gz'
    refseq_url = 'https://github.com/counsyl/hgvs/raw/master/pyhgvs/data/genes.refGene'

    def __init__(self, local_directory, genome='hg19'):
        # local_directory: where fasta / refGene files are cached
        # genome: UCSC genome name, must match hg<digits>
        self.local_directory = local_directory
        self.genome = genome
        # Check genome option
        if re.match(r'hg[\d]+', genome) is None:
            raise ValueError('Parameter genome should follow the pattern: hgDD (for example hg18, hg19, hg38) ')
        #Init counsyl PYHGVS
        self.fasta_directory = os.path.join(self.local_directory, genome)
        self.fasta_filename = os.path.join(self.fasta_directory, genome + '.fa')
        self.refseq_filename = os.path.join(self.local_directory, 'genes.refGene')
        if not Utils.file_exists(self.fasta_filename):
            logging.info('Could not find fasta filename: %s' % self.fasta_filename)
            self._install_fasta_files()
        else:
            logging.info('Found fasta filename: %s' % self.fasta_filename)
        try:
            self.sequence_genome = SequenceFileDB(self.fasta_filename)
        except TypeError as e:
            logging.error('Please refer to https://github.com/kantale/MutationInfo/issues/6 to resolve this issue')
            raise e
        self._load_transcripts()

    def hgvs_to_vcf(self, variant):
        # Convert an HGVS name to a (chrom, offset, ref, alt) tuple
        chrom, offset, ref, alt = hgvs_counsyl.parse_hgvs_name(
            variant, self.sequence_genome, get_transcript=self._get_transcript)
        return chrom, offset, ref, alt

    def _load_transcripts(self):
        # Build the transcript-name -> transcript index from the refGene file
        logging.info('Indexing transcripts..')
        with open(self.refseq_filename) as f:
            self.transcripts = hgvs_counsyl_utils.read_transcripts(f)

    def _get_transcript(self, name):
        # Lookup callback handed to pyhgvs; returns None for unknown names
        return self.transcripts.get(name)

    def _install_fasta_files(self):
        # Download and unpack the per-chromosome fasta files, merge them
        # into a single <genome>.fa, and fetch the refGene table.
        fasta_filename_tar_gz = os.path.join(self.fasta_directory, 'chromFa.tar.gz')
        fasta_filename_tar = os.path.join(self.fasta_directory, 'chromFa.tar')
        #fasta_url = self.fasta_url_pattern.format(genome=self.genome)
        if self.genome == 'hg19':
            fasta_url = self.fasta_url_hg19
        elif self.genome == 'hg38':
            fasta_url = self.fasta_url_hg38
        logging.info('Downloading from: %s' % fasta_url)
        logging.info('Downloading to: %s' % fasta_filename_tar_gz)
        Utils.mkdir_p(self.fasta_directory)
        Utils.download(fasta_url, fasta_filename_tar_gz)
        logging.info('Unzipping to: %s' % fasta_filename_tar)
        Utils.gunzip(fasta_filename_tar_gz, fasta_filename_tar)
        logging.info('Untar to: %s' % self.fasta_directory)
        Utils.untar(fasta_filename_tar, self.fasta_directory)
        logging.info('Merging *.fa to %s.fa' % (self.genome))
        all_fasta_filenames_glob = os.path.join(self.fasta_directory, 'chr*.fa')
        all_fasta_filenames = glob.glob(all_fasta_filenames_glob)
        Utils.cat_filenames(all_fasta_filenames, self.fasta_filename)
        logging.info('Downloading refGene')
        logging.info('Downloading from: %s' % self.refseq_url)
        logging.info('Downloading to: %s' % self.refseq_filename)
        Utils.download(self.refseq_url, self.refseq_filename)
class Utils(object):
    '''
    Useful functions to help manange files
    '''

    @staticmethod
    def directory_exists(dirname):
        '''
        Check if directory exists
        '''
        return os.path.isdir(dirname)

    @staticmethod
    def file_exists(filename):
        '''
        Check if filename exists
        '''
        return os.path.isfile(filename)

    @staticmethod
    def mkdir_p(dirname):
        '''
        Create directory
        Functionality similar with: mkdir -p
        Reference: http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
        '''
        try:
            os.makedirs(dirname)
        except OSError as exc: # Python >2.5
            # An already-existing directory is fine; anything else is re-raised
            if exc.errno == errno.EEXIST and os.path.isdir(dirname):
                pass
            else:
                raise

    @staticmethod
    def load_json_filename(filename):
        '''
        Load a json file
        '''
        with open(filename) as f:
            data = json.load(f)
        return data

    @staticmethod
    def save_json_filenane(filename, data):
        '''
        Save a json file
        '''
        # NOTE(review): "filenane" is a typo for "filename"; kept because
        # external callers may already rely on this name.
        with open(filename, 'w') as f:
            f.write(json.dumps(data, indent=4) + '\n')

    @staticmethod
    def download(url, filename=None):
        '''
        Download url to filename (defaults to the URL basename), showing
        a progress bar when the Content-Length is known.
        http://www.pypedia.com/index.php/download
        '''
        if not filename:
            file_name = url.split('/')[-1]
        else:
            file_name = filename
        u = urllib2.urlopen(url)
        f = open(file_name, 'wb')
        meta = u.info()
        try:
            file_size = int(meta.getheaders("Content-Length")[0])
            pb = ProgressBar(file_size, 'Progress')
        except IndexError:
            file_size = None
            logging.warning('Could not determine file size')
        print("Downloading: {0} Bytes: {1}".format(url, file_size))
        file_size_dl = 0
        block_sz = 8192
        # Stream the response in 8 KB chunks
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            if file_size:
                pb.animate_ipython(file_size_dl)
        print # We need a new line here
        f.close()

    @staticmethod
    def gunzip(compressed_filename, uncompressed_filename):
        '''
        unzips a gunzip file
        https://docs.python.org/2/library/gzip.html
        '''
        # NOTE(review): f_out is the *input* (compressed) stream and f_in
        # the output file -- the names are swapped but behavior is correct.
        with gzip.open(compressed_filename, 'rb') as f_out, open(uncompressed_filename, 'wb') as f_in:
            shutil.copyfileobj(f_out, f_in)

    @staticmethod
    def untar(tar_filename, path):
        '''
        Untar a filename
        https://docs.python.org/2/library/tarfile.html
        '''
        with tarfile.open(tar_filename) as tar:
            tar.extractall(path=path)

    @staticmethod
    def cat_filenames(filenames, output_filename):
        '''
        Concat filenames
        http://stackoverflow.com/questions/13613336/python-concatenate-text-files
        '''
        with open(output_filename, 'w') as outfile:
            for fname in filenames:
                logging.info('Concatenating: %s' % fname)
                with open(fname) as infile:
                    for line in infile:
                        outfile.write(line)

    @staticmethod
    def get_application_dir(application_name):
        '''
        Create a cross platform local directory for this app
        Reference: https://pypi.python.org/pypi/appdirs/1.4.0
        '''
        directory = user_data_dir(application_name, '')
        if not Utils.directory_exists(directory):
            Utils.mkdir_p(directory)
        return directory

    @staticmethod
    def execute(command):
        '''
        Execute a command line and fetch the results
        Returns a dict with keys 'out', 'err' and 'code'.
        '''
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # wait for the process to terminate
        out, err = process.communicate()
        errcode = process.returncode
        out = out.decode('utf-8')
        err = err.decode('utf-8')
        #print ('STDOUT:')
        #print (out)
        #print ('ERR:')
        #print (err)
        #print ('RETURN CODE:', errcode)
        return {'out': out, 'err': err, 'code': errcode}
class ProgressBar:
    '''
    Simple text progress bar; uses IPython's clear_output when available.
    http://www.pypedia.com/index.php/ProgressBar
    '''
    def __init__(self, iterations, msg = ''):
        # iterations: total number of steps; msg: prefix shown before the bar
        self.iterations = iterations
        self.prog_bar = '[]'
        self.msg = msg
        self.fill_char = '*'
        self.width = 40
        self.__update_amount(0)
        # Pick the animation flavor depending on whether IPython is present
        if have_ipython:
            self.animate = self.animate_ipython
        else:
            self.animate = self.animate_noipython

    def animate_ipython(self, iter):
        # Redraw the bar in place for (notebook) IPython sessions
        try:
            clear_output()
        except Exception:
            # terminal IPython has no clear_output
            pass
        print '\r', self,
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def animate_noipython( self, iter ):
        '''
        Redraw the bar on a plain terminal.
        https://github.com/tomevans/pyhm/blob/master/pyhm/ProgressBar.py
        '''
        if sys.platform.lower().startswith( 'win' ):
            print self, '\r',
        else:
            # ESC[A moves the cursor one line up so the next print overwrites
            print self, chr( 27 ) + '[A'
        self.update_iteration( iter )
        # time.sleep( 0.5 )

    def update_iteration(self, elapsed_iter):
        # Refresh the bar and append an "N of M complete" suffix
        self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
        self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)

    def __update_amount(self, new_amount):
        # Rebuild the bar string and overlay the percentage in the middle
        percent_done = int(round((new_amount / 100.0) * 100.0))
        all_full = self.width - 2
        num_hashes = int(round((percent_done / 100.0) * all_full))
        self.prog_bar = self.msg + '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
        pct_place = (len(self.prog_bar) / 2) - len(str(percent_done))
        pct_string = '%d%%' % percent_done
        self.prog_bar = self.prog_bar[0:pct_place] + (pct_string + self.prog_bar[pct_place + len(pct_string):])

    def __str__(self):
        return str(self.prog_bar)
|
kantale/MutationInfo
|
MutationInfo/__init__.py
|
Python
|
mit
| 108,229
|
[
"Biopython",
"VisIt"
] |
6ccca32fafb4716c03b6d5443997177a04fbc3643d59d8cf6e58b21be4f0eeda
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# make yield compatible with Python 2.2
from __future__ import generators
from Numeric import array, sum, sqrt
import tempfile
import os
import sys
from Bio.PDB import *
from AbstractPropertyMap import AbstractPropertyMap
__doc__="""
Calculation of residue depth (using Michel Sanner's MSMS program for the
surface calculation).
Residue depth is the average distance of the atoms of a residue from
the solvent accessible surface.
Residue Depth:
rd=ResidueDepth(model, pdb_file)
print rd[(chain_id, res_id)]
Direct MSMS interface:
Typical use:
surface=get_surface("1FAT.pdb")
Surface is a Numeric array with all the surface
vertices.
Distance to surface:
dist=min_dist(coord, surface)
where coord is the coord of an atom within the volume
bound by the surface (ie. atom depth).
To calculate the residue depth (average atom depth
of the atoms in a residue):
rd=residue_depth(residue, surface)
"""
def _read_vertex_array(filename):
"""
Read the vertex list into a Numeric array.
"""
fp=open(filename, "r")
vertex_list=[]
for l in fp.readlines():
sl=l.split()
if not len(sl)==9:
# skip header
continue
vl=map(float, sl[0:3])
vertex_list.append(vl)
fp.close()
return array(vertex_list)
def get_surface(pdb_file, PDB_TO_XYZR="pdb_to_xyzr", MSMS="msms"):
    """
    Return a Numeric array that represents
    the vertex list of the molecular surface.

    PDB_TO_XYZR --- pdb_to_xyzr executable (arg. to os.system)
    MSMS --- msms executable (arg. to os.system)
    """
    # NOTE(review): tempfile.mktemp() and os.system() with unquoted paths
    # are insecure (temp-file races; shell interpretation of special
    # characters in pdb_file) -- kept as-is to preserve behavior.
    # extract xyz and set radii
    xyz_tmp=tempfile.mktemp()
    PDB_TO_XYZR=PDB_TO_XYZR+" %s > %s"
    make_xyz=PDB_TO_XYZR % (pdb_file, xyz_tmp)
    os.system(make_xyz)
    # make surface
    surface_tmp=tempfile.mktemp()
    # -probe_radius 1.5: solvent probe radius; MSMS' stdout is discarded
    # into a throwaway temp file
    MSMS=MSMS+" -probe_radius 1.5 -if %s -of %s > "+tempfile.mktemp()
    make_surface=MSMS % (xyz_tmp, surface_tmp)
    os.system(make_surface)
    surface_file=surface_tmp+".vert"
    # read surface vertices from vertex file
    surface=_read_vertex_array(surface_file)
    # clean up tmp files
    # ...this is dangerous
    #os.system("rm "+xyz_tmp)
    #os.system("rm "+surface_tmp+".vert")
    #os.system("rm "+surface_tmp+".face")
    return surface
def min_dist(coord, surface):
    """
    Return the smallest Euclidean distance between coord and any
    vertex of surface.
    """
    # Displacement vectors from coord to every surface vertex
    delta=surface-coord
    # Row-wise sum of squared components = squared distances
    sq_dists=sum(delta*delta, 1)
    smallest=min(sq_dists)
    return sqrt(smallest)
def residue_depth(residue, surface):
    """
    Return the residue depth: the average distance to the surface
    over all (unpacked) atoms of the residue.
    """
    atoms=residue.get_unpacked_list()
    total=0
    for atom in atoms:
        total=total+min_dist(atom.get_coord(), surface)
    return total/len(atoms)
def ca_depth(residue, surface):
    """
    Return the distance from the residue's C-alpha atom to the surface,
    or None when the residue has no CA atom.
    """
    if residue.has_id("CA"):
        return min_dist(residue["CA"].get_coord(), surface)
    return None
class ResidueDepth(AbstractPropertyMap):
    """
    Calculate residue and CA depth for all residues.

    Behaves as a property map keyed by (chain_id, residue_id); each
    entry is the tuple (residue depth, CA depth).
    """
    def __init__(self, model, pdb_file):
        # model: Structure model to annotate
        # pdb_file: path to the PDB file the surface is computed from
        depth_dict={}
        depth_list=[]
        depth_keys=[]
        # get_residue
        residue_list=Selection.unfold_entities(model, 'R')
        # make surface from PDB file
        surface=get_surface(pdb_file)
        # calculate rdepth for each residue
        for residue in residue_list:
            # Only amino-acid residues are annotated
            if not is_aa(residue):
                continue
            rd=residue_depth(residue, surface)
            ca_rd=ca_depth(residue, surface)
            # Get the key
            res_id=residue.get_id()
            chain_id=residue.get_parent().get_id()
            depth_dict[(chain_id, res_id)]=(rd, ca_rd)
            depth_list.append((residue, (rd, ca_rd)))
            depth_keys.append((chain_id, res_id))
            # Update xtra information
            residue.xtra['EXP_RD']=rd
            residue.xtra['EXP_RD_CA']=ca_rd
        AbstractPropertyMap.__init__(self, depth_dict, depth_keys, depth_list)
if __name__=="__main__":
    # Demo: compute residue depths for the structure given as the first
    # command line argument and print one (residue, (rd, ca_rd)) per line.
    import sys
    p=PDBParser()
    s=p.get_structure("X", sys.argv[1])
    model=s[0]
    rd=ResidueDepth(model, sys.argv[1])
    for item in rd:
        print item
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/PDB/ResidueDepth.py
|
Python
|
apache-2.0
| 4,506
|
[
"Biopython"
] |
d1322fa0245130db57c70b9458e8e8ffb904998be4ddfd134671175383e61698
|
"""
Utility functions for cloud endpoints.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import os
import six
from DIRAC import S_OK, S_ERROR
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Numeric VM state codes mapped to human-readable state names
# (presumably libcloud NodeState values -- TODO confirm against callers).
STATE_MAP = {
    0: "RUNNING",
    1: "REBOOTING",
    2: "TERMINATED",
    3: "PENDING",
    4: "UNKNOWN",
    5: "STOPPED",
    6: "SUSPENDED",
    7: "ERROR",
    8: "PAUSED",
}
def createMimeData(userDataTuple):
    """Bundle (contents, mime-subtype, filename) triples into one multipart
    MIME message.

    Each triple becomes a text attachment with the given filename.
    Returns S_OK(MIMEMultipart) on success, S_ERROR(message) if any
    attachment cannot be built.
    """
    multipart = MIMEMultipart()
    for contents, mtype, fname in userDataTuple:
        try:
            part = MIMEText(contents, mtype, sys.getdefaultencoding())
            disposition = 'attachment; filename="%s"' % fname
            part.add_header("Content-Disposition", disposition)
            multipart.attach(part)
        except Exception as e:
            return S_ERROR(str(e))
    return S_OK(multipart)
def createPilotDataScript(vmParameters, bootstrapParameters):
    """
    Build the cloud user data (MIME multipart) that bootstraps a DIRAC
    pilot on a freshly started VM: it installs the pilot host cert/key,
    downloads the vm-bootstrap helper scripts and runs vm-bootstrap with
    arguments derived from the VM and bootstrap parameters.

    Returns S_OK(MIMEMultipart) or S_ERROR (via createMimeData).
    """
    userDataDict = {}
    # Arguments to the vm-bootstrap command
    parameters = dict(vmParameters)
    parameters.update(bootstrapParameters)
    bootstrapArgs = {
        "dirac-site": parameters.get("Site"),
        "submit-pool": parameters.get("SubmitPool", ""),
        "ce-name": parameters.get("CEName"),
        "image-name": parameters.get("Image"),
        "vm-uuid": parameters.get("VMUUID"),
        "vmtype": parameters.get("VMType"),
        "vo": parameters.get("VO", ""),
        "running-pod": parameters.get("RunningPod", parameters.get("VO", "")),
        "cvmfs-proxy": parameters.get("CVMFSProxy", "DIRECT"),
        "cs-servers": ",".join(parameters.get("CSServers", [])),
        "number-of-processors": parameters.get("NumberOfProcessors", 1),
        "whole-node": parameters.get("WholeNode", True),
        "required-tag": parameters.get("RequiredTag", ""),
        "release-version": parameters.get("Version"),
        "lcgbundle-version": parameters.get("LCGBundleVersion", ""),
        "release-project": parameters.get("Project"),
        "setup": parameters.get("Setup"),
    }
    # Render the arguments as one "--key=value \" line each
    bootstrapString = ""
    for key, value in bootstrapArgs.items():
        bootstrapString += " --%s=%s \\\n" % (key, value)
    userDataDict["bootstrapArgs"] = bootstrapString
    userDataDict["user_data_commands_base_url"] = bootstrapParameters.get("user_data_commands_base_url")
    if not userDataDict["user_data_commands_base_url"]:
        return S_ERROR("user_data_commands_base_url is not defined")
    # Pilot host certificate and key are embedded verbatim in the script
    with open(bootstrapParameters["CloudPilotCert"]) as cfile:
        userDataDict["user_data_file_hostkey"] = cfile.read().strip()
    with open(bootstrapParameters["CloudPilotKey"]) as kfile:
        userDataDict["user_data_file_hostcert"] = kfile.read().strip()
    sshKey = None
    userDataDict["add_root_ssh_key"] = ""
    if "SshKey" in parameters:
        # Optionally enable root ssh access with the configured public key
        with open(parameters["SshKey"]) as sfile:
            sshKey = sfile.read().strip()
        userDataDict["add_root_ssh_key"] = (
            """
# Allow root login
sed -i 's/PermitRootLogin no/PermitRootLogin yes/g' /etc/ssh/sshd_config
# Copy id_rsa.pub to authorized_keys
echo \" """
            + sshKey
            + """\" > /root/.ssh/authorized_keys
service sshd restart
"""
        )
    # List of commands to be downloaded
    bootstrapCommands = bootstrapParameters.get("user_data_commands")
    if isinstance(bootstrapCommands, six.string_types):
        bootstrapCommands = bootstrapCommands.split(",")
    if not bootstrapCommands:
        return S_ERROR("user_data_commands list is not defined")
    userDataDict["bootstrapCommands"] = " ".join(bootstrapCommands)
    # Shell script run on the VM: write the host key, fetch the bootstrap
    # helper files (retrying on curl exit code 7 = connection failure)
    # and run vm-bootstrap.
    script = (
        """
cat <<X5_EOF >/root/hostkey.pem
%(user_data_file_hostkey)s
%(user_data_file_hostcert)s
X5_EOF
mkdir -p /var/spool/checkout/context
cd /var/spool/checkout/context
for dfile in %(bootstrapCommands)s
do
  echo curl --insecure -s %(user_data_commands_base_url)s/$dfile -o $dfile
  i=7
  while [ $i -eq 7 ]
  do
    curl --insecure -s %(user_data_commands_base_url)s/$dfile -o $dfile
    i=$?
    if [ $i -eq 7 ]; then
      echo curl connection failure for file $dfile
      sleep 10
    fi
  done
  curl --insecure -s %(user_data_commands_base_url)s/$dfile -o $dfile || echo Download of $dfile failed with $? !
done
%(add_root_ssh_key)s
chmod +x vm-bootstrap
/var/spool/checkout/context/vm-bootstrap %(bootstrapArgs)s
#/sbin/shutdown -h now
"""
        % userDataDict
    )
    if "HEPIX" in vmParameters:
        # HEPiX contextualization: install the script as the epilog hook
        script = (
            """
cat <<EP_EOF >>/var/lib/hepix/context/epilog.sh
#!/bin/sh
%s
EP_EOF
chmod +x /var/lib/hepix/context/epilog.sh
"""
            % script
        )
    # Wrap the script so it runs in the background, logging to
    # /etc/joboutputs/user_data.log
    user_data = (
        """#!/bin/bash
mkdir -p /etc/joboutputs
(
%s
) > /etc/joboutputs/user_data.log 2>&1 &
exit 0
"""
        % script
    )
    cloud_config = """#cloud-config
output: {all: '| tee -a /var/log/cloud-init-output.log'}
cloud_final_modules:
  - [scripts-user, always]
"""
    # Also try to add ssh key using standart cloudinit approach(may not work)
    if sshKey:
        cloud_config += (
            """
users:
  - name: diracroot
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock_passwd: true
    ssh-authorized-keys:
      - ssh-rsa %s
"""
            % sshKey
        )
    # print "AT >>> user_data", user_data
    # print "AT >>> cloud_config", cloud_config
    return createMimeData(
        ((user_data, "x-shellscript", "dirac_boot.sh"), (cloud_config, "cloud-config", "cloud-config"))
    )
def createUserDataScript(parameters):
    """
    Build a user data MIME message that only configures ssh access to the
    VM: root login with the configured public key plus an optional sudo
    user added through cloud-init.

    Returns S_OK(MIMEMultipart) or S_ERROR (via createMimeData).
    """
    defaultUser = os.environ.get("USER", parameters.get("User", "root"))
    sshUser = parameters.get("SshUser", defaultUser)
    defaultKey = os.path.expandvars("$HOME/.ssh/id_rsa.pub")
    sshKeyFile = parameters.get("SshKey", defaultKey)
    with open(sshKeyFile) as skf:
        sshKey = skf.read().strip()
    # Shell snippet: allow root+password logins and install the public key
    script = (
        """
# Allow root login
sed -i 's/PermitRootLogin no/PermitRootLogin yes/g' /etc/ssh/sshd_config
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
# Copy id_rsa.pub to authorized_keys
echo \" """
        + sshKey
        + """\" > /root/.ssh/authorized_keys
service sshd restart
"""
    )
    if "HEPIX" in parameters:
        # HEPiX contextualization: install the script as the epilog hook
        script = (
            """
cat <<EP_EOF >>/var/lib/hepix/context/epilog.sh
#!/bin/sh
%s
EP_EOF
chmod +x /var/lib/hepix/context/epilog.sh
"""
            % script
        )
    # Wrap so the script runs in the background and logs its output
    user_data = (
        """#!/bin/bash
mkdir -p /etc/joboutputs
(
%s
) > /etc/joboutputs/user_data.log 2>&1 &
exit 0
"""
        % script
    )
    cloud_config = """#cloud-config
output: {all: '| tee -a /var/log/cloud-init-output.log'}
cloud_final_modules:
  - [scripts-user, always]
"""
    if sshKey:
        # Add a passwordless-sudo user reachable with the same public key
        cloud_config += """
users:
  - name: %s
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock_passwd: false
    ssh-authorized-keys:
      - %s
""" % (
            sshUser,
            sshKey,
        )
    mime = createMimeData(
        ((user_data, "x-shellscript", "dirac_boot.sh"), (cloud_config, "cloud-config", "cloud-config"))
    )
    return mime
def createCloudInitScript(vmParameters, bootstrapParameters):
    """Create a user data script for cloud-init based images.

    Fills the cloudinit.template file (or the template given with the
    CITemplate parameter) with the pilot bootstrap arguments, the pilot
    host cert/key, optional extra packages and an optional ssh user, and
    wraps the result as a cloud-config MIME message.

    Returns S_OK(MIMEMultipart) or S_ERROR (via createMimeData).
    """
    parameters = dict(vmParameters)
    parameters.update(bootstrapParameters)
    extraOpts = ""
    lcgVer = parameters.get("LCGBundleVersion", None)
    if lcgVer:
        extraOpts = "-g %s" % lcgVer
    # add extra yum installable packages
    extraPackages = ""
    if parameters.get("ExtraPackages"):
        packages = parameters.get("ExtraPackages")
        # One "- pkg" yaml list item per comma-separated package name
        extraPackages = "\n".join([" - %s" % pp.strip() for pp in packages.split(",")])
    # add user account to connect by ssh
    sshUserConnect = ""
    sshUser = parameters.get("SshUser")
    sshKeyFile = parameters.get("SshKey")
    sshKey = ""
    if sshKeyFile:
        with open(sshKeyFile) as sshFile:
            sshKey = sshFile.read()
    if sshUser and sshKey:
        sshUserConnect = """
users:
  - name: %s
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock_passwd: false
    ssh-authorized-keys:
      - %s
""" % (
            sshUser,
            sshKey,
        )
    # Template substitution values for cloudinit.template
    bootstrapArgs = {
        "dirac-site": parameters.get("Site"),
        "submit-pool": parameters.get("SubmitPool", ""),
        "ce-name": parameters.get("CEName"),
        "ce-type": parameters.get("InnerCEType", "Singularity"),
        "image-name": parameters.get("Image"),
        "vm-uuid": parameters.get("VMUUID"),
        "vmtype": parameters.get("VMType"),
        "vo": parameters.get("VO", ""),
        "running-pod": parameters.get("RunningPod", parameters.get("VO", "")),
        "cvmfs-proxy": parameters.get("CVMFSProxy", "DIRECT"),
        "cs-servers": ",".join(parameters.get("CSServers", [])),
        "number-of-processors": parameters.get("NumberOfProcessors", 1),
        "whole-node": parameters.get("WholeNode", True),
        "required-tag": parameters.get("RequiredTag", ""),
        "release-version": parameters.get("Version"),
        "extraopts": extraOpts,
        "release-project": parameters.get("Project"),
        "setup": parameters.get("Setup"),
        "user-root": parameters.get("UserRoot", "/cvmfs/cernvm-prod.cern.ch/cvm4"),
        "timezone": parameters.get("Timezone", "UTC"),
        "pilot-server": parameters.get("pilotFileServer", "localhost"),
        "extra-packages": extraPackages,
        "ssh-user": sshUserConnect,
        "max-cycles": parameters.get("MaxCycles", "100"),
    }
    default_template = os.path.join(os.path.dirname(__file__), "cloudinit.template")
    template_path = parameters.get("CITemplate", default_template)
    # Cert/Key need extra indents to keep yaml formatting happy
    with open(bootstrapParameters["CloudPilotCert"]) as cfile:
        raw_str = cfile.read().strip()
        raw_str = raw_str.replace("\n", "\n ")
        bootstrapArgs["hostkey"] = raw_str
    with open(bootstrapParameters["CloudPilotKey"]) as kfile:
        raw_str = kfile.read().strip()
        raw_str = raw_str.replace("\n", "\n ")
        bootstrapArgs["hostcert"] = raw_str
    with open(template_path) as template_fd:
        template = template_fd.read()
    template = template % bootstrapArgs
    mime = createMimeData(((template, "cloud-config", "pilotconfig"),))
    return mime
|
ic-hep/DIRAC
|
src/DIRAC/Resources/Cloud/Utilities.py
|
Python
|
gpl-3.0
| 10,367
|
[
"DIRAC"
] |
77acfe06fa73b4027f68003f5d35a69744ec8a6c4f6867727992fec539186f09
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import warnings
import ConfigParser as cfg
from math import exp, ceil, floor
from collections import OrderedDict
from os import linesep as NL
import numpy as n
import numpy.random as nr
from python_util.options import OptionsParser
class LayerParsingError(Exception):
    """Raised when a layer definition or layer parameter file cannot be parsed."""
class WeightInitializationError(Exception):
    """Raised when weights/biases loaded from an existing net do not match the new net's shapes."""
# A neuron that doesn't take parameters
class NeuronParser:
    """Recognizes a single parameter-free neuron (activation) name."""
    def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
        # type: the literal neuron name this parser accepts (e.g. 'relu').
        # func_str: human-readable description of the activation function.
        self.type = type
        self.func_str = func_str
        self.uses_acts = uses_acts
        self.uses_inputs = uses_inputs

    def parse(self, type):
        """Return a descriptor dict if *type* names this neuron, else None."""
        if type != self.type:
            return None
        return {'type': self.type,
                'params': {},
                'usesActs': self.uses_acts,
                'usesInputs': self.uses_inputs}
# A neuron that takes parameters
class ParamNeuronParser(NeuronParser):
    """Recognizes neuron specs of the form name[p1,p2,...] with float parameters."""
    neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')

    def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
        NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
        match = self.neuron_regex.match(type)
        self.base_type = match.group(1)
        self.param_names = match.group(2).split(',')
        # Declared parameter names must be unique.
        assert len(set(self.param_names)) == len(self.param_names)

    def parse(self, type):
        """Return a descriptor dict if *type* matches base_type[v1,v2,...], else None."""
        match = re.match(r'^%s\s*\[([\d,\.\s\-]*)\]\s*$' % self.base_type, type)
        if not match:
            return None
        try:
            values = [float(v.strip()) for v in match.group(1).split(',')]
        except TypeError:
            return None
        if len(values) != len(self.param_names):
            return None
        return {'type': self.base_type,
                'params': dict(zip(self.param_names, values)),
                'usesActs': self.uses_acts,
                'usesInputs': self.uses_inputs}
class AbsTanhNeuronParser(ParamNeuronParser):
    """Parser for the abstanh[a,b] neuron: f(x) = a * |tanh(b * x)|."""
    def __init__(self):
        ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')

    def parse(self, type):
        parsed = ParamNeuronParser.parse(self, type)
        # abs(tanh(bx)) == abs(tanh(-bx)), and the C++ code assumes b is
        # positive, so normalize the sign of b here.
        if parsed:
            parsed['params']['b'] = abs(parsed['params']['b'])
        return parsed
class ParamParser:
    """Recognizes specs like name[iCount;fRate]; values are given as name[count=3;rate=0.5].

    The first character of each declared parameter encodes its type
    ('i' -> int, 'f' -> float).
    """
    lrs_regex = re.compile(r'^\s*(\w+)\s*(?:\[\s*(\w+(\s*;\w+)*)\s*\])?\s*$')
    param_converters = {'i': int,
                        'f': float}

    def __init__(self, type):
        match = self.lrs_regex.match(type)
        self.base_type = match.group(1)
        declared = match.group(2).split(';') if match.group(2) is not None else []
        # Strip the leading type character to get the bare parameter names.
        self.param_names = [d[1:] for d in declared]
        self.param_types = [self.param_converters[d[0]] for d in declared]
        self.param_regex_inner = ";".join([('\s*%s\s*=\s*[^;,\s=]+\s*' % p) for p in self.param_names])
        self.regex_str = ('^%s\s*(?:\[(%s)\])?\s*$') % (self.base_type, self.param_regex_inner)
        assert len(set(self.param_names)) == len(self.param_names)

    def parse(self, type):
        """Return {'type':..., 'params':...} if *type* matches this spec, else None."""
        match = re.match(self.regex_str, type, flags=re.IGNORECASE)
        if not match:
            return None
        try:
            if match.group(1) is None:
                values = []
            else:
                values = [conv(item.split('=')[1].strip())
                          for conv, item in zip(self.param_types, match.group(1).split(';'))]
        except TypeError:
            return None
        if len(values) != len(self.param_names):
            return None
        return {'type': self.base_type,
                'params': dict(zip(self.param_names, values))}
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
    """SafeConfigParser whose lookup/conversion failures become LayerParsingErrors
    that name the offending layer (section) and parameter (option)."""

    def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
        """Fetch *option* from *section* via getter *f*; fall back to *default*
        when the option is absent, or raise a descriptive LayerParsingError."""
        try:
            return f(self, section, option)
        except cfg.NoOptionError as e:
            if default is None:
                raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
            return default
        except ValueError as e:
            # Conversion failed; report the expected type when we know it.
            if typestr is None:
                raise e
            raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))

    def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
        """Fetch a ','-delimited list, converting each element with *f*."""
        raw = self.safe_get(section, option, default=default)
        if type(raw) == list:
            # Defaults may already be lists; pass them through untouched.
            return raw
        try:
            return [f(item.strip()) for item in raw.split(',')]
        except:
            raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))

    def safe_get_int(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)

    def safe_get_float(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)

    def safe_get_bool(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)

    def safe_get_float_list(self, section, option, default=None):
        return self.safe_get_list(section, option, float, typestr='floats', default=default)

    def safe_get_int_list(self, section, option, default=None):
        return self.safe_get_list(section, option, int, typestr='ints', default=default)

    def safe_get_bool_list(self, section, option, default=None):
        return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
    """Dict-backed stand-in for MyConfigParser, used when a layer dict
    (e.g. a detached neuron layer) must be re-parsed without a config file."""
    def __init__(self, dic):
        self.dic = dic

    def safe_get(self, section, option, default=None):
        # The section name is ignored; everything lives in one flat dict.
        return self.dic.get(option, default)

    def safe_get_int(self, section, option, default=None):
        return int(self.safe_get(section, option, default))

    def safe_get_int_list(self, section, option, default=None):
        return list(self.safe_get(section, option, default))
class LayerParser:
    """Base class for all layer-definition parsers.

    Each parser builds a dictionary (self.dic) describing one layer from the
    layer-definition config file; subclasses add layer-specific keys.
    """
    def __init__(self):
        self.dic = {}
        self.set_defaults()
    # Post-processing step -- this is called after all layers have been initialized
    def optimize(self, layers):
        self.dic['actsTarget'] = -1
        self.dic['actsGradTarget'] = -1
        # Every layer that consumes this layer's output must run with the same
        # number of replicas (GPUs); otherwise the graph is inconsistent.
        if len(set(len(l['gpu']) for l in layers.values() if 'inputs' in l and self.dic['name'] in l['inputs'])) > 1:
#            print set(len(l['gpu']) for l in layers.values())
            raise LayerParsingError("Layer '%s': all next layers must have equal number of replicas." % (self.dic['name']))
    def parse_params(self, vals, parsers, param_name, human_name, num_params=1):
        """Parse the strings in *vals* with the given parsers; a single parsed
        value is broadcast to *num_params* entries. Raises LayerParsingError
        when the list length is wrong or any value fails to parse."""
        dic, name = self.dic, self.dic['name']
#        print vals
        if len(vals) != num_params and len(vals) != 1:
            raise LayerParsingError("Layer '%s': expected list of length %d for %s but got list of length %d."% (name, num_params, param_name, len(vals)))
        parsed = []
#        print vals
        for v in vals:
            for p in parsers:
                # First parser that accepts the string wins.
                parsedv = p.parse(v)
                if parsedv:
                    parsed += [parsedv]
                    break
        if len(parsed) == 1 and num_params > 1:
            parsed = parsed * num_params
        if len(parsed) == num_params:
            return parsed
#        print parsed, vals
        raise LayerParsingError("Layer '%s': unable to parse %s %s=%s." % (name, human_name, param_name, ",".join(vals)))
    # Add parameters from layer parameter file
    def add_params(self, mcp):
        pass
#        self.dic['conserveMem'] = mcp.convnet.op.get_value('conserve_mem') if mcp.convnet is not None else 0
    def init(self, dic):
        """Re-attach this parser to an existing layer dict (used during the
        parameter-file pass); returns self for chaining."""
        self.dic = dic
        return self
    def set_defaults(self):
        self.dic['outputs'] = 0
        self.dic['parser'] = self
        self.dic['requiresParams'] = False
        # Does this layer use its own activity matrix
        # for some purpose other than computing its output?
        # Usually, this will only be true for layers that require their
        # own activity matrix for gradient computations. For example, layers
        # with logistic units must compute the gradient y * (1 - y), where y is
        # the activity matrix.
        #
        # Layers that do not not use their own activity matrix should advertise
        # this, since this will enable memory-saving matrix re-use optimizations.
        #
        # The default value of this property is True, for safety purposes.
        # If a layer advertises that it does not use its own activity matrix when
        # in fact it does, bad things will happen.
        self.dic['usesActs'] = True
        # Does this layer use the activity matrices of its input layers
        # for some purpose other than computing its output?
        #
        # Again true by default for safety
        self.dic['usesInputs'] = True
        # Force this layer to use its own activity gradient matrix,
        # instead of borrowing one from one of its inputs.
        #
        # This should be true for layers where the mapping from output
        # gradient to input gradient is non-elementwise.
        self.dic['forceOwnActs'] = True
        # Does this layer need the gradient at all?
        # Should only be true for layers with parameters (weights).
        self.dic['gradConsumer'] = False
        # The gpu indices on which this layer runs
        self.dic['gpu'] = [-1]
    def parse(self, name, mcp, prev_layers, model=None):
        """Fill in the keys common to all layers; subclasses extend this."""
        self.prev_layers = prev_layers
        self.dic['name'] = name
        self.dic['type'] = mcp.safe_get(name, 'type')
        self.dic['id'] = len(prev_layers)
        return self.dic
    def verify_float_range(self, v, param_name, _min, _max):
        self.verify_num_range(v, param_name, _min, _max, strconv=lambda x: '%.3f' % x)
    def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
        # Accepts either a scalar or a list of scalars (one per input).
        if type(v) == list:
            for i,vv in enumerate(v):
                self._verify_num_range(vv, param_name, _min, _max, i, strconv=strconv)
        else:
            self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
    def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
        # NOTE(review): parameter 'input' shadows the builtin; it is the input
        # index used only to label the error message.
        layer_name = self.dic['name'] if input < 0 else '%s[%d]' % (self.dic['name'], input)
        if _min is not None and _max is not None and (v < _min or v > _max):
            raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
        elif _min is not None and v < _min:
            raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
        elif _max is not None and v > _max:
            raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
    def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
        layer_name = self.dic['name'] if len(self.dic['inputs']) == 0 else '%s[%d]' % (self.dic['name'], input_idx)
        if value % div != 0:
            raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, str(div) if div_name is None else "'%s'" % div_name))
    def verify_str_in(self, value, param_name, lst, input_idx=-1):
        lname = self.dic['name'] if input_idx == -1 else ('%s[%d]' % (self.dic['name'], input_idx))
        if value not in lst:
            raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (lname, param_name, ", ".join("'%s'" % s for s in lst)))
    def verify_int_in(self, value, param_name, lst):
        if value not in lst:
            raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
    def verify_all_ints_in(self, values, param_name, lst):
        if len([v for v in values if v not in lst]) > 0:
            raise LayerParsingError("Layer '%s': all parameters to '%s' must be among %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
    def verify_input_dims(self, dims):
        # dims entries that are None are not checked.
        for i,d in enumerate(dims):
            if d is not None and self.dic['numInputs'][i] != d: # first input must be labels
                raise LayerParsingError("Layer '%s': dimensionality of input %d must be %d" % (self.dic['name'], i, d))
    # This looks for neuron=x arguments in various layers, and creates
    # separate layer definitions for them.
    @staticmethod
    def detach_neuron_layers(layers):
        for name,l in layers.items():
            if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
                NeuronLayerParser().detach_neuron_layer(name, layers)
    @staticmethod
    def parse_layers(layer_cfg_path, param_cfg_path, model, layers={},
                     init_layers=None):
        """Parse the layer-definition and layer-parameter files into a dict of
        layer dicts; optionally copy weights/biases from *init_layers*.

        NOTE(review): the mutable default 'layers={}' is shared across calls
        that omit the argument (the 'if len(layers) == 0' guard depends on it);
        changing it would alter observable behavior -- left as-is.
        """
#        try:
        if not os.path.exists(layer_cfg_path):
            raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
        if not os.path.exists(param_cfg_path):
            raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
        if len(layers) == 0:
            mcp = MyConfigParser(dict_type=OrderedDict)
            mcp.readfp(open(layer_cfg_path))
            for name in mcp.sections():
                if not mcp.has_option(name, 'type'):
                    raise LayerParsingError("Layer '%s': no type given" % name)
                ltype = mcp.safe_get(name, 'type')
                if ltype not in layer_parsers:
                    raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
                layers[name] = layer_parsers[ltype]().parse(name, mcp, layers, model)
            LayerParser.detach_neuron_layers(layers)
            for l in layers.values():
                l['parser'].optimize(layers)
                del l['parser']
            for name,l in layers.items():
                if not l['type'].startswith('cost.'):
                    found = max(name in l2['inputs'] for l2 in layers.values() if 'inputs' in l2)
                    if not found:
                        warnings.warn("Layer '%s' of type '%s' is unused" % (name, l['type']))
#                        raise LayerParsingError("Layer '%s' of type '%s' is unused" % (name, l['type']))
        if init_layers: # init weights from existing files if needed
            old_name2weights_dic = {}
            for old_l in init_layers.values(): # make dictionary for fast query
                if old_l.has_key('weights'):
                    old_name2weights_dic[old_l['name']] = [old_l['weights']]
                if old_l.has_key('biases'):
                    try:
                        old_name2weights_dic[old_l['name']] += [old_l['biases']]
                    except:
                        old_name2weights_dic[old_l['name']] = [old_l['biases']]
            for l in layers.values(): # init weights of the new layer
                lname = l['name']
                if old_name2weights_dic.has_key(lname):
                    old_weights = old_name2weights_dic[lname]
                    if l.has_key('weights'):
                        if len(l['weights']) == len(old_weights[0]):
                            # Every weight matrix must match shape exactly.
                            for (new_w, old_w) in zip(l['weights'], old_weights[0]):
                                if new_w.shape != old_w.shape:
                                    raise WeightInitializationError('WEIGHTS: sizes not equal')
                            l['weights'] = old_weights[0]
                            print 'weights of layer %s successfully loaded' % lname
                        else:
                            raise WeightInitializationError('WEIGHTS: sizes not equal')
                    if l.has_key('biases'):
                        # Biases, when present, are the last entry in the list.
                        if l['biases'].shape == old_weights[-1].shape:
                            l['biases'] = old_weights[-1]
                            print ' biases of layer %s successfully loaded' % lname
                        else:
                            raise WeightInitializationError('BIASES: sizes not equal')
        mcp = MyConfigParser(dict_type=OrderedDict)
        mcp.readfp(open(param_cfg_path))
#        mcp.convnet = model
        for name,l in layers.items():
            if not mcp.has_section(name) and l['requiresParams']:
                raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (name, l['type'], param_cfg_path))
            lp = layer_parsers[l['type']]().init(l)
            lp.add_params(mcp)
#        except LayerParsingError, e:
#            print e
#            # sys.exit(1)
#            raise e
        return layers
    @staticmethod
    def register_layer_parser(ltype, cls):
        """Register a parser class for layer type *ltype* (must be unique)."""
        if ltype in layer_parsers:
            raise LayerParsingError("Layer type '%s' already registered" % ltype)
        layer_parsers[ltype] = cls
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
    """Base class for parsers of layers that consume other layers' outputs.

    Adds input wiring, per-input replica checks, and the activity/gradient
    matrix sharing optimization.
    """
    def __init__(self, num_inputs=-1):
        # num_inputs: required number of inputs, or -1 for any number.
        LayerParser.__init__(self)
        self.num_inputs = num_inputs
    def verify_num_params(self, params, auto_expand=True):
        """Check each listed parameter has one entry per input; a single entry
        is broadcast to all inputs when auto_expand is True."""
        for param in params:
            if len(self.dic[param]) != len(self.dic['inputs']):
                if auto_expand and len(self.dic[param]) == 1:
                    self.dic[param] *= len(self.dic['inputs'])
                else:
                    raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))
    # layers: dictionary: name -> layer
    def optimize(self, layers):
        LayerParser.optimize(self, layers)
        dic = self.dic
        # Check if I have an input that no one else uses.
        #print "Layer %s optimizing" % dic['name']
        if not dic['forceOwnActs']:
            for i, inp in enumerate(dic['inputLayers']):
                if inp['outputs'] == dic['outputs'] and sum(('inputs' in ll) and (inp['name'] in ll['inputs']) for ll in layers.itervalues()) == 1:
                    # I can share my activity matrix with this layer
                    # if it does not use its activity matrix, and I
                    # do not need to remember my inputs.
                    # TODO: a dropout layer should always be able to overwrite
                    # its input. Make it so.
#                    print "Layer %s(uses inputs=%d), input %s(uses acts = %d)" % (dic['name'], dic['usesInputs'], inp['name'], inp['usesActs'])
                    if not inp['usesActs'] and not dic['usesInputs']:
                        dic['actsTarget'] = i
                        print "Layer %s using acts from layer %s" % (dic['name'], inp['name'])
#                        print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
                    # I can share my gradient matrix with this layer if we're on the same GPU.
                    # This is different from the logic for actsTarget because this guy doesn't
                    # have an actsGrad matrix on my GPU if our GPUs are different, so there's
                    # nothing to share.
                    if dic['gpu'] == inp['gpu']:
                        dic['actsGradTarget'] = i
#                    print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])
    def parse(self, name, mcp, prev_layers, model=None):
        """Wire up this layer's inputs and replica (GPU) configuration."""
        dic = LayerParser.parse(self, name, mcp, prev_layers, model)
        dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
        for inp in dic['inputs']:
            if inp not in prev_layers:
                raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
        dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
        # Default GPU assignment is inherited from the first input layer.
        dic['gpu'] = mcp.safe_get_int_list(name, 'gpu', default=dic['inputLayers'][0]['gpu'])
        dic['gpus'] = ", ".join('%s' % d for d in dic['gpu'])
        dic['numReplicas'] = len(dic['gpu'])
        if len(set(dic['gpu'])) != len(dic['gpu']):
            raise LayerParsingError("Layer '%s': all replicas must run on different GPUs." % (name))
        for inp in dic['inputs']:
            # Data layers do not explicitly define how many replicas they have.
            # The number of replicas for a data layer is given by the number of replicas
            # in the next layer(s). So we set that here.
            inpl = prev_layers[inp]
            if inpl['type'] == 'data':
                inpl['numReplicas'] = dic['numReplicas']
            if inpl['numReplicas'] % dic['numReplicas'] != 0:
                raise LayerParsingError("Layer '%s': number of replicas (%d) must divide number of replicas in all input layers (input %s has %d replicas)." % (name, dic['numReplicas'], inpl['name'], inpl['numReplicas']))
        if len(set(inp['numReplicas'] for inp in dic['inputLayers'])) != 1:
            raise LayerParsingError("Layer '%s': all input layers must have equal numbers of replicas." % (name))
        # Need to also assert that all *next* layers have equal number of replicas but this is hard so it's done in Layer.optimize
        for inp in dic['inputLayers']:
            if inp['outputs'] == 0:
                raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, inp['name']))
        dic['numInputs'] = [inp['outputs'] for inp in dic['inputLayers']]
        # Layers can declare a neuron activation function to apply to their output, as a shortcut
        # to avoid declaring a separate neuron layer above themselves.
        dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
        if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
            raise LayerParsingError("Layer '%s': number of inputs must be %d" % (name, self.num_inputs))
        if model:
            self.verify_all_ints_in(dic['gpu'], 'gpu', range(len(model.op.get_value('gpu'))))
        return dic
    def verify_img_size(self):
        """Check that input dimensionality factors as channels * imgSize^2."""
        dic = self.dic
        if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
            raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))
    @staticmethod
    def grad_consumers_below(dic):
        """True if this layer or any layer below it consumes gradients."""
        if dic['gradConsumer']:
            return True
        if 'inputLayers' in dic:
            return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])
    def verify_no_grads(self):
        if LayerWithInputParser.grad_consumers_below(self.dic):
            raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
    """Parses a bed-of-nails subsampling layer: keeps every stride-th pixel."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['forceOwnActs'] = False
        dic['usesActs'] = False
        dic['usesInputs'] = False
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        dic['stride'] = mcp.safe_get_int(name, 'stride')
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        # Computed values
        # NOTE: Python 2 integer division throughout.
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        # Ceiling division: number of kept pixels per row/column.
        dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
        # Offset that centers the sampling grid within the image.
        dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
        dic['outputs'] = dic['channels'] * dic['outputsX']**2
        self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
        self.verify_img_size()
        print "Initialized bed-of-nails layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
        return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], 'filterSize', [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class HorizontalReflectionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, 3)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_img_size()
print "Initialized horizontal reflection layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
    """Parses a resize layer that rescales the image by a fixed factor."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['forceOwnActs'] = False
        dic['usesActs'] = False
        dic['usesInputs'] = False
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        # Python 2 integer division.
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        dic['scale'] = mcp.safe_get_float(name, 'scale')
        # Target side length after rescaling (scale > 1 shrinks the image).
        dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
        dic['tgtPixels'] = dic['tgtSize']**2
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        # Really not recommended to use this for such severe scalings
        self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
        dic['outputs'] = dic['channels'] * dic['tgtPixels']
        self.verify_img_size()
        self.verify_no_grads()
        print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
        return dic
class RandomScaleLayerParser(LayerWithInputParser):
    """Parses a random-scale augmentation layer: rescales by a random factor
    up to maxScale, producing a fixed tgtSize x tgtSize output."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['forceOwnActs'] = False
        dic['usesActs'] = False
        dic['usesInputs'] = False
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        # Computed values (Python 2 integer division).
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
        dic['tgtSize'] = mcp.safe_get_int(name, 'tgtSize')
        # Smallest image side after the strongest down-scaling.
        min_size = int(floor(dic['imgSize'] / dic['maxScale']))
        max_size = dic['imgSize'] #int(floor(dic['imgSize'] * dic['maxScale']))
        if dic['tgtSize'] < min_size:
            raise LayerParsingError("Layer '%s': target size must be greater than minimum image size after rescaling (%d)" % (name, min_size))
        if dic['tgtSize'] > max_size:
            raise LayerParsingError("Layer '%s': target size must be smaller than maximum image size after rescaling (%d)" % (name, max_size))
        dic['tgtPixels'] = dic['tgtSize']**2
        self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
        dic['outputs'] = dic['channels'] * dic['tgtPixels']
        self.verify_img_size()
        self.verify_no_grads()
        print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
        return dic
class CropLayerParser(LayerWithInputParser):
    """Parses a cropping layer: extracts a sizeX x sizeX window at (startX, startY)."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['forceOwnActs'] = False
        dic['usesActs'] = False
        dic['usesInputs'] = False
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        dic['startX'] = mcp.safe_get_int(name, 'startX')
        # startY defaults to startX (square crop origin on the diagonal).
        dic['startY'] = mcp.safe_get_int(name, 'startY', default=dic['startX'])
        dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
        # Computed values (Python 2 integer division).
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        dic['outputs'] = dic['channels'] * (dic['sizeX']**2)
        self.verify_num_range(dic['startX'], 'startX', 0, dic['imgSize']-1)
        self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
        self.verify_num_range(dic['startY'], 'startY', 0, dic['imgSize']-1)
        self.verify_img_size()
        self.verify_no_grads()
        # NOTE(review): only startX + sizeX is bounds-checked here; there is
        # no corresponding startY + sizeX check -- confirm intended.
        if dic['startX'] + dic['sizeX'] > dic['imgSize']:
            raise LayerParsingError("Layer '%s': startX (%d) + sizeX (%d) > imgSize (%d)" % (name, dic['startX'], dic['sizeX'], dic['imgSize']))
        print "Initialized cropping layer '%s', producing %dx%d %d-channel output" % (name, dic['sizeX'], dic['sizeX'], dic['channels'])
        return dic
class ColorTransformLayerParser(LayerWithInputParser):
    """Base for fixed color-space transform layers: 3-channel in, 3-channel out,
    dimensions unchanged, no gradient propagation."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)

    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['forceOwnActs'] = False
        dic['usesActs'] = False
        dic['usesInputs'] = False
        # Computed values: input is always interpreted as 3-channel images.
        dic['channels'] = 3
        dic['imgPixels'] = dic['numInputs'][0] / 3
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        dic['outputs'] = dic['numInputs'][0]
        self.verify_img_size()
        self.verify_no_grads()
        return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class NeuronLayerParser(LayerWithInputParser):
    """Parses an explicit neuron (activation) layer, and handles detaching
    inline 'neuron=...' declarations into standalone neuron layers."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    @staticmethod
    def get_unused_layer_name(layers, wish):
        """Return *wish*, or the first free name of the form 'wish.N' (N < 100)."""
        if wish not in layers:
            return wish
        for i in xrange(1, 100):
            name = '%s.%d' % (wish, i)
            if name not in layers:
                return name
        raise LayerParsingError("This is insane.")
    def parse_neuron(self, neuron_str):
        """Parse *neuron_str* with the registered neuron parsers and store the
        result in self.dic; raises LayerParsingError (listing all valid
        neuron types) when nothing matches."""
        # NOTE(review): loop variable 'n' shadows the module's numpy alias
        # within this method; numpy is not used here so it is harmless.
        for n in neuron_parsers:
            p = n.parse(neuron_str)
            if p: # Successfully parsed neuron, return it
                self.dic['neuron'] = p
                self.dic['usesActs'] = self.dic['neuron']['usesActs']
                self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
                return
        # Could not parse neuron
        # Print available neuron types
        colnames = ['Neuron type', 'Function']
        m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
        ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
        fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
        usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
        raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))
    def detach_neuron_layer(self, src_name, layers):
        """Create a standalone neuron layer fed by *src_name* and rewire all
        layers that consumed *src_name* to consume the new layer instead."""
        dic = self.dic
#        self.set_defaults()
        dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % src_name)
        dic['type'] = 'neuron'
        dic['inputs'] = src_name
        dic['neuron'] = layers[src_name]['neuron']
        dic['gpu'] = layers[src_name]['gpu']
        # Yes it's not entirely correct to pass all of layers as prev_layers, but it's harmless
        dic = self.parse(dic['name'], FakeConfigParser(dic), layers)
        dic['src_layer'] = src_name
        # Link upper layers to this new one
        for l in layers.values():
            if 'inputs' in l:
                l['inputs'] = [inp if inp != src_name else dic['name'] for inp in l['inputs']]
                l['inputLayers'] = [inp if inp['name'] != src_name else dic for inp in l['inputLayers']]
        layers[dic['name']] = dic
    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['outputs'] = dic['numInputs'][0]
        self.parse_neuron(dic['neuron'])
        dic['forceOwnActs'] = False
        print "Initialized neuron layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
        return dic
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['requiresParams'] = True
print "Initialized elementwise sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
    """Parser for layers taking an elementwise maximum over their inputs."""
    def __init__(self):
        LayerWithInputParser.__init__(self)

    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        num_in = len(dic['inputs'])
        # A max over fewer than two operands is meaningless.
        if num_in < 2:
            raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, num_in))
        if len(set(dic['numInputs'])) != 1:
            raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
        dic['outputs'] = dic['numInputs'][0]
        print ("Initialized elementwise max layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs']))
        return dic
class SumLayerParser(LayerWithInputParser):
    """Parser for layers summing each group of `stride` consecutive input
    values into a single output value."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)

    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        stride = mcp.safe_get_int(name, 'stride', default=1)
        dic['stride'] = stride
        # The input length must split evenly into groups of `stride`.
        self.verify_divisible(dic['numInputs'][0], stride, 'input dimensionality', 'stride')
        dic['outputs'] = dic['numInputs'][0] / stride
        print ("Initialized sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs']))
        return dic
class DropoutLayerParser(LayerWithInputParser):
    """Parser for dropout layers: each unit is kept with probability `keep`."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)

    def add_params(self, mcp):
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        # 'enable' allows dropout to be switched off (e.g. for testing);
        # 'keep' is the per-unit retention probability.
        dic['enable'] = mcp.safe_get_bool(name, 'enable', default=True)
        dic['keep'] = mcp.safe_get_float(name, 'keep', default=0.5)

    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        # Dropout is applied in place: it needs neither its inputs nor its
        # acts saved, and its output size mirrors the input.
        dic.update(requiresParams=True, usesInputs=False, usesActs=False, forceOwnActs=False)
        dic['outputs'] = dic['numInputs'][0]
        print ("Initialized %s layer '%s' on GPUs %s, producing %d outputs" % (dic['type'], name, dic['gpus'], dic['outputs']))
        return dic
class Dropout2LayerParser(DropoutLayerParser):
    """Parser for the 'dropout2' layer type; configuration is identical to
    the regular dropout layer."""
    def __init__(self):
        DropoutLayerParser.__init__(self)
class WeightLayerParser(LayerWithInputParser):
LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
@staticmethod
def get_layer_name(name_str):
m = WeightLayerParser.LAYER_PAT.match(name_str)
if not m:
return None
return m.group(1), m.group(2)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['momW'] = mcp.safe_get_float_list(name, 'momW')
dic['momB'] = mcp.safe_get_float(name, 'momB')
dic['superEps'] = mcp.safe_get_float(name, 'superEps', default=0.0)
dic['superMom'] = mcp.safe_get_float(name, 'superMom', default=0.0)
dic['wc'] = mcp.safe_get_float_list(name, 'wc', default=[0.0] * len(dic['inputs']))
dic['wball'] = mcp.safe_get_float_list(name, 'wball', default=[0.0] * len(dic['inputs']))
self.verify_num_params(['momW', 'wc', 'wball'])
# dic['wballNormed'] = [wball * nweights for wball,nweights in zip(dic['wball'], dic['weightsPerFilter'])]
dic['wballNormed'] = dic['wball']
# Convert from old-style 0.001,0.02 hyperparam specification to new-stye
# const[base=0.001],const[base=0.02] and so forth
def convert_scalars_to_schedules(scalars):
parts = scalars.split(',')
for i,p in enumerate(parts):
p = p.strip()
if re.match('(?:\d*\.)?\d+$', p):
parts[i] = 'const[base=%s]' % p
return parts
dic['epsW'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsW')), lrs_parsers, 'epsW', 'learning rate schedule', num_params=len(dic['inputs']))
dic['epsB'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsB')), lrs_parsers, 'epsB', 'learning rate schedule', num_params=1)[0]
dic['updatePeriod'] = mcp.safe_get_int(name, 'updatePeriod', default=0) # 0 means update as often as possible
# TODO: assert that updatePeriod is a multiple of active pass period, which is unknown here.
# the assert has to go in some post-processing step..
dic['gradConsumer'] = dic['epsB']['params']['base'] > 0 or any(w['params']['base'] > 0 for w in dic['epsW'])
@staticmethod
def unshare_weights(layer, layers, matrix_idx=None):
def unshare(layer, layers, indices):
for i in indices:
if layer['weightSourceLayers'][i] >= 0:
src_matrix_idx = layer['weightSourceMatrixIndices'][i]
layer['weightSourceLayers'][i] = ""
layer['weightSourceMatrixIndices'][i] = -1
layer['weights'][i] = layer['weights'][i].copy()
layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, layer['weightSourceLayers'][i], src_matrix_idx)
else:
print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
if 'weightSourceLayers' in layer:
unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])
# Load weight/biases initialization module
def call_init_func(self, param_name, shapes, input_idx=-1):
dic = self.dic
func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
m = func_pat.match(dic[param_name])
if not m:
raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
module, func = m.group(1), m.group(2)
params = m.group(3).split(',') if m.group(3) is not None else []
try:
mod = __import__(module)
return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
except (ImportError, AttributeError, TypeError), e:
raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))
def make_weights(self, initW, rows, cols, order='C'):
dic = self.dic
dic['weights'], dic['weightsInc'] = [], []
if dic['initWFunc']: # Initialize weights from user-supplied python function
# Initialization function is supplied in the format
# module.func
for i in xrange(len(dic['inputs'])):
dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
if type(dic['weights'][i]) != n.ndarray:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
if dic['weights'][i].dtype != n.float32:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
# Convert to desired order
dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
else:
for i in xrange(len(dic['inputs'])):
if dic['weightSourceLayers'][i] != '': # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayers'][i]] if dic['weightSourceLayers'][i] != dic['name'] else dic
dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
else:
dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
def make_biases(self, rows, cols, order='C'):
dic = self.dic
if dic['initBFunc']:
dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
if type(dic['biases']) != n.ndarray:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
if dic['biases'].dtype != n.float32:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
dic['biases'] = n.require(dic['biases'], requirements=order)
dic['biasesInc'] = n.zeros_like(dic['biases'])
print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
else:
if dic['weightSourceLayers'][0] != '': # Shared bias matrix
src_layer = self.prev_layers[dic['weightSourceLayers'][0]] if dic['weightSourceLayers'][0] != dic['name'] else dic
dic['biases'] = src_layer['biases']
dic['biasesInc'] = src_layer['biasesInc']
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][0], dic['biases'].shape[0], dic['biases'].shape[1], rows, cols))
print "Layer '%s' initialized bias matrix from %s" % (dic['name'], dic['weightSource'][0])
else:
dic['biases'] = dic['initB'] * n.ones((rows, cols), order=order, dtype=n.single)
dic['biasesInc'] = n.zeros_like(dic['biases'])
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['gradConsumer'] = True
dic['usesActs'] = False
dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
# Find shared weight matrices
dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
self.verify_num_params(['initW'])
self.verify_num_params(['weightSource'], auto_expand=False)
dic['weightSourceLayers'] = []
dic['weightSourceMatrixIndices'] = []
for i, src_name in enumerate(dic['weightSource']):
src_layer_matrix_idx = -1
src_layer_name = ''
if src_name != '':
src_layer_match = WeightLayerParser.get_layer_name(src_name)
if src_layer_match is None:
raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
src_layer_name = src_layer_match[0]
src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
if src_layer_name not in prev_layers and src_layer_name != name:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
# src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
src_layer = prev_layers[src_layer_name] if src_layer_name != name else dic
if src_layer['gpu'] != dic['gpu']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' runs on GPUs %s, while '%s' runs on GPUs %s." % (name, src_layer_name, src_layer['gpu'], name, dic['gpu']))
if src_layer['type'] != dic['type']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
if src_layer_name == name and src_layer_matrix_idx >= i:
raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
dic['weightSourceLayers'] += [src_layer_name]
dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
return dic
class FCLayerParser(WeightLayerParser):
    """Parser for fully-connected layers."""
    def __init__(self):
        WeightLayerParser.__init__(self)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
        num_out = mcp.safe_get_int(name, 'outputs')
        dic['outputs'] = num_out
        dic['weightsPerFilter'] = dic['numInputs']
        self.verify_num_range(num_out, 'outputs', 1, None)
        # One (numInputs[i] x outputs) weight matrix per input, column-major,
        # plus a single 1 x outputs bias row.
        num_in = len(dic['numInputs'])
        self.make_weights(dic['initW'], dic['numInputs'], [num_out] * num_in, order='F')
        self.make_biases(1, num_out, order='F')
        print ("Initialized fully-connected layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], num_out))
        return dic
class SplitFCLayerParser(WeightLayerParser):
    """Parser for fully-connected layers split into `parts` independent pieces."""
    def __init__(self):
        WeightLayerParser.__init__(self)
    
    def parse(self, name, mcp, prev_layers, model):
        dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
        parts = mcp.safe_get_int(name, 'parts')
        dic['parts'] = parts
        # The configured 'outputs' value is per part; the layer produces
        # parts * outputs values in total.
        dic['outputs'] = mcp.safe_get_int(name, 'outputs') * parts
        dic['weightsPerFilter'] = dic['numInputs']
        self.verify_num_range(parts, 'parts', 1, None)
        
        cols = [dic['outputs'] / parts] * len(dic['numInputs'])
        self.make_weights(dic['initW'], dic['numInputs'], cols, order='F')
        self.make_biases(1, dic['outputs'], order='F')
        
        # Every input must split evenly across the parts.
        for idx in xrange(len(dic['numInputs'])):
            self.verify_divisible(dic['numInputs'][idx], parts, 'numInputs', 'parts', input_idx=idx)
        
        print ("Initialized split fully-connected layer '%s' on GPUs %s, producing %d outputs in %d parts" % (name, dic['gpus'], dic['outputs'], parts))
        return dic
class LocalLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, lname):
layer = layers[lname]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp,inpname in enumerate(layer['inputs']):
src_layer_name = layer['weightSourceLayers'][inp]
if src_layer_name != '':
src_layer = layers[src_layer_name]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_name)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
layer['weights'][inp] = n.require(n.reshape(n.tile(n.reshape(layer['weights'][inp], (1, n.prod(layer['weights'][inp].shape))), (layer['modules'], 1)),
(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters'])),
requirements='C')
layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
# Also call this function on any layers sharing my weights
for l in layers:
if 'weightSourceLayers' in l and lname in l['weightSourceLayers']:
LocalLayerParser.conv_to_local(layers, l)
return layer
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
dic['modulesX'] = mcp.safe_get_int(name, 'modulesX', default=0)
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
self.verify_num_range(dic['modulesX'], 'modulesX', 0, None)
for i in xrange(len(dic['filters'])):
self.verify_divisible(dic['filters'][i], 16, 'filters', input_idx=i)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
if dic['modulesX'] <= 0:
dic['modulesX'] = [1 + int(ceil((2*padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
else:
dic['modulesX'] = [dic['modulesX']] * len(dic['inputs'])
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
# dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
# if dic['filterSize'][i] > totalPadding[i] + dic['imgSize'][i]:
# raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + padding (%d)" % (name, i, dic['filterSize'][i], dic['padding'][i] + dic['imgSize'][i]))
if -dic['padding'][i] + dic['stride'][i] * (dic['modulesX'] - 1) + dic['filterSize'][i] < dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': %dx%d output map with padding=%d, stride=%d does not cover entire input image." % (name, i, dic['modulesX'], dic['outputsX'], dic['padding'][i], dic['stride'][i]))
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
# dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
dic['weightsPerFilter'] = [fc * (fz**2) for fc, fz in zip(dic['filterChannels'], dic['filterSize'])]
return dic
class ConvLayerParser(LocalLayerParser):
    """Parser for convolutional layers (filters shared across all positions)."""
    def __init__(self):
        LocalLayerParser.__init__(self)
        
    def add_params(self, mcp):
        LocalLayerParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        num_in = len(dic['inputs'])
        # Optional per-input bounds on the L2 norm of each filter's weights.
        dic['wcNormMax'] = mcp.safe_get_float_list(name, 'wcNormMax', default=[0.0] * num_in)
        dic['wcNormMin'] = mcp.safe_get_float_list(name, 'wcNormMin', default=[0.0] * num_in)
        self.verify_num_params(['wcNormMax', 'wcNormMin'])
        for lo, hi in zip(dic['wcNormMin'], dic['wcNormMax']):
            if lo > hi:
                raise LayerParsingError("Layer '%s': wcNormMin must be <= wcNormMax." % (name))
        
    def parse(self, name, mcp, prev_layers, model):
        dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
        dic['sumWidth'] = mcp.safe_get_int(name, 'sumWidth')
        dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
        
        # With shared biases there is one bias per filter; otherwise one per
        # filter per output position.
        if dic['sharedBiases']:
            num_biases = dic['filters']
        else:
            num_biases = dic['modules'] * dic['filters']

        rows = [fp * fc for fp, fc in zip(dic['filterPixels'], dic['filterChannels'])]
        self.make_weights(dic['initW'], rows, [dic['filters']] * len(dic['inputs']), order='C')
        self.make_biases(num_biases, 1, order='C')

        print ("Initialized convolutional layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters']))
        return dic    
class LocalUnsharedLayerParser(LocalLayerParser):
    """Parser for locally-connected layers with unshared (per-position) filters."""
    def __init__(self):
        LocalLayerParser.__init__(self)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
        # Each of the `modules` output positions owns a private filter bank,
        # so the weight matrix has modules * filterPixels * filterChannels rows.
        rows = [dic['modules'] * fp * fc for fp, fc in zip(dic['filterPixels'], dic['filterChannels'])]
        cols = [dic['filters']] * len(dic['inputs'])
        self.make_weights(dic['initW'], rows, cols, order='C')
        self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
        print ("Initialized locally-connected layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters']))
        return dic  
class DataLayerParser(LayerParser):
    """Parser for input (data) layers, which expose a [start, end) slice of
    one of the data provider's data streams."""
    def __init__(self):
        LayerParser.__init__(self)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerParser.parse(self, name, mcp, prev_layers, model)
        data_idx = mcp.safe_get_int(name, 'dataIdx')
        dic['dataIdx'] = data_idx
        dic['start'] = mcp.safe_get_int(name, 'start', default=0)
        # By default the layer spans the full dimensionality of stream dataIdx.
        dic['end'] = mcp.safe_get_int(name, 'end', default=model.train_data_provider.get_data_dims(idx=data_idx))
        dic['outputs'] = dic['end'] - dic['start']
        print ("Initialized data layer '%s', producing %d outputs" % (name, dic['outputs']))
        return dic
class SoftmaxLayerParser(LayerWithInputParser):
    """Parser for softmax layers; output size matches the single input."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        src = dic['inputLayers'][0]
        dic['outputs'] = src['outputs']
        print ("Initialized softmax layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs']))
        return dic
class ConcatentionLayerParser(LayerWithInputParser):
    """Parser for layers that concatenate the outputs of all their inputs.
    (The class name's spelling is kept as-is: it is the public identifier.)"""
    def __init__(self):
        LayerWithInputParser.__init__(self)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        sizes = [l['outputs'] for l in dic['inputLayers']]
        dic['outputs'] = sum(sizes)
        # copyOffsets[i] = position where input i's block starts in the
        # concatenated output (running total of preceding sizes).
        offsets, total = [], 0
        for s in sizes:
            offsets.append(total)
            total += s
        dic['copyOffsets'] = offsets
        print ("Initialized concatenation layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs']))
        return dic
class PassThroughLayerParser(LayerWithInputParser):
    """Parser for pass-through layers, which forward their inputs unchanged."""
    def __init__(self):
        LayerWithInputParser.__init__(self)
        
    # Note: this doesn't verify all the necessary constraints. Layer construction may still fail in C++ code.
    # For example, it does not verify that every layer only has one pass-through parent. Obviously having 
    # two such parents is incoherent.
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        if len(dic['gpu']) != len(dic['inputLayers'][0]['gpu']):
            raise LayerParsingError("Layer '%s': number of replicas in pass-through layer must be equivalent to number of replicas in input layers." % dic['name'])
        my_gpus = set(dic['gpu'])
        for inp in dic['inputLayers']:
            # Two pass-through layers may not consume the same input while
            # operating on overlapping GPU sets.
            for other in prev_layers.values():
                if other['type'] != 'pass' or inp['name'] not in other['inputs']:
                    continue
                if my_gpus.intersection(other['gpu']):
                    raise LayerParsingError("Layer '%s' conflicts with layer '%s'. Both pass-through layers take layer '%s' as input and operate on an overlapping set of GPUs." % (dic['name'], other['name'], inp['name']))
        dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
        print ("Initialized pass-through layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs']))
        return dic
class PoolLayerParser(LayerWithInputParser):
    """Parser for spatial pooling layers (max / maxabs / avg)."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    
    def add_params(self, mcp):
        # No pooling-specific runtime parameters yet; hook kept for symmetry
        # with the other parsers.
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
    
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
        dic['start'] = mcp.safe_get_int(name, 'start', default=0)
        dic['stride'] = mcp.safe_get_int(name, 'stride')
        dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)
        dic['pool'] = mcp.safe_get(name, 'pool')
        
        # Avg pooler does not use its acts or inputs
        is_avg = dic['pool'] == 'avg'
        dic['usesActs'] = not is_avg
        dic['usesInputs'] = not is_avg
        
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        
        self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
        self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
        self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        
        if LayerWithInputParser.grad_consumers_below(dic):
            self.verify_divisible(dic['channels'], 16, 'channels')
        self.verify_str_in(dic['pool'], 'pool', ['max', 'maxabs', 'avg'])
        
        self.verify_img_size()

        if dic['outputsX'] <= 0:
            # Derive the output grid size from the pooling geometry when it
            # isn't given explicitly.
            dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
        dic['outputs'] = dic['outputsX']**2 * dic['channels']
        
        print ("Initialized %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels']))
        return dic
class CrossMapPoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputChannels'] = mcp.safe_get_int(name, 'outputs', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
dic['requiresParams'] = False
# Avg pooler does not use its acts or inputs
dic['usesActs'] = 'pool' != 'avg'
dic['usesInputs'] = 'pool' != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['outputChannels'] * dic['imgPixels']
self.verify_num_range(dic['size'], 'size', 1, dic['channels'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['size'])
self.verify_num_range(dic['outputChannels'], 'outputChannels', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['start'], 'start', None, 0)
self.verify_str_in(dic['pool'], 'pool', ['max'])
self.verify_img_size()
covered_chans = dic['start'] + (dic['outputChannels'] - 1) * dic['stride'] + dic['size']
if covered_chans < dic['channels']:
raise LayerParsingError("Layer '%s': cross-map pooling with start=%d, stride=%d, size=%d, outputs=%d covers only %d of %d input channels." % \
(name, dic['start'], dic['stride'], dic['size'], dic['outputChannels'], covered_chans, dic['channels']))
print "Initialized cross-map %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['outputChannels'])
return dic
class NormLayerParser(LayerWithInputParser):
    """Parser for local normalization layers. The concrete variant (response,
    contrast, or cross-map response) is selected via the constructor."""
    RESPONSE_NORM = 'response'
    CONTRAST_NORM = 'contrast'
    CROSSMAP_RESPONSE_NORM = 'cross-map response'
    
    def __init__(self, norm_type):
        LayerWithInputParser.__init__(self, num_inputs=1)
        self.norm_type = norm_type
        
    def add_params(self, mcp):
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        scale = mcp.safe_get_float(name, 'scale')
        # Normalize the scale by neighborhood size: channel count for the
        # cross-map variant, window area for the spatial variants.
        if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
            scale /= dic['size']
        else:
            scale /= dic['size']**2
        dic['scale'] = scale
        dic['pow'] = mcp.safe_get_float(name, 'pow')
        dic['minDiv'] = mcp.safe_get_float(name, 'minDiv', default=1.0)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        dic['size'] = mcp.safe_get_int(name, 'size')
        dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        # Contrast normalization layer does not use its inputs
        dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
        
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
            self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
            if dic['channels'] % 16 != 0:
                raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
        else:
            self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])
            # Spatial norms additionally constrain the channel count.
            if dic['channels'] > 3 and dic['channels'] % 4 != 0:
                raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)

        self.verify_img_size()

        dic['outputs'] = dic['imgPixels'] * dic['channels']
        print ("Initialized %s-normalization layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (self.norm_type, name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['channels']))
        return dic
class CostParser(LayerWithInputParser):
    """Base parser for cost (objective) layers."""
    def __init__(self, num_inputs=-1):
        LayerWithInputParser.__init__(self, num_inputs=num_inputs)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        # Stored as string because python can't pickle lambda functions
        dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs]'
        dic['children'] = mcp.safe_get_list(name, 'children', default=[])
        # Aggregated costs only produce outputs which are additive.
        for child in dic['children']:
            if child not in prev_layers:
                raise LayerParsingError("Layer '%s': child cost layer '%s' not defined" % (name, child))
            if prev_layers[child]['type'] != dic['type']:
                raise LayerParsingError("Layer '%s': child cost layer '%s' must have same type as parent" % (name, child))
            prev_layers[child]['aggregated'] = 1
        dic['aggregated'] = dic['children'] != []
        # Cost layers carry no activation function of their own.
        del dic['neuron']
        return dic

    def add_params(self, mcp):
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        coeff = mcp.safe_get_float(name, 'coeff')
        dic['coeff'] = coeff
        # A zero-coefficient cost contributes no gradient.
        dic['gradConsumer'] = coeff > 0
class CrossEntCostParser(CostParser):
    """Parser for cross-entropy cost layers (inputs: label matrix, softmax)."""
    def __init__(self):
        CostParser.__init__(self, num_inputs=2)
        
    def parse(self, name, mcp, prev_layers, model):
        dic = CostParser.parse(self, name, mcp, prev_layers, model)
        num_classes = model.train_data_provider.get_num_classes()
        if dic['numInputs'][0] != num_classes: # first input must be labels
            raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
        if dic['inputLayers'][1]['type'] != 'softmax':
            raise LayerParsingError("Layer '%s': Second input must be softmax layer" % name)
        if dic['numInputs'][1] != num_classes:
            raise LayerParsingError("Layer '%s': Softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
                                    % (name, dic['inputs'][1], num_classes))
        
        print ("Initialized cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus']))
        return dic
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
if dic['topk'] > dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class BinomialCrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
self.dic['posWeight'] = mcp.safe_get_float(self.dic['name'], 'posWeight', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': both inputs must produce the same number of outputs" % (name))
if 'neuron' not in dic['inputLayers'][1] or dic['inputLayers'][1]['neuron'] != 'logistic':
print "WARNING: Layer '%s': input '%s' is not logistic, results may not be what you intend." % (dic['name'], dic['inputs'][1])
if dic['type'] == 'cost.bce':
print "Initialized binomial cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
dic['computeSoftmaxErrorRate'] = True
return dic
class DetectionCrossEntCostParser(BinomialCrossEntCostParser):
    """Binomial cross-entropy specialized for per-class detection: reports the
    aggregate cross-entropy and error rate plus a per-class breakdown."""
    def __init__(self):
        BinomialCrossEntCostParser.__init__(self)
    def parse(self, name, mcp, prev_layers, model):
        # Parse as a plain binomial cross-entropy cost, then tighten the
        # input requirement and install detection-specific output filters.
        dic = BinomialCrossEntCostParser.parse(self, name, mcp, prev_layers, model)
        if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
            raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
        dic['computeSoftmaxErrorRate'] = False
        # Stored as strings because lambdas can't be pickled. The filter keeps
        # the first two costs normalized per case and converts the remaining
        # per-class triples into two ratios per class (n.inf on a zero
        # denominator); the formatter renders them with per-class label names.
        dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs[:2]] + [(class_cost[2] / class_cost[j] if class_cost[j] > 0 else n.inf) for class_cost in [costs[2:][i*3:(i+1)*3] for i in range(len(costs[2:])/3)] for j in range(2)]'
        dic['outputFilterFormatter'] = 'lambda self,costs: "(crossent) %.6f, (err) %.6f, " % (costs[0], costs[1]) + ", ".join("(%s) %.6f, %.6f" % (self.train_data_provider.batch_meta["label_names"][i/2-1],costs[i],costs[i+1]) for i in xrange(2, len(costs), 2))'
        print "Initialized detection cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
        return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class LocRankCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=4)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized local rank cost '%s'" % name
return dic
# All the layer parsers.
# Maps the layer 'type' string used in configuration files to a zero-argument
# factory producing a fresh parser instance (factories, not instances, because
# each layer needs its own parser state).
layer_parsers = {'data' : lambda : DataLayerParser(),
                 'fc': lambda : FCLayerParser(),
                 'sfc': lambda : SplitFCLayerParser(),
                 'conv': lambda : ConvLayerParser(),
                 'local': lambda : LocalUnsharedLayerParser(),
                 'softmax': lambda : SoftmaxLayerParser(),
                 'eltsum': lambda : EltwiseSumLayerParser(),
                 'eltmax': lambda : EltwiseMaxLayerParser(),
                 'sum': lambda : SumLayerParser(),
                 'neuron': lambda : NeuronLayerParser(),
                 'pool': lambda : PoolLayerParser(),
                 'cmpool': lambda : CrossMapPoolLayerParser(),
                 # The three normalization flavors share one parser class,
                 # distinguished by the constant passed to the constructor.
                 'rnorm': lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
                 'cnorm': lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
                 'cmrnorm': lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
                 'nailbed': lambda : NailbedLayerParser(),
                 'blur': lambda : GaussianBlurLayerParser(),
                 'href': lambda : HorizontalReflectionLayerParser(),
                 'resize': lambda : ResizeLayerParser(),
                 'rgb2yuv': lambda : RGBToYUVLayerParser(),
                 'rgb2lab': lambda : RGBToLABLayerParser(),
                 'rscale': lambda : RandomScaleLayerParser(),
                 'crop': lambda : CropLayerParser(),
                 'concat': lambda : ConcatentionLayerParser(),
                 'pass': lambda : PassThroughLayerParser(),
                 'dropout': lambda : DropoutLayerParser(),
                 'dropout2': lambda : Dropout2LayerParser(),
                 'cost.logreg': lambda : LogregCostParser(),
                 'cost.ce': lambda : CrossEntCostParser(),
                 'cost.bce': lambda : BinomialCrossEntCostParser(),
                 'cost.dce': lambda : DetectionCrossEntCostParser(),
                 'cost.sum2': lambda : SumOfSquaresCostParser(),
                 'cost.locrank': lambda : LocRankCostParser()}
# All the neuron parsers.
# This isn't a name --> parser mapping as the layer parsers above because neurons don't have fixed names.
# A user may write tanh[0.5,0.25], etc., so the list is scanned for a match;
# it is kept sorted by neuron type for stable help/listing output.
neuron_parsers = sorted([NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
                         NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
                         NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('nrelu', 'f(x) = max(0, x) + noise', uses_acts=True, uses_inputs=False),
                         NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
                         NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('log[a]', 'f(x) = log(a + x)', uses_acts=False, uses_inputs=True),
                         ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('drelu[a]', 'f(x) = x - a * tanh(x / a)', uses_acts=False, uses_inputs=True)],
                        key=lambda x:x.type)
# Learning rate schedules.
# Each entry declares a schedule name and its typed parameter signature
# (f = float, i = int), e.g. exp[fbase;ftgtFactor].
lrs_parsers = sorted([ParamParser('const[fbase]'),
                      ParamParser('linear[fbase;ftgtFactor]'),
                      ParamParser('exp[fbase;ftgtFactor]'),
                      ParamParser('dexp[fbase;ftgtFactor;inumSteps]')])
|
zhaofang0627/cuda-convnet-for-hashing
|
layer.py
|
Python
|
apache-2.0
| 85,482
|
[
"Gaussian",
"NEURON"
] |
8ca5d8110434d7b350a00212ba131cce7d560288855d75328bc7564739a61068
|
import sys
import blib
import pymc
import numpy as np
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as u
import mdtraj as md
# Force field object exposed by the project-local blib helper module.
ff = blib.ff
def set_parms(f, sigma, epsilon, q=0.0):
    """Assign identical nonbonded parameters (charge q, LJ sigma/epsilon) to
    every particle of the NonbondedForce-like object *f*."""
    print("\nsigma=%f, epsilon=%f" % (sigma, epsilon))
    charge = q * u.elementary_charge
    lj_sigma = sigma * u.nanometer
    lj_epsilon = epsilon * u.kilojoule_per_mole
    for particle_index in range(f.getNumParticles()):
        f.setParticleParameters(particle_index, charge, lj_sigma, lj_epsilon)
def setup(traj, mmtop, temperature, pressure, sigma, epsilon, nonbondedCutoff=1.4*u.nanometer):
    """Build, minimize and briefly equilibrate an NPT Langevin simulation with
    the given LJ parameters; returns the ready-to-use app.Simulation."""
    system = ff.createSystem(mmtop, nonbondedMethod=app.CutoffPeriodic, nonbondedCutoff=nonbondedCutoff)
    # Assumes the nonbonded force is force index 0 in the system -- TODO confirm.
    f = system.getForce(0)
    set_parms(f, sigma, epsilon)
    friction = 1.0 / u.picoseconds
    timestep = 3.0 * u.femtoseconds
    barostat_frequency = 25
    integrator = mm.LangevinIntegrator(temperature, friction, timestep)
    # Barostat makes the ensemble NPT at the requested pressure.
    system.addForce(mm.MonteCarloBarostat(pressure, temperature, barostat_frequency))
    simulation = app.Simulation(mmtop, system, integrator)
    # Log step number and density to stdout every 100 steps.
    simulation.reporters.append(app.StateDataReporter(sys.stdout, 100, step=True, density=True))
    simulation.context.setPositions(traj.openmm_positions(0))
    simulation.minimizeEnergy()
    simulation.context.setVelocitiesToTemperature(temperature)
    # Short equilibration run before the caller starts sampling.
    simulation.step(10000)
    return simulation
def propagate(simulation, state, temperature, sigma, epsilon):
    """Advance the simulation a short segment starting from *state* with the
    given LJ parameters and return the resulting State (positions, parameters
    and energies included)."""
    force = simulation.system.getForce(0)
    set_parms(force, sigma, epsilon)
    # Restore the previous configuration, then re-thermalize velocities.
    simulation.context.setState(state)
    simulation.context.setVelocitiesToTemperature(temperature)
    simulation.step(5000)
    return simulation.context.getState(getPositions=True, getParameters=True, getEnergy=True)
# Molar mass of one CCl4 molecule (one carbon plus four chlorines).
mass = 12.01078 * u.daltons + 4 * 35.4532 * u.daltons
#sigma = pymc.Uniform("sigma", 0.53, 0.57, value=0.545)
# Initial Lennard-Jones parameters: sigma in nm, epsilon in kJ/mol.
sigma0 = 0.545
epsilon = 13.0
# Target experimental density; dividing by its own unit leaves a plain float.
observed = 1.58436 * u.grams / u.milliliter
observed = observed / (u.grams / u.milliliter)
# Presumably the measurement uncertainty used in the likelihood -- TODO confirm.
error = 0.02
# Ambient thermodynamic conditions.
temperature = 298.15 * u.kelvin
pressure = 101.325 * u.kilopascals
kB = u.BOLTZMANN_CONSTANT_kB * u.AVOGADRO_CONSTANT_NA
kt = kB * temperature
# Cubic lattice of atoms_per_dim**3 particles.
atoms_per_dim = 7
n_atoms = atoms_per_dim ** 3
traj, mmtop = blib.build_top(atoms_per_dim, sigma0)
# Build and equilibrate the simulation once; the MCMC step method reuses it.
simulation = setup(traj, mmtop, temperature, pressure, sigma0, epsilon)
# NOTE(review): state0 is captured but never used below -- confirm intent.
state0 = simulation.context.getState(getPositions=True, getParameters=True, getEnergy=True)
class Step(object):
    """Custom MCMC step method that advances the 'state' variable by running a
    short MD segment at the current value of sigma."""

    def __init__(self, var):
        # Keep only the variable's name; point dictionaries are keyed by name.
        self.var = var.name

    def step(self, point):
        updated = point.copy()
        updated[self.var] = propagate(simulation, point['state'], temperature, point['sigma'], epsilon)
        return updated
with pymc.Model() as model:
    # Prior over the LJ sigma parameter (nm).
    sigma = pymc.Uniform("sigma", 0.535, 0.565)
    # Placeholder variable holding the simulation state; updated by Step.
    state = pymc.Flat('state')
    # NUTS updates sigma; the custom Step propagates the MD state.
    step1 = pymc.step_methods.NUTS(vars=[sigma])
    step2 = Step(state) # not sure how to limit this to one variable
    trace = pymc.sample(10, [step1, step2])
pymc.traceplot(trace[:])
# NOTE(review): show() is never imported (matplotlib.pyplot is not in the
# import list), so this line raises NameError at runtime -- confirm intent.
show()
|
kyleabeauchamp/DBayes
|
dbayes/test_pymc3.py
|
Python
|
gpl-2.0
| 3,035
|
[
"MDTraj",
"OpenMM"
] |
acf1ec45280151bb50ebecaebee100e250b8a45b7c330c44f638b91f4c526f10
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
import heapq
img = cv2.imread('color.jpg')
#edges = cv2.Canny(img,100,200)
#plt.subplot(121),plt.imshow(edges,cmap = 'gray')
#plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
#imgray = img
imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
plt.subplot(121),plt.imshow(imgray,cmap = 'gray')
plt.title('Grayscale Image'), plt.xticks([]), plt.yticks([])
#ret,thresh = cv2.threshold(imgray,230,255,cv2.THRESH_BINARY)
#th3 = cv2.adaptiveThreshold(imgray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,8)
# Otsu's thresholding after Gaussian filtering
imgray = cv2.adaptiveThreshold(imgray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,8)
blur = cv2.GaussianBlur(imgray,(5,5),0.5)
#plt.subplot(122),plt.imshow(blur,cmap = 'gray')
#plt.title('Blurred image'), plt.xticks([]), plt.yticks([])
ret3,th3 = cv2.threshold(blur,127, 255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#corners = cv2.goodFeaturesToTrack(th3,8,0.03,50)
#corners = np.int0(corners)
#for i in corners:
# x,y = i.ravel()
# cv2.circle(th3,(x,y),20,255,-1)
#plt.imshow(th3),plt.show()
contours, hierarchy = cv2.findContours(th3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(th3, contours, -1, (0,255,0), 3)
plt.subplot(122),plt.imshow(th3,cmap = 'gray')
plt.title('Contours'), plt.xticks([]), plt.yticks([])
areas = [cv2.contourArea(c) for c in contours]
res = heapq.nlargest(10, areas)
'''
approx_contours=[]
for cnt in contours:
approx = cv2.approxPolyDP(cnt,0.125*cv2.arcLength(cnt,True),True)
print len(approx)
approx_contours.append(approx)
'''
|
SimplyPaper/SimplyPaper.github.io
|
Python Files/find_contours.py
|
Python
|
apache-2.0
| 1,644
|
[
"Gaussian"
] |
abc7b03296e55cd0437dcefd710f5ee68e94d3c577d475f5f5f85228edc3722c
|
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Provide cache classes to handle creation of OpenMM Context objects.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import re
import copy
import collections
try:
import openmm
from openmm import unit
except ImportError: # OpenMM < 7.6
from simtk import openmm, unit
from openmmtools import integrators
# =============================================================================
# GENERAL LRU CACHE
# =============================================================================
class LRUCache(object):
    """A simple LRU cache with a dictionary-like interface that supports maximum capacity and expiration.

    It can be configured to have a maximum number of elements (capacity)
    and an element expiration (time_to_live) measured in number of accesses
    to the cache. Both read and write operations count as an access, but
    only successful reads (i.e. those not raising KeyError) increment the
    counter.

    Parameters
    ----------
    capacity : int, optional
        Maximum number of elements in the cache. When set to None, the
        cache has infinite capacity (default is None).
    time_to_live : int, optional
        If an element is not accessed after time_to_live read/write
        operations, the element is removed. When set to None, elements do
        not have an expiration (default is None).

    Attributes
    ----------
    capacity
    time_to_live

    Examples
    --------
    >>> cache = LRUCache(capacity=2, time_to_live=3)

    When the capacity is exceeded, the least recently used element is
    removed.

    >>> cache['1'] = 1
    >>> cache['2'] = 2
    >>> elem = cache['1']  # read '1', now '2' is the least recently used
    >>> cache['3'] = 3
    >>> len(cache)
    2
    >>> '2' in cache
    False

    After time_to_live read/write operations an element is deleted if
    it is not used.

    >>> elem = cache['3']  # '3' is used, counter is reset
    >>> elem = cache['1']  # access 1
    >>> elem = cache['1']  # access 2
    >>> elem = cache['1']  # access 3
    >>> len(cache)
    1
    >>> '3' in cache
    False

    """

    class _Entry(object):
        """Internal cache record: the stored value plus an optional
        ``expiration`` attribute (an access-counter deadline).

        The ``expiration`` attribute is created only when a time to live is
        configured, so entries stay small when expiration is disabled.
        """
        def __init__(self, value, expiration=None):
            self.value = value
            if expiration is not None:
                self.expiration = expiration

    def __init__(self, capacity=None, time_to_live=None):
        # OrderedDict keeps entries in access order: the first item is the
        # least recently used, the last the most recently used.
        self._data = collections.OrderedDict()
        self._capacity = capacity
        self._ttl = time_to_live
        # Monotonic counter of successful reads and writes; expirations are
        # expressed as values of this counter.
        self._n_access = 0

    @property
    def capacity(self):
        """Maximum number of elements that can be cached.

        If None, the capacity is unlimited.
        """
        return self._capacity

    @capacity.setter
    def capacity(self, new_capacity):
        # An unlimited capacity (None) never requires evictions. The explicit
        # check also fixes a bug: comparing ``len(...) > None`` raises
        # TypeError on Python 3 when resetting the capacity to unlimited.
        if new_capacity is not None:
            # Evict least-recently-used entries until the cache fits.
            while len(self._data) > new_capacity:
                self._data.popitem(last=False)
        self._capacity = new_capacity

    @property
    def time_to_live(self):
        """Number of read/write operations before a cached element expires.

        If None, elements have no expiration.
        """
        return self._ttl

    @time_to_live.setter
    def time_to_live(self, new_time_to_live):
        # Update entries only if we are changing the ttl.
        if new_time_to_live == self._ttl:
            return
        # Update expiration of cache entries.
        for entry in self._data.values():
            # If there was no time to live before, just let entries
            # expire in new_time_to_live accesses.
            if self._ttl is None:
                entry.expiration = self._n_access + new_time_to_live
            # If we don't want expiration anymore, delete the field.
            # This way we save memory in case there are a lot of entries.
            elif new_time_to_live is None:
                del entry.expiration
            # Otherwise just add/subtract the difference.
            else:
                entry.expiration += new_time_to_live - self._ttl
        # Purge cache only if there is a time to live.
        if new_time_to_live is not None:
            self._remove_expired()
        self._ttl = new_time_to_live

    def empty(self):
        """Purge the cache."""
        self._data = collections.OrderedDict()

    def __getitem__(self, key):
        # When we access data, push element at the
        # end to make it the most recently used.
        entry = self._data.pop(key)
        self._data[key] = entry
        # We increment the number of accesses only on successful reads.
        self._n_access += 1
        # Update expiration and cleanup expired values.
        if self._ttl is not None:
            entry.expiration = self._n_access + self._ttl
            self._remove_expired()
        return entry.value

    def __setitem__(self, key, value):
        self._n_access += 1
        # When we access data, push element at the
        # end to make it the most recently used.
        try:
            self._data.pop(key)
        except KeyError:
            # Remove first (least recently used) item if we hit maximum capacity.
            if self._capacity is not None and len(self._data) >= self._capacity:
                self._data.popitem(last=False)
        # Determine expiration and clean up expired.
        if self._ttl is None:
            ttl = None
        else:
            ttl = self._ttl + self._n_access
            self._remove_expired()
        self._data[key] = self._Entry(value, ttl)

    def __len__(self):
        return len(self._data)

    def __contains__(self, item):
        return item in self._data

    def __iter__(self):
        return self._data.__iter__()

    def _remove_expired(self):
        """Remove all expired cache entries.

        Assumes that entries were created with an expiration attribute
        (i.e. it must be called only when a time to live is configured).
        """
        keys_to_remove = set()
        for key, entry in self._data.items():
            if entry.expiration <= self._n_access:
                keys_to_remove.add(key)
            else:
                # Later entries have been accessed later
                # and they surely haven't expired yet.
                break
        for key in keys_to_remove:
            del self._data[key]
# =============================================================================
# GENERAL CONTEXT CACHE
# =============================================================================
class ContextCache(object):
"""LRU cache hosting the minimum amount of incompatible Contexts.
Two Contexts are compatible if they are in a compatible ThermodynamicState,
and have compatible integrators. In general, two integrators are compatible
if they have the same serialized state, but ContextCache can decide to store
a single Context to optimize memory when two integrators differ by only few
parameters that can be set after the Context is initialized. These parameters
include all the global variables defined by a ``CustomIntegrator``.
You can force ``ContextCache`` to consider an integrator global variable incompatible
by adding it to the blacklist ``ContextCache.INCOMPATIBLE_INTEGRATOR_ATTRIBUTES``.
Similarly, you can add other attributes that should be considered compatible
through the whitelist ``ContextCache.COMPATIBLE_INTEGRATOR_ATTRIBUTES``. If
an attribute in that dictionary is not found in the integrator, the cache
will search for a corresponding getter and setter.
Parameters
----------
platform : openmm.Platform, optional
The OpenMM platform to use to create Contexts. If None, OpenMM
tries to select the fastest one available (default is None).
platform_properties : dict, optional
A dictionary of platform properties for the OpenMM platform.
Only valid if the platform is not None (default is None).
**kwargs
Parameters to pass to the underlying LRUCache constructor such
as capacity and time_to_live.
Attributes
----------
platform
capacity
time_to_live
Warnings
--------
Python instance attributes are not copied when ``ContextCache.get_context()``
is called. You can force this by setting adding them to the whitelist
``ContextCache.COMPATIBLE_INTEGRATOR_ATTRIBUTES``, but if modifying your
Python attributes won't modify the OpenMM serialization, this will likely cause
problems so this is discouraged unless you know exactly what you are doing.
Examples
--------
>>> from openmm import unit
>>> from openmmtools import testsystems
>>> from openmmtools.states import ThermodynamicState
>>> alanine = testsystems.AlanineDipeptideExplicit()
>>> thermodynamic_state = ThermodynamicState(alanine.system, 310*unit.kelvin)
>>> time_step = 1.0*unit.femtosecond
Two compatible thermodynamic states generate only a single cached Context.
ContextCache can also (in few explicitly supported cases) recycle the same
Context even if the integrators differ by some parameters.
>>> context_cache = ContextCache()
>>> context1, integrator1 = context_cache.get_context(thermodynamic_state,
... openmm.VerletIntegrator(time_step))
>>> thermodynamic_state.temperature = 300*unit.kelvin
>>> time_step2 = 2.0*unit.femtosecond
>>> context2, integrator2 = context_cache.get_context(thermodynamic_state,
... openmm.VerletIntegrator(time_step2))
>>> id(context1) == id(context2)
True
>>> len(context_cache)
1
When we switch to NPT the states are not compatible and so neither the
Contexts are.
>>> integrator2 = openmm.VerletIntegrator(2.0*unit.femtosecond)
>>> thermodynamic_state_npt = copy.deepcopy(thermodynamic_state)
>>> thermodynamic_state_npt.pressure = 1.0*unit.atmosphere
>>> context3, integrator3 = context_cache.get_context(thermodynamic_state_npt,
... openmm.VerletIntegrator(time_step))
>>> id(context1) == id(context3)
False
>>> len(context_cache)
2
You can set a capacity and a time to live for contexts like in a normal
LRUCache.
>>> context_cache = ContextCache(capacity=1, time_to_live=5)
>>> context2, integrator2 = context_cache.get_context(thermodynamic_state,
... openmm.VerletIntegrator(time_step))
>>> context3, integrator3 = context_cache.get_context(thermodynamic_state_npt,
... openmm.VerletIntegrator(time_step))
>>> len(context_cache)
1
See Also
--------
LRUCache
states.ThermodynamicState.is_state_compatible
"""
    def __init__(self, platform=None, platform_properties=None, **kwargs):
        # Validate before storing anything so that an invalid
        # platform/properties combination fails fast.
        self._validate_platform_properties(platform, platform_properties)
        self._platform = platform
        self._platform_properties = platform_properties
        # Remaining keyword arguments (e.g. capacity, time_to_live) configure
        # the underlying LRU cache of Contexts.
        self._lru = LRUCache(**kwargs)
def __len__(self):
return len(self._lru)
@property
def platform(self):
"""The OpenMM platform to use to create Contexts.
If None, OpenMM tries to select the fastest one available. This
can be set only if the cache is empty.
"""
return self._platform
@platform.setter
def platform(self, new_platform):
if len(self._lru) > 0:
raise RuntimeError('Cannot change platform of a non-empty ContextCache')
if new_platform is None:
self._platform_properties = None
self._validate_platform_properties(new_platform, self._platform_properties)
self._platform = new_platform
def set_platform(self, new_platform, platform_properties=None):
if len(self._lru) > 0:
raise RuntimeError('Cannot change platform of a non-empty ContextCache')
self._validate_platform_properties(new_platform, platform_properties)
self._platform = new_platform
self._platform_properties = platform_properties
@property
def capacity(self):
"""The maximum number of Context cached.
If None, the capacity is unlimited.
"""
return self._lru.capacity
@capacity.setter
def capacity(self, new_capacity):
self._lru.capacity = new_capacity
@property
def time_to_live(self):
"""The Contexts expiration date in number of accesses to the LRUCache.
If None, Contexts do not expire.
"""
return self._lru.time_to_live
@time_to_live.setter
def time_to_live(self, new_time_to_live):
self._lru.time_to_live = new_time_to_live
def empty(self):
"""Clear up cache and remove all Contexts."""
self._lru.empty()
    def get_context(self, thermodynamic_state, integrator=None):
        """Return a context in the given thermodynamic state.

        In general, the Context must be considered newly initialized. This
        means that positions and velocities must be set afterwards.

        If the integrator is not provided, this will search the cache for
        any Context in the given ThermodynamicState, regardless of its
        integrator. In this case, the method guarantees that two consecutive
        calls with the same thermodynamic state will retrieve the same context.

        This creates a new Context if no compatible one has been cached.
        If a compatible Context exists, the ThermodynamicState is applied
        to it, and the Context integrator state is changed to match the
        one passed as an argument. As a consequence, the returned integrator
        is guaranteed to be in the same state as the one provided, but it
        can be a different instance. This is to minimize the number of
        Contexts objects cached that use the same or very similar integrator.

        Parameters
        ----------
        thermodynamic_state : states.ThermodynamicState
            The thermodynamic state of the system.
        integrator : openmm.Integrator, optional
            The integrator for the context (default is None).

        Returns
        -------
        context : openmm.Context
            The context in the given thermodynamic system.
        context_integrator : openmm.Integrator
            The integrator to be used to propagate the Context. Can be
            a different instance from the one passed as an argument.

        Warnings
        --------
        Python instance attributes are not copied when ``get_context()``
        is called. You can force this by setting adding them to the whitelist
        ``ContextCache.COMPATIBLE_INTEGRATOR_ATTRIBUTES``, but if modifying the
        attributes won't modify the OpenMM serialization, this will likely cause
        problems so this is discouraged unless you know exactly what you're doing.

        """
        context = None
        # No specific integrator requested: any cached Context in this
        # thermodynamic state will do.
        if integrator is None:
            thermodynamic_state_id = self._generate_state_id(thermodynamic_state)
            matching_context_ids = [context_id for context_id in self._lru
                                    if context_id[0] == thermodynamic_state_id]
            if len(matching_context_ids) == 0:
                # We have to create a new Context.
                integrator = self._get_default_integrator(thermodynamic_state.temperature)
            elif len(matching_context_ids) == 1:
                # Only one match.
                context = self._lru[matching_context_ids[0]]
            else:
                # Multiple matches, prefer the non-default Integrator.
                # Always pick the least recently used to make two consecutive
                # calls retrieve the same integrator.
                for context_id in reversed(matching_context_ids):
                    if context_id[1] != self._default_integrator_id():
                        context = self._lru[context_id]
                        break
        if context is None:
            # Determine the Context id matching the pair state-integrator.
            context_id = self._generate_context_id(thermodynamic_state, integrator)
            # Search for previously cached compatible Contexts or create new one.
            try:
                context = self._lru[context_id]
            except KeyError:
                context = thermodynamic_state.create_context(integrator, self._platform, self._platform_properties)
                self._lru[context_id] = context
        context_integrator = context.getIntegrator()
        # Update state of system and integrator of the cached context.
        # We don't have to copy the state of the integrator if the user
        # didn't ask for a specific one.
        if integrator is not None:
            self._copy_integrator_state(integrator, context_integrator)
        thermodynamic_state.apply_to_context(context)
        return context, context_integrator
def __getstate__(self):
# this serialization format was introduced in openmmtools > 0.18.3 (pull request #437)
if self.platform is not None:
platform_serialization = self.platform.getName()
else:
platform_serialization = None
return dict(platform=platform_serialization, capacity=self.capacity,
time_to_live=self.time_to_live, platform_properties=self._platform_properties)
def __setstate__(self, serialization):
# this serialization format was introduced in openmmtools > 0.18.3 (pull request #437)
if serialization['platform'] is None:
self._platform = None
else:
self._platform = openmm.Platform.getPlatformByName(serialization['platform'])
if not 'platform_properties' in serialization:
self._platform_properties = None
else:
self._platform_properties = serialization["platform_properties"]
self._lru = LRUCache(serialization['capacity'], serialization['time_to_live'])
def __eq__(self, other):
"""Two ContextCache objects are equal if they have the same values in their public attributes."""
# Check types are compatible
if isinstance(other, ContextCache):
# Check all inner public attributes have the same values (excluding methods)
my_inner_attrs = [attr_ for attr_ in dir(self) if not attr_.startswith('_')
and not callable(getattr(self, attr_))]
return all([getattr(self, attr) == getattr(other, attr) for attr in my_inner_attrs])
else:
return False
# -------------------------------------------------------------------------
# Internal usage
# -------------------------------------------------------------------------
# Each element is the name of the integrator attribute used before
# get/set, and its standard value used to check for compatibility.
COMPATIBLE_INTEGRATOR_ATTRIBUTES = {
'StepSize': 0.001,
'ConstraintTolerance': 1e-05,
'Temperature': 273,
'Friction': 5,
'RandomNumberSeed': 0,
}
INCOMPATIBLE_INTEGRATOR_ATTRIBUTES = {
'_restorable__class_hash',
}
@classmethod
def _check_integrator_compatibility_configuration(cls):
"""Verify that the user didn't specify the same attributes as both compatible and incompatible."""
shared_attributes = set(cls.COMPATIBLE_INTEGRATOR_ATTRIBUTES)
shared_attributes = shared_attributes.intersection(cls.INCOMPATIBLE_INTEGRATOR_ATTRIBUTES)
if len(shared_attributes) != 0:
raise RuntimeError('These integrator attributes have been specified both as '
'compatible and incompatible: {}'.format(shared_attributes))
    @classmethod
    def _set_integrator_compatible_variables(cls, integrator, reference_value):
        """Set all the global variables to the specified reference.

        If the argument reference_value is another integrator, the global
        variables will be copied. If integrator is not a CustomIntegrator,
        the function has no effect.

        The function doesn't copy the global variables that are included in
        the blacklist INCOMPATIBLE_INTEGRATOR_ATTRIBUTES.
        """
        # Check if the integrator has no global variables.
        # (Only CustomIntegrators expose getNumGlobalVariables.)
        try:
            n_global_variables = integrator.getNumGlobalVariables()
        except AttributeError:
            return
        # Check if we'll have to copy the values from a reference integrator.
        is_reference_integrator = isinstance(reference_value, integrator.__class__)
        for global_variable_idx in range(n_global_variables):
            # Do not set variables that should be incompatible.
            global_variable_name = integrator.getGlobalVariableName(global_variable_idx)
            if global_variable_name in cls.INCOMPATIBLE_INTEGRATOR_ATTRIBUTES:
                continue
            # Either copy the value from the reference integrator or just set it.
            # When copying, indices line up because both integrators are the
            # same class and thus share the same global-variable layout.
            if is_reference_integrator:
                value = reference_value.getGlobalVariable(global_variable_idx)
            else:
                value = reference_value
            integrator.setGlobalVariable(global_variable_idx, value)
    @classmethod
    def _copy_integrator_state(cls, copied_integrator, integrator):
        """Copy the compatible parameters of copied_integrator to integrator.

        Simply using __getstate__ and __setstate__ doesn't work because
        __setstate__ sets also the bound Context.

        We can assume the two integrators are of the same class since
        get_context() found that they match the hash.
        """
        # Check that there are no contrasting settings for the attribute compatibility.
        cls._check_integrator_compatibility_configuration()
        # Restore temperature getter/setter before copying attributes.
        integrators.ThermostatedIntegrator.restore_interface(integrator)
        integrators.ThermostatedIntegrator.restore_interface(copied_integrator)
        assert integrator.__class__ == copied_integrator.__class__
        # Copy all compatible global variables (CustomIntegrator only).
        cls._set_integrator_compatible_variables(integrator, copied_integrator)
        # Copy other compatible attributes through getters/setters; attributes
        # the integrator class doesn't expose are silently skipped.
        for attribute in cls.COMPATIBLE_INTEGRATOR_ATTRIBUTES:
            try:  # getter/setter
                value = getattr(copied_integrator, 'get' + attribute)()
            except AttributeError:
                pass
            else:  # getter/setter
                getattr(integrator, 'set' + attribute)(value)
    @classmethod
    def _standardize_integrator(cls, integrator):
        """Return a standard copy of the integrator.

        This is used to determine if the same context can be used with
        different integrators that differ by only compatible parameters:
        two integrators whose standardized copies serialize identically
        are considered compatible.
        """
        # Check that there are no contrasting settings for the attribute compatibility.
        cls._check_integrator_compatibility_configuration()
        # Work on a deep copy; the caller's integrator must not be modified.
        standard_integrator = copy.deepcopy(integrator)
        integrators.ThermostatedIntegrator.restore_interface(standard_integrator)
        # Set all compatible global variables to 0, except those in the blacklist.
        cls._set_integrator_compatible_variables(standard_integrator, 0.0)
        # Copy other compatible attributes through getters/setters overwriting
        # eventual global variables with a different standard value.
        for attribute, std_value in cls.COMPATIBLE_INTEGRATOR_ATTRIBUTES.items():
            try:  # setter
                getattr(standard_integrator, 'set' + attribute)(std_value)
            except AttributeError:
                # Try to set CustomIntegrator global variable.
                try:
                    standard_integrator.setGlobalVariableByName(attribute, std_value)
                except Exception:
                    pass
        return standard_integrator
    @staticmethod
    def _generate_state_id(thermodynamic_state):
        """Return a unique key for the ThermodynamicState.

        States sharing the same standard system map to the same key, so
        compatible states can share a Context.
        """
        # We take advantage of the cached _standard_system_hash property
        # to generate a compatible hash for the thermodynamic state.
        return thermodynamic_state._standard_system_hash
@classmethod
def _generate_integrator_id(cls, integrator):
"""Return a unique key for the given Integrator."""
standard_integrator = cls._standardize_integrator(integrator)
xml_serialization = openmm.XmlSerializer.serialize(standard_integrator)
# Ignore per-DOF variables for the purpose of hashing.
if isinstance(integrator, openmm.CustomIntegrator):
tag_iter = re.finditer(r'PerDofVariables>', xml_serialization)
try:
open_tag_index = next(tag_iter).start() - 1
except StopIteration: # No DOF variables.
pass
else:
close_tag_index = next(tag_iter).end() + 1
xml_serialization = xml_serialization[:open_tag_index] + xml_serialization[close_tag_index:]
return xml_serialization.__hash__()
    @classmethod
    def _generate_context_id(cls, thermodynamic_state, integrator):
        """Return a unique key for a context in the given state.

        We return a tuple containing the ThermodynamicState hash and the
        serialization of the Integrator. Keeping the two separated
        makes it possible to search for Contexts in a given state regardless
        of the integrator.
        """
        state_id = cls._generate_state_id(thermodynamic_state)
        integrator_id = cls._generate_integrator_id(integrator)
        return state_id, integrator_id
    @staticmethod
    def _get_default_integrator(temperature):
        """Return a new instance of the default integrator at the given temperature."""
        # Use a likely-to-be-used Integrator.
        return integrators.GeodesicBAOABIntegrator(temperature=temperature)
    @classmethod
    def _default_integrator_id(cls):
        """Return the unique key of the default integrator.

        The id is computed lazily on first access and memoized in
        _cached_default_integrator_id. The 300 K used for the probe
        integrator is presumably irrelevant because temperature is a
        compatible attribute standardized away by the id generation --
        TODO confirm against COMPATIBLE_INTEGRATOR_ATTRIBUTES.
        """
        if cls._cached_default_integrator_id is None:
            default_integrator = cls._get_default_integrator(300*unit.kelvin)
            default_integrator_id = cls._generate_integrator_id(default_integrator)
            cls._cached_default_integrator_id = default_integrator_id
        return cls._cached_default_integrator_id
    # Class-level memo for the default integrator id (filled on first use).
    _cached_default_integrator_id = None
@staticmethod
def _validate_platform_properties(platform=None, platform_properties=None):
"""Check if platform properties are valid for the platform; else raise ValueError."""
if platform_properties is None:
return True
if platform_properties is not None and platform is None:
raise ValueError("To set platform_properties, you need to also specify the platform.")
if not isinstance(platform_properties, dict):
raise ValueError("platform_properties must be a dictionary")
for key, value in platform_properties.items():
if not isinstance(value, str):
raise ValueError(
"All platform properties must be strings. You supplied {}: {} of type {}".format(
key, value, type(value)
)
)
# create a context to check if all properties are
dummy_system = openmm.System()
dummy_system.addParticle(1)
dummy_integrator = openmm.VerletIntegrator(1.0*unit.femtoseconds)
try:
openmm.Context(dummy_system, dummy_integrator, platform, platform_properties)
return True
except Exception as e:
if "Illegal property name" in str(e):
raise ValueError("Invalid platform property for this platform. {}".format(e))
else:
raise e
# =============================================================================
# DUMMY CONTEXT CACHE
# =============================================================================
class DummyContextCache(object):
    """A trivial "cache" that creates a brand-new Context on every request.

    Parameters
    ----------
    platform : openmm.Platform, optional
        The OpenMM platform to use. If None, OpenMM tries to select
        the fastest one available (default is None).

    Attributes
    ----------
    platform : openmm.Platform
        The OpenMM platform to use. If None, OpenMM tries to select
        the fastest one available.

    Examples
    --------
    Create a new ``Context`` object for alanine dipeptide in vacuum in NPT.

    >>> import openmm
    >>> from openmm import unit
    >>> from openmmtools import states, testsystems
    >>> system = testsystems.AlanineDipeptideVacuum().system
    >>> thermo_state = states.ThermodynamicState(system, temperature=300*unit.kelvin)
    >>> context_cache = DummyContextCache()
    >>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
    >>> context, context_integrator = context_cache.get_context(thermo_state, integrator)

    Or create a ``Context`` with an arbitrary integrator (when you only
    need to compute energies, for example).

    >>> context, context_integrator = context_cache.get_context(thermo_state)
    """

    def __init__(self, platform=None):
        self.platform = platform

    def get_context(self, thermodynamic_state, integrator=None):
        """Create a new context in the given thermodynamic state.

        Parameters
        ----------
        thermodynamic_state : states.ThermodynamicState
            The thermodynamic state of the system.
        integrator : openmm.Integrator, optional
            The integrator to bind to the new context. If ``None``, an arbitrary
            integrator is used. Currently, this is a ``LangevinIntegrator`` with
            "V R O R V" splitting, but this might change in the future. Default
            is ``None``.

        Returns
        -------
        context : openmm.Context
            The new context in the given thermodynamic system.
        context_integrator : openmm.Integrator
            The integrator bound to the context that can be used for
            propagation. This is identical to the ``integrator`` argument
            if it was passed.
        """
        if integrator is None:
            # Arbitrary default: a BAOAB-style Langevin integrator.
            integrator = integrators.LangevinIntegrator(
                timestep=1.0*unit.femtoseconds,
                splitting="V R O R V",
                temperature=thermodynamic_state.temperature
            )
        new_context = thermodynamic_state.create_context(integrator, self.platform)
        return new_context, integrator

    def __getstate__(self):
        # Platforms are not picklable; serialize only the platform name.
        platform_serialization = None if self.platform is None else self.platform.getName()
        return dict(platform=platform_serialization)

    def __setstate__(self, serialization):
        platform_name = serialization['platform']
        if platform_name is None:
            self.platform = None
        else:
            self.platform = openmm.Platform.getPlatformByName(platform_name)
# =============================================================================
# GLOBAL CONTEXT CACHE
# =============================================================================
# Module-level cache shared by default across MCMCMoves: unbounded
# (no capacity limit, no expiration).
global_context_cache = ContextCache(capacity=None, time_to_live=None)
"""A shared ContextCache that minimizes Context object creating when using MCMCMove."""
# =============================================================================
# CACHE ENTRY (MODULE INTERNAL USAGE)
# =============================================================================
class _CacheEntry(object):
"""A cache entry holding an optional expiration attribute."""
def __init__(self, value, expiration=None):
self.value = value
# We create the self.expiration attribute only if requested
# to save memory in case the cache stores a lot of entries.
if expiration is not None:
self.expiration = expiration
# Run the module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
choderalab/openmmtools
|
openmmtools/cache.py
|
Python
|
mit
| 32,415
|
[
"OpenMM"
] |
5288b1e184b0d99bfb146fc3ab544a12c384d4f35a86998ea93397c5593f9c18
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 04:29:13 2020
@author: mathieumoog
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 14 05:54:11 2020
@author: mathieumoog
"""
import cpmd
import filexyz
import numpy as np
import matplotlib.pyplot as plt
from msmbuilder.msm import MarkovStateModel
from msmbuilder.utils import dump
def getDistance1Dsq(position1, position2, length):
    """Return the squared minimum-image separation of two 1D coordinates.

    The raw difference is folded back into [-length/2, length/2] to honor
    periodic boundary conditions for a box of size ``length``.
    """
    separation = position1 - position2
    half = 0.5 * length
    if separation > half:
        separation = separation - length
    elif separation < -half:
        separation = separation + length
    return separation * separation
def getDistanceOrtho(positions, index1, index2, cell_lengths):
    """Minimum-image Euclidean distance between two atoms in an orthorhombic cell.

    ``positions`` is an (n_atoms, 3) array; the per-axis squared separations
    (with the minimum-image convention applied inline) are accumulated and
    the square root of their sum is returned.
    """
    total = 0.0
    for axis in range(3):
        d = positions[index1, axis] - positions[index2, axis]
        half = 0.5 * cell_lengths[axis]
        if d > half:
            d -= cell_lengths[axis]
        elif d < -half:
            d += cell_lengths[axis]
        total += d * d
    return np.sqrt(total)
def computeContactMatrix(positions, cell_lengths, cut_off):
    """Symmetric 0/1 contact matrix: 1 where the pair distance is < cut_off.

    Only the upper triangle is computed; the value is mirrored to keep the
    matrix symmetric. The diagonal stays 0 (no self-contacts).
    """
    nb_atoms = len(positions[:, 0])
    contacts = np.zeros((nb_atoms, nb_atoms))
    for i in range(nb_atoms):
        for j in range(i + 1, nb_atoms):
            if getDistanceOrtho(positions, i, j, cell_lengths) < cut_off:
                contacts[i, j] = 1
                contacts[j, i] = 1
    return contacts
def computeDistanceMatrix(positions, cell_lengths):
    """Full symmetric matrix of minimum-image pair distances.

    Each upper-triangle distance is computed once and mirrored; the
    diagonal remains 0.
    """
    nb_atoms = len(positions[:, 0])
    dist_matrix = np.zeros((nb_atoms, nb_atoms))
    for i in range(nb_atoms):
        for j in range(i + 1, nb_atoms):
            d = getDistanceOrtho(positions, i, j, cell_lengths)
            dist_matrix[i, j] = d
            dist_matrix[j, i] = d
    return dist_matrix
def computeTransitionMatrix(states, nb_states, tau, step_max):
    """Count lag-tau transitions state[t] -> state[t+tau] along a trajectory.

    Only the first len(states) - step_max frames are used as origins, so
    ``tau`` must not exceed ``step_max`` for the lagged index to stay in
    range. Returns an (nb_states, nb_states) count matrix.
    """
    n_frames = len(states)
    counts = np.zeros((nb_states, nb_states))
    for origin in range(n_frames - step_max):
        counts[states[origin], states[origin + tau]] += 1
    return counts
def computeChapmanKolmogorov(matrix, nb_states):
    """Return the Chapman-Kolmogorov prediction P(tau)^2 = P(tau) @ P(tau).

    Parameters
    ----------
    matrix : array_like, shape (nb_states, nb_states)
        Transition (probability or count) matrix at lag tau.
    nb_states : int
        Number of states; kept for interface compatibility, the matrix
        shape is authoritative.

    Returns
    -------
    numpy.ndarray
        ``matrix @ matrix`` as a float array -- the two-step transition
        matrix implied by the one-step matrix.
    """
    # Vectorized matrix product replaces the original O(n^3) pure-Python
    # triple loop; the result is numerically identical.
    m = np.asarray(matrix, dtype=float)
    return m @ m
# --- Hard-coded run parameters (one volume/temperature trajectory) ---
volume=8.82
temperature=3000
# run_nb=1
path_sim = str( "/Users/mathieumoog/Documents/CO2/" +
                str(volume) + "/" +
                str(temperature) + "K/"
                # + str(run_nb) + "-run/"
                )
cell_lengths = np.ones(3)*volume
traj_path = str( path_sim + "TRAJEC_fdb_wrapped.xyz" )
traj = filexyz.readAsArray( traj_path )
nbC=32
nbO=64
nb_atoms=nbC+nbO
max_neigh=5
nb_step=len(traj[:,0,0])
# distances[t, c, k]: distance from carbon c to its (k+1)-th nearest neighbour at frame t.
distances = np.zeros( (nb_step,nbC,max_neigh), dtype=float )
for step in range(nb_step):
    matrix = computeDistanceMatrix( traj[step,:,:], cell_lengths )
    # Column 0 of the row-sorted matrix is the self distance (0), hence the 1:max_neigh+1 slice.
    distances[step,0:nbC,0:max_neigh] = np.sort(matrix,axis=1)[0:nbC,1:max_neigh+1]
nbins=25
r_width=0.5
plt.figure()
# NOTE(review): 20000*32 hard-codes nb_step=20000 and nbC=32; reshape((nb_step*nbC,))
# would be safer -- confirm the trajectory length.
plt.hist( np.reshape(distances[:,:,0],(20000*32)), bins=nbins, rwidth=r_width )
plt.hist( np.reshape(distances[:,:,1],(20000*32)), bins=nbins, rwidth=r_width )
plt.hist( np.reshape(distances[:,:,2],(20000*32)), bins=nbins, rwidth=r_width )
plt.hist( np.reshape(distances[:,:,3],(20000*32)), bins=nbins, rwidth=r_width )
plt.show()
nb_box=5
min_distance = np.zeros(max_neigh, dtype=float)
max_distance = np.zeros(max_neigh, dtype=float)
delta_box=np.zeros( max_neigh, dtype=float )
for neigh in range(max_neigh):
    min_distance[neigh] = np.min( distances[:,:,neigh] )
    max_distance[neigh] = np.max( distances[:,:,neigh] )
    delta_box[neigh] = (max_distance[neigh]-min_distance[neigh])/nb_box
# Discretize each neighbour distance into nb_box equal-width integer bins (states).
states = np.zeros( (nb_step,nbC,max_neigh), dtype=int )
for neigh in range(max_neigh):
    states[:,:,neigh] = (distances[:,:,neigh]-min_distance[neigh])/delta_box[neigh]
msm = MarkovStateModel( lag_time=1, n_timescales=6)
msm.fit(states[:,0,0])
msm.timescales_
dt=5*0.001
frac = 0.5
max_step=int(nb_step*frac)
nb_tau_min=int(500)
nb_tau_max=int(2*nb_tau_min)
target_neigh=3
# Computing Transition Matrix for a given tau
matrix_tot=np.zeros((nb_box,nb_box,nb_tau_max), dtype=float )
matrix_tot_ck=np.zeros((nb_box,nb_box,nb_tau_min), dtype=float )
for tau in range(nb_tau_max):
    matrix = np.zeros((nb_box,nb_box),dtype=float)
    for carbon in range(nbC):
        matrix += computeTransitionMatrix( states[:,carbon,target_neigh-1], nb_box, tau, max_step )
    # Row-normalize the counts into transition probabilities.
    for state in range(nb_box):
        matrix[state,:] /= sum( matrix[state,:] )
    matrix_tot[:,:,tau] = matrix[:,:]
    if tau < nb_tau_min:
        matrix_tot_ck[:,:,tau] = computeChapmanKolmogorov( matrix_tot[:,:,tau], nb_box)
# Chapman-Kolmogorov test plot: P_ii(tau) (solid) vs P_ii(tau)^2 at 2*tau spacing (dashed).
plt.figure()
plt.xlabel("Time lag (ps)")
plt.ylabel("P_ij, P_ij^CK")
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[0,0,:], "k-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[0,0,:], "k--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[1,1,:], "r-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[1,1,:], "r--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[2,2,:], "b-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[2,2,:], "b--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[3,3,:], "g-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[3,3,:], "g--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[4,4,:], "m-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[4,4,:], "m--" )
plt.show()
# NOTE(review): target_state=9 but the matrices only have nb_box=5 states;
# the indexing below would raise IndexError -- confirm the intended state.
target_state=9
plt.figure()
plt.xlabel("Time lag (ps)")
plt.ylabel("P_ij, P_ij^CK")
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[target_state,target_state,:], "k-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[target_state,target_state,:], "k--" )
plt.show()
|
CondensedOtters/PHYSIX_Utils
|
Projects/Moog_2016-2019/CO2/CO2_NN/hmm_bonds.py
|
Python
|
gpl-3.0
| 5,753
|
[
"CPMD"
] |
92513d00fe7d2fb03b752df41d5307b09920fc81f34b25ed2c2d9338d1ef85ed
|
"""Unit test for kgp.py
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import unittest
import sys
# Make the bundled kgp package importable when the tests are run from
# this directory (the relative path 'kgp' is appended at most once).
if 'kgp' not in sys.path:
    sys.path.append('kgp')
import kgp
class KGPTest(unittest.TestCase):
resultsMap = {"a":"0",
"b":"1",
"c":"2",
"d":"",
"e":"0",
"f":"10",
"g":"1"}
def setUp(self):
self.parser = kgp.KantGenerator('kgp/test.xml')
def doTest(self, key):
self.parser.loadSource('<xref id="%s"/>' % key)
self.assertEqual(self.resultsMap[key], self.parser.refresh())
def testA(self):
"""kgp a ref test"""
self.doTest("a")
def testB(self):
"""kgp b ref test"""
self.doTest("b")
def testC(self):
"""kgp c ref test"""
self.doTest("c")
def testD(self):
"""kgp d ref test"""
self.doTest("d")
def testE(self):
"""kgp e ref test"""
self.doTest("e")
def testF(self):
"""kgp f ref test"""
self.doTest("f")
def testG(self):
"""kgp g ref test"""
self.doTest("g")
# Run the whole suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/kgptest.py
|
Python
|
mit
| 1,303
|
[
"VisIt"
] |
86b6750eb53fc3befe337d5db138b38401842d9e4b8eb4b644341dc22cd351d3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# relevant imports
import sys
import time
import espresso
import mpi4py.MPI as MPI
import Tetracryst # Preparation of tetrahedral crystal and constuctions of bonds in tetrahedral liquid
from espresso import Real3D, Int3D
from espresso.tools import decomp
from espresso.tools import timers
# --- Run parameters: integration steps, cutoffs, skin, AdResS geometry ---
# NOTE: this script targets Python 2 (print statements below); len(x)/4 relies
# on Python 2 integer division.
# integration steps, cutoff, skin, AdResS specifications
steps = 10000
timestep = 0.0005
intervals = 1000
rc = 4.5 # cutoff coarse-grained potential
rca = 1.122462048309373 # cutoff atomistic potential (cutoff (2^(1/6)), WCA)
skin = 0.4
# Parameters for the thermostat (disabled; NVE run unless re-enabled below)
#gamma = 2.0
#temp = 1.0
# Parameters for size of AdResS dimensions (explicit and hybrid region widths)
ex_size = 5.0
hy_size = 5.0
# read equilibrated configuration file
# NOTE(review): 'type' and 'pid' shadow a builtin / later loop variable.
pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = espresso.tools.readxyz("equilibrated_conf.xyz")
# Table for coarse-grained potential
tabCG = "table_potential.dat"
# number of CG particles (4 atoms per tetrahedral molecule)
num_particlesCG = len(x)/4
# number of AT particles
num_particles = len(x)
# set up the system
sys.stdout.write('Setting up simulation ...\n')
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
system = espresso.System()
system.rng = espresso.esutil.RNG()
system.bc = espresso.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
# (H-)AdResS domain decomposition
system.storage = espresso.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
# --- Build atomistic (AT) and coarse-grained (CG) particle lists ---
# prepare AT particles
allParticlesAT = []
allParticles = []
tuples = []
for pidAT in range(num_particles):
    allParticlesAT.append([pidAT, # add here these particles just temporarily
                           Real3D(x[pidAT], y[pidAT], z[pidAT]), # position
                           Real3D(vx[pidAT], vy[pidAT], vz[pidAT]), # velocity
                           Real3D(0, 0, 0), # force
                           1, 1.0, 1]) # type, mass, is AT particle
# create CG particles
for pidCG in range(num_particlesCG):
    # we put CG molecule in first atom, later CG molecules will be positioned in the center
    cmp = espresso.tools.AdressSetCG(4, pidCG, allParticlesAT)
    # Preparation of tuples (tuples define, which atoms belong to which CG molecules)
    tmptuple = [pidCG+num_particles]
    for pidAT2 in range(4):
        pid = pidCG*4+pidAT2
        tmptuple.append(pid)
    # append CG particles
    allParticles.append([pidCG+num_particles, # CG particle has to be added first!
                         Real3D(cmp[0], cmp[1], cmp[2]), # pos
                         Real3D(0, 0, 0), # vel
                         Real3D(0, 0, 0), # force
                         0, 4.0, 0]) # type, mass, is not AT particle
    # append AT particles
    for pidAT in range(4):
        pid = pidCG*4+pidAT
        allParticles.append([pid, # now the AT particles can be added
                             (allParticlesAT[pid])[1], # pos
                             (allParticlesAT[pid])[2], # vel
                             (allParticlesAT[pid])[3], # force
                             (allParticlesAT[pid])[4], # type
                             (allParticlesAT[pid])[5], # mass
                             (allParticlesAT[pid])[6]]) # is AT particle
    # append tuple to tuplelist
    tuples.append(tmptuple)
# add particles to system
system.storage.addParticles(allParticles, "id", "pos", "v", "f", "type", "mass", "adrat")
# create FixedTupleList object (maps each CG molecule id to its 4 atom ids)
ftpl = espresso.FixedTupleListAdress(system.storage)
# and add the tuples
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
# add bonds between AT particles
fpl = espresso.FixedPairListAdress(system.storage, ftpl)
bonds = Tetracryst.makebonds(len(x))
fpl.addBonds(bonds)
# decompose after adding tuples and bonds
print "Added tuples and bonds, decomposing now ..."
system.storage.decompose()
print "done decomposing"
# --- Interactions, integrator, and the production run with on-the-fly analysis ---
# AdResS Verlet list
vl = espresso.VerletListAdress(system, cutoff=rc, adrcut=rc,
                                dEx=ex_size, dHy=hy_size,
                                adrCenter=[Lx/2, Ly/2, Lz/2])
# non-bonded potentials
# LJ Capped WCA between AT and tabulated potential between CG particles
interNB = espresso.interaction.VerletListHadressLennardJones(vl, ftpl) # Here we need specific (H-)AdResS interaction type
potWCA = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=rca)
potCG = espresso.interaction.Tabulated(itype=3, filename=tabCG, cutoff=rc) # CG
interNB.setPotentialAT(type1=1, type2=1, potential=potWCA) # AT
interNB.setPotentialCG(type1=0, type2=0, potential=potCG) # CG
system.addInteraction(interNB)
# bonded potentials
# Quartic potential between AT particles
potQuartic = espresso.interaction.Quartic(K=75.0, r0=1.0)
interQuartic = espresso.interaction.FixedPairListQuartic(system, fpl, potQuartic)
system.addInteraction(interQuartic)
# VelocityVerlet integrator
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = timestep
# add AdResS extension
adress = espresso.integrator.Adress(system, vl, ftpl)
integrator.addExtension(adress)
# add Langevin thermostat extension (disabled; uncomment for NVT)
#langevin = espresso.integrator.LangevinThermostat(system)
#langevin.gamma = gamma
#langevin.temperature = temp
#langevin.adress = True # enable AdResS!
#integrator.addExtension(langevin)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
espresso.tools.AdressDecomp(system, integrator)
# system information
print ''
print 'AdResS Center =', [Lx/2, Ly/2, Lz/2]
print 'number of AT particles =', num_particles
print 'number of CG particles =', num_particlesCG
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis
temperature = espresso.analysis.Temperature(system)
fmt = '%5d %8.4f %12.3f %12.3f %12.3f %12.3f\n'
T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interNB.computeEnergy()
Eb = interQuartic.computeEnergy()
sys.stdout.write(' step Temp etotal enonbonded ebonded ekinetic\n')
sys.stdout.write(fmt % (0, T, Ek + Ep + Eb, Ep, Eb, Ek))
# Timer, Steps
start_time = time.clock()
nsteps = steps / intervals
# write the start configuration to trajectory pdb-file (disabled)
#filename = "hadress.pdb"
#espresso.tools.pdbwrite(filename, system, molsize=num_particles+num_particlesCG, append=True)
# integration and on the fly analysis
for s in range(1, intervals + 1):
    integrator.run(nsteps)
    step = nsteps * s
    T = temperature.compute()
    Ek = 0.5 * T * (3 * num_particles)
    Ep = interNB.computeEnergy()
    Eb = interQuartic.computeEnergy()
    sys.stdout.write(fmt % (step, T, Ek + Ep + Eb, Ep, Eb, Ek))
    #espresso.tools.pdbwrite(filename, system, molsize=num_particles+num_particlesCG, append=True)
# simulation information
end_time = time.clock()
timers.show(integrator.getTimers(), precision=3)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
|
BackupTheBerlios/espressopp
|
examples/hadress/hadressPlain/hadress.py
|
Python
|
gpl-3.0
| 7,398
|
[
"CRYSTAL",
"ESPResSo"
] |
f92246b24977039c7e02a302efc532b3eb53426fbb0a51ef1dbd2f69a7ab62c9
|
from datetime import datetime
from django.core.management.base import BaseCommand
from schools.models import School, BoundaryType
from stories.models import (
Question, Questiongroup, QuestionType,
QuestiongroupQuestions, Source, UserType)
class Command(BaseCommand):
    # Django management command: seeds the Anganwadi Bangalore 2014-15 v1
    # question group and its questions (idempotent via get_or_create).
    args = ""
    help = """Populate DB with Anganwadi Bangalore 2014-15 v1
    ./manage.py populateanganwadiv1"""
    def handle(self, *args, **options):
        # Look up or create the fixed metadata rows the questions hang off.
        s = Source.objects.get_or_create(name="anganwadi")[0]
        start_date = datetime.strptime('2014-08-18', '%Y-%m-%d')
        end_date = datetime.strptime('2015-12-30', '%Y-%m-%d')
        question_group = Questiongroup.objects.get_or_create(
            version=1,
            source=s,
            start_date=start_date,
            end_date=end_date,
        )[0]
        question_type_checkbox = QuestionType.objects.get(name="checkbox")
        question_type_numeric = QuestionType.objects.get(name="numeric")
        school_type = BoundaryType.objects.get(name="PreSchool")
        user_type = UserType.objects.get_or_create(name=UserType.AKSHARA_STAFF)[0]
        # Question texts in display order; list index determines sequence.
        questions = [
            "Number of students enrolled (boys)",
            "Number of students enrolled (girls)",
            "Number of students present (boys)",
            "Number of students present (girls)",
            "Where is the center functioning",
            "The anganwadi center is in a spacious room (35 sq according to ecce rule), meaning there is an indoor enclosure",
            "There is an outdoor space for 30 children, with space of 30sq",
            "There is a toilet for children to use",
            "There is pure drinking water facility",
            "There is safety around the center",
            "There is cleanliness around the center",
            "The building is safe",
            "There is basic facility for children with special needs",
            "There is seating facility available for children",
            "There are clean rooms to sit for children",
            "The floor, walls, corners of walls and roof are free of cobweb and dust",
            "There is dust bin in the center",
            "Store room is maintained to preserve food items",
            "Store room is clean",
            "Food to be distributed on that day was covered properly",
            "The cook / chef maintains cleanliness and wore clean clothes on the day of visit",
            "There is separate facility for washing hands after meals",
            "First aid box contains all the necessary items",
            "There is sufficient learning material and playing items for children",
            "There is sufficient learning material for indoor activities",
            "Indoor learning materials are being used by children",
            "There is black board in the center",
            "Anganwadi Center wall was painted and was full of writings related to learning",
            "Anganwadi has a record of health details of each children",
            "Bala Vikas Samithi is present",
            "Anganwadi friendship group is formed",
            "Friends of Anganwadi members are conducting the activities",
            "Anganwadi worker is trained",
            "Learning materials are present in the center",
            "Number of students with special needs (boys)",
            "Number of students with special needs (girls)"
        ]
        for count, question in enumerate(questions):
            # Indexes answered numerically; everything else is Yes/No.
            # NOTE(review): index 4 ("Where is the center functioning") is
            # marked numeric -- confirm that is intended.
            if count in [0, 1, 2, 3, 4, 34, 35]:
                question_type = question_type_numeric
                options = None
            else:
                question_type = question_type_checkbox
                options = "{'Yes','No'}"
            q = Question.objects.get_or_create(
                text=question,
                data_type=1,
                user_type=user_type,
                question_type=question_type,
                school_type=school_type,
                options=options,
            )[0]
            # Link the question into the group with a 1-based sequence number.
            QuestiongroupQuestions.objects.get_or_create(
                questiongroup=question_group, question=q, sequence=count+1)
        print "Anganwadi Bangalore 2014-15 v1 questions populated."
|
klpdotorg/dubdubdub
|
apps/stories/management/commands/archived_commands/populateanganwadiv1.py
|
Python
|
mit
| 4,188
|
[
"VisIt"
] |
72c30654bc62848e5a9431ad942697d03951915071d7f9cb615cdfe2e74b70bb
|
"""
Univariate Kernel Density Estimators
References
----------
Racine, Jeff. (2008) "Nonparametric Econometrics: A Primer," Foundation and
Trends in Econometrics: Vol 3: No 1, pp1-88.
http://dx.doi.org/10.1561/0800000009
https://en.wikipedia.org/wiki/Kernel_%28statistics%29
Silverman, B.W. Density Estimation for Statistics and Data Analysis.
"""
import numpy as np
from scipy import integrate, stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.validation import array_like, float_like
from . import bandwidths
from .kdetools import forrt, revrt, silverman_transform
from .linbin import fast_linbin
# Kernels Switch for estimators
# Maps the short kernel codes accepted by fit()/kdensity() to the kernel
# classes in statsmodels.sandbox.nonparametric.kernels.
kernel_switch = dict(
    gau=kernels.Gaussian,
    epa=kernels.Epanechnikov,
    uni=kernels.Uniform,
    tri=kernels.Triangular,
    biw=kernels.Biweight,
    triw=kernels.Triweight,
    cos=kernels.Cosine,
    cos2=kernels.Cosine2,
)
def _checkisfit(self):
try:
self.density
except Exception:
raise ValueError("Call fit to fit the density first")
# Kernel Density Estimator Class
class KDEUnivariate(object):
"""
Univariate Kernel Density Estimator.
Parameters
----------
endog : array_like
The variable for which the density estimate is desired.
Notes
-----
If cdf, sf, cumhazard, or entropy are computed, they are computed based on
the definition of the kernel rather than the FFT approximation, even if
the density is fit with FFT = True.
`KDEUnivariate` is much faster than `KDEMultivariate`, due to its FFT-based
implementation. It should be preferred for univariate, continuous data.
`KDEMultivariate` also supports mixed data.
See Also
--------
KDEMultivariate
kdensity, kdensityfft
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> nobs = 300
>>> np.random.seed(1234) # Seed random generator
>>> dens = sm.nonparametric.KDEUnivariate(np.random.normal(size=nobs))
>>> dens.fit()
>>> plt.plot(dens.cdf)
>>> plt.show()
"""
def __init__(self, endog):
self.endog = array_like(endog, "endog", ndim=1, contiguous=True)
def fit(
self,
kernel="gau",
bw="normal_reference",
fft=True,
weights=None,
gridsize=None,
adjust=1,
cut=3,
clip=(-np.inf, np.inf),
):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
- If a callable is given, it's return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(x), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of x so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(x) or max(x)}
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
Returns
-------
KDEUnivariate
The instance fit,
"""
if isinstance(bw, str):
self.bw_method = bw
else:
self.bw_method = "user-given"
if not callable(bw):
bw = float_like(bw, "bw")
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(
endog,
kernel=kernel,
bw=bw,
adjust=adjust,
weights=weights,
gridsize=gridsize,
clip=clip,
cut=cut,
)
else:
density, grid, bw = kdensity(
endog,
kernel=kernel,
bw=bw,
adjust=adjust,
weights=weights,
gridsize=gridsize,
clip=clip,
cut=cut,
)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
# should this passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = {}
return self
@cache_readonly
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a, b = -np.inf, np.inf
else:
a, b = kern.domain
def func(x, s):
return kern.density(s, x)
support = self.support
support = np.r_[a, support]
gridsize = len(support)
endog = self.endog
probs = [
integrate.quad(func, support[i - 1], support[i], args=endog)[0]
for i in range(1, gridsize)
]
return np.cumsum(probs)
@cache_readonly
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf)
@cache_readonly
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf
@cache_readonly
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
"""
_checkisfit(self)
def entr(x, s):
pdf = kern.density(s, x)
return pdf * np.log(pdf + 1e-12)
kern = self.kernel
if kern.domain is not None:
a, b = self.domain
else:
a, b = -np.inf, np.inf
endog = self.endog
# TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a, b, args=(endog,))[0]
@cache_readonly
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0, 1, gridsize))
def evaluate(self, point):
    """
    Evaluate density at a point or points.

    Parameters
    ----------
    point : {float, ndarray}
        Point(s) at which to evaluate the density.
    """
    _checkisfit(self)
    # delegate to the fitted kernel, conditioning on the sample data
    kern = self.kernel
    return kern.density(self.endog, point)
# Kernel Density Estimator Functions
def kdensity(
    x,
    kernel="gau",
    bw="normal_reference",
    weights=None,
    gridsize=None,
    adjust=1,
    clip=(-np.inf, np.inf),
    cut=3,
    retgrid=True,
):
    """
    Rosenblatt-Parzen univariate kernel density estimator.

    Parameters
    ----------
    x : array_like
        The variable for which the density estimate is desired.
    kernel : str
        The Kernel to be used. Choices are

        - "biw" for biweight
        - "cos" for cosine
        - "epa" for Epanechnikov
        - "gau" for Gaussian.
        - "tri" for triangular
        - "triw" for triweight
        - "uni" for uniform

    bw : str, float, callable
        The bandwidth to use. Choices are:

        - "scott" - 1.059 * A * nobs ** (-1/5.), where A is
          `min(std(x),IQR/1.34)`
        - "silverman" - .9 * A * nobs ** (-1/5.), where A is
          `min(std(x),IQR/1.34)`
        - "normal_reference" - C * A * nobs ** (-1/5.), where C is
          calculated from the kernel. Equivalent (up to 2 dp) to the
          "scott" bandwidth for gaussian kernels. See bandwidths.py
        - If a float is given, its value is used as the bandwidth.
        - If a callable is given, it's return value is used.
          The callable should take exactly two parameters, i.e.,
          fn(x, kern), and return a float, where:

          * x - the clipped input data
          * kern - the kernel instance used

    weights : array or None
        Optional weights. If the x value is clipped, then this weight is
        also dropped.
    gridsize : int
        If gridsize is None, max(len(x), 50) is used.
    adjust : float
        An adjustment factor for the bw. Bandwidth becomes bw * adjust.
    clip : tuple
        Observations in x that are outside of the range given by clip are
        dropped. The number of observations in x is then shortened.
    cut : float
        Defines the length of the grid past the lowest and highest values of x
        so that the kernel goes to zero. The end points are
        -/+ cut*bw*{min(x) or max(x)}
    retgrid : bool
        Whether or not to return the grid over which the density is estimated.

    Returns
    -------
    density : ndarray
        The densities estimated at the grid points.
    grid : ndarray, optional
        The grid points at which the density is estimated.
        Only returned if `retgrid` is True.
    bw : float
        The bandwidth actually used for the estimate (after `adjust`).

    Notes
    -----
    Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
    computationally efficient version.
    """
    x = np.asarray(x)
    if x.ndim == 1:
        x = x[:, None]
    clip_x = np.logical_and(x > clip[0], x < clip[1])
    x = x[clip_x]

    nobs = len(x)  # after trim

    if gridsize is None:
        gridsize = max(nobs, 50)  # do not need to resize if no FFT

    # handle weights
    if weights is None:
        weights = np.ones(nobs)
        q = nobs
    else:
        # ensure weights is a numpy array
        weights = np.asarray(weights)

        # compare against the pre-clip length: each input point needs a weight
        if len(weights) != len(clip_x):
            msg = "The length of the weights must be the same as the given x."
            raise ValueError(msg)
        weights = weights[clip_x.squeeze()]
        q = weights.sum()

    # Get kernel object corresponding to selection
    kern = kernel_switch[kernel]()

    if callable(bw):
        bw = float(bw(x, kern))
        # user passed a callable custom bandwidth function
    elif isinstance(bw, str):
        bw = bandwidths.select_bandwidth(x, bw, kern)
        # will cross-val fit this pattern?
    else:
        bw = float_like(bw, "bw")

    bw *= adjust

    # extend the grid `cut` bandwidths beyond the data so density -> 0
    a = np.min(x, axis=0) - cut * bw
    b = np.max(x, axis=0) + cut * bw
    grid = np.linspace(a, b, gridsize)

    k = (
        x.T - grid[:, None]
    ) / bw  # uses broadcasting to make a gridsize x nobs

    # set kernel bandwidth
    kern.seth(bw)

    # truncate to domain
    if (
        kern.domain is not None
    ):  # will not work for piecewise kernels like parzen
        z_lo, z_high = kern.domain
        domain_mask = (k < z_lo) | (k > z_high)
        k = kern(k)  # estimate density
        k[domain_mask] = 0
    else:
        k = kern(k)  # estimate density

    k[k < 0] = 0  # get rid of any negative values, do we need this?

    # weighted sum over observations, normalized by total weight and bw
    dens = np.dot(k, weights) / (q * bw)

    if retgrid:
        return dens, grid, bw
    else:
        return dens, bw
def kdensityfft(
    x,
    kernel="gau",
    bw="normal_reference",
    weights=None,
    gridsize=None,
    adjust=1,
    clip=(-np.inf, np.inf),
    cut=3,
    retgrid=True,
):
    """
    Rosenblatt-Parzen univariate kernel density estimator

    Parameters
    ----------
    x : array_like
        The variable for which the density estimate is desired.
    kernel : str
        ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
        "bi" for biweight
        "cos" for cosine
        "epa" for Epanechnikov, default
        "epa2" for alternative Epanechnikov
        "gau" for Gaussian.
        "par" for Parzen
        "rect" for rectangular
        "tri" for triangular
    bw : str, float, callable
        The bandwidth to use. Choices are:

        - "scott" - 1.059 * A * nobs ** (-1/5.), where A is
          `min(std(x),IQR/1.34)`
        - "silverman" - .9 * A * nobs ** (-1/5.), where A is
          `min(std(x),IQR/1.34)`
        - "normal_reference" - C * A * nobs ** (-1/5.), where C is
          calculated from the kernel. Equivalent (up to 2 dp) to the
          "scott" bandwidth for gaussian kernels. See bandwidths.py
        - If a float is given, its value is used as the bandwidth.
        - If a callable is given, it's return value is used.
          The callable should take exactly two parameters, i.e.,
          fn(x, kern), and return a float, where:

          * x - the clipped input data
          * kern - the kernel instance used

    weights : array or None
        WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
        Optional weights. If the x value is clipped, then this weight is
        also dropped.
    gridsize : int
        If gridsize is None, min(len(x), 512) is used. Note that the provided
        number is rounded up to the next highest power of 2.
    adjust : float
        An adjustment factor for the bw. Bandwidth becomes bw * adjust.
    clip : tuple
        Observations in x that are outside of the range given by clip are
        dropped. The number of observations in x is then shortened.
    cut : float
        Defines the length of the grid past the lowest and highest values of x
        so that the kernel goes to zero. The end points are
        -/+ cut*bw*{x.min() or x.max()}
    retgrid : bool
        Whether or not to return the grid over which the density is estimated.

    Returns
    -------
    density : ndarray
        The densities estimated at the grid points.
    grid : ndarray, optional
        The grid points at which the density is estimated.
        Only returned if `retgrid` is True.
    bw : float
        The bandwidth actually used for the estimate (after `adjust`).

    Notes
    -----
    Only the default kernel is implemented. Weights are not implemented yet.
    This follows Silverman (1982) with changes suggested by Jones and Lotwick
    (1984). However, the discretization step is replaced by linear binning
    of Fan and Marron (1994). This should be extended to accept the parts
    that are dependent only on the data to speed things up for
    cross-validation.

    References
    ----------
    Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
        curve estimators`. Journal of Computational and Graphical Statistics.
        3.1, 35-56.
    Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
        AS 176. Kernal Density Estimation Using the Fast Fourier Transform`.
        Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
    Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
        the Fast Fourier Transform. Journal of the Royal Statistical Society.
        Series C. 31.2, 93-9.
    """
    x = np.asarray(x)
    # will not work for two columns.
    x = x[np.logical_and(x > clip[0], x < clip[1])]

    # Get kernel object corresponding to selection
    kern = kernel_switch[kernel]()

    if callable(bw):
        bw = float(bw(x, kern))
        # user passed a callable custom bandwidth function
    elif isinstance(bw, str):
        # if bw is None, select optimal bandwidth for kernel
        bw = bandwidths.select_bandwidth(x, bw, kern)
        # will cross-val fit this pattern?
    else:
        bw = float_like(bw, "bw")

    bw *= adjust

    nobs = len(x)  # after trim

    # 1 Make grid and discretize the data
    if gridsize is None:
        gridsize = np.max((nobs, 512.0))
    gridsize = 2 ** np.ceil(np.log2(gridsize))  # round to next power of 2

    # grid extends `cut` bandwidths beyond the data so density -> 0
    a = np.min(x) - cut * bw
    b = np.max(x) + cut * bw
    grid, delta = np.linspace(a, b, int(gridsize), retstep=True)
    RANGE = b - a

    # TODO: Fix this?
    # This is the Silverman binning function, but I believe it's buggy (SS)
    # weighting according to Silverman
    # count = counts(x,grid)
    # binned = np.zeros_like(grid)    #xi_{k} in Silverman
    # j = 0
    # for k in range(int(gridsize-1)):
    #    if count[k]>0: # there are points of x in the grid here
    #        Xingrid = x[j:j+count[k]] # get all these points
    #        # get weights at grid[k],grid[k+1]
    #        binned[k] += np.sum(grid[k+1]-Xingrid)
    #        binned[k+1] += np.sum(Xingrid-grid[k])
    #        j += count[k]
    # binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta

    # NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
    # linear binning assigns each point fractionally to its two nearest
    # grid points; normalization makes `binned` a discretized density
    binned = fast_linbin(x, a, b, gridsize) / (delta * nobs)

    # step 2 compute FFT of the weights, using Munro (1976) FFT convention
    y = forrt(binned)

    # step 3 and 4 for optimal bw compute zstar and the density estimate f
    # do not have to redo the above if just changing bw, ie., for cross val

    # NOTE: silverman_transform is the closed form solution of the FFT of the
    # gaussian kernel. Not yet sure how to generalize it.
    zstar = silverman_transform(bw, gridsize, RANGE) * y
    # 3.49 in Silverman
    # 3.50 w Gaussian kernel
    f = revrt(zstar)
    if retgrid:
        return f, grid, bw
    else:
        return f, bw
|
jseabold/statsmodels
|
statsmodels/nonparametric/kde.py
|
Python
|
bsd-3-clause
| 19,093
|
[
"Gaussian"
] |
c75b92909f497984a72e8870a0886d8af16dcfb01d20b27d1070c6a0da01868b
|
# -*- coding: utf-8 -*-
#
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright © 2014, 2019 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
#
# Device all units off
#
import commands.ServerCommand as ServerCommand
import drivers.X10ControllerAdapter
import datetime
#######################################################################
# Command handler for the "device all units off" command
class DeviceAllUnitsOff(ServerCommand.ServerCommand):

    #######################################################################
    # Execute the "all units off" command for the house code in the request.
    # Returns a response dict with result-code, message and, on failure,
    # the controller's last error text.
    def Execute(self, request):
        result = drivers.X10ControllerAdapter.X10ControllerAdapter.DeviceAllUnitsOff(request["args"]["house-code"])

        # Generate a successful response
        r = DeviceAllUnitsOff.CreateResponse(request["request"])

        r['result-code'] = drivers.X10ControllerAdapter.X10ControllerAdapter.GetLastErrorCode()
        if result:
            r['message'] = "Success"
        else:
            r['error'] = drivers.X10ControllerAdapter.X10ControllerAdapter.GetLastError()
            r['message'] = "Failure"

        return r
|
dhocker/athomepowerlineserver
|
commands/DeviceAllUnitsOff.py
|
Python
|
gpl-3.0
| 1,464
|
[
"xTB"
] |
d8fd030ec028fcb16b77ca5601e80e339585218dde35bae9b6458b7ddc31584c
|
# Copyright (c) 2008-2010, 2013 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2021 Takahide Nojima <nozzy123nozzy@gmail.com>
# Copyright (c) 2021 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Mark Byrne <31762852+mbyrnepr2@users.noreply.github.com>
# Copyright (c) 2021 bot <bot@noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Unit test for the extensions.diadefslib modules."""
# pylint: disable=redefined-outer-name
import sys
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import pytest
from astroid import nodes
from pylint.pyreverse.diadefslib import (
ClassDiadefGenerator,
DefaultDiadefGenerator,
DiaDefGenerator,
DiadefsHandler,
)
from pylint.pyreverse.diagrams import DiagramEntity, Relationship
from pylint.pyreverse.inspector import Linker, Project
from pylint.testutils.pyreverse import PyreverseConfig
def _process_classes(classes: List[DiagramEntity]) -> List[Tuple[bool, str]]:
    """Extract (is_class, title) pairs from a list of diagram entities."""
    pairs = [(isinstance(entity.node, nodes.ClassDef), entity.title) for entity in classes]
    pairs.sort()
    return pairs
def _process_relations(
    relations: Dict[str, List[Relationship]]
) -> List[Tuple[str, str, str]]:
    """Extract (type, from-title, to-title) triples from a relation mapping."""
    triples = [
        (rel_type, rel.from_object.title, rel.to_object.title)
        for rel_type, rels in relations.items()
        for rel in rels
    ]
    return sorted(triples)
@pytest.fixture
def HANDLER(default_config: PyreverseConfig) -> DiadefsHandler:
    # Fresh handler per test so config mutations do not leak between tests.
    return DiadefsHandler(default_config)
@pytest.fixture(scope="module")
def PROJECT(get_project):
    # Module-scoped: the "data" test project is read-only and expensive to build.
    return get_project("data")
def test_option_values(
    default_config: PyreverseConfig, HANDLER: DiadefsHandler, PROJECT: Project
) -> None:
    """Test for ancestor, associated and module options."""
    # Default generator vs one restricted to a single class:
    # the class-filtered generator shows all levels (-1) and module names.
    df_h = DiaDefGenerator(Linker(PROJECT), HANDLER)
    # NOTE(review): `cl_config` aliases `default_config` (no copy), so the
    # mutation below also affects `default_config` — confirm intentional.
    cl_config = default_config
    cl_config.classes = ["Specialization"]
    cl_h = DiaDefGenerator(Linker(PROJECT), DiadefsHandler(cl_config))
    assert df_h._get_levels() == (0, 0)
    assert not df_h.module_names
    assert cl_h._get_levels() == (-1, -1)
    assert cl_h.module_names
    # With all_ancestors/all_associated/module_names forced on, both
    # generators should report unlimited levels and module names.
    for hndl in (df_h, cl_h):
        hndl.config.all_ancestors = True
        hndl.config.all_associated = True
        hndl.config.module_names = True
        hndl._set_default_options()
        assert hndl._get_levels() == (-1, -1)
        assert hndl.module_names
    handler = DiadefsHandler(default_config)
    df_h = DiaDefGenerator(Linker(PROJECT), handler)
    cl_config = default_config
    cl_config.classes = ["Specialization"]
    cl_h = DiaDefGenerator(Linker(PROJECT), DiadefsHandler(cl_config))
    # Explicit show_ancestors/show_associated values override the defaults.
    for hndl in (df_h, cl_h):
        hndl.config.show_ancestors = 2
        hndl.config.show_associated = 1
        hndl.config.module_names = False
        hndl._set_default_options()
        assert hndl._get_levels() == (2, 1)
        assert not hndl.module_names
def test_default_values() -> None:
    """Test default values for package or class diagrams."""
    # Intentionally empty placeholder; see the TODO below.
    # TODO : should test difference between default values for package or class diagrams pylint: disable=fixme
class TestDefaultDiadefGenerator:
    # Expected (type, from, to) relationship triples for the "data" project.
    _should_rels = [
        ("association", "DoNothing", "Ancestor"),
        ("association", "DoNothing", "Specialization"),
        ("association", "DoNothing2", "Specialization"),
        ("implements", "Ancestor", "Interface"),
        ("specialization", "Specialization", "Ancestor"),
    ]

    # NOTE(review): method name has a typo ("exctract"); kept as-is because
    # renaming would change the public pytest test id.
    def test_exctract_relations(
        self, HANDLER: DiadefsHandler, PROJECT: Project
    ) -> None:
        """Test extract_relations between classes."""
        # index 1 of visit() is the class diagram (index 0 is the package one)
        cd = DefaultDiadefGenerator(Linker(PROJECT), HANDLER).visit(PROJECT)[1]
        cd.extract_relationships()
        relations = _process_relations(cd.relationships)
        assert relations == self._should_rels

    def test_functional_relation_extraction(
        self, default_config: PyreverseConfig, get_project: Callable
    ) -> None:
        """Functional test of relations extraction;
        different classes possibly in different modules"""
        # XXX should be catching pyreverse environment problem but doesn't
        # pyreverse doesn't extract the relations but this test ok
        project = get_project("data")
        handler = DiadefsHandler(default_config)
        diadefs = handler.get_diadefs(project, Linker(project, tag=True))
        cd = diadefs[1]
        relations = _process_relations(cd.relationships)
        assert relations == self._should_rels
def test_known_values1(HANDLER: DiadefsHandler, PROJECT: Project) -> None:
    # The default generator yields one package diagram and one class diagram.
    dd = DefaultDiadefGenerator(Linker(PROJECT), HANDLER).visit(PROJECT)
    assert len(dd) == 2
    keys = [d.TYPE for d in dd]
    assert keys == ["package", "class"]
    pd = dd[0]
    assert pd.title == "packages No Name"
    modules = sorted((isinstance(m.node, nodes.Module), m.title) for m in pd.objects)
    assert modules == [
        (True, "data"),
        (True, "data.clientmodule_test"),
        (True, "data.property_pattern"),
        (True, "data.suppliermodule_test"),
    ]
    cd = dd[1]
    assert cd.title == "classes No Name"
    classes = _process_classes(cd.objects)
    assert classes == [
        (True, "Ancestor"),
        (True, "CustomException"),
        (True, "DoNothing"),
        (True, "DoNothing2"),
        (True, "DoSomething"),
        (True, "Interface"),
        (True, "PropertyPatterns"),
        (True, "Specialization"),
    ]
def test_known_values2(HANDLER: DiadefsHandler, get_project: Callable) -> None:
    # A single-module project yields only a class diagram (no package diagram).
    project = get_project("data.clientmodule_test")
    diagrams = DefaultDiadefGenerator(Linker(project), HANDLER).visit(project)
    assert len(diagrams) == 1
    assert [diagram.TYPE for diagram in diagrams] == ["class"]
    class_diagram = diagrams[0]
    assert class_diagram.title == "classes No Name"
    assert _process_classes(class_diagram.objects) == [
        (True, "Ancestor"),
        (True, "Specialization"),
    ]
def test_known_values3(HANDLER: DiadefsHandler, PROJECT: Project) -> None:
    # Class diagram for a single class pulls in ancestors and associated
    # classes; titles keep module prefixes (module_names defaults on here).
    HANDLER.config.classes = ["Specialization"]
    cdg = ClassDiadefGenerator(Linker(PROJECT), HANDLER)
    special = "data.clientmodule_test.Specialization"
    cd = cdg.class_diagram(PROJECT, special)
    assert cd.title == special
    classes = _process_classes(cd.objects)
    assert classes == [
        (True, "data.clientmodule_test.Ancestor"),
        (True, special),
        (True, "data.suppliermodule_test.DoNothing"),
        (True, "data.suppliermodule_test.DoNothing2"),
    ]
def test_known_values4(HANDLER: DiadefsHandler, PROJECT: Project) -> None:
    # Same as test_known_values3, but with module_names disabled the
    # object titles drop their module prefixes.
    HANDLER.config.classes = ["Specialization"]
    HANDLER.config.module_names = False
    generator = ClassDiadefGenerator(Linker(PROJECT), HANDLER)
    diagram = generator.class_diagram(
        PROJECT, "data.clientmodule_test.Specialization"
    )
    assert diagram.title == "data.clientmodule_test.Specialization"
    expected = [
        (True, "Ancestor"),
        (True, "DoNothing"),
        (True, "DoNothing2"),
        (True, "Specialization"),
    ]
    assert _process_classes(diagram.objects) == expected
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Requires dataclasses")
def test_regression_dataclasses_inference(
    HANDLER: DiadefsHandler, get_project: Callable
) -> None:
    # Regression check: class_diagram() must not crash on dataclasses.
    project_path = Path("regrtest_data") / "dataclasses_pyreverse"
    path = get_project(str(project_path))

    cdg = ClassDiadefGenerator(Linker(path), HANDLER)
    special = "regrtest_data.dataclasses_pyreverse.InventoryItem"
    cd = cdg.class_diagram(path, special)
    assert cd.title == special
|
PyCQA/pylint
|
tests/pyreverse/test_diadefs.py
|
Python
|
gpl-2.0
| 8,602
|
[
"VisIt"
] |
38ed937fc354c3d59ec50f4214162b84bffed03a2f43ec54a7a40a627fcbe355
|
"""
===================
pacbio
===================
Tools for processing PacBio sequencing data.
"""
import os
import gzip
import re
import io
import math
import subprocess
import collections
import tempfile
import numbers
import regex
import numpy
import pandas
import pysam
import Bio.SeqFeature
# import dms_tools2.plot to set plotting contexts / themes
import dms_tools2
from dms_tools2.plot import COLOR_BLIND_PALETTE
from dms_tools2.plot import COLOR_BLIND_PALETTE_GRAY
import matplotlib.pyplot as plt
from plotnine import *
class CCS:
"""Class to handle results of ``ccs``.
Holds results of PacBio ``ccs``.
Has been tested on output of ``ccs`` version 3.0.0.
This class reads all data into memory, and so you
may need a lot of RAM if `ccsfile` is large.
Args:
`samplename` (str)
Sample or sequencing run
`ccsfile` (str)
File created by ``ccs`` that holds the CCSs. The
``ccs`` program outputs BAM files. However, you
can also pass FASTQ files generated from these
BAM files using ``samtools bam2fq -T np,rq <bamfile>``
(note that ``-T np,rq`` flag which is needed to
preserve the number of passes and accuracy flags).
The file format is determined from the file extension,
and can be ``*.bam``, ``*.fastq``, ``*.fq``,
``*.fastq.gz``, or ``*.fq.gz``.
`reportfile` (str or `None`)
Report file created by ``ccs``, or
`None` if you have no reports.
Attributes:
`samplename` (str)
Name set at initialization
`ccsfile` (str)
``ccs`` BAM file set at initialization
`reportfile` (str or `None`)
``ccs`` report file set at initialization
`zmw_report` (pandas.DataFrame or `None`):
ZMW stats in `reportfile`, or `None` if no
`reportfile`. Columns are *status*, *number*,
*percent*, and *fraction*.
`subread_report` (pandas.DataFrame or `None`)
Like `zmw_report` but for subreads.
`df` (pandas.DataFrame)
The CCSs in `ccsfile`. Each row is a different CCS
On creation, there will be the following columns (you
can modify to add more):
- "name": the name of the CCS
- "samplename": the sample as set via `samplename`
- "CCS": the circular consensus sequence
- "CCS_qvals": the Q-values as a numpy array
- "passes": the number of passes of the CCS
- "CCS_accuracy": the accuracy of the CCS
- "CCS_length": the length of the CCS
Here is an example.
First, define the sequences, quality scores,
and names for 3 example sequences. The names indicate
the barcodes, the accuracy of the barcode, and the polarity.
Two of the sequences have the desired termini and
a barcode. The other does not. Note that the second
sequence has an extra nucleotide at each end, this
will turn out to be fine with the `match_str` we write.
The second sequence is also reverse complemented:
>>> termini5 = 'ACG'
>>> termini3 = 'CTT'
>>> ccs_seqs = [
... {'name':'barcoded_TTC_0.999_plus',
... 'seq':termini5 + 'TTC' + 'ACG' + termini3,
... 'qvals':'?' * 12,
... },
... {'name':'barcoded_AGA_0.995_minus',
... 'seq':dms_tools2.utils.reverseComplement(
... 'T' + termini5 + 'AGA' + 'GCA' + termini3 + 'A'),
... 'qvals':''.join(reversed('?' * 4 + '5?9' + '?' * 7)),
... },
... {'name':'invalid',
... 'seq':'GGG' + 'CAT' + 'GCA' + termini3,
... 'qvals':'?' * 12,
... }
... ]
>>> for iccs in ccs_seqs:
... iccs['accuracy'] = qvalsToAccuracy(iccs['qvals'], encoding='sanger')
Now place these in a block of text that meets the
`CCS SAM specification <https://github.com/PacificBiosciences/unanimity/blob/develop/doc/PBCCS.md>`_:
>>> sam_template = '\\t'.join([
... '{0[name]}',
... '4', '*', '0', '255', '*', '*', '0', '0',
... '{0[seq]}',
... '{0[qvals]}',
... 'np:i:6',
... 'rq:f:{0[accuracy]}',
... ])
>>> samtext = '\\n'.join([sam_template.format(iccs) for
... iccs in ccs_seqs])
Create small SAM file with these sequences, then
convert to BAM file used to initialize a :class:`CCS`
(note this requires ``samtools`` to be installed):
>>> samfile = '_temp.sam'
>>> bamfile = '_temp.bam'
>>> with open(samfile, 'w') as f:
... _ = f.write(samtext)
>>> _ = subprocess.check_call(['samtools', 'view',
... '-b', '-o', bamfile, samfile])
>>> ccs = CCS('test', bamfile, None)
>>> os.remove(samfile)
We also sometimes create the BAM files created by PacBio
``ccs`` to FASTQ. Do that using ``samtools bam2fq -T np,rq``
to keep flags with number of passes and overall read quality:
>>> fastq_data = subprocess.check_output(
... ['samtools', 'bam2fq', '-T', 'np,rq', bamfile])
Show how the resulting FASTQ data keeps the *np* and *rq* tags:
>>> print(fastq_data.decode('utf-8').strip().replace('\\t', ' '))
@barcoded_TTC_0.999_plus np:i:6 rq:f:0.999
ACGTTCACGCTT
+
????????????
@barcoded_AGA_0.995_minus np:i:6 rq:f:0.998144
TAAGTGCTCTCGTA
+
???????9?5????
@invalid np:i:6 rq:f:0.999
GGGCATGCACTT
+
????????????
Write the FASTQ to a file, and check that :class:`CCS`
initialized from the FASTQ is the same as one from the BAM:
>>> fastqfile = '_temp.fastq'
>>> gzfastqfile = '_temp.fastq.gz'
>>> with open(fastqfile, 'wb') as f:
... _ = f.write(fastq_data)
>>> with gzip.open(gzfastqfile, 'wb') as f:
... _ = f.write(fastq_data)
>>> ccs_fastq = CCS('test', fastqfile, None)
>>> ccs_gzfastq = CCS('test', gzfastqfile, None)
>>> pandas.testing.assert_frame_equal(ccs_fastq.df, ccs.df)
>>> pandas.testing.assert_frame_equal(ccs_gzfastq.df, ccs.df)
>>> os.remove(fastqfile)
>>> os.remove(gzfastqfile)
>>> os.remove(bamfile)
Check `ccs.df` has correct names, samplename, CCS sequences,
and columns:
>>> set(ccs.df.name) == {s['name'] for s in ccs_seqs}
True
>>> all(ccs.df.samplename == 'test')
True
>>> set(ccs.df.CCS) == {s['seq'] for s in ccs_seqs}
True
>>> set(ccs.df.columns) == {'CCS', 'CCS_qvals', 'name',
... 'passes', 'CCS_accuracy', 'CCS_length', 'samplename'}
True
Use :meth:`matchSeqs` to match sequences with expected termini
and define barcodes and reads in these:
>>> match_str = (termini5 + '(?P<barcode>N{3})' +
... '(?P<read>N+)' + termini3)
>>> ccs.df = matchSeqs(ccs.df, match_str, 'CCS', 'barcoded')
This matching adds new columns to the new `ccs.df`:
>>> set(ccs.df.columns) >= {'barcode', 'barcode_qvals',
... 'barcode_accuracy', 'read', 'read_qvals',
... 'read_accuracy', 'barcoded', 'barcoded_polarity'}
True
Now make sure `df` indicates that the correct sequences
are barcoded, and that they have the correct barcodes:
>>> bc_names = sorted([s['name'] for s in ccs_seqs if
... 'barcoded' in s['name']])
>>> ccs.df = ccs.df.sort_values('barcode')
>>> (ccs.df.query('barcoded').name == bc_names).all()
True
>>> barcodes = [x.split('_')[1] for x in bc_names]
>>> (ccs.df.query('barcoded').barcode == barcodes).all()
True
>>> (ccs.df.query('not barcoded').barcode == ['']).all()
True
>>> barcode_accuracies = [float(x.split('_')[2]) for x in bc_names]
>>> numpy.allclose(ccs.df.query('barcoded').barcode_accuracy,
... barcode_accuracies, atol=1e-4)
True
>>> numpy.allclose(ccs.df.query('barcoded').barcode_accuracy,
... [qvalsToAccuracy(qvals) for qvals in
... ccs.df.query('barcoded').barcode_qvals])
True
>>> numpy.allclose(ccs.df.query('not barcoded').barcode_accuracy,
... -1, atol=1e-4)
True
>>> barcoded_polarity = [{'plus':1, 'minus':-1}[x.split('_')[3]]
... for x in bc_names]
>>> (ccs.df.query('barcoded').barcoded_polarity == barcoded_polarity).all()
True
"""
def __init__(self, samplename, ccsfile, reportfile):
    """See main class doc string."""
    self.samplename = samplename
    assert os.path.isfile(ccsfile), f"can't find {ccsfile}"
    self.ccsfile = ccsfile
    self.reportfile = reportfile
    if self.reportfile is None:
        # no ``ccs`` report available; leave both report frames unset
        self.zmw_report = None
        self.subread_report = None
    else:
        assert os.path.isfile(reportfile), \
            "can't find {0}".format(reportfile)
        # set `zmw_report` and `subread_report`
        self._parse_report()
    # populates `self.df` with one row per CCS
    self._build_df_from_ccsfile()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def _parse_report(self):
    """Set `zmw_report` and `subread_report` using `reportfile`."""
    # match reports made by ccs 3.0.0: a "ZMW Yield" section followed by
    # a "Subread Yield" section, each a small CSV-like table
    reportmatch = regex.compile('^ZMW Yield\n(?P<zmw>(.+\n)+)\n\n'
                                'Subread Yield\n(?P<subread>(.+\n)+)$')
    with open(self.reportfile) as f:
        report = f.read()
    m = reportmatch.search(report)
    assert m, "Cannot match {0}\n\n{1}".format(
        self.reportfile, report)
    for read_type in ['zmw', 'subread']:
        # parse each section into a DataFrame; "percent" is a string like
        # "12.3%", from which the numeric "fraction" column is derived
        df = (pandas.read_csv(
            io.StringIO(m.group(read_type)),
            names=['status', 'number', 'percent']
        )
            .assign(fraction=lambda x:
                    x.percent.str.slice(None, -1)
                    .astype('float') / 100)
        )
        setattr(self, read_type + '_report', df)
def _build_df_from_ccsfile(self):
    """Builds `df` from `ccsfile`.

    Populates `self.df` with one row per CCS. Reads a BAM file via
    `pysam.AlignmentFile`, or a (possibly gzipped) FASTQ file via
    `pysam.FastxFile`; the format is chosen from the file extension.
    Raises `ValueError` for an unsupported extension or a gzipped BAM.
    """
    # read into dictionary
    d = collections.defaultdict(list)

    # get file type by extensions
    base, ext = [s.lower() for s in os.path.splitext(self.ccsfile)]
    if ext in {'.gz', '.gzip'}:
        gzipped = True
        ext = os.path.splitext(base)[1].lower()
    else:
        gzipped = False

    # extract data based on file extension
    if ext == '.bam':
        if gzipped:
            raise ValueError("Cannot handle gzipped BAM")
        for s in pysam.AlignmentFile(self.ccsfile, 'rb',
                                     check_sq=False):
            d['CCS'].append(s.query_sequence)
            d['CCS_qvals'].append(numpy.asarray(s.query_qualities,
                                                dtype='int'))
            d['name'].append(s.query_name)
            d['passes'].append(s.get_tag('np'))
            d['CCS_accuracy'].append(s.get_tag('rq'))
            d['CCS_length'].append(s.query_length)
            d['samplename'].append(self.samplename)
    elif ext in {'.fq', '.fastq'}:
        # header carries the `np` (passes) and `rq` (accuracy) SAM tags
        # preserved by ``samtools bam2fq -T np,rq``
        headmatch = re.compile(r'^(?P<name>\S+)\s+'
                               r'np:i:(?P<passes>\d+)\s+'
                               r'rq:f:(?P<accuracy>\d+\.{0,1}\d*)')
        for a in pysam.FastxFile(self.ccsfile):
            if a.comment is not None:
                head = f"{a.name} {a.comment}"
            else:
                head = a.name
            m = headmatch.match(head)
            if not m:
                raise ValueError(f"could not match {head}")
            d['CCS'].append(a.sequence)
            # Sanger encoding: Q = ord(char) - 33
            qvals = numpy.array([ord(qi) - 33 for qi in a.quality],
                                dtype='int')
            d['CCS_qvals'].append(qvals)
            d['name'].append(m.group('name'))
            d['passes'].append(int(m.group('passes')))
            d['CCS_accuracy'].append(float(m.group('accuracy')))
            d['CCS_length'].append(len(a.sequence))
            d['samplename'].append(self.samplename)
    else:
        raise ValueError(f"invalid file extension {ext}")

    # create data frame
    self.df = pandas.DataFrame(d)

    # some checks on `df`
    # BUGFIX: the message previously formatted `self.name`, which does not
    # exist on CCS (only `samplename`/`ccsfile` are set), so a duplicate
    # name would raise AttributeError instead of this assertion message.
    assert self.df.name.size == self.df.name.unique().size,\
        "non-unique names for {0}".format(self.samplename)
    assert (self.df.CCS_length == self.df.CCS.apply(len)).all(),\
        "CCS not correct length"
    assert (self.df.CCS_length == self.df.CCS_qvals.apply(len)).all(),\
        "qvals not correct length"
# Lightweight record for a single-nucleotide variant tag located in one
# of the amplicon termini; field docs are attached below.
TerminiVariantTag = collections.namedtuple(
    'TerminiVariantTag', ['termini', 'site', 'nucleotides'])
TerminiVariantTag.__doc__ = "Variant tag at termini."
TerminiVariantTag.termini.__doc__ = \
    "Location of tag: `termini5` or `termini3`."
TerminiVariantTag.site.__doc__ = \
    "Site of tag in termini (0, 1, ... numbering)."
TerminiVariantTag.nucleotides.__doc__ = \
    "A dict keyed variant nucleotides, values variant name."
class TerminiVariantTagCaller:
"""Call variant tags at termini of CCSs.
Args:
`features` (list)
List of BioPython `SeqFeature` objects. Any
features with a type attribute of `variant_tag`
are taken to specify variant tags. These should
consist of a single nucleotide, and have qualifiers
that give the nucleotide for each variant. The
features list should also have features with
type attributes `termini5` and `termini3` used
to determine which termin each tag falls in.
`variants` (list)
List of variant names, must have nucleotide for
variant specified in qualifier for each variant tag.
`trim_termini` (int)
The amount trimmed from the 5' termini of `termini5`
and the 3' termini of `termini3` when these are
passed to :class:`TerminiVariantTagCaller.call`.
Attributes:
`variant_tags` (list)
List of :class:`TerminiVariantTag` objects.
`variants` (list)
List of variant names set on initialization.
`trim_termini` (int)
Value set as argument on initialization.
`termini5` (Bio.SeqFeature.SeqFeature)
The 5' termini in `features`
`termini3` (Bio.SeqFeature.SeqFeature)
The 3' termini in `features`
Here is an example. First, create list of features that has a
single-nucleotide variant tag for each of two possible variants
('variant_1' and 'variant_2') in each termini:
>>> SeqFeature = Bio.SeqFeature.SeqFeature
>>> FeatureLocation = Bio.SeqFeature.FeatureLocation
>>> features = [
... SeqFeature(type='termini5', location=FeatureLocation(0, 147)),
... SeqFeature(type='termini3', location=FeatureLocation(1303, 1342)),
... SeqFeature(type='variant_tag', location=FeatureLocation(32, 33),
... qualifiers={'variant_1':['A'], 'variant_2':['G']}),
... SeqFeature(type='variant_tag', location=FeatureLocation(1310, 1311),
... qualifiers={'variant_1':['T'], 'variant_2':['C']})
... ]
Now initialize the :class:`TerminiVariantTagCaller`:
>>> caller = TerminiVariantTagCaller(features, trim_termini=4)
>>> caller.variants
['variant_1', 'variant_2']
>>> int(caller.termini5.location.start)
0
>>> int(caller.termini5.location.end)
147
>>> int(caller.termini3.location.start)
1303
>>> int(caller.termini3.location.end)
1342
>>> len(caller.variant_tags)
2
>>> caller.variant_tags[0].termini
'termini5'
>>> caller.variant_tags[0].site
32
>>> caller.variant_tags[0].nucleotides == {'A':'variant_1', 'G':'variant_2'}
True
>>> caller.variant_tags[1].termini
'termini3'
>>> caller.variant_tags[1].site
7
>>> caller.variant_tags[1].nucleotides == {'T':'variant_1', 'C':'variant_2'}
True
Do some example variant calling:
>>> caller.call({'termini5':'GGCGTCACACTTTGCTATGCCATAGCATATTTATCC',
... 'termini3':'AGATCGGTAGAGCGTCGTGTAGGGAAAGAGTGTGG'})
'variant_1'
>>> caller.call({'termini5':'GGCGTCACACTTTGCTATGCCATAGCATGTTTATCC',
... 'termini3':'AGATCGGCAGAGCGTCGTGTAGGGAAAGAGTGTGG'})
'variant_2'
>>> caller.call({'termini5':'GGCGTCACACTTTGCTATGCCATAGCATGTTTATCC',
... 'termini3':'AGATCGGTAGAGCGTCGTGTAGGGAAAGAGTGTGG'})
'mixed'
>>> caller.call({'termini5':'CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC',
... 'termini3':'AGATCGGTAGAGCGTCGTGTAGGGAAAGAGTGTGG'})
'invalid'
>>> caller.call({'termini5':'GGC', 'termini3':'AGAT'})
'unknown'
>>> caller.call({})
'unknown'
"""
def __init__(self, features, *, variants=None, trim_termini):
    """See main class docs.

    `variants` defaults to `['variant_1', 'variant_2']`; a `None`
    sentinel is used rather than a list literal to avoid a mutable
    default argument shared across calls.
    """
    if variants is None:
        variants = ['variant_1', 'variant_2']
    # group the features by their `type` so each expected element
    # (termini5, termini3, variant_tag) can be located by name
    features_dict = collections.defaultdict(list)
    for feature in features:
        features_dict[feature.type].append(feature)
    # there must be exactly one 5' and one 3' termini feature
    for termini in ['termini5', 'termini3']:
        if len(features_dict[termini]) != 1:
            raise ValueError(f"Failed to find exactly one {termini}")
        else:
            setattr(self, termini, features_dict[termini][0])
    if len(features_dict['variant_tag']) < 1:
        raise ValueError("no `variant_tag`s specified")
    # validate before assigning so a bad value never ends up on `self`
    if trim_termini < 0:
        raise ValueError("trim_termini must be >= 0")
    self.trim_termini = trim_termini
    self.variants = variants
    if len(self.variants) < 1:
        raise ValueError("no variants specified")
    self.variant_tags = []
    for variant_feature in features_dict['variant_tag']:
        # each variant tag must cover exactly one nucleotide site
        if len(variant_feature) != 1:
            raise ValueError(f"variant not length 1: {variant_feature}")
        # the tag site must fall within exactly one of the two termini
        termini = [f for f in [self.termini5, self.termini3] if
                variant_feature.location.start in f]
        if len(termini) != 1:
            raise ValueError("variant tag not in exactly one termini")
        self.variant_tags.append(
                TerminiVariantTag(
                    termini=termini[0].type,
                    # site is 0-based relative to the start of its termini
                    site=variant_feature.location.start -
                            termini[0].location.start,
                    # map each possible nucleotide to its variant name
                    nucleotides={variant_feature.qualifiers[v][0]:v
                            for v in self.variants})
                )
def checkTagNucleotides(self, amplicon):
    """Check amplicon carrying tags has right ambiguous nucleotides.

    Arguments:
        `amplicon` (BioPython `SeqRecord`)
            The full amplicon that contains the termini
            and variant tags.

    For every variant tag, the nucleotide that `amplicon` carries at
    the tag site must be an IUPAC code whose expansion covers all of
    the nucleotides that define the possible variants. Returns `None`
    when everything checks out; raises a `ValueError` on the first
    tag whose site fails the check.
    """
    termini_by_name = {'termini5':self.termini5,
                       'termini3':self.termini3}
    for variant_tag in self.variant_tags:
        # pull out just the termini region that carries this tag
        region = termini_by_name[variant_tag.termini]
        regionseq = str(region.location.extract(amplicon).seq)
        nt = regionseq[variant_tag.site]
        # the (possibly ambiguous) nucleotide must match every
        # concrete nucleotide that the tag can take
        pattern = dms_tools2.NT_TO_REGEXP[nt]
        for variant_nt in variant_tag.nucleotides:
            if not re.match(pattern, variant_nt):
                raise ValueError(f"Nucleotide {nt} invalid for {variant_tag}")
def call(self, termini_seqs):
    """Call variant identity.

    Args:
        `termini_seqs` (dict, pandas row, or similar mapping)
            Some object whose `termini5` and `termini3` entries can
            be accessed by key (e.g. ``termini_seqs['termini5']``)
            and tested with ``'termini5' in termini_seqs`` -- dicts
            and pandas rows both work. These termini are assumed to
            have the amount specified by
            :class:`TerminiVariantTagCaller.trim_termini`
            trimmed from the 5' termini of `termini5` and
            the 3' termini of `termini3`.

    Returns:
        A str that can be any of the following:

        - If all tag sites in termini match the same variant,
          return the name of that variant.
        - If different tag sites match different variants,
          return "mixed".
        - If any tag sites have a nucleotide that matches no
          known variants, return "invalid".
        - If `termini_seqs` lacks a termini or has a termini
          that is too short to contain the tag sites (including
          a 5' tag site that falls inside the trimmed-off
          region), return "unknown".
    """
    if not ('termini5' in termini_seqs and 'termini3' in termini_seqs):
        return "unknown"
    variants = []
    for variant_tag in self.variant_tags:
        i = variant_tag.site
        if variant_tag.termini == 'termini5':
            # the stored site is relative to the *untrimmed* termini
            i -= self.trim_termini
        # BUGFIX: a negative index would previously index silently
        # from the end of the string and could miscall the variant;
        # such sites (and sites past the end) are uncallable.
        if i < 0 or len(termini_seqs[variant_tag.termini]) <= i:
            return 'unknown'
        nt = termini_seqs[variant_tag.termini][i]
        if nt in variant_tag.nucleotides:
            variants.append(variant_tag.nucleotides[nt])
        else:
            return 'invalid'
    # all tags agree -> that variant; disagreement -> "mixed"
    if len(set(variants)) == 1:
        return variants[0]
    else:
        return 'mixed'
def matchAndAlignCCS(ccslist, mapper, *,
        termini5, gene, spacer, umi, barcode, termini3,
        termini5_fuzziness=0, gene_fuzziness=0,
        spacer_fuzziness=0, umi_fuzziness=0,
        barcode_fuzziness=0, termini3_fuzziness=0,
        targetvariants=None, mutationcaller=None,
        terminiVariantTagCaller=None,
        tagged_termini_remove_indels=True,
        rc_barcode_umi=True):
    """Identify CCSs that match pattern and align them.

    This is a convenience function that runs :meth:`matchSeqs`
    and :meth:`alignSeqs` for a common use case. It takes one
    or more :class:`CCS` objects, looks for CCS sequences in them
    that match a specific pattern, and aligns them to targets. It
    returns a pandas data frame with all the results. The CCS
    sequences are assumed to be molecules that have the following
    structure, although potentially in either orientation::

        5'-...-termini5-gene-spacer-umi-barcode-termini3-...-3'

    As indicated by the ``...``, there can be sequence before and
    after our expected pattern that we ignore. The gene element
    is then aligned to the targets. The full CCS is also aligned
    in the absence of the pattern matching.

    Args:
        `ccslist` (:class:`CCS` object or list of them)
            Analyze the CCS's in the `df` attributes. If there are
            multiple :class:`CCS` objects, they are concatenated.
            However, they must have the same columns.
        `mapper` (:py:mod:`dms_tools2.minimap2.Mapper`)
            Mapper used to perform alignments.
        `termini5` (str or `None`)
            Expected sequence at 5' end as str that can be compiled
            to `regex` object. Passed through :meth:`re_expandIUPAC`.
            For instance, make it 'ATG|CTG' if the sequence might
            start with either `ATG` or `CTG`. Set to `None` if
            no expected 5' termini.
        `gene` (str)
            Like `termini5` but gives the gene to match. For instance,
            'N+' if the gene can be arbitrary sequence and length.
        `spacer` (str or `None`)
            Like `termini5`, but for the spacer after `gene`.
        `umi` (str or `None`)
            Like `termini5`, but for UMI.
        `barcode` (str or `None`)
            Like `termini5`, but for barcode. For instance, 'N{10}'
            if 10-nucleotide barcode.
        `termini3` (str or `None`)
            Like `termini5`, but for termini3.
        `termini5_fuzziness`, ..., `termini3_fuzziness` (int)
            The matching for the sequence patterns uses `regex`,
            which enables fuzzy matching. Set `termini5_fuzziness`
            to enable a specific number of differences (can be
            insertion, deletion, or mismatch) when matching
            `termini5`. Likewise for `gene_fuzziness`, etc.
            Note that the fuzzy matching uses the *BESTMATCH*
            flag to try to find the best fuzzy match.
            Note also that you can **not** both use fuzzy
            matching characters in the strings to match (e.g.,
            `termini5`) and set its fuzziness to a value > 0:
            choose one or the other way to specify fuzzy matches.
        `targetvariants` (:class:`dms_tools2.minimap2.TargetVariants`)
            Call target variants. See docs for same argument to
            :meth:`alignSeqs`.
        `mutationcaller` (:class:`dms_tools2.minimap2.MutationCaller`)
            Call mutations. See docs for same argument to :meth:`alignSeqs`.
        `terminiVariantTagCaller` (:class:`TerminiVariantTagCaller`)
            Call variants in termini.
        `tagged_termini_remove_indels` (bool)
            If `terminiVariantTagCaller` is being used and this
            is `True`, then use `remove_indels` flag when calling
            :meth:`matchSeqs` for the termini. This is useful if
            using fuzzy matching for the termini, as it aids
            in the calling of tags as it doesn't cause indels
            to misplace the tag. Has no meaning if
            `terminiVariantTagCaller` is not being used.
        `rc_barcode_umi` (bool)
            Do we reverse complement the `barcode` and `UMI` in the
            returned data frame relative to the orientation of
            the gene. Typically this is desirable because actual
            barcode sequencing goes in the reverse direction of the
            gene.

    Returns:
        A pandas dataframe that will have all columns already in the
        `df` attribute of the input :class:`CCS` objects with the
        following columns added:

        - `barcoded`: `True` if CCS matches full expected pattern,
          `False` otherwise.
        - `barcoded_polarity`: 1 if the match is in the polarity of
          the CCS, -1 if to the reverse complement, 0 if no match.
        - Columns named `termini5`, `gene`, `spacer`, `UMI`,
          `barcode`, and `termini3` (except if any of these elements
          are `None`). If `barcoded` is `True` for that CCS, these
          columns give the sequence for that element. If it is `False`,
          they are empty strings. There are likewise columns with
          these same names suffixed with "_accuracy" that give the CCS
          accuracy for that element, and columns suffixed with "_qvals"
          that give the quality scores for the elements.
        - For each of `termini5`, `spacer`, and `termini3` that are
          not `None`, a column named `has_termini5`, etc that
          indicates if that element is matched in isolation even if
          the full pattern is not matched.
        - `gene_aligned` is True if the CCS matches the expected
          pattern (is `barcoded`), and `gene` can further be
          aligned using `mapper`. It is `False` otherwise.
        - `gene_aligned_alignment`, `gene_aligned_target`,
          `gene_aligned_n_trimmed_query_start`,
          `gene_aligned_n_trimmed_query_end`,
          `gene_aligned_n_trimmed_target_start`,
          `gene_aligned_n_trimmed_target_end`,
          `gene_aligned_n_additional`, and
          `gene_aligned_n_additional_difftarget` give the
          :py:mod:`dms_tools2.minimap2.Alignment`, the alignment
          target, number of nucleotides trimmed from ends of
          the query gene or target, the number
          of additional alignments if `gene_aligned`,
          and the number of additional alignments to different
          targets (see `target_isoforms` attribute of
          :py:mod:`dms_tools2.minimap2.Mapper`). If
          the gene is not aligned, these are `None`,
          empty strings, or -1.
        - If `targetvariants` is not `None`, column named
          `gene_aligned_target_variant` giving target variant
          returned by :class:`dms_tools2.minimap2.TargetVariants.call`.
        - If `mutationcaller` is not `None`, column named
          `gene_aligned_mutations` giving the
          :class:`dms_tools2.minimap2.Mutations` object returned
          by :class:`dms_tools2.minimap2.MutationCaller.call`,
          or `None` if there is no alignment.
        - If `terminiVariantTagCaller` is not `None`, column
          named `termini_variant` giving the termini variant
          returned by :class:`TerminiVariantTagCaller.call`,
          or the str "unknown" if both termini are not matched.
        - `CCS_aligned` is `True` if the CCS can be aligned
          using `mapper` even if a gene cannot be matched,
          and `False` otherwise. `CCS_aligned_alignment`
          and `CCS_aligned_target` give the
          :py:mod:`dms_tools2.minimap2.Alignment` (or `None`)
          and the target (or empty string).
    """
    # `collections.Iterable` was removed in Python 3.10; use the
    # abc module instead (function-scope import keeps this fix local)
    from collections.abc import Iterable

    if isinstance(ccslist, Iterable):
        col_list = [ccs.df.columns for ccs in ccslist]
        assert all([col_list[0].equals(col) for col in col_list]),\
                "the CCS.df's in `ccslist` don't have same columns"
        df = pandas.concat([ccs.df for ccs in ccslist])
    else:
        df = ccslist.df

    # internal function:
    def _align_CCS_both_orientations(df, mapper):
        """Try align CCS both ways, adds columns
        `CCS_aligned`, `CCS_aligned_alignment`, and
        `CCS_aligned_target`."""
        df_bi = (df.pipe(dms_tools2.pacbio.alignSeqs,
                        mapper=mapper,
                        query_col='CCS',
                        aligned_col='CCS_for_aligned')
                   .assign(CCS_rev=lambda x: x.CCS.map(
                        dms_tools2.utils.reverseComplement))
                   .pipe(dms_tools2.pacbio.alignSeqs,
                        mapper=mapper,
                        query_col='CCS_rev',
                        aligned_col='CCS_rev_aligned')
                   )
        # prefer the forward alignment; fall back to the reverse one
        return (df.assign(CCS_aligned=df_bi.CCS_for_aligned |
                                      df_bi.CCS_rev_aligned)
                  .assign(CCS_aligned_alignment=
                        df_bi.CCS_for_aligned_alignment.where(
                            df_bi.CCS_for_aligned,
                            df_bi.CCS_rev_aligned_alignment))
                  .assign(CCS_aligned_target=lambda x:
                        x.CCS_aligned_alignment.map(
                            lambda x: x.target if x is not None else ''))
                  )

    # build match_str, one named group per molecular element
    match_str = collections.OrderedDict()
    fuzz = {'termini5':termini5_fuzziness,
            'gene':gene_fuzziness,
            'spacer':spacer_fuzziness,
            'UMI':umi_fuzziness,
            'barcode':barcode_fuzziness,
            'termini3':termini3_fuzziness}
    seqs = {'termini5':termini5,
            'gene':gene,
            'spacer':spacer,
            'UMI':umi,
            'barcode':barcode,
            'termini3':termini3}
    for s in ['termini5', 'gene', 'spacer', 'UMI', 'barcode', 'termini3']:
        if seqs[s] is None:
            match_str[s] = None
        elif fuzz[s] > 0:
            # BUGFIX: previously *any* nonzero fuzziness caused every
            # element containing regex braces (e.g. barcode 'N{10}')
            # to raise, even when that element's own fuzziness was 0.
            # The check is now per-element.
            if '{' in seqs[s] or '}' in seqs[s]:
                raise ValueError('Using fuzziness and fuzzy match in'
                        f" {s}:\nfuzziness = {fuzz[s]}\nseq = {seqs[s]}")
            match_str[s] = f"(?P<{s}>{seqs[s]}){{e<={fuzz[s]}}}"
        else:
            match_str[s] = f"(?P<{s}>{seqs[s]})"

    if tagged_termini_remove_indels and (
            terminiVariantTagCaller is not None):
        remove_indels = ['termini5', 'termini3']
    else:
        remove_indels = []

    # now create df
    df = (
        df
        # match barcoded sequences
        .pipe(dms_tools2.pacbio.matchSeqs,
                match_str=''.join(m for m in match_str.values()
                        if m is not None),
                col_to_match='CCS',
                match_col='barcoded',
                remove_indels=remove_indels)
        # look for just termini or spacer
        .pipe(dms_tools2.pacbio.matchSeqs,
                match_str=match_str['termini5'],
                col_to_match='CCS',
                match_col='has_termini5',
                add_polarity=False,
                add_group_cols=False)
        .pipe(dms_tools2.pacbio.matchSeqs,
                match_str=match_str['termini3'],
                col_to_match='CCS',
                match_col='has_termini3',
                add_polarity=False,
                add_group_cols=False)
        .pipe(dms_tools2.pacbio.matchSeqs,
                match_str=match_str['spacer'],
                col_to_match='CCS',
                match_col='has_spacer',
                add_polarity=False,
                add_group_cols=False)
        # see if gene aligns in correct orientation
        .pipe(dms_tools2.pacbio.alignSeqs,
                mapper=mapper,
                query_col='gene',
                aligned_col='gene_aligned',
                targetvariants=targetvariants,
                mutationcaller=mutationcaller)
        # look for any alignment of CCS, take best in either orientation
        .pipe(_align_CCS_both_orientations,
                mapper=mapper)
        )

    if terminiVariantTagCaller is not None:
        df = df.assign(termini_variant=lambda x: x.apply(
                terminiVariantTagCaller.call, axis=1))

    # reverse complement barcode and UMI
    if rc_barcode_umi:
        if barcode is not None:
            df.barcode = df.barcode.map(dms_tools2.utils.reverseComplement)
        if umi is not None:
            df.UMI = df.UMI.map(dms_tools2.utils.reverseComplement)

    return df
def matchSeqs(df, match_str, col_to_match, match_col, *,
        add_polarity=True, add_group_cols=True, remove_indels=None,
        add_accuracy=True, add_qvals=True,
        expandIUPAC=True, overwrite=False):
    """Identify sequences in a dataframe that match a specific pattern.

    Args:
        `df` (pandas DataFrame)
            Data frame with column holding sequences to match.
        `match_str` (str)
            A string that can be passed to `regex.compile` that gives
            the pattern that we are looking for, with target
            subsequences as named groups. See also the `expandIUPAC`
            parameter, which simplifies writing `match_str`.
            If `None` we just return `df`. Note that we use
            `regex` rather than `re`, so fuzzy matching is
            enabled. Note that the matching uses the *BESTMATCH*
            flag to find the best match.
        `col_to_match` (str)
            Name of column in `df` that contains the sequences
            to match.
        `match_col` (str)
            Name of column added to `df`. Elements of columns are
            `True` if `col_to_match` matches `match_str` for that
            row, and `False` otherwise.
        `add_polarity` (bool)
            Add a column specifying the polarity of the match?
        `add_group_cols` (bool)
            Add columns with the sequence of every group in
            `match_str`?
        `remove_indels` (list or `None`)
            Only meaningful if `match_str` specifies to allow
            fuzzy matching for a group, and `add_group_cols`
            is `True`. Then for each named group listed here,
            in the sequence for that group that is added to the
            returned `df`, indicate indels by adding a `-`
            gap character for deletions, and removing the
            inserted nucleotide called by regex if there
            is an insertion. `None` (the default) is equivalent
            to an empty list (no groups).
        `add_accuracy` (bool)
            For each group in the match, add a column giving
            the accuracy of that group's sequence? Only used
            if `add_group_cols` is `True`.
        `add_qvals` (bool)
            For each group in the match, add a column giving
            the Q values for that group's sequence? Only used if
            `add_group_cols` is `True`.
        `expandIUPAC` (bool)
            Use `IUPAC code <https://en.wikipedia.org/wiki/Nucleic_acid_notation>`_
            to expand ambiguous nucleotides (e.g., "N") by passing
            `match_str` through the :meth:`re_expandIUPAC` function.
        `overwrite` (bool)
            If `True`, we overwrite any existing columns to
            be created that already exist. If `False`, raise
            an error if any of the columns already exist.

    Returns:
        A **copy** of `df` with new columns added. The exact columns
        to add are specified by the calling arguments. Specifically:

        - We always add a column with the name given by `match_col`
          that is `True` if there was a match and `False` otherwise.
        - If `add_polarity` is `True`, add a column that is
          `match_col` suffixed by "_polarity" which is 1 if
          the match is directly to the sequence in `col_to_match`,
          and -1 if it is to the reverse complement of this sequence.
          The value is 0 if there is no match.
        - If `add_group_cols` is `True`, then for each group
          in `match_str` specified using the `re` group naming
          syntax, add a column with that group name that
          gives the sequence matching that group. These
          sequences are empty strings if there is no match.
          These added sequences are in the polarity of the
          match, so if the sequence in `match_col` has
          to be reverse complemented for a match, then these
          sequences will be the reverse complement that matches.
          Additionally, when `add_group_cols` is True:

          - If `add_accuracy` is `True`, we also add a column
            suffixed by "_accuracy" that gives the
            accuracy of that group as computed from the Q-values.
            The value -1 if there is no match for that row. Adding
            accuracy requires a column in `df` with the name
            given by `match_col` suffixed by "_qvals."
          - If `add_qvals` is `True`, we also add a column
            suffixed by "_qvals" that gives the Q-values
            for that sequence. Adding these Q-values requires
            that there be a column in `df` with the name given by
            `match_col` suffixed by "_qvals". The Q-values are
            in the form of a numpy array, or an empty numpy array
            if there is no match for that row.

    See docs for :class:`CCS` for example uses of this function.

    Here is a short example that uses the fuzzy matching of
    the `regex` model for the polyA tail:

    >>> gene = 'ATGGCT'
    >>> polyA = 'AAAACAAAA'
    >>> df_in = pandas.DataFrame({'CCS':[gene + polyA]})
    >>> match_str = '(?P<gene>N+)(?P<polyA>AA(A{5,}){e<=1}AA)'
    >>> df = matchSeqs(df_in, match_str, 'CCS', 'matched',
    ...         add_accuracy=False, add_qvals=False)
    >>> expected = df.assign(gene=gene, polyA=polyA,
    ...         matched=True, matched_polarity=1)
    >>> (df.sort_index(axis=1) == expected.sort_index(axis=1)).all().all()
    True

    Here is a short example with fuzzy matching that uses the
    `remove_indels` option.

    First, do not remove the indels:

    >>> termini5 = 'ACAT'
    >>> termini3 = 'ATAC'
    >>> match_str2 = '^(?P<termini5>AAT){e<=1}(?P<gene>ATGGCT){e<=1}(?P<termini3>ATGAC){e<=1}$'
    >>> df_in2 = pandas.DataFrame({'CCS':[termini5 + gene + termini3]})
    >>> df2 = matchSeqs(df_in2, match_str2, 'CCS', 'matched',
    ...         add_accuracy=False, add_qvals=False)
    >>> df2.gene.values[0] == gene
    True
    >>> df2.termini5.values[0] == termini5
    True
    >>> df2.termini3.values[0] == termini3
    True

    Now remove the indels in just *termini3*:

    >>> df2_rm = matchSeqs(df_in2, match_str2, 'CCS', 'matched',
    ...         remove_indels=['termini3'],
    ...         add_accuracy=False, add_qvals=False)
    >>> df2_rm.gene.values[0] == gene
    True
    >>> df2_rm.termini5.values[0] == termini5
    True
    >>> df2_rm.termini3.values[0] == termini3
    False

    Now remove indels in **both** termini:

    >>> df2_rm2 = matchSeqs(df_in2, match_str2, 'CCS', 'matched',
    ...         remove_indels=['termini5', 'termini3'],
    ...         add_accuracy=False, add_qvals=False)
    >>> df2_rm2.gene.values[0] == gene
    True
    >>> df2_rm2.termini5.values[0] == termini5
    False
    >>> df2_rm2.termini5.values[0]
    'AAT'
    >>> df2_rm2.termini3.values[0] == termini3
    False
    """
    if match_str is None:
        return df

    assert col_to_match in df.columns, \
            "`df` lacks `col_to_match` column {0}".format(col_to_match)

    # normalize the `None` sentinel (avoids a mutable default argument)
    if remove_indels is None:
        remove_indels = []

    if expandIUPAC:
        match_str = re_expandIUPAC(match_str)
    # BESTMATCH makes fuzzy matching return the best, not first, match
    matcher = regex.compile(match_str, flags=regex.BESTMATCH)

    newcols = [match_col]

    if add_polarity:
        polarity_col = match_col + '_polarity'
        newcols.append(polarity_col)

    if add_group_cols:
        groupnames = list(matcher.groupindex.keys())
        if len(set(groupnames)) != len(groupnames):
            raise ValueError("duplicate group names in {0}"
                    .format(match_str))
        newcols += groupnames
        if add_accuracy:
            newcols += [g + '_accuracy' for g in groupnames]
        if add_qvals:
            newcols += [g + '_qvals' for g in groupnames]
        if add_accuracy or add_qvals:
            match_qvals_col = col_to_match + '_qvals'
            if match_qvals_col not in df.columns:
                raise ValueError("To use `add_accuracy` or "
                        "`add_qvals`, you need a column in `df` "
                        "named {0}".format(match_qvals_col))
        if remove_indels:
            # BUGFIX: this previously compared `remove_indels` with
            # itself (always False), so unknown group names were
            # silently accepted; compare against the actual groups.
            if not set(remove_indels) <= set(groupnames):
                raise ValueError("`remove_indels` specifies "
                        "unknown group(s)")
    else:
        groupnames = []
        if remove_indels:
            raise ValueError("can't use `remove_indels` without "
                    "using `add_group_cols`")

    # make sure created columns don't already exist
    dup_cols = set(newcols).intersection(set(df.columns))
    if not overwrite and dup_cols:
        raise ValueError("`df` already contains some of the "
                "columns that we are supposed to add:\n{0}"
                .format(dup_cols))

    # look for matches for each row
    match_d = {c:[] for c in newcols}
    for tup in df.itertuples():
        s = getattr(tup, col_to_match)
        m = matcher.search(s)
        if add_group_cols and (add_accuracy or add_qvals):
            qs = getattr(tup, match_qvals_col)
        if m:
            polarity = 1
        else:
            # no match in given polarity; try the reverse complement
            m = matcher.search(dms_tools2.utils.reverseComplement(s))
            polarity = -1
            if add_group_cols and (add_accuracy or add_qvals):
                # Q-values must be flipped to match the rev-comp seq
                qs = numpy.flip(qs, axis=0)
        if m:
            match_d[match_col].append(True)
            if add_polarity:
                match_d[polarity_col].append(polarity)
            # fuzzy_changes gives (substitution, insertion, deletion)
            # positions from the fuzzy regex match
            ins_sites = m.fuzzy_changes[1]
            del_sites = m.fuzzy_changes[2]
            for g in groupnames:
                g_start = m.start(g)
                g_end = m.end(g)
                if g in remove_indels:
                    g_ins_sites = [i for i in ins_sites
                            if g_start <= i < g_end]
                    g_del_sites = [i for i in del_sites
                            if g_start <= i < g_end]
                else:
                    g_ins_sites = []
                    g_del_sites = []
                if add_qvals:
                    g_qs = qs[g_start : g_end]
                    g_qs_list = []
                if g_ins_sites or g_del_sites:
                    # rebuild group sequence marking deletions with
                    # '-' and dropping regex-called insertions
                    g_seq = []
                    for i, x in enumerate(m.group(g)):
                        if i + g_start in g_ins_sites:
                            pass
                        elif i + g_start in g_del_sites:
                            g_seq.append(x + '-')
                            if add_qvals:
                                g_qs_list.append(g_qs[i])
                                g_qs_list.append(numpy.nan)
                        else:
                            g_seq.append(x)
                            if add_qvals:
                                g_qs_list.append(g_qs[i])
                    g_seq = ''.join(g_seq)
                    if add_qvals:
                        g_qs = numpy.array(g_qs_list)
                else:
                    g_seq = m.group(g)
                match_d[g].append(g_seq)
                if add_qvals:
                    match_d[g + '_qvals'].append(g_qs)
                if add_accuracy:
                    match_d[g + '_accuracy'].append(qvalsToAccuracy(
                            qs[g_start : g_end]))
        else:
            match_d[match_col].append(False)
            if add_polarity:
                match_d[polarity_col].append(0)
            for g in groupnames:
                match_d[g].append('')
                if add_qvals:
                    match_d[g + '_qvals'].append(numpy.array([], dtype='int'))
                if add_accuracy:
                    match_d[g + '_accuracy'].append(-1)

    # set index to make sure matches `df`
    indexname = df.index.name
    assert indexname not in match_d
    match_d[indexname] = df.index.tolist()

    # (the redundant second overwrite check was removed: the same
    # condition already raised above before any matching was done)
    return pandas.concat(
            [df.drop(dup_cols, axis=1),
             pandas.DataFrame(match_d).set_index(indexname),
            ],
            axis=1)
def alignSeqs(df, mapper, query_col, aligned_col, *,
        add_alignment=True, add_target=True,
        add_n_trimmed=True, add_n_additional=True,
        add_n_additional_difftarget=True, targetvariants=None,
        mutationcaller=None, overwrite=True, paf_file=None):
    """Align sequences in a dataframe to target sequence(s).

    Arguments:
        `df` (pandas DataFrame)
            Data frame in which one column holds sequences to match.
            There also must be a column named "name" with unique names.
        `mapper` (:py:mod:`dms_tools2.minimap2.Mapper`)
            Align using the :py:mod:`dms_tools2.minimap2.Mapper.map`
            function of `mapper`. Target sequence(s) to which
            we align are specified when initializing `mapper`.
        `query_col` (str)
            Name of column in `df` with query sequences to align.
            If we are to use Q-values, there must also be a column
            with this name suffixed by "_qvals".
        `aligned_col` (str)
            Name of column added to `df`. Elements of column are
            `True` if `query_col` aligns, and `False` otherwise.
        `add_alignment` (bool)
            Add column with the :py:mod:`dms_tools2.minimap2.Alignment`.
        `add_target` (bool)
            Add column giving target (reference) to which sequence
            aligns.
        `add_n_trimmed` (bool)
            Add columns giving number of nucleotides trimmed from
            ends of both the query and target in the alignment.
        `add_n_additional` (bool)
            Add column specifying the number of additional
            alignments.
        `targetvariants` (:class:`dms_tools2.minimap2.TargetVariants`)
            Call target variants of aligned genes using the `call`
            function of this object. Note that this also adjusts
            the returned alignments / CIGAR if a variant is called.
            If the `variantsites_min_acc` attribute is not `None`,
            then `df` must have a column with the name of `query_col`
            suffixed by '_qvals' that gives the Q-values to compute
            accuracies.
        `mutationcaller` (:class:`dms_tools2.minimap2.MutationCaller`)
            Call mutations of aligned genes using the `call` function
            of this object. Note that any target variant mutations are
            handled first and then removed and not called here.
        `add_n_additional_difftarget` (bool)
            Add columns specifying number of additional alignments
            to a target other than the one in the primary alignment.
        `overwrite` (bool)
            If `True`, we overwrite any existing columns to
            be created that already exist. If `False`, raise
            an error if any of the columns already exist.
        `paf_file` (`None` or str)
            If a str, is the name of the PAF file created
            by `mapper` (see `outfile` argument of
            :py:mod:`dms_tools2.minimap2.Mapper.map`). Otherwise
            this file is not saved.

    Returns:
        A **copy** of `df` with new columns added. The exact
        columns to add are specified by the calling arguments.
        Specifically:

        - We always add a column with the name given by
          `aligned_col` that is `True` if there was an
          alignment and `False` otherwise.
        - If `add_alignment` is `True`, add column named
          `aligned_col` suffixed by "_alignment" that gives
          the alignment as a :py:mod:`dms_tools2.minimap2.Alignment`
          object, or `None` if there is no alignment. Note that
          if there are multiple alignments, then this is the
          "best" alignment, and the remaining alignments are in
          the :py:mod:`dms_tools2.minimap2.Alignment.additional`
          attribute.
        - If `add_target` is `True`, add column named
          `aligned_col` suffixed by "_target" that gives
          the target to which the sequence aligns in the
          "best" alignment, or an empty string if no alignment.
        - If `add_n_trimmed` is `True`, add column named
          `aligned_col` suffixed by "_n_trimmed_query_start",
          "_n_trimmed_query_end", "_n_trimmed_target_start",
          and "_n_trimmed_target_end" that give the number
          of nucleotides trimmed from the query and target
          in the "best" alignment. Are all zero if the
          alignment is end-to-end. Are -1 if no alignment.
        - If `add_n_additional` is `True`, add column
          named `aligned_col` suffixed by "_n_additional" that
          gives the number of additional alignments (in
          :py:mod:`dms_tools2.minimap2.Alignment.additional`),
          or -1 if there is no alignment.
        - If `add_n_additional_difftarget` is `True`, add column
          named `aligned_col` suffixed by "_n_additional_difftarget"
          that gives the number of additional alignments to
          **different** targets that are not isoforms, or -1
          if there is no alignment. See the `target_isoforms`
          attribute of :py:mod:`dms_tools2.minimap2.Mapper`.
        - If `targetvariants` is not `None`, add a column
          named `aligned_col` suffixed by "_target_variant"
          that has the values returned for that alignment by
          :class:`dms_tools2.minimap2.TargetVariants.call`, or
          an empty string if no alignment.
        - If `mutationcaller` is not `None`, column named
          `aligned_col` suffixed by "_mutations" giving the
          :class:`dms_tools2.minimap2.Mutations` object returned
          by :class:`dms_tools2.minimap2.MutationCaller.call`,
          or `None` if there is no alignment.
    """
    assert query_col in df.columns, "no `query_col` {0}".format(query_col)

    newcols = [aligned_col]

    if add_alignment:
        alignment_col = aligned_col + '_alignment'
        newcols.append(alignment_col)

    if add_target:
        target_col = aligned_col + '_target'
        newcols.append(target_col)

    if add_n_trimmed:
        n_trimmed_prefix = aligned_col + '_n_trimmed_'
        for suffix in ['query_start', 'query_end',
                'target_start', 'target_end']:
            newcols.append(n_trimmed_prefix + suffix)

    if add_n_additional:
        n_additional_col = aligned_col + '_n_additional'
        newcols.append(n_additional_col)

    if add_n_additional_difftarget:
        n_additional_difftarget_col = (
                aligned_col + '_n_additional_difftarget')
        newcols.append(n_additional_difftarget_col)

    # map each query name to its Q-values (NaN if none provided)
    qvals_col = query_col + '_qvals'
    if qvals_col in df.columns:
        qvals = pandas.Series(df[qvals_col].values,
                index=df.name).to_dict()
    else:
        qvals = collections.defaultdict(lambda: math.nan)

    if targetvariants is not None:
        targetvariant_col = aligned_col + '_target_variant'
        newcols.append(targetvariant_col)
        if targetvariants.variantsites_min_acc is not None:
            if qvals_col not in df.columns:
                raise ValueError("Cannot use `variantsites_min_acc` "
                        "of `targetvariants` as there is not a column "
                        "in `df` named {0}".format(qvals_col))

    if mutationcaller is not None:
        mutations_col = aligned_col + '_mutations'
        newcols.append(mutations_col)

    assert len(newcols) == len(set(newcols))
    dup_cols = set(newcols).intersection(set(df.columns))
    if (not overwrite) and dup_cols:
        raise ValueError("`df` already contains these columns:\n{0}"
                .format(dup_cols))

    # perform the mapping
    assert len(df.name) == len(df.name.unique()), \
            "`name` in `df` not unique"
    with tempfile.NamedTemporaryFile(mode='w') as queryfile:
        # write non-empty queries as a FASTA file for the mapper
        queryfile.write('\n'.join([
                '>{0}\n{1}'.format(*tup) for tup in
                df.query('{0} != ""'.format(query_col))
                    [['name', query_col]]
                    .itertuples(index=False, name=None)
                ]))
        # BUGFIX: flush so buffered writes reach disk before the
        # mapper opens the file by name
        queryfile.flush()
        map_dict = mapper.map(queryfile.name, outfile=paf_file)

    align_d = {c:[] for c in newcols}
    for name in df.name:
        if name in map_dict:
            a = map_dict[name]
            assert a.strand == 1, "method does not handle - polarity"
            if targetvariants:
                # may also adjust the alignment / CIGAR for the variant
                (variant, a) = targetvariants.call(a, qvals[name])
                align_d[targetvariant_col].append(variant)
            if mutationcaller:
                align_d[mutations_col].append(mutationcaller.call(a,
                        qvals[name]))
            align_d[aligned_col].append(True)
            if add_alignment:
                align_d[alignment_col].append(a)
            if add_target:
                align_d[target_col].append(a.target)
            if add_n_trimmed:
                align_d[n_trimmed_prefix + 'query_start'].append(
                        a.q_st)
                align_d[n_trimmed_prefix + 'query_end'].append(
                        a.q_len - a.q_en)
                align_d[n_trimmed_prefix + 'target_start'].append(
                        a.r_st)
                align_d[n_trimmed_prefix + 'target_end'].append(
                        a.r_len - a.r_en)
            if add_n_additional:
                align_d[n_additional_col].append(len(a.additional))
            if add_n_additional_difftarget:
                # additional alignments to non-isoform targets only
                align_d[n_additional_difftarget_col].append(
                        len([a2.target for a2 in a.additional if
                        a2.target not in mapper.target_isoforms[a.target]]))
        else:
            align_d[aligned_col].append(False)
            if add_alignment:
                align_d[alignment_col].append(None)
            if add_target:
                align_d[target_col].append('')
            if add_n_trimmed:
                for suffix in ['query_start', 'query_end',
                        'target_start', 'target_end']:
                    align_d[n_trimmed_prefix + suffix].append(-1)
            if add_n_additional:
                align_d[n_additional_col].append(-1)
            if add_n_additional_difftarget:
                align_d[n_additional_difftarget_col].append(-1)
            if targetvariants:
                align_d[targetvariant_col].append('')
            if mutationcaller:
                align_d[mutations_col].append(None)

    # set index to make sure matches `df`
    index_name = df.index.name
    assert index_name not in align_d
    align_d[index_name] = df.index.tolist()

    # (the redundant second overwrite check was removed: the same
    # condition already raised above before any mapping was done)
    return pandas.concat(
            [df.drop(dup_cols, axis=1),
             pandas.DataFrame(align_d).set_index(index_name),
            ],
            axis=1)
def qvalsToAccuracy(qvals, encoding='numbers', no_avg=False):
    r"""Converts set of quality scores into average accuracy.

    Args:
        `qvals` (numpy array or number or str)
            List of Q-values, assumed to be Phred scores.
            For how they are encoded, see `encoding`.
        `encoding` (str)
            If it is "numbers" then `qvals` should be a
            numpy array giving the Q-values, or a number
            with one Q-value. If it is "sanger", then `qvals`
            is a string, with the score being the ASCII value
            minus 33.
        `no_avg` (bool)
            Compute the accuracies of individual Q-values
            rather than the average of the array or list.

    Returns:
        A number giving the average accuracy (or, with `no_avg`,
        an array of per-site accuracies), or `nan` if `qvals`
        is empty.

    The probability :math:`p` of an error at a given site relates
    to the Q-value :math:`Q` by :math:`Q = -10 \log_{10} p`, so the
    per-site accuracy is :math:`1 - 10^{-Q / 10}`.

    >>> qvals = numpy.array([13, 77, 93])
    >>> round(qvalsToAccuracy(qvals), 3) == 0.983
    True
    >>> round(qvalsToAccuracy(qvals[1 : ]), 3) == 1
    True
    >>> qvalsToAccuracy(numpy.array([]))
    nan
    >>> qvals_str = '.n~'
    >>> round(qvalsToAccuracy(qvals_str, encoding='sanger'), 3) == 0.983
    True
    >>> round(qvalsToAccuracy(15), 3) == 0.968
    True
    >>> [round(a, 5) for a in qvalsToAccuracy(qvals, no_avg=True)] == [0.94988, 1, 1]
    True
    """
    # normalize numeric scalars / lists into numpy arrays; a single
    # number always yields its (scalar) accuracy, never an array
    if encoding == 'numbers':
        if isinstance(qvals, numbers.Number):
            qvals = numpy.array([qvals])
            no_avg = False
        elif isinstance(qvals, list):
            qvals = numpy.array(qvals)

    # empty input has no defined accuracy
    if qvals is None or len(qvals) == 0:
        return math.nan

    # decode into an array of Phred scores
    if encoding == 'numbers':
        phred = qvals
    elif encoding == 'sanger':
        phred = numpy.array([ord(c) - 33 for c in qvals])
    else:
        raise RuntimeError("invalid `encoding`: {0}".format(encoding))

    accuracies = 1 - 10**(phred / -10)
    if no_avg:
        return accuracies
    return accuracies.sum() / len(phred)
def summarizeCCSreports(ccslist, report_type, plotfile,
                        plotminfrac=0.005):
    """Summarize and plot `CCS` reports.
    Args:
        `ccslist` (`CCS` object or list of them)
            `CCS` objects to summarize
        `report_type` (str "zmw" or "subread")
            Which type of report to summarize
        `plotfile` (str or `None`)
            Name of created bar plot, or `None`
            if you want to return the created plot.
        `plotminfrac` (float)
            Only plot status categories with >=
            this fraction in at least one `CCS`
    Returns:
        - If `plotfile` is a str, returns a pandas DataFrame
          aggregating the reports and creates `plotfile`.
        - If `plotfile` is `None`, returns the 2-tuple
          containing the data frame and the plot.
    """
    # Accept a single CCS object as a convenience; normalize to a list.
    if isinstance(ccslist, CCS):
        ccslist = [ccslist]
    assert all([isinstance(ccs, CCS) for ccs in ccslist]), \
        "`ccslist` not a list of `CCS` objects"
    assert report_type in ['zmw', 'subread']
    # Each CCS object carries a `zmw_report` / `subread_report` attribute.
    report = report_type + '_report'
    # Stack the per-sample reports into one long-form frame, tagged by sample.
    df = (pandas.concat([getattr(ccs, report).assign(sample=ccs.samplename)
                for ccs in ccslist])
          .sort_values(['sample', 'number'], ascending=False)
          [['sample', 'status', 'number', 'fraction']]
          )
    # version of df that only has categories with `plotminfrac`
    # (a status is kept if its fraction reaches plotminfrac in ANY sample)
    plot_df = (df.assign(maxfrac=lambda x: x.groupby('status')
                         .fraction.transform('max'))
               .query('maxfrac >= @plotminfrac')
               )
    nstatus = len(plot_df.status.unique())
    # Stacked bar chart: one bar per sample, segments colored by status.
    p = (ggplot(plot_df) +
         geom_col(aes(x='sample', y='number', fill='status'),
                  position='stack') +
         theme(axis_text_x=element_text(angle=90, vjust=1,
                                        hjust=0.5)) +
         ylab({'zmw':'ZMWs', 'subread':'subreads'}[report_type])
         )
    # Use the color-blind-safe palette when it has enough colors; otherwise
    # fall back to the plotnine default scale.
    if nstatus <= len(COLOR_BLIND_PALETTE):
        p = p + scale_fill_manual(list(reversed(
                COLOR_BLIND_PALETTE[ : nstatus])))
    if plotfile is None:
        return (df, p)
    else:
        # Width scales with the number of samples so bar labels stay legible.
        p.save(plotfile,
               height=3,
               width=(2 + 0.3 * len(ccslist)),
               verbose=False)
        plt.close()
        return df
def re_expandIUPAC(re_str):
    """Expand IUPAC ambiguous nucleotide codes in `re` search string.
    Simplifies writing `re` search strings that include ambiguous
    nucleotide codes.
    Args:
        `re_str` (str)
            String appropriate to be passed to `regex.compile`.
    Returns:
        A version of `re_str` where any characters not in the group
        names that correspond to upper-case ambiguous nucleotide codes
        are expanded according to their definitions in the
        `IUPAC code <https://en.wikipedia.org/wiki/Nucleic_acid_notation>`_.
    >>> re_str = '^(?P<termini5>ATG)(?P<cDNA>N+)A+(?P<barcode>N{4})$'
    >>> re_expandIUPAC(re_str)
    '^(?P<termini5>ATG)(?P<cDNA>[ACGT]+)A+(?P<barcode>[ACGT]{4})$'
    """
    # Characters inside group-name declarations `(?P<name>` must be left
    # alone, so first record every string index covered by one.
    protected = set()
    for m in regex.finditer(r'\(\?P<[^>]*>', re_str):
        protected.update(range(m.start(), m.end()))
    # Expand every unprotected IUPAC code character; leave the rest as-is.
    expanded = [
        dms_tools2.NT_TO_REGEXP[ch]
        if (pos not in protected) and ch in dms_tools2.NT_TO_REGEXP
        else ch
        for pos, ch in enumerate(re_str)
    ]
    return ''.join(expanded)
if __name__ == '__main__':
    # Run the embedded doctests when this module is executed directly.
    import doctest
    doctest.testmod()
|
jbloomlab/dms_tools2
|
dms_tools2/pacbio.py
|
Python
|
gpl-3.0
| 63,182
|
[
"Biopython",
"pysam"
] |
ee76889395550b4d207b07425dd417446d044bfe86241d83103449c3fb60ddea
|
"""Receives Android tablet input data transmitted via UDP
by the GfxTablet Android app and pushes it to WebSocket client.
GfxTablet: https://github.com/rfc2822/GfxTablet
"""
import logging
import socket
from tornado.websocket import WebSocketHandler
from tornado.ioloop import IOLoop
_logger = logging.getLogger(__name__)
# Module-level UDP socket that receives GfxTablet input packets on all
# interfaces, port 40118 (the app's default). Non-blocking so reads can be
# driven by the tornado IOLoop rather than blocking a thread.
udpsock = socket.socket(type=socket.SOCK_DGRAM)
udpsock.bind(('0.0.0.0', 40118))
udpsock.setblocking(False)
class GfxTabletHandler(WebSocketHandler):
    """Forwards stylus events received from the GfxTablet Android app
    (via the module-level UDP socket) to the connected WebSocket client
    as JSON messages.
    """
    # Event type codes used by the GfxTablet UDP packets.
    EVENT_TYPE_MOTION = 0
    EVENT_TYPE_BUTTON = 1
    # see http://www.bbarrows.com/blog/2013/01/27/udptornado/
    def initialize(self):
        # Reusable receive buffer for recv_into (200 bytes; datagrams are smaller).
        self._buf = bytearray(20*10)
        # TODO: maybe not robust on all platforms (see http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib)
        _logger.info("in GfxTablet settings, set the recipient host to %s (this server's local IP address)" % socket.gethostbyname(socket.gethostname()))
    def open(self):
        # Start forwarding UDP input once a WebSocket client connects.
        _logger.debug("GfxTablet WebSocket opened")
        self.set_nodelay(True) # maybe better to not do this?
        ioloop = IOLoop.current()
        # Invoke handle_input from the IOLoop whenever the UDP socket is readable.
        ioloop.add_handler(udpsock.fileno(), self.handle_input, ioloop.READ)
    def on_message(self, message):
        # Client-to-server messages are only logged; the data flow is one-way.
        _logger.debug(message)
    def on_close(self):
        _logger.debug("GfxTablet WebSocket closed")
        ioloop = IOLoop.current()
        # Stop watching the UDP socket when the client disconnects.
        ioloop.remove_handler(udpsock.fileno())
    def handle_input(self, fd, events):
        """Read one UDP datagram and push the decoded event to the client.

        Byte offsets below follow the GfxTablet wire format: offset 11 is
        the event type; offsets 12/14/16 appear to hold 16-bit big-endian
        x, y and pressure values -- TODO confirm against the GfxTablet
        protocol definition.
        """
        # TODO: android app sends width, height, use it
        buf = self._buf
        nbytes = udpsock.recv_into(buf)  # NOTE(review): nbytes unused; short reads are not checked
        event_type = buf[11]
        # x/y are normalized by 2**16 to [0, 1); pressure is divided by
        # 2**15 (presumably giving a 0..2 range -- verify).
        x = (256 * buf[12] + buf[12 + 1]) / 2.0**16
        y = (256 * buf[14] + buf[14 + 1]) / 2.0**16
        p = (256 * buf[16] + buf[16 + 1]) / 2.0**15
        if event_type == GfxTabletHandler.EVENT_TYPE_MOTION:
            self.write_message({'x': x, 'y': y, 'p': p})
        elif event_type == GfxTabletHandler.EVENT_TYPE_BUTTON:
            # TODO: galaxy note 10.1 stylus button not working?
            # if buf[18] != 255:
            #     _logger.debug("button: %d down: %d" % (buf[18], buf[19]))
            self.write_message({'x': x, 'y': y, 'p': p, 'button': buf[18], 'down': buf[19]})
|
jzitelli/yawvrb
|
GfxTablet/GfxTablet.py
|
Python
|
mit
| 2,216
|
[
"Galaxy"
] |
6e69cea41fbc6db4f46ebcbad0e87de0bf307eb5307833cfbd0647a095a15e52
|
import math
from typing import Optional, List, Tuple, Sequence, Union, cast, TypeVar
from typing import Iterator, overload
import numpy
import itertools
from .. import registry
from ..types import Xp, Shape, DTypes, DTypesInt, DTypesFloat, List2d, ArrayXd
from ..types import Array3d, Floats1d, Floats2d, Floats3d, Floats4d
from ..types import FloatsXd, Ints1d, Ints2d, Ints3d, Ints4d, IntsXd, _Floats
from ..types import DeviceTypes, Generator, Padded, Batchable, SizedGenerator
from ..util import get_array_module, is_xp_array, to_numpy
# Type variables for generic array-typed signatures in the Ops API below.
ArrayT = TypeVar("ArrayT", bound=ArrayXd)
FloatsT = TypeVar("FloatsT", bound=_Floats)
FloatsType = TypeVar("FloatsType", bound=FloatsXd)
# Precomputed constants used by the activation functions (e.g. GELU):
# sqrt(2/pi), 1/sqrt(2) and 1/sqrt(2*pi).
SQRT2PI = math.sqrt(2.0 / math.pi)
INV_SQRT2 = 1.0 / math.sqrt(2.0)
INV_SQRT_2PI = 1.0 / math.sqrt(2.0 * math.pi)
class Ops:
name: str = "base"
xp: Xp = numpy
    def __init__(
        self, device_type: DeviceTypes = "cpu", device_id: int = -1, **kwargs
    ) -> None:
        # Record the device this backend targets. device_id of -1 is the
        # default (presumably "no specific device" -- confirm in subclasses).
        self.device_type = device_type
        self.device_id = device_id
    def to_numpy(self, data, *, byte_order=None):  # pragma: no cover
        """Return `data` as a numpy array, optionally converting byte order.

        Raises ValueError for non-numpy input; GPU-backed subclasses are
        expected to override this with a real device-to-host copy.
        """
        if isinstance(data, numpy.ndarray):
            if byte_order:
                # Rewrite the dtype with the requested byte order
                # ('<', '>', '=' ...) and convert the data to it.
                dtype = data.dtype.newbyteorder(byte_order)
                data = numpy.asarray(data, dtype=dtype)
            return data
        else:
            raise ValueError("Cannot convert non-numpy from base Ops class")
    def minibatch(
        self,
        size: Union[int, Generator],
        sequence: Batchable,
        *,
        shuffle: bool = False,
        buffer: int = 1,
    ) -> SizedGenerator:
        """Iterate slices from a sequence, optionally shuffled. Slices
        may be either views or copies of the underlying data.
        The `size` argument may be either an integer, or a sequence of integers.
        If a sequence, a new size is drawn before every output.
        If shuffle is True, shuffled batches are produced by first generating
        an index array, shuffling it, and then using it to slice into the
        sequence.
        An internal queue of `buffer` items is accumulated before being each
        output. Buffering is useful for some devices, to allow the
        network to run asynchronously without blocking on every batch.
        """
        if not hasattr(sequence, "__len__"):
            err = f"Can't minibatch data. Expected sequence, got {type(sequence)}"
            raise ValueError(err)
        # Pre-draw all batch sizes so the total number of batches is known
        # up front; an int size is treated as an endless repeat of itself.
        sizes = self._get_batch_sizes(
            len(sequence), itertools.repeat(size) if isinstance(size, int) else size
        )
        indices = numpy.arange(len(sequence))
        # This is a bit convoluted, but it's a time where convenience makes
        # trickery worthwhile: instead of being an actual generator, we
        # return our SizedGenerator object, which provides a __len__.
        def _iter_items():
            # Shuffle the index array rather than the data itself, so
            # batches are cheap slices into `sequence`.
            if shuffle:
                numpy.random.shuffle(indices)
            queue = []
            i = 0
            for size in sizes:
                size = int(size)
                queue.append(self._get_batch(sequence, indices[i : i + size]))
                # Emit in chunks of `buffer` batches so downstream consumers
                # can run ahead asynchronously.
                if len(queue) >= buffer:
                    yield from queue
                    queue = []
                i += size
            # Flush any remaining queued batches.
            yield from queue
        return SizedGenerator(_iter_items, len(sizes))
def multibatch(
self,
size: Union[int, Generator],
sequence: Batchable,
*others: Batchable,
shuffle: bool = False,
buffer: int = 1,
) -> SizedGenerator:
"""Minibatch one or more sequences of data, and yield
lists with one batch per sequence. See ops.minibatch.
"""
# You'd think we could just do this by calling into minibatch and zip...
# But the shuffling makes it really hard.
sequences = (sequence,) + tuple(others)
if not all(hasattr(seq, "__len__") for seq in sequences):
values = ", ".join([f"{type(seq)}" for seq in sequences])
err = f"Can't multibatch data. Expected sequences, got {values}"
raise ValueError(err)
sizes = self._get_batch_sizes(
len(sequence), itertools.repeat(size) if isinstance(size, int) else size
)
indices = numpy.arange(len(sequence))
def _iter_items():
if shuffle:
numpy.random.shuffle(indices)
queue = []
i = 0
for size in sizes:
size = int(size)
idx_batch = indices[i : i + size]
queue.append([])
for sequence in sequences:
queue[-1].append(self._get_batch(sequence, idx_batch))
if len(queue) >= buffer:
yield from queue
queue = []
i += size
yield from queue
return SizedGenerator(_iter_items, len(sizes))
    def _get_batch(self, sequence, indices):
        # Slice a batch out of `sequence` at the given integer `indices`,
        # preserving the container type for lists and tuples; any other
        # sequence type is assumed to support fancy indexing (e.g. arrays).
        if isinstance(sequence, list):
            subseq = [sequence[i] for i in indices]
        elif isinstance(sequence, tuple):
            subseq = tuple(sequence[i] for i in indices)  # type: ignore
        else:
            subseq = sequence[indices]  # type: ignore
        if is_xp_array(subseq):
            # Move array batches onto this backend's array module and make
            # them contiguous for downstream kernels.
            subseq = self.as_contig(
                cast(ArrayXd, self.xp.asarray(subseq))
            )  # type: ignore
        return subseq
def _get_batch_sizes(self, length: int, sizes: Iterator[int]):
output = []
i = 0
while i < length:
output.append(next(sizes))
i += output[-1]
return output
def seq2col(
self, seq: Floats2d, nW: int, *, lengths: Optional[Ints1d] = None
) -> Floats2d:
"""Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1))
sequence. The new sequence is constructed by concatenating nW preceding
and succeeding vectors onto each column in the sequence, to extract a
window of features.
"""
# This is a test implementation that only supports nW=1 and lengths=None
assert nW == 1
assert lengths == None
B = seq.shape[0]
I = seq.shape[1]
cols = self.alloc3f(B, (nW * 2 + 1), I)
# Copy left contexts. The last words aren't the left-context for anything.
cols[nW:, :nW] = self.reshape3f(seq[:-nW], -1, nW, I)
cols[:, nW] = seq
cols[:-nW, nW + 1 :] = self.reshape3f(seq[nW:], -1, nW, I)
return self.reshape2f(cols, B, I * (2 * nW + 1))
def backprop_seq2col(
self, dY: Floats2d, nW: int, *, lengths: Optional[Ints1d] = None
) -> Floats2d:
"""The reverse/backward operation of the `seq2col` function: calculate
the gradient of the original `(M, N)` sequence, as a function of the
gradient of the output `(M, N*(nW*2+1))` sequence.
"""
# This is a test implementation that only supports nW=1 and lengths=None
assert nW == 1
assert lengths == None
nF = nW * 2 + 1
B = dY.shape[0]
I = dY.shape[1] // nF
# Having trouble getting the kernel to work...
dX = self.alloc2f(B, I)
dY3d = self.reshape3f(dY, B, nF, I)
dX[:-nW] += self.reshape2f(dY3d[nW:, :nW], -1, I)
dX += dY3d[:, nW]
dX[nW:] += self.reshape2f(dY3d[:-nW, nW + 1 :], -1, I)
return dX
def gemm(
self,
x: Floats2d,
y: Floats2d,
out: Optional[Floats2d] = None,
trans1: bool = False,
trans2: bool = False,
) -> Floats2d:
"""Perform General Matrix Multiplication (GeMM) and optionally store
the result in the specified output variable.
"""
if trans1:
x = x.T
if trans2:
y = y.T
if out is None:
return self.xp.dot(x, y)
else:
self.xp.dot(x, y, out=out)
return out
    def tile(self, X: Floats2d, reps: int) -> Floats2d:
        # Thin wrapper over xp.tile: repeat `X` `reps` times.
        return self.xp.tile(X, reps)
def affine(self, X: Floats2d, W: Floats2d, b: Floats1d) -> Floats2d:
"""Apply a weights layer and a bias to some inputs, i.e.
Y = X @ W.T + b
"""
Y = self.gemm(X, W, trans2=True)
Y += b
return Y
def flatten(
self,
X: Sequence[ArrayT],
dtype: Optional[DTypes] = None,
pad: int = 0,
ndim_if_empty: int = 2,
) -> ArrayT:
"""Flatten a list of arrays into one large array."""
if X is None or len(X) == 0:
return self.alloc((0,) * ndim_if_empty, dtype=dtype or "f")
xp = get_array_module(X[0])
shape_if_empty = X[0].shape
X = [x for x in X if x.size != 0]
if len(X) == 0:
return self.alloc(shape_if_empty, dtype=dtype or "f")
if int(pad) >= 1:
padded = []
for x in X:
padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
padded.append(x)
padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
X = padded
result = xp.concatenate(X)
if dtype is not None:
result = xp.asarray(result, dtype=dtype)
return result
def unflatten(self, X: Floats2d, lengths: Ints1d, pad: int = 0) -> List[Floats2d]:
"""The reverse/backward operation of the `flatten` function: unflatten
a large array into a list of arrays according to the given lengths.
"""
unflat = []
pad = int(pad)
for length in lengths:
length = int(length)
if pad >= 1 and length != 0:
X = X[pad:]
unflat.append(X[:length])
X = X[length:]
if pad >= 1:
X = X[pad:]
assert len(X) == 0
assert len(unflat) == len(lengths)
return unflat
@overload
def pad(self, seqs: List[Ints2d], round_to=1) -> Ints3d:
...
@overload # noqa: F811
def pad(self, seqs: List[Floats2d], round_to=1) -> Floats3d:
...
def pad( # noqa: F811
self, seqs: Union[List[Ints2d], List[Floats2d]], round_to=1
) -> Array3d:
"""Perform padding on a list of arrays so that they each have the same
length, by taking the maximum dimension across each axis. This only
works on non-empty sequences with the same `ndim` and `dtype`.
"""
# TODO: This should be generalized to handle different ranks
if not seqs:
raise ValueError("Cannot pad empty sequence")
if len(set(seq.ndim for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different ndims")
if len(set(seq.dtype for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different dtypes")
if len(set(seq.shape[1:] for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences that differ on other dimensions")
# Find the maximum dimension along each axis. That's what we'll pad to.
length = max(len(seq) for seq in seqs)
# Round the length to nearest bucket -- helps on GPU, to make similar
# array sizes.
length = (length + (round_to - 1)) // round_to * round_to
final_shape = (len(seqs), length) + seqs[0].shape[1:]
output: Array3d = self.alloc(final_shape, dtype=seqs[0].dtype)
for i, arr in enumerate(seqs):
# It's difficult to convince this that the dtypes will match.
output[i, : arr.shape[0]] = arr # type: ignore
return output
def unpad(self, padded: Array3d, lengths: List[int]) -> List2d:
"""The reverse/backward operation of the `pad` function: transform an
array back into a list of arrays, each with their original length.
"""
output = []
for i, length in enumerate(lengths):
output.append(padded[i, :length])
return cast(List2d, output)
def list2padded(self, seqs: List[Floats2d]) -> Padded:
"""Pack a sequence of 2d arrays into a Padded datatype."""
if not seqs:
return Padded(
self.alloc3f(0, 0, 0), self.alloc1i(0), self.alloc1i(0), self.alloc1i(0)
)
elif len(seqs) == 1:
data = self.reshape3f(seqs[0], seqs[0].shape[0], 1, seqs[0].shape[1])
size_at_t = self.asarray1i([1] * data.shape[0])
lengths = self.asarray1i([data.shape[0]])
indices = self.asarray1i([0])
return Padded(data, size_at_t, lengths, indices)
lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)]
lengths_indices.sort(reverse=True)
indices_ = [i for length, i in lengths_indices]
lengths_ = [length for length, i in lengths_indices]
nS = max([seq.shape[0] for seq in seqs])
nB = len(seqs)
nO = seqs[0].shape[1]
# Reorder the sequences, by length. This looks the same in either
# direction: you're swapping elements between their original and sorted
# position.
seqs = [seqs[i] for i in indices_]
arr: Floats3d = self.pad(seqs)
assert arr.shape == (nB, nS, nO), (nB, nS, nO)
arr = self.as_contig(arr.transpose((1, 0, 2)))
assert arr.shape == (nS, nB, nO)
# Build a lookup table so we can find how big the batch is at point t.
batch_size_at_t_ = [0 for _ in range(nS)]
current_size = len(lengths_)
for t in range(nS):
while current_size and t >= lengths_[current_size - 1]:
current_size -= 1
batch_size_at_t_[t] = current_size
assert sum(lengths_) == sum(batch_size_at_t_)
return Padded(
cast(Floats3d, arr),
self.asarray1i(batch_size_at_t_),
self.asarray1i(lengths_),
self.asarray1i(indices_),
)
def padded2list(self, padded: Padded) -> List2d:
"""Unpack a Padded datatype to a list of 2-dimensional arrays."""
data = padded.data
indices = to_numpy(padded.indices)
lengths = to_numpy(padded.lengths)
unpadded: List[Optional[Floats2d]] = [None] * len(lengths)
# Transpose from (length, batch, data) to (batch, length, data)
data = self.as_contig(data.transpose((1, 0, 2)))
for i in range(data.shape[0]):
unpadded[indices[i]] = data[i, : int(lengths[i])]
return cast(List2d, unpadded)
def get_dropout_mask(self, shape: Shape, drop: Optional[float]) -> FloatsXd:
"""Create a random mask for applying dropout, with a certain percent of
the mask (defined by `drop`) will contain zeros. The neurons at those
positions will be deactivated during training, resulting in a more
robust network and less overfitting.
"""
if drop is None or drop <= 0:
return self.xp.ones(shape, dtype="f")
elif drop >= 1.0:
return self.alloc(shape)
coinflips = self.xp.random.uniform(0.0, 1.0, shape)
mask = (coinflips >= drop) / (1.0 - drop)
return cast(FloatsXd, self.asarray(mask, dtype="float32"))
def alloc1f(self, d0: int, *, dtype: Optional[DTypesFloat] = "float32") -> Floats1d:
return self.alloc((d0,), dtype=dtype)
def alloc2f(
self, d0: int, d1: int, *, dtype: Optional[DTypesFloat] = "float32"
) -> Floats2d:
return self.alloc((d0, d1), dtype=dtype)
def alloc3f(
self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesFloat] = "float32"
) -> Floats3d:
return self.alloc((d0, d1, d2), dtype=dtype)
def alloc4f(
self,
d0: int,
d1: int,
d2: int,
d3: int,
*,
dtype: Optional[DTypesFloat] = "float32",
) -> Floats4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype)
def alloc_f(
self, shape: Shape, *, dtype: Optional[DTypesFloat] = "float32"
) -> FloatsXd:
return self.alloc(shape, dtype=dtype)
def alloc1i(self, d0: int, *, dtype: Optional[DTypesInt] = "int32") -> Ints1d:
return self.alloc((d0,), dtype=dtype)
def alloc2i(
self, d0: int, d1: int, *, dtype: Optional[DTypesInt] = "int32"
) -> Ints2d:
return self.alloc((d0, d1), dtype=dtype)
def alloc3i(
self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesInt] = "int32"
) -> Ints3d:
return self.alloc((d0, d1, d2), dtype=dtype)
def alloc4i(
self,
d0: int,
d1: int,
d2: int,
d3: int,
*,
dtype: Optional[DTypesInt] = "int32",
) -> Ints4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype)
def alloc_i(self, shape: Shape, *, dtype: Optional[DTypesInt] = "int32") -> IntsXd:
return self.alloc(shape, dtype=dtype)
def alloc(self, shape: Shape, *, dtype: Optional[DTypes] = "float32") -> ArrayT:
"""Allocate an array of a certain shape."""
if isinstance(shape, int):
shape = (shape,)
return self.xp.zeros(shape, dtype=dtype)
def reshape1f(self, array: FloatsXd, d0: int) -> Floats1d:
return cast(Floats1d, self.reshape(array, (d0,)))
def reshape2f(self, array: FloatsXd, d0: int, d1: int) -> Floats2d:
return cast(Floats2d, self.reshape(array, (d0, d1)))
def reshape3f(self, array: FloatsXd, d0: int, d1: int, d2: int) -> Floats3d:
return cast(Floats3d, self.reshape(array, (d0, d1, d2)))
def reshape4f(
self, array: FloatsXd, d0: int, d1: int, d2: int, d3: int
) -> Floats4d:
return cast(Floats4d, self.reshape(array, (d0, d1, d2, d3)))
def reshape_f(self, array: FloatsXd, shape: Shape) -> FloatsXd:
return self.reshape(array, shape)
def reshape1i(self, array: IntsXd, d0: int) -> Ints1d:
return cast(Ints1d, self.reshape(array, (d0,)))
def reshape2i(self, array: IntsXd, d0: int, d1: int) -> Ints2d:
return cast(Ints2d, self.reshape(array, (d0, d1)))
def reshape3i(self, array: IntsXd, d0: int, d1: int, d2: int) -> Ints3d:
return cast(Ints3d, self.reshape(array, (d0, d1, d2)))
def reshape4i(self, array: IntsXd, d0: int, d1: int, d2: int, d3: int) -> Ints4d:
return cast(Ints4d, self.reshape(array, (d0, d1, d2, d3)))
def reshape_i(self, array: IntsXd, shape: Shape) -> IntsXd:
return self.reshape(array, shape)
def reshape(self, array: ArrayT, shape: Shape) -> ArrayT:
"""Reshape an array."""
if isinstance(shape, int):
shape = (shape,)
return cast(ArrayT, array.reshape(shape))
def asarray4f(
self,
data: Union[Floats4d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats4d:
return cast(Floats4d, self.asarray(data, dtype=dtype))
def asarray3f(
self,
data: Union[Floats3d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats3d:
return cast(Floats3d, self.asarray(data, dtype=dtype))
def asarray2f(
self,
data: Union[Floats2d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats2d:
return cast(Floats2d, self.asarray(data, dtype=dtype))
def asarray1f(
self,
data: Union[Floats1d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats1d:
return cast(Floats1d, self.asarray(data, dtype=dtype))
def asarray_f(
self,
data: Union[FloatsXd, Sequence[float]],
*,
dtype: Optional[DTypes] = "float32",
) -> FloatsXd:
return cast(FloatsXd, self.asarray(data, dtype=dtype))
def asarray1i(
self, data: Union[Ints1d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints1d:
return cast(Ints1d, self.asarray(data, dtype=dtype))
def asarray2i(
self, data: Union[Ints2d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints2d:
return cast(Ints2d, self.asarray(data, dtype=dtype))
def asarray3i(
self, data: Union[Ints3d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints3d:
return cast(Ints3d, self.asarray(data, dtype=dtype))
def asarray4i(
self, data: Union[Ints4d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints4d:
return cast(Ints4d, self.asarray(data, dtype=dtype))
def asarray_i(
self, data: Union[IntsXd, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> IntsXd:
return cast(IntsXd, self.asarray(data, dtype=dtype))
def asarray(
self,
data: Union[ArrayXd, Sequence[ArrayXd], Sequence[float], Sequence[int]],
*,
dtype: Optional[DTypes] = None,
) -> ArrayXd:
"""Ensure a given array is of the correct type."""
if isinstance(data, self.xp.ndarray):
if dtype is None:
return data
elif data.dtype == dtype:
return data
else:
return self.xp.asarray(data, dtype=dtype)
elif hasattr(data, "numpy"):
# Handles PyTorch Tensor
return data.numpy() # type: ignore
elif dtype is not None:
return self.xp.array(data, dtype=dtype)
else:
return self.xp.array(data)
def as_contig(self, data: ArrayT, dtype: Optional[DTypes] = None) -> ArrayT:
"""Allow the backend to make a contiguous copy of an array.
Implementations of `Ops` do not have to make a copy or make it
contiguous if that would not improve efficiency for the execution engine.
"""
if data.flags["C_CONTIGUOUS"] and dtype in (None, data.dtype):
return data
kwargs = {"dtype": dtype} if dtype is not None else {}
return self.xp.ascontiguousarray(data, **kwargs)
def sigmoid(self, X: FloatsType, *, inplace: bool = False) -> FloatsType:
if inplace:
self.xp.exp(-X, out=X)
X += 1.0 # type: ignore
X **= -1.0 # type: ignore
return cast(FloatsType, X)
else:
return cast(FloatsType, 1.0 / (1.0 + self.xp.exp(-X)))
def dsigmoid(self, Y: FloatsType, *, inplace: bool = False) -> FloatsType:
if inplace:
Y *= 1 - Y
return Y
else:
return Y * (1.0 - Y)
def dtanh(self, Y: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
Y **= 2
Y *= -1.0
Y += 1.0
return Y
else:
return 1 - Y ** 2
    def softmax(
        self,
        x: FloatsT,
        *,
        inplace: bool = False,
        axis: int = -1,
        temperature: float = 1.0,
    ) -> FloatsT:
        """Numerically stable softmax along `axis`, optionally dividing the
        logits by `temperature` first.
        """
        # NOTE(review): `inplace` is accepted but never used here -- a new
        # array is always returned. Confirm whether any caller relies on
        # in-place semantics.
        if temperature != 1.0:
            x = x / temperature
        # Subtract the per-axis max before exponentiating so exp() cannot
        # overflow; this does not change the result.
        maxes = self.xp.max(x, axis=axis, keepdims=True)
        shifted = x - maxes
        new_x = self.xp.exp(shifted)
        new_x /= new_x.sum(axis=axis, keepdims=True)
        return new_x
def softmax_sequences(
self, Xs: Floats2d, lengths: Ints1d, *, inplace: bool = False, axis: int = -1
) -> Floats2d:
if Xs.ndim >= 3:
err = f"Softmax currently only supports 2d. Got: {Xs.ndim}"
raise NotImplementedError(err)
# This loses almost no fidelity, and helps the numerical stability.
Xs = self.xp.clip(Xs, -20.0, 20.0)
new_x = self.xp.exp(Xs)
summed = self.backprop_reduce_sum(self.reduce_sum(new_x, lengths), lengths)
new_x /= summed
return new_x
def backprop_softmax(
self, Y: FloatsT, dY: FloatsT, *, axis: int = -1, temperature: float = 1.0
) -> FloatsT:
if temperature != 1.0:
dY = dY / temperature
dX = Y * dY
dX -= Y * dX.sum(axis=axis, keepdims=True)
return dX
def backprop_softmax_sequences(
self, dY: Floats2d, Y: Floats2d, lengths: Ints1d
) -> Floats2d:
dX = Y * dY
sum_dX = self.backprop_reduce_sum(self.reduce_sum(dX, lengths), lengths)
dX -= Y * sum_dX
return dX
def lstm_forward_training(
self,
params: Floats1d,
H0: Floats3d,
C0: Floats3d,
X: Floats2d,
size_at_t: Ints1d,
) -> Tuple[Floats2d, Tuple]:
assert H0.shape == C0.shape
assert H0.shape[1] == C0.shape[1]
Y, fwd_state = lstm_forward_training(params, H0, C0, X, size_at_t)
return Y, fwd_state
def lstm_forward_inference(
self,
params: Floats1d,
H0: Floats3d,
C0: Floats3d,
X: Floats2d,
size_at_t: Ints1d,
) -> Floats2d:
Y, _ = lstm_forward_training(params, H0, C0, X, size_at_t)
return Y
def backprop_lstm(
self, dY: Floats2d, lengths: Ints1d, params: Floats1d, fwd_state: Tuple
) -> Tuple[Floats2d, Floats1d]:
dX, d_params = backprop_lstm(dY, lengths, params, fwd_state)
return dX, d_params
def maxout(self, X: Floats3d) -> Tuple[Floats2d, Ints2d]:
which = X.argmax(axis=-1, keepdims=False)
return X.max(axis=-1), which
def backprop_maxout(self, dY: Floats2d, which: Ints2d, P: int) -> Floats3d:
dX = self.alloc3f(dY.shape[0], dY.shape[1], P)
for b in range(dY.shape[0]):
for o in range(dY.shape[1]):
dX[b, o, which[b, o]] = dY[b, o]
return dX
def relu(self, X: Floats2d, inplace: bool = False) -> Floats2d:
if not inplace:
return X * (X > 0)
else:
X *= X > 0
return X
def backprop_relu(
self, dY: Floats2d, Y: Floats2d, inplace: bool = False
) -> Floats2d:
if not inplace:
return dY * (Y > 0)
dY *= Y > 0
return dY
def clipped_linear(
self,
X: FloatsType,
slope: float = 1.0,
offset: float = 0.0,
min_val: float = 0.0,
max_val: float = 1.0,
inplace: bool = False,
) -> FloatsType:
if inplace:
X *= slope # type: ignore
X += offset # type: ignore
return cast(FloatsType, self.xp.clip(X, min_val, max_val, out=X))
out = X * slope + offset # type: ignore
return cast(FloatsType, self.xp.clip(out, min_val, max_val))
def backprop_clipped_linear(
self,
dY: FloatsType,
X: FloatsType,
slope: float = 1.0,
offset: float = 0.0,
min_val: float = 0.0,
max_val: float = 1.0,
inplace: bool = False,
) -> FloatsType:
low = (min_val - offset) / slope
high = (max_val - offset) / slope
slope = self.xp.float64(slope).astype(X.dtype)
zero = self.xp.float64(0.0).astype(X.dtype)
dX = self.xp.where((low < X) & (X < high), slope, zero)
if inplace:
dY *= dX
return dY
return dY * dX
def relu_k(
self, X: FloatsType, n: float = 6.0, inplace: bool = False
) -> FloatsType:
return self.clipped_linear(X, max_val=n, inplace=inplace)
def backprop_relu_k(
self, dY: FloatsType, X: FloatsType, n: float = 6.0, inplace: bool = False
) -> FloatsType:
return self.backprop_clipped_linear(dY, X, max_val=n, inplace=inplace)
def hard_sigmoid(self, X: FloatsType, inplace: bool = False) -> FloatsType:
return self.clipped_linear(X, slope=0.2, offset=0.5)
def backprop_hard_sigmoid(
self, dY: FloatsType, X: FloatsType, inplace: bool = False
) -> FloatsType:
return self.backprop_clipped_linear(dY, X, slope=0.2, offset=0.5)
def hard_tanh(self, X: FloatsType, inplace: bool = False) -> FloatsType:
return self.clipped_linear(X, min_val=-1.0, max_val=1.0)
def backprop_hard_tanh(
self, dY: FloatsType, X: FloatsType, inplace: bool = False
) -> FloatsType:
return self.backprop_clipped_linear(dY, X, min_val=-1.0, max_val=1.0)
def swish(self, X: FloatsType, inplace: bool = False) -> FloatsType:
if inplace:
X *= self.sigmoid(X) # type: ignore
return cast(FloatsType, X)
out = X * self.sigmoid(X) # type: ignore
return cast(FloatsType, out)
def backprop_swish(
self, dY: FloatsType, X: FloatsType, Y: FloatsType, inplace: bool = False
) -> FloatsType:
Y = Y + self.sigmoid(X) * (1 - Y) # type: ignore
if inplace:
dY *= Y # type: ignore
return cast(FloatsType, dY)
out = dY * Y # type: ignore
return cast(FloatsType, out)
# Following https://www.scitepress.org/Papers/2019/74696/74696.pdf
def hard_swish(self, X: FloatsType, inplace: bool = False) -> FloatsType:
if inplace:
X *= self.hard_sigmoid(X) # type: ignore
return cast(FloatsType, X)
out = X * self.hard_sigmoid(X) # type: ignore
return cast(FloatsType, out)
def backprop_hard_swish(
self, dY: FloatsType, X: FloatsType, inplace: bool = False
) -> FloatsType:
dX = X * 0.4 + 0.5
dX[X > 2.5] = 1.0
dX[X < -2.5] = 0
if inplace:
dY *= dX
return dY
return dY * dX
# From https://arxiv.org/pdf/1905.02244v5.pdf
def hard_swish_mobilenet(self, X: FloatsType, inplace: bool = False) -> FloatsType:
if inplace:
X *= self.relu_k(X + 3) / 6
return X
return X * (self.relu_k(X + 3) / 6)
def backprop_hard_swish_mobilenet(
self, dY: FloatsType, X: FloatsType, inplace: bool = False
) -> FloatsType:
dX = (1 / 6) * (X * 2.0 + 3.0)
dX[X > 3.0] = 1.0
dX[X < -3.0] = 0
if inplace:
dY *= dX
return dY
return dX * dY
# Code snippet taken from:
# https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf/
def erf(self, X: FloatsType) -> FloatsType:
# save the sign of x
sign = self.xp.sign(X)
X = self.xp.abs(X)
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
t = 1.0 / (1.0 + p * X)
y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * self.xp.exp(
-X * X
)
out = sign * y
out = out.astype(X.dtype)
return out
def sechsq(self, X: FloatsType) -> FloatsType:
return (1 / self.xp.cosh(X)) ** 2
def gelu_approx(self, X: FloatsType, inplace: bool = False) -> FloatsType:
tmp = 1.0 + self.xp.tanh(SQRT2PI * (X + 0.044715 * self.xp.power(X, 3)))
tmp *= 0.5
tmp = tmp.astype(X.dtype)
if inplace:
X *= tmp
return X
Y = self.xp.zeros_like(X)
Y += tmp
Y *= X
return cast(FloatsType, Y)
def backprop_gelu_approx(
self, dY: FloatsType, X: FloatsType, inplace: bool = False
) -> FloatsType:
dX = self.alloc_f(X.shape)
Xp3 = self.xp.power(X, 3)
tmp = 0.5 * self.xp.tanh(0.0356774 * Xp3 + 0.797885 * X)
tmp += (0.0535161 * Xp3 + 0.398942 * X) * self.sechsq(
0.0356774 * Xp3 + 0.797885 * X
)
tmp += 0.5
dX += tmp
if inplace:
dY *= dX
return dY
return dY * dX
def gelu(self, X: FloatsType, inplace: bool = False) -> FloatsType:
# GELU(x) = x · Φ(x)
cdf = gaussian_cdf(self, X)
if inplace:
X *= cdf # type: ignore
return X
return X * cdf # type: ignore
def backprop_gelu(
self, dY: FloatsType, X: FloatsType, inplace: bool = False
) -> FloatsType:
# GELU'(x) = Φ(x) + x · PDF(x)
dX = gaussian_cdf(self, X) + X * gaussian_pdf(self, X) # type: ignore
if inplace:
dY *= dX
return dY
return dY * dX
def mish(
self, X: FloatsType, threshold: float = 20.0, inplace: bool = False
) -> FloatsType:
tmp = X * self.xp.tanh(self.xp.log(1.0 + self.xp.exp(X)))
Y = self.xp.where(X >= threshold, X, tmp)
if inplace:
X[:] = Y
return X
else:
return Y
def backprop_mish(
self,
dY: FloatsType,
X: Floats2d,
threshold: float = 20.0,
inplace: bool = False,
) -> FloatsType:
xp = get_array_module(X)
indices = X < threshold
Xsub = X[indices]
dYsub = dY[indices]
omega = 4.0 * (Xsub + 1.0)
omega += 4.0 * xp.exp(2.0 * Xsub)
omega += xp.exp(3.0 * Xsub)
omega += xp.exp(Xsub) * ((4.0 * Xsub) + 6.0)
delta = xp.exp(Xsub) + 1.0
delta *= delta
delta += 1.0
dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta ** 2))
# Gradient when above threshold will ignore softplus.
if inplace:
out = dY
else:
out = xp.copy(dY)
out[indices] = dXsub
return out
def update_averages(
self, ema: FloatsT, weights: FloatsT, t: int, max_decay: float = 0.9999
) -> None:
# Internals for optimizer
decay = (1.0 + t) / (10.0 + t)
if decay > max_decay:
decay = max_decay
ema -= (1 - decay) * (ema - weights)
    def adam(
        self,
        weights: Floats1d,
        gradient: Floats1d,
        mom1: Floats1d,
        mom2: Floats1d,
        beta1: float,
        beta2: float,
        eps: float,
        learn_rate: float,
        mod_rate: float = 1.0,
    ) -> Tuple[Floats1d, Floats1d, Floats1d, Floats1d]:
        """One Adam step, updating weights and both moments in place.

        Note that no bias correction is applied here; see the commented-out
        formula below — the caller is expected to fold any correction into
        `learn_rate`.
        """
        # Internals for optimizer
        mom1 *= beta1
        mom2 *= beta2
        mom1 += gradient * (1.0 - beta1)
        mom2 += gradient * gradient * (1.0 - beta2)
        # Here we assume learn rate is calculated by the caller.
        # cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t);
        weights -= learn_rate * (mom1 / (mod_rate * self.xp.sqrt(mom2) + eps))
        return weights, gradient, mom1, mom2
    def clip_gradient(self, gradient: FloatsT, threshold: float) -> FloatsT:
        """Rescale `gradient` in place so its L2 norm is at most `threshold`."""
        # Internals for optimizer
        xp = get_array_module(gradient)
        grad_norm = xp.linalg.norm(gradient)
        if grad_norm >= threshold:
            gradient *= threshold / grad_norm
        return gradient
    def logloss(self, y_true: FloatsT, y_pred: FloatsT) -> float:
        """Elementwise binary cross-entropy of predictions against targets.

        NOTE(review): the annotation says `float` but `-loss` has the shape
        of the inputs (no sum/mean is taken) — verify intent before relying
        on the return type. Marked "currently not used" by the authors.
        """
        # Currently not used
        log_yp = self.xp.log(y_pred + 1e-8)  # 1e-8 guards log(0)
        loss = (y_true * log_yp) + (1 - y_true) * self.xp.log((1 - y_pred) + 1e-8)
        return -loss
    def reduce_sum(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
        """Sum each variable-length sequence of rows in X.

        `lengths[i]` gives the number of consecutive rows belonging to
        sequence i; the output has one row per sequence.
        """
        Y = self.alloc2f(lengths.shape[0], X.shape[1])
        start = 0
        for i, length in enumerate(lengths):
            Y[i] = X[start : start + length].sum(axis=0)
            start += length
        return Y
    def reduce_mean(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
        """Mean of each variable-length sequence of rows in X.

        Zero-length sequences are skipped; their output row stays zero.
        """
        Y = self.alloc2f(lengths.shape[0], X.shape[1])
        start = 0
        for i, length in enumerate(lengths):
            if length:
                Y[i] = X[start : start + length].mean(axis=0)
            start += length
        return Y
    def reduce_max(self, X: Floats2d, lengths: Ints1d) -> Tuple[Floats2d, Ints2d]:
        """Columnwise max of each variable-length sequence of rows in X.

        Returns (maxes, which): `which[i]` holds the within-sequence row
        index of each column's max, needed by backprop_reduce_max.
        Zero-length sequences are skipped; their rows stay zero.
        """
        Y = self.alloc2f(lengths.shape[0], X.shape[1])
        which = self.alloc2i(lengths.shape[0], X.shape[1])
        start = 0
        for i, length in enumerate(lengths):
            if length:
                which[i] = X[start : start + length].argmax(axis=0)
                Y[i] = X[start : start + length].max(axis=0)
            start += length
        return Y, which
    def backprop_reduce_sum(self, d_sums: Floats2d, lengths: Ints1d) -> Floats2d:
        """Backprop of reduce_sum: broadcast each sequence's gradient row to
        every position in that sequence."""
        dX = self.alloc2f(lengths.sum(), d_sums.shape[1])
        start = 0
        for i, length in enumerate(lengths):
            dX[start : start + length] = d_sums[i]
            start += length
        return dX
def backprop_reduce_mean(self, d_means: Floats2d, lengths: Ints1d) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_means.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length] = d_means[i] / length
start += length
return dX
def backprop_reduce_max(
self, d_maxes: Floats2d, which: Ints2d, lengths: Ints1d
) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_maxes.shape[1])
start = 0
for i, length in enumerate(lengths):
self.xp.put_along_axis(
dX[start : start + length], which[i].reshape((1, -1)), d_maxes[i], 0
)
start += length
return dX
    def hash(self, ids: Ints1d, seed: int) -> Ints2d:
        """Hash a sequence of 64-bit keys into a table with 4 32-bit keys, using
        murmurhash3.
        """
        # Delegate to the CPU implementation, converting back to this
        # backend's array type afterwards.
        from .numpy_ops import NumpyOps

        numpy_ops = NumpyOps()
        return self.asarray2i(
            numpy_ops.hash(numpy_ops.asarray(ids, dtype="uint64"), seed)
        )
    def ngrams(self, n: int, keys: Ints1d) -> Ints1d:
        """Compute hashed n-grams of `keys`, delegating to the CPU backend."""
        from .numpy_ops import NumpyOps

        numpy_ops = NumpyOps()
        return self.asarray1i(
            numpy_ops.ngrams(n, numpy_ops.asarray(keys, dtype="uint64"))
        )
    def position_encode(
        self, N: int, D: int, period: int = 10000, out: Optional[Floats2d] = None
    ) -> Floats2d:
        """Sinusoidal position encodings of shape (N, D), delegating to the
        CPU backend."""
        # Currently internals only
        from .numpy_ops import NumpyOps

        numpy_ops = NumpyOps()
        return self.asarray2f(numpy_ops.position_encode(N, D, period, out))
def scatter_add(
self, table: FloatsXd, indices: IntsXd, values: FloatsXd
) -> FloatsXd:
return self.xp.add.at(table, indices, values)
    def insert_into(self, shape, Xs):
        """Maybe don't need this? Just a quicky to get Jax working.

        Copies each array in Xs into row i of a freshly allocated buffer of
        `shape`, left-aligned; positions past each array's length stay at
        the allocator's default fill.
        """
        output = self.alloc(shape, dtype=Xs[0].dtype)
        for i, x in enumerate(Xs):
            output[i, : x.shape[0]] = x
        return output
"""
LSTM Notation (kind of involved, but made it a lot easier to write)
X: Inputs
Y: Outputs (aka hiddens)
C: Cells
G: Gates (Output of non-linearity, i.e. lstm_gates(X @ W.T))
A: Activations (X @ W.T, before non-linearity)
Imagine we have the input:
batch = [
["apple", "banana", "cantaloupe", "date", "elderberry"],
["aardvark", "bat", "capybara", "dingo", "elephant"]
]
The input variable X will have one vector per word, so X[0, 1] will be banana's
vector, X[0, 1, 0] will be a float, the first element of that vector.
We're computing an output variable Y of shape (nL, nB, nO), so that Y[0, 1] is
the output variable of banana.
A problem with variables for RNNs is keeping the timesteps straight. It's hard
to distinguish the current, previous, and next timesteps. To solve this problem,
we follow the convention that **we are at timestep 3**.
Additionally, the variables for Y and C are offset by one, as the 0th elements
have the initial hiddens and initial cells. So:
t=3
Xt3: The input vectors for 'dingo' and 'date', i.e. X[t]
Yt3: The output vectors for 'dingo' and 'date', i.e. Y[t+1] (Y is offset.)
Ct2: The cells calculated at 'c...', that are the input for 'd...'
Ct3: The cells calculated at 'd...', that are the input for 'e...'
At3: The activations at 'd...'
Gt3: The gates at 'd...'
"""
def lstm_forward_training(
    params: Floats1d, c_init: Floats3d, h_init: Floats3d, X: Floats2d, lengths: Ints1d
) -> Tuple[Floats2d, Tuple]:
    """Forward pass of a (multi-layer, optionally bidirectional) LSTM over a
    concatenated batch of variable-length sequences.

    Returns (H, fwd_state) where fwd_state = (Y, G, C, orig_X) is consumed
    by backprop_lstm. See the notation comment above for Y/C/G/A naming.
    """
    xp = get_array_module(params)
    depth, dirs, nO = c_init.shape
    N, nI = X.shape
    batch_size = lengths[0]
    # Preallocate these so we can pass them through for loop.
    G = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO * 4), dtype="f"))
    Y = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO), dtype="f"))
    C = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO), dtype="f"))
    Yt2 = cast(Floats2d, xp.zeros((batch_size, nO), dtype="f"))
    Ct2 = cast(Floats2d, xp.zeros((batch_size, nO), dtype="f"))
    # Compute the start and end indices first.
    indices = []
    start = 0
    for batch_size in lengths:
        indices.append((start, start + batch_size))
        start += batch_size
    params_i = 0
    orig_X = X
    for i in range(depth):
        nI = X.shape[1]
        for d in range(dirs):
            # The inits are shaped (depth, dirs, nO). We add the internal dimension
            # to make them set correctly.
            Yt2 = h_init[i, d].reshape((1, nO))  # type: ignore
            Ct2 = c_init[i, d].reshape((1, nO))  # type: ignore
            layer_params, params_i = _split_weights(params, i, nO, nI, params_i)
            Wx, Wh, bias = _transpose_weights(layer_params)
            # Input-to-hidden activations and bias for the whole layer at once.
            G[i, d] += xp.dot(X, Wx.T)
            G[i, d] += bias
            for start, end in indices if d == 0 else reversed(indices):
                # When we iterate left-to-right, t2 might be longer than t3.
                Yt2 = Yt2[: end - start]
                Ct2 = Ct2[: end - start]
                # But in right-to-left, it's the opposite: t3 can be longer.
                Gt3 = G[i, d, start:end]
                Gt3 = Gt3[: Yt2.shape[0]]
                Gt3 += xp.dot(Yt2, Wh.T)
                Gt3_ = cast(Floats3d, Gt3.reshape((-1, nO, 4)))
                hf = sigmoid(Gt3_[:, :, 0])
                hi = sigmoid(Gt3_[:, :, 1])
                ho = sigmoid(Gt3_[:, :, 2])
                hc = xp.tanh(Gt3_[:, :, 3])
                Ct3 = hf * Ct2
                Ct3 += hi * hc
                # Store results
                Gt3 = (
                    xp.hstack((hf, hi, ho, hc))
                    .reshape((-1, 4, nO))
                    .transpose((0, 2, 1))
                    .reshape((-1, nO * 4))
                )
                # Fix the endpoint to account for shorter slices when iterating
                # reversed. Not 100% sure this is right. If there's a bug, look
                # here?
                end = min(end, start + ho.shape[0])
                Y[i, d, start:end] = xp.tanh(Ct3) * ho
                G[i, d, start:end] = Gt3
                C[i, d, start:end] = Ct3
                # Set the t2 variables to the current t3 variables.
                Ct2 = Ct3
                Yt2 = Y[i, d, start:end]
        # Concatenate the directions to form the next layer's input.
        H = cast(Floats2d, Y[i].transpose((1, 0, 2)).reshape((N, -1)))
        if dirs == 2:
            H = xp.ascontiguousarray(H)
        X = H
    return H, (Y, G, C, orig_X)
def backprop_lstm(dY: Floats2d, lengths: Ints1d, params: Floats1d, fwd_state: Tuple):
    """Backward pass matching lstm_forward_training.

    `fwd_state` is the (Y, G, C, orig_X) tuple from the forward pass.
    Returns (dX, d_params), the gradient of the inputs and of the flat
    parameter vector.

    NOTE(review): dG is allocated and read (dG.T @ X etc.) but dGt3 from
    backprop_lstm_gates never appears to be written back into dG in this
    excerpt — verify against the upstream implementation.
    """
    xp = get_array_module(params)
    Y: Floats4d
    G: Floats4d
    C: Floats4d
    X: Floats2d
    Wx: Floats2d
    Wh: Floats2d
    bias: Floats1d
    dWx: Floats2d
    dWh: Floats2d
    d_bias: Floats1d
    Y, G, C, X = fwd_state
    depth, dirs, N, nO = C.shape
    nI = X.shape[1]
    batch_size = lengths[0]
    # We don't need to store all the cells for all the layers.
    dC = cast(Floats2d, xp.zeros((N, nO), dtype=C.dtype))
    dG = cast(Floats2d, xp.zeros((N, nO * 4), dtype=C.dtype))
    d_params = cast(Floats1d, xp.zeros((params.shape[0],), dtype=params.dtype))
    # Collect the params and slices. It makes it a bit easier to get the indexing
    # right, when we're iterating backwards.
    params_i = 0
    all_layer_params: List[List[Tuple[Tuple[Floats2d, Floats2d, Floats1d], int]]] = []
    for i in range(depth):
        all_layer_params.append([])
        n_inputs = nI if i == 0 else (nO * dirs)
        for d in range(dirs):
            layer_params, params_i = _split_weights(params, i, nO, n_inputs, params_i)
            layer_params = _transpose_weights(layer_params)
            all_layer_params[-1].append((layer_params, params_i))
    params_i = 0
    all_layer_grads: List[List[Tuple[Tuple[Floats2d, Floats2d, Floats1d], int]]] = []
    for i in range(depth):
        all_layer_grads.append([])
        n_inputs = nI if i == 0 else (nO * dirs)
        for d in range(dirs):
            layer_grads, params_i = _split_weights(d_params, i, nO, n_inputs, params_i)
            layer_grads = _transpose_weights(layer_grads)
            all_layer_grads[-1].append((layer_grads, params_i))
    # Similarly, we want to compute the indices first
    indices = []
    start = 0
    for batch_size in lengths:
        indices.append((start, start + batch_size))
        start += batch_size
    # Per-layer inputs: the original X for layer 0, the previous layer's
    # (direction-concatenated) outputs for deeper layers.
    Xs = [X] + [
        cast(Floats2d, Y[i].transpose((1, 0, 2)).reshape((N, -1)))
        for i in range(depth - 1)
    ]
    dXs = [xp.zeros((X.shape[0], X.shape[1]), dtype=X.dtype) for X in Xs]
    # Okay, now do the actual looping
    for i in reversed(range(depth)):
        dY3d = cast(Floats3d, dY.reshape((N, dirs, nO)).transpose((1, 0, 2)))
        dX = dXs[i]
        X = Xs[i]
        if dirs >= 2:
            dY3d = xp.ascontiguousarray(dY3d)
        for d in range(dirs):
            Wx, Wh, bias = all_layer_params[i][d][0]
            dWx, dWh, d_bias = all_layer_grads[i][d][0]
            if d == 0:
                # Forward direction: walk the timesteps from last to first.
                start_t3, end_t3 = indices[-1]
                layer_indices = indices[:-1]
                layer_indices.reverse()
            else:
                # Backward direction: walk them first to last.
                start_t3, end_t3 = indices[0]
                layer_indices = indices[1:]
            for start_t2, end_t2 in layer_indices:
                size = min(end_t2 - start_t2, end_t3 - start_t3)
                dGt3, dCt2 = backprop_lstm_gates(
                    dY3d[d, start_t3 : start_t3 + size],
                    dC[start_t3 : start_t3 + size],
                    G[i, d, start_t3 : start_t3 + size],
                    C[i, d, start_t3 : start_t3 + size],
                    C[i, d, start_t2 : start_t2 + size],
                )
                # Backprop hidden-to-hidden w.r.t. hidden.
                dY3d[d, start_t2 : start_t2 + size] += dGt3 @ Wh
                # Update iteration variables
                dC[start_t2 : start_t2 + size] = dCt2
                start_t3 = start_t2
                end_t3 = end_t2
            # Backprop input-to-hidden w.r.t. weights.
            dWx += dG.T @ X
            # Backprop hidden-to-hidden w.r.t. weights.
            dWh += dG.T @ Y[i, d]
            # Backprop bias
            d_bias += dG.sum(axis=0)
            # Backprop input-to-hidden w.r.t. input
            dX += dG @ Wx
        dY = dX
    assert dX.shape[1] == X.shape[1]
    grad_parts = []
    for layer_grads in all_layer_grads:
        for dir_grads, _ in layer_grads:
            grad_parts.append(_untranspose_unsplit_weights(dir_grads))
    return dX, xp.concatenate(grad_parts)
def _split_weights(params: Floats1d, i: int, nO: int, nI: int, params_i: int):
    """Slice one LSTM layer's flat parameter vector into reshaped views.

    Returns (((Wx, bx), (Wh, bh)), next_offset), consuming the four
    contiguous segments starting at `params_i`.
    """
    segments = [
        (4 * nO * nI, (4 * nO, nI)),  # Wx: input-to-hidden weights
        (4 * nO, (4 * nO,)),          # bx: input-to-hidden bias
        (4 * nO * nO, (4 * nO, nO)),  # Wh: hidden-to-hidden weights
        (4 * nO, (4 * nO,)),          # bh: hidden-to-hidden bias
    ]
    pieces = []
    offset = params_i
    for size, shape in segments:
        pieces.append(params[offset : offset + size].reshape(shape))
        offset += size
    Wx, bx, Wh, bh = pieces
    return ((Wx, bx), (Wh, bh)), offset
def _transpose_weights(params):
    # Transpose the parameters so that the gates are the last dimension. This
    # makes it easier to fuse.
    # Also merges the two bias vectors into a single `bias`, since they are
    # always added together in the forward pass.
    (Wx, bx), (Wh, bh) = params
    xp = get_array_module(Wx)
    Wx = Wx.reshape((4, -1, Wx.shape[-1]))
    Wx = Wx.transpose((1, 0, 2)).reshape((-1, Wx.shape[-1]))
    bx = bx.reshape((4, -1)).transpose((1, 0)).reshape((-1,))
    Wh = Wh.reshape((4, -1, Wh.shape[-1]))
    Wh = Wh.transpose((1, 0, 2)).reshape((-1, Wh.shape[-1]))
    bh = bh.reshape((4, -1)).transpose((1, 0)).reshape((-1,))
    ascontig = xp.ascontiguousarray
    Wx = ascontig(Wx)
    Wh = ascontig(Wh)
    bias = ascontig(bx) + bh
    return Wx, Wh, bias
def _untranspose_unsplit_weights(params):
    # Inverse of _transpose_weights followed by re-flattening: rebuilds the
    # (Wx, bx, Wh, bh) flat layout. The merged bias cannot be split back
    # into bx and bh, so it is stored as bx and bh is zero-filled.
    Wx, Wh, bias = params
    xp = get_array_module(Wx)
    nO = Wh.shape[1]
    nI = Wx.shape[1]
    Wx = Wx.reshape((-1, 4, nI)).transpose((1, 0, 2)).reshape((-1, nI))
    Wh = Wh.reshape((-1, 4, nO)).transpose((1, 0, 2)).reshape((-1, nO))
    bias = bias.reshape((-1, 4)).transpose((1, 0)).reshape((-1,))
    zeros = xp.zeros(bias.shape, dtype="f")
    return xp.concatenate((Wx.ravel(), bias, Wh.ravel(), zeros))
def backprop_lstm_gates(
    dYt3: Floats2d, dCt3: Floats2d, Gt3: Floats2d, Ct3: Floats2d, Ct2: Floats2d
) -> Tuple[Floats2d, Floats2d]:
    """Backprop one timestep through the LSTM gate non-linearities.

    Takes the output gradient dYt3 and running cell gradient dCt3 plus the
    saved gates Gt3 and cells (Ct3, Ct2); returns (dAt3, dCt2) — the
    gradient of the pre-nonlinearity activations and of the previous cells.
    Note dCt3 is accumulated into in place.
    """
    # See above for notation. Step numbering refers to forward_lstm_gates
    xp = get_array_module(dYt3)
    hf, hi, ho, hc = xp.split(Gt3, 4, axis=-1)
    assert hf.shape[0] == hi.shape[0] == ho.shape[0] == hc.shape[0]
    assert hf.shape[0] == dYt3.shape[0] == dCt3.shape[0] == Ct3.shape[0] == Ct2.shape[0]
    tanhCt3 = xp.tanh(Ct3)
    # 3b: Yt3 = tanhCt3 * ho
    d_ho = dYt3 * tanhCt3
    d_tanhCt3 = dYt3 * ho
    # 3a: tanhCt3 = tanh(Ct3)
    dCt3 += d_tanhCt3 * dtanh(tanhCt3)
    # 2b: Ct3 += hi * hc
    d_hi = dCt3 * hc
    d_hc = dCt3 * hi
    # 2a: Ct3 = hf * Ct2
    d_hf = dCt3 * Ct2
    dCt2 = dCt3 * hf
    d_At3_hc = d_hc * dtanh(hc)  # 1d
    d_At3_ho = d_ho * dsigmoid(ho)  # 1c
    d_At3_hi = d_hi * dsigmoid(hi)  # 1b
    d_At3_hf = d_hf * dsigmoid(hf)  # 1a
    dAt3 = xp.concatenate((d_At3_hf, d_At3_hi, d_At3_ho, d_At3_hc), axis=-1)
    return dAt3, dCt2
def sigmoid(X, out=None):
    """Elementwise logistic function 1 / (1 + exp(-X)).

    `out` is accepted for signature compatibility but is not used.
    """
    xp = get_array_module(X)
    denom = 1.0 + xp.exp(-X)
    return 1.0 / denom
def dsigmoid(Y: ArrayT) -> ArrayT:
    """Derivative of the logistic sigmoid, expressed via its output Y."""
    complement = 1.0 - Y
    return Y * complement
def dtanh(Y: ArrayT) -> ArrayT:
    """Derivative of tanh, expressed via its output Y: 1 - Y**2."""
    return 1 - Y * Y
def gaussian_cdf(ops: Ops, X: FloatsType) -> FloatsType:
    """Gaussian CDF for distribution with mean 0 and stdev 1."""
    # Phi(x) = 0.5 * (1 + erf(x / sqrt(2)))
    return 0.5 * (1.0 + ops.erf(INV_SQRT2 * X))
def gaussian_pdf(ops: Ops, X: FloatsType) -> FloatsType:
    """Gaussian PDF for distribution with mean 0 and stdev 1."""
    # phi(x) = exp(-x**2 / 2) / sqrt(2 * pi)
    return INV_SQRT_2PI * ops.xp.exp(-0.5 * X * X)
|
explosion/thinc
|
thinc/backends/ops.py
|
Python
|
mit
| 50,092
|
[
"Gaussian"
] |
b5209f3ccb4db923d1f0cd8aeb80f7069651bc74ec8269beef3be9088343ed4d
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# NOTE(review): debug output of the discovered packages — consider removing.
print(find_packages(exclude=['contrib', 'docs', 'tests']))

setup(
    name='pipelines',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.0.1-BETA',
    # NOTE(review): description looks truncated — fill in the full summary.
    description='A ',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/InformaticsMatters/pipelines/',
    # Author details
    author='Tim Dudgeon',
    # Choose your license
    license='Apache 2.0',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        # Must match the "license" argument above: this project is
        # Apache 2.0, not MIT (the previous classifier was inconsistent).
        'License :: OSI Approved :: Apache Software License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # What does your project relate to?
    keywords='',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=["pipelines.dimorphite", "pipelines.dmpk", "pipelines.docking", "pipelines.rdkit", "pipelines.xchem"],
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    # package_data={
    #     'sample': ['package_data.dat'],
    # },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
)
|
InformaticsMatters/pipelines
|
src/python/setup.py
|
Python
|
apache-2.0
| 3,975
|
[
"RDKit"
] |
f8cd1e646286ca2e70164e5a9411bf449097e780cbf916ee4cbaa96904c994ff
|
""" This tests only need the PilotAgentsDB, and connects directly to it
Suggestion: for local testing, run this with::
python -m pytest -c ../pytest.ini -vv tests/Integration/WorkloadManagementSystem/Test_PilotAgentsDB.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wrong-import-position
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from mock import patch
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PivotedPilotSummaryTable
gLogger.setLevel('DEBUG')
paDB = PilotAgentsDB()
def preparePilots(stateCount, testSite, testCE, testGroup):
  """
  Set up a bunch of pilots in different states.

  :param list stateCount: number of pilots per state. States are: 'Submitted', 'Done', 'Failed',
         'Aborted', 'Running', 'Waiting', 'Scheduled', 'Ready'
         (i.e. the order of PivotedPilotSummaryTable.pstates)
  :param str testSite: Site name
  :param str testCE: CE name
  :param str testGroup: group name
  :return: list of the pilot references created
  """
  pilotRef = []
  nPilots = sum(stateCount)
  for i in range(nPilots):
    pilotRef.append('pilotRef_' + str(i))
  res = paDB.addPilotTQReference(pilotRef, 123, 'ownerDN', testGroup, )
  assert res['OK'] is True, res['Message']
  index = 0
  # Assign stateCount[j] consecutive pilot references to state j.
  for j, num in enumerate(stateCount):
    for i in range(num):
      pNum = i + index
      res = paDB.setPilotStatus('pilotRef_' + str(pNum), PivotedPilotSummaryTable.pstates[j], destination=testCE,
                                statusReason='Test States', gridSite=testSite, queue=None,
                                benchmark=None, currentJob=num,
                                updateTime=None, conn=False)
      assert res['OK'] is True, res['Message']
    index += num
  return pilotRef
def cleanUpPilots(pilotRef):
  """
  Delete all pilots pointed to by pilotRef.

  :param list pilotRef: pilot references to delete
  :return: None
  """
  for elem in pilotRef:
    res = paDB.deletePilot(elem)
    assert res['OK'] is True, res['Message']
def test_basic():
  """ usual insert/verify
  """
  res = paDB.addPilotTQReference(['pilotRef'], 123, 'ownerDN', 'ownerGroup',)
  assert res['OK'] is True
  # NOTE(review): the delete result is not asserted here.
  res = paDB.deletePilot('pilotRef')

  # FIXME: to expand...
@patch('DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB.getVOForGroup')
def test_getGroupedPilotSummary(mocked_fcn):
  """
  Test the 'pivoted' pilot summary method. getVOForGroup is mocked so the
  group -> VO mapping is deterministic.

  :return: None
  """
  stateCount = [10, 50, 7, 3, 12, 8, 6, 4]
  testGroup = 'ownerGroup'
  testGroupVO = 'ownerGroupVO'
  testCE = 'TestCE'
  testSite = 'TestSite'
  mocked_fcn.return_value = 'ownerGroupVO'

  pilotRef = preparePilots(stateCount, testSite, testCE, testGroup)

  selectDict = {}
  columnList = ['GridSite', 'DestinationSite', 'OwnerGroup']
  res = paDB.getGroupedPilotSummary(selectDict, columnList)
  cleanUpPilots(pilotRef)

  expectedParameterList = ['Site', 'CE', 'OwnerGroup', 'Submitted', 'Done', 'Failed',
                           'Aborted', 'Running', 'Waiting', 'Scheduled', 'Ready',
                           'Total', 'PilotsPerJob', 'PilotJobEff', 'Status']

  assert res['OK'] is True, res['Message']
  values = res['Value']
  assert 'ParameterNames' in values, "ParameterNames key missing in result"
  assert values['ParameterNames'] == expectedParameterList, "Expected and obtained ParameterNames differ"
  assert 'Records' in values, "Records key missing in result"
  # in the setup with one Site/CE/OwnerGroup there will be only one record:
  assert len(values['Records']) == 1
  record = values['Records'][0]
  assert len(record) == len(expectedParameterList)
  assert record[0] == testSite
  assert record[1] == testCE
  assert record[2] == testGroupVO
  # pilot state counts: columns 3..10 hold the 8 per-state counters.
  # (Was record[3:10], which silently skipped the last state, 'Ready'.)
  for i, entry in enumerate(record[3:11]):
    assert entry == stateCount[i], " found entry: %s, expected stateCount: %d " % (str(entry), stateCount[i])
  # Total
  total = record[expectedParameterList.index('Total')]
  assert total == sum(stateCount)
  # pilot efficiency; abs() so that both a deficit and an excess fail.
  delta = 0.01
  accuracy = record[expectedParameterList.index('PilotJobEff')] - 100.0 * \
      (total - record[expectedParameterList.index('Aborted')]) / total
  assert abs(accuracy) <= delta, " Pilot eff accuracy %f should be < %f " % (accuracy, delta)
  # there aren't any jobs, so:
  assert record[expectedParameterList.index('Status')] == 'Idle'
def test_PivotedPilotSummaryTable():
  """
  Test the 'pivoted' query only. Check whether the number of pilots in different states returned by
  the query is correct.

  :return: None
  """
  # PivotedPilotSummaryTable pstates gives pilot possible states (table.pstates)
  # pstates = ['Submitted', 'Done', 'Failed', 'Aborted', 'Running', 'Waiting', 'Scheduled', 'Ready']
  stateCount = [10, 50, 7, 3, 12, 8, 6, 4]
  testGroup = 'ownerGroup'
  testCE = 'TestCE'
  testSite = 'TestSite'

  pilotRef = preparePilots(stateCount, testSite, testCE, testGroup)

  table = PivotedPilotSummaryTable(['GridSite', 'DestinationSite', 'OwnerGroup'])
  sqlQuery = table.buildSQL()
  # Run the raw SQL directly against the DB, bypassing the summary wrapper.
  res = paDB._query(sqlQuery)
  assert res['OK'] is True, res['Message']
  columns = table.getColumnList()
  # first 3 columns are: Site, CE and a group (VO mapping comes later, not in the SQL above)
  assert 'Site' in columns
  assert columns.index('Site') == 0
  assert 'CE' in columns
  assert columns.index('CE') == 1
  assert 'OwnerGroup' in columns
  assert columns.index('OwnerGroup') == 2
  # pilot numbers by states:
  assert 'Total' in columns
  # with the setup above there will be only one row, first 3 elements must match the columns.
  row = res['Value'][0]
  assert row[0] == testSite
  assert row[1] == testCE
  assert row[2] == testGroup
  total = row[columns.index('Total')]
  assert total == sum(stateCount), res['Value']
  # every state column must hold exactly the count we created for it
  for i, state in enumerate(table.pstates):
    assert state in columns
    assert row[columns.index(state)] == stateCount[i], " state: %s, stateCount: %d " % (state, stateCount[i])

  cleanUpPilots(pilotRef)
|
yujikato/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_PilotAgentsDB.py
|
Python
|
gpl-3.0
| 6,050
|
[
"DIRAC"
] |
ff59ba3228616071bb2fc7d53b194e553d724c1b0e28e8284b1df04c694a1abd
|
'''
SASMOL: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from sasmol.test_sasmol.utilities import env
from unittest import main, skipIf
from mocker import Mocker, MockerTestCase, ANY, ARGS, KWARGS
import sasmol.system as system
import numpy, os, copy
floattype=os.environ['SASMOL_FLOATTYPE']
DataPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','pdb_common')+os.path.sep
moduleDataPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','sasmol','file_io')+os.path.sep
class Test_intg_file_io_Files_read_dcd(MockerTestCase):
    def setUp(self):
        # Fresh, empty sasmol Molecule for each test.
        self.o=system.Molecule(0)
    def assert_list_almost_equal(self,a,b,places=5):
        # Recursively compare nested sequences element-wise to `places`
        # decimal places; NaNs in matching positions compare equal.
        # Raises TypeError when lengths differ at any nesting level.
        if (len(a)!=len(b)):
            raise TypeError
        else:
            for i in range(len(a)):
                if isinstance(a[i],(int,float,numpy.generic)):
                    if (numpy.isnan(a[i]) and numpy.isnan(b[i])): continue
                    self.assertAlmostEqual(a[i],b[i],places)
                else:
                    self.assert_list_almost_equal(a[i],b[i],places)
    def test_1ATM_one_frame(self):
        '''
        test a pdb file with 1 atom and 1 frame
        '''
        #
        self.o.read_pdb(DataPath+'1ATM.pdb')
        result_coor = self.o.coor()
        print '\nresult_coor \n',result_coor
        #
        expected_coor = numpy.array([[[73.944, 41.799, 41.652]]],floattype)
        print '\nexpected_coor \n',expected_coor
        #
        self.assert_list_almost_equal(expected_coor, result_coor,3)
    def test_1ATM_two_frames(self):
        '''
        test a pdb file with 1 atom and 2 frames
        '''
        #
        self.o.read_pdb(DataPath+'1ATM-1to2.pdb')
        result_coor = self.o.coor()
        print '\nresult_coor \n',result_coor
        #
        expected_coor = numpy.array([[[76.944, 41.799, 41.652]],[[73.944, 38.799, 41.652]]],floattype)
        print '\nexpected_coor \n',expected_coor
        #
        self.assert_list_almost_equal(expected_coor, result_coor,3)
    def test_2AAD_one_frame(self):
        '''
        test a pdb file with 2 amino acids and 1 frame
        '''
        #
        self.o.read_pdb(DataPath+'2AAD.pdb')
        result_coor = self.o.coor()
        print '\nresult_coor \n',result_coor
        #
        # 15 atoms: ILE-515 (8 atoms) followed by THR-516 (7 atoms)
        expected_coor = numpy.array([[[ 73.944, 41.799, 41.652], [ 74.229, 42.563, 40.456], [ 75.667, 43.093, 40.463], [ 76.264, 43.279, 39.401], [ 73.210, 43.734, 40.336], [ 71.856, 43.168, 39.926], [ 73.677, 44.782, 39.354], [ 70.721, 44.177, 39.946], [ 76.231, 43.330, 41.647], [ 77.592, 43.852, 41.730], [ 78.617, 42.820, 42.184], [ 79.712, 43.169, 42.656], [ 77.671, 45.097, 42.648], [ 77.054, 44.816, 43.910], [ 76.970, 46.273, 42.000]]],floattype)
        print '\nexpected_coor \n',expected_coor
        #
        self.assert_list_almost_equal(expected_coor, result_coor,3)
    def test_2AAD_three_frames_separatedby_END(self):
        '''
        test a pdb file with 2 amino acids and 3 frames (separated by END)
        '''
        #
        self.o.read_pdb(moduleDataPath+'2AAD-1to3-END.pdb')
        result_coor = self.o.coor()
        print '\nresult_coor \n',result_coor
        #
        # frame 2 negates x, frame 3 negates y relative to frame 1
        expected_coor = numpy.array([[[ 73.944, 41.799, 41.652], [ 74.229, 42.563, 40.456], [ 75.667, 43.093, 40.463], [ 76.264, 43.279, 39.401], [ 73.210, 43.734, 40.336], [ 71.856, 43.168, 39.926], [ 73.677, 44.782, 39.354], [ 70.721, 44.177, 39.946], [ 76.231, 43.330, 41.647], [ 77.592, 43.852, 41.730], [ 78.617, 42.820, 42.184], [ 79.712, 43.169, 42.656], [ 77.671, 45.097, 42.648], [ 77.054, 44.816, 43.910], [ 76.970, 46.273, 42.000]],\
                [[ -73.944, 41.799, 41.652], [ -74.229, 42.563, 40.456], [ -75.667, 43.093, 40.463], [ -76.264, 43.279, 39.401], [ -73.210, 43.734, 40.336], [ -71.856, 43.168, 39.926], [ -73.677, 44.782, 39.354], [ -70.721, 44.177, 39.946], [ -76.231, 43.330, 41.647], [ -77.592, 43.852, 41.730], [ -78.617, 42.820, 42.184], [ -79.712, 43.169, 42.656], [ -77.671, 45.097, 42.648], [ -77.054, 44.816, 43.910], [ -76.970, 46.273, 42.000]],\
                [[ 73.944, -41.799, 41.652], [ 74.229, -42.563, 40.456], [ 75.667, -43.093, 40.463], [ 76.264, -43.279, 39.401], [ 73.210, -43.734, 40.336], [ 71.856, -43.168, 39.926], [ 73.677, -44.782, 39.354], [ 70.721, -44.177, 39.946], [ 76.231, -43.330, 41.647], [ 77.592, -43.852, 41.730], [ 78.617, -42.820, 42.184], [ 79.712, -43.169, 42.656], [ 77.671, -45.097, 42.648], [ 77.054, -44.816, 43.910], [ 76.970, -46.273, 42.000]]],floattype)
        print '\nexpected_coor \n',expected_coor
        #
        self.assert_list_almost_equal(expected_coor, result_coor,3)
    def test_2AAD_three_frames_separatedby_END_all_properties(self):
        '''
        test a pdb file with 2 amino acids and 3 frames (separated by END) for all properties
        '''
        #
        self.o.read_pdb(moduleDataPath+'2AAD-1to3-END.pdb')
        natoms = self.o.natoms()
        self.assertEqual(natoms,15)
        self.assertEqual(self.o.moltype(),['protein']*natoms)
        self.assertEqual(self.o.number_of_frames(),3)
        self.assertEqual(self.o.atom(),['ATOM']*natoms)
        self.assertEqual(self.o.name(),['N','CA','C','O','CB','CG1','CG2','CD1','N','CA','C','O','CB','OG1','CG2'])
        self.assert_list_almost_equal(self.o.index(),range(1,natoms+1))
        self.assertEqual(self.o.loc(),[' ']*natoms)
        # 8 ILE atoms followed by 7 THR atoms
        self.assertEqual(self.o.resname(),['ILE']*8+['THR']*7)
        self.assertEqual(self.o.chain(),['N']*natoms)
        self.assert_list_almost_equal(self.o.resid(),[515]*8+[516]*7)
        self.assertEqual(self.o.rescode(),[' ']*natoms)
        self.assertEqual(self.o.occupancy(),['1.00']*natoms)
        self.assertEqual(self.o.beta(),['36.37', '36.23', '36.32', '36.04', '36.69', '38.12', '34.42', '39.85', '35.01', '35.51', '38.09', '36.94', '36.74', '37.19', '34.44'])
        self.assertEqual(self.o.segname(),['N']*natoms)
        self.assertEqual(self.o.element(),['N', 'C', 'C', 'O', 'C', 'C', 'C', 'C', 'N', 'C', 'C', 'O', 'C', 'O', 'C'])
        self.assertEqual(self.o.charge(),[' ']*natoms)
    def test_2AAD_three_frames_separatedby_END_wrong_number_atoms(self):
        '''
        test a pdb file with 2 amino acids and 3 frames (separated by END)
        where one frame has the wrong number of atoms: read_pdb must raise
        '''
        #
        with self.assertRaises(Exception):
            self.o.read_pdb(moduleDataPath+'2AAD-1to3-END_wrong_number_atoms.pdb')
    def test_2AAD_three_frames_separatedby_MODEL(self):
        '''
        test a pdb file with 2 amino acids and 3 frames (separated by MODEL)
        '''
        #
        self.o.read_pdb(moduleDataPath+'2AAD-1to3-MODEL.pdb')
        result_coor = self.o.coor()
        print '\nresult_coor \n',result_coor
        #
        # same fixture geometry as the END-separated variant
        expected_coor = numpy.array([[[ 73.944, 41.799, 41.652], [ 74.229, 42.563, 40.456], [ 75.667, 43.093, 40.463], [ 76.264, 43.279, 39.401], [ 73.210, 43.734, 40.336], [ 71.856, 43.168, 39.926], [ 73.677, 44.782, 39.354], [ 70.721, 44.177, 39.946], [ 76.231, 43.330, 41.647], [ 77.592, 43.852, 41.730], [ 78.617, 42.820, 42.184], [ 79.712, 43.169, 42.656], [ 77.671, 45.097, 42.648], [ 77.054, 44.816, 43.910], [ 76.970, 46.273, 42.000]],\
                [[ -73.944, 41.799, 41.652], [ -74.229, 42.563, 40.456], [ -75.667, 43.093, 40.463], [ -76.264, 43.279, 39.401], [ -73.210, 43.734, 40.336], [ -71.856, 43.168, 39.926], [ -73.677, 44.782, 39.354], [ -70.721, 44.177, 39.946], [ -76.231, 43.330, 41.647], [ -77.592, 43.852, 41.730], [ -78.617, 42.820, 42.184], [ -79.712, 43.169, 42.656], [ -77.671, 45.097, 42.648], [ -77.054, 44.816, 43.910], [ -76.970, 46.273, 42.000]],\
                [[ 73.944, -41.799, 41.652], [ 74.229, -42.563, 40.456], [ 75.667, -43.093, 40.463], [ 76.264, -43.279, 39.401], [ 73.210, -43.734, 40.336], [ 71.856, -43.168, 39.926], [ 73.677, -44.782, 39.354], [ 70.721, -44.177, 39.946], [ 76.231, -43.330, 41.647], [ 77.592, -43.852, 41.730], [ 78.617, -42.820, 42.184], [ 79.712, -43.169, 42.656], [ 77.671, -45.097, 42.648], [ 77.054, -44.816, 43.910], [ 76.970, -46.273, 42.000]]],floattype)
        print '\nexpected_coor \n',expected_coor
        #
        self.assert_list_almost_equal(expected_coor, result_coor,3)
    def test_2AAD_three_frames_separatedby_MODEL_wrong_number_atoms(self):
        '''
        test a pdb file with 2 amino acids and 3 frames (separated by MODEL)
        where one frame has the wrong number of atoms: read_pdb must raise
        '''
        #
        with self.assertRaises(Exception):
            self.o.read_pdb(moduleDataPath+'2AAD-1to3-MODEL_wrong_number_atoms.pdb')
    def test_2AAD_three_frames_separatedby_MODEL_wrongnumber_mix_END(self):
        '''
        test a pdb file with 2 amino acids and 3 frames mixing MODEL and END
        separators with an inconsistent atom count: read_pdb must raise
        '''
        #
        with self.assertRaises(Exception):
            self.o.read_pdb(moduleDataPath+'2AAD-1to3-MODEL_wrongnumber_mix_END.pdb')
    def test_2AAD_three_frames_separatedby_MODEL_mix_END_noterminating(self):
        '''
        test a pdb file with 2 amino acids and 3 frames mixing MODEL and END
        separators with no terminating record: read_pdb must raise
        '''
        #
        with self.assertRaises(Exception):
            self.o.read_pdb(moduleDataPath+'2AAD-1to3-MODEL_mix_END_noterminating.pdb')
def test_rna_frame1to10_frame_3(self):
    '''
    Read an rna pdb trajectory with 10 frames; check the frame count,
    the per-frame atom count, and spot-check atom 10627 of frame 3.
    '''
    #
    self.o.read_pdb(DataPath+"rna-1to10.pdb")
    result_coor = self.o.coor()
    print '\nresult_coor \n',result_coor[2][10627]
    #
    self.assertEqual(len(result_coor),10)
    self.assertEqual(len(result_coor[3]),10632)
    expected_coor_sample = numpy.array([-5.564, 20.324, 26.185],floattype) #atom 10627 of frame 3
    self.assert_list_almost_equal(result_coor[2][10627],expected_coor_sample,3)
def test_1PSI(self):
    '''
    Read a pdb file that lacks an ENDMDL record: read_pdb must raise.
    '''
    #
    with self.assertRaises(Exception):
        self.o.read_pdb(DataPath+'1PSI.pdb')
def test_blanklines(self):
    '''
    Read a pdb file ending with blank lines and spot-check the
    coordinates of atom 10 in the first frame.
    '''
    #
    self.o.read_pdb(DataPath+'dimcd_fixed_atoms.pdb')
    expected_coor_sample = numpy.array([65.124, 35.624, 50.733],floattype)
    result_coor = self.o.coor()
    self.assert_list_almost_equal(result_coor[0][10],expected_coor_sample,3)
def test_1AA_NoEND(self):
    '''
    Read a 1-amino-acid, single-frame pdb without an END statement and
    compare the sum of all coordinates against reference values.
    '''
    #
    self.o.read_pdb(moduleDataPath+'1AA-NoEND.pdb')
    result_coor = self.o.coor()
    # result_coor is (frames, atoms, xyz): triple sum collapses it to a scalar
    result_sum_coor = sum(sum(sum((result_coor))))
    #
    expected_coor = numpy.array([[-21.525, -67.562, 86.759], [-22.003, -68.460, 86.892],[-21.905, -66.929, 87.525],[-20.492, -67.726, 86.876],[-21.725, -66.910, 85.457],[-21.476, -67.600, 84.661],[-21.157, -65.997, 85.450],[-23.103, -66.411, 85.215],[-23.249, -65.504, 84.385]],floattype)
    expected_sum_coor = sum(sum(expected_coor))
    #
    self.assertAlmostEqual(result_sum_coor,expected_sum_coor,3)
def test_cleaned_up_package_rna(self):
    '''
    Read a large rna pdb trajectory; check the per-frame atom count
    (3719) and spot-check atom 299 of the first frame.
    '''
    #
    self.o.read_pdb(moduleDataPath+"new_package_rna.pdb")
    result_coor = self.o.coor()
    print '\nlength of result_coor \n',len(result_coor[0])
    print '\nresult_coor \n',result_coor
    #
    self.assertEqual(len(result_coor[0]),3719)
    expected_coor_sample = numpy.array([-12.872, 13.360, -153.873],floattype) #atom 299 of frame 1
    self.assert_list_almost_equal(result_coor[0][299],expected_coor_sample,3)
def test_problem_pdb(self):
    '''
    Read a pdb file whose atom names are not standard charmm names.

    The original test only printed the parsed names (plus a stray 'ZHL'
    debug marker), so a silent parse failure could never fail the test;
    assert that parsing produced a non-empty name list instead.
    '''
    #
    self.o.read_pdb(moduleDataPath+"nef_nohis.pdb")
    self.assertTrue(len(self.o.name()) > 0)
def tearDown(self):
    # no per-test cleanup required; the tests only read data files
    pass
# run the unittest entry point when executed as a script
if __name__ == '__main__':
    main()
|
StevenCHowell/zazmol
|
src/python/test_sasmol/test_file_io/test_intg_file_io_Files_read_pdb.py
|
Python
|
gpl-3.0
| 12,561
|
[
"CHARMM"
] |
75f223299e5c65a8ccc53ee7310c8a20234f715c17a657bd8d35999293473374
|
import sys

# (executable, arguments, extra-options) triples consumed by rdkit's
# TestRunner; 'dir' redirects a python test into a subdirectory
tests=[
  ("testExecs/testReaction.exe","",{}),
  ("python","test_list.py",{'dir':'Wrap'}),
  ]

# long-running tests, skipped in the default run
longTests=[
  ]

if __name__=='__main__':
  import sys
  from rdkit import TestRunner
  # exit status = number of failed tests, so CI can detect failures
  failed,tests = TestRunner.RunScript('test_list.py',0,1)
  sys.exit(len(failed))
|
soerendip42/rdkit
|
Code/GraphMol/ChemReactions/test_list.py
|
Python
|
bsd-3-clause
| 279
|
[
"RDKit"
] |
5b3320062925cfa3dd1de5e651e86a5ef252aede248e747501621e761bd02707
|
from __future__ import print_function
import copy
import warnings
import graphviz
import matplotlib.pyplot as plt
import numpy as np
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """Plot the population's average (+1 sd) and best fitness per generation."""
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    num_gens = range(len(statistics.most_fit_genomes))
    fit_best = [genome.fitness for genome in statistics.most_fit_genomes]
    fit_avg = np.array(statistics.get_fitness_mean())
    fit_sd = np.array(statistics.get_fitness_stdev())

    plt.plot(num_gens, fit_avg, 'b-', label="average")
    plt.plot(num_gens, fit_avg + fit_sd, 'g-.', label="+1 sd")
    plt.plot(num_gens, fit_best, 'r-', label="best")

    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        # symmetric log scale also handles zero/negative fitness values
        plt.gca().set_yscale('symlog')

    plt.savefig(filename)
    if view:
        plt.show()
    plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
    """Plot potential, recovery and input current for a single spiking neuron.

    *spikes* is an iterable of (t, I, v, u) tuples.  Returns the figure
    unless *view* is set (then the figure is shown, closed and None is
    returned).
    """
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    t_values = [row[0] for row in spikes]
    I_values = [row[1] for row in spikes]
    v_values = [row[2] for row in spikes]
    u_values = [row[3] for row in spikes]

    fig = plt.figure()

    plt.subplot(3, 1, 1)
    plt.ylabel("Potential (mv)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, v_values, "g-")
    if title is None:
        plt.title("Izhikevich's spiking neuron model")
    else:
        plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))

    plt.subplot(3, 1, 2)
    plt.ylabel("Recovery (u)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, u_values, "r-")

    plt.subplot(3, 1, 3)
    plt.ylabel("Current (I)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, I_values, "r-o")

    if filename is not None:
        plt.savefig(filename)

    if view:
        plt.show()
        plt.close()
        fig = None

    return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
    """Stack-plot the size of every species across generations."""
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    sizes = statistics.get_species_sizes()
    # transpose so each row is one species' size over all generations
    curves = np.array(sizes).T

    fig, ax = plt.subplots()
    ax.stackplot(range(len(sizes)), *curves)

    plt.title("Speciation")
    plt.ylabel("Size per Species")
    plt.xlabel("Generations")

    plt.savefig(filename)
    if view:
        plt.show()
    plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
             node_colors=None, fmt='svg'):
    """Receives a genome and draws a neural network with arbitrary topology.

    config -- NEAT config; provides genome_config.input_keys/output_keys.
    genome -- the genome whose nodes and connections are drawn.
    node_names -- optional {node_key: label} display overrides.
    show_disabled -- also draw disabled connections (dotted).
    prune_unused -- drop hidden nodes with no path to any output.
    Returns the graphviz.Digraph, also rendered to *filename*.
    """
    # Attributes for network nodes.
    if graphviz is None:
        warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
        return

    if node_names is None:
        node_names = {}
    # isinstance instead of identity-comparing type objects
    assert isinstance(node_names, dict)

    if node_colors is None:
        node_colors = {}
    assert isinstance(node_colors, dict)

    node_attrs = {
        'shape': 'circle',
        'fontsize': '9',
        'height': '0.2',
        'width': '0.2'}

    dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)

    inputs = set()
    for k in config.genome_config.input_keys:
        inputs.add(k)
        name = node_names.get(k, str(k))
        input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
        dot.node(name, _attributes=input_attrs)

    outputs = set()
    for k in config.genome_config.output_keys:
        outputs.add(k)
        name = node_names.get(k, str(k))
        node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
        dot.node(name, _attributes=node_attrs)

    if prune_unused:
        connections = set()
        for cg in genome.connections.values():
            if cg.enabled or show_disabled:
                connections.add(cg.key)

        # breadth-first walk backwards from the outputs to find reachable nodes
        used_nodes = copy.copy(outputs)
        pending = copy.copy(outputs)
        while pending:
            new_pending = set()
            for a, b in connections:
                if b in pending and a not in used_nodes:
                    new_pending.add(a)
                    used_nodes.add(a)
            pending = new_pending
    else:
        used_nodes = set(genome.nodes.keys())

    for n in used_nodes:
        if n in inputs or n in outputs:
            continue
        attrs = {'style': 'filled', 'fillcolor': node_colors.get(n, 'white')}
        dot.node(str(n), _attributes=attrs)

    for cg in genome.connections.values():
        if cg.enabled or show_disabled:
            # renamed from 'input'/'output' to avoid shadowing the builtins
            in_key, out_key = cg.key
            a = node_names.get(in_key, str(in_key))
            b = node_names.get(out_key, str(out_key))
            style = 'solid' if cg.enabled else 'dotted'
            color = 'green' if cg.weight > 0 else 'red'
            width = str(0.1 + abs(cg.weight / 5.0))
            dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})

    dot.render(filename, view=view)
    return dot
|
CodeReclaimers/neat-python
|
examples/single-pole-balancing/visualize.py
|
Python
|
bsd-3-clause
| 5,821
|
[
"NEURON"
] |
267eadd16e586d9d4db1d047b56799d40a5dce7bfd85a7f94417dd6769353fb8
|
from django.db import models
class Team(models.Model):
    """A team mapped onto the legacy 'teams' table.

    Players reference it through Player.team (related_name='players').
    """

    class Meta:
        db_table = 'teams'

    # legacy column name 'fullname'
    name = models.CharField(db_column='fullname', max_length=50)

    def __str__(self):
        # consistency: Player defines __str__; give Team a readable
        # representation in the admin/shell as well
        return self.name
class Player(models.Model):
    """A player belonging to a Team, mapped onto the legacy 'players' table."""

    class Meta:
        db_table = 'players'

    # legacy column names: gname = given name, sname = surname
    first_name = models.CharField(db_column='gname', max_length=30)
    last_name = models.CharField(db_column='sname', max_length=30)
    team = models.ForeignKey(Team, on_delete=models.PROTECT, related_name='players', db_column='team')

    @property
    def info(self):
        # short display form without the team name
        return '%s %s' % (self.first_name, self.last_name)

    def __str__(self):
        return '%s %s (%s)' % (self.first_name, self.last_name, self.team.name)
class Segment(models.Model):
    ''' This class has no single primary key, so not all standard ORM API will work. '''
    # NOTE(review): Django has no composite primary keys; with several
    # fields marked primary_key=True only one is actually treated as the
    # pk by the ORM.  That is why update() below filters on the full
    # (round, segment, table) triple explicitly instead of using save().

    class Meta:
        db_table = 'segments'

    round = models.IntegerField(db_column='rnd', primary_key=True)
    segment = models.IntegerField(db_column='segment', primary_key=True)
    table = models.IntegerField(db_column='tabl', primary_key=True)
    home_team = models.ForeignKey(Team, on_delete=models.PROTECT, related_name='+', db_column='homet')
    away_team = models.ForeignKey(Team, on_delete=models.PROTECT, related_name='+', db_column='visit')
    # opening and closing line-ups, one player per seat (N/S/E/W)
    openN = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='openN')
    openS = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='openS')
    openE = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='openE')
    openW = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='openW')
    closeN = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='closeN')
    closeS = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='closeS')
    closeE = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='closeE')
    closeW = models.ForeignKey(Player, on_delete=models.PROTECT, related_name='+', db_column='closeW')

    def update(self, **kwargs):
        # update the row identified by the full composite key, then mirror
        # the new values onto this in-memory instance
        affected = Segment.objects.filter(round=self.round, segment=self.segment, table=self.table).update(**kwargs)
        assert affected == 1
        for field, value in kwargs.items():
            setattr(self, field, value)
|
michzimny/teamy-quick-lineup
|
ql/orm/models.py
|
Python
|
mit
| 2,396
|
[
"VisIt"
] |
de5e421717f51637e5d8cd9c2dd3412b8e1dcf2a8840411187118517446007e3
|
import numpy as np  # np.* is used throughout this script but was never imported
import theano
import theano.tensor as T
from helper import activations
from helper import misc,updates
from scipy.stats import gaussian_kde
from matplotlib import pyplot as plt
from matplotlib.pyplot import *
'''
from foxhound import activations
from foxhound import updates
'''
from helper import inits
from helper.theano_utils import floatX, sharedX
#defining parameters
# -- hyper-parameters and shared activation objects -------------------------
sz=2048          # NOTE(review): appears unused in this script
nh=2048          # hidden-layer width for both G and D
leaky_rectify = activations.leaky_rectify()
rectify = activations.Rectify()
tanh = activations.Tanh()
sigmoid = activations.Sigmoid()
bce = T.nnet.binary_crossentropy
batch_size = 128
init_fn = misc.Normal(scale=0.02)   # weight initialiser, N(0, 0.02)
#returns the probability of X to be choosed if gaussian distribution followed
def gaussian_probability(X, u=0., s=1.):
    """Return the Gaussian pdf evaluated at X for mean u and std-dev s.

    X may be a scalar or a numpy array (evaluated elementwise); s must be
    positive.  Requires the module-level ``import numpy as np`` (which was
    missing from the original script).
    """
    return (1./(s*np.sqrt(2*np.pi)))*np.exp(-(((X - u)**2)/(2*s**2)))
def scale_and_shift(X, g, b):
    """Affine transform: apply elementwise gain g and offset b to X."""
    scaled = X * g
    return scaled + b
#defining Generator(G) network as multilayer perceptron
def G(X, w1, g1, b1, w2, g2, b2, w3):
    """Generator MLP: two leaky-rectified hidden layers, linear output."""
    hidden1 = leaky_rectify(scale_and_shift(T.dot(X, w1), g1, b1))
    hidden2 = leaky_rectify(scale_and_shift(T.dot(hidden1, w2), g2, b2))
    return T.dot(hidden2, w3)
return y
#defining Discriminator(D) network as multilayer perceptron
def D(X, w1, g1, b1, w2, g2, b2, w3):
    """Discriminator MLP: leaky-rectify then tanh hidden layers, sigmoid output."""
    hidden1 = leaky_rectify(scale_and_shift(T.dot(X, w1), g1, b1))
    hidden2 = tanh(scale_and_shift(T.dot(hidden1, w2), g2, b2))
    return sigmoid(T.dot(hidden2, w3))
#initialise parameters for G and D
# -- parameter initialisation for G (g_*) and D (d_*) ------------------------
# w* = weight matrices; g*/b* = per-layer scale and shift terms
g_w1 = init_fn((1, nh))
g_g1 = inits.Normal(1., 0.02)(nh)
g_b1 = inits.Normal(0., 0.02)(nh)
g_w2 = init_fn((nh, nh))
g_g2 = inits.Normal(1., 0.02)(nh)
g_b2 = inits.Normal(0., 0.02)(nh)
g_w3 = init_fn((nh, 1))
#ggy = inits.Constant(1.)(1)
#gby = inits.Normal(0., 0.02)(1)
d_w1 = init_fn((1, nh))
d_g1 = inits.Normal(1., 0.02)(nh)
d_b1 = inits.Normal(0., 0.02)(nh)
d_w2 = init_fn((nh, nh))
d_g2 = inits.Normal(1., 0.02)(nh)
d_b2 = inits.Normal(0., 0.02)(nh)
d_w3 = init_fn((nh, 1))
#dgy = inits.Normal(1., 0.02)(1)
#dby = inits.Normal(0., 0.02)(1)
# symbolic inputs: Z = noise fed to the generator, X = real data samples
Z = T.matrix()
X = T.matrix()
#building generator, "gen" stores the output of generator layer
# building generator, "gen" stores the symbolic output of the generator
gen = G(Z, g_w1, g_g1, g_b1, g_w2, g_g2, g_b2, g_w3 )
# discriminator probabilities for real and generated data
prob_real = D(X, d_w1, d_g1, d_b1, d_w2, d_g2, d_b2, d_w3)
prob_gen = D(gen, d_w1, d_g1, d_b1, d_w2, d_g2, d_b2, d_w3)
# standard GAN losses: G wants prob_gen -> 1, D wants real -> 1 / gen -> 0
g_cost = T.nnet.binary_crossentropy(prob_gen, T.ones(prob_gen.shape)).mean()
d_real_cost = T.nnet.binary_crossentropy(prob_real, T.ones(prob_gen.shape)).mean()
d_gen_cost = T.nnet.binary_crossentropy(prob_gen, T.zeros(prob_gen.shape)).mean()
d_cost = d_real_cost + d_gen_cost
# all costs summarized in one list
cost = [g_cost, d_cost, d_real_cost, d_gen_cost]
# Adam optimizer; each network only updates its own parameters
learning_rate= 0.001
g_updater = updates.Adam(lr=sharedX(learning_rate) )
d_updater = updates.Adam(lr=sharedX(learning_rate) )
g_update = g_updater([g_w1, g_g1, g_b1, g_w2, g_g2, g_b2, g_w3 ], g_cost)
d_update = d_updater([d_w1, d_g1, d_b1, d_w2, d_g2, d_b2, d_w3 ], d_cost)
# compiled theano functions: train steps, sampling and scoring
train_g = theano.function([X, Z], cost, updates=g_update)
train_d = theano.function([X, Z], cost, updates=d_update)
_gen = theano.function([Z], gen)
_score = theano.function([X], prob_real)
# visualising G and D
def visualise(i):
    """Plot the true Gaussian, the KDE of generated samples and the
    discriminator output at generation *i*, saved to fig<i>.png."""
    fig = plt.figure()
    # evaluate the generator over the latent range and KDE its samples
    x = np.linspace(-5, 5, 500).astype('float32')
    z = np.linspace(-1, 1, 500).astype('float32')
    y_true = gaussian_probability(x)
    kde = gaussian_kde(_gen(z.reshape(-1,1)).flatten())
    y_gen = kde(x)
    preal = _score(x.reshape(-1, 1)).flatten()
    # plot all three curves on a common axis
    plt.clf()
    plt.plot(x, y_true, '--', lw=2)
    plt.plot(x, y_gen, lw=2)
    plt.plot(x, preal, lw=2)
    plt.xlim([-5.,5.])
    plt.ylim([0.,1.])
    plt.ylabel('Probability--> ')
    plt.xlabel('X --> ')
    plt.legend(['Training Data', 'Generated Data', 'Discriminator'])
    plt.title('GAN learn Gaussian distibution | Generation: '+str(i))
    #fig.canvas.draw()
    #plt.show()
    #show()
    plt.savefig("fig"+str(i)+".png")
#Training G and N for 50 generations
# Training loop: 100 generations, D trained every step except every 5th,
# where G is trained instead (and a snapshot figure is produced)
for i in range(100):
    x = np.random.normal(1, 1, size=(batch_size, 1)).astype('float32')
    y = np.random.uniform(-1, 1, size=(batch_size, 1)).astype('float32')
    if i%5==0:
        train_g(x, y)
        print "Generation = ",str(i)
        visualise(i)
    else:
        train_d(x, y)
|
iamharshit/ML_works
|
GAN learns Gaussion Function/gan_learn_gaussian.py
|
Python
|
mit
| 4,239
|
[
"Gaussian"
] |
e7f50bb2ad6e0623844c5192ec30c5440779fee2bc3161a5c7119aa98f87f55b
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
This example shows how to use pytim classes on trajectories
loaded with MDTraj (http://mdtraj.org/)
(see also the openmm interoperability)
"""
import mdtraj

import pytim
from pytim.datafiles import WATER_GRO, WATER_XTC

# load the xtc trajectory using the gro file as topology, then compute the
# ITIM interfacial atoms and print them for every frame
t = mdtraj.load_xtc(WATER_XTC, top=WATER_GRO)
inter = pytim.ITIM(t)
for step in t[:]:
    print("surface atoms: "+repr(inter.atoms.indices))
|
Marcello-Sega/pytim
|
pytim/examples/example_mdtraj.py
|
Python
|
gpl-3.0
| 507
|
[
"MDTraj",
"OpenMM"
] |
41d364b1197aaacbfbace7363cf492f47b1fa4cf708c886be25ac31a33e690fa
|
from collections import OrderedDict
from edc_visit_schedule.classes import (
VisitScheduleConfiguration, site_visit_schedules, MembershipFormTuple, ScheduleTuple)
from ..models import AntenatalVisitMembership, MaternalVisit
from .entries import (maternal_antenatal1_entries,
maternal_antenatal2_entries, maternal_requisition_antenatal1,
maternal_requisition_antenatal2)
class AntenatalVisitScheduleV3(VisitScheduleConfiguration):
    """v3 antenatal visit schedule: two visits (1010M, 1020M) tracked by
    MaternalVisit and gated on AntenatalVisitMembership."""

    name = 'Antenatal visit schedule v3'
    app_label = 'td_maternal'

    # membership form that gates entry into this schedule
    membership_forms = OrderedDict({'antenatalv3': MembershipFormTuple(
        'antenatalv3', AntenatalVisitMembership, True), })

    schedules = OrderedDict({
        'Antenatal Visit v3': ScheduleTuple('Antenatal Visit v3', 'antenatalv3', None, None), })

    visit_definitions = OrderedDict()

    # visit 1: day 1 after enrolment, zero-width visit window
    visit_definitions['1010M'] = {
        'title': 'Antenatal Visit 1 v3',
        'time_point': 5,
        'base_interval': 1,
        'base_interval_unit': 'D',
        'window_lower_bound': 0,
        'window_lower_bound_unit': 'D',
        'window_upper_bound': 0,
        'window_upper_bound_unit': 'D',
        'grouping': 'maternal',
        'visit_tracking_model': MaternalVisit,
        'schedule': 'Antenatal Visit',
        'instructions': 'V3',
        'requisitions': maternal_requisition_antenatal1,
        'entries': maternal_antenatal1_entries}

    # visit 2: day 3 after enrolment, zero-width visit window
    visit_definitions['1020M'] = {
        'title': 'Antenatal Visit 2 v3',
        'time_point': 10,
        'base_interval': 3,
        'base_interval_unit': 'D',
        'window_lower_bound': 0,
        'window_lower_bound_unit': 'D',
        'window_upper_bound': 0,
        'window_upper_bound_unit': 'D',
        'grouping': 'maternal',
        'visit_tracking_model': MaternalVisit,
        'schedule': 'Antenatal Visit',
        'instructions': 'V3',
        'requisitions': maternal_requisition_antenatal2,
        'entries': maternal_antenatal2_entries}

# make the schedule discoverable by the edc framework
site_visit_schedules.register(AntenatalVisitScheduleV3)
|
botswana-harvard/tshilo-dikotla
|
td_maternal/visit_schedule/antenatal_visits_v3.py
|
Python
|
gpl-2.0
| 2,040
|
[
"VisIt"
] |
16d2e0664b88bec930261c51f0cf1b77ca5ccfc6dfedaa298f3577cf947e7de2
|
#!/usr/bin/env python2
# Copyright (C) 2016
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
############################################
# #
# ESPResSo++ Python script for H-Adress Water #
# simulation based on Gromacs topology #
# #
############################################
import math
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import logging
from espressopp import Real3D, Int3D
from espressopp.tools import gromacs
from espressopp.tools import decomp
from espressopp.tools import timers
# This example reads in a gromacs water system (SPC/Fw) treated with reaction field. See the corresponding gromacs grompp.mdp paramter file.
# Output of gromacs energies and esp energies should be the same
# For H-Adress, special interactions and domain decomposition have to be defined. The gromacs parser has an option to create Adress interactions instead of standard ones
# In the current implementation only one type of atomistic potential can be set for each interaction(template). This makes it necessarry to create two interaction templates (one for coulomb, one for lennard-jones) and leave the coarse-grained interaction unset in one of them.
# simulation parameters (nvt = False is nve)
steps = 100000
check = steps/1        # number of energy-output points (here: every step batch)
timestep = 0.0005
# parameters to convert GROMACS tabulated potential file
sigma = 1.0
epsilon = 1.0
c6 = 1.0
c12 = 1.0
# H-AdResS
rc = 1.3  # cutoff coarse-grained potential
rca = 0.9  # cutoff atomistic potential
skin = 0.2
# parameters for size of AdResS dimensions
ex_size = 1.5
hy_size = 2.0
# GROMACS setup files
grofile = "conf.gro"
topfile = "topol.top"
# this calls the gromacs parser for processing the top file (and included files) and the conf file
# The variables at the beginning defaults, types, etc... can be found by calling
# gromacs.read(grofile,topfile) without return values. It then prints out the variables to be unpacked
defaults, types, atomtypes, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, vx, vy, vz, resname, resid, Lx, Ly, Lz =gromacs.read(grofile,topfile)
# overwrite coordinates/velocities with an equilibrated configuration
dummy1, dummy2, x, y, z, vx, vy, vz, dummy3, dummy4, dummy5 = espressopp.tools.readxyz("equilibrated_conf.xyz")
# particles, geometry, density
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
# set up the system
sys.stdout.write('Setting up simulation ...\n')
system = espressopp.System()
# random number generator, seeded from the wall clock
xs = time.time()
seed = int(xs % int(xs) * 10000000000)
rng = espressopp.esutil.RNG()
rng.seed(seed)
system.rng = rng
# periodic orthorhombic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
# communication, storage and cell/node grid (AdResS-aware decomposition)
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
# create the AdResS verlet list; the atomistic region is a slab centred in the box
verletlist = espressopp.VerletListAdress(system, cutoff=rc, adrcut=rc,
                                dEx=ex_size, dHy=hy_size,
                                adrCenter=[Lx/2, Ly/2, Lz/2])
# add particles to the system and then decompose
props = ['id', 'pos', 'v', 'f', 'type', 'mass', 'q', 'adrat']
allParticlesAT = []
allParticles = []
tuples = []
# prepare AT particles (adrat flag = 1 marks an atomistic particle)
for pid in range(num_particles):
    part = [pid + 1, Real3D(x[pid], y[pid], z[pid]),
            Real3D(vx[pid],vy[pid], vz[pid]), Real3D(0, 0, 0),
            types[pid], masses[pid], charges[pid], 1]
    allParticlesAT.append(part)
# one CG particle per water molecule (3 atoms each)
num_particlesCG = len(x)/3
typeCG=0
# create CG particles
for pidCG in range(num_particlesCG):
    # we put CG molecule in first atom, later CG molecules will be positioned in the center
    #cmp = espressopp.tools.AdressSetCG(3, pidCG, allParticlesAT)
    # Preparation of tuples (tuples define, which atoms belong to which CG molecules)
    tmptuple = [pidCG+num_particles+1]
    for pidAT2 in range(3):
        pid = pidCG*3+pidAT2
        tmptuple.append((allParticlesAT[pid])[0])
    firsParticleId=tmptuple[1]
    cmp=allParticlesAT[firsParticleId-1][1]
    # CG type is one past the largest atomistic type id
    typeCG=max(types)+1
    # append CG particles
    allParticles.append([pidCG+num_particles+1, # CG particle has to be added first!
                         Real3D(cmp[0], cmp[1], cmp[2]), # pos
                         Real3D(0, 0, 0), # vel
                         Real3D(0, 0, 0), # force
                         typeCG, 18.0154, 0.0, 0]) # type, mass, q, is not AT particle
    # append AT particles
    for pidAT in range(3):
        pid = pidCG*3+pidAT
        allParticles.append([(allParticlesAT[pid])[0], # now the AT particles can be added
                             (allParticlesAT[pid])[1], # pos
                             (allParticlesAT[pid])[2], # vel
                             (allParticlesAT[pid])[3], # force
                             (allParticlesAT[pid])[4], # type
                             (allParticlesAT[pid])[5], # mass
                             (allParticlesAT[pid])[6], # q
                             (allParticlesAT[pid])[7]]) # is AT particle
    # append tuple to tuplelist
    tuples.append(tmptuple)
system.storage.addParticles(allParticles, *props)
# create FixedTupleList object and add the tuples (CG molecule <-> its atoms)
ftpl = espressopp.FixedTupleListAdress(system.storage)
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
system.storage.decompose()
# set up LJ interaction according to the parameters read from the .top file
ljinteraction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist,rca, hadress=True, ftpl=ftpl)
# set up angle interactions according to the parameters read from the .top file
angleinteractions=gromacs.setAngleInteractionsAdress(system, angletypes, angletypeparams, ftpl)
# set up coulomb interactions according to the parameters read from the .top file
# !! Warning: this only works for reaction-field now!
qq_interactions=gromacs.setCoulombInteractions(system, verletlist, rca, types, epsilon1=1, epsilon2=80, kappa=0, hadress=True, ftpl=ftpl)
# load CG interaction from table (convert the GROMACS .xvg table first)
fe="table_CG_CG.tab"
gromacs.convertTable("table_CG_CG.xvg", fe, 1, 1, 1, 1)
potCG = espressopp.interaction.Tabulated(itype=3, filename=fe, cutoff=rca) # CG
# set the CG potential. There are two non-bonded interactions, we pick only the first one
for n in range(system.getNumberOfInteractions()):
    interaction=system.getInteraction(n)
    if interaction.bondType() == espressopp.interaction.Nonbonded:
        print "Setting CG interaction", typeCG
        interaction.setPotentialCG(type1=typeCG, type2=typeCG, potential=potCG)
        break
# set up bonded interactions according to the parameters read from the .top file
bondedinteractions=gromacs.setBondedInteractionsAdress(system, bondtypes, bondtypeparams, ftpl)
# exclusions, i.e. pairs of atoms not considered for the non-bonded part.
# Those are defined either by bonds (which automatically generate an
# exclusion) or by the nregxcl variable
verletlist.exclude(exclusions)
# add VelocityVerlet Integrator
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.dt = timestep
# add Langevin Thermostat (AdResS-aware)
langevin = espressopp.integrator.LangevinThermostat(system)
langevin.gamma = 2.0
langevin.temperature = 2.4942 # kT in gromacs units
langevin.adress = True
integrator.addExtension(langevin)
# add AdResS extension (force interpolation between AT and CG regions)
adress = espressopp.integrator.Adress(system,verletlist,ftpl)
integrator.addExtension(adress)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
espressopp.tools.AdressDecomp(system, integrator)
# print simulation parameters
print ''
print 'number of particles =', num_particles
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis observables
configurations = espressopp.analysis.Configurations(system)
configurations.gather()
temperature = espressopp.analysis.Temperature(system)
pressure = espressopp.analysis.Pressure(system)
pressureTensor = espressopp.analysis.PressureTensor(system)
# energy-output format: time then the individual energy contributions
print "i*timestep, T, Eb, EAng, ELj, EQQ, Ek, Etotal"
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8f\n'
start_time = time.clock()
outfile = open("esp.dat", "w")
# write a snapshot of the system
espressopp.tools.psfwrite("system.psf", system, typenames={0:'H', 1:'O', 2:'CG'})
espressopp.tools.pdbwrite("system.pdb", system, append=False, typenames={0:'H', 1:'O', 2:'CG'})
# main loop: compute and log energies, then integrate the next batch of steps
for i in range(check):
    T = temperature.compute()
    P = pressure.compute()
    Eb = 0
    EAng = 0
    # sum bonded and angular contributions over all interaction objects
    for bd in bondedinteractions.values(): Eb+=bd.computeEnergy()
    for ang in angleinteractions.values(): EAng+=ang.computeEnergy()
    ELj= ljinteraction.computeEnergy()
    EQQ= qq_interactions.computeEnergy()
    # kinetic energy from equipartition: 3/2 kT per particle (T is in kT units)
    Ek = 0.5 * T * (3 * num_particles)
    Etotal = Ek+Eb+EAng+EQQ+ELj
    outfile.write(fmt%(i*steps/check*timestep, T, Eb, EAng, ELj, EQQ, Ek, Etotal))
    print (fmt%(i*steps/check*timestep, T, Eb, EAng, ELj, EQQ, Ek, Etotal))
    integrator.run(steps/check) # print out every steps/check steps
# simulation information
end_time = time.clock()
# bug fix: the Verlet list object is named 'verletlist'; the original line
# referenced an undefined name 'vl' and raised NameError here
sys.stdout.write('Neighbor list builds = %d\n' % verletlist.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
|
acfogarty/espressopp
|
examples/adress/hadress_water/water.py
|
Python
|
gpl-3.0
| 10,346
|
[
"ESPResSo",
"Gromacs"
] |
98b2d06e0ded28bd0d1bc892e59ac97594fbced579ca748b4838e90ed0ccc8fe
|
########################################################################
# File : ExecutorReactor.py
# Author : Adria Casajus
########################################################################
"""
DIRAC class to execute Executors
Executors are an active part of DIRAC.
All DIRAC executors must inherit from the basic class ExecutorModule
In the most common case, DIRAC Executors are executed using the dirac-executor command.
dirac-execuot accepts a list positional arguments.
dirac-executo then:
- produces a instance of ExecutorReactor
Executor modules must be placed under the Executor directory of a DIRAC System.
DIRAC Systems are called XXXSystem where XXX is the [DIRAC System Name], and
must inherit from the base class ExecutorModule
"""
import time
import threading
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.DISET.MessageClient import MessageClient
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Base.private.ModuleLoader import ModuleLoader
from DIRAC.Core.Base.ExecutorModule import ExecutorModule
class ExecutorReactor:
class AliveLock:
    """Counts live mind connections; lockUntilAllDead() blocks the caller
    until the count drops below one (i.e. every connection has died)."""

    def __init__(self):
        # number of currently alive connections, guarded by the condition
        self.__alive = 0
        self.__cond = threading.Condition(threading.Lock())

    def alive(self):
        # register one more live connection
        self.__cond.acquire()
        self.__alive += 1
        self.__cond.release()

    def dead(self):
        # one connection gone; wake a waiter so it can re-check the count
        self.__cond.acquire()
        self.__alive -= 1
        self.__cond.notify()
        self.__cond.release()

    def lockUntilAllDead(self):
        # wait with a 1 s timeout so a missed notify cannot hang forever
        self.__cond.acquire()
        while True:
            if self.__alive < 1:
                break
            self.__cond.wait(1)
        self.__cond.release()
class MindCluster:
def __init__(self, mindName, aliveLock):
    # mindName: name of the mind service this cluster connects to
    # aliveLock: shared AliveLock tracking live mind connections
    self.__mindName = mindName
    self.__modules = {}
    self.__maxTasks = 1
    self.__reconnectSleep = 1
    self.__reconnectRetries = 10
    self.__extraArgs = {}
    # per-module pool of reusable executor instances, guarded by __instanceLock
    self.__instances = {}
    self.__instanceLock = threading.Lock()
    self.__aliveLock = aliveLock
def updateMaxTasks(self, mt):
    # only ever raise the cluster-wide task cap, never lower it
    self.__maxTasks = max(self.__maxTasks, mt)
def addModule(self, name, exeClass):
    """Register executor class *exeClass* under *name* and fold its
    MaxTasks/ReconnectSleep/ReconnectRetries options into the
    cluster-wide maxima."""
    self.__modules[name] = exeClass
    self.__maxTasks = max(self.__maxTasks, exeClass.ex_getOption("MaxTasks", 0))
    self.__reconnectSleep = max(self.__reconnectSleep, exeClass.ex_getOption("ReconnectSleep", 0))
    self.__reconnectRetries = max(self.__reconnectRetries, exeClass.ex_getOption("ReconnectRetries", 0))
    self.__extraArgs[name] = exeClass.ex_getExtraArguments()
def connect(self):
    """Open the message connection to the mind and subscribe the task and
    disconnect callbacks; mark the cluster alive on success."""
    self.__msgClient = MessageClient(self.__mindName)
    self.__msgClient.subscribeToMessage("ProcessTask", self.__processTask)
    self.__msgClient.subscribeToDisconnect(self.__disconnected)
    result = self.__msgClient.connect(
        executorTypes=list(self.__modules), maxTasks=self.__maxTasks, extraArgs=self.__extraArgs
    )
    if result["OK"]:
        self.__aliveLock.alive()
        gLogger.info("Connected to %s" % self.__mindName)
    return result
def __disconnected(self, msgClient):
    """Disconnect callback: retry the mind connection indefinitely,
    sleeping between attempts."""
    retryCount = 0
    while True:
        gLogger.notice("Trying to reconnect to %s" % self.__mindName)
        result = self.__msgClient.connect(
            executorTypes=list(self.__modules), maxTasks=self.__maxTasks, extraArgs=self.__extraArgs
        )
        if result["OK"]:
            # NOTE(review): alive() here appears meant to re-balance the
            # alive() fired on the failure path once the retry budget was
            # reached -- confirm against AliveLock accounting
            if retryCount >= self.__reconnectRetries:
                self.__aliveLock.alive()
            gLogger.notice("Reconnected to %s" % self.__mindName)
            return S_OK()
        retryCount += 1
        if retryCount == self.__reconnectRetries:
            self.__aliveLock.alive()
        gLogger.info("Connect error failed: %s" % result["Message"])
        gLogger.notice("Failed to reconnect. Sleeping for %d seconds" % self.__reconnectSleep)
        time.sleep(self.__reconnectSleep)
def __storeInstance(self, modName, modObj):
    # return an executor instance to the per-module reuse pool
    self.__instanceLock.acquire()
    try:
        self.__instances[modName].append(modObj)
    finally:
        self.__instanceLock.release()
def __getInstance(self, moduleName):
    """Pop a pooled executor instance for *moduleName*, or instantiate a
    new one.  Returns S_OK(instance), or S_ERROR for an unknown module."""
    self.__instanceLock.acquire()
    try:
        if moduleName not in self.__instances:
            self.__instances[moduleName] = []
        try:
            return S_OK(self.__instances[moduleName].pop(0))
        except IndexError:
            # pool empty: fall through and build a fresh instance
            pass
    finally:
        self.__instanceLock.release()
    try:
        modObj = self.__modules[moduleName]
    except KeyError:
        # bug fix: the original left the %s placeholder unfilled, so the
        # error message literally read "Unknown %s executor"
        return S_ERROR("Unknown %s executor" % moduleName)
    modInstance = modObj()
    return S_OK(modInstance)
def __sendExecutorError(self, eType, taskId, errMsg):
    # report a framework-level failure for taskId back to the mind
    result = self.__msgClient.createMessage("ExecutorError")
    if not result["OK"]:
        return result
    msgObj = result["Value"]
    msgObj.taskId = taskId
    msgObj.errorMsg = errMsg
    msgObj.eType = eType
    return self.__msgClient.sendMessage(msgObj)
def __processTask(self, msgObj):
    """Handle one ProcessTask message: run the task through the requested
    executor and send TaskDone/TaskFreeze/TaskError back to the mind."""
    eType = msgObj.eType
    taskId = msgObj.taskId
    taskStub = msgObj.taskStub
    result = self.__moduleProcess(eType, taskId, taskStub)
    if not result["OK"]:
        return self.__sendExecutorError(eType, taskId, result["Message"])

    msgName, taskStub, extra = result["Value"]
    result = self.__msgClient.createMessage(msgName)
    if not result["OK"]:
        return self.__sendExecutorError(
            eType, taskId, "Can't generate %s message: %s" % (msgName, result["Message"])
        )
    gLogger.verbose("Task %s: Sending %s" % (str(taskId), msgName))
    msgObj = result["Value"]
    msgObj.taskId = taskId
    msgObj.taskStub = taskStub
    # 'extra' carries the error text or the freeze time, depending on msgName
    if msgName == "TaskError":
        msgObj.errorMsg = extra
        msgObj.eType = eType
    elif msgName == "TaskFreeze":
        msgObj.freezeTime = extra
    return self.__msgClient.sendMessage(msgObj)
def __moduleProcess(self, eType, taskId, taskStub, fastTrackLevel=0):
    """Execute *taskStub* in an instance of executor *eType*.

    :return: S_OK((msgName, taskStub, extra)) where msgName is the reply to
             send to the mind, or S_ERROR when no instance could be obtained.
    """
    result = self.__getInstance(eType)
    if not result["OK"]:
        return result
    modInstance = result["Value"]
    try:
        result = modInstance._ex_processTask(taskId, taskStub)
    except Exception as excp:
        gLogger.exception("Error while processing task %s" % taskId, lException=excp)
        return S_ERROR("Error processing task %s: %s" % (taskId, excp))
    # The instance goes back to the pool even when the task itself failed
    self.__storeInstance(eType, modInstance)
    if not result["OK"]:
        return S_OK(("TaskError", taskStub, "Error: %s" % result["Message"]))
    taskStub, freezeTime, fastTrackType = result["Value"]
    if freezeTime:
        return S_OK(("TaskFreeze", taskStub, freezeTime))
    if fastTrackType:
        if fastTrackLevel < 10 and fastTrackType in self.__modules:
            # Re-dispatch locally (bounded to 10 hops) instead of doing a
            # round-trip through the mind.
            gLogger.notice("Fast tracking task %s to %s" % (taskId, fastTrackType))
            return self.__moduleProcess(fastTrackType, taskId, taskStub, fastTrackLevel + 1)
        else:
            gLogger.notice("Stopping %s fast track. Sending back to the mind" % (taskId))
    return S_OK(("TaskDone", taskStub, True))
#####
# Start of ExecutorReactor
#####

def __init__(self):
    # AliveLock / MindCluster are presumably nested helper classes declared
    # earlier in this class -- defined outside this view, TODO confirm.
    self.__aliveLock = self.AliveLock()
    self.__executorModules = {}  # executor name -> loader info dict
    self.__codeModules = {}
    self.__minds = {}            # mind url -> MindCluster
    self.__loader = ModuleLoader("Executor", PathFinder.getExecutorSection, ExecutorModule)
def loadModules(self, modulesList, hideExceptions=False):
    """Load every executor module named in *modulesList* via the module loader.

    :return: S_OK() on success, the loader's S_ERROR otherwise
    """
    loadResult = self.__loader.loadModules(modulesList, hideExceptions=hideExceptions)
    if not loadResult["OK"]:
        return loadResult
    self.__executorModules = self.__loader.getModules()
    return S_OK()
# Go!
def go(self):
    """Initialize every loaded executor, group them into mind clusters,
    connect each cluster and then block until all connections are gone."""
    for name in self.__executorModules:
        exeClass = self.__executorModules[name]["classObj"]
        result = exeClass._ex_initialize(name, self.__executorModules[name]["loadName"])
        if not result["OK"]:
            return result
        mind = exeClass.ex_getMind()
        if mind not in self.__minds:
            self.__minds[mind] = self.MindCluster(mind, self.__aliveLock)
        mc = self.__minds[mind]
        mc.addModule(name, exeClass)
    for mindName in self.__minds:
        gLogger.info("Trying to connect to %s" % mindName)
        result = self.__minds[mindName].connect()
        if not result["OK"]:
            return result
    # Block the main thread until every mind connection has died
    # (AliveLock semantics defined outside this view -- TODO confirm)
    self.__aliveLock.lockUntilAllDead()
    return S_OK()
|
ic-hep/DIRAC
|
src/DIRAC/Core/Base/ExecutorReactor.py
|
Python
|
gpl-3.0
| 9,476
|
[
"DIRAC"
] |
0b668a39a378107780c468e433e6605169a56c31035de452736573844c09c0da
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.472434
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/movielistrss.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class movielistrss(Template):
    """Cheetah-generated template: renders the Enigma2 movie list as an
    RSS 2.0 feed.

    NOTE: this module is autogenerated by Cheetah from movielistrss.tmpl
    (see __CHEETAH_src__ above) -- edit the template, not this file.
    Python 2 code (``has_key``, ``u''`` literals).
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(movielistrss, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        _orig_filter_38058651 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
\t<channel>
\t\t<title>Enigma2 Movielist</title>
\t\t<link>http://</link>
\t\t<description>A list of all recordings</description>
\t\t<generator>OpenWebif</generator>
''')
        # One <item> per recording in the $movies search-list entry
        for movie in VFFSL(SL,"movies",True): # generated from line 10, col 3
            write(u'''\t\t<item>
\t\t\t<title>''')
            _v = VFFSL(SL,"movie.eventname",True) # u'$movie.eventname' on line 12, col 11
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.eventname')) # from line 12, col 11.
            write(u'''</title>
\t\t\t<description>
\t\t\t\tService: ''')
            _v = VFFSL(SL,"movie.servicename",True) # u'$movie.servicename' on line 14, col 14
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.servicename')) # from line 14, col 14.
            write(u'''<br />
\t\t\t\t''')
            _v = VFFSL(SL,"movie.description",True) # u'$movie.description' on line 15, col 5
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.description')) # from line 15, col 5.
            write(u'''<br />
\t\t\t\t''')
            _v = VFFSL(SL,"movie.descriptionExtended",True) # u'$movie.descriptionExtended' on line 16, col 5
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.descriptionExtended')) # from line 16, col 5.
            write(u'''<br />
\t\t\t\t''')
            _v = VFFSL(SL,"movie.filename",True) # u'$movie.filename' on line 17, col 5
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.filename')) # from line 17, col 5.
            write(u'''<br />
\t\t\t\t''')
            _v = VFFSL(SL,"movie.tags",True) # u'$movie.tags' on line 18, col 5
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.tags')) # from line 18, col 5.
            write(u'''<br />
\t\t\t\t''')
            _v = VFFSL(SL,"movie.fullname",True) # u'$movie.fullname' on line 19, col 5
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.fullname')) # from line 19, col 5.
            write(u'''
\t\t\t</description>
\t\t\t<link>http://''')
            _v = VFFSL(SL,"host",True) # u'$host' on line 21, col 17
            if _v is not None: write(_filter(_v, rawExpr=u'$host')) # from line 21, col 17.
            write(u'''/file?file=''')
            _v = VFFSL(SL,"quote",False)(VFFSL(SL,"movie.filename",True)) # u'$quote($movie.filename)' on line 21, col 33
            if _v is not None: write(_filter(_v, rawExpr=u'$quote($movie.filename)')) # from line 21, col 33.
            write(u'''</link>
\t\t\t<enclosure type="video/mpeg" url="http://''')
            _v = VFFSL(SL,"host",True) # u'$host' on line 22, col 45
            if _v is not None: write(_filter(_v, rawExpr=u'$host')) # from line 22, col 45.
            write(u'''/file?file=''')
            _v = VFFSL(SL,"quote",False)(VFFSL(SL,"movie.filename",True)) # u'$quote($movie.filename)' on line 22, col 61
            if _v is not None: write(_filter(_v, rawExpr=u'$quote($movie.filename)')) # from line 22, col 61.
            write(u'''"/>
\t\t\t<pubDate>''')
            _v = VFFSL(SL,"movie.begintime",True) # u'$movie.begintime' on line 23, col 13
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.begintime')) # from line 23, col 13.
            write(u'''</pubDate>
\t\t\t<category>''')
            _v = VFFSL(SL,"movie.servicename",True) # u'$movie.servicename' on line 24, col 14
            if _v is not None: write(_filter(_v, rawExpr=u'$movie.servicename')) # from line 24, col 14.
            write(u'''</category>
\t\t\t<author>Dreambox Enigma2</author>
\t\t</item>
''')
        write(u'''\t</channel>
</rss>
''')
        _filter = self._CHEETAH__currentFilter = _orig_filter_38058651

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_movielistrss= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing to the generated class exactly once
# (the hasattr guard makes repeated imports a no-op).
if not hasattr(movielistrss, '_initCheetahAttributes'):
    templateAPIClass = getattr(movielistrss, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(movielistrss)


# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=movielistrss()).run()
|
pli3/Openwebif
|
plugin/controllers/views/web/movielistrss.py
|
Python
|
gpl-2.0
| 8,420
|
[
"VisIt"
] |
a4c951e701cd430c3b0b16efd2b59ca1d1677f14f52b72cb265a4506f2fe31ae
|
# $Id$
#
# Copyright (c) 2007, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions
from rdkit import Geometry
from rdkit import RDConfig
import unittest
import os,sys
import cPickle
def feq(v1, v2, tol2=1e-4):
    """Float-equality helper: True when v1 and v2 differ by at most tol2."""
    delta = v1 - v2
    if delta < 0:
        delta = -delta
    return delta <= tol2
def ptEq(pt1, pt2, tol=1e-4):
    """3D-point equality: x, y and z must each agree to within *tol*.

    (The feq helper is inlined here: |a - b| <= tol per coordinate.)
    """
    pairs = ((pt1.x, pt2.x), (pt1.y, pt2.y), (pt1.z, pt2.z))
    return all(abs(a - b) <= tol for a, b in pairs)
class TestCase(unittest.TestCase) :
    """Unit tests for the rdChemReactions Python wrapper: reaction
    construction, SMARTS/MDL parsing, execution, validation, serialization
    and template matching.

    Python 2 era code (cPickle, file(), print statements).
    NOTE(review): whitespace inside the embedded RXN/MOL blocks below looks
    collapsed; the MDL format is column-sensitive -- verify against upstream.
    """

    def setUp(self):
        # Directory holding the reference .rxn files for the MDL parser tests
        self.dataDir = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','ChemReactions','testData')

    def test1Basics(self):
        rxn = rdChemReactions.ChemicalReaction()
        self.failUnless(rxn.GetNumReactantTemplates()==0)
        self.failUnless(rxn.GetNumProductTemplates()==0)
        r1= Chem.MolFromSmarts('[C:1](=[O:2])O')
        rxn.AddReactantTemplate(r1)
        self.failUnless(rxn.GetNumReactantTemplates()==1)
        r1= Chem.MolFromSmarts('[N:3]')
        rxn.AddReactantTemplate(r1)
        self.failUnless(rxn.GetNumReactantTemplates()==2)
        r1= Chem.MolFromSmarts('[C:1](=[O:2])[N:3]')
        rxn.AddProductTemplate(r1)
        self.failUnless(rxn.GetNumProductTemplates()==1)
        reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
        ps = rxn.RunReactants(reacts)
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)
        self.failUnless(ps[0][0].GetNumAtoms()==3)
        # RunReactants must accept a list as well as a tuple
        ps = rxn.RunReactants(list(reacts))
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)
        self.failUnless(ps[0][0].GetNumAtoms()==3)

    def test2DaylightParser(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]')
        self.failUnless(rxn)
        self.failUnless(rxn.GetNumReactantTemplates()==2)
        self.failUnless(rxn.GetNumProductTemplates()==1)
        self.failUnless(rxn._getImplicitPropertiesFlag())
        reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
        ps = rxn.RunReactants(reacts)
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)
        self.failUnless(ps[0][0].GetNumAtoms()==3)
        reacts = (Chem.MolFromSmiles('CC(=O)OC'),Chem.MolFromSmiles('CN'))
        ps = rxn.RunReactants(reacts)
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)
        self.failUnless(ps[0][0].GetNumAtoms()==5)

    def test3MDLParsers(self):
        fileN = os.path.join(self.dataDir,'AmideBond.rxn')
        rxn = rdChemReactions.ReactionFromRxnFile(fileN)
        self.failUnless(rxn)
        # .rxn files, unlike SMARTS, do not set the implicit-properties flag
        self.failIf(rxn._getImplicitPropertiesFlag())
        self.failUnless(rxn.GetNumReactantTemplates()==2)
        self.failUnless(rxn.GetNumProductTemplates()==1)
        reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
        ps = rxn.RunReactants(reacts)
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)
        self.failUnless(ps[0][0].GetNumAtoms()==3)
        rxnBlock = file(fileN,'r').read()
        rxn = rdChemReactions.ReactionFromRxnBlock(rxnBlock)
        self.failUnless(rxn)
        self.failUnless(rxn.GetNumReactantTemplates()==2)
        self.failUnless(rxn.GetNumProductTemplates()==1)
        reacts = (Chem.MolFromSmiles('C(=O)O'),Chem.MolFromSmiles('N'))
        ps = rxn.RunReactants(reacts)
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)
        self.failUnless(ps[0][0].GetNumAtoms()==3)

    def test4ErrorHandling(self):
        # Malformed SMARTS on either side of the arrow must raise ValueError
        self.failUnlessRaises(ValueError,lambda x='[C:1](=[O:2])Q.[N:3]>>[C:1](=[O:2])[N:3]':rdChemReactions.ReactionFromSmarts(x))
        self.failUnlessRaises(ValueError,lambda x='[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]Q':rdChemReactions.ReactionFromSmarts(x))
        self.failUnlessRaises(ValueError,lambda x='[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]>>CC':rdChemReactions.ReactionFromSmarts(x))
        # Header declares 3 reactant templates but only 2 $MOL blocks follow
        block="""$RXN
ISIS 082120061354
3 1
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
        self.failUnlessRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))
        # First mol declares 4 atoms but only 3 atom lines are present
        block="""$RXN
ISIS 082120061354
2 1
$MOL
-ISIS- 08210613542D
4 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
        #self.failUnlessRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))
        # Product mol declares 1 bond but 2 bond lines are present
        block="""$RXN
ISIS 082120061354
2 1
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 1 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
        #self.failUnlessRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))

    def test5Validation(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]')
        self.failUnless(rxn)
        self.failUnless(rxn.Validate()==(0,0))
        # Validate() returns (numWarnings, numErrors)
        rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:1])O.[N:3]>>[C:1](=[O:2])[N:3]')
        self.failUnless(rxn)
        self.failUnless(rxn.Validate()==(1,1))
        rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])[O:4].[N:3]>>[C:1](=[O:2])[N:3]')
        self.failUnless(rxn)
        self.failUnless(rxn.Validate()==(1,0))
        rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3][C:5]')
        self.failUnless(rxn)
        self.failUnless(rxn.Validate()==(1,0))

    def test6Exceptions(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C:1]Cl>>[C:1]')
        self.failUnless(rxn)
        # Wrong number of reactants must raise
        self.failUnlessRaises(ValueError,lambda x=rxn:x.RunReactants(()))
        self.failUnlessRaises(ValueError,lambda x=rxn:x.RunReactants((Chem.MolFromSmiles('CC'),Chem.MolFromSmiles('C'))))
        ps=rxn.RunReactants((Chem.MolFromSmiles('CCCl'),))
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)

    def _test7Leak(self):
        # Disabled (leading underscore): manual memory-leak soak test.
        # NOTE(review): range(1e5) passes a float; use int(1e5) if re-enabling.
        rxn = rdChemReactions.ReactionFromSmarts('[C:1]Cl>>[C:1]')
        self.failUnless(rxn)
        print 'running: '
        for i in range(1e5):
            ps=rxn.RunReactants((Chem.MolFromSmiles('CCCl'),))
            self.failUnless(len(ps)==1)
            self.failUnless(len(ps[0])==1)
            if not i%1000: print i

    def test8Properties(self):
        rxn = rdChemReactions.ReactionFromSmarts('[O:1]>>[O:1][3#0]')
        self.failUnless(rxn)
        ps=rxn.RunReactants((Chem.MolFromSmiles('CO'),))
        self.failUnless(len(ps)==1)
        self.failUnless(len(ps[0])==1)
        Chem.SanitizeMol(ps[0][0])
        self.failUnlessEqual(ps[0][0].GetAtomWithIdx(1).GetIsotope(),3);

    def test9AromaticityTransfer(self):
        # this was issue 2664121
        mol = Chem.MolFromSmiles('c1ccc(C2C3(Cc4c(cccc4)C2)CCCC3)cc1')
        rxn = rdChemReactions.ReactionFromSmarts('[A:1]1~[*:2]~[*:3]~[*:4]~[*:5]~[A:6]-;@1>>[*:1]~[*:2]~[*:3]~[*:4]~[*:5]~[*:6]')
        products = rxn.RunReactants([mol])
        self.failUnlessEqual(len(products),6)
        for p in products:
            self.failUnlessEqual(len(p),1)
            Chem.SanitizeMol(p[0])

    def test10DotSeparation(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>[C:1]1[O:2].[N:3]1')
        mol = Chem.MolFromSmiles('C1ON1')
        products = rxn.RunReactants([mol])
        self.failUnlessEqual(len(products),1)
        for p in products:
            self.failUnlessEqual(len(p),1)
            self.failUnlessEqual(p[0].GetNumAtoms(),3)
            self.failUnlessEqual(p[0].GetNumBonds(),2)

    def test11ImplicitProperties(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C:1]O>>[C:1]')
        mol = Chem.MolFromSmiles('CCO')
        products = rxn.RunReactants([mol])
        self.failUnlessEqual(len(products),1)
        for p in products:
            self.failUnlessEqual(len(p),1)
            self.failUnlessEqual(Chem.MolToSmiles(p[0]),'CC')
        mol2 = Chem.MolFromSmiles('C[CH-]O')
        products = rxn.RunReactants([mol2])
        self.failUnlessEqual(len(products),1)
        for p in products:
            self.failUnlessEqual(len(p),1)
            self.failUnlessEqual(Chem.MolToSmiles(p[0]),'[CH2-]C')
        # With implicit properties off, the charge is no longer carried over
        rxn._setImplicitPropertiesFlag(False)
        products = rxn.RunReactants([mol])
        self.failUnlessEqual(len(products),1)
        for p in products:
            self.failUnlessEqual(len(p),1)
            self.failUnlessEqual(Chem.MolToSmiles(p[0]),'CC')
        products = rxn.RunReactants([mol2])
        self.failUnlessEqual(len(products),1)
        for p in products:
            self.failUnlessEqual(len(p),1)
            self.failUnlessEqual(Chem.MolToSmiles(p[0]),'CC')

    def test12Pickles(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>[C:1]1[O:2].[N:3]1')
        pkl = cPickle.dumps(rxn)
        rxn = cPickle.loads(pkl)
        mol = Chem.MolFromSmiles('C1ON1')
        products = rxn.RunReactants([mol])
        self.failUnlessEqual(len(products),1)
        for p in products:
            self.failUnlessEqual(len(p),1)
            self.failUnlessEqual(p[0].GetNumAtoms(),3)
            self.failUnlessEqual(p[0].GetNumBonds(),2)
        # Round-trip through the binary serialization as well
        rxn = rdChemReactions.ChemicalReaction(rxn.ToBinary())
        products = rxn.RunReactants([mol])
        self.failUnlessEqual(len(products),1)
        for p in products:
            self.failUnlessEqual(len(p),1)
            self.failUnlessEqual(p[0].GetNumAtoms(),3)
            self.failUnlessEqual(p[0].GetNumBonds(),2)

    def test13GetTemplates(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>[C:1][O:2].[N:3]')
        r1 = rxn.GetReactantTemplate(0)
        sma=Chem.MolToSmarts(r1)
        self.failUnlessEqual(sma,'[C:1]1-,:[O:2]-,:[N:3]-,:1')
        p1 = rxn.GetProductTemplate(0)
        sma=Chem.MolToSmarts(p1)
        self.failUnlessEqual(sma,'[C:1]-,:[O:2]')
        p2 = rxn.GetProductTemplate(1)
        sma=Chem.MolToSmarts(p2)
        self.failUnlessEqual(sma,'[N:3]')
        # Out-of-range template indices raise ValueError
        self.failUnlessRaises(ValueError,lambda :rxn.GetProductTemplate(2))
        self.failUnlessRaises(ValueError,lambda :rxn.GetReactantTemplate(1))

    def test14Matchers(self):
        rxn = rdChemReactions.ReactionFromSmarts('[C;!$(C(-O)-O):1](=[O:2])[O;H,-1].[N;!H0:3]>>[C:1](=[O:2])[N:3]')
        self.failUnless(rxn)
        rxn.Initialize()
        self.failUnless(rxn.IsMoleculeReactant(Chem.MolFromSmiles('OC(=O)C')))
        self.failIf(rxn.IsMoleculeReactant(Chem.MolFromSmiles('OC(=O)O')))
        self.failUnless(rxn.IsMoleculeReactant(Chem.MolFromSmiles('CNC')))
        self.failIf(rxn.IsMoleculeReactant(Chem.MolFromSmiles('CN(C)C')))
        self.failUnless(rxn.IsMoleculeProduct(Chem.MolFromSmiles('NC(=O)C')))
        self.failUnless(rxn.IsMoleculeProduct(Chem.MolFromSmiles('CNC(=O)C')))
        self.failIf(rxn.IsMoleculeProduct(Chem.MolFromSmiles('COC(=O)C')))

    def test15Replacements(self):
        # Named abbreviations in the SMARTS are expanded via `replacements`
        rxn = rdChemReactions.ReactionFromSmarts('[{amine}:1]>>[*:1]-C',
                                                 replacements={'{amine}':'$([N;!H0;$(N-[#6]);!$(N-[!#6;!#1]);!$(N-C=[O,N,S])])'})
        self.failUnless(rxn)
        rxn.Initialize()
        reactants = (Chem.MolFromSmiles('CCN'),)
        ps = rxn.RunReactants(reactants)
        self.failUnlessEqual(len(ps),1)
        self.failUnlessEqual(len(ps[0]),1)
        self.failUnlessEqual(ps[0][0].GetNumAtoms(),4)

    def test16GetReactingAtoms(self):
        rxn = rdChemReactions.ReactionFromSmarts("[O:1][C:2].[N:3]>>[N:1][C:2].[N:3]")
        self.failUnless(rxn)
        rxn.Initialize()
        rAs = rxn.GetReactingAtoms()
        self.failUnlessEqual(len(rAs),2)
        self.failUnlessEqual(len(rAs[0]),1)
        self.failUnlessEqual(len(rAs[1]),0)
        rxn = rdChemReactions.ReactionFromSmarts("[O:1]C>>[O:1]C")
        self.failUnless(rxn)
        rxn.Initialize()
        rAs = rxn.GetReactingAtoms()
        self.failUnlessEqual(len(rAs),1)
        self.failUnlessEqual(len(rAs[0]),2)
        rAs = rxn.GetReactingAtoms(True)
        self.failUnlessEqual(len(rAs),1)
        self.failUnlessEqual(len(rAs[0]),1)

    def test17AddRecursiveQueriesToReaction(self):
        rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
        self.failUnless(rxn)
        rxn.Initialize()
        qs = {'aliphatic':Chem.MolFromSmiles('CC')}
        rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'aliphatic')
        rxn.AddRecursiveQueriesToReaction(qs,'query')
        q = rxn.GetReactantTemplate(0)
        m = Chem.MolFromSmiles('CCOC')
        self.failUnless(m.HasSubstructMatch(q))
        m = Chem.MolFromSmiles('CO')
        self.failIf(m.HasSubstructMatch(q))
        rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
        rxn.Initialize()
        rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'aliphatic')
        labels = rxn.AddRecursiveQueriesToReaction(qs,'query', getLabels=True)
        self.failUnless(len(labels), 1)

    def test18GithubIssue16(self):
        rxn = rdChemReactions.ReactionFromSmarts("[F:1]>>[Cl:1]")
        self.failUnless(rxn)
        rxn.Initialize()
        # A None reactant must raise instead of crashing
        self.failUnlessRaises(ValueError,lambda : rxn.RunReactants((None,)))
if __name__ == '__main__':
unittest.main()
|
rdkit/rdkit-orig
|
Code/GraphMol/ChemReactions/Wrap/testReactionWrapper.py
|
Python
|
bsd-3-clause
| 16,101
|
[
"RDKit"
] |
8fe0f7e47f7cc9d4d24dbeb401bfd017184fcbedd36f48b068b4427562ba29f2
|
"""Galaxy Reports root package -- this is a namespace package."""
__import__( "pkg_resources" ).declare_namespace( __name__ )
|
volpino/Yeps-EURAC
|
lib/galaxy/webapps/__init__.py
|
Python
|
mit
| 126
|
[
"Galaxy"
] |
004b4f55fc989ada6e4d8253c4b30ad52c22f0a955b9c7debcc36508494dd131
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import errno
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.display_functions import *
from ansible.utils.plugins import *
from ansible.utils.su_prompts import *
from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
from ansible.callbacks import display
from ansible.module_utils.splitter import split_args, unquote
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.utils.unicode import to_bytes, to_unicode
import ansible.constants as C
import ast
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import subprocess
import contextlib
from vault import VaultLib
VERBOSITY=0
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
# caching the compilation of the regex used
# to check for lookup calls within data
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
CODE_REGEX = re.compile(r'(?:{%|%})')
try:
# simplejson can be much faster if it's available
import simplejson as json
except ImportError:
import json
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
try:
import builtin
except ImportError:
import __builtin__ as builtin
KEYCZAR_AVAILABLE=False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
    """Return (creating and persisting if needed) the keyczar AES key used
    for accelerated-mode traffic with *hostname*.

    Keys live under C.ACCELERATE_KEYS_DIR with strictly-checked permissions
    and are regenerated once older than two hours.
    """
    # fireball mode is an implementation of ansible firing up zeromq via SSH
    # to use no persistent daemons or key management
    if not KEYCZAR_AVAILABLE:
        raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")

    key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
    if not os.path.exists(key_path):
        os.makedirs(key_path, mode=0700)  # Python 2 octal literal
        os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
    elif not os.path.isdir(key_path):
        raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')

    if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
        raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))

    key_path = os.path.join(key_path, hostname)

    # use new AES keys every 2 hours, which means fireball must not allow running for longer either
    if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
        key = AesKey.Generate()
        # Open with O_CREAT + explicit mode so the file is never world-readable
        fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
        fh = os.fdopen(fd, 'w')
        fh.write(str(key))
        fh.close()
        return key
    else:
        if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
            raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
        fh = open(key_path)
        key = AesKey.Read(fh.read())
        fh.close()
        return key
def encrypt(key, msg):
    """Encrypt *msg* with the keyczar AES *key* (see key_for_hostname)."""
    return key.Encrypt(msg)
def decrypt(key, msg):
    """Decrypt *msg* with the keyczar AES *key*.

    Raises AnsibleError instead of leaking keyczar's exception type.
    """
    try:
        return key.Decrypt(msg)
    except key_errors.InvalidSignatureError:
        raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def read_vault_file(vault_password_file):
"""Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
if vault_password_file:
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass=f.read().strip()
f.close()
except (OSError, IOError), e:
raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
return vault_pass
else:
return None
def err(msg):
    ''' print an error message to stderr '''
    # Python 2 print-redirection syntax
    print >> sys.stderr, msg
def exit(msg, rc=1):
    ''' quit with an error to stderr and a failure code '''
    # NOTE: shadows the builtin exit(); message goes to stderr via err()
    err(msg)
    sys.exit(rc)
def jsonify(result, format=False):
    """Serialize *result* (a dict or None) as JSON text.

    Plain-str values are decoded as UTF-8 (ignoring bad bytes) first; when
    *format* is true the output is pretty-printed with a 4-space indent.
    None serializes to an empty JSON object.
    """
    if result is None:
        return "{}"
    decoded = dict(
        (key, value.decode('utf-8', 'ignore') if type(value) is str else value)
        for key, value in result.items()
    )
    indent = 4 if format else None
    try:
        return json.dumps(decoded, sort_keys=True, indent=indent, ensure_ascii=False)
    except UnicodeDecodeError:
        return json.dumps(decoded, sort_keys=True, indent=indent)
def write_tree_file(tree, hostname, buf):
    ''' write something into treedir/hostname '''
    # TODO: might be nice to append playbook runs per host in a similar way
    # in which case, we'd want append mode.
    target = os.path.join(tree, hostname)
    with open(target, "w+") as fh:
        fh.write(buf)
def is_failed(result):
    ''' is a given JSON result a failed result? '''
    if result.get('rc', 0) != 0:
        return True
    return result.get('failed', False) in (True, 'True', 'true')
def is_changed(result):
    ''' is a given JSON result a changed result? '''
    changed = result.get('changed', False)
    return changed in (True, 'True', 'true')
def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
    """Evaluate a `when:`-style conditional against the *inject* variables.

    *conditional* may be None/'' (always True), a list (all entries must
    hold), a non-string (returned unchanged), or a Jinja2 expression string
    that is templated and then rendered through an if/else probe.
    """
    from ansible.utils import template

    if conditional is None or conditional == '':
        return True

    # a list of conditionals: every one of them must pass
    if isinstance(conditional, list):
        for x in conditional:
            if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
                return False
        return True

    # Python 2 basestring covers both str and unicode
    if not isinstance(conditional, basestring):
        return conditional

    conditional = conditional.replace("jinja2_compare ","")
    # allow variable names
    if conditional in inject and '-' not in str(inject[conditional]):
        conditional = inject[conditional]
    conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
    original = str(conditional).replace("jinja2_compare ","")
    # a Jinja2 evaluation that results in something Python can eval!
    presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
    conditional = template.template(basedir, presented, inject)
    val = conditional.strip()
    if val == presented:
        # the templating failed, meaning most likely a
        # variable was undefined. If we happened to be
        # looking for an undefined variable, return True,
        # otherwise fail
        if "is undefined" in conditional:
            return True
        elif "is defined" in conditional:
            return False
        else:
            raise errors.AnsibleError("error while evaluating conditional: %s" % original)
    elif val == "True":
        return True
    elif val == "False":
        return False
    else:
        raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
def is_executable(path):
    '''is the given path executable (by owner, group, or others)?'''
    mode = os.stat(path)[stat.ST_MODE]
    return (stat.S_IXUSR & mode
            or stat.S_IXGRP & mode
            or stat.S_IXOTH & mode)
def unfrackpath(path):
    '''
    returns a path that is free of symlinks, environment
    variables, relative path traversals and symbols (~)
    example:
    '$HOME/../../var/mail' becomes '/var/spool/mail'
    '''
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    resolved = os.path.realpath(expanded)
    return os.path.normpath(resolved)
def prepare_writeable_dir(tree,mode=0777):
    ''' make sure a directory exists and is writeable

    Returns the normalized, symlink-free path of the directory.
    Raises errors.AnsibleError if it cannot be created or is not writeable.
    '''
    # modify the mode to ensure the owner at least
    # has read/write access to this directory
    mode |= 0700
    # make sure the tree path is always expanded
    # and normalized and free of symlinks
    tree = unfrackpath(tree)
    if not os.path.exists(tree):
        try:
            os.makedirs(tree, mode)
        except (IOError, OSError), e:
            raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
    if not os.access(tree, os.W_OK):
        raise errors.AnsibleError("Cannot write to path %s" % tree)
    return tree
def path_dwim(basedir, given):
    '''
    make relative paths work like folks expect.
    '''
    # strip one level of wrapping single quotes, if present
    if given.startswith("'"):
        given = given[1:-1]
    if given.startswith("/"):
        return os.path.abspath(given)
    if given.startswith("~"):
        return os.path.abspath(os.path.expanduser(given))
    base = "." if basedir is None else basedir
    return os.path.abspath(os.path.join(base, given))
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
    ''' find one file in a directory one level up in a dir named dirname relative to current '''
    # (used by roles code)
    from ansible.utils import template
    basedir = os.path.dirname(original)
    if os.path.islink(basedir):
        # resolve the symlink so the sibling "dirname" directory is looked up
        # next to the real location, not the link
        basedir = unfrackpath(basedir)
        template2 = os.path.join(basedir, dirname, source)
    else:
        template2 = os.path.join(basedir, '..', dirname, source)
    source2 = path_dwim(basedir, template2)
    if os.path.exists(source2):
        return source2
    # fall back to a path relative to the playbook itself
    obvious_local_path = path_dwim(playbook_base, source)
    if os.path.exists(obvious_local_path):
        return obvious_local_path
    if check:
        raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
    return source2 # which does not exist
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
# http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
def role_spec_parse(role_spec):
    # takes a repo and a version like
    # git+http://git.example.com/repos/repo.git,v1.0
    # and returns a dict of properties such as:
    # {
    #   'scm': 'git',
    #   'src': 'http://git.example.com/repos/repo.git',
    #   'version': 'v1.0',
    #   'name': 'repo'
    # }
    spec = role_spec.strip()
    if not spec or spec.startswith("#"):
        # blank line or comment in a requirements file
        return (None, None, None, None)
    default_role_versions = dict(git='master', hg='tip')
    fields = [part.strip() for part in spec.split(',')]
    # assume https://github.com URLs are git+https:// URLs and not
    # tarballs unless they end in '.tar.gz'
    first = fields[0]
    if 'github.com/' in first and not first.startswith("git+") and not first.endswith('.tar.gz'):
        first = 'git+' + first
        fields[0] = first
    if '+' in first:
        (scm, role_url) = first.split('+')
    else:
        scm = None
        role_url = first
    role_version = fields[1] if len(fields) >= 2 else ''
    if len(fields) == 3:
        role_name = fields[2]
    else:
        role_name = repo_url_to_role_name(fields[0])
    if scm and not role_version:
        role_version = default_role_versions.get(scm, '')
    return dict(scm=scm, src=role_url, version=role_version, name=role_name)
def role_yaml_parse(role):
    '''
    Normalize one role entry from a requirements YAML file into a dict with
    at least 'src', 'scm', 'version' and 'name' keys.  Accepts both the old
    single-string form and the new explicit-keys form.
    '''
    if 'role' in role:
        # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
        role_info = role_spec_parse(role['role'])
        if isinstance(role_info, dict):
            # Warning: Slight change in behaviour here. name may be being
            # overloaded. Previously, name was only a parameter to the role.
            # Now it is both a parameter to the role and the name that
            # ansible-galaxy will install under on the local system.
            if 'name' in role and 'name' in role_info:
                del role_info['name']
            role.update(role_info)
    else:
        # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
        if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
            # bare github URLs are treated as git repos, not tarballs
            role["src"] = "git+" + role["src"]
        if '+' in role["src"]:
            (scm, src) = role["src"].split('+')
            role["scm"] = scm
            role["src"] = src
        if 'name' not in role:
            role["name"] = repo_url_to_role_name(role["src"])
        if 'version' not in role:
            role['version'] = ''
    if 'scm' not in role:
        role['scm'] = None
    return role
def json_loads(data):
    ''' parse a JSON string and return a data structure '''
    try:
        loaded = json.loads(data)
    except ValueError,e:
        # re-raise as an ansible error with a friendlier message
        raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
    return loaded
def _clean_data(orig_data, from_remote=False, from_inventory=False):
    ''' remove jinja2 template tags from a string

    Matched pairs of {{ }} / {% %} are neutralized by rewriting both ends
    into jinja2 comment markers {# #} in place; unmatched tokens are left
    untouched.  Non-string input is returned unchanged.
    '''
    if not isinstance(orig_data, basestring):
        return orig_data
    # when the data is marked as having come from a remote, we always
    # replace any print blocks (ie. {{var}}), however when marked as coming
    # from inventory we only replace print blocks that contain a call to
    # a lookup plugin (ie. {{lookup('foo','bar'))}})
    replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
    regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
    with contextlib.closing(StringIO.StringIO(orig_data)) as data:
        # these variables keep track of opening block locations, as we only
        # want to replace matched pairs of print/block tags
        print_openings = []
        block_openings = []
        for mo in regex.finditer(orig_data):
            token = mo.group(0)
            token_start = mo.start(0)
            if token[0] == '{':
                # an opening token: remember where it started
                if token == '{%':
                    block_openings.append(token_start)
                elif token == '{{':
                    print_openings.append(token_start)
            elif token[1] == '}':
                # a closing token: pair it with the most recent opening
                prev_idx = None
                if token == '%}' and block_openings:
                    prev_idx = block_openings.pop()
                elif token == '}}' and print_openings:
                    prev_idx = print_openings.pop()
                if prev_idx is not None:
                    # replace the opening
                    data.seek(prev_idx, os.SEEK_SET)
                    data.write('{#')
                    # replace the closing
                    data.seek(token_start, os.SEEK_SET)
                    data.write('#}')
            else:
                assert False, 'Unhandled regex match'
        return data.getvalue()
def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
    '''
    walk a complex data structure, and use _clean_data() to
    remove any template tags that may exist

    Returns a cleaned copy; the input structure is not modified.
    Raises errors.AnsibleError if neither origin flag is set.
    '''
    if not from_remote and not from_inventory:
        # bug fix: this used to raise errors.AnsibleErrors, which does not
        # exist and produced an AttributeError instead of the intended error
        raise errors.AnsibleError("when cleaning data, you must specify either from_remote or from_inventory")
    if isinstance(orig_data, dict):
        data = orig_data.copy()
        # iterate over a snapshot of the keys: cleaning may rename a key,
        # and mutating a dict while iterating it directly is unsafe
        for key in data.keys():
            new_key = _clean_data_struct(key, from_remote, from_inventory)
            new_val = _clean_data_struct(data[key], from_remote, from_inventory)
            if key != new_key:
                del data[key]
            data[new_key] = new_val
    elif isinstance(orig_data, list):
        data = orig_data[:]
        for i in range(len(data)):
            data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
    elif isinstance(orig_data, basestring):
        data = _clean_data(orig_data, from_remote, from_inventory)
    else:
        data = orig_data
    return data
def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
    ''' this version for module return data only

    Strips leading non-JSON noise (MOTD banners, tty warnings) before parsing.
    With no_exceptions=True a parse failure yields a failed/unparsed result
    dict instead of raising.
    '''
    # NOTE(review): orig_data is assigned but never used below
    orig_data = raw_data
    # ignore stuff like tcgetattr spewage or other warnings
    data = filter_leading_non_json_lines(raw_data)
    try:
        results = json.loads(data)
    except:
        if no_exceptions:
            return dict(failed=True, parsed=False, msg=raw_data)
        else:
            raise
    if from_remote:
        # scrub template tags from data that came back from a managed host
        results = _clean_data_struct(results, from_remote, from_inventory)
    return results
def serialize_args(args):
    '''
    Flattens a dictionary args to a k=v string

    String values are shell-quoted; booleans are stringified.  Values of any
    other type are silently dropped from the output.
    '''
    module_args = ""
    for (k,v) in args.iteritems():
        if isinstance(v, basestring):
            module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
        elif isinstance(v, bool):
            module_args = "%s=%s %s" % (k, str(v), module_args)
    return module_args.strip()
def merge_module_args(current_args, new_args):
    '''
    merges either a dictionary or string of k=v pairs with another string of k=v pairs,
    and returns a new k=v string without duplicates.

    Keys from new_args take precedence over those in current_args.
    '''
    if not isinstance(current_args, basestring):
        raise errors.AnsibleError("expected current_args to be a basestring")
    # we use parse_kv to split up the current args into a dictionary
    final_args = parse_kv(current_args)
    if isinstance(new_args, dict):
        final_args.update(new_args)
    elif isinstance(new_args, basestring):
        new_args_kv = parse_kv(new_args)
        final_args.update(new_args_kv)
    return serialize_args(final_args)
def parse_yaml(data, path_hint=None):
    ''' convert a yaml string to a data structure.  Also supports JSON, ssssssh!!!

    :arg path_hint: optional file name, prepended to JSON parse errors
    :raises errors.AnsibleError: on invalid JSON input
    '''
    stripped_data = data.lstrip()
    loaded = None
    if stripped_data.startswith("{") or stripped_data.startswith("["):
        # since the line starts with { or [ we can infer this is a JSON document.
        try:
            loaded = json.loads(data)
        except ValueError, ve:
            if path_hint:
                raise errors.AnsibleError(path_hint + ": " + str(ve))
            else:
                raise errors.AnsibleError(str(ve))
    else:
        # else this is pretty sure to be a YAML document
        loaded = yaml.load(data, Loader=Loader)
    return loaded
def process_common_errors(msg, probline, column):
    '''
    Inspect a line that made YAML parsing fail and, when it matches a
    well-known user mistake, append a helpful hint to the error message.

    :arg msg: error text accumulated so far
    :arg probline: the offending line of the YAML document
    :arg column: column at which the parser reported the problem
    :returns: msg, possibly extended with a hint
    '''
    replaced = probline.replace(" ","")
    # case 1: an unquoted template used as a mapping value
    if ":{{" in replaced and "}}" in replaced:
        msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
        return msg
    # case 2: a second, unquoted colon on the line
    elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
        msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
        return msg
    else:
        parts = probline.split(":")
        if len(parts) > 1:
            middle = parts[1].strip()
            match = False
            unbalanced = False
            # value starts with a quote but does not end with the same quote
            if middle.startswith("'") and not middle.endswith("'"):
                match = True
            elif middle.startswith('"') and not middle.endswith('"'):
                match = True
            # bug fix: the original expression mixed `and`/`or` without
            # parentheses, so ANY probline containing more than two double
            # quotes was flagged as unbalanced regardless of `middle`
            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (probline.count("'") > 2 or probline.count('"') > 2):
                unbalanced = True
            if match:
                msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
                return msg
            if unbalanced:
                msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
                return msg
    return msg
def process_yaml_error(exc, data, path=None, show_content=True):
    '''
    Turn a PyYAML exception into an AnsibleYAMLValidationFailed with a
    human-friendly message, optionally including the offending lines.

    :arg exc: the yaml.YAMLError that was raised
    :arg data: the raw document text that failed to parse
    :arg path: file name for the message, if known
    :arg show_content: when False (e.g. vault-encrypted files), only report
        line/column and never echo the file's contents
    '''
    if hasattr(exc, 'problem_mark'):
        mark = exc.problem_mark
        if show_content:
            if mark.line -1 >= 0:
                before_probline = data.split("\n")[mark.line-1]
            else:
                before_probline = ''
            probline = data.split("\n")[mark.line]
            # caret pointing at the reported column
            arrow = " " * mark.column + "^"
            msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
            unquoted_var = None
            if '{{' in probline and '}}' in probline:
                if '"{{' not in probline or "'{{" not in probline:
                    unquoted_var = True
            if not unquoted_var:
                msg = process_common_errors(msg, probline, mark.column)
            else:
                msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
        else:
            # most likely displaying a file with sensitive content,
            # so don't show any of the actual lines of yaml just the
            # line number itself
            msg = """Syntax error while loading YAML script, %s
The error appears to have been on line %s, column %s, but may actually
be before there depending on the exact syntax problem.
""" % (path, mark.line + 1, mark.column + 1)
    else:
        # No problem markers means we have to throw a generic
        # "stuff messed up" type message. Sry bud.
        if path:
            msg = "Could not parse YAML. Check over %s again." % path
        else:
            msg = "Could not parse YAML."
    raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path, vault_password=None):
    ''' convert a yaml file to a data structure

    Transparently decrypts vault-encrypted files when vault_password is
    given; raises errors.AnsibleError for unreadable or undecryptable files.
    '''
    data = None
    show_content = True
    try:
        data = open(path).read()
    except IOError:
        raise errors.AnsibleError("file could not read: %s" % path)
    vault = VaultLib(password=vault_password)
    if vault.is_encrypted(data):
        # if the file is encrypted and no password was specified,
        # the decrypt call would throw an error, but we check first
        # since the decrypt function doesn't know the file name
        if vault_password is None:
            raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
        data = vault.decrypt(data)
        # never echo decrypted content in error messages
        show_content = False
    try:
        return parse_yaml(data, path_hint=path)
    except yaml.YAMLError, exc:
        process_yaml_error(exc, data, path, show_content)
def parse_kv(args):
    ''' convert a string of key/value items to a dict

    Tokens without an '=' are silently ignored; values are unquoted.
    Returns an empty dict for None input.
    '''
    options = {}
    if args is not None:
        try:
            # split respecting quoting and templating rules
            vargs = split_args(args)
        except ValueError, ve:
            if 'no closing quotation' in str(ve).lower():
                raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
            else:
                raise
        for x in vargs:
            if "=" in x:
                # split only on the first '=' so values may contain '='
                k, v = x.split("=",1)
                options[k.strip()] = unquote(v.strip())
    return options
def _validate_both_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise errors.AnsibleError(
"failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
)
def merge_hash(a, b):
    ''' recursively merges hash b into a
    keys from b take precedence over keys from a

    Returns a new dict; neither input is modified.  Nested dicts present in
    both inputs are merged recursively; all other values from b overwrite.
    '''
    result = {}
    # we check here as well as in combine_vars() since this
    # function can work recursively with nested dicts
    _validate_both_dicts(a, b)
    for dicts in a, b:
        # next, iterate over b keys and values
        for k, v in dicts.iteritems():
            # if there's already such key in a
            # and that key contains dict
            if k in result and isinstance(result[k], dict):
                # merge those dicts recursively
                result[k] = merge_hash(a[k], v)
            else:
                # otherwise, just copy a value from b to a
                result[k] = v
    return result
def default(value, function):
    ''' syntactic sugar around lazy evaluation of defaults '''
    # only call the factory when no value was provided
    return function() if value is None else value
def _git_repo_info(repo_path):
    ''' returns a string containing git branch, commit id and commit date

    Returns '' when repo_path does not exist or cannot be interpreted;
    handles both a .git directory and a .git file (submodule indirection).
    '''
    result = None
    if os.path.exists(repo_path):
        # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
        if os.path.isfile(repo_path):
            try:
                gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                # There is a possibility the .git file to have an absolute path.
                if os.path.isabs(gitdir):
                    repo_path = gitdir
                else:
                    # strip the trailing '.git' from the path before joining
                    repo_path = os.path.join(repo_path[:-4], gitdir)
            except (IOError, AttributeError):
                return ''
        f = open(os.path.join(repo_path, "HEAD"))
        branch = f.readline().split('/')[-1].rstrip("\n")
        f.close()
        branch_path = os.path.join(repo_path, "refs", "heads", branch)
        if os.path.exists(branch_path):
            f = open(branch_path)
            # abbreviated commit id (first 10 hex chars)
            commit = f.readline()[:10]
            f.close()
        else:
            # detached HEAD
            commit = branch[:10]
            branch = 'detached HEAD'
            branch_path = os.path.join(repo_path, "HEAD")
        # use the ref file's mtime as the "last updated" timestamp
        date = time.localtime(os.stat(branch_path).st_mtime)
        if time.daylight == 0:
            offset = time.timezone
        else:
            offset = time.altzone
        result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
            time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
    else:
        result = ''
    return result
def _gitinfo():
    ''' return git info for the ansible checkout itself plus any submodules '''
    # the repository root is three directories up from this module
    basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
    repo_path = os.path.join(basedir, '.git')
    result = _git_repo_info(repo_path)
    submodules = os.path.join(basedir, '.gitmodules')
    if not os.path.exists(submodules):
        return result
    f = open(submodules)
    for line in f:
        tokens = line.strip().split(' ')
        # lines of interest look like: "path = lib/ansible/modules/core"
        if tokens[0] == 'path':
            submodule_path = tokens[2]
            submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
            if not submodule_info:
                submodule_info = ' not found - use git submodule update --init ' + submodule_path
            result += "\n {0}: {1}".format(submodule_path, submodule_info)
    f.close()
    return result
def version(prog):
    ''' return a human-readable version string for prog, including git info
    (when running from a checkout) and the configured module search path '''
    result = "{0} {1}".format(prog, __version__)
    gitinfo = _gitinfo()
    if gitinfo:
        result = result + " {0}".format(gitinfo)
    result = result + "\n  configured module search path = %s" % C.DEFAULT_MODULE_PATH
    return result
def version_info(gitinfo=False):
    ''' return a dict describing the ansible version

    Keys: string, full, major, minor, revision.  Missing components are
    padded with 0; non-numeric components are left as strings.
    '''
    if gitinfo:
        # expensive call, user with care
        ansible_version_string = version('')
    else:
        ansible_version_string = __version__
    ansible_version = ansible_version_string.split()[0]
    ansible_versions = ansible_version.split('.')
    for counter in range(len(ansible_versions)):
        if ansible_versions[counter] == "":
            ansible_versions[counter] = 0
        try:
            ansible_versions[counter] = int(ansible_versions[counter])
        except:
            # keep non-numeric parts (e.g. "0b1") as-is
            pass
    if len(ansible_versions) < 3:
        # pad to at least major.minor.revision
        for counter in range(len(ansible_versions), 3):
            ansible_versions.append(0)
    return {'string':      ansible_version_string.strip(),
            'full':        ansible_version,
            'major':       ansible_versions[0],
            'minor':       ansible_versions[1],
            'revision':    ansible_versions[2]}
def getch():
    ''' read in a single character from stdin (raw mode, no Enter required) '''
    fd = sys.stdin.fileno()
    # remember terminal settings so they can be restored afterwards
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        # always restore the terminal, even if the read is interrupted
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def sanitize_output(arg_string):
    ''' strips private info out of a string

    Values of known sensitive keys (password, login_password) are replaced
    with VALUE_HIDDEN; everything else is run through heuristic_log_sanitize.
    '''
    private_keys = ('password', 'login_password')
    output = []
    for part in arg_string.split():
        try:
            (k, v) = part.split('=', 1)
        except ValueError:
            # token is not k=v; sanitize it as a whole
            v = heuristic_log_sanitize(part)
            output.append(v)
            continue
        if k in private_keys:
            v = 'VALUE_HIDDEN'
        else:
            v = heuristic_log_sanitize(v)
        output.append('%s=%s' % (k, v))
    output = ' '.join(output)
    return output
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    def format_help(self, formatter=None):
        # sort so --help output is alphabetized by option string
        self.option_list.sort(key=operator.methodcaller('get_opt_string'))
        # bug fix: pass the caller's formatter through instead of
        # unconditionally discarding it with formatter=None
        return optparse.OptionParser.format_help(self, formatter=formatter)
def increment_debug(option, opt, value, parser):
    ''' optparse callback: each -v on the command line bumps global VERBOSITY '''
    global VERBOSITY
    VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
    async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
    ''' create an options parser for any ansible script

    The boolean flags switch on optional option groups (output, privilege
    escalation, async, connection, host subset, check and diff modes) so the
    various CLI front-ends can share one parser definition.
    '''
    parser = SortedOptParser(usage, version=version("%prog"))
    # options common to every ansible command
    parser.add_option('-v','--verbose', default=False, action="callback",
        callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
    parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
        help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
    parser.add_option('-i', '--inventory-file', dest='inventory',
        help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
        default=constants.DEFAULT_HOST_LIST)
    parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
        help="set additional variables as key=value or YAML/JSON", default=[])
    parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
        help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
    parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
        help='ask for SSH password')
    parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
        help='use this file to authenticate the connection')
    parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
        help='ask for vault password')
    parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
        dest='vault_password_file', help="vault password file")
    parser.add_option('--list-hosts', dest='listhosts', action='store_true',
        help='outputs a list of matching hosts; does not execute anything else')
    parser.add_option('-M', '--module-path', dest='module_path',
        help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
        default=None)
    if subset_opts:
        parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
            help='further limit selected hosts to an additional pattern')
    parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
        dest='timeout',
        help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
    if output_opts:
        # output shaping for /usr/bin/ansible
        parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
            help='condense output')
        parser.add_option('-t', '--tree', dest='tree', default=None,
            help='log output to this directory')
    if runas_opts:
        # priv user defaults to root later on to enable detecting when this option was given here
        parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
            help='ask for sudo password (deprecated, use become)')
        parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
            help='ask for su password (deprecated, use become)')
        parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
            help="run operations with sudo (nopasswd) (deprecated, use become)")
        parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
            help='desired sudo user (default=root) (deprecated, use become)')
        parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
            help='run operations with su (deprecated, use become)')
        parser.add_option('-R', '--su-user', default=None,
            help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
        # consolidated privilege escalation (become)
        parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
            help="run operations with become (nopasswd implied)")
        parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
            help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
        parser.add_option('--become-user', default=None, dest='become_user', type='string',
            help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
        parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
            help='ask for privilege escalation password')
    if connect_opts:
        parser.add_option('-c', '--connection', dest='connection',
            default=constants.DEFAULT_TRANSPORT,
            help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
    if async_opts:
        parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
            dest='poll_interval',
            help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
        parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
            help='run asynchronously, failing after X seconds (default=N/A)')
    if check_opts:
        parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
            help="don't make any changes; instead, try to predict some of the changes that may occur"
        )
    if diff_opts:
        parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
            help="when changing (small) files and templates, show the differences in those files; works great with --check"
        )
    return parser
def parse_extra_vars(extra_vars_opts, vault_pass):
    ''' combine all -e/--extra-vars options into one variable dictionary

    Each option may be an @file reference, inline YAML/JSON, or k=v pairs;
    later options take precedence via combine_vars.
    '''
    extra_vars = {}
    for extra_vars_opt in extra_vars_opts:
        extra_vars_opt = to_unicode(extra_vars_opt)
        if extra_vars_opt.startswith(u"@"):
            # Argument is a YAML file (JSON is a subset of YAML)
            extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
        elif extra_vars_opt and extra_vars_opt[0] in u'[{':
            # Arguments as YAML
            extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
        else:
            # Arguments as Key-value
            extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
    return extra_vars
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
    ''' interactively prompt for vault password(s)

    Returns a (vault_pass, new_vault_pass) tuple of stripped byte strings
    (None for any password not requested).  With the confirm flags set, the
    password must be typed twice and both entries must match.
    '''
    vault_pass = None
    new_vault_pass = None
    if ask_vault_pass:
        vault_pass = getpass.getpass(prompt="Vault password: ")
    if ask_vault_pass and confirm_vault:
        vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
        if vault_pass != vault_pass2:
            raise errors.AnsibleError("Passwords do not match")
    if ask_new_vault_pass:
        new_vault_pass = getpass.getpass(prompt="New Vault password: ")
    if ask_new_vault_pass and confirm_new:
        new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
        if new_vault_pass != new_vault_pass2:
            raise errors.AnsibleError("Passwords do not match")
    # enforce no newline chars at the end of passwords
    if vault_pass:
        vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
    if new_vault_pass:
        new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
    return vault_pass, new_vault_pass
def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
    ''' interactively prompt for SSH, privilege-escalation and vault passwords

    Returns an (sshpass, becomepass, vaultpass) tuple of byte strings; any
    password not requested is returned as None.  An empty become password
    falls back to the SSH password when both were requested.
    '''
    sshpass = None
    becomepass = None
    vaultpass = None
    become_prompt = ''
    if ask_pass:
        sshpass = getpass.getpass(prompt="SSH password: ")
        become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
        if sshpass:
            sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
    else:
        become_prompt = "%s password: " % become_method.upper()
    if become_ask_pass:
        becomepass = getpass.getpass(prompt=become_prompt)
        # empty input means "reuse the SSH password"
        if ask_pass and becomepass == '':
            becomepass = sshpass
        if becomepass:
            becomepass = to_bytes(becomepass)
    if ask_vault_pass:
        vaultpass = getpass.getpass(prompt="Vault password: ")
        if vaultpass:
            # strip trailing newline/whitespace from the vault password
            vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
    return (sshpass, becomepass, vaultpass)
def choose_pass_prompt(options):
    ''' pick the privilege-escalation method name to use in password prompts '''
    # honor the deprecated --ask-su-pass / --ask-sudo-pass flags first,
    # then fall back to the configured become method
    if options.ask_su_pass:
        return 'su'
    if options.ask_sudo_pass:
        return 'sudo'
    return options.become_method
def normalize_become_options(options):
    ''' map the deprecated sudo/su command-line options onto the
    consolidated become_* options, mutating options in place '''
    options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
    options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
    if options.become:
        # explicit --become wins; leave become_method as given
        pass
    elif options.sudo:
        options.become = True
        options.become_method = 'sudo'
    elif options.su:
        options.become = True
        options.become_method = 'su'
def do_encrypt(result, encrypt, salt_size=None, salt=None):
    ''' hash a plaintext value with the passlib algorithm named by encrypt

    :arg result: the plaintext to hash
    :arg encrypt: passlib hash scheme name (e.g. 'sha512_crypt')
    :arg salt_size: optional salt length to generate
    :arg salt: optional explicit salt (ignored if salt_size is given)
    :raises errors.AnsibleError: if passlib is missing or the scheme is unknown
    '''
    if PASSLIB_AVAILABLE:
        try:
            crypt = getattr(passlib.hash, encrypt)
        except:
            raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
        if salt_size:
            result = crypt.encrypt(result, salt_size=salt_size)
        elif salt:
            result = crypt.encrypt(result, salt=salt)
        else:
            result = crypt.encrypt(result)
    else:
        raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
    return result
def last_non_blank_line(buf):
    ''' return the last line of buf that contains any characters '''
    for line in reversed(buf.splitlines()):
        if len(line) > 0:
            return line
    # shouldn't occur unless there's no output
    return ""
def filter_leading_non_json_lines(buf):
    '''
    used to avoid random output from SSH at the top of JSON output, like messages from
    tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
    need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
    filter only leading lines since multiline JSON is valid.
    '''
    kept = []
    seen_json_start = False
    for line in buf.splitlines():
        # keep everything from the first line that opens a JSON document
        if not seen_json_start and (line.startswith('{') or line.startswith('[')):
            seen_json_start = True
        if seen_json_start:
            kept.append(line + '\n')
    return ''.join(kept)
def boolean(value):
    ''' coerce common truthy strings/values (yes/true/t/y/1) to a bool '''
    return str(value).lower() in [ "true", "t", "y", "1", "yes" ]
def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
    """
    helper function for connection plugins to create privilege escalation commands

    Returns a (command, prompt, success_key) tuple: the full shell command to
    run, the password prompt to watch for (sudo only, else None), and a random
    marker echoed on success so callers can detect that escalation worked.
    Raises errors.AnsibleError for unknown methods.
    """
    # random marker so output of the escalated command can be recognized
    randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
    success_key = 'BECOME-SUCCESS-%s' % randbits
    prompt = None
    becomecmd = None
    shell = shell or '$SHELL'
    if method == 'sudo':
        # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
        # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
        # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
        # string to the user's shell. We loop reading output until we see the randomly-generated
        # sudo prompt set with the -p option.
        prompt = '[sudo via ansible, key=%s] password: ' % randbits
        exe = exe or C.DEFAULT_SUDO_EXE
        becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
            (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
    elif method == 'su':
        exe = exe or C.DEFAULT_SU_EXE
        flags = flags or C.DEFAULT_SU_FLAGS
        becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
    elif method == 'pbrun':
        exe = exe or 'pbrun'
        flags = flags or ''
        becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
    elif method == 'pfexec':
        exe = exe or 'pfexec'
        flags = flags or ''
        # No user as it uses it's own exec_attr to figure it out
        becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
    if becomecmd is None:
        raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
    return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
    """
    helper function for connection plugins to create sudo commands

    Thin compatibility wrapper around make_become_cmd() with the configured
    default sudo flags; returns (command, prompt, success_key).
    """
    return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
def make_su_cmd(su_user, executable, cmd):
    """
    Helper function for connection plugins to create direct su commands

    Thin compatibility wrapper around make_become_cmd() with the configured
    default su executable and flags; returns (command, prompt, success_key).
    """
    return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
def get_diff(diff):
    # called by --diff usage in playbook and runner via callbacks
    # include names in diffs 'before' and 'after' and do diff -U 10
    #
    # *diff* is a dict assembled by modules/runner.  Marker keys flag cases
    # where no textual diff is possible (binary or oversized files); the
    # 'before'/'after' keys (with optional *_header labels) carry the text.
    # Returns a unified-diff unicode string, or a short notice string.
    try:
        with warnings.catch_warnings():
            # Silence non-actionable warnings emitted while diffing
            # mixed/odd input; they would only confuse the CLI user.
            warnings.simplefilter('ignore')
            ret = []
            if 'dst_binary' in diff:
                ret.append("diff skipped: destination file appears to be binary\n")
            if 'src_binary' in diff:
                ret.append("diff skipped: source file appears to be binary\n")
            if 'dst_larger' in diff:
                ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
            if 'src_larger' in diff:
                ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
            if 'before' in diff and 'after' in diff:
                if 'before_header' in diff:
                    before_header = "before: %s" % diff['before_header']
                else:
                    before_header = 'before'
                if 'after_header' in diff:
                    after_header = "after: %s" % diff['after_header']
                else:
                    after_header = 'after'
                # 10 lines of context, matching "diff -U 10".
                differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
                for line in list(differ):
                    ret.append(line)
            return u"".join(ret)
    except UnicodeDecodeError:
        return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
    """Return True when every element of *items* is a string (str/unicode).

    An empty iterable vacuously counts as a list of strings.
    """
    return all(isinstance(element, basestring) for element in items)
def list_union(a, b):
    """Ordered, duplicate-free union of *a* and *b*.

    Elements keep the order of first appearance (all of a, then b).
    List membership is used instead of a set so unhashable elements
    (e.g. dicts) are supported.
    """
    merged = []
    for item in list(a) + list(b):
        if item not in merged:
            merged.append(item)
    return merged
def list_intersection(a, b):
    """Ordered, duplicate-free list of elements of *a* also present in *b*.

    Uses list membership (not sets) so unhashable elements are supported.
    """
    result = []
    for candidate in a:
        if candidate not in b:
            continue
        if candidate in result:
            continue
        result.append(candidate)
    return result
def list_difference(a, b):
    """Ordered symmetric difference of *a* and *b*.

    Despite the name this returns elements that appear in exactly one of
    the two lists (a-only first, then b-only), without duplicates.  List
    membership is used so unhashable elements are supported.
    """
    result = []
    for source, other in ((a, b), (b, a)):
        for candidate in source:
            if candidate not in other and candidate not in result:
                result.append(candidate)
    return result
def contains_vars(data):
    '''
    returns True if the data contains a variable pattern

    Both legacy "$var" syntax and Jinja2 "{{ var }}" syntax count.
    '''
    return any(marker in data for marker in ("$", "{{"))
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if not sys.version.startswith('2.6'):
SAFE_NODES.union(
set(
(ast.Set,)
)
)
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, locals)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError, e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception, e:
if include_exceptions:
return (expr, e)
return expr
def listify_lookup_plugin_terms(terms, basedir, inject):
    # Normalise the argument of a lookup plugin / with_* construct into a
    # list: bare variable names are templated with Jinja2 first, and
    # stringified lists/dicts are rebuilt via safe_eval().
    from ansible.utils import template
    if isinstance(terms, basestring):
        # someone did:
        # with_items: alist
        # OR
        # with_items: {{ alist }}
        stripped = terms.strip()
        if not (stripped.startswith('{') or stripped.startswith('[')) and \
            not stripped.startswith("/") and \
            not stripped.startswith('set([') and \
            not LOOKUP_REGEX.search(terms):
            # if not already a list, get ready to evaluate with Jinja2
            # not sure why the "/" is in above code :)
            try:
                new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
                if isinstance(new_terms, basestring) and "{{" in new_terms:
                    # Template did not resolve (still contains markers): keep
                    # the original string rather than a half-rendered one.
                    pass
                else:
                    terms = new_terms
            except:
                # Best-effort templating: any failure leaves terms unchanged.
                pass
        if '{' in terms or '[' in terms:
            # Jinja2 already evaluated a variable to a list.
            # Jinja2-ified list needs to be converted back to a real type
            # TODO: something a bit less heavy than eval
            return safe_eval(terms)
    if isinstance(terms, basestring):
        # Still a plain string: wrap it as a single-element list.
        terms = [ terms ]
    return terms
def combine_vars(a, b):
    """Combine two variable dicts, with *b* taking priority on conflicts.

    Depending on the configured hash behaviour this is either a recursive
    merge or a shallow replace (dict(a.items() + b.items()) relies on
    Python 2 .items() returning concatenable lists).
    """
    _validate_both_dicts(a, b)
    if C.DEFAULT_HASH_BEHAVIOUR == "merge":
        return merge_hash(a, b)
    else:
        return dict(a.items() + b.items())
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
    '''Return a random password string of length containing only chars.'''
    password = []
    while len(password) < length:
        # Rejection sampling over os.urandom keeps the distribution uniform
        # across *chars* (no modulo bias), at the cost of discarded draws.
        # NOTE(review): assumes Python 2, where os.urandom returns a str
        # that can be tested for membership in a str *chars* — under
        # Python 3 this comparison would be bytes-vs-str; confirm runtime.
        new_char = os.urandom(1)
        if new_char in chars:
            password.append(new_char)
    return ''.join(password)
def before_comment(msg):
    ''' what's the part of a string before a comment? '''
    # Backslash-escaped hashes ("\#") are not comment starts: hide them
    # behind a placeholder, cut at the first real '#', then restore them.
    placeholder = "**NOT_A_COMMENT**"
    protected = msg.replace("\\#", placeholder)
    kept = protected.split("#")[0]
    return kept.replace(placeholder, "#")
def load_vars(basepath, results, vault_password=None):
    """
    Load variables from any potential yaml filename combinations of basepath,
    returning result.

    Tries basepath + each extension in C.YAML_FILENAME_EXTENSIONS, merging
    whatever is found into *results*.  Raises AnsibleError when more than
    one candidate file exists for the same basepath.
    """
    paths_to_check = [ "".join([basepath, ext])
                       for ext in C.YAML_FILENAME_EXTENSIONS ]
    found_paths = []
    for path in paths_to_check:
        found, results = _load_vars_from_path(path, results, vault_password=vault_password)
        if found:
            found_paths.append(path)
    # disallow the potentially confusing situation that there are multiple
    # variable files for the same name. For example if both group_vars/all.yml
    # and group_vars/all.yaml
    if len(found_paths) > 1:
        raise errors.AnsibleError("Multiple variable files found. "
            "There should only be one. %s" % ( found_paths, ))
    return results
## load variables from yaml files/dirs
# e.g. host/group_vars
#
def _load_vars_from_path(path, results, vault_password=None):
    """
    Robustly access the file at path and load variables, carefully reporting
    errors in a friendly/informative way.

    Return the tuple (found, new_results, ).  Symlinks are followed by
    recursing on their target; directories recurse into
    _load_vars_from_folder(); regular files are parsed as YAML dicts.
    """
    try:
        # in the case of a symbolic link, we want the stat of the link itself,
        # not its target
        pathstat = os.lstat(path)
    except os.error, err:
        # most common case is that nothing exists at that path.
        if err.errno == errno.ENOENT:
            return False, results
        # otherwise this is a condition we should report to the user
        raise errors.AnsibleError(
            "%s is not accessible: %s."
            " Please check its permissions." % ( path, err.strerror))
    # symbolic link
    if stat.S_ISLNK(pathstat.st_mode):
        try:
            target = os.path.realpath(path)
        except os.error, err2:
            raise errors.AnsibleError("The symbolic link at %s "
                "is not readable: %s.  Please check its permissions."
                % (path, err2.strerror, ))
        # follow symbolic link chains by recursing, so we repeat the same
        # permissions checks above and provide useful errors.
        return _load_vars_from_path(target, results, vault_password)
    # directory
    if stat.S_ISDIR(pathstat.st_mode):
        # support organizing variables across multiple files in a directory
        return True, _load_vars_from_folder(path, results, vault_password=vault_password)
    # regular file
    elif stat.S_ISREG(pathstat.st_mode):
        data = parse_yaml_from_file(path, vault_password=vault_password)
        if data and type(data) != dict:
            raise errors.AnsibleError(
                "%s must be stored as a dictionary/hash" % path)
        elif data is None:
            # An empty vars file is valid: contribute nothing.
            data = {}
        # combine vars overrides by default but can be configured to do a
        # hash merge in settings
        results = combine_vars(results, data)
        return True, results
    # something else? could be a fifo, socket, device, etc.
    else:
        raise errors.AnsibleError("Expected a variable file or directory "
            "but found a non-file object at path %s" % (path, ))
def _load_vars_from_folder(folder_path, results, vault_password=None):
    """
    Load all variables within a folder recursively.

    Each non-hidden entry is merged into *results* in sorted filename
    order via _load_vars_from_path().
    """
    # this function and _load_vars_from_path are mutually recursive
    try:
        names = os.listdir(folder_path)
    except os.error, err:
        raise errors.AnsibleError(
            "This folder cannot be listed: %s: %s."
            % ( folder_path, err.strerror))
    # evaluate files in a stable order rather than whatever order the
    # filesystem lists them.
    names.sort()
    # do not parse hidden files or dirs, e.g. .svn/
    paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
    for path in paths:
        _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
    return results
def update_hash(hash, key, new_value):
    ''' used to avoid nested .update calls on the parent '''
    # Merge *new_value* into the dict stored at hash[key], creating a
    # fresh dict (never aliasing new_value) when the key is absent.
    if key in hash:
        hash[key].update(new_value)
    else:
        fresh = {}
        fresh.update(new_value)
        hash[key] = fresh
def censor_unlogged_data(data):
    '''
    used when the no_log: True attribute is passed to a task to keep data from a callback.
    NOT intended to prevent variable registration, but only things from showing up on
    screen

    Returns a new dict keeping only status-style keys plus a 'censored'
    notice; the original *data* is left untouched.
    '''
    new_data = {}
    # Status keys are safe to display; everything else is dropped.
    for (x,y) in data.iteritems():
        if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
            new_data[x] = y
    new_data['censored'] = 'results hidden due to no_log parameter'
    return new_data
def check_mutually_exclusive_privilege(options, parser):
    """Abort via parser.error() when more than one privilege-escalation
    family (sudo / su / become) is requested on the command line.

    The three option families would otherwise produce conflicting
    escalation commands, so any pairing of two families is rejected.
    """
    # Collapse each family of flags into a single "was it used?" boolean.
    su_used = options.su or options.su_user or options.ask_su_pass
    sudo_used = options.sudo or options.sudo_user or options.ask_sudo_pass
    become_used = options.become or options.become_user or options.become_ask_pass
    # Error out when any two (or more) families are combined.
    # (Same pairwise logic as before, made explicit instead of relying on
    # and/or precedence; also fixes the '-su' typo — the flag is '--su'.)
    if (su_used and sudo_used) or (su_used and become_used) or (sudo_used and become_used):
        parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
                     "and su arguments ('--su', '--su-user', and '--ask-su-pass') "
                     "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
                     " are exclusive of each other")
|
webdev1001/ansible
|
lib/ansible/utils/__init__.py
|
Python
|
gpl-3.0
| 60,245
|
[
"Galaxy",
"VisIt"
] |
eda554cd31658f6a3f759c8615ee80f67537c76fd8d484eb0943091ff7a6ccbb
|
#!/usr/bin/env python
#####
## readProbe.py
## Read in PetaVision probe output
## Display time-course plots and histograms of values
## Should work with any standard probe. Currently tested with LIF, OjaSTDPConn, LCALIF probes.
##
##Dylan Paiton and Sheng Lundquist
#####
import sys
import matplotlib
from readProbeFunc import *
from readProbeParams import *
#Must be done before importing pyplot (or anything from pyplot)
if not dispFigs:
matplotlib.use('Agg')
from os import path, makedirs
from matplotlib.pyplot import *
#Error checking
#Validate the user-supplied readProbeParams settings before any work is done.
if len([i for i in scale.keys() if i not in data.keys()]) > 0:
    print "readProbe: WARNING: Some of your key values for the scale dictionary do not match anything in the data dictionary. They will be ignored."
if weightMap:
    #Weight maps need a 'weight*' entry in data; silently disable otherwise.
    if 'weight*' not in data.keys():
        print "readProbe: WARNING: weight* is not set in the data dictionary, but weightMap flag is true. Setting weightMap to false."
        weightMap = False
if not path.exists(probeFileDir):
    sys.exit("readProbe: ERROR: probeFileDir ("+probeFileDir+") does not exist!")
#Main loop
#Each filenameTup is (label, path); figures are saved per label when not
#being displayed interactively.
for filenameTup in filenames:
    filename = filenameTup[1]
    if not dispFigs:
        figOutDir = rootFigOutDir+"/"+filenameTup[0]+"/"
        if not path.exists(figOutDir):
            makedirs(figOutDir)
    print "\n---------------"
    print "readProbe: Reading file "+filename
    lines = readProbeFile(filename,startTime,endTime) #lines is [time][char]
    print "readProbe: Formatting file into data structure..."
    lines = [splitLine(line) for line in lines] #lines is now [time][variable][(key),(val)]
    numTimeSteps = len(lines) #Uniform for all keys
    numArbors = {}
    numPreNeurons = {}
    numPreConns = {}
    stds = {}
    print "readProbe: Parsing Keys..."
    for key in data.keys():
        specificKey = True
        if key[len(key)-1] == "*": #Get key value, without the * if it is there
            tok = key[:len(key) - 1]
            specificKey = False
        else:
            tok = key
        #Check to be sure that the tokens (keys) listed in data are actually in the probe's output
        checkTok = [[[tok in string for string in tup] for tup in line] for line in lines]
        if not any(checkTok[:]):
            sys.exit("readProbe: ERROR: Token '"+tok+"' was not found in the input file. Exiting program.")
        if key not in scale: # Set scale for plot to 1 if not defined
            scale[key] = 1
        workingLines = lines[:] #Make a single copy of the lines, use this copy throughout the loop
        numArbors[tok] = getNumArbors(tok,workingLines[0]) #Num arbors should be the same for each time step
        #Working lines is [time][preNeuron] and filtered for key of interest
        if specificKey:
            workingLines = [[float(part[1]) for part in line if part[0] == tok] for line in workingLines]
        else:
            workingLines = [[float(part[1]) for part in line if part[0].split('_')[0] == tok] for line in workingLines]
        #Total number of pre
        numPreConns[tok] = len(workingLines[0])
        numPreNeurons[tok] = numPreConns[tok] / numArbors[tok]
        #workingLines is now a list of lists of lists- [arbor][preNeuron][time]
        # number of preNeuron vals in each time step should equal the pre patch size
        workingLines = [[[workingLines[timeIndex][preIndex] for timeIndex in xrange(numTimeSteps)] for preIndex in range(numPreNeurons[tok]*arborID,numPreNeurons[tok]*arborID+numPreNeurons[tok])] for arborID in xrange(numArbors[tok])]
        print "readProbe: -Formatting key: '" + key + "'"
        if key[len(key)-1] == "*": #User has asked for all elements of a particular name
            if weightMap and tok == 'weight':
                print "readProbe: --Creating weight map..."
                #Time-averaged weight per pre-neuron, one map per arbor.
                wMap = [[mean(workingLines[arborID][neuronID][:]) for neuronID in range(numPreNeurons[tok])] for arborID in range(numArbors[tok])] #List of maps, one for each arbor
                if sqrt(numPreNeurons[tok])%1 > 0:
                    print "readProbe: WARNING: numPreNeurons["+tok+"] is not a perfect square! Using ceil() to avoid overflow."
                    squareVal = ceil(sqrt(numPreNeurons[tok]))
                else:
                    squareVal = sqrt(numPreNeurons[tok])
                wMap = [reshape(array(wMap[arborID]),(sqrt(numPreNeurons[tok]),sqrt(numPreNeurons[tok]))) for arborID in range(numArbors[tok])] #reshape(vect, (nRows,nCols)), writes cols first
            if timePlot:
                print "readProbe: --Binning values for the time plot..."
                stds[key] = []
                #get min/max/step across all neurons of the same name and all time
                minVals = [min(min(workingLines[arborID][:][:])) for arborID in range(numArbors[tok])]
                maxVals = [max(max(workingLines[arborID][:][:])) for arborID in range(numArbors[tok])]
                stepWidths = [(maxVals[arborID] - minVals[arborID]) / float(numTCBins) for arborID in range(numArbors[tok])]
                boundList = [list(arange(minVals[arborID], maxVals[arborID], stepWidths[arborID])) if stepWidths[arborID] != 0.0 else [0] for arborID in range(numArbors[tok])] # List of separators (edges) for bins
                for arborID in range(numArbors[tok]): #TODO: make inline?
                    boundList[arborID].append(maxVals[arborID]+1) #must be bigger so everything fits into the bin
                #workingLines is now [arborID][time][preNeuron]
                workingLines = [[[workingLines[arborID][preNeuron][timeStep]
                    for preNeuron in range(numPreNeurons[tok])]
                    for timeStep in range(numTimeSteps)]
                    for arborID in range(numArbors[tok])]
                #workingLines is now [arborID][time][bin][preNeuron]
                workingLines = [[[[preNeuronVal
                    for preNeuronVal in timeVals if preNeuronVal >= boundList[arborID][boundEdge] and preNeuronVal < boundList[arborID][boundEdge+1]]
                    for boundEdge in range(len(boundList[arborID])-1)]
                    for timeVals in workingLines[arborID]]
                    for arborID in range(numArbors[tok])]
                print "readProbe: --Computing line of best fit..."
                for arborID in range(numArbors[tok]):
                    #Find best line of fit
                    xVals = {}
                    yVals = {}
                    #Allocate arrays for dictionaries
                    for binKey in range(numTCBins):
                        xVals[binKey] = []
                        yVals[binKey] = []
                    #Iterate through everything to get data points for line of best fit
                    for time in range(numTimeSteps):
                        for binNo, bins in enumerate(workingLines[arborID][time]):
                            if len(bins) != 0:
                                yVals[binNo].extend(bins)
                                xVals[binNo].extend([data['t'][0][0][time] for binNo in range(len(bins))]) #Time always has 1 arbor (index 0) and 1 pre-neuron (index 0)
                    #Per-bin linear fit (slope, intercept); empty bins get empty arrays.
                    data[key].append([polyfit(xVals[binNo], yVals[binNo], 1) if len(yVals[binNo]) != 0 else array([]) for binNo in range(numTCBins)])
                    #Calculate standard deviation
                    stds[key].append([std(yVals[binNo]) if len(yVals[binNo]) != 0 else array([]) for binNo in range(numTCBins)])
        else:
            data[key] = workingLines
        print "readProbe: -Done formatting key '"+key+"'"
    time = array(data['t'][0][0][:]) #data[key][arbor][preNeuron]
    if weightMap:
        tok = 'weight'
        for arborID in range(numArbors[tok]):
            figure()
            imshow(wMap[arborID],aspect='auto',extent=[0,sqrt(numPreNeurons[tok]),0,sqrt(numPreNeurons[tok])])
            grid(color='white')
            colorbar()
            if not dispFigs:
                savefig(figOutDir+rootFigName+"_"+filenameTup[0]+"_weightMap"+str(arborID)+".png")
                clf()
    if timePlot:
        #One time-course figure per arbor, plotting each bin's fitted line.
        for key in data.keys():
            if key == 't':
                continue
            if key[len(key)-1] == "*": #Get key value, without the * if it is there
                tok = key[:len(key) - 1]
            else:
                tok = key
            if key[len(key)-1] == "*":
                for arborID in range(numArbors[tok]):
                    figure()
                    for TCBin in range(numTCBins):
                        if(len(data[key][arborID][TCBin]) != 0):
                            plotMe = time * data[key][arborID][TCBin][0] + data[key][arborID][TCBin][1]
                            plot(time, scale[key]*plotMe, label=key+'_a'+str(arborID)+' std:'+str(stds[key][arborID][TCBin]))
                    if doLegend:
                        legend()#bbox_to_anchor=(0., 1.02, 1., .102), ncol = 2, mode="expand", borderaxespad=0.,loc=3)
                    tight_layout()
                    if not dispFigs:
                        savefig(figOutDir+rootFigName+"_"+filenameTup[0]+"_timeCourseAvg"+str(arborID)+".png")
                        clf()
    didPlot = False #Only true if plot is created below
    figure()
    for key in data.keys(): #must repeat loop because we want all of these plots to be on one figure
        if key == 't':
            continue
        if key[len(key)-1] == "*": #Get key value, without the * if it is there
            continue
        else:
            tok = key
        if numArbors[tok] > 1:
            continue
        arborID = 0
        if key == 'a':
            countActivity(data,key)
        for preNeuronID in range(numPreNeurons[tok]):
            plotMe = array(data[key][arborID][preNeuronID][:])
            if len(plotMe) != 0:
                #Special cases for legend labels on printing
                if "_" in key: #Specific pre-neuron and conn is given
                    keySP = key.split("_")
                    keyLabel = keySP[0]
                    arborLabel = keySP[1]
                    neuronLabel = keySP[2]
                    figLabel=keyLabel+"_"+filenameTup[0]+"_n"+neuronLabel+"_a"+arborLabel
                else:
                    keyLabel = key
                    arborLabel = str(arborID)
                    if key[len(key)-1] == "*": #preNeuron
                        neuronLabel = 'Avg'
                        figLabel=keyLabel+"_"+filenameTup[0]+"_n"+neuronLabel+"_a"+arborLabel
                    else:
                        neuronLabel = 'Post'
                        figLabel=keyLabel+"_"+filenameTup[0]+"_n"+neuronLabel
                if '_1_' in key:
                    plot(time, plotMe, ':',label=figLabel)
                else:
                    plot(time, plotMe,label=figLabel)
                grid(True)
                didPlot = True
    if didPlot:
        if doLegend:
            legend()#bbox_to_anchor=(0., 1.02, 1., .102), ncol = 2, mode="expand", borderaxespad=0.,loc=3)
        tight_layout()
        if not dispFigs:
            savefig(figOutDir+rootFigName+"_"+filenameTup[0]+"_timeCourse.png")
            clf()
    #Clear lines for this file
    #del lines #Will not free until garabe collection
    if dispFigs:
        show()
print "\nreadProbe: Script Complete...\n"
|
dpaiton/OpenPV
|
pv-core/python/probe_analysis/readProbe.py
|
Python
|
epl-1.0
| 11,571
|
[
"NEURON"
] |
5371d8134e639065899831db7ddb3b2ba5821ca5129f2cb4c732ad2e478e66fa
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amber relaxation."""
from typing import Any, Dict, Sequence, Tuple
from alphafold.common import protein
from alphafold.relax import amber_minimize
from alphafold.relax import utils
import numpy as np
class AmberRelaxation(object):
  """Amber relaxation."""
  def __init__(self,
               *,
               max_iterations: int,
               tolerance: float,
               stiffness: float,
               exclude_residues: Sequence[int],
               max_outer_iterations: int,
               use_gpu: bool):
    """Initialize Amber Relaxer.
    Args:
      max_iterations: Maximum number of L-BFGS iterations. 0 means no max.
      tolerance: kcal/mol, the energy tolerance of L-BFGS.
      stiffness: kcal/mol A**2, spring constant of heavy atom restraining
        potential.
      exclude_residues: Residues to exclude from per-atom restraining.
        Zero-indexed.
      max_outer_iterations: Maximum number of violation-informed relax
        iterations. A value of 1 will run the non-iterative procedure used in
        CASP14. Use 20 so that >95% of the bad cases are relaxed. Relax finishes
        as soon as there are no violations, hence in most cases this causes no
        slowdown. In the worst case we do 20 outer iterations.
      use_gpu: Whether to run on GPU.
    """
    self._max_iterations = max_iterations
    self._tolerance = tolerance
    self._stiffness = stiffness
    self._exclude_residues = exclude_residues
    self._max_outer_iterations = max_outer_iterations
    self._use_gpu = use_gpu
  def process(self, *,
              prot: protein.Protein) -> Tuple[str, Dict[str, Any], np.ndarray]:
    """Runs Amber relax on a prediction, adds hydrogens, returns PDB string.

    Returns:
      A tuple (min_pdb, debug_data, violations): the minimized structure as
      PDB text, a dict of energies/RMSD diagnostics, and the per-residue
      violation mask reported by the minimization pipeline.
    """
    out = amber_minimize.run_pipeline(
        prot=prot, max_iterations=self._max_iterations,
        tolerance=self._tolerance, stiffness=self._stiffness,
        exclude_residues=self._exclude_residues,
        max_outer_iterations=self._max_outer_iterations,
        use_gpu=self._use_gpu)
    min_pos = out['pos']
    start_pos = out['posinit']
    # RMSD between initial and minimized coordinates, normalized by the
    # number of coordinate rows returned by the pipeline.
    rmsd = np.sqrt(np.sum((start_pos - min_pos)**2) / start_pos.shape[0])
    debug_data = {
        'initial_energy': out['einit'],
        'final_energy': out['efinal'],
        'attempts': out['min_attempts'],
        'rmsd': rmsd
    }
    # Re-emit the cleaned input as PDB text, then splice in the minimized
    # coordinates and the original per-residue B-factors.
    pdb_str = amber_minimize.clean_protein(prot)
    min_pdb = utils.overwrite_pdb_coordinates(pdb_str, min_pos)
    min_pdb = utils.overwrite_b_factors(min_pdb, prot.b_factors)
    # Sanity check: the relaxed structure's atom mask must agree with the
    # input's for non-terminal atoms (see utils helper).
    utils.assert_equal_nonterminal_atom_types(
        protein.from_pdb_string(min_pdb).atom_mask,
        prot.atom_mask)
    violations = out['structural_violations'][
        'total_per_residue_violations_mask']
    return min_pdb, debug_data, violations
|
deepmind/alphafold
|
alphafold/relax/relax.py
|
Python
|
apache-2.0
| 3,343
|
[
"Amber"
] |
9c0b963e0919f98d6254e3477bae7f873331e26d22ed3653c3d8abe33e9e6e42
|
# coding=utf-8
# main codes, call functions at stokes_flow.py
# Zhang Ji, 20170518
import sys
import petsc4py
petsc4py.init(sys.argv)
import numpy as np
from src import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic
from petsc4py import PETSc
from src.geo import *
from time import time
import pickle
from scipy.io import savemat
from src.ref_solution import *
from scipy.io import loadmat
import warnings
from memory_profiler import profile
def print_case_info(**problem_kwargs):
    """Print a summary of the solver case (via PETSc, rank-0 only output)
    and assert that the requested matrix method is one this script supports."""
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    size = comm.Get_size()
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    matrix_method = problem_kwargs['matrix_method']
    u = problem_kwargs['u']
    PETSc.Sys.Print('sphere radius: %f, delta length: %f, velocity: %f' % (radius, deltaLength, u))
    err_msg = "Only 'pf', 'rs', 'tp_rs', and 'lg_rs' methods are accept for this main code. "
    assert matrix_method in (
        'rs', 'tp_rs', 'lg_rs', 'rs_precondition', 'tp_rs_precondition', 'lg_rs_precondition',
        'pf'), err_msg
    epsilon = problem_kwargs['epsilon']
    # Each matrix-method family reports its own extra parameters.
    if matrix_method in ('rs', 'rs_precondition', 'pf'):
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f' % (matrix_method, epsilon))
    elif matrix_method in ('tp_rs', 'tp_rs_precondition'):
        twoPara_n = problem_kwargs['twoPara_n']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, order: %d' % (
            matrix_method, epsilon, twoPara_n))
    elif matrix_method in ('lg_rs', 'lg_rs_precondition'):
        legendre_m = problem_kwargs['legendre_m']
        legendre_k = problem_kwargs['legendre_k']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, m: %d, k: %d, p: %d' % (
            matrix_method, epsilon, legendre_m, legendre_k, (legendre_m + 2 * legendre_k + 1)))
    solve_method = problem_kwargs['solve_method']
    precondition_method = problem_kwargs['precondition_method']
    PETSc.Sys.Print(
        'solve method: %s, precondition method: %s' % (solve_method, precondition_method))
    PETSc.Sys.Print('output file headle: ' + fileHandle)
    PETSc.Sys.Print('MPI size: %d' % size)
def get_problem_kwargs(**main_kwargs):
    """Collect solver options from the PETSc options database (command line,
    with defaults) into a problem_kwargs dict; entries in *main_kwargs*
    override whatever was read from the command line."""
    OptDB = PETSc.Options()
    radius = OptDB.getReal('r', 1)
    deltaLength = OptDB.getReal('d', 0.3)
    epsilon = OptDB.getReal('e', -0.3)
    u = OptDB.getReal('u', 1)
    fileHandle = OptDB.getString('f', 'sphere')
    solve_method = OptDB.getString('s', 'gmres')
    precondition_method = OptDB.getString('g', 'none')
    plot = OptDB.getBool('plot', False)
    debug_mode = OptDB.getBool('debug', False)
    matrix_method = OptDB.getString('sm', 'pf')
    restart = OptDB.getBool('restart', False)
    twoPara_n = OptDB.getInt('tp_n', 1)
    legendre_m = OptDB.getInt('legendre_m', 3)
    legendre_k = OptDB.getInt('legendre_k', 2)
    n_sphere_check = OptDB.getInt('n_sphere_check', 2000)
    n_node_threshold = OptDB.getInt('n_threshold', 10000)
    random_velocity = OptDB.getBool('random_velocity', False)
    getConvergenceHistory = OptDB.getBool('getConvergenceHistory', False)
    pickProblem = OptDB.getBool('pickProblem', False)
    # Grid of spheres: counts along x/y and centre-to-centre spacing.
    n_obj = OptDB.getInt('n', 1)
    n_obj_x = OptDB.getInt('nx', 2)
    n_obj_y = OptDB.getInt('ny', n_obj)
    distance = OptDB.getReal('dist', 3)
    distance_x = OptDB.getReal('distx', distance)
    distance_y = OptDB.getReal('disty', distance)
    move_delta = np.array([distance_x, distance_y, 1])
    # field_range: describe a sector area.
    field_range = np.array([[-3, -3, -3], [n_obj_x - 1, n_obj_y - 1, 0] * move_delta + [3, 3, 3]])
    n_grid = np.array([n_obj_x, n_obj_y, 1]) * 20
    problem_kwargs = {'name': 'spherePrb',
                      'matrix_method': matrix_method,
                      'deltaLength': deltaLength,
                      'epsilon': epsilon,
                      'delta': deltaLength * epsilon, # for rs method
                      'd_radia': deltaLength / 2, # for sf method
                      'solve_method': solve_method,
                      'precondition_method': precondition_method,
                      'field_range': field_range,
                      'n_grid': n_grid,
                      'plot': plot,
                      'debug_mode': debug_mode,
                      'fileHandle': fileHandle,
                      'region_type': 'rectangle',
                      'twoPara_n': twoPara_n,
                      'legendre_m': legendre_m,
                      'legendre_k': legendre_k,
                      'radius': radius, 'u': u,
                      'random_velocity': random_velocity,
                      'n_obj_x': n_obj_x,
                      'n_obj_y': n_obj_y,
                      'move_delta': move_delta,
                      'restart': restart,
                      'n_sphere_check': n_sphere_check,
                      'n_node_threshold': n_node_threshold,
                      'getConvergenceHistory': getConvergenceHistory,
                      'pickProblem': pickProblem,
                      'plot_geo': False, }
    # Caller-supplied kwargs take precedence over command-line options.
    for key in main_kwargs:
        problem_kwargs[key] = main_kwargs[key]
    return problem_kwargs
def main_fun(**main_kwargs):
    """Assemble and solve a multi-sphere Stokes-flow problem.

    Builds an n_obj_x * n_obj_y grid of spheres (alternating matrix methods
    from method_dict), solves for the surface forces, writes VTK
    check/output files, and prints the resultant x-force of the last
    sphere normalised by the Stokes drag factor 6*pi*R.

    Returns True on completion.
    """
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    print_case_info(**problem_kwargs)
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    matrix_method = problem_kwargs['matrix_method']
    n_obj_x = problem_kwargs['n_obj_x']
    n_obj_y = problem_kwargs['n_obj_y']
    move_delta = problem_kwargs['move_delta']
    random_velocity = problem_kwargs['random_velocity']
    epsilon = problem_kwargs['epsilon']
    delta = problem_kwargs['delta']
    u = problem_kwargs['u']

    # Matrix methods are assigned to the grid objects round-robin.
    method_dict = ('pf', 'rs')

    # Node count chosen so the surface mesh spacing is ~ deltaLength.
    n = int(16 * radius * radius / deltaLength / deltaLength)
    sphere_geo0 = sphere_geo()  # force geo
    sphere_geo0.create_n(n, radius)
    sphere_velocity = np.array((u, 0, 0, 0, 0, 0))
    if random_velocity:
        sphere_velocity = np.random.sample(6) * u
    else:
        sphere_geo0.set_rigid_velocity(sphere_velocity)
    problem = problem_dic[matrix_method](**problem_kwargs)
    obj_sphere = obj_dic[matrix_method]()
    sphere_geo1 = sphere_geo0.copy()
    if matrix_method in ('pf',):
        # Point-force method: force points live on a slightly offset surface.
        sphere_geo1.create_n(n, radius + deltaLength * epsilon)
    obj_sphere_kwargs = {'matrix_method': matrix_method,
                         'epsilon': epsilon,
                         'delta': delta, }
    obj_sphere.set_data(sphere_geo1, sphere_geo0, name='sphereObj_0_0', **obj_sphere_kwargs)
    for i in range(n_obj_x * n_obj_y):
        # NOTE(review): both indices are derived from n_obj_x; every grid
        # cell is still visited exactly once, but the ix/iy axis naming is
        # only consistent when n_obj_x == n_obj_y -- confirm intent.
        ix = i // n_obj_x
        iy = i % n_obj_x
        # BUG FIX: move_dist was computed twice (identical duplicated line).
        move_dist = np.array([ix, iy, 0]) * move_delta
        obj2 = obj_sphere.copy()
        obj2.move(move_dist)
        if random_velocity:
            sphere_velocity = np.random.sample(6) * u
            obj2.get_u_geo().set_rigid_velocity(sphere_velocity)
        obj2.set_name('sphereObj_%d_%d' % (ix, iy))
        obj2_matrix_method = method_dict[i % len(method_dict)]
        obj2_kwargs = {'matrix_method': obj2_matrix_method,
                       'epsilon': epsilon,
                       'delta': delta, }
        obj2.set_matrix_method(**obj2_kwargs)
        problem.add_obj(obj2)
    # problem.show_f_nodes()
    problem.print_info()
    problem.create_matrix()
    residualNorm = problem.solve()

    # Verify the solution on a finer sphere discretisation.
    geo_check = sphere_geo()  # force geo
    geo_check.create_n(n * 3, radius)
    geo_check.set_rigid_velocity(sphere_velocity)
    obj_check = obj_dic[matrix_method]()
    obj_check.set_data(geo_check, geo_check, name='checkObj', **obj_sphere_kwargs)
    problem.vtk_check(fileHandle + '_check', obj_check)
    problem.vtk_self(fileHandle)
    obj2.vtk(fileHandle)  # NOTE: obj2 is the last sphere created in the loop
    force_sphere = obj2.get_force_x()
    PETSc.Sys.Print('---->>>%s: Resultant at x axis is %f' % (
        str(problem), force_sphere.sum() / (6 * np.pi * radius)))
    return True
# Allow this module to be executed directly as a script.
if __name__ == '__main__':
    main_fun()
|
pcmagic/stokes_flow
|
try_code/try_sample.py
|
Python
|
mit
| 8,533
|
[
"VTK"
] |
8068707b16f638eb7fbeb9f86d836ea82d6c536613c5f40a5b0745f4ee9b5b31
|
<<<<<<< HEAD
<<<<<<< HEAD
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The follow diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing
from multiprocessing import SimpleQueue
from multiprocessing.connection import wait
import threading
import weakref
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
# Maps each queue-management thread to its result queue.  Weak keys let a
# dropped executor (and therefore its thread) be garbage collected.
_threads_queues = weakref.WeakKeyDictionary()
# Flipped to True by the atexit hook so management threads know to exit.
_shutdown = False
def _python_exit():
    """Interpreter-exit hook: wake every management thread, then wait for it."""
    global _shutdown
    _shutdown = True
    # Snapshot the live (thread, queue) pairs so the weak dict cannot mutate
    # underneath us while we iterate.
    threads_and_queues = list(_threads_queues.items())
    # First wake every management thread with a None sentinel...
    for _thread, wakeup_queue in threads_and_queues:
        wakeup_queue.put(None)
    # ...then wait for each of them to finish.
    for thread, _queue in threads_and_queues:
        thread.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
# The call queue is sized max_workers + EXTRA_QUEUED_CALLS in __init__.
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
    """Worker-process main loop: execute _CallItems, report _ResultItems.

    Runs inside each child process.  A None on call_queue is the shutdown
    sentinel; the worker acknowledges it by putting its own PID on
    result_queue so the management thread can reap it.

    Args:
        call_queue: multiprocessing.Queue of _CallItems to execute.
        result_queue: multiprocessing.Queue receiving _ResultItems (or a
            PID int as the shutdown acknowledgement).
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Shutdown sentinel: ack with our PID and exit the process.
            result_queue.put(os.getpid())
            return
        try:
            outcome = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as exc:
            reply = _ResultItem(call_item.work_id, exception=exc)
        else:
            reply = _ResultItem(call_item.work_id, result=outcome)
        result_queue.put(reply)
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker processes.
    This function is run in a local thread.
    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        process: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
    """
    executor = None
    # True when no new work can arrive: interpreter exiting, executor
    # garbage collected, or executor.shutdown() called.
    def shutting_down():
        return _shutdown or executor is None or executor._shutdown_thread
    # Send one shutdown sentinel per live worker, then reap all workers.
    def shutdown_worker():
        # This is an upper bound
        nb_children_alive = sum(p.is_alive() for p in processes.values())
        for i in range(0, nb_children_alive):
            call_queue.put_nowait(None)
        # Release the queue's resources as soon as possible.
        call_queue.close()
        # If .join() is not called on the created processes then
        # some multiprocessing.Queue methods may deadlock on Mac OS X.
        for p in processes.values():
            p.join()
    reader = result_queue._reader
    while True:
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)
        # Wait on the result pipe and every worker sentinel at once; a ready
        # sentinel means that worker died without sending a result.
        sentinels = [p.sentinel for p in processes.values()]
        assert sentinels
        ready = wait([reader] + sentinels)
        if reader in ready:
            result_item = reader.recv()
        else:
            # Mark the process pool broken so that submits fail right now.
            executor = executor_reference()
            if executor is not None:
                executor._broken = True
                executor._shutdown_thread = True
                executor = None
            # All futures in flight must be marked failed
            for work_id, work_item in pending_work_items.items():
                work_item.future.set_exception(
                    BrokenProcessPool(
                        "A process in the process pool was "
                        "terminated abruptly while the future was "
                        "running or pending."
                    ))
                # Delete references to object. See issue16284
                del work_item
            pending_work_items.clear()
            # Terminate remaining workers forcibly: the queues or their
            # locks may be in a dirty state and block forever.
            for p in processes.values():
                p.terminate()
            shutdown_worker()
            return
        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID
            # (avoids marking the executor broken)
            assert shutting_down()
            p = processes.pop(result_item)
            p.join()
            if not processes:
                shutdown_worker()
                return
        elif result_item is not None:
            work_item = pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                # Delete references to object. See issue16284
                del work_item
        # Check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if shutting_down():
            try:
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not pending_work_items:
                    shutdown_worker()
                    return
            except Full:
                # This is not a problem: we will eventually be woken up (in
                # result_queue.get()) and be able to send a sentinel again.
                pass
        # Drop the strong reference so the executor can be collected.
        executor = None
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# indetermined limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class BrokenProcessPool(RuntimeError):
    """Raised when a worker process of a ProcessPoolExecutor terminated
    abruptly while a future was running or pending, leaving the pool
    unusable for further submissions.
    """
class ProcessPoolExecutor(_base.Executor):
    # Executor that runs submitted callables in a pool of worker processes.
    # The pool (worker processes + local queue-management thread) is created
    # lazily on the first submit() call.
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.
        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()
        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers
        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}
        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        # Set by the management thread when a worker dies unexpectedly;
        # submit() then refuses new work.
        self._broken = False
        # Monotonically increasing work id used as _pending_work_items key.
        self._queue_count = 0
        self._pending_work_items = {}
    def _start_queue_management_thread(self):
        # Idempotent: spawns workers and the management thread exactly once.
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            # Register with the atexit machinery so _python_exit can wake us.
            _threads_queues[self._queue_management_thread] = self._result_queue
    def _adjust_process_count(self):
        # Spawn workers until the pool holds _max_workers live processes.
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p
    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)
            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__
    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
=======
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The follow diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing
from multiprocessing import SimpleQueue
from multiprocessing.connection import wait
import threading
import weakref
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
# Maps each queue-management thread to its result queue.  Weak keys let a
# dropped executor (and therefore its thread) be garbage collected.
_threads_queues = weakref.WeakKeyDictionary()
# Flipped to True by the atexit hook so management threads know to exit.
_shutdown = False
def _python_exit():
    """Interpreter-exit hook: wake every management thread, then wait for it."""
    global _shutdown
    _shutdown = True
    # Snapshot the live (thread, queue) pairs so the weak dict cannot mutate
    # underneath us while we iterate.
    threads_and_queues = list(_threads_queues.items())
    # First wake every management thread with a None sentinel...
    for _thread, wakeup_queue in threads_and_queues:
        wakeup_queue.put(None)
    # ...then wait for each of them to finish.
    for thread, _queue in threads_and_queues:
        thread.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
# The call queue is sized max_workers + EXTRA_QUEUED_CALLS in __init__.
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
    """Worker-process main loop: execute _CallItems, report _ResultItems.

    Runs inside each child process.  A None on call_queue is the shutdown
    sentinel; the worker acknowledges it by putting its own PID on
    result_queue so the management thread can reap it.

    Args:
        call_queue: multiprocessing.Queue of _CallItems to execute.
        result_queue: multiprocessing.Queue receiving _ResultItems (or a
            PID int as the shutdown acknowledgement).
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Shutdown sentinel: ack with our PID and exit the process.
            result_queue.put(os.getpid())
            return
        try:
            outcome = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as exc:
            reply = _ResultItem(call_item.work_id, exception=exc)
        else:
            reply = _ResultItem(call_item.work_id, result=outcome)
        result_queue.put(reply)
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker processes.
    This function is run in a local thread.
    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        process: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
    """
    executor = None
    # True when no new work can arrive: interpreter exiting, executor
    # garbage collected, or executor.shutdown() called.
    def shutting_down():
        return _shutdown or executor is None or executor._shutdown_thread
    # Send one shutdown sentinel per live worker, then reap all workers.
    def shutdown_worker():
        # This is an upper bound
        nb_children_alive = sum(p.is_alive() for p in processes.values())
        for i in range(0, nb_children_alive):
            call_queue.put_nowait(None)
        # Release the queue's resources as soon as possible.
        call_queue.close()
        # If .join() is not called on the created processes then
        # some multiprocessing.Queue methods may deadlock on Mac OS X.
        for p in processes.values():
            p.join()
    reader = result_queue._reader
    while True:
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)
        # Wait on the result pipe and every worker sentinel at once; a ready
        # sentinel means that worker died without sending a result.
        sentinels = [p.sentinel for p in processes.values()]
        assert sentinels
        ready = wait([reader] + sentinels)
        if reader in ready:
            result_item = reader.recv()
        else:
            # Mark the process pool broken so that submits fail right now.
            executor = executor_reference()
            if executor is not None:
                executor._broken = True
                executor._shutdown_thread = True
                executor = None
            # All futures in flight must be marked failed
            for work_id, work_item in pending_work_items.items():
                work_item.future.set_exception(
                    BrokenProcessPool(
                        "A process in the process pool was "
                        "terminated abruptly while the future was "
                        "running or pending."
                    ))
                # Delete references to object. See issue16284
                del work_item
            pending_work_items.clear()
            # Terminate remaining workers forcibly: the queues or their
            # locks may be in a dirty state and block forever.
            for p in processes.values():
                p.terminate()
            shutdown_worker()
            return
        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID
            # (avoids marking the executor broken)
            assert shutting_down()
            p = processes.pop(result_item)
            p.join()
            if not processes:
                shutdown_worker()
                return
        elif result_item is not None:
            work_item = pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                # Delete references to object. See issue16284
                del work_item
        # Check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if shutting_down():
            try:
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not pending_work_items:
                    shutdown_worker()
                    return
            except Full:
                # This is not a problem: we will eventually be woken up (in
                # result_queue.get()) and be able to send a sentinel again.
                pass
        # Drop the strong reference so the executor can be collected.
        executor = None
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# indetermined limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class BrokenProcessPool(RuntimeError):
    """Raised when a worker process of a ProcessPoolExecutor terminated
    abruptly while a future was running or pending, leaving the pool
    unusable for further submissions.
    """
class ProcessPoolExecutor(_base.Executor):
    # Executor that runs submitted callables in a pool of worker processes.
    # The pool (worker processes + local queue-management thread) is created
    # lazily on the first submit() call.
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.
        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()
        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers
        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}
        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        # Set by the management thread when a worker dies unexpectedly;
        # submit() then refuses new work.
        self._broken = False
        # Monotonically increasing work id used as _pending_work_items key.
        self._queue_count = 0
        self._pending_work_items = {}
    def _start_queue_management_thread(self):
        # Idempotent: spawns workers and the management thread exactly once.
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            # Register with the atexit machinery so _python_exit can wake us.
            _threads_queues[self._queue_management_thread] = self._result_queue
    def _adjust_process_count(self):
        # Spawn workers until the pool holds _max_workers live processes.
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p
    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)
            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__
    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The follow diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing
from multiprocessing import SimpleQueue
from multiprocessing.connection import wait
import threading
import weakref
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
# Maps each queue-management thread to its result queue.  Weak keys let a
# dropped executor (and therefore its thread) be garbage collected.
_threads_queues = weakref.WeakKeyDictionary()
# Flipped to True by the atexit hook so management threads know to exit.
_shutdown = False
def _python_exit():
    """Interpreter-exit hook: wake every management thread, then wait for it."""
    global _shutdown
    _shutdown = True
    # Snapshot the live (thread, queue) pairs so the weak dict cannot mutate
    # underneath us while we iterate.
    threads_and_queues = list(_threads_queues.items())
    # First wake every management thread with a None sentinel...
    for _thread, wakeup_queue in threads_and_queues:
        wakeup_queue.put(None)
    # ...then wait for each of them to finish.
    for thread, _queue in threads_and_queues:
        thread.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
# The call queue is sized max_workers + EXTRA_QUEUED_CALLS in __init__.
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
    """Worker-process main loop: execute _CallItems, report _ResultItems.

    Runs inside each child process.  A None on call_queue is the shutdown
    sentinel; the worker acknowledges it by putting its own PID on
    result_queue so the management thread can reap it.

    Args:
        call_queue: multiprocessing.Queue of _CallItems to execute.
        result_queue: multiprocessing.Queue receiving _ResultItems (or a
            PID int as the shutdown acknowledgement).
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Shutdown sentinel: ack with our PID and exit the process.
            result_queue.put(os.getpid())
            return
        try:
            outcome = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as exc:
            reply = _ResultItem(call_item.work_id, exception=exc)
        else:
            reply = _ResultItem(call_item.work_id, result=outcome)
        result_queue.put(reply)
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker processes.
    This function is run in a local thread.
    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        process: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
    """
    executor = None
    def shutting_down():
        # True once the interpreter is finalizing (_shutdown), the executor
        # was garbage collected, or shutdown() was called on it.
        return _shutdown or executor is None or executor._shutdown_thread
    def shutdown_worker():
        # This is an upper bound
        nb_children_alive = sum(p.is_alive() for p in processes.values())
        for i in range(0, nb_children_alive):
            # One None sentinel per live worker tells it to exit cleanly.
            call_queue.put_nowait(None)
        # Release the queue's resources as soon as possible.
        call_queue.close()
        # If .join() is not called on the created processes then
        # some multiprocessing.Queue methods may deadlock on Mac OS X.
        for p in processes.values():
            p.join()
    reader = result_queue._reader
    while True:
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)
        # Block until either a result arrives or a worker process dies
        # (its sentinel becomes ready before a result does).
        sentinels = [p.sentinel for p in processes.values()]
        assert sentinels
        ready = wait([reader] + sentinels)
        if reader in ready:
            result_item = reader.recv()
        else:
            # Mark the process pool broken so that submits fail right now.
            executor = executor_reference()
            if executor is not None:
                executor._broken = True
                executor._shutdown_thread = True
                executor = None
            # All futures in flight must be marked failed
            for work_id, work_item in pending_work_items.items():
                work_item.future.set_exception(
                    BrokenProcessPool(
                        "A process in the process pool was "
                        "terminated abruptly while the future was "
                        "running or pending."
                    ))
                # Delete references to object. See issue16284
                del work_item
            pending_work_items.clear()
            # Terminate remaining workers forcibly: the queues or their
            # locks may be in a dirty state and block forever.
            for p in processes.values():
                p.terminate()
            shutdown_worker()
            return
        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID
            # (avoids marking the executor broken)
            assert shutting_down()
            p = processes.pop(result_item)
            p.join()
            if not processes:
                shutdown_worker()
                return
        elif result_item is not None:
            work_item = pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                # Delete references to object. See issue16284
                del work_item
        # Check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if shutting_down():
            try:
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not pending_work_items:
                    shutdown_worker()
                    return
            except Full:
                # This is not a problem: we will eventually be woken up (in
                # result_queue.get()) and be able to send a sentinel again.
                pass
        # Drop the strong reference so the executor can be collected while
        # this thread waits on the next iteration.
        executor = None
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# indetermined limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class BrokenProcessPool(RuntimeError):
    """
    Raised when a process in a ProcessPoolExecutor terminated abruptly
    while a future was in the running state.

    Once raised, the owning executor is marked broken and every further
    submit() fails with this exception.
    """
class ProcessPoolExecutor(_base.Executor):
    """Executor that runs calls asynchronously in a pool of worker
    processes, fed through a multiprocessing call queue and drained by a
    local queue-management thread."""
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.
        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()
        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers
        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        # Created lazily on the first submit().
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}
        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        # Monotonically increasing id assigned to each submitted work item.
        self._queue_count = 0
        self._pending_work_items = {}
    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            # Daemon thread; _python_exit() joins it via _threads_queues.
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue
    def _adjust_process_count(self):
        # Spawn workers until the pool is at full strength.
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p
    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)
            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__
    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
ArcherSys/ArcherSys
|
Lib/concurrent/futures/process.py
|
Python
|
mit
| 52,862
|
[
"Brian"
] |
ce1d5987f70cb25db801023dbecbfc07af4aa428801a3d05e1520a1b324e93be
|
#!/usr/bin/python3
"""This is a small command line utility to grab pictures from a web
site/gui to view them locally. Which links to follow and what to
download can be configured using regular expressions.
.. module:: pic-grab
.. moduleauthor:: Sebastian Schmittner <sebastian@schmittner.pw>
Copyright 2017 Sebastian Schmittner
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import hashlib
import json
import logging
import os
import requests
import re
import sys
from urllib.parse import urlparse, urljoin
from collections import deque
class Grabber(object):
    """Crawls web pages starting from configured urls, following links
    whose url matches a "follow" regex and saving files whose url matches
    a "download" regex."""
    def __init__(self):
        self.url_follow_queue = deque()
        "pool of urls to visit next"
        self.visited_urls = set()
        "do not visit those again"
        self.session = requests.Session()
        "use a persistant session"
        self.config = {}
        """Holds the configuration as loaded from
        a json file, command line or defaults."""
    def process_found_url(self, url):
        """
        Check whether the url should be followed or downloaded.
        If so, add it to the queue or safe the file.
        """
        if url in self.visited_urls:
            logging.debug("already visited: %s", url)
            return
        self.visited_urls.add(url)
        logging.debug("processing %s", url)
        # "no_follow" patterns veto any matching "follow" pattern.
        may_follow = True
        for reg in self.config["no_follow"]:
            if re.match(reg, url):
                logging.debug("will NOT follow: %s (matches %s)", url, reg)
                may_follow = False
                break
        if may_follow:
            for reg in self.config["follow"]:
                if re.match(reg, url):
                    self.url_follow_queue.append(url)
                    logging.debug("will follow: %s (matches %s)", url, reg)
                    break
        for reg in self.config["download"]:
            if re.match(reg, url):
                filename = os.path.basename(urlparse(url).path)
                logging.debug("url %s eligible for download (matches %s)", url, reg)
                # os might have filename length restrictions
                if len(filename) > 64:
                    # Shorten by hashing the stem, keeping the extension.
                    name, ext = os.path.splitext(filename)
                    filename = hashlib.md5(name.encode()).hexdigest() + ext
                out_path = self.config["target"] + "/" + filename
                # Skip files already present in any configured directory.
                for directory in self.config["ignore_duplicates_in"]:
                    if os.path.isfile(directory + "/" + filename):
                        logging.warning("File %s exists. Skipping.", filename)
                        return
                logging.info("Downloading %s", url)
                result = self.session.get(url)
                if not result.ok:
                    logging.error("Error fetching file %s.", url)
                    return
                with open(out_path, 'wb') as out_file:
                    out_file.write(result.content)
    def visit_next_url(self):
        """
        Pop the next url, retrieve it and scan the content for further links.
        """
        url = self.url_follow_queue.popleft()
        r = self.session.get(url)
        # find more urls
        for m in re.finditer(
                r"""(https?://[^\s<>]+)|href=['"]([^"']+)|src=['"]([^"']+)""",
                r.text):
            for g in m.groups():
                if g:
                    logging.debug("raw link %s", g)
                    # Resolve relative links against the current page.
                    new_url = urljoin(url, g)
                    logging.debug("corrected link %s", new_url)
                    if urlparse(new_url).netloc != urlparse(url).netloc:
                        logging.debug("netloc change")
                        if not self.config["allow_netloc_change"]:
                            logging.debug("not following to different netloc")
                            continue
                    self.process_found_url(new_url)
def main():
    """Parse the command line (and an optional JSON config file), then
    crawl the configured start urls, downloading matching files until the
    queue is exhausted or the user interrupts.
    """
    import argparse
    logger_cfg = {
        "level":
        logging.INFO,
        "format":
        "%(asctime)s %(funcName)s (%(lineno)d) [%(levelname)s]: %(message)s"
    }
    parser = argparse.ArgumentParser(
        description="wget like website mirroring.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-u",
        "--url",
        nargs="*",
        help="Url(s) to start the traversal.")
    parser.add_argument(
        "-d",
        "--download",
        nargs="*",
        help="regex(s) for files to download.",
        default=[r".*\.jpg"])
    parser.add_argument(
        "-f",
        "--follow",
        nargs="*",
        help="regex(s) for urls to follow.",
        default=[r".*\.html", r".*/"])
    parser.add_argument(
        "-i",
        "--ignore-duplicates-in",
        nargs="*",
        help="If a file with the same name exists in one of these folders, skip it..",
        default=["fetched/"])
    parser.add_argument(
        "-n",
        "--no-follow",
        nargs="*",
        help="regex(s) for urls to NOT follow (takes precedence over follow).",
        default=[r".*\.jpg"])
    parser.add_argument(
        "-t",
        "--target",
        help="The directory to hold the resulting files.",
        default="fetched/")
    parser.add_argument(
        "-a",
        "--allow-netloc-change",
        help="If given (or set to True in config file) follow links" +
        " to network locations different from the starting url.",
        action='store_true')
    parser.add_argument(
        "-l",
        "--log",
        help="Set the log level.",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        default="INFO")
    parser.add_argument(
        "-c",
        "--config",
        help="Read config from JSON file."
        + " For the url, the command line takes precedence."
        + " For all other arguments, the config file wins.")
    args = parser.parse_args()
    logger_cfg["level"] = getattr(logging, args.log)
    logging.basicConfig(**logger_cfg)
    print("Log messages above level: {}".format(logger_cfg["level"]))
    grabber = Grabber()
    # Precedence: config file first, then the command-line url overrides,
    # then any unset key falls back to the CLI value/default.
    if args.config:
        with open(args.config, "r") as cfg_file:
            grabber.config = json.load(cfg_file)
    if args.url:
        grabber.config["url"] = args.url
    if not grabber.config.get("url", None):
        logging.critical("No base url(s) given.")
        parser.print_help()
        sys.exit(1)
    else:
        logging.info("url: '%s'", grabber.config["url"])
    for arg, val in vars(args).items():
        if not grabber.config.get(arg, None):
            grabber.config[arg] = val
    # ensure that the target does not contain a trailing slash
    if grabber.config["target"][-1:] == "/":
        grabber.config["target"] = grabber.config["target"][:-1]
    if grabber.config["target"] not in grabber.config["ignore_duplicates_in"]:
        grabber.config["ignore_duplicates_in"].append(grabber.config["target"])
    # convert to abs path and mkdir
    grabber.config["target"] = os.path.abspath(grabber.config["target"])
    if not os.path.isdir(grabber.config["target"]):
        os.mkdir(grabber.config["target"])
    grabber.url_follow_queue = deque(grabber.config["url"])
    logging.info("Starting link tree traversal at %s",
                 grabber.url_follow_queue)
    print("\nPress ctrl+c to stop.\n")
    # main loop
    while grabber.url_follow_queue:
        try:
            grabber.visit_next_url()
        except requests.exceptions.ConnectionError as ex:
            # Log and keep crawling the remaining queue.
            logging.error("Connection error: %s", ex)
            # goto main
if __name__ == "__main__":
    # Entry point: allow a clean ctrl+c abort of the crawl.
    try:
        main()
    except KeyboardInterrupt:
        print("\n\nStopped")
        exit(0)
|
Echsecutor/pic-grab
|
main.py
|
Python
|
gpl-3.0
| 8,444
|
[
"VisIt"
] |
1dc8b156faa3d05ca6f547dce3e15e5c92dc4e2f68bd4ee1a15d2a0f40931393
|
#!/usr/bin/env python
import sys
sys.path.append( '.' )
from optparse import OptionParser
from sfepy.base.base import *
from sfepy.fem import gen_block_mesh
usage = """%prog [options]
Block mesh generator.
"""
help = {
'filename' :
'output file name [default: %default]',
'dims' :
'dimensions of the block [default: %default]',
'shape' :
'shape (counts of nodes in x, y, z) of the block [default: %default]',
'centre' :
'centre of the block [default: %default]',
}
##
# c: 19.06.2008, r: 19.06.2008
def main():
parser = OptionParser( usage = usage, version = "%prog" )
parser.add_option( "-o", "", metavar = 'filename',
action = "store", dest = "output_filename",
default = 'out.vtk', help = help['filename'] )
parser.add_option( "-d", "--dims", metavar = 'dims',
action = "store", dest = "dims",
default = '[1.0, 1.0, 1.0]', help = help['dims'] )
parser.add_option( "-s", "--shape", metavar = 'shape',
action = "store", dest = "shape",
default = '[11, 11, 11]', help = help['shape'] )
parser.add_option( "-c", "--centre", metavar = 'centre',
action = "store", dest = "centre",
default = '[0.0, 0.0, 0.0]', help = help['centre'] )
(options, args) = parser.parse_args()
dims = eval( "nm.array( %s, dtype = nm.float64 )" % options.dims )
shape = eval( "nm.array( %s, dtype = nm.int32 )" % options.shape )
centre = eval( "nm.array( %s, dtype = nm.float64 )" % options.centre )
print dims
print shape
print centre
mesh = gen_block_mesh(dims, shape, centre, name=options.output_filename)
mesh.write( options.output_filename, io = 'auto' )
if __name__ == '__main__':
main()
|
olivierverdier/sfepy
|
script/blockgen.py
|
Python
|
bsd-3-clause
| 1,856
|
[
"VTK"
] |
13890a82d3cab0a231f750aebe572d9e73235f3fc41a07f4fa1fce04c935460c
|
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from .fake_webapp import EXAMPLE_APP
from splinter.request_handler.status_code import HttpResponseError
class StatusCodeTest(object):
    """HTTP status-code checks, mixed into a unittest.TestCase that
    provides self.browser (a splinter browser) and the standard
    assertion methods."""
    def test_should_visit_an_absent_page_and_get_an_404_error(self):
        # Visiting a missing page must raise instead of returning quietly.
        with self.assertRaises(HttpResponseError):
            self.browser.visit(EXAMPLE_APP + "this_page_does_not_exists")
    def test_should_visit_index_of_example_app_and_get_200_status_code(self):
        self.browser.visit(EXAMPLE_APP)
        self.assertEqual(200, self.browser.status_code)
    def test_should_be_able_to_print_status_code_with_reason(self):
        # str() of the status object includes the HTTP reason phrase.
        self.browser.visit(EXAMPLE_APP)
        self.assertEqual('200 - OK', str(self.browser.status_code))
|
lrowe/splinter
|
tests/status_code.py
|
Python
|
bsd-3-clause
| 867
|
[
"VisIt"
] |
9ab776a244b7953329f07e12610bdbbd424c8167e4a12dd5bab982ec89c92366
|
#!/usr/bin/env python3
"""Perform quality trimming with the same algorithm as
bwa_trim_read() in bwaseqio.c, BWA. For Solexa quliaty,
the scores are converted to Phred quality for trimming.
Formula to convert Solexa quality to Phred quality is:
Phred = 10 * log_10(1 + 10 ** (Solexa / 10.0))
Formulas to calculate Phred and Solexa quality scores
from sequencing error probability are:
Phred = -10 * log_10(P)
Solexa = -10 * log_10(P / (1 - P))
"""
import sys
import re
import os.path
import signal
import math
from argparse import ArgumentParser
cur = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(cur))
from dnapilib.io_utils import get_file_obj, fastq_record
def solexa_to_phred(x):
    """Convert a Solexa quality score to the nearest integer Phred score.

    Uses Phred = 10 * log10(1 + 10 ** (Solexa / 10)).
    """
    phred = 10 * math.log10(1 + 10 ** (x / 10.0))
    return int(round(phred))
def illumina_64B(x):
    """Decode an Illumina 1.5+ score: the 'B' marker (value 2) means
    "unreliable" and maps to 0; all other scores pass through."""
    return 0 if x == 2 else x
def illumina_33(x):
    """Identity decoder for Sanger / Illumina 1.8+ (Phred+33) scores."""
    return x
def calc_qual_score(p, solexa):
    """Return the integer quality-score cutoff for error probability p.

    Phred scale:  Q = -10 * log10(p)
    Solexa scale: Q = -10 * log10(p / (1 - p)), then converted to Phred
    via solexa_to_phred() (formulas from the module docstring).

    Args:
        p: sequencing error probability, 0 < p <= 1.
        solexa: if true, compute a Solexa score and convert it to Phred.
    """
    if solexa:
        # Bug fix: the original computed the denominator as
        # `1.0 - p if not p else 1`, which evaluates to 1 for every
        # nonzero p, silently reducing the Solexa formula to the Phred
        # one.  Use the real 1 - p, guarding only the p == 1 pole.
        denom = 1.0 - p if p != 1.0 else 1.0
        return solexa_to_phred(int(-10 * math.log10(p / denom)))
    else:
        return int(-10 * math.log10(p))
def qual_trim(args):
    """Quality-trim reads from args.FASTQ and print surviving records.

    Implements the same 3'-end trimming as bwa_trim_read() in BWA's
    bwaseqio.c: scanning from the 3' end, the read is cut at the position
    maximising the running sum of (cutoff - quality).  Reads that end up
    shorter than args.l, or that consist only of Ns, are dropped.

    Raises:
        Exception: on invalid -b/-l/-p/-q parameter combinations.
    """
    # Pick the per-base quality decoder for the declared encoding.
    if args.solexa:
        args.b = 64
        func = solexa_to_phred
    elif args.illumina5:
        func = illumina_64B
    else:
        func = illumina_33
    # Validate parameters before touching the input stream.
    if args.b not in (33, 64):
        raise Exception("wrong quality score base")
    if args.l < 1:
        raise Exception("specify longer read length")
    if args.p < 0 or args.p > 1:
        raise Exception("bad error probability cutoff")
    if not args.solexa and args.q < 0:
        raise Exception("bad quality score cutoff")
    # An explicit -q score wins over the probability-derived cutoff.
    if args.q:
        cutoff = args.q
    else:
        cutoff = calc_qual_score(args.p, args.solexa)
    base = args.b
    minlen = args.l
    ns = re.compile('N', re.IGNORECASE)
    fastqs = fastq_record(get_file_obj(args.FASTQ))
    for read in fastqs:
        # read: [header, sequence, separator, quality]
        read = read.rstrip().split("\n")
        qual = read[3]
        s, max_s = 0, 0
        max_i = len(read[3])
        if minlen > max_i:
            continue
        # BWA-style scan from the 3' end for the best trimming point.
        for i in reversed(range(max_i)):
            q = func(ord(qual[i]) - base)
            s += cutoff - q
            if s < 0:
                break
            if s > max_s:
                max_s, max_i = s, i
        read[1] = read[1][:max_i]
        read[3] = read[3][:max_i]
        # Drop reads that are all Ns or now shorter than the minimum.
        n_num = len(ns.findall(read[1]))
        if n_num < len(read[1]) and len(read[1]) >= minlen:
            print("\n".join(read))
if __name__ == "__main__":
    # Restore default SIGPIPE handling so piping into head etc. exits quietly.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    prog = os.path.basename(sys.argv[0])
    if sys.version_info.major <= 2:
        raise ValueError("{} requires python version 3 or higher".format(prog))
    parser = ArgumentParser(
        description="Perform quality trimming for single-end reads.")
    parser.add_argument("FASTQ",
                        type=str,
                        help="including stdin or compressed file {zip,gz,tar,bz}")
    parser.add_argument("-b",
                        metavar="BASE",
                        type=int, default=33,
                        help="ASCII-encoded quality offset, e.g. 33 or 64 (default: %(default)s)")
    parser.add_argument("-p",
                        metavar="PROB",
                        type=float, default=0.1,
                        help="error probability cutoff (default: %(default)s)")
    parser.add_argument("-q",
                        metavar="SCORE",
                        type=int, default=0,
                        help="quality score cutoff (default: '-p 0.1')")
    parser.add_argument("-l",
                        type=int, default=16,
                        metavar="BP",
                        help="minimum read length in bp (default: %(default)s)")
    parser.add_argument("--illumina5",
                        action="store_true",
                        help="Illumina 1.5+ encoding marked with 'B'")
    parser.add_argument("--solexa",
                        action="store_true",
                        help="Solexa encoding")
    args = parser.parse_args()
    # Report failures as "<prog>: error: <msg>" and exit non-zero.
    try:
        qual_trim(args)
    except KeyboardInterrupt: pass
    except Exception as e:
        sys.exit(prog + ": error: " + str(e))
|
jnktsj/DNApi
|
utils/qual_trim.py
|
Python
|
mit
| 3,943
|
[
"BWA"
] |
7c40250043f0cf8dbdc977b12894c5cfcabc3fe38bc35d84279e965b496f88b0
|
"""
Includes various submodular functions used throughout the submodular examples.
Note that all functions are of the form F(X, A, args), where args is a
dictionary containing additional arguments needed by a particular function.
==============
Copyright Info
==============
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright Brian Dolhansky 2014
bdolmail@gmail.com
"""
import numpy as np
from ml_functions import safe_binary_entropy
"""
The information gain of a subset of variables X_A \subset X, defined as:
F(A) = IG(X_A, Y) = H(Y) - H(Y | X)
Equivalent to:
\sum_{x_A} p(x_A) H( p(Y | X_A) )
This function assumes X and Y are 0/1 valued, such as for document
classification where we have a dictionary of words as features, and a 0/1
label of whether or not a given document belongs to the target class.
Also note that this function is normalized, i.e. F(null set) = 0.
Ref. Krause, Guestrin (http://submodularity.org/submodularity-slides.pdf)
"""
def information_gain_nb(X, A, args):
    """Information gain F(A) = sum_j IG(X_j, Y) over the selected
    features, for 0/1-valued features and labels.

    Args:
        X: binary feature matrix, one row per example (indexed/summed like
           a scipy sparse matrix -- note the .todense() calls below).
        A: set of selected feature (column) indices.
        args: dict with key 'Y', the 0/1 label vector.

    Returns:
        The summed information gain; 0.0 for the empty set (normalized).
    """
    if len(A) == 0:
        return 0.0
    Y = args['Y']
    A_ind = list(A)
    # Prior entropy H(Y).
    H = safe_binary_entropy(np.mean(Y))
    # p(x_j = 1) and the per-column counts of ones / zeros.
    px = X[:, A_ind].mean(axis=0)
    sum_x = X[:, A_ind].sum(axis=0).astype(float)
    sum_notx = X.shape[0] - sum_x
    # Empirical p(y=1 | x_j = 1) and p(y=1 | x_j = 0).
    y_given_x = ((X[:, A_ind] == 1).todense() & (Y == 1)[:, None])
    y_given_notx = ((X[:, A_ind] != 1).todense() & (Y == 1)[:, None])
    py_given_x = y_given_x.sum(axis=0) / sum_x
    py_given_notx = y_given_notx.sum(axis=0) / sum_notx
    # Conditional entropy H(Y | X_j), weighted by p(x_j).
    cond_H = np.multiply(px, safe_binary_entropy(py_given_x)) \
            + np.multiply((1-px), safe_binary_entropy(py_given_notx))
    IG = H-cond_H
    return IG.sum()
"""
A simple submodular function which is defined as:
f(A) = |X_A|
"""
def cardinality(X, A, args):
    """Modular (hence trivially submodular) set function f(A) = |A|.

    X and args are accepted only for interface compatibility with the
    other submodular functions in this module; both are ignored.
    """
    return len(A)
|
bdol/bdol-ml
|
submodularity/utils/submodular_functions.py
|
Python
|
lgpl-3.0
| 2,354
|
[
"Brian"
] |
eb11e26b03cc3645cdfbfdcdf0d6fdce05d52f46d0dee99d3685f8f1c1e84ada
|
# LOFAR IMAGING PIPELINE
#
# BBS Source Catalogue List
# Bart Scheers, 2011
# L.H.A.Scheers@uva.nl
# ------------------------------------------------------------------------------
import sys
import string
import numpy as np
import monetdb.sql as db
import logging
# log10 of each catalogue's observing frequency relative to the 60 MHz
# reference used for the spectral fits below: VLSS 74 MHz, WENSS main
# 325 MHz, WENSS pole 352 MHz, NVSS 1400 MHz.
V_FREQ = np.log10(74.0/60.0)
WM_FREQ = np.log10(325.0/60.0)
WP_FREQ = np.log10(352.0/60.0)
N_FREQ = np.log10(1400.0/60.0)
def subquery_catalog(cat_id, ra_central, decl_central, fov_radius,
                     limit_src_type=False):
    """
    Retrieve data for a field of view from a given catalog.

    Returns SQL (a string) selecting id, name, position (+errors), shape
    and averaged integrated flux of all catalogedsources of catalogue
    cat_id inside a cone of fov_radius degrees around
    (ra_central, decl_central).  All angles are in degrees.
    """
    # Optionally restrict to point ('S') and component ('C') sources.
    if limit_src_type:
        src_type = " AND (src_type = 'S' OR src_type = 'C')"
    else:
        src_type = ""
    # The zone/decl/ra BETWEEN predicates narrow the search cheaply
    # (index-friendly); the final dot product of the Cartesian unit
    # vectors applies the exact circular field-of-view cut.
    return """SELECT catsrcid
                    ,catsrcname
                    ,ra
                    ,decl
                    ,ra_err
                    ,decl_err
                    ,pa
                    ,major
                    ,minor
                    ,x
                    ,y
                    ,z
                    ,i_int_avg
                    ,i_int_avg_err
                FROM catalogedsources
               WHERE cat_id = %s %s
                 AND zone BETWEEN CAST(FLOOR(CAST(%s AS DOUBLE) - %s) AS INTEGER)
                              AND CAST(FLOOR(CAST(%s AS DOUBLE) + %s) AS INTEGER)
                 AND decl BETWEEN CAST(%s AS DOUBLE) - %s
                              AND CAST(%s AS DOUBLE) + %s
                 AND ra BETWEEN CAST(%s AS DOUBLE) - alpha(%s, %s)
                            AND CAST(%s AS DOUBLE) + alpha(%s, %s)
                 AND x * COS(RADIANS(%s)) * COS(RADIANS(%s))
                   + y * COS(RADIANS(%s)) * SIN(RADIANS(%s))
                   + z * SIN(RADIANS(%s)) > COS(RADIANS(%s))""" % (
        cat_id, src_type,
        decl_central, fov_radius, decl_central, fov_radius,
        decl_central, fov_radius, decl_central, fov_radius,
        ra_central, fov_radius, decl_central, ra_central, fov_radius, decl_central,
        decl_central, ra_central, decl_central, ra_central, decl_central, fov_radius)
def subquery_catalog_association(cat_id, ra_central, decl_central, fov_radius,
                                 assoc_theta, deRuiter_reduced,
                                 limit_src_type=False):
    """
    Get association information for two catalogs.

    Returns SQL cross-matching VLSS sources (cat_id 4) in the field of
    view against catalogue cat_id: pairs closer than assoc_theta degrees
    whose (squared, reduced) de Ruiter distance -- the positional offset
    weighted by the combined position errors -- is below
    deRuiter_reduced.
    """
    return """SELECT c1.catsrcid AS v_catsrcid
                    ,c2.catsrcid AS catsrcid
                    ,c2.i_int_avg
                    ,c2.i_int_avg_err
                    ,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
                                                 + (c1.y - c2.y) * (c1.y - c2.y)
                                                 + (c1.z - c2.z) * (c1.z - c2.z)
                                                 ) / 2)
                                   ) AS assoc_distance_arcsec
                    ,SQRT(((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
                          * (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
                          / (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
                         + ((c1.decl - c2.decl) * (c1.decl - c2.decl)
                           / (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
                         ) AS assoc_r
                FROM (%s) c1
                    ,(%s) c2
               WHERE c1.x * c2.x + c1.y * c2.y + c1.z * c2.z > COS(RADIANS(%s))
                 AND (((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
                      * (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
                      / (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
                     + ((c1.decl - c2.decl) * (c1.decl - c2.decl)
                       / (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))) < %s""" % (
        subquery_catalog(4, ra_central, decl_central, fov_radius),
        subquery_catalog(cat_id, ra_central, decl_central, fov_radius, limit_src_type),
        assoc_theta, deRuiter_reduced
        )
def expected_fluxes_in_fov(conn, ra_central, decl_central, fov_radius,
                           assoc_theta, bbsfile,
                           storespectraplots=False, deruiter_radius=0.):
    """Search for VLSS, WENSS and NVSS sources that
    are in the given FoV. The FoV is set by its central position
    (ra_central, decl_central) out to a radius of fov_radius.
    The query looks for cross-matches around the sources, out
    to a radius of assoc_theta.
    All units are in degrees.
    deruiter_radius is a measure for the association uncertainty that takes
    position errors into account (see thesis Bart Scheers). If not given
    as a positive value, it is read from the TKP config file. If not
    available, it defaults to 3.717.
    The query returns all vlss sources (id) that are in the FoV.
    If so, the counterparts from other catalogues are returned as well
    (also their ids).

    The result is written to bbsfile in BBS sky-model format: one row
    per VLSS source with a flux at 60 MHz extrapolated from a polynomial
    fit to the available catalogue fluxes in log-log space.
    """
    DERUITER_R = deruiter_radius
    if DERUITER_R <= 0:
        # Fall back to the TKP config value, then to the 3.717 default.
        try:
            from tkp.config import config
            DERUITER_R = config['source_association']['deruiter_radius']
            print "DERUITER_R =",DERUITER_R
        except:
            DERUITER_R=3.717
    if ra_central + alpha(fov_radius, decl_central) > 360:
        "This will be implemented soon"
        raise BaseException("ra = %s > 360 degrees, not implemented yet" % str(ra_central + alpha(fov_radius, decl_central)))
    skymodel = open(bbsfile, 'w')
    header = "# (Name, Type, Ra, Dec, I, Q, U, V, ReferenceFrequency='60e6', SpectralIndex='[0.0]', MajorAxis, MinorAxis, Orientation) = format\n\n"
    skymodel.write(header)
    # This is dimensionless search radius that takes into account
    # the ra and decl difference between two sources weighted by
    # their positional errors.
    deRuiter_reduced = (DERUITER_R/3600)**2.
    try:
        cursor = conn.cursor()
        # VLSS sources in the FoV (t0), outer-joined with their WENSS-main
        # (t1), WENSS-pole (t2) and NVSS (t3) associations.
        query = """
        SELECT t0.v_catsrcid
              ,t0.catsrcname
              ,t1.catsrcid as wm_catsrcid
              ,t2.catsrcid as wp_catsrcid
              ,t3.catsrcid as n_catsrcid
              ,t0.i_int_avg as v_flux
              ,t1.i_int_avg as wm_flux
              ,t2.i_int_avg as wp_flux
              ,t3.i_int_avg as n_flux
              ,t0.i_int_avg_err AS v_flux_err
              ,t1.i_int_avg_err AS wm_flux_err
              ,t2.i_int_avg_err AS wp_flux_err
              ,t3.i_int_avg_err AS n_flux_err
              ,t1.assoc_distance_arcsec as wm_assoc_distance_arcsec
              ,t1.assoc_r as wm_assoc_r
              ,t2.assoc_distance_arcsec as wp_assoc_distance_arcsec
              ,t2.assoc_r as wp_assoc_r
              ,t3.assoc_distance_arcsec as n_assoc_distance_arcsec
              ,t3.assoc_r as n_assoc_r
              ,t0.pa
              ,t0.major
              ,t0.minor
              ,t0.ra
              ,t0.decl
          FROM (SELECT c1.catsrcid AS v_catsrcid
                      ,c1.catsrcname
                      ,c1.ra
                      ,c1.decl
                      ,c1.i_int_avg
                      ,c1.i_int_avg_err
                      ,c1.pa
                      ,c1.major
                      ,c1.minor
                  FROM (%s) c1
               ) t0
               FULL OUTER JOIN
               (%s) t1
            ON t0.v_catsrcid = t1.v_catsrcid
               FULL OUTER JOIN
               (%s) t2
            ON t0.v_catsrcid = t2.v_catsrcid
               FULL OUTER JOIN
               (%s) t3
            ON t0.v_catsrcid = t3.v_catsrcid
        """
        q1 = query % (
            subquery_catalog(4, ra_central, decl_central, fov_radius),
            subquery_catalog_association(5, ra_central, decl_central,
                fov_radius, assoc_theta, deRuiter_reduced,
                True),
            subquery_catalog_association(6, ra_central, decl_central,
                fov_radius, assoc_theta, deRuiter_reduced,
                True),
            subquery_catalog_association(3, ra_central, decl_central,
                fov_radius, assoc_theta, deRuiter_reduced,
                False)
            )
        print q1
        # NOTE(review): execution is disabled -- the query is only printed,
        # results is always None, so the loop below never runs.  Confirm
        # whether this is intentional (debug state) before relying on it.
        #cursor.execute(q1)
        results = None #cursor.fetchone()
        i = 0
        while results:
            results = cursor.fetchone()
            vlss_catsrcid, vlss_name, wenssm_catsrcid, wenssp_catsrcid, nvss_catsrcid, \
            v_flux, wm_flux, wp_flux, n_flux, v_flux_err, wm_flux_err, wp_flux_err, n_flux_err, \
            wm_assoc_distance_arcsec, wm_assoc_r, \
            wp_assoc_distance_arcsec, wp_assoc_r, \
            n_assoc_distance_arcsec, n_assoc_r, \
            pa, major, minor, ra, decl = results
            i = i + 1
            spectrumfiles = []
            print "\ni = ", i
            # Here we check the cases for the degree of the polynomial spectral index fit
            print vlss_catsrcid, wenssm_catsrcid, wenssp_catsrcid, nvss_catsrcid
            bbsrow = "%s, %s, %s, %s, " % (vlss_catsrcid, wenssm_catsrcid, wenssp_catsrcid, nvss_catsrcid)
            # According to Jess, only sources that have values for all
            # three are considered as GAUSSIAN
            if pa is not None and major is not None and minor is not None:
                #print "Gaussian:", pa, major, minor
                shape = "GAUSSIAN, "
            else:
                #print "POINT"
                shape = "POINT, "
            bbsrow += "%s, %s, %s, " % (shape, ra2bbshms(ra), decl2bbsdms(decl))
            # Stokes I id default, so filed is empty
            # Collect (log frequency, log flux) pairs for the fit.
            lognu = []
            logflux = []
            lognu.append(V_FREQ)
            logflux.append(np.log10(v_flux))
            if wenssm_catsrcid is not None:
                lognu.append(WM_FREQ)
                logflux.append(np.log10(wm_flux))
            if wenssp_catsrcid is not None:
                lognu.append(WP_FREQ)
                logflux.append(np.log10(wp_flux))
            if nvss_catsrcid is not None:
                lognu.append(N_FREQ)
                logflux.append(np.log10(n_flux))
            # Here we write the expected flux values at 60 MHz, and the fitted spectral index and
            # and curvature term
            if len(lognu) == 1:
                # Single flux point: assume a -0.7 spectral index.
                bbsrow += str(round(10**(np.log10(v_flux) + 0.7 * V_FREQ), 2)) + ", , , , , [-0.7]"
            elif len(lognu) == 2 or (len(lognu) == 3 and nvss_catsrcid is None):
                # Two usable points: linear fit in log-log space.
                p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 1))
                # Default reference frequency is reported, so we leave it empty here;
                # Catalogues just report on Stokes I, so others are empty.
                bbsrow += "%s, , , , , [%s]" % (str(round(10**p[0], 4)), str(round(p[1],4)))
            elif (len(lognu) == 3 and nvss_catsrcid is not None) or len(lognu) == 4:
                # Enough lever arm: quadratic fit (index + curvature).
                p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 2))
                # Default reference frequency is reported, so we leave it empty here
                bbsrow += "%s, , , , , [%s, %s]" % (str(round(10**p[0], 4)), str(round(p[1],4)), str(round(p[2],4)))
            if storespectraplots:
                spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "spectrum_%s.eps" % vlss_name)
                spectrumfiles.append(spectrumfile)
            if pa is not None and major is not None and minor is not None:
                # Gaussian source:
                bbsrow += ", %s, %s, %s" % (str(round(major, 2)), str(round(minor, 2)), str(round(pa, 2)))
            #print bbsrow
            skymodel.write(bbsrow + '\n')
        if storespectraplots:
            print "Spectra available in:", spectrumfiles
        skymodel.close()
        print "Sky model stored in source table:", bbsfile
    except db.Error, e:
        logging.warn("Failed on query nr %s; for reason %s" % (query, e))
        raise
    finally:
        cursor.close()
def plotSpectrum(x, y, p, f):
    """Plot catalogue fluxes (x, y in log-log space) together with the
    fitted polynomial p, save the figure to filename f and return f.

    NOTE(review): this function uses pylab, which is not imported in this
    module's header -- calling it raises NameError unless pylab is
    provided elsewhere; confirm and add the import.
    """
    expflux = "Exp. flux: " + str(round(10**p(0),3)) + " Jy"
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    # Enlarge the tick labels on both axes.
    for i in range(len(ax.get_xticklabels())):
        ax.get_xticklabels()[i].set_size('x-large')
    for i in range(len(ax.get_yticklabels())):
        ax.get_yticklabels()[i].set_size('x-large')
    ax.set_xlabel(r'$\log \nu/\nu_0$', size='x-large')
    ax.set_ylabel('$\log S$', size='x-large')
    # Roughly between log10(30/60) and log10(1500/60)
    xp = np.linspace(-0.3, 1.5, 100)
    ax.plot(x, y, 'o', label='cat fluxes')
    # Mark the extrapolated flux at the 60 MHz reference (log nu = 0).
    ax.plot(0.0, p(0), 'o', color='k', label=expflux )
    ax.plot(xp, p(xp), linestyle='--', linewidth=2, label='fit')
    pylab.legend(numpoints=1, loc='best')
    pylab.grid(True)
    pylab.savefig(f, dpi=600)
    return f
def decl2bbsdms(d):
    """
    Convert a declination in decimal degrees to the BBS
    "+dd.mm.ss.ssssssss" string format.

    Based on function deg2dec Written by Enno Middelberg 2001
    http://www.atnf.csiro.au/people/Enno.Middelberg/python/python.html
    >>> decl2bbsdms(1.0)
    '+01.00.00.00000000'
    """
    deg = float(d)
    sign = "+"
    # If negative, remember the "-" sign and convert the absolute value.
    if deg < 0:
        sign = "-"
        deg = deg * (-1)
    if deg < -90 or deg > 90:
        logging.warn("%s: inputs may not exceed 90 degrees!" % deg)
    hh = int(deg)
    mm = int((deg - int(deg)) * 60)
    ss = '%10.8f' % (((deg - int(deg)) * 60 - mm) * 60)
    # Fix: str.zfill replaces the deprecated string.zfill (removed in
    # Python 3); the zero-padding behaviour is identical.
    return "%s%02d.%02d.%s" % (sign, hh, mm, ss.zfill(11))
def ra2bbshms(a):
    """
    Convert a right ascension in decimal degrees to the BBS "hh:mm:ss"
    format (15 degrees per hour).

    >>> ra2bbshms(1.0)
    '00:04:00.00000000'
    """
    deg = float(a)
    # Sanity check: right ascensions live in [0, 360] degrees.  The original
    # warning wrongly said "90 degrees".
    if deg < 0 or deg > 360:
        logging.warn("%s: inputs may not exceed 360 degrees!" % deg)
    hh = int(deg / 15)
    mm = int((deg - 15 * hh) * 4)
    ss = '%10.8f' % ((4 * deg - 60 * hh - mm) * 60)
    # str.zfill replaces the string.zfill helper, which was removed in
    # Python 3; behavior is identical on Python 2.
    return "%02d:%02d:%s" % (hh, mm, ss.zfill(11))
def alpha(theta, decl):
    """
    Angular correction (in degrees) for a search radius *theta* at
    declination *decl* — presumably the RA half-width of a cone search;
    TODO confirm against the callers.

    Saturates to a full 180 degrees close to the celestial pole.
    """
    if abs(decl) + theta > 89.9:
        return 180.0
    numerator = np.sin(np.radians(theta))
    denominator = np.sqrt(abs(np.cos(np.radians(decl - theta)) *
                              np.cos(np.radians(decl + theta))))
    return np.degrees(abs(np.arctan(numerator / denominator)))
#def degrees(r):
# return r * RADIANS_TO_DEGREES
#
#def radians(d):
# return d * DEGREES_TO_RADIANS
|
jjdmol/LOFAR
|
CEP/GSM/bremen/src/gsmutils.py
|
Python
|
gpl-3.0
| 14,187
|
[
"Gaussian"
] |
a9b8feca9d1b6baa82f36e099d21f1a6b2a5e3524b77566933c63ac26795c054
|
#! python
__author__ = 'forgacs.daniel@gmail.com'
__version__ = '0.1'
# Houdini 13
# WORK IN PROGRESS
import time
import hou
def _message(txt):
    """Print *txt* prefixed by the script's arrow marker."""
    marker = '----->'
    print('{0} {1}'.format(marker, txt))
def pyro_setup(objroot = False):
    """Build a two-frequency smoke-source SOP network plus a DOP pyro setup.

    objroot: when True the DOP network is created under /obj, otherwise
    next to the parent of the selected node.  Operates on the first
    currently selected SOP node.
    """
    try:
        selection = hou.selectedNodes()[0]
    except:
        # NOTE(review): bare except; on an empty selection `selection` stays
        # undefined and the statements below raise NameError anyway.
        print('SELECTION ERRRRROR')
    if objroot:
        root = hou.node( '/obj')
    else:
        root = hou.node( selection.parent().path())
    # Two fluid sources (high- and low-frequency) each feed two blast nodes.
    srcnodes = {'high' : '', 'low' : ''}
    blasts = []
    blastCount = 0
    mixnodes = {'density' : '', 'vel' : ''}
    for src in srcnodes:
        srcnodes[src] = selection.createOutputNode('fluidsource',
                                                   'fluidsource_' + src +
                                                   '_freq')
        for i in range(2):
            blasts.append( srcnodes[src].createOutputNode(
                            'blast', 'blast_{}'.format(blastCount)))
            if src == 'high':
                # NOTE(review): assumes a stable iteration order of the
                # mixnodes dict ('density', 'vel'); only guaranteed on
                # CPython 3.7+ — confirm for the targeted Houdini build.
                mixkey = list(mixnodes.keys())[1 - i]
                mixnodes[mixkey] = blasts[blastCount].createOutputNode(
                                        'volumemix',
                                        'volumemix_' + mixkey)
            blastCount += 1
    for k, node in enumerate(mixnodes):
        # Second input of each mix node comes from the 'low' branch.
        mixnodes[node].setInput(1, blasts[3 - k])
        mixnodes[node].parm('mixmethod').set(4 if node == 'density' else 1)
    scatter = blasts[0].createOutputNode('scatter')
    veltrail = scatter.createOutputNode('volumetrail')
    veltrail.setInput(1, mixnodes['vel'])
    mixmerge = mixnodes['density'].createOutputNode('merge')
    mixmerge.setInput(1, mixnodes['vel'])
    sourcenull = mixmerge.createOutputNode('null', 'OUT_smoke_sources')
    # Alternate keep/delete of the density primitives on the blast nodes.
    for k in range( len(blasts)):
        blasts[k].parm('group').set('@name=density')
        blasts[k].parm('removegrp').set(1)
        blasts[k].parm('negate').set(1 if (k+1) % 2 else 0)
    # node layout
    selection.          setPosition( hou.Vector2(0, 0))
    srcnodes['high'].   setPosition( hou.Vector2(-4, -3))
    srcnodes['low'].    setPosition( hou.Vector2(4, -3))
    blasts[0].          setPosition( hou.Vector2(-6, -5))
    blasts[1].          setPosition( hou.Vector2(-2, -5))
    blasts[2].          setPosition( hou.Vector2(2, -5))
    blasts[3].          setPosition( hou.Vector2(6, -5))
    mixnodes['density'].setPosition( hou.Vector2(-4, -8))
    mixnodes['vel'].    setPosition( hou.Vector2(0, -8))
    scatter.            setPosition( hou.Vector2(-8, -8))
    veltrail.           setPosition( hou.Vector2(-7, -10))
    mixmerge.           setPosition( hou.Vector2(-2, -12))
    sourcenull.         setPosition( hou.Vector2(-2, -14))
    # DOP setup
    _message('DOP setup')
    # Mapping of our node nickname -> Houdini DOP node type; the dict values
    # are replaced by the created node objects in the loop below.
    dopnodes = { 'solver'          : 'pyrosolver',
                 'applyvorticles'  : 'applydata',
                 'smokeobject'     : 'smokeobject',
                 'vorticlegeo'     : 'gasvorticlegeometry',
                 'resize'          : 'gasresizefluiddynamic',
                 'mergevel'        : 'merge',
                 'vorticleforces'  : 'gasvorticleforces',
                 'mergeadvect'     : 'merge',
                 'vorticlesadvect' : 'gasadvect',
                 'vorticlestrech'  : 'gasvelocitystretch',
                 'vorticlerecyce'  : 'gasvorticlerecycle',
                 'mergesource'     : 'merge',
                 'source'          : 'sourcevolume',
                 'gravity'         : 'gravity',
                 }
    dopnet = root.createNode('dopnet')
    for node in dopnodes:
        dopnodes[node] = dopnet.createNode(dopnodes[node], node)
    dopnodes['solver'].         setInput(0, dopnodes['applyvorticles'])
    dopnodes['applyvorticles']. setInput(0, dopnodes['smokeobject'])
    dopnodes['applyvorticles']. setInput(1, dopnodes['vorticlegeo'])
    dopnodes['solver'].         setInput(1, dopnodes['resize'])
    dopnodes['solver'].         setInput(2, dopnodes['mergevel'])
    dopnodes['mergevel'].       setInput(2, dopnodes['vorticleforces'])
    dopnodes['solver'].         setInput(3, dopnodes['mergeadvect'])
    dopnodes['mergeadvect'].    setInput(0, dopnodes['vorticlesadvect'])
    dopnodes['mergeadvect'].    setInput(1, dopnodes['vorticlestrech'])
    dopnodes['mergeadvect'].    setInput(2, dopnodes['vorticlerecyce'])
    dopnodes['solver'].         setInput(4, dopnodes['mergesource'])
    dopnodes['mergesource'].    setInput(4, dopnodes['source'])
    dopnodes['gravity'].        setInput(0, dopnodes['solver'])
    # parameter settings
    scatter.parm('npts').set(250)
    # Keep the low-frequency division size in lockstep with the high one.
    srcnodes['low'].parm('divsize').setExpression(
                    'ch("../{}/divsize")'.format(srcnodes['high'].name()))
    parms = { 'eloc'         : 0.1,
              'size'         : 0,
              'use_noise'    : 1,
              'sharpness'    : 1,
              'grain'        : 0,
              'element_size' : 0.3,
              }
    for p in parms:
        # NOTE(review): srcnodes only has 'high' and 'low' keys, so
        # srcnodes['density'] raises KeyError here — probably meant
        # srcnodes['high']; confirm the intended target before fixing.
        srcnodes['density'].parm(p).set(parms[p])
    # layouts
    dopnet.             setPosition( hou.Vector2(-2, -16))
# Executed on load: build the network immediately (no __main__ guard).
_message('#' * 40)
pyro_setup()
_message('DONE')
|
danielforgacs/houdini-tools
|
archive/setupPyro.py
|
Python
|
mit
| 4,241
|
[
"BLAST"
] |
6292e0a4aa71b97d49ade0555e8233a578ba7b1cbc7af7a687906f75184008fb
|
#!/usr/bin/env python
#
# $File: backTrajectory.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
from simuPOP.utils import Trajectory, simulateBackwardTrajectory
from math import exp
def Nt(gen):
    'An exponential sim.Population growth demographic model (5000 at gen 0).'
    base_size = 5000
    growth_rate = .00115
    return int(base_size * exp(growth_rate * gen))
def fitness(gen, sp):
    'Constant positive selection pressure, independent of generation.'
    # Fitness of the three genotype classes; arguments are ignored on purpose.
    fitness_by_genotype = [1, 1.01, 1.02]
    return fitness_by_genotype
# simulate a trajectory backward in time, from generation 1000
traj = simulateBackwardTrajectory(N=Nt, fitness=fitness, nLoci=2,
        endGen=1000, endFreq=[0.1, 0.2])
# matplotlib syntax
#traj.plot('log/backTrajectory.png', set_ylim_top=0.3, set_ylim_bottom=0,
#    plot_c_loc=['r', 'b'], set_title_label='Simulated Trajectory (backward-time)')
print('Trajectory simulated with length %s ' % len(traj.traj))
pop = sim.Population(size=Nt(0), loci=[1]*2)
# save Trajectory function in the sim.population's local namespace
# so that the sim.PyEval operator can access it.
pop.dvars().traj = traj.func()
# Forward simulation that reproduces the simulated trajectory: the
# controlled mating scheme steers the allele frequencies along traj,
# and PyEval prints observed vs. expected frequencies every 100 gens.
pop.evolve(
    initOps=[sim.InitSex()],
    preOps=traj.mutators(loci=[0, 1]),
    matingScheme=sim.ControlledRandomMating(loci=[0, 1], alleles=[1, 1],
        subPopSize=Nt, freqFunc=traj.func()),
    postOps=[
        sim.Stat(alleleFreq=[0, 1], begin=500, step=100),
        sim.PyEval(r"'%4d: %.3f (exp: %.3f), %.3f (exp: %.3f)\n' % (gen, alleleFreq[0][1],"
            "traj(gen)[0], alleleFreq[1][1], traj(gen)[1])",
            begin=500, step=100)
    ],
    gen=1001 # evolve 1001 generations to reach the end of generation 1000
)
|
BoPeng/simuPOP
|
docs/backTrajectory.py
|
Python
|
gpl-2.0
| 2,557
|
[
"VisIt"
] |
9909ae4d4740d3b3640030a135a572f9761c5154d60be01158138c9948ed8930
|
'''
This Script will generate geometries around the conical intersection, linear
and circle.
It needs a geometry file (xyz) and two vectors (that can be read by np.loadtxt)
'''
import numpy as np
import glob
from collections import namedtuple
from argparse import (ArgumentParser,RawTextHelpFormatter)
from quantumpropagator import (readGeometry,saveTraj,err,retrieve_hdf5_data,
mathematicaListGenerator, gnuSplotCircle, ndprint)
def read_single_arguments(single_inputs):
    '''
    Parse the command line arguments and fold them into *single_inputs*.

    single_inputs :: namedtuple <- default option values; a copy with the
    parsed values substituted is returned.
    '''
    d = "Create the geometry scan and the circle graphic."
    parser = ArgumentParser(formatter_class=RawTextHelpFormatter, description=d)
    parser.add_argument("-v", "--vectors",
                        dest="v",
                        nargs='+',
                        help="the 3 files: geometry and branching plane vectors")
    parser.add_argument("-s", "--scalarProd",
                        dest="s",
                        nargs='+',
                        help="to calculate scalar product between scan \
geometries (given by globalexpression) and branching plane vectors")
    # Help text matches the order displaceGeom consumes the values in:
    # linearArgs[0] is the number of points, linearArgs[1] the distance.
    parser.add_argument("-l", "--linear",
                        dest="l",
                        nargs='+',
                        help="parameters for the linear displacement.\n"
                             "Number of points :: Int\n"
                             "Distance :: Double\n")
    parser.add_argument("-c", "--circular",
                        dest="c",
                        nargs='+',
                        help="parameters for the circular displacement.\n"
                             "Number of points in the circle :: Int\n"
                             "List of radii :: [Double]")
    parser.add_argument("-g", "--globalPattern",
                        dest="g",
                        type=str,
                        help="it is the global pattern of output rasscf h5 files\n"
                             "to create the gnuplot graphic")
    args = parser.parse_args()
    # "is not None" (identity test) is the idiomatic way to detect that an
    # optional argument was supplied; "!= None" invokes __eq__ needlessly.
    if args.s is not None:
        if len(args.s) == 3:
            [globE, grad, der] = args.s
            single_inputs = single_inputs._replace(globExp=globE)
            single_inputs = single_inputs._replace(vectorX=grad)
            single_inputs = single_inputs._replace(vectorY=der)
        else:
            err('this takes 3 arguments: the geometry global expression and the two vectors')
    if args.v is not None:
        if len(args.v) == 3:
            [geom, grad, der] = args.v
            single_inputs = single_inputs._replace(fileXYZ=geom)
            single_inputs = single_inputs._replace(vectorX=grad)
            single_inputs = single_inputs._replace(vectorY=der)
        else:
            err('this takes 3 arguments: the single geometry and the two vectors')
    if args.l is not None:
        if len(args.l) == 2:
            single_inputs = single_inputs._replace(linearDisplacement=args.l)
        else:
            err('this takes 2 arguments: number of points and distance')
    if args.c is not None:
        # without controls because we feel brave
        single_inputs = single_inputs._replace(circleScan=args.c)
    if args.g is not None:
        single_inputs = single_inputs._replace(graphsGlob=args.g)
    return single_inputs
def scalarProds(expression,vec1fn,vec2fn):
    '''
    given a scan global expression coordinate and the branching plane vectors,
    it calculates the scalar products

    expression :: String <- the global expression of files
    vec1fn :: String <- filePath (gradient difference vector)
    vec2fn :: String <- filePath (derivative coupling vector)
    '''
    GD = np.loadtxt(vec1fn)
    NA = np.loadtxt(vec2fn)
    allfn = sorted(glob.glob(expression))
    (natoms,title,atomTN,_) = readGeometry(allfn[0])
    fileN = len(allfn)
    allgeom = np.empty((fileN, natoms, 3))
    # enumerate replaces the manual index counter of the original loop.
    for ind, f in enumerate(allfn):
        (_,_,_,geom) = readGeometry(f)
        allgeom[ind] = geom
    # Displacement vectors between consecutive scan geometries.
    difference = np.diff(allgeom,axis=0)
    GDnorm = np.linalg.norm(GD)
    NAnorm = np.linalg.norm(NA)
    norms = np.linalg.norm(difference, axis =(1,2))
    broadcasted = np.transpose(np.broadcast_to(norms,(3,natoms,fileN-1)))
    # Normalize each displacement to a unit vector before projecting it
    # onto the branching-plane vectors.
    unitary_move = difference/broadcasted
    wow = ndprint(np.tensordot(unitary_move,GD),format_string = '{0:7.4f}')
    wol = ndprint(np.tensordot(unitary_move,NA),format_string = '{0:7.4f}')
    output = '''
    Gd norm -> {}
    Dc norm -> {}
    Scalar product Gd:
    {}
    Scalar product Dc:
    {}
    '''.format(GDnorm, NAnorm, wow, wol)
    print(output)
def _cleanFileName(label):
    '''
    Encode '+', '-' and '.' so *label* is a filesystem-friendly file name.
    transformString applies the inverse mapping when reading results back.
    '''
    return label.replace('-', 'N').replace('.', '-').replace('+', 'P')

def displaceGeom(geom1,xf,yf,linearArgs,circleArgs):
    '''
    This function takes the geometry and the two vectors and creates a circle
    or a line around them. Pretty neat.

    geom1 :: FilePath <- the geometry
    xf :: FilePath <- the gradient difference vector
    yf :: FilePath <- the derivative coupling vector
    linearArgs :: [Int, Double] <- number of points and scan distance;
                                   an empty list disables the linear scan.
    circleArgs :: [Int, Double..] <- number of points on the circle followed
                                     by a list of radii; an empty list
                                     disables the circular scan.
    '''
    GD = np.loadtxt(xf)
    NA = np.loadtxt(yf)
    (natoms,title,atomTN,geom) = readGeometry(geom1)
    labelRoot = geom1.split('.')[0]
    if linearArgs != []:
        npoints = int(linearArgs[0])
        distance = float(linearArgs[1])
        # One scan along each branching-plane vector (same loop body as the
        # original duplicated GradDiff/DeriCoup loops).
        for tag, vec in (('ScanGradDiff', GD), ('ScanDeriCoup', NA)):
            for i in np.linspace(-distance, distance, npoints):
                fn = _cleanFileName(labelRoot + tag + '_{:+08.3f}'.format(i))
                new = geom + (i * vec)
                saveTraj(np.array([new]), atomTN, fn)
    if circleArgs != []:
        circles = int(circleArgs[0])
        Rlist = [float(a) for a in circleArgs[1:]]
        # this false in linspace function avoids the creation of both 0 and 360 degrees
        for i in np.linspace(0, 360, circles, False):
            for R in Rlist:
                fn = _cleanFileName(labelRoot +
                                    'ScanRing{:+08.3f}_Angle{:+08.3f}'.format(R, i))
                rad = np.deg2rad(i)
                component1 = R * GD * np.sin(rad)
                component2 = R * NA * np.cos(rad)
                new = geom + component1 + component2
                saveTraj(np.array([new]), atomTN, fn)
def graphScan(globalExp):
    '''
    So, this one takes the global expression of rasscf h5 files and creates the
    circle graph. This assumes that you used the {:+08.3f} convention to create this
    file.
    RingP000-100_AngleP000-000

    globalExp :: string <- global pattern "*.*"
    '''
    allH5 = sorted(glob.glob(globalExp))
    dime = len(allH5)
    allH5First = allH5[0]
    # Number of electronic states, read from the first file.
    nstates = len(retrieve_hdf5_data(allH5First,'ROOT_ENERGIES'))
    bigArrayE = np.empty((dime,nstates))
    bigArray1 = np.empty(dime)
    bigArray2 = np.empty(dime)
    # enumerate replaces the manual index counter of the original loop.
    for ind, fileN in enumerate(allH5):
        (dim1, dim2) = transformString(fileN)
        bigArrayE[ind] = retrieve_hdf5_data(fileN,'ROOT_ENERGIES')
        bigArray1[ind] = dim1
        bigArray2[ind] = dim2
    gnuSplotCircle(bigArray1,bigArray2,bigArrayE)
def bashcommand(output_fn):
    '''
    Placeholder: eventually this should extract the branching-plane vectors
    from a Molcas log file.

    output_fn :: String <- the file molcas output (currently unused)
    '''
    # NOTE(review): stub implementation — it just runs "ls" in the current
    # directory and returns its stdout; output_fn is ignored entirely.
    bashCommand = "ls"
    import subprocess
    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    return(output)
def transformString(string):
    """Decode a scan file name into cartesian branching-plane coordinates.

    File names follow the 'Scan' + 'Ring{R}_Angle{A}' convention produced
    by displaceGeom, with '+', '.' and '-' encoded as 'P', '-' and 'N'.
    Returns (R*sin(A), R*cos(A)); near-zero components are clamped to 0.
    """
    def decode(token, prefix):
        # Undo the file-name encoding: P -> +, - -> ., N -> -
        plain = token.replace(prefix, '').replace('P', '+')
        return float(plain.replace('-', '.').replace('N', '-'))

    ring, angle = string.split('Scan')[1].split('.')[0].split('_')
    ringD = decode(ring, 'Ring')
    angleD = decode(angle, 'Angle')
    theta = np.deg2rad(angleD)
    dim1 = ringD * np.sin(theta)
    dim2 = ringD * np.cos(theta)
    # Clamp numerical noise from sin/cos at the axes to exact zero.
    if abs(dim1) < 0.00000001:
        dim1 = 0
    if abs(dim2) < 0.00000001:
        dim2 = 0
    return (dim1, dim2)
# All command-line options collected in one immutable record; empty-string /
# empty-list fields mean "option not given on the command line".
single_inputs = namedtuple("single_input",
            ("fileXYZ",
             "vectorX",
             "vectorY",
             "linearDisplacement",
             "circleScan",
             "graphsGlob",
             "globExp"))
def main():
    '''
    Entry point: parse the command line and dispatch to the requested task.

    circles works like:
    generateGeomsAroundConical.py -v CI12.xyz x y -c 20 0.1 0.2 0.3 0.4
                     the command    geom    v1 v2  howmany list of Rs
    '''
    o_inputs = single_inputs("","","",[],[],"","") # defaults
    inp = read_single_arguments(o_inputs)
    # Nothing parsed at all -> the user supplied no recognized arguments.
    if inp == o_inputs:
        err("You should use this with some arguments... you know... try -h")
    if inp.graphsGlob == "":
        if inp.globExp == "":
            # Default task: generate displaced geometries around the CI.
            displaceGeom(inp.fileXYZ,inp.vectorX,inp.vectorY,inp.linearDisplacement,
                    inp.circleScan)
        else:
            scalarProds(inp.globExp,inp.vectorX,inp.vectorY)
    else:
        graphScan(inp.graphsGlob)
if __name__ == "__main__":
    main()
|
acuzzio/GridQuantumPropagator
|
Scripts/generateGeomsAroundConical.py
|
Python
|
gpl-3.0
| 9,811
|
[
"MOLCAS"
] |
ac2317d8e4c8217feb17fe6b3b2b09cb6039052854c00ede6f033b7d8c007051
|
# -*- coding: utf-8 -*-
#
# DiracDocs documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 25 17:34:37 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import datetime
import os
import sys
import subprocess
sys.path.insert(0, ".")
# Optional test doubles; absent outside the test environment.
try:
    import fakeEnvironment
except ImportError:
    pass
try:
    import fakeEnv
except ImportError:
    pass
diracRelease = os.environ.get('DIRACVERSION', 'integration')
# On ReadTheDocs the version is derived from the checkout directory name.
if os.environ.get('READTHEDOCS') == 'True':
    diracRelease = os.path.basename(os.path.abspath("../../"))
    if diracRelease.startswith("rel-"):
        diracRelease = diracRelease[4:]
print('conf.py: %s as DIRACVERSION' % diracRelease)
# Set this environment variable such that the documentation
# generated for the various X509* classes is the one with M2Crypto
if 'DIRAC_USE_M2CRYPTO' not in os.environ:
    os.environ['DIRAC_USE_M2CRYPTO'] = "Yes"
#...............................................................................
# configuration
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
if os.environ.get('READTHEDOCS') == 'True':
    sys.path.append(os.path.abspath('.'))
diracPath = os.path.abspath(os.path.join(os.getcwd(), "../.."))
print("DiracPath", diracPath)
buildfolder = "_build"
try:
    os.mkdir(os.path.abspath("../" + buildfolder))
except BaseException:
    pass
# We need to have the DIRAC module somewhere, or we cannot import it, as
# readtheDocs clones the repo into something based on the branchname
if not os.path.exists("../../DIRAC"):
    diracLink = os.path.abspath(os.path.join(os.getcwd(), "../", buildfolder, "DIRAC"))
    print("DiracLink", diracLink)
    if not os.path.exists(diracLink):
        RES = subprocess.check_output(["ln", "-s", diracPath, diracLink])
    diracPath = os.path.abspath(os.path.join(diracLink, ".."))
sys.path.insert(0, diracPath)
for path in sys.path:
    os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + ":" + path
# this is not working at the moment because the DIRAC folder is not found by the buildScriptsDOC script
# print "Pythonpath",os.environ['PYTHONPATH']
# buildCommand = os.path.join( os.getcwd() , "../Tools/buildScriptsDOC.py" )
# scriptdir = os.path.abspath(os.path.join( os.getcwd() , "../", buildfolder, "scripts" ))
# try:
#   os.mkdir( scriptdir )
# except:
#   pass
# print "command", buildCommand
# code = subprocess.Popen( ["python", buildCommand, scriptdir ], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout , err = code.communicate()
# print "script",stdout
# print "script",err
os.environ["DIRAC"] = diracPath
print("DIRAC ENVIRON", os.environ["DIRAC"])
# re-create the RST files for the command references
buildCommand = os.path.join(os.getcwd(), "../Tools/buildScriptsDocs.py")
code = subprocess.Popen(["python", buildCommand], env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, err = code.communicate()
print("scriptDocs:", stdout)
print("scriptErrs:", err)
# singlehtml build needs too much memory, so we need to create less code documentation
buildtype = "limited" if any("singlehtml" in arg for arg in sys.argv) else "full"
print("Chosing build type:", buildtype)
buildCommand = os.path.join(os.getcwd(), "../Tools/MakeDoc.py")
code = subprocess.Popen(["python", buildCommand, buildtype], env=os.environ,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, err = code.communicate()
print("code", stdout)
print("code", err)
# always update dirac.cfg
buildCommand = os.path.join(os.getcwd(), "../Tools/UpdateDiracCFG.py")
code = subprocess.Popen(["python", buildCommand], env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, err = code.communicate()
if stdout:
    print("Config Output", stdout)
if err:
    print("Config error", err)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              'sphinx.ext.intersphinx',
              'sphinx.ext.napoleon',
              'sphinx.ext.graphviz',
              ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DIRAC'
copyright = u'%s, DIRAC Project' % datetime.datetime.utcnow().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = diracRelease
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%H:%M %d/%m/%Y %Z'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
# ADRI: Ignore old stuff that is not included in the compilation
# NOTE(review): exclude_trees is deprecated in newer Sphinx releases in
# favor of exclude_patterns — confirm against the targeted Sphinx version.
exclude_trees = ['AdministratorGuide/Configuration/ConfigurationReference']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
html_style = 'dirac.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# 'sidebarbgcolor':'#D5E2F2'
#}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "DIRAC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/DIRAC-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d/%m/%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DiracDocsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'DiracDocs.tex', u'DIRAC Documentation',
     u'DIRAC Project.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# packages that cannot be installed in RTD
autodoc_mock_imports = ['lcg_util', 'cx_Oracle', 'fts3', 'XRootD', 'gfal2', 'arc', '_arc',
                        'matplotlib',
                        'git',
                        'numpy', 'irods', 'pylab', 'stomp',
                        'pythonjsonlogger', 'cmreslogging',
                        ]
# link with the python standard library docs
intersphinx_mapping = {
    'python': ('https://docs.python.org/2.7', None),
}
#...............................................................................
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
petricm/DIRAC
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 10,979
|
[
"DIRAC"
] |
a586341af2bff42a8fa63efb8f95e1c694aa902696afc9071ac28f62ed2b54d9
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Checks for SDK updates."""
import datetime
import logging
import os
import sys
import time
import urllib2
import google
import yaml
from google.appengine.api import validation
from google.appengine.api import yaml_object
VERSION_FILE = '../../VERSION'
UPDATE_CHECK_TIMEOUT = 3
NAG_FILE = '.appcfg_nag'
class NagFile(validation.Validated):
  """A validated YAML class to represent the user's nag preferences.

  Attributes:
    timestamp: The timestamp of the last nag.
    opt_in: True if the user wants to check for updates on dev_appserver
      start. False if not. May be None if we have not asked the user yet.
  """
  # Schema enforced by validation.Validated when the YAML file is parsed.
  ATTRIBUTES = {
      'timestamp': validation.TYPE_FLOAT,
      'opt_in': validation.Optional(validation.TYPE_BOOL),
  }
  @staticmethod
  def Load(nag_file):
    """Load a single NagFile object where one and only one is expected.

    Args:
      nag_file: A file-like object or string containing the yaml data to parse.

    Returns:
      A NagFile instance.
    """
    return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject(isfile=os.path.isfile, open_fn=open):
  """Gets the version of the SDK by parsing the VERSION file.

  Args:
    isfile: used for testing.
    open_fn: Used for testing.

  Returns:
    A Yaml object or None if the VERSION file does not exist.
  """
  sdk_dir = os.path.dirname(google.appengine.__file__)
  version_filename = os.path.join(sdk_dir, VERSION_FILE)
  if not isfile(version_filename):
    logging.error('Could not find version file at %s', version_filename)
    return None
  fh = open_fn(version_filename, 'r')
  try:
    # Returning from inside the try still runs the finally-close.
    return yaml.safe_load(fh)
  finally:
    fh.close()
def _VersionList(release):
  """Parse a version string into a list of ints.

  Args:
    release: The 'release' version, e.g. '1.2.4'.
      (Due to YAML parsing this may also be an int or float.)

  Returns:
    A list of ints corresponding to the parts of the version string
    between periods. Example:
      '1.2.4' -> [1, 2, 4]
      '1.2.3.4' -> [1, 2, 3, 4]

  Raises:
    ValueError if not all the parts are valid integers.
  """
  parts = str(release).split('.')
  return list(map(int, parts))
class SDKUpdateChecker(object):
"""Determines if the local SDK is the latest version.
Nags the user when there are updates to the SDK. As the SDK becomes
more out of date, the language in the nagging gets stronger. We
store a little yaml file in the user's home directory so that we nag
the user only once a week.
The yaml file has the following field:
'timestamp': Last time we nagged the user in seconds since the epoch.
Attributes:
rpcserver: An AbstractRpcServer instance used to check for the latest SDK.
config: The app's AppInfoExternal. Needed to determine which api_version
the app is using.
"""
  def __init__(self,
               rpcserver,
               configs,
               isdir=os.path.isdir,
               isfile=os.path.isfile,
               open_fn=open):
    """Create a new SDKUpdateChecker.

    Args:
      rpcserver: The AbstractRpcServer to use.
      configs: A list of yaml objects or a single yaml object that specify the
        configuration of this application.
      isdir: Replacement for os.path.isdir (for testing).
      isfile: Replacement for os.path.isfile (for testing).
      open_fn: Replacement for the open builtin (for testing).
    """
    # Accept either a single config or a list; normalize to a list.
    if not isinstance(configs, list):
      configs = [configs]
    self.rpcserver = rpcserver
    self.isdir = isdir
    self.isfile = isfile
    self.open = open_fn
    # Distinct runtimes named by the configs.
    self.runtimes = set(config.runtime for config in configs)
    # Map of runtime -> set of api_versions requested by the configs.
    self.runtime_to_api_version = {}
    for config in configs:
      self.runtime_to_api_version.setdefault(
          config.runtime, set()).add(config.api_version)
  @staticmethod
  def MakeNagFilename():
    """Returns the filename for the nag file for this user."""
    user_homedir = os.path.expanduser('~/')
    if not os.path.isdir(user_homedir):
      # If '~' did not expand to a real directory (seen on Windows), derive
      # HOMEDRIVE from the stdlib's location so the expanduser below works.
      drive, unused_tail = os.path.splitdrive(os.__file__)
      if drive:
        os.environ['HOMEDRIVE'] = drive
    return os.path.expanduser('~/' + NAG_FILE)
  def _ParseVersionFile(self):
    """Parse the local VERSION file.

    Returns:
      A Yaml object or None if the file does not exist.
    """
    # Delegate to the module-level helper, honoring the injected test hooks.
    return GetVersionObject(isfile=self.isfile, open_fn=self.open)
  def CheckSupportedVersion(self):
    """Determines if the app's api_version is supported by the SDK.

    Uses the api_version field from the AppInfoExternal to determine if
    the SDK supports that api_version.

    Raises:
      sys.exit if the api_version is not supported.
    """
    version = self._ParseVersionFile()
    if version is None:
      logging.error('Could not determine if the SDK supports the api_version '
                    'requested in app.yaml.')
      return
    unsupported_api_versions_found = False
    for runtime, api_versions in self.runtime_to_api_version.items():
      if 'supported_api_versions' in version:
        # Fall back to *version* itself so the trailing ['api_versions']
        # lookup yields the top-level list when the runtime is not listed.
        supported_api_versions = version['supported_api_versions'].get(
            runtime, version)['api_versions']
      else:
        supported_api_versions = version['api_versions']
      unsupported_api_versions = sorted(api_versions -
                                        set(supported_api_versions))
      if unsupported_api_versions:
        # Remember the failure but keep looping so every runtime's problem
        # is reported before exiting.
        unsupported_api_versions_found = True
        if len(unsupported_api_versions) == 1:
          logging.critical('The requested api_version (%s) is not supported by '
                           'the %s runtime in this release of the SDK. The '
                           'supported api_versions are %s.',
                           unsupported_api_versions[0], runtime,
                           supported_api_versions)
        else:
          logging.critical('The requested api_versions (%s) are not supported '
                           'by the %s runtime in this release of the SDK. The '
                           'supported api_versions are %s.',
                           unsupported_api_versions, runtime,
                           supported_api_versions)
    if unsupported_api_versions_found:
      sys.exit(1)
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
Queries the server for the latest SDK version at the same time reporting
the local SDK version. The server will respond with a yaml document
containing the fields:
'release': The name of the release (e.g. 1.2).
'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support an api_version named in
a configuration in self.configs.
"""
version = self._ParseVersionFile()
if version is None:
logging.info('Skipping update check')
return
logging.info('Checking for updates to the SDK.')
responses = {}
try:
for runtime in self.runtimes:
responses[runtime] = yaml.safe_load(self.rpcserver.Send(
'/api/updatecheck',
timeout=UPDATE_CHECK_TIMEOUT,
release=version['release'],
timestamp=version['timestamp'],
api_versions=version['api_versions'],
runtime=runtime))
except urllib2.URLError, e:
logging.info('Update check failed: %s', e)
return
try:
latest = sorted(responses.values(), reverse=True,
key=lambda release: _VersionList(release['release']))[0]
except ValueError:
logging.warn('Could not parse this release version')
if version['release'] == latest['release']:
logging.info('The SDK is up to date.')
return
try:
this_release = _VersionList(version['release'])
except ValueError:
logging.warn('Could not parse this release version (%r)',
version['release'])
else:
try:
advertised_release = _VersionList(latest['release'])
except ValueError:
logging.warn('Could not parse advertised release version (%r)',
latest['release'])
else:
if this_release > advertised_release:
logging.info('This SDK release is newer than the advertised release.')
return
for runtime, response in responses.items():
api_versions = response['api_versions']
obsolete_versions = sorted(
self.runtime_to_api_version[runtime] - set(api_versions))
if len(obsolete_versions) == 1:
self._Nag(
'The api version you are using (%s) is obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions[0],
response, version, force=True)
elif obsolete_versions:
self._Nag(
'The api versions you are using (%s) are obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions,
response, version, force=True)
deprecated_versions = sorted(
self.runtime_to_api_version[runtime].intersection(api_versions[:-1]))
if len(deprecated_versions) == 1:
self._Nag(
'The api version you are using (%s) is deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions[0], response, version)
elif deprecated_versions:
self._Nag(
'The api versions you are using (%s) are deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions, response, version)
self._Nag('There is a new release of the SDK available.',
latest, version)
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = SDKUpdateChecker.MakeNagFilename()
if self.isfile(nag_filename):
fh = self.open(nag_filename, 'r')
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
return None
  def _WriteNagFile(self, nag):
    """Writes the NagFile to the user's nag file.

    If the destination path does not exist, this method will log an error
    and fail silently: persisting the nag state is best-effort and must not
    abort the caller.

    Args:
      nag: The NagFile to write.
    """
    nagfilename = SDKUpdateChecker.MakeNagFilename()
    try:
      fh = self.open(nagfilename, 'w')
      try:
        fh.write(nag.ToYAML())
      finally:
        fh.close()
    except (OSError, IOError), e:
      logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
  def _Nag(self, msg, latest, version, force=False):
    """Prints a nag message and updates the nag file's timestamp.

    Because we don't want to nag the user everytime, we store a simple
    yaml document in the user's home directory. If the timestamp in this
    doc is over a week old, we'll nag the user. And when we nag the user,
    we update the timestamp in this doc.

    Args:
      msg: The formatted message to print to the user.
      latest: The yaml document received from the server.
      version: The local yaml version document.
      force: If True, always nag the user, ignoring the nag file.
    """
    nag = self._ParseNagFile()
    if nag and not force:
      # Rate-limit: at most one nag per week unless forced.
      last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
      if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
        logging.debug('Skipping nag message')
        return
    if nag is None:
      nag = NagFile()
    # Persist the new nag timestamp before printing.
    nag.timestamp = time.time()
    self._WriteNagFile(nag)
    print '****************************************************************'
    print msg
    print '-----------'
    print 'Latest SDK:'
    print yaml.dump(latest)
    print '-----------'
    print 'Your SDK:'
    print yaml.dump(version)
    print '-----------'
    print 'Please visit https://developers.google.com/appengine/downloads'
    print 'for the latest SDK'
    print '****************************************************************'
  def AllowedToCheckForUpdates(self, input_fn=raw_input):
    """Determines if the user wants to check for updates.

    On startup, the dev_appserver wants to check for updates to the SDK.
    Because this action reports usage to Google when the user is not
    otherwise communicating with Google (e.g. pushing a new app version),
    the user must opt in.

    If the user does not have a nag file, we will query the user and
    save the response in the nag file. Subsequent calls to this function
    will re-use that response.

    Args:
      input_fn: used to collect user input. This is for testing only.

    Returns:
      True if the user wants to check for updates. False otherwise.
    """
    nag = self._ParseNagFile()
    if nag is None:
      # First run: timestamp 0.0 guarantees the first eligible nag fires.
      nag = NagFile()
      nag.timestamp = 0.0
    if nag.opt_in is None:
      answer = input_fn('Allow dev_appserver to check for updates on startup? '
                        '(Y/n): ')
      answer = answer.strip().lower()
      # Anything other than an explicit 'n'/'no' is treated as consent.
      if answer == 'n' or answer == 'no':
        print ('dev_appserver will not check for updates on startup. To '
               'change this setting, edit %s' %
               SDKUpdateChecker.MakeNagFilename())
        nag.opt_in = False
      else:
        print ('dev_appserver will check for updates on startup. To change '
               'this setting, edit %s' % SDKUpdateChecker.MakeNagFilename())
        nag.opt_in = True
      self._WriteNagFile(nag)
    return nag.opt_in
|
elsigh/browserscope
|
third_party/appengine_tools/sdk_update_checker.py
|
Python
|
apache-2.0
| 14,468
|
[
"VisIt"
] |
37e13b34a37c3b9d7f79e36f43a71f227a68a353e4b3f404b4fa21290987fa35
|
from .agents import *
import utils as u
class AgentXTypeTwoClass(Agent):
    """A vacuum-world agent that explores via DFS with backtracking.

    All behaviour is built inside __init__: the nested helper functions
    close over ``self``, and the entry point is bound to ``self.program``
    at the end.
    """
    def __init__(self, x=2, y=2):
        Agent.__init__(self)
        ##
        # Personalize the identifier of this class.
        # Will be used instead of the class name
        # in neighbours info
        self.name = 'AgentXTypeTwo'
        # The possible actions of the agent
        self.actions = {
            0: "GoNorth",
            1: "GoWest",
            2: "GoSouth",
            3: "GoEast",
            4: "NoOp"
        }
        # The list of walls bumped
        self.walls = []
        # The list of the visited position
        self.visited_floor = []
        # The search tree (stack of (position, action) pairs for backtracking)
        self.search_tree = [((0, 0), 4)]
        # The position visited by an adversary
        self.visited_floor_adv = []
        # Current action
        self.current_action = 4
        # Current position (relative to the start cell)
        self.position = (0, 0)

        def get_coord(action):
            """
            Retrieve the normal coordinates and the backtracked one
            Args:
                - action (int): The action to make
            Return:
                - (tuple): The new position
            """
            # NOTE(review): returns None for action 4 (NoOp) -- callers only
            # pass movement actions 0-3.
            if action == 0: # GoNorth
                return self.position[0], self.position[1] + 1
            elif action == 1: # GoWest
                return self.position[0] - 1, self.position[1]
            elif action == 2: # GoSouth
                return self.position[0], self.position[1] - 1
            elif action == 3: # GoEast
                return self.position[0] + 1, self.position[1]

        def distance_from_other_agents(neighbors):
            """
            Calculate the distance from other agents and return the list with the preferred action to make
            Args:
                neighbors (list): The complete list of the agent
            Return:
                (list): A list of tuple with a structure like [(distance, [action, ...]), ...]
            """
            distances = []
            for (agent_id, agent_type), pos in neighbors:
                if self.id != agent_id:
                    dis_from_other_agent = u.distance(self.position, (self.position[0] + pos[0], self.position[1] + pos[1]))
                    actions = []
                    # Prefer moving toward the other agent's relative offset.
                    if pos[0] < 0:
                        actions.append(1) # GoWest
                    elif pos[0] > 0:
                        actions.append(3) # GoEast
                    if pos[1] < 0:
                        actions.append(2) # GoSouth
                    elif pos[1] > 0:
                        actions.append(0) # GoNorth
                    # Random fallback so the agent never has an empty choice.
                    actions.append(random.randint(0, 3))
                    distances.append((dis_from_other_agent, actions))

            # NOTE(review): Python 2 cmp-style sort; orders entries by
            # decreasing distance (farthest agents first).
            def sorter(dis1, dis2):
                if dis1[0] >= dis2[0]:
                    return -1
                else:
                    return 1

            distances.sort(sorter)
            return distances

        def define_action(neighbors):
            """
            Retrieve the action to make. In first time the agent try to take open a new graph (or tree) branch,
            if this is not possible then it enter a previously visited branch
            Args:
                neighbors (list): The list of the neighbors
            Return:
                (string): the action to make
            """
            def decide(action):
                """
                Control if the action is possible
                Args:
                    action (int): The action to undertake
                Return:
                    (string) The action to make
                    (None) If is not possible
                """
                coord = get_coord(action)
                # Only move onto cells never seen before (not a wall, not
                # visited by us, not visited by an adversary).
                if coord not in self.walls and coord not in self.visited_floor \
                        and coord not in self.visited_floor_adv:
                    # New position
                    self.position = coord
                    # New action
                    self.current_action = action
                    # Save in the history
                    self.visited_floor.insert(0, self.position)
                    self.search_tree.insert(0, (self.position, action))
                    return self.actions[action]
                else:
                    return None

            dis_other_agents = distance_from_other_agents(neighbors)
            for dis, actions in dis_other_agents:
                # Firstly try the actions calculated with heuristic
                for i in actions:
                    action = decide(i)
                    if action:
                        return action
            # In this second stage, the agent try to take one of the four action (if it's possible)
            for i in range(0, 4):
                action = decide(i)
                if action:
                    return action
            ##
            # ====================================================
            # Backtracking when there aren't action to make
            # ====================================================
            if not self.search_tree:
                return 'NoOp'
            # Retrieve the position and action
            (coord_x, coord_y), action = self.search_tree[0]
            # Calculate the backtrack action to make (opposite direction)
            action = (action + 2) % 4
            # Remove the first element of search tree
            self.search_tree.pop(0)
            # Backtrack position
            self.position = get_coord(action)
            # Backtrack action
            self.current_action = action
            return self.actions[action]

        def retrieve_action(neighbors):
            """
            Retrieve an action to make
            Args:
                neighbors (array): The list of the neighbors
            Return:
                (string): The action to make
            """
            if neighbors:
                return define_action(neighbors)
            else:
                return 'NoOp'

        def make_action(status, bump, neighbors):
            """
            Select the action to execute
            Params:
                status (string): 'Dirty' or 'Clean'
                bump (string): 'Bump' or 'None'
                neighbors (list of tuples): [
                    ( (agent_id, agent_type), (r_x, r_y) ),
                    ...,
                    ...
                ]
            Returns:
                (string): one of these commands:
                    - 'Suck'
                    - 'GoNorth'
                    - 'GoSouth'
                    - 'GoWest'
                    - 'GoEast'
                    - 'NoOp' or 'Noop'
            """
            # If the search tree is empty, then the agent have finished the visit
            if not self.search_tree:
                return 'NoOp'
            # If the position is dirty, then suck
            if status == 'Dirty':
                return 'Suck'
            # Bumped the wall
            if bump == 'Bump':
                # Extract the position from the search tree because it can't accessed anymore
                if self.search_tree:
                    self.search_tree.pop(0)
                self.walls.append(self.position)
                # Undo the move that caused the bump (opposite direction).
                self.position = get_coord((self.current_action + 2) % 4)
            # If the agent have bumped the wall or the position is empty, then retrieve the action to make
            return retrieve_action(neighbors)

        def program(status, bump, neighbors):
            """Main function of the Agent.
            Params:
                status (string): 'Dirty' or 'Clean'
                bump (string): 'Bump' or 'None'
                neighbors (list of tuples): [
                    ( (agent_id, agent_type), (r_x, r_y) ),
                    ...,
                    ...
                ]
            Returns:
                (string): one of these commands:
                    - 'Suck'
                    - 'GoNorth'
                    - 'GoSouth'
                    - 'GoWest'
                    - 'GoEast'
                    - 'NoOp' or 'Noop'
            """
            # Save all the position visited by an other agent as personal visiting
            for (agent_id, agent_type), pos in neighbors:
                if agent_id != self.id:
                    self.visited_floor_adv.append((self.position[0] + pos[0], self.position[1] + pos[1]))
            return make_action(status, bump, neighbors)

        # Expose the closure as the agent's program entry point.
        self.program = program
|
valefranz/AI-Project-VacuumEnvironment
|
AgentiStudenti2016/AgentXTypeTwo.py
|
Python
|
apache-2.0
| 9,111
|
[
"VisIt"
] |
5546723548fff155c0f3a7fef1ee9a12047cddb2285453285389f53c3d44191f
|
# -*- coding: utf-8 -*-
# Copyright 2016-2017 Jaap Karssenberg <jaap.karssenberg@gmail.com>
import tests
from zim.tokenparser import *
from zim.formats import ParseTreeBuilder
class TestTokenParser(tests.TestCase):

	def testRoundtrip(self):
		'''Parse tree -> token stream -> parse tree must round-trip exactly.'''
		tree = tests.new_parsetree()
		#~ print tree
		tb = TokenBuilder()
		tree.visit(tb)
		tokens = tb.tokens
		#~ import pprint; pprint.pprint(tokens)

		testTokenStream(tokens)

		builder = ParseTreeBuilder(_parsetree_roundtrip=True)
		TokenParser(builder).parse(tokens)
		newtree = builder.get_parsetree()

		self.assertEqual(tree.tostring(), newtree.tostring())

	def testTopLevelLists(self):
		'''topLevelLists() and reverseTopLevelLists() must be inverses.'''
		tree = tests.new_parsetree()
		tb = TokenBuilder()
		tree.visit(tb)
		tokens = tb._tokens # using raw tokens

		newtokens = topLevelLists(tokens)
		testTokenStream(newtokens)
		revtokens = reverseTopLevelLists(newtokens)

		def correct_none_attrib(t):
			# The reverse transform may yield PARAGRAPH tokens with a falsy
			# attrib where the original had an empty dict -- normalize first.
			if t[0] == PARAGRAPH and not t[1]:
				return (PARAGRAPH, {})
			else:
				return t

		# BUGFIX: map() returns a lazy iterator on Python 3, which never
		# compares equal to a list, so assertEqual would always fail there.
		# Materialize the normalized tokens explicitly (identical on Py2).
		revtokens = [correct_none_attrib(t) for t in revtokens]

		self.assertEqual(revtokens, tokens)
|
Osndok/zim-desktop-wiki
|
tests/tokenparser.py
|
Python
|
gpl-2.0
| 1,072
|
[
"VisIt"
] |
41056fc020df0495c61807e89a96f8054f44843f0253bb8aafaeb813a5463019
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
UMP2 analytical nuclear gradients
'''
import numpy
from pyscf import lib
from functools import reduce
from pyscf.lib import logger
from pyscf.scf import ucphf
from pyscf.ao2mo import _ao2mo
from pyscf.mp import ump2
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import mp2 as mp2_grad
def grad_elec(mp_grad, t2, atmlst=None, verbose=logger.INFO):
    '''Electronic part of the UMP2 nuclear gradients.

    Args:
        mp_grad : Gradients object (holds the underlying UMP2 object in .base)
        t2 : (t2aa, t2ab, t2bb) UMP2 amplitudes
        atmlst : atoms for which the gradient is evaluated (default: all)
        verbose : log level

    Returns:
        de : (len(atmlst), 3) array with the electronic gradient contribution.
    '''
    mp = mp_grad.base
    log = logger.new_logger(mp, verbose)
    time0 = logger.process_clock(), logger.perf_counter()

    log.debug('Build ump2 rdm1 intermediates')
    d1 = ump2._gamma1_intermediates(mp, t2)
    time1 = log.timer_debug1('rdm1 intermediates', *time0)
    log.debug('Build ump2 rdm2 intermediates')

    mol = mp_grad.mol
    with_frozen = not ((mp.frozen is None)
                       or (isinstance(mp.frozen, (int, numpy.integer)) and mp.frozen == 0)
                       or (len(mp.frozen) == 0))
    moidx = mp.get_frozen_mask()
    # O/V = occupied/virtual, A/F = active/frozen index groups per spin.
    OA_a, VA_a, OF_a, VF_a = mp2_grad._index_frozen_active(moidx[0], mp.mo_occ[0])
    OA_b, VA_b, OF_b, VF_b = mp2_grad._index_frozen_active(moidx[1], mp.mo_occ[1])
    orboa = mp.mo_coeff[0][:,OA_a]
    orbva = mp.mo_coeff[0][:,VA_a]
    orbob = mp.mo_coeff[1][:,OA_b]
    orbvb = mp.mo_coeff[1][:,VA_b]
    nao, nocca = orboa.shape
    nvira = orbva.shape[1]
    noccb = orbob.shape[1]
    nvirb = orbvb.shape[1]

    # Partially transform MP2 density matrix and hold it in memory
    # The rest transformation are applied during the contraction to ERI integrals
    t2aa, t2ab, t2bb = t2
    part_dm2aa = _ao2mo.nr_e2(t2aa.reshape(nocca**2,nvira**2),
                              numpy.asarray(orbva.T, order='F'), (0,nao,0,nao),
                              's1', 's1').reshape(nocca,nocca,nao,nao)
    part_dm2bb = _ao2mo.nr_e2(t2bb.reshape(noccb**2,nvirb**2),
                              numpy.asarray(orbvb.T, order='F'), (0,nao,0,nao),
                              's1', 's1').reshape(noccb,noccb,nao,nao)
    part_dm2ab = lib.einsum('ijab,pa,qb->ipqj', t2ab, orbva, orbvb)
    # Antisymmetrize the same-spin blocks.
    part_dm2aa = (part_dm2aa.transpose(0,2,3,1) -
                  part_dm2aa.transpose(0,3,2,1)) * .5
    part_dm2bb = (part_dm2bb.transpose(0,2,3,1) -
                  part_dm2bb.transpose(0,3,2,1)) * .5

    hf_dm1a, hf_dm1b = mp._scf.make_rdm1(mp.mo_coeff, mp.mo_occ)
    hf_dm1 = hf_dm1a + hf_dm1b

    if atmlst is None:
        atmlst = range(mol.natm)
    offsetdic = mol.offset_nr_by_atom()
    diagidx = numpy.arange(nao)
    diagidx = diagidx*(diagidx+1)//2 + diagidx
    de = numpy.zeros((len(atmlst),3))
    Imata = numpy.zeros((nao,nao))
    Imatb = numpy.zeros((nao,nao))
    fdm2 = lib.H5TmpFile()
    # vhf1 is spilled to a temporary HDF5 file to bound memory usage.
    vhf1 = fdm2.create_dataset('vhf1', (len(atmlst),2,3,nao,nao), 'f8')

    # 2e AO integrals dot 2pdm
    max_memory = max(0, mp.max_memory - lib.current_memory()[0])
    blksize = max(1, int(max_memory*.9e6/8/(nao**3*2.5)))
    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = offsetdic[ia]
        ip1 = p0
        vhf = numpy.zeros((2,3,nao,nao))
        for b0, b1, nf in mp2_grad._shell_prange(mol, shl0, shl1, blksize):
            ip0, ip1 = ip1, ip1 + nf
            # Finish the AO transformation for the alpha-alpha (+ alpha-beta)
            # 2-PDM block restricted to the current AO shell range.
            dm2bufa = lib.einsum('pi,iqrj->pqrj', orboa[ip0:ip1], part_dm2aa)
            dm2bufa+= lib.einsum('qi,iprj->pqrj', orboa, part_dm2aa[:,ip0:ip1])
            dm2bufa = lib.einsum('pqrj,sj->pqrs', dm2bufa, orboa)
            tmp = lib.einsum('pi,iqrj->pqrj', orboa[ip0:ip1], part_dm2ab)
            tmp+= lib.einsum('qi,iprj->pqrj', orboa, part_dm2ab[:,ip0:ip1])
            dm2bufa+= lib.einsum('pqrj,sj->pqrs', tmp, orbob)
            tmp = None
            dm2bufa = dm2bufa + dm2bufa.transpose(0,1,3,2)
            dm2bufa = lib.pack_tril(dm2bufa.reshape(-1,nao,nao)).reshape(nf,nao,-1)
            dm2bufa[:,:,diagidx] *= .5

            # Same for the beta-beta (+ beta-alpha) block.
            dm2bufb = lib.einsum('pi,iqrj->pqrj', orbob[ip0:ip1], part_dm2bb)
            dm2bufb+= lib.einsum('qi,iprj->pqrj', orbob, part_dm2bb[:,ip0:ip1])
            dm2bufb = lib.einsum('pqrj,sj->pqrs', dm2bufb, orbob)
            tmp = lib.einsum('iqrj,sj->iqrs', part_dm2ab, orbob[ip0:ip1])
            tmp+= lib.einsum('iqrj,sj->iqsr', part_dm2ab[:,:,ip0:ip1], orbob)
            dm2bufb+= lib.einsum('pi,iqrs->srpq', orboa, tmp)
            tmp = None
            dm2bufb = dm2bufb + dm2bufb.transpose(0,1,3,2)
            dm2bufb = lib.pack_tril(dm2bufb.reshape(-1,nao,nao)).reshape(nf,nao,-1)
            dm2bufb[:,:,diagidx] *= .5

            shls_slice = (b0,b1,0,mol.nbas,0,mol.nbas,0,mol.nbas)
            eri0 = mol.intor('int2e', aosym='s2kl', shls_slice=shls_slice)
            Imata += lib.einsum('ipx,iqx->pq', eri0.reshape(nf,nao,-1), dm2bufa)
            Imatb += lib.einsum('ipx,iqx->pq', eri0.reshape(nf,nao,-1), dm2bufb)
            eri0 = None

            # Derivative integrals contracted with the 2-PDM buffers.
            eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',
                             shls_slice=shls_slice).reshape(3,nf,nao,-1)
            de[k] -= numpy.einsum('xijk,ijk->x', eri1, dm2bufa) * 2
            de[k] -= numpy.einsum('xijk,ijk->x', eri1, dm2bufb) * 2
            dm2bufa = dm2bufb = None

            # HF part
            for i in range(3):
                eri1tmp = lib.unpack_tril(eri1[i].reshape(nf*nao,-1))
                eri1tmp = eri1tmp.reshape(nf,nao,nao,nao)
                vhf[:,i] += numpy.einsum('ijkl,ij->kl', eri1tmp, hf_dm1[ip0:ip1])
                vhf[0,i] -= numpy.einsum('ijkl,il->kj', eri1tmp, hf_dm1a[ip0:ip1])
                vhf[1,i] -= numpy.einsum('ijkl,il->kj', eri1tmp, hf_dm1b[ip0:ip1])
                vhf[:,i,ip0:ip1] += numpy.einsum('ijkl,kl->ij', eri1tmp, hf_dm1)
                vhf[0,i,ip0:ip1] -= numpy.einsum('ijkl,jk->il', eri1tmp, hf_dm1a)
                vhf[1,i,ip0:ip1] -= numpy.einsum('ijkl,jk->il', eri1tmp, hf_dm1b)
            eri1 = eri1tmp = None
        vhf1[k] = vhf
        log.debug('2e-part grad of atom %d %s = %s', ia, mol.atom_symbol(ia), de[k])
        time1 = log.timer_debug1('2e-part grad of atom %d'%ia, *time1)

    # Recompute nocc, nvir to include the frozen orbitals and make contraction for
    # the 1-particle quantities, see also the kernel function in uccsd_grad module.
    mo_a, mo_b = mp.mo_coeff
    mo_ea, mo_eb = mp._scf.mo_energy
    nao, nmoa = mo_a.shape
    nmob = mo_b.shape[1]
    nocca = numpy.count_nonzero(mp.mo_occ[0] > 0)
    noccb = numpy.count_nonzero(mp.mo_occ[1] > 0)
    s0 = mp._scf.get_ovlp()
    Imata = reduce(numpy.dot, (mo_a.T, Imata, s0, mo_a)) * -1
    Imatb = reduce(numpy.dot, (mo_b.T, Imatb, s0, mo_b)) * -1
    dm1a = numpy.zeros((nmoa,nmoa))
    dm1b = numpy.zeros((nmob,nmob))
    doo, dOO = d1[0]
    dvv, dVV = d1[1]
    if with_frozen:
        # Frozen-core/virtual coupling blocks of the response density.
        dco = Imata[OF_a[:,None],OA_a] / (mo_ea[OF_a,None] - mo_ea[OA_a])
        dfv = Imata[VF_a[:,None],VA_a] / (mo_ea[VF_a,None] - mo_ea[VA_a])
        dm1a[OA_a[:,None],OA_a] = (doo + doo.T) * .5
        dm1a[OF_a[:,None],OA_a] = dco
        dm1a[OA_a[:,None],OF_a] = dco.T
        dm1a[VA_a[:,None],VA_a] = (dvv + dvv.T) * .5
        dm1a[VF_a[:,None],VA_a] = dfv
        dm1a[VA_a[:,None],VF_a] = dfv.T
        dco = Imatb[OF_b[:,None],OA_b] / (mo_eb[OF_b,None] - mo_eb[OA_b])
        dfv = Imatb[VF_b[:,None],VA_b] / (mo_eb[VF_b,None] - mo_eb[VA_b])
        dm1b[OA_b[:,None],OA_b] = (dOO + dOO.T) * .5
        dm1b[OF_b[:,None],OA_b] = dco
        dm1b[OA_b[:,None],OF_b] = dco.T
        dm1b[VA_b[:,None],VA_b] = (dVV + dVV.T) * .5
        dm1b[VF_b[:,None],VA_b] = dfv
        dm1b[VA_b[:,None],VF_b] = dfv.T
    else:
        dm1a[:nocca,:nocca] = (doo + doo.T) * .5
        dm1a[nocca:,nocca:] = (dvv + dvv.T) * .5
        dm1b[:noccb,:noccb] = (dOO + dOO.T) * .5
        dm1b[noccb:,noccb:] = (dVV + dVV.T) * .5

    dm1 = (reduce(numpy.dot, (mo_a, dm1a, mo_a.T)),
           reduce(numpy.dot, (mo_b, dm1b, mo_b.T)))
    vhf = mp._scf.get_veff(mp.mol, dm1)
    Xvo = reduce(numpy.dot, (mo_a[:,nocca:].T, vhf[0], mo_a[:,:nocca]))
    XVO = reduce(numpy.dot, (mo_b[:,noccb:].T, vhf[1], mo_b[:,:noccb]))
    Xvo+= Imata[:nocca,nocca:].T - Imata[nocca:,:nocca]
    XVO+= Imatb[:noccb,noccb:].T - Imatb[noccb:,:noccb]

    # Solve the orbital-response (CPHF) equations for the vir-occ blocks.
    dm1_resp = _response_dm1(mp, (Xvo,XVO))
    dm1a += dm1_resp[0]
    dm1b += dm1_resp[1]
    time1 = log.timer_debug1('response_rdm1 intermediates', *time1)

    Imata[nocca:,:nocca] = Imata[:nocca,nocca:].T
    Imatb[noccb:,:noccb] = Imatb[:noccb,noccb:].T
    im1 = reduce(numpy.dot, (mo_a, Imata, mo_a.T))
    im1+= reduce(numpy.dot, (mo_b, Imatb, mo_b.T))
    time1 = log.timer_debug1('response_rdm1', *time1)

    log.debug('h1 and JK1')
    # Initialize hcore_deriv with the underlying SCF object because some
    # extensions (e.g. QM/MM, solvent) modifies the SCF object only.
    mf_grad = mp_grad.base._scf.nuc_grad_method()
    hcore_deriv = mf_grad.hcore_generator(mol)
    s1 = mf_grad.get_ovlp(mol)

    zeta = (mo_ea[:,None] + mo_ea) * .5
    zeta[nocca:,:nocca] = mo_ea[:nocca]
    zeta[:nocca,nocca:] = mo_ea[:nocca].reshape(-1,1)
    zeta_a = reduce(numpy.dot, (mo_a, zeta*dm1a, mo_a.T))
    zeta = (mo_eb[:,None] + mo_eb) * .5
    zeta[noccb:,:noccb] = mo_eb[:noccb]
    zeta[:noccb,noccb:] = mo_eb[:noccb].reshape(-1,1)
    zeta_b = reduce(numpy.dot, (mo_b, zeta*dm1b, mo_b.T))

    dm1 = (reduce(numpy.dot, (mo_a, dm1a, mo_a.T)),
           reduce(numpy.dot, (mo_b, dm1b, mo_b.T)))
    vhf_s1occ = mp._scf.get_veff(mol, (dm1[0]+dm1[0].T, dm1[1]+dm1[1].T))
    p1a = numpy.dot(mo_a[:,:nocca], mo_a[:,:nocca].T)
    p1b = numpy.dot(mo_b[:,:noccb], mo_b[:,:noccb].T)
    vhf_s1occ = (reduce(numpy.dot, (p1a, vhf_s1occ[0], p1a)) +
                 reduce(numpy.dot, (p1b, vhf_s1occ[1], p1b))) * .5
    time1 = log.timer_debug1('h1 and JK1', *time1)

    # Hartree-Fock part contribution
    dm1pa = hf_dm1a + dm1[0]*2
    dm1pb = hf_dm1b + dm1[1]*2
    dm1 = dm1[0] + dm1[1] + hf_dm1
    zeta_a += rhf_grad.make_rdm1e(mo_ea, mo_a, mp.mo_occ[0])
    zeta_b += rhf_grad.make_rdm1e(mo_eb, mo_b, mp.mo_occ[1])
    zeta = zeta_a + zeta_b

    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = offsetdic[ia]
        # s[1] dot I, note matrix im1 is not hermitian
        de[k] += numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])
        de[k] += numpy.einsum('xji,ij->x', s1[:,p0:p1], im1[:,p0:p1])
        # h[1] \dot DM, contribute to f1
        h1ao = hcore_deriv(ia)
        de[k] += numpy.einsum('xij,ji->x', h1ao, dm1)
        # -s[1]*e \dot DM, contribute to f1
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1] )
        de[k] -= numpy.einsum('xji,ij->x', s1[:,p0:p1], zeta[:,p0:p1])
        # -vhf[s_ij[1]], contribute to f1, *2 for s1+s1.T
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf_s1occ[p0:p1]) * 2
        de[k] -= numpy.einsum('xij,ij->x', vhf1[k,0], dm1pa)
        de[k] -= numpy.einsum('xij,ij->x', vhf1[k,1], dm1pb)

    log.timer('%s gradients' % mp.__class__.__name__, *time0)
    return de
def _response_dm1(mp, Xvo):
    '''Solve the UHF coupled-perturbed SCF (CPHF) equations for the
    orbital-response part of the 1-particle density matrix.

    Args:
        mp : the UMP2 object
        Xvo : (Xvo_alpha, Xvo_beta) right-hand-side matrices, shape
              (nvir, nocc) per spin.

    Returns:
        (dm1a, dm1b) : full (nmo, nmo) symmetric response densities whose
        only nonzero blocks are vir-occ / occ-vir.
    '''
    Xvo, XVO = Xvo
    nvira, nocca = Xvo.shape
    nvirb, noccb = XVO.shape
    nmoa = nocca + nvira
    nmob = noccb + nvirb
    nova = nocca * nvira
    mo_energy = mp._scf.mo_energy
    mo_occ = mp.mo_occ
    mo_a, mo_b = mp.mo_coeff
    def fvind(x):
        # CPHF response kernel: alpha and beta trial vectors are packed
        # contiguously in a single row; unpack, build AO densities, apply
        # get_veff and transform back to the vir-occ MO blocks.
        x1a = x[0,:nova].reshape(Xvo.shape)
        x1b = x[0,nova:].reshape(XVO.shape)
        dm1a = reduce(numpy.dot, (mo_a[:,nocca:], x1a, mo_a[:,:nocca].T))
        dm1b = reduce(numpy.dot, (mo_b[:,noccb:], x1b, mo_b[:,:noccb].T))
        va, vb = mp._scf.get_veff(mp.mol, (dm1a+dm1a.T, dm1b+dm1b.T))
        va = reduce(numpy.dot, (mo_a[:,nocca:].T, va, mo_a[:,:nocca]))
        vb = reduce(numpy.dot, (mo_b[:,noccb:].T, vb, mo_b[:,:noccb]))
        return numpy.hstack((va.ravel(), vb.ravel()))
    dvo = ucphf.solve(fvind, mo_energy, mo_occ, (Xvo,XVO), max_cycle=30)[0]
    dm1a = numpy.zeros((nmoa,nmoa))
    dm1a[nocca:,:nocca] = dvo[0]
    dm1a[:nocca,nocca:] = dvo[0].T
    dm1b = numpy.zeros((nmob,nmob))
    dm1b[noccb:,:noccb] = dvo[1]
    dm1b[:noccb,noccb:] = dvo[1].T
    return dm1a, dm1b
class Gradients(mp2_grad.Gradients):
    # Reuse the restricted-MP2 gradient driver; only the electronic-part
    # contraction (grad_elec above) differs for the unrestricted case.
    grad_elec = grad_elec

# Backward-compatible alias.
Grad = Gradients

# Allow ump2.UMP2 instances to create a gradients object via .Gradients().
ump2.UMP2.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
    # Self-test: water (triplet) gradients without and with frozen orbitals.
    # The fingerprints below were recorded from a reference run.
    from pyscf import gto
    from pyscf import scf
    mol = gto.M(
        atom = [
            ["O" , (0. , 0.     , 0.)],
            [1   , (0. ,-0.757  , 0.587)],
            [1   , (0. , 0.757  , 0.587)]],
        basis = '631g',
        spin = 2,
    )
    mf = scf.UHF(mol).run()
    mp = ump2.UMP2(mf).run()
    g1 = mp.Gradients().kernel()
# O     0.0000000000    -0.0000000000     0.1436990190
# H    -0.0000000000     0.1097329294    -0.0718495095
# H    -0.0000000000    -0.1097329294    -0.0718495095
    print(lib.finger(g1) - -0.22418090721297307)

    print('-----------------------------------')
    mol = gto.M(
        atom = [
            ["O" , (0. , 0.     , 0.)],
            [1   , (0. ,-0.757  , 0.587)],
            [1   , (0. , 0.757  , 0.587)]],
        basis = '631g',
        spin = 2,
    )
    mf = scf.UHF(mol).run()
    mp = ump2.UMP2(mf)
    # Exercise the with_frozen code path with a mixed core/virtual list and
    # a tiny max_memory to force the blocked ERI loop.
    mp.frozen = [0,1,10,11,12]
    mp.max_memory = 1
    mp.kernel()
    g1 = Gradients(mp).kernel()
# O    -0.0000000000    -0.0000000000     0.1454782514
# H     0.0000000000     0.1092558730    -0.0727391257
# H    -0.0000000000    -0.1092558730    -0.0727391257
    print(lib.finger(g1) - -0.22437276158813313)
|
sunqm/pyscf
|
pyscf/grad/ump2.py
|
Python
|
apache-2.0
| 13,859
|
[
"PySCF"
] |
9a9b813854e77a0bdc5e3a9f3152baf5683df7523d5a965c68b8f6e75416a32f
|
#!python
# coding=utf-8
import os
import glob
import shutil
import netCDF4
import logging
import tempfile
import operator
import itertools
import multiprocessing as mp
import pytz
import numpy as np
logger = logging.getLogger("pyncml")
logger.addHandler(logging.NullHandler())
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
raise RuntimeError('You need either lxml or ElementTree')
ncml_namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2'
class DotDict(object):
    """A bare namespace object: every keyword argument becomes an attribute."""

    def __init__(self, *args, **kwargs):
        # Positional arguments are accepted (and ignored) for call
        # compatibility; only keyword arguments carry data.
        self.__dict__.update(kwargs)

    def __repr__(self):
        import pprint
        return pprint.pformat(vars(self), indent=2)
def apply(input_file, ncml, output_file=None):
    """Apply an NcML document to a NetCDF file.

    Args:
        input_file: path to the source NetCDF file.
        ncml: an NcML file path, an XML string, or an etree Element.
        output_file: if None the input file is modified in place; otherwise
            the input is copied to this path and the copy is modified.

    Returns:
        The open, writable netCDF4.Dataset that was modified (caller closes).
    """
    if isinstance(ncml, str) and os.path.isfile(ncml):
        root = etree.parse(ncml).getroot()
    elif isinstance(ncml, str):
        root = etree.fromstring(ncml)
    elif etree.iselement(ncml):
        root = ncml
    else:
        raise ValueError("Could not parse ncml. \
            Did you pass in a valid file path, xml string, or etree Element object?")

    if output_file is None:
        # In place changes
        nc = netCDF4.Dataset(input_file, 'a')
    else:
        # New file
        shutil.copy(input_file, output_file)
        nc = netCDF4.Dataset(output_file, 'a')

    # Variables
    for v in root.findall('{%s}variable' % ncml_namespace):
        var_name = v.attrib.get("name")
        if var_name is None:
            logger.error("No 'name' attribute supplied on the <variable /> tag. Skipping.")
            continue

        # First, rename variable
        old_var_name = v.attrib.get("orgName")
        if old_var_name is not None and var_name is not None:
            logger.debug("Renaming variable from '{0}' to '{1}'".format(old_var_name, var_name))
            nc.renameVariable(old_var_name, var_name)

        ncvar = nc.variables.get(var_name)
        if ncvar is None:
            # NOTE(review): "Variabe" typo is inside the runtime log string;
            # left untouched here.
            logger.error("Variabe {0} not found in NetCDF file. Skipping.".format(var_name))
            continue

        # Add/Remove attributes
        for a in v.findall('{%s}attribute' % ncml_namespace):
            process_attribute_tag(ncvar, a)

        # Removals
        for r in v.findall('{%s}remove' % ncml_namespace):
            if r.attrib.get("type") == "attribute":
                logger.debug("Removing attribute '{0}' from variable '{1}'".format(r.attrib.get("name"), var_name))
                ncvar.delncattr(r.attrib.get("name"))

    # Global attributes
    for a in root.findall('{%s}attribute' % ncml_namespace):
        process_attribute_tag(nc, a)

    # Dimensions
    for d in root.findall('{%s}dimension' % ncml_namespace):
        dim_name = d.attrib.get('name')
        old_dim_name = d.attrib.get('orgName')
        if old_dim_name is not None:
            logger.debug("Renaming dimension from '{0}'' to '{1}''".format(old_dim_name, dim_name))
            nc.renameDimension(old_dim_name, dim_name)

    # Global removals
    for r in root.findall('{%s}remove' % ncml_namespace):
        if r.attrib.get("type") == "attribute":
            logger.debug("Removing global attribute '{0}'".format(r.attrib.get('name')))
            nc.delncattr(r.attrib.get("name"))

    nc.sync()
    return nc
def process_attribute_tag(target, a):
    """Apply one NcML ``<attribute/>`` element to *target*.

    *target* is anything exposing the netCDF4 attribute API
    (``setncattr``/``getncattr``/``delncattr``), i.e. a Dataset or Variable.
    Supports renaming (orgName) and setting a value, with optional numeric
    coercion driven by the element's ``type`` attribute.
    """
    attr_name = a.attrib.get("name")
    if attr_name is None:
        logger.error("No 'name' attribute supplied on the <attribute /> tag. Skipping.")
        return

    attr_type = a.attrib.get("type")
    new_value = a.attrib.get("value")

    # Handle a rename first so a subsequent value set targets the new name.
    previous_name = a.attrib.get('orgName')
    if previous_name is not None:
        logger.debug("Renaming attribute from '{0}'' to '{1}''".format(previous_name, attr_name))
        target.setncattr(attr_name, target.getncattr(previous_name))
        target.delncattr(previous_name)

    if new_value is None:
        return

    # Coerce the (string) XML value when a numeric type is declared;
    # anything else is stored as the raw string.
    if attr_type is not None:
        lowered = attr_type.lower()
        if lowered in ('float', 'double'):
            new_value = float(new_value)
        elif lowered in ('int', 'long', 'short'):
            new_value = int(new_value)

    logger.debug("Setting attribute '{0}' to '{1!s}''".format(attr_name, new_value))
    target.setncattr(attr_name, new_value)
def scan(ncml, apply_to_members=False, cpu_count=None):
    """Scan the <aggregation>/<scan> of an NcML document and summarize members.

    Args:
        ncml: an NcML file path, an XML string, or an etree Element.
        apply_to_members: if True, apply the NcML to each member before
            reading it (done on a temporary copy per file).
        cpu_count: worker process count; defaults to cpu_count() - 1.

    Returns:
        A DotDict with timevar_name, starting, ending, standard_names and the
        per-file members list, or an empty dict when no aggregation/scan
        element is present.
    """
    cpu_count = cpu_count or max(mp.cpu_count() - 1, 1)
    if isinstance(ncml, str) and os.path.isfile(ncml):
        root = etree.parse(ncml).getroot()
    elif isinstance(ncml, str):
        root = etree.fromstring(ncml)
    elif etree.iselement(ncml):
        root = ncml
    else:
        raise ValueError("Could not parse ncml. \
            Did you pass in a valid file path, xml string, or etree Element object?")

    agg = root.find('{%s}aggregation' % ncml_namespace)
    if agg is None:
        logger.debug("No <aggregation /> element found")
        return dict()
    timevar_name = agg.attrib.get("dimName")

    scan = agg.find('{%s}scan' % ncml_namespace)
    if scan is None:
        logger.debug("No <scan /> element found")
        return dict()

    location = os.path.abspath(scan.attrib.get('location'))
    if os.path.isfile(location):
        files = [os.path.abspath(location)]
    else:
        suffix = scan.attrib.get('suffix')
        subdirs = scan.attrib.get('subdirs')
        files = []
        if subdirs.lower() == "true":
            files = glob.glob(os.path.join(location, "**", "*{0}".format(suffix)))
        files += glob.glob(os.path.join(location, "*{0}".format(suffix)))
        files = [ os.path.abspath(x) for x in files ]

    # Start threading
    num_files = len(files)
    logger.info("Processing aggregation containing {!s} files".format(num_files))
    pool = mp.Pool(cpu_count)
    results = []
    for i, filepath in enumerate(files):
        # The NcML root is serialized to a string: etree Elements do not
        # pickle across process boundaries.
        r = pool.apply_async(scan_file, (etree.tostring(root), filepath, apply_to_members, timevar_name, i + 1, num_files))
        results.append(r)

    dataset_members = []
    for r in results:
        dataset_members.append(r.get())
    pool.close()
    pool.join()

    # Generate collection stats
    dataset_members = filter(None, dataset_members)  # Remove None responses
    logger.info("Generating collection stats...")
    dataset_members = sorted(dataset_members, key=operator.attrgetter('starting'))
    if not dataset_members:
        return DotDict(timevar_name=timevar_name,
                       starting=None,
                       ending=None,
                       standard_names=None,
                       members=[])

    dataset_starting = min([ x.starting for x in dataset_members ])
    dataset_ending = max([ x.ending for x in dataset_members ])
    dataset_variables = itertools.chain.from_iterable([ m.standard_names for m in dataset_members ])
    dataset_variables = list(set(dataset_variables))
    return DotDict(timevar_name=timevar_name,
                   starting=dataset_starting,
                   ending=dataset_ending,
                   standard_names=dataset_variables,
                   members=dataset_members)
def scan_file(ncml, filepath, apply_to_members, timevar_name, num, total_num):
    """Scan a single aggregation member file and summarize its contents.

    Parameters:
        ncml             -- serialized NcML document (as produced by
                            ``etree.tostring``); re-parsed here because lxml
                            elements do not pickle across process boundaries.
        filepath         -- path to the netCDF member file to scan.
        apply_to_members -- when True, apply the NcML to the member (via a
                            temporary file) before reading it.
        timevar_name     -- name of the time variable (the aggregation dimName).
        num, total_num   -- 1-based position of this member; logging only.

    Returns a DotDict(path, standard_names, title, starting, ending) where
    starting/ending are timezone-aware (UTC) datetimes, or None on any failure.
    """
    logger.info("Processing member ({0}/{1}) - {2} ".format(num, total_num, filepath))
    ncml = etree.fromstring(ncml)
    nc = None
    try:
        if apply_to_members is True:
            # Apply NcML: materialize the transformed dataset in a temp file.
            tmp_f, tmp_fp = tempfile.mkstemp(prefix="nc")
            os.close(tmp_f)
            nc = apply(filepath, ncml, output_file=tmp_fp)
        else:
            nc = netCDF4.Dataset(filepath)

        # Prefer 'name', then 'title', for a human-readable dataset title.
        title = "Pyncml Dataset"
        if 'name' in nc.ncattrs():
            title = nc.name
        elif 'title' in nc.ncattrs():
            title = nc.title

        timevar = nc.variables.get(timevar_name)
        if timevar is None:
            logger.error("Time variable '{0}' was not found in file '{1}'. Skipping.".format(timevar_name, filepath))
            return None

        # Start/Stop of NetCDF file
        starting = netCDF4.num2date(np.min(timevar[:]),
                                    units=timevar.units,
                                    calendar=getattr(timevar, 'calendar', 'standard'))
        ending = netCDF4.num2date(np.max(timevar[:]),
                                  units=timevar.units,
                                  calendar=getattr(timevar, 'calendar', 'standard'))
        # Only variables that declare a CF standard_name are reported.
        variables = list(
            filter(
                None,
                [ nc.variables[v].standard_name if hasattr(nc.variables[v], 'standard_name') else None for v in nc.variables.keys() ]
            )
        )
        # num2date returns naive datetimes; normalize to UTC.
        if starting.tzinfo is None:
            starting = starting.replace(tzinfo=pytz.utc)
        if ending.tzinfo is None:
            ending = ending.replace(tzinfo=pytz.utc)
        return DotDict(path=filepath, standard_names=variables, title=title, starting=starting, ending=ending)
    except BaseException:
        logger.exception("Something went wrong with {0}".format(filepath))
        return None
    finally:
        # BUG FIX: if opening the dataset raised, `nc` is still None and the
        # unconditional nc.close() raised AttributeError inside finally,
        # replacing the intended `return None` from the except clause.
        if nc is not None:
            nc.close()
        try:
            os.remove(tmp_fp)
        except (OSError, UnboundLocalError):
            # tmp_fp only exists when apply_to_members was True.
            pass
|
axiom-data-science/pyncml
|
pyncml/pyncml.py
|
Python
|
mit
| 9,730
|
[
"NetCDF"
] |
5f5118fda8be847a436b2c9bcffc1fb660280d4cb996ff0e9a377e520f141efa
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Development script to get the multiplicity of the separation facets for some model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import (
AllCoordinationGeometries,
)
if __name__ == "__main__":
    # For the icosahedron, every edge plus the edge formed by the two
    # antipodal vertices spans one separation plane; collect the distinct
    # 4-point planes and report how many there are.
    allcg = AllCoordinationGeometries()

    cg_symbol = "I:12"
    all_plane_points = []
    cg = allcg[cg_symbol]

    # I:12
    if cg_symbol == "I:12":
        # Vertex index -> index of the diametrically opposite vertex.
        opposite_points = {0: 3, 1: 2, 2: 1, 3: 0, 4: 7, 5: 6, 6: 5, 7: 4, 8: 11, 9: 10, 10: 9, 11: 8}
        for edge in cg._edges:
            mirrored = [opposite_points[edge[0]], opposite_points[edge[1]]]
            plane = tuple(sorted(list(edge) + mirrored))
            all_plane_points.append(plane)
        # Deduplicate (an edge and its mirrored edge produce the same plane).
        all_plane_points = [list(plane) for plane in set(all_plane_points)]

    print(f"All plane points ({len(all_plane_points):d}) for {cg_symbol} : ")
    print(all_plane_points)
|
materialsproject/pymatgen
|
dev_scripts/chemenv/plane_multiplicity.py
|
Python
|
mit
| 1,422
|
[
"pymatgen"
] |
6ecc69c52634a270222611b193bd02096f5f766d3e954c12b933b2b2db69b082
|
from selene.browser import execute_script
from tests.examples.todomvc.pagemodules_approach.pages import tasks
class TestTodoMVC(object):
    """Browser tests for the TodoMVC example app (page-module approach)."""

    def teardown(self):
        # Wipe persisted tasks so state never leaks between tests.
        execute_script('localStorage.clear()')

    def test_filter_tasks(self):
        """Active/Completed filters show only tasks in the matching state."""
        tasks.visit()
        tasks.add('a', 'b', 'c')
        tasks.should_be('a', 'b', 'c')
        # Mark 'b' completed; 'a' and 'c' remain active.
        tasks.toggle('b')
        tasks.filter_active()
        tasks.should_be('a', 'c')
        tasks.filter_completed()
        tasks.should_be('b')

    def test_clear_completed(self):
        """'Clear completed' removes only the completed task."""
        tasks.visit()
        tasks.add('a', 'b', 'c')
        tasks.toggle('b')
        tasks.clear_completed()
        tasks.should_be('a', 'c')
|
SergeyPirogov/selene
|
tests/examples/todomvc/pagemodules_approach/todomvc_test.py
|
Python
|
mit
| 701
|
[
"VisIt"
] |
755bfa541485556cc23dc6fca4d59706e82c7afc66c8b0e05066c832dcde4ddf
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import ssl
import yaml
import operator
from datetime import datetime
from glob import glob
from getpass import getpass
import numpy as np
# Need these lines to run RCMES through SSH without X11
import matplotlib
matplotlib.use('Agg')
import ocw.utils as utils
import ocw.dataset_processor as dsp
from ocw.dataset import Bounds
from ocw.dataset_loader import DatasetLoader
from metrics_and_plots import *
def load_datasets_from_config(extra_opts, *loader_opts):
    '''
    Generic dataset loading function.

    :param extra_opts: shared options (spatial/temporal bounds plus cached
        ESGF credentials) injected into individual loader configs as needed.
    :param loader_opts: one configuration dict per dataset; each must carry a
        'loader_name' key understood by DatasetLoader.
    :returns: list of loaded datasets.
    '''
    for opt in loader_opts:
        loader_name = opt['loader_name']
        if loader_name == 'esgf':
            # Prompt once and cache credentials in extra_opts so every
            # subsequent ESGF loader reuses them.
            if extra_opts['password'] is None:
                # BUG FIX: raw_input() does not exist on Python 3 (NameError).
                # Fall back to input() so the prompt works on both interpreters.
                try:
                    read_line = raw_input
                except NameError:
                    read_line = input
                extra_opts['username'] = read_line('Enter your ESGF OpenID:\n')
                extra_opts['password'] = getpass(
                    prompt='Enter your ESGF password:\n')
            opt['esgf_username'] = extra_opts['username']
            opt['esgf_password'] = extra_opts['password']
        elif loader_name == 'rcmed':
            # RCMED loaders need the subset bounds up front.
            opt['min_lat'] = extra_opts['min_lat']
            opt['max_lat'] = extra_opts['max_lat']
            opt['min_lon'] = extra_opts['min_lon']
            opt['max_lon'] = extra_opts['max_lon']
            opt['start_time'] = extra_opts['start_time']
            opt['end_time'] = extra_opts['end_time']

    loader = DatasetLoader(*loader_opts)
    loader.load_datasets()
    return loader.datasets
# Disable HTTPS certificate verification so RCMED/ESGF downloads work with
# self-signed certificates (Python >= 2.7.9 verifies by default).
if hasattr(ssl, '_create_unverified_context'):
    ssl._create_default_https_context = ssl._create_unverified_context

config_file = str(sys.argv[1])

print('Reading the configuration file {}'.format(config_file))
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# Python tags; safe_load would be safer — confirm config files are trusted.
config = yaml.load(open(config_file))
time_info = config['time']
temporal_resolution = time_info['temporal_resolution']

# Read time info
maximum_overlap_period = time_info.get('maximum_overlap_period', False)
if not maximum_overlap_period:
    # Round configured timestamps down to whole days.
    start_time = datetime.strptime(time_info['start_time'].strftime('%Y%m%d'),'%Y%m%d')
    end_time = datetime.strptime(time_info['end_time'].strftime('%Y%m%d'),'%Y%m%d')
else:
    # These values will be determined after datasets are loaded
    start_time, end_time = None, None

# Read space info
space_info = config['space']
if not 'boundary_type' in space_info:
    min_lat = space_info['min_lat']
    max_lat = space_info['max_lat']
    min_lon = space_info['min_lon']
    max_lon = space_info['max_lon']
else:
    # Named domain, e.g. 'CORDEX Africa' -> lookup of predefined boundaries.
    domain = space_info['boundary_type']
    if 'CORDEX' in domain:
        domain = domain.replace('CORDEX', '').lower()
    domain = domain.strip()
    min_lat, max_lat, min_lon, max_lon = utils.CORDEX_boundary(domain)

# Additional arguments for the DatasetLoader
extra_opts = {'min_lat': min_lat, 'max_lat': max_lat, 'min_lon': min_lon,
              'max_lon': max_lon, 'start_time': start_time,
              'end_time': end_time, 'username': None, 'password': None}

# Get the dataset loader options
data_info = config['datasets']

# Extract info we don't want to put into the loader config
# Multiplying Factor to scale obs by. Currently only supported for reference
# (first) dataset. We should instead make this a parameter for each
# loader and Dataset objects.
fact = data_info[0].pop('multiplying_factor', 1)

""" Step 1: Load the datasets """
print('Loading datasets:\n{}'.format(data_info))
datasets = load_datasets_from_config(extra_opts, *data_info)
multiplying_factor = np.ones(len(datasets))
multiplying_factor[0] = fact
names = [dataset.name for dataset in datasets]
for i, dataset in enumerate(datasets):
    res = dataset.temporal_resolution()
    if res == 'daily' or res == 'monthly':
        datasets[i] = dsp.normalize_dataset_datetimes(dataset, res)
    if multiplying_factor[i] != 1:
        datasets[i].values *= multiplying_factor[i]

""" Step 2: Subset the data for temporal and spatial domain """
# Create a Bounds object to use for subsetting
if maximum_overlap_period:
    start_time, end_time = utils.get_temporal_overlap(datasets)
    print('Maximum overlap period')
    print('start_time: {}'.format(start_time))
    print('end_time: {}'.format(end_time))

# For monthly data, snap the end time to the first of the month.
if temporal_resolution == 'monthly' and end_time.day !=1:
    end_time = end_time.replace(day=1)

# Shrink the spatial bounds to the intersection of all dataset extents.
for i, dataset in enumerate(datasets):
    min_lat = np.max([min_lat, dataset.lats.min()])
    max_lat = np.min([max_lat, dataset.lats.max()])
    min_lon = np.max([min_lon, dataset.lons.min()])
    max_lon = np.min([max_lon, dataset.lons.max()])

if not 'boundary_type' in space_info:
    bounds = Bounds(lat_min=min_lat,
                    lat_max=max_lat,
                    lon_min=min_lon,
                    lon_max=max_lon,
                    start=start_time,
                    end=end_time)
else:
    bounds = Bounds(boundary_type=space_info['boundary_type'],
                    start=start_time,
                    end=end_time)

for i, dataset in enumerate(datasets):
    datasets[i] = dsp.subset(dataset, bounds)
    if dataset.temporal_resolution() != temporal_resolution:
        datasets[i] = dsp.temporal_rebin(datasets[i], temporal_resolution)

# Temporally subset both observation and model datasets
# for the user specified season
month_start = time_info['month_start']
month_end = time_info['month_end']
average_each_year = time_info['average_each_year']

# For now we will treat the first listed dataset as the reference dataset for
# evaluation purposes.
for i, dataset in enumerate(datasets):
    datasets[i] = dsp.temporal_subset(dataset, month_start, month_end,
                                      average_each_year)

reference_dataset = datasets[0]
target_datasets = datasets[1:]
reference_name = names[0]
target_names = names[1:]

# generate grid points for regridding
if config['regrid']['regrid_on_reference']:
    new_lat = reference_dataset.lats
    new_lon = reference_dataset.lons
else:
    delta_lat = config['regrid']['regrid_dlat']
    delta_lon = config['regrid']['regrid_dlon']
    nlat = (max_lat - min_lat)/delta_lat+1
    nlon = (max_lon - min_lon)/delta_lon+1
    new_lat = np.linspace(min_lat, max_lat, nlat)
    new_lon = np.linspace(min_lon, max_lon, nlon)

# Get flag for boundary checking for regridding. By default, this is set to True
# since the main intent of this program is to evaluate RCMs. However, it can be
# used for GCMs in which case it should be set to False to save time.
boundary_check = config['regrid'].get('boundary_check', True)

# number of target datasets (usually models, but can also be obs / reanalysis)
ntarget = len(target_datasets)
print('Dataset loading completed')
print('Reference data: {}'.format(reference_name))
print('Number of target datasets: {}'.format(ntarget))
for target_name in target_names:
    print(target_name)

""" Step 3: Spatial regriding of the datasets """
print('Regridding datasets: {}'.format(config['regrid']))
if not config['regrid']['regrid_on_reference']:
    reference_dataset = dsp.spatial_regrid(reference_dataset, new_lat, new_lon)
    print('Reference dataset has been regridded')
for i, dataset in enumerate(target_datasets):
    target_datasets[i] = dsp.spatial_regrid(dataset, new_lat, new_lon,
                                            boundary_check=boundary_check)
    print('{} has been regridded'.format(target_names[i]))

print('Propagating missing data information')
# Share one missing-data mask across reference and targets.
datasets = dsp.mask_missing_data([reference_dataset]+target_datasets)
reference_dataset = datasets[0]
target_datasets = datasets[1:]

""" Step 4: Checking and converting variable units """
print('Checking and converting variable units')
reference_dataset = dsp.variable_unit_conversion(reference_dataset)
for i, dataset in enumerate(target_datasets):
    target_datasets[i] = dsp.variable_unit_conversion(dataset)

print('Generating multi-model ensemble')
# An ensemble mean ('ENS') is appended when at least two targets exist.
if len(target_datasets) >= 2.:
    target_datasets.append(dsp.ensemble(target_datasets))
    target_names.append('ENS')

""" Step 5: Generate subregion average and standard deviation """
if config['use_subregions']:
    # sort the subregion by region names and make a list
    subregions= sorted(config['subregions'].items(),key=operator.itemgetter(0))
    # number of subregions
    nsubregion = len(subregions)

    print('Calculating spatial averages and standard deviations of {} subregions'
          .format(nsubregion))

    reference_subregion_mean, reference_subregion_std, subregion_array = (
        utils.calc_subregion_area_mean_and_std([reference_dataset], subregions))
    target_subregion_mean, target_subregion_std, subregion_array = (
        utils.calc_subregion_area_mean_and_std(target_datasets, subregions))

""" Step 6: Write a netCDF file """
workdir = config['workdir']
if workdir[-1] != '/':
    workdir = workdir+'/'
print('Writing a netcdf file: ',workdir+config['output_netcdf_filename'])

if not os.path.exists(workdir):
    os.system("mkdir -p "+workdir)

if config['use_subregions']:
    dsp.write_netcdf_multiple_datasets_with_subregions(
        reference_dataset, reference_name, target_datasets, target_names,
        path=workdir+config['output_netcdf_filename'],
        subregions=subregions, subregion_array=subregion_array,
        ref_subregion_mean=reference_subregion_mean,
        ref_subregion_std=reference_subregion_std,
        model_subregion_mean=target_subregion_mean,
        model_subregion_std=target_subregion_std)
else:
    dsp.write_netcdf_multiple_datasets_with_subregions(
        reference_dataset, reference_name, target_datasets,
        target_names,
        path=workdir+config['output_netcdf_filename'])

""" Step 7: Calculate metrics and draw plots """
nmetrics = config['number_of_metrics_and_plots']
if config['use_subregions']:
    Map_plot_subregion(subregions, reference_dataset, workdir)

if nmetrics > 0:
    print('Calculating metrics and generating plots')
    # Metric/plot configs are keyed 'metrics1'/'plots1', 'metrics2'/... .
    for imetric in np.arange(nmetrics)+1:
        metrics_name = config['metrics'+'%1d' %imetric]
        plot_info = config['plots'+'%1d' %imetric]
        file_name = workdir+plot_info['file_name']

        print('metrics {0}/{1}: {2}'.format(imetric, nmetrics, metrics_name))
        # Default subplot grid: roughly square for ntarget + 2 panels.
        default_shape = (int(np.ceil(np.sqrt(ntarget + 2))),
                         int(np.ceil(np.sqrt(ntarget + 2))))
        if metrics_name == 'Map_plot_bias_of_multiyear_climatology':
            row, column = plot_info.get('subplots_array', default_shape)
            if 'map_projection' in plot_info.keys():
                Map_plot_bias_of_multiyear_climatology(
                    reference_dataset, reference_name, target_datasets, target_names,
                    file_name, row, column,
                    map_projection=plot_info['map_projection'])
            else:
                Map_plot_bias_of_multiyear_climatology(
                    reference_dataset, reference_name, target_datasets, target_names,
                    file_name, row, column)
        elif metrics_name == 'Taylor_diagram_spatial_pattern_of_multiyear_climatology':
            Taylor_diagram_spatial_pattern_of_multiyear_climatology(
                reference_dataset, reference_name, target_datasets, target_names,
                file_name)
        elif config['use_subregions']:
            if (metrics_name == 'Timeseries_plot_subregion_interannual_variability'
                    and average_each_year):
                row, column = plot_info.get('subplots_array', default_shape)
                Time_series_subregion(
                    reference_subregion_mean, reference_name, target_subregion_mean,
                    target_names, False, file_name, row, column,
                    x_tick=['Y'+str(i+1)
                            for i in np.arange(target_subregion_mean.shape[1])])
            if (metrics_name == 'Timeseries_plot_subregion_annual_cycle'
                    and not average_each_year and month_start==1 and month_end==12):
                row, column = plot_info.get('subplots_array', (1, 1))
                Time_series_subregion(
                    reference_subregion_mean, reference_name,
                    target_subregion_mean, target_names, True,
                    file_name, row, column,
                    x_tick=['J','F','M','A','M','J','J','A','S','O','N','D'])
            if (metrics_name == 'Portrait_diagram_subregion_interannual_variability'
                    and average_each_year):
                Portrait_diagram_subregion(reference_subregion_mean, reference_name,
                                           target_subregion_mean, target_names,
                                           False, file_name)
            if (metrics_name == 'Portrait_diagram_subregion_annual_cycle'
                    and not average_each_year and month_start==1 and month_end==12):
                Portrait_diagram_subregion(reference_subregion_mean, reference_name,
                                           target_subregion_mean, target_names,
                                           True, file_name)
        else:
            print('please check the currently supported metrics')
|
lewismc/climate
|
RCMES/run_RCMES.py
|
Python
|
apache-2.0
| 13,922
|
[
"NetCDF"
] |
e47909ddd8d3245495bfc6b5d5aafa23632cdb6eb5d2fd56c184dde3b7af904e
|
import numpy as np
from numpy import sqrt, empty, zeros
import numba as numba
from numba import jit
from . import Image, IO
from skimage.morphology import skeletonize
@jit(nopython=True, nogil=True)
def sobel_filt(im):
    """Horizontal Sobel response of a 2D image.

    First pass: central difference along each row, with the edge columns
    replicated from their inner neighbours.  Second pass: vertical [1, 2, 1]
    smoothing; the top and bottom rows use 2*(row + adjacent row) since only
    one vertical neighbour exists there.
    """
    H = im.shape[0]
    W = im.shape[1]
    buf = empty((H,W))
    out = empty((H,W))

    # Pre buffer grad
    for i in range(H):
        scan = im[i]
        for j in range(1,W-1):
            buf[i,j] = (scan[j+1] - scan[j-1])
        # Replicate the horizontal difference at the left/right borders.
        buf[i,0] = buf[i,1]
        buf[i,-1] = buf[i,-2]

    out[0] = 2*buf[0] + 2*buf[1]
    for i in range(1, H-1):
        for j in range(W):
            out[i,j] = buf[i-1,j] + 2*buf[i,j] + buf[i+1,j]
    out[-1] = 2*buf[-2] + 2*buf[-1]
    return out
@jit(nopython=True, nogil=True)
def grad(im, sigma):
    """Gaussian-smoothed image gradient.

    Returns (gx, gy, mag): horizontal and vertical Sobel responses of the
    smoothed image and the per-pixel gradient magnitude.
    """
    rows = im.shape[0]
    cols = im.shape[1]
    smoothed = Image.gaussian(im, sigma)
    gx = sobel_filt(smoothed)
    # Vertical response: run the horizontal filter on the transpose.
    gy = sobel_filt(smoothed.T).T
    mag = empty((rows, cols))
    for r in range(rows):
        for c in range(cols):
            dy = gy[r, c]
            dx = gx[r, c]
            mag[r, c] = sqrt(dx*dx + dy*dy)
    return gx, gy, mag
@jit(nopython=True, nogil=True)
def non_local_suppression(mag, gx, gy):
    """Non-maximum suppression of the gradient magnitude.

    Each interior pixel's gradient direction is bucketed into one of four
    45-degree sectors (0-45, 45-90, 90-135, 135-180).  The magnitude is
    compared against the two neighbour magnitudes linearly interpolated along
    that direction; the pixel survives only if it is >= both.

    Returns a boolean (numba.bool_) mask of local maxima.  Border pixels are
    always False.
    """
    H = mag.shape[0]
    W = mag.shape[1]
    # FIX: renamed the local `bool` (shadowed the builtin) and switched from
    # np.empty to np.zeros — the 1-pixel border was never written by the
    # loops below but IS read later by threshold(), so np.empty produced
    # nondeterministic border edges.
    bool_t = numba.bool_
    local_maxima = np.zeros((H,W), bool_t)

    for i in range(1,H-1):
        for j in range(1,W-1):
            dy = gy[i,j]
            dx = gx[i,j]
            grad = mag[i,j]
            ady = abs(dy)
            adx = abs(dx)
            if ady == 0 and adx == 0:
                # No gradient: cannot be an edge.
                local_maxima[i,j] = False
            elif ady < adx:
                if dx * dy > 0:
                    # 0 - 45
                    w = ady / adx
                    m1 = (1 - w)*mag[i,j+1] + (w)*mag[i+1,j+1]
                    m2 = (1 - w)*mag[i,j-1] + (w)*mag[i-1,j-1]
                    local_maxima[i,j] = grad >= max(m1, m2)
                else:
                    # 135 - 180
                    w = ady / adx
                    m1 = (1 - w)*mag[i,j-1] + (w)*mag[i+1,j-1]
                    m2 = (1 - w)*mag[i,j+1] + (w)*mag[i-1,j+1]
                    local_maxima[i,j] = grad >= max(m1, m2)
            else:
                if dx * dy > 0:
                    # 45 - 90
                    w = adx / ady
                    m1 = (1 - w)*mag[i+1,j] + (w)*mag[i+1,j+1]
                    m2 = (1 - w)*mag[i-1,j] + (w)*mag[i-1,j-1]
                    local_maxima[i,j] = grad >= max(m1, m2)
                else:
                    # 90 - 135
                    w = adx / ady
                    m1 = (1 - w)*mag[i+1,j] + (w)*mag[i+1,j-1]
                    m2 = (1 - w)*mag[i-1,j] + (w)*mag[i-1,j+1]
                    local_maxima[i,j] = grad >= max(m1, m2)
    return local_maxima
@jit(nopython=True, nogil=True)
def threshold(local_maxima, mag, low, high):
    """Split suppressed maxima into weak (> low) and strong (> high) masks.

    Returns (low_mask, high_mask); a pixel is set only when it is a local
    maximum AND its magnitude exceeds the respective threshold.
    """
    rows = mag.shape[0]
    cols = mag.shape[1]
    bool_dtype = numba.bool_
    high_mask = empty((rows, cols), bool_dtype)
    low_mask = empty((rows, cols), bool_dtype)
    for r in range(rows):
        for c in range(cols):
            is_max = local_maxima[r, c]
            strength = mag[r, c]
            high_mask[r, c] = is_max and (strength > high)
            low_mask[r, c] = is_max and (strength > low)
    return low_mask, high_mask
@jit(nopython=True, nogil=True)
def mag_threshold(mag, low, high):
    """Like threshold(), but on raw magnitudes with no maxima constraint.

    Returns (low_mask, high_mask) where each pixel tests mag > low / > high.
    """
    rows = mag.shape[0]
    cols = mag.shape[1]
    bool_dtype = numba.bool_
    high_mask = empty((rows, cols), bool_dtype)
    low_mask = empty((rows, cols), bool_dtype)
    for r in range(rows):
        for c in range(cols):
            strength = mag[r, c]
            high_mask[r, c] = (strength > high)
            low_mask[r, c] = (strength > low)
    return low_mask, high_mask
@jit(nopython=True, nogil=True)
def connect_blobs(blobs, starts, sizes, high_mask, min_length=-1):
    """Hysteresis linking: keep weak segments that touch a strong pixel.

    blobs     -- (N, 2) array of (row, col) pixel coordinates, concatenated
                 per segment.
    starts    -- start offset of each segment inside `blobs`.
    sizes     -- pixel count of each segment.
    high_mask -- strong-edge mask; a segment is kept if ANY of its pixels is
                 set here.
    min_length-- segments shorter than this are dropped outright.

    Returns (points, labels): the kept pixels with their coordinate columns
    swapped to (col, row) order, and a per-pixel label numbering the kept
    segments from 1.  NOTE(review): `labels` is allocated with np.empty's
    default float dtype — presumably integer labels were intended; confirm
    downstream usage.
    """
    H = high_mask.shape[0]
    W = high_mask.shape[1]
    blobs_out = np.empty_like(blobs)
    labels = np.empty(blobs.shape[0])
    # kept = []
    label = 1
    m = 0
    for k in range(len(starts)):
        s = starts[k]
        N = sizes[k]
        if N < min_length:
            continue
        # Check associated values from high_mask, break on match
        match = False
        for n in range(s, s + N):
            i, j = blobs[n,0], blobs[n,1]
            if high_mask[i,j]:
                match = True
                break
        if match:
            # kept.append(k)
            for n in range(s, s + N):
                # Swap (row, col) -> (x, y) while copying the kept pixels.
                blobs_out[m,1], blobs_out[m,0] = blobs[n,0], blobs[n,1]
                labels[m] = label
                m += 1
                # i, j = blobs[n,0], blobs[n,1]
                # output_mask[i,j] = label
            label += 1
    return blobs_out[:m], labels[:m]
def canny(grey, sigma=1., low_threshold=0.1, high_threshold=0.2, use_quantiles=False):
    """Canny edge detection.

    Smooths `grey` with a Gaussian of width `sigma`, suppresses non-maximal
    gradient magnitudes, applies hysteresis thresholding, and links weak
    segments that touch a strong pixel.

    Returns (points, labels) as produced by connect_blobs.  `use_quantiles`
    is accepted for API compatibility but currently has no effect.
    """
    gx, gy, mag = grad(grey, sigma)
    maxima = non_local_suppression(mag, gx, gy)
    weak_mask, strong_mask = threshold(maxima, mag, low_threshold, high_threshold)
    blobs, starts, sizes = Image.find_segments(weak_mask, connectivity=8)
    return connect_blobs(blobs, starts, sizes, strong_mask)
def double_threshold(im, low_threshold=0.1, high_threshold=0.2, min_length=-1):
    """Hysteresis thresholding of a response map `im`.

    The weak mask (im >= low_threshold) is skeletonized before segment
    extraction; segments shorter than `min_length` or with no pixel above
    high_threshold are discarded.  Returns (points, labels).
    """
    weak_mask = skeletonize(im >= low_threshold)
    strong_mask = im >= high_threshold
    blobs, starts, sizes = Image.find_segments(weak_mask.copy(), connectivity=8)
    return connect_blobs(blobs, starts, sizes, strong_mask, min_length)
# def labelled_depths(im, D, low_threshold=0.1, high_threshold=0.2, depth_threshold=10, scale=4):
# # low_mask, high_mask = threshold(im, low_threshold, high_threshold)
# low_mask = im >= low_threshold
# high_mask = im >= high_threshold
# blobs, starts, sizes = Image.find_blobs2(low_mask, D, connectivity=8, depth_thresh=depth_threshold, scale=4)
# output_mask = connect_blobs(blobs, starts, sizes, high_mask)
# return output_mask
|
kitizz/edge3d-paper
|
src/tools/Canny.py
|
Python
|
gpl-3.0
| 6,960
|
[
"Gaussian"
] |
c41e6d992d81a07e1bd2832422404e4e8ae2796c7ab7ccfbd26eee440123999a
|
"""
# Copyright (C) 2007 Nathan Ramella (nar@remix.net)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Nathan Ramella <nar@remix.net> or visit http://www.liveapi.org
"""
import sys
import Live
#path = "/Users/ST8/Production/Arduinome/Dev/LiveOSC"
#errorLog = open(path + "/stderr.txt", "w")
#errorLog.write("Starting Error Log")
#sys.stderr = errorLog
#stdoutLog = open(path + "/stdout.txt", "w")
#stdoutLog.write("Starting Standard Out Log")
#sys.stdout = stdoutLog
from livehack import LiveHack
def create_instance(c_instance):
    # Entry point called by Ableton Live to instantiate the control surface.
    return LiveHack(c_instance)
|
xl7dev/WebShell
|
Udp/LiveHack/__init__.py
|
Python
|
gpl-2.0
| 1,298
|
[
"VisIt"
] |
5d6acbd69a8b26652b9e8940c4a7f9ff24c3220a93585c204f9c5be8f94e0458
|
"""
Acceptance tests for Content Libraries in Studio
"""
from ddt import ddt, data
from nose.plugins.attrib import attr
from flaky import flaky
from .base_studio_test import StudioLibraryTest
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.utils import add_component
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.users import LibraryUsersPage
@attr('shard_2')
@ddt
class LibraryEditPageTest(StudioLibraryTest):
    """
    Test the functionality of the library edit page.
    """
    def setUp(self):
        """
        Ensure a library exists and navigate to the library edit page.
        """
        super(LibraryEditPageTest, self).setUp()
        self.lib_page = LibraryEditPage(self.browser, self.library_key)
        self.lib_page.visit()
        self.lib_page.wait_until_ready()

    def test_page_header(self):
        """
        Scenario: Ensure that the library's name is displayed in the header and title.
        Given I have a library in Studio
        And I navigate to Library Page in Studio
        Then I can see library name in page header title
        And I can see library name in browser page title
        """
        self.assertIn(self.library_info['display_name'], self.lib_page.get_header_title())
        self.assertIn(self.library_info['display_name'], self.browser.title)

    def test_add_duplicate_delete_actions(self):
        """
        Scenario: Ensure that we can add an HTML block, duplicate it, then delete the original.
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        Then there are no XBlocks displayed
        When I add Text XBlock
        Then one XBlock is displayed
        When I duplicate first XBlock
        Then two XBlocks are displayed
        And those XBlocks locators' are different
        When I delete first XBlock
        Then one XBlock is displayed
        And displayed XBlock are second one
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)

        # Create a new block:
        add_component(self.lib_page, "html", "Text")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        first_block_id = self.lib_page.xblocks[0].locator

        # Duplicate the block:
        self.lib_page.click_duplicate_button(first_block_id)
        self.assertEqual(len(self.lib_page.xblocks), 2)
        second_block_id = self.lib_page.xblocks[1].locator
        # The duplicate must be a distinct block (new locator).
        self.assertNotEqual(first_block_id, second_block_id)

        # Delete the first block:
        self.lib_page.click_delete_button(first_block_id, confirm=True)
        self.assertEqual(len(self.lib_page.xblocks), 1)
        self.assertEqual(self.lib_page.xblocks[0].locator, second_block_id)

    def test_no_edit_visibility_button(self):
        """
        Scenario: Ensure that library xblocks do not have 'edit visibility' buttons.
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        When I add Text XBlock
        Then one XBlock is displayed
        And no 'edit visibility' button is shown
        """
        add_component(self.lib_page, "html", "Text")
        self.assertFalse(self.lib_page.xblocks[0].has_edit_visibility_button)

    def test_add_edit_xblock(self):
        """
        Scenario: Ensure that we can add an XBlock, edit it, then see the resulting changes.
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        Then there are no XBlocks displayed
        When I add Multiple Choice XBlock
        Then one XBlock is displayed
        When I edit first XBlock
        And I go to basic tab
        And set its text to a fairly trivial question about Battlestar Galactica
        And save XBlock
        Then one XBlock is displayed
        And first XBlock student content contains at least part of text I set
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)

        # Create a new problem block:
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]

        # Edit it:
        problem_block.edit()
        problem_block.open_basic_tab()
        problem_block.set_codemirror_text(
            """
            >>Who is "Starbuck"?<<
             (x) Kara Thrace
             ( ) William Adama
             ( ) Laura Roslin
             ( ) Lee Adama
             ( ) Gaius Baltar
            """
        )
        problem_block.save_settings()

        # Check that the save worked:
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]
        # The author view renders all answer options, so any option text
        # proves the edited content was persisted.
        self.assertIn("Laura Roslin", problem_block.author_content)

    def test_no_discussion_button(self):
        """
        Ensure the UI is not loaded for adding discussions.
        """
        self.assertFalse(self.browser.find_elements_by_css_selector('span.large-discussion-icon'))

    @flaky  # TODO fix this, see TNL-2322
    def test_library_pagination(self):
        """
        Scenario: Ensure that adding several XBlocks to a library results in pagination.
        Given that I have a library in Studio with no XBlocks
        And I create 10 Multiple Choice XBlocks
        Then 10 are displayed.
        When I add one more Multiple Choice XBlock
        Then 1 XBlock will be displayed
        When I delete that XBlock
        Then 10 are displayed.
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)
        # The page shows 10 blocks per page; the 11th starts a second page.
        for _ in range(10):
            add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 10)
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
        self.assertEqual(len(self.lib_page.xblocks), 10)

    @data('top', 'bottom')
    def test_nav_present_but_disabled(self, position):
        """
        Scenario: Ensure that the navigation buttons aren't active when there aren't enough XBlocks.
        Given that I have a library in Studio with no XBlocks
        The Navigation buttons should be disabled.
        When I add a multiple choice problem
        The Navigation buttons should be disabled.
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)
        self.assertTrue(self.lib_page.nav_disabled(position))
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertTrue(self.lib_page.nav_disabled(position))

    def test_delete_deletes_only_desired_block(self):
        """
        Scenario: Ensure that when deleting XBlock only desired XBlock is deleted
        Given that I have a library in Studio with no XBlocks
        And I create Blank Common Problem XBlock
        And I create Checkboxes XBlock
        When I delete Blank Problem XBlock
        Then Checkboxes XBlock is not deleted
        And Blank Common Problem XBlock is deleted
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)
        add_component(self.lib_page, "problem", "Blank Common Problem")
        add_component(self.lib_page, "problem", "Checkboxes")
        self.assertEqual(len(self.lib_page.xblocks), 2)
        self.assertIn("Blank Common Problem", self.lib_page.xblocks[0].name)
        self.assertIn("Checkboxes", self.lib_page.xblocks[1].name)
        self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]
        self.assertIn("Checkboxes", problem_block.name)
@attr('shard_2')
@ddt
class LibraryNavigationTest(StudioLibraryTest):
"""
Test common Navigation actions
"""
def setUp(self):
"""
Ensure a library exists and navigate to the library edit page.
"""
super(LibraryNavigationTest, self).setUp()
self.lib_page = LibraryEditPage(self.browser, self.library_key)
self.lib_page.visit()
self.lib_page.wait_until_ready()
def populate_library_fixture(self, library_fixture):
"""
Create four pages worth of XBlocks, and offset by one so each is named
after the number they should be in line by the user's perception.
"""
self.blocks = [XBlockFixtureDesc('html', str(i)) for i in xrange(1, 41)]
library_fixture.add_children(*self.blocks)
def test_arbitrary_page_selection(self):
"""
Scenario: I can pick a specific page number of a Library at will.
Given that I have a library in Studio with 40 XBlocks
When I go to the 3rd page
The first XBlock should be the 21st XBlock
When I go to the 4th Page
The first XBlock should be the 31st XBlock
When I go to the 1st page
The first XBlock should be the 1st XBlock
When I go to the 2nd page
The first XBlock should be the 11th XBlock
"""
self.lib_page.go_to_page(3)
self.assertEqual(self.lib_page.xblocks[0].name, '21')
self.lib_page.go_to_page(4)
self.assertEqual(self.lib_page.xblocks[0].name, '31')
self.lib_page.go_to_page(1)
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.lib_page.go_to_page(2)
self.assertEqual(self.lib_page.xblocks[0].name, '11')
def test_bogus_page_selection(self):
"""
Scenario: I can't pick a nonsense page number of a Library
Given that I have a library in Studio with 40 XBlocks
When I attempt to go to the 'a'th page
The input field will be cleared and no change of XBlocks will be made
When I attempt to visit the 5th page
The input field will be cleared and no change of XBlocks will be made
When I attempt to visit the -1st page
The input field will be cleared and no change of XBlocks will be made
When I attempt to visit the 0th page
The input field will be cleared and no change of XBlocks will be made
"""
self.assertEqual(self.lib_page.xblocks[0].name, '1')
self.lib_page.go_to_page('a')
self.assertTrue(self.lib_page.check_page_unchanged('1'))
self.lib_page.go_to_page(-1)
self.assertTrue(self.lib_page.check_page_unchanged('1'))
self.lib_page.go_to_page(5)
self.assertTrue(self.lib_page.check_page_unchanged('1'))
self.lib_page.go_to_page(0)
self.assertTrue(self.lib_page.check_page_unchanged('1'))
@data('top', 'bottom')
def test_nav_buttons(self, position):
    """
    Scenario: Ensure that the navigation buttons work.
    Given that I have a library in Studio with 40 XBlocks
    The previous button should be disabled.
    The first XBlock should be the 1st XBlock
    Then if I hit the next button
    The first XBlock should be the 11th XBlock
    Then if I hit the next button
    The first XBlock should be the 21st XBlock
    Then if I hit the next button
    The first XBlock should be the 31st XBlock
    And the next button should be disabled
    Then if I hit the previous button
    The first XBlock should be the 21st XBlock
    Then if I hit the previous button
    The first XBlock should be the 11th XBlock
    Then if I hit the previous button
    The first XBlock should be the 1st XBlock
    And the previous button should be disabled
    """
    # Check forward navigation; "previous" must start out disabled on page 1.
    self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.lib_page.move_forward(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '11')
    self.lib_page.move_forward(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '21')
    self.lib_page.move_forward(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '31')
    # Bug fix: the original called nav_disabled() here without wrapping it
    # in assertTrue, so a "next" button that stayed enabled on the last
    # page would never have failed the test.
    self.assertTrue(self.lib_page.nav_disabled(position, ['next']))
    # Check backward navigation back to page 1.
    self.lib_page.move_back(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '21')
    self.lib_page.move_back(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '11')
    self.lib_page.move_back(position)
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))
def test_library_pagination(self):
    """
    Scenario: Ensure that adding several XBlocks to a library results in pagination.
    Given that I have a library in Studio with 40 XBlocks
    Then 10 are displayed
    And the first XBlock will be the 1st one
    And I'm on the 1st page
    When I add 1 Multiple Choice XBlock
    Then 1 XBlock will be displayed
    And I'm on the 5th page
    The first XBlock will be the newest one
    When I delete that XBlock
    Then 10 are displayed
    And I'm on the 4th page
    And the first XBlock is the 31st one
    And the last XBlock is the 40th one.
    """
    # Initial state: 40 blocks paginated at 10 per page, viewing page 1.
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.get_page_number(), '1')
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    # Adding a 41st block opens a new (5th) page and navigates to it.
    add_component(self.lib_page, "problem", "Multiple Choice")
    self.assertEqual(len(self.lib_page.xblocks), 1)
    self.assertEqual(self.lib_page.get_page_number(), '5')
    self.assertEqual(self.lib_page.xblocks[0].name, "Multiple Choice")
    # Deleting the only block on page 5 falls back to the last full page.
    self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.get_page_number(), '4')
    self.assertEqual(self.lib_page.xblocks[0].name, '31')
    self.assertEqual(self.lib_page.xblocks[-1].name, '40')
def test_delete_shifts_blocks(self):
    """
    Scenario: Ensure that removing an XBlock shifts other blocks back.
    Given that I have a library in Studio with 40 XBlocks
    Then 10 are displayed
    And I will be on the first page
    When I delete the third XBlock
    There will be 10 displayed
    And the first XBlock will be the first one
    And the last XBlock will be the 11th one
    And I will be on the first page
    """
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.get_page_number(), '1')
    # Deleting block '3' pulls block '11' forward onto page 1 to refill it.
    self.lib_page.click_delete_button(self.lib_page.xblocks[2].locator, confirm=True)
    self.assertEqual(len(self.lib_page.xblocks), 10)
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.assertEqual(self.lib_page.xblocks[-1].name, '11')
    self.assertEqual(self.lib_page.get_page_number(), '1')
def test_previews(self):
    """
    Scenario: Ensure the user is able to hide previews of XBlocks.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    Then the previews will not be visible
    And when I click the toggle previews button
    Then the previews are visible
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    # Toggling hides the previews...
    self.lib_page.toggle_previews()
    self.assertFalse(self.lib_page.are_previews_showing())
    # ...and toggling again restores them.
    self.lib_page.toggle_previews()
    self.assertTrue(self.lib_page.are_previews_showing())
def test_previews_navigation(self):
    """
    Scenario: Ensure preview settings persist across navigation.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    And click the next page button
    Then the previews will not be visible
    And the first XBlock will be the 11th one
    And the last XBlock will be the 20th one
    And when I click the toggle previews button
    And I click the previous page button
    Then the previews will be visible
    And the first XBlock will be the first one
    And the last XBlock will be the 10th one
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    # Which set of arrows shouldn't matter for this test.
    self.lib_page.move_forward('top')
    # The hidden-previews setting must survive the page change.
    self.assertFalse(self.lib_page.are_previews_showing())
    self.assertEqual(self.lib_page.xblocks[0].name, '11')
    self.assertEqual(self.lib_page.xblocks[-1].name, '20')
    self.lib_page.toggle_previews()
    self.lib_page.move_back('top')
    # Likewise for re-enabled previews when navigating back.
    self.assertTrue(self.lib_page.are_previews_showing())
    self.assertEqual(self.lib_page.xblocks[0].name, '1')
    self.assertEqual(self.lib_page.xblocks[-1].name, '10')
def test_preview_state_persistance(self):
    """
    Scenario: Ensure preview state persists between page loads.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    And I revisit the page
    Then the previews will not be visible
    """
    # NOTE(review): "persistance" is a typo for "persistence"; the method
    # is deliberately not renamed so external test selections keep working.
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    # A full reload must restore the hidden-previews preference.
    self.lib_page.visit()
    self.lib_page.wait_until_ready()
    self.assertFalse(self.lib_page.are_previews_showing())
def test_preview_add_xblock(self):
    """
    Scenario: Ensure previews are shown when adding new blocks, regardless of preview setting.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    Then the previews will not be visible
    And when I add an XBlock
    Then I will be on the 5th page
    And the XBlock will have loaded a preview
    And when I revisit the library
    And I go to the 5th page
    Then the top XBlock will be the one I added
    And it will not have a preview
    And when I add an XBlock
    Then the XBlock I added will have a preview
    And the top XBlock will not have one.
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    self.assertFalse(self.lib_page.are_previews_showing())
    # A freshly added block always renders a preview, even with previews off.
    add_component(self.lib_page, "problem", "Checkboxes")
    self.assertEqual(self.lib_page.get_page_number(), '5')
    first_added = self.lib_page.xblocks[0]
    self.assertIn("Checkboxes", first_added.name)
    self.assertFalse(self.lib_page.xblocks[0].is_placeholder())
    # After a reload the hidden-previews preference applies again.
    self.lib_page.visit()
    self.lib_page.wait_until_ready()
    self.lib_page.go_to_page(5)
    self.assertTrue(self.lib_page.xblocks[0].is_placeholder())
    add_component(self.lib_page, "problem", "Multiple Choice")
    # DOM has detached the element since the last assignment, so re-query.
    first_added = self.lib_page.xblocks[0]
    second_added = self.lib_page.xblocks[1]
    self.assertIn("Multiple Choice", second_added.name)
    # Only the newly added block gets a preview; the earlier one stays hidden.
    self.assertFalse(second_added.is_placeholder())
    self.assertTrue(first_added.is_placeholder())
def test_edit_with_preview(self):
    """
    Scenario: Editing an XBlock should show me a preview even if previews are hidden.
    Given that I have a library in Studio with 40 XBlocks
    Then previews are visible
    And when I click the toggle previews button
    Then the previews will not be visible
    And when I edit the first XBlock
    Then the first XBlock will show a preview
    And the other XBlocks will still be placeholders
    """
    self.assertTrue(self.lib_page.are_previews_showing())
    self.lib_page.toggle_previews()
    self.assertFalse(self.lib_page.are_previews_showing())
    # Edit and save the first block; this must force a preview for it.
    block_under_edit = self.lib_page.xblocks[0]
    block_under_edit.edit()
    block_under_edit.save_settings()
    self.assertFalse(block_under_edit.is_placeholder())
    # Every other block must remain a placeholder (previews still hidden).
    for untouched_block in self.lib_page.xblocks[1:]:
        self.assertTrue(untouched_block.is_placeholder())
def test_duplicate_xblock_pagination(self):
    """
    Scenario: Duplicating an XBlock should not shift the page if the XBlock is not at the end.
    Given that I have a library in Studio with 40 XBlocks
    When I duplicate the third XBlock
    Then the page should not change
    And the duplicate XBlock should be there
    And it should show a preview
    And there should not be more than 10 XBlocks visible.
    """
    # Duplicate the third block; the copy is inserted right after it.
    source_locator = self.lib_page.xblocks[2].locator
    self.lib_page.click_duplicate_button(source_locator)
    self.lib_page.wait_until_ready()
    duplicate_block = self.lib_page.xblocks[3]
    self.assertIn('Duplicate', duplicate_block.name)
    self.assertFalse(duplicate_block.is_placeholder())
    # The page must not overflow its 10-block capacity.
    self.assertEqual(len(self.lib_page.xblocks), 10)
def test_duplicate_xblock_pagination_end(self):
    """
    Scenario: Duplicating an XBlock if it's the last one should bring me to the next page with a preview.
    Given that I have a library in Studio with 40 XBlocks
    And when I hide previews
    And I duplicate the last XBlock
    The page should change to page 2
    And the duplicate XBlock should be the first XBlock
    And it should not be a placeholder
    """
    self.lib_page.toggle_previews()
    self.lib_page.click_duplicate_button(self.lib_page.xblocks[-1].locator)
    self.lib_page.wait_until_ready()
    # The copy overflows page 1, so the view follows it onto page 2, and —
    # like any newly created block — it renders a preview even though
    # previews are toggled off.
    self.assertEqual(self.lib_page.get_page_number(), '2')
    target_block = self.lib_page.xblocks[0]
    self.assertIn('Duplicate', target_block.name)
    self.assertFalse(target_block.is_placeholder())
class LibraryUsersPageTest(StudioLibraryTest):
    """
    Test the functionality of the library "Instructor Access" page.
    """

    def setUp(self):
        super(LibraryUsersPageTest, self).setUp()
        # Create a second user for use in these tests:
        AutoAuthPage(self.browser, username="second", email="second@example.com", no_login=True).visit()
        self.page = LibraryUsersPage(self.browser, self.library_key)
        self.page.visit()

    def _refresh_page(self):
        """
        Reload the page.
        """
        self.page = LibraryUsersPage(self.browser, self.library_key)
        self.page.visit()

    def test_user_management(self):
        """
        Scenario: Ensure that we can edit the permissions of users.
        Given I have a library in Studio where I am the only admin
        assigned (which is the default for a newly-created library)
        And I navigate to Library "Instructor Access" Page in Studio
        Then there should be one user listed (myself), and I must
        not be able to remove myself or my instructor privilege.
        When I click Add Instructor
        Then I see a form to complete
        When I complete the form and submit it
        Then I can see the new user is listed as a "User" of the library
        When I click to Add Staff permissions to the new user
        Then I can see the new user has staff permissions and that I am now
        able to promote them to an Admin or remove their staff permissions.
        When I click to Add Admin permissions to the new user
        Then I can see the new user has admin permissions and that I can now
        remove Admin permissions from either user.
        """
        def check_is_only_admin(user):
            """
            Ensure user is an admin user and cannot be removed.
            (There must always be at least one admin user.)
            """
            self.assertIn("admin", user.role_label.lower())
            self.assertFalse(user.can_promote)
            self.assertFalse(user.can_demote)
            self.assertFalse(user.can_delete)
            self.assertTrue(user.has_no_change_warning)
            self.assertIn("Promote another member to Admin to remove your admin rights", user.no_change_warning_text)

        # Initially only the current user is listed, as the sole admin.
        self.assertEqual(len(self.page.users), 1)
        user = self.page.users[0]
        self.assertTrue(user.is_current_user)
        check_is_only_admin(user)

        # Add a new user:
        self.assertTrue(self.page.has_add_button)
        self.assertFalse(self.page.new_user_form_visible)
        self.page.click_add_button()
        self.assertTrue(self.page.new_user_form_visible)
        self.page.set_new_user_email('second@example.com')
        self.page.click_submit_new_user_form()

        # Check the new user's listing:
        def get_two_users():
            """
            Expect two users to be listed, one being me, and another user.
            Returns me, them
            """
            users = self.page.users
            self.assertEqual(len(users), 2)
            self.assertEqual(len([u for u in users if u.is_current_user]), 1)
            if users[0].is_current_user:
                return users[0], users[1]
            else:
                return users[1], users[0]

        self._refresh_page()
        user_me, them = get_two_users()
        check_is_only_admin(user_me)
        # The new user starts with plain "User" access and can only be
        # promoted (to Staff) or deleted.
        self.assertIn("user", them.role_label.lower())
        self.assertTrue(them.can_promote)
        self.assertIn("Add Staff Access", them.promote_button_text)
        self.assertFalse(them.can_demote)
        self.assertTrue(them.can_delete)
        self.assertFalse(them.has_no_change_warning)

        # Add Staff permissions to the new user:
        them.click_promote()
        self._refresh_page()
        user_me, them = get_two_users()
        check_is_only_admin(user_me)
        self.assertIn("staff", them.role_label.lower())
        self.assertTrue(them.can_promote)
        self.assertIn("Add Admin Access", them.promote_button_text)
        self.assertTrue(them.can_demote)
        self.assertIn("Remove Staff Access", them.demote_button_text)
        self.assertTrue(them.can_delete)
        self.assertFalse(them.has_no_change_warning)

        # Add Admin permissions to the new user:
        them.click_promote()
        self._refresh_page()
        user_me, them = get_two_users()
        # With two admins, either one may now be demoted or deleted.
        self.assertIn("admin", user_me.role_label.lower())
        self.assertFalse(user_me.can_promote)
        self.assertTrue(user_me.can_demote)
        self.assertTrue(user_me.can_delete)
        self.assertFalse(user_me.has_no_change_warning)
        self.assertIn("admin", them.role_label.lower())
        self.assertFalse(them.can_promote)
        self.assertTrue(them.can_demote)
        self.assertIn("Remove Admin Access", them.demote_button_text)
        self.assertTrue(them.can_delete)
        self.assertFalse(them.has_no_change_warning)

        # Delete the new user:
        them.click_delete()
        self._refresh_page()
        self.assertEqual(len(self.page.users), 1)
        user = self.page.users[0]
        self.assertTrue(user.is_current_user)
@attr('a11y')
class StudioLibraryA11yTest(StudioLibraryTest):
    """
    Class to test Studio pages accessibility.
    """

    def test_lib_edit_page_a11y(self):
        """
        Check accessibility of LibraryEditPage.
        """
        edit_page = LibraryEditPage(self.browser, self.library_key)
        edit_page.visit()
        edit_page.wait_until_ready()
        # Rules with known, ticketed violations; ignore them until they are
        # fixed so the audit only fails on new regressions.
        known_issue_rules = [
            'section',  # TODO: AC-491
            'color-contrast',  # TODO: AC-225
            'link-href',  # TODO: AC-226
            'nav-aria-label',  # TODO: AC-227
            'icon-aria-hidden',  # TODO: AC-229
        ]
        edit_page.a11y_audit.config.set_rules({
            "ignore": known_issue_rules,
        })
        edit_page.a11y_audit.check_for_accessibility_errors()
|
waheedahmed/edx-platform
|
common/test/acceptance/tests/studio/test_studio_library.py
|
Python
|
agpl-3.0
| 28,096
|
[
"VisIt"
] |
acedcec1253110d1840c9feb1f1983ef4fb4b8b7058e4e02a07d043bf61554c3
|
""" Meager code path measurement tool.
Ned Batchelder
http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html
MIT License.
"""
from __future__ import with_statement
import optparse
import sys
from collections import defaultdict
try:
import ast
from ast import iter_child_nodes
except ImportError: # Python 2.5
from flake8.util import ast, iter_child_nodes
__version__ = '0.3'
class ASTVisitor(object):
    """Performs a depth-first walk of the AST."""

    def __init__(self):
        self.node = None
        # Cache of node class -> bound handler, filled lazily by dispatch().
        self._cache = {}

    def default(self, node, *args):
        # Fallback handler: no visitXxx method exists, so just recurse
        # into the node's children.
        for child in iter_child_nodes(node):
            self.dispatch(child, *args)

    def dispatch(self, node, *args):
        self.node = node
        klass = node.__class__
        try:
            handler = self._cache[klass]
        except KeyError:
            # Look for a 'visit<ClassName>' method on the visitor; fall
            # back to self.default when the visitor has none.
            handler = getattr(self.visitor, 'visit' + klass.__name__,
                              self.default)
            self._cache[klass] = handler
        return handler(node, *args)

    def preorder(self, tree, visitor, *args):
        """Do preorder walk of tree using visitor"""
        self.visitor = visitor
        # Give the visitor a way to continue the walk itself.
        visitor.visit = self.dispatch
        self.dispatch(tree, *args)
class PathNode(object):
    """A single vertex in a code-path graph, rendered as a graphviz node."""

    def __init__(self, name, look="circle"):
        self.name = name
        # graphviz shape used when the node is drawn.
        self.look = look

    def to_dot(self):
        """Emit this node as a graphviz node statement on stdout."""
        template = 'node [shape=%s,label="%s"] %d;'
        print(template % (self.look, self.name, self.dot_id()))

    def dot_id(self):
        # Object identity is enough: every PathNode is a distinct vertex.
        return id(self)
class PathGraph(object):
    """A directed graph of the execution paths through one entity."""

    def __init__(self, name, entity, lineno):
        self.name = name
        self.entity = entity
        self.lineno = lineno
        # Maps node -> list of successor nodes.
        self.nodes = defaultdict(list)

    def connect(self, n1, n2):
        """Add a directed edge n1 -> n2."""
        self.nodes[n1].append(n2)
        # Ensure that the destination node is always counted.
        # Bug fix: this used to assign `self.nodes[n2] = []`, which threw
        # away any successors already recorded for n2; touching the
        # defaultdict entry is sufficient and preserves existing edges.
        self.nodes[n2]  # pylint: disable=pointless-statement

    def to_dot(self):
        """Print this graph as a graphviz subgraph on stdout."""
        print('subgraph {')
        for node in self.nodes:
            node.to_dot()
        for node, nexts in self.nodes.items():
            for next in nexts:
                print('%s -- %s;' % (node.dot_id(), next.dot_id()))
        print('}')

    def complexity(self):
        """ Return the McCabe complexity for the graph.
        E - N + 2 (edges minus nodes plus two).
        """
        num_edges = sum([len(n) for n in self.nodes.values()])
        num_nodes = len(self.nodes)
        return num_edges - num_nodes + 2
class PathGraphingAstVisitor(ASTVisitor):
    """ A visitor for a parsed Abstract Syntax Tree which finds executable
    statements.
    """

    def __init__(self):
        super(PathGraphingAstVisitor, self).__init__()
        # Dotted prefix ("Outer.Inner.") applied while visiting class bodies.
        self.classname = ""
        # Completed graphs, keyed by "<classname><entity name>".
        self.graphs = {}
        self.reset()

    def reset(self):
        # Drop the graph currently under construction and its tail node.
        self.graph = None
        self.tail = None

    def dispatch_list(self, node_list):
        # Visit each statement of a body, in source order.
        for node in node_list:
            self.dispatch(node)

    def visitFunctionDef(self, node):
        if self.classname:
            entity = '%s%s' % (self.classname, node.name)
        else:
            entity = node.name

        name = '%d:1: %r' % (node.lineno, entity)

        if self.graph is not None:
            # closure: a nested function contributes nodes to the graph of
            # the enclosing function instead of starting its own.
            pathnode = self.appendPathNode(name)
            self.tail = pathnode
            self.dispatch_list(node.body)
            bottom = PathNode("", look='point')
            self.graph.connect(self.tail, bottom)
            self.graph.connect(pathnode, bottom)
            self.tail = bottom
        else:
            # Top-level function: build a fresh graph, record it, reset.
            self.graph = PathGraph(name, entity, node.lineno)
            pathnode = PathNode(name)
            self.tail = pathnode
            self.dispatch_list(node.body)
            self.graphs["%s%s" % (self.classname, node.name)] = self.graph
            self.reset()

    def visitClassDef(self, node):
        # A class body is not itself a graph; extend the name prefix and
        # visit the methods inside it.
        old_classname = self.classname
        self.classname += node.name + "."
        self.dispatch_list(node.body)
        self.classname = old_classname

    def appendPathNode(self, name):
        # Create a node and link it after the current tail; a no-op when
        # there is no active graph (e.g. statements at module level).
        if not self.tail:
            return
        pathnode = PathNode(name)
        self.graph.connect(self.tail, pathnode)
        self.tail = pathnode
        return pathnode

    def visitSimpleStatement(self, node):
        if node.lineno is None:
            lineno = 0
        else:
            lineno = node.lineno
        name = "Stmt %d" % lineno
        self.appendPathNode(name)

    # All of these statement types contribute exactly one path node.
    visitAssert = visitAssign = visitAugAssign = visitDelete = visitPrint = \
        visitRaise = visitYield = visitImport = visitCall = visitSubscript = \
        visitPass = visitContinue = visitBreak = visitGlobal = visitReturn = \
        visitSimpleStatement

    def visitLoop(self, node):
        name = "Loop %d" % node.lineno
        self._subgraph(node, name)

    visitFor = visitWhile = visitLoop

    def visitIf(self, node):
        name = "If %d" % node.lineno
        self._subgraph(node, name)

    def _subgraph(self, node, name, extra_blocks=()):
        """create the subgraphs representing any `if` and `for` statements"""
        if self.graph is None:
            # global loop: the statement appears at module level, so it
            # roots a graph of its own.
            self.graph = PathGraph(name, name, node.lineno)
            pathnode = PathNode(name)
            self._subgraph_parse(node, pathnode, extra_blocks)
            self.graphs["%s%s" % (self.classname, name)] = self.graph
            self.reset()
        else:
            pathnode = self.appendPathNode(name)
            self._subgraph_parse(node, pathnode, extra_blocks)

    def _subgraph_parse(self, node, pathnode, extra_blocks):
        """parse the body and any `else` block of `if` and `for` statements"""
        loose_ends = []
        self.tail = pathnode
        self.dispatch_list(node.body)
        loose_ends.append(self.tail)
        for extra in extra_blocks:
            # e.g. the except handlers of a try statement.
            self.tail = pathnode
            self.dispatch_list(extra.body)
            loose_ends.append(self.tail)
        if node.orelse:
            self.tail = pathnode
            self.dispatch_list(node.orelse)
            loose_ends.append(self.tail)
        else:
            # Without an else branch, control may skip the body entirely.
            loose_ends.append(pathnode)
        if pathnode:
            # Join every branch at a common bottom node.
            bottom = PathNode("", look='point')
            for le in loose_ends:
                self.graph.connect(le, bottom)
            self.tail = bottom

    def visitTryExcept(self, node):
        name = "TryExcept %d" % node.lineno
        self._subgraph(node, name, extra_blocks=node.handlers)

    visitTry = visitTryExcept

    def visitWith(self, node):
        name = "With %d" % node.lineno
        self.appendPathNode(name)
        self.dispatch_list(node.body)
class McCabeChecker(object):
    """McCabe cyclomatic complexity checker."""

    name = 'mccabe'
    version = __version__
    _code = 'C901'
    _error_tmpl = "C901 %r is too complex (%d)"
    # Threshold shared by all instances; set via parse_options().
    max_complexity = 0

    def __init__(self, tree, filename):
        # `filename` is part of the flake8 plugin API but unused here.
        self.tree = tree

    @classmethod
    def add_options(cls, parser):
        parser.add_option('--max-complexity', default=-1, action='store',
                          type='int', help="McCabe complexity threshold")
        parser.config_options.append('max-complexity')

    @classmethod
    def parse_options(cls, options):
        cls.max_complexity = options.max_complexity

    def run(self):
        # A negative threshold means the check is disabled entirely.
        if self.max_complexity < 0:
            return
        walker = PathGraphingAstVisitor()
        walker.preorder(self.tree, walker)
        for graph in walker.graphs.values():
            score = graph.complexity()
            if score > self.max_complexity:
                message = self._error_tmpl % (graph.entity, score)
                yield graph.lineno, 0, message, type(self)
def get_code_complexity(code, threshold=7, filename='stdin'):
    """Print one line per function in *code* exceeding *threshold*.

    Returns the number of offending functions; 0 on a syntax error (after
    writing a message to stderr).
    """
    try:
        tree = compile(code, filename, "exec", ast.PyCF_ONLY_AST)
    except SyntaxError:
        # sys.exc_info() keeps compatibility with very old Pythons that
        # lacked the `except ... as` syntax.
        err = sys.exc_info()[1]
        sys.stderr.write("Unable to parse %s: %s\n" % (filename, err))
        return 0

    McCabeChecker.max_complexity = threshold
    complaints = ['%s:%d:1: %s' % (filename, lineno, text)
                  for lineno, offset, text, check
                  in McCabeChecker(tree, filename).run()]
    if not complaints:
        return 0
    print('\n'.join(complaints))
    return len(complaints)
def get_module_complexity(module_path, threshold=7):
    """Returns the complexity of a module"""
    # Bug fix: the "rU" open mode was deprecated in Python 3.4 and removed
    # in 3.11 (it raises ValueError there); plain text mode already gives
    # universal-newline handling on Python 3.
    with open(module_path) as mod:
        code = mod.read()
    return get_code_complexity(code, threshold, filename=module_path)
def main(argv=None):
    """Command-line entry point: report complexity for one Python file.

    With --dot, prints a graphviz graph of the code paths instead of the
    per-function complexity listing.
    """
    if argv is None:
        argv = sys.argv[1:]
    opar = optparse.OptionParser()
    opar.add_option("-d", "--dot", dest="dot",
                    help="output a graphviz dot file", action="store_true")
    opar.add_option("-m", "--min", dest="threshold",
                    help="minimum complexity for output", type="int",
                    default=1)

    options, args = opar.parse_args(argv)

    # Bug fix: the "rU" open mode was removed in Python 3.11; plain text
    # mode already provides universal newlines on Python 3.
    with open(args[0]) as mod:
        code = mod.read()
    tree = compile(code, args[0], "exec", ast.PyCF_ONLY_AST)
    visitor = PathGraphingAstVisitor()
    visitor.preorder(tree, visitor)

    if options.dot:
        print('graph {')
        for graph in visitor.graphs.values():
            if (not options.threshold or
                    graph.complexity() >= options.threshold):
                graph.to_dot()
        print('}')
    else:
        for graph in visitor.graphs.values():
            if graph.complexity() >= options.threshold:
                print(graph.name, graph.complexity())


if __name__ == '__main__':
    main(sys.argv[1:])
|
yhoshino11/pytest_example
|
.tox/flake8/lib/python2.7/site-packages/mccabe.py
|
Python
|
mit
| 9,536
|
[
"VisIt"
] |
268219eb10946412f1709993c2432ed93687390bdadc045d8c585a70053c86cd
|
#
# Gramps - a GTK+ based genealogy program
#
# Copyright (C) 2006 Brian Matherly
# Copyright (C) 2008 Stephen George
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: $
import os, glob, sys, shutil
import string
import os.path as path
import subprocess

# Markers used to recognise a Gramps source tree (see buildbase.isGrampsRoot).
CONFIGURE_IN = 'configure.in'
CONST_PY_IN = 'src/const.py.in'
TRANSLATE_FOLDER = 'po'
# Files outside src/ that must be copied into the build directory.
EXTRA_FILES = [ 'COPYING', 'NEWS', 'FAQ', 'AUTHORS']
# Mixed SVN versions contain ':' which is illegal in Windows filenames;
# it is substituted with this character.
FULL_COLON_SUBST = "~"

# min required version of NSIS
MIN_NSIS_VERSION = (2,42)

# Tools used during the build; resolved to real paths elsewhere before use.
MAKENSIS_exe = None
SVN_exe = None

# Per-language results of the .po -> .mo compilation step (see processPO).
po_errs = []
po_oks = []

import gobject

#==== Set up logging system
# need to also set up a logger for when run as a module.
# change to set up a console logger in module global space.
# then add the file logger later once I know the path
import logging
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-10s %(levelname)-8s %(message)s',
                    datefmt='%H:%M',
                    filename= 'build.log', #path.join(out_dir,'build.log'),
                    filemode='w')
# Create a handler for the console (file handler is set up by basicConfig).
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# Set a simple format for the console output.
formatter = logging.Formatter('%(levelname)-8s %(message)s')
console.setFormatter(formatter)
# Attach the console handler to the application logger.
log = logging.getLogger('BuildApp')
log.addHandler(console)
class buildbase(gobject.GObject):
    """
    Drives the Gramps Windows installer build: exports the source tree
    (from SVN or a tarball), compiles translations, and runs the NSIS
    installer script.  Emits progress signals for a GUI front-end.
    """
    # Signals used to report build progress to any connected GUI.
    __gsignals__={
        "build_progresstext" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [gobject.TYPE_STRING]),
        "build_progressfraction" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [gobject.TYPE_FLOAT]),
    }
def __init__(self, bTarball=False):
    """Initialise the builder's default state.

    bTarball: True when building from a tarball, False for an SVN checkout.
    """
    # Bug fix: this method was misspelled "__init" (name-mangled and never
    # invoked on instantiation) and referenced an undefined global
    # "bTarball"; it is now a real constructor with that flag as a
    # defaulted parameter.
    gobject.GObject.__init__(self)
    self.gramps_version = 'VERSION-UNKNOWN'
    self.bTarball = bTarball
    self.build_root = '.'       # the directory where the build source is located
    self.out_dir = '.'          # where the final installer is written, and the source expanded
    self.repository_path = '.'  # where the source comes from: SVN root or a tarball
    self.bBuildInstaller = True
def getbuild_src(self):
    """Path of the src/ folder inside the build tree."""
    return path.join(self.build_root, 'src')
build_src = property(getbuild_src)
def isGrampsRoot(self, root):
    """Return True when *root* looks like a Gramps source tree."""
    log.debug('isGrampsRoot: %s' % root)
    # A Gramps tree has configure.in, src/const.py.in and a po/ folder.
    return (path.isfile(path.join(root, CONFIGURE_IN))
            and path.isfile(path.join(root, CONST_PY_IN))
            and path.isdir(path.join(root, TRANSLATE_FOLDER)))
def getSVNRevision(self, dir):
    """Return 'SVN' + the svnversion string for *dir*.

    Falls back to 'SVN-UNKNOWN' when svnversion produces no output.
    """
    log.debug('========== getSVNRevision(%s)' % dir)
    cmd = 'svnversion -n %s' % dir
    log.debug("Running: %s" % cmd)
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    status = proc.wait()
    output = proc.stdout.read().strip()
    err = proc.stderr.read()
    proc.stderr.close()
    proc.stdout.close()
    del proc
    log.debug(output)
    if err:
        for line in err.split('\n'):
            log.error(line)
    # Bug fix: this used to test the undefined name "stdout", raising a
    # NameError instead of falling back to the unknown marker.
    if not output:
        output = '-UNKNOWN'
    return 'SVN' + output
def exportSVN(self, svn_dir, destdir):
    '''
    svn export PATH1 PATH2
    exports a clean directory tree from the working copy specified by PATH1 into PATH2.
    All local changes will be preserved, but files not under version control will not be copied.

    destdir cannot exist, script will clean up dir first
    '''
    log.debug('========== exportSVN(%s, %s)' % (svn_dir, destdir) )
    cmd = '"%s" export %s %s' % (SVN_exe ,svn_dir, destdir)
    log.info( "Running: %s" % cmd)
    # NOTE(review): wait() before draining the pipes can deadlock if svn
    # emits more output than the pipe buffer holds — consider communicate().
    proc = subprocess.Popen( cmd, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
    status = proc.wait()
    output = string.strip(proc.stdout.read())
    err = proc.stderr.read()
    proc.stderr.close()
    proc.stdout.close()
    del proc
    log.info( output )
    if err:
        log.error(err)
def copyExtraFilesToBuildDir(self, source_path):
    '''
    A few extra files not in the src directory need to be copied into the
    build directory.
    '''
    log.debug('========== copyExtraFilesToBuildDir(%s)' % (source_path))
    for extra in EXTRA_FILES:
        if extra == 'NEWS':
            # Rewrite NEWS through text mode so the line endings match the
            # build platform (only effective on Windows), and add a .TXT
            # suffix so the installer knows to open it with notepad.
            with open(os.path.join(source_path, extra), 'r') as fnews:
                news_lines = fnews.readlines()
            with open(os.path.join(self.build_src, 'NEWS.TXT'), 'w') as newsout:
                newsout.writelines(news_lines)
        else:
            shutil.copy(os.path.join(source_path, extra),
                        os.path.join(self.build_src, extra))
def compileInstallScript(self):
    '''
    Now we got a build directory, lets create the installation program
    '''
    log.debug('========== compileInstallScript()')
    log.info('Compiling NullSoft install script .... be patient')
    # Calculate the path to gramps2.nsi (it lives next to this script).
    # need to ensure __file__ has full path, under linux it does not.
    thisfilepath = os.path.abspath(__file__)
    pth = os.path.relpath(os.path.dirname( thisfilepath ), os.getcwd())
    pth2nsis_script = os.path.join(pth, 'gramps2.nsi')
    # should tests be more along lines of os.name which returns 'posix', 'nt', 'mac', 'os2', 'ce', 'java', 'riscos'
    if sys.platform == 'win32':
        cmd = '"%s" /V3 %s' % (MAKENSIS_exe, pth2nsis_script)
    elif sys.platform == 'linux2':
        # assumption makensis is installed and on the path
        cmd = '%s -V3 %s' % (MAKENSIS_exe, pth2nsis_script)
    # NOTE(review): on any platform other than win32/linux2, `cmd` is never
    # bound and the Popen call below raises UnboundLocalError — confirm the
    # supported-platform list.
    log.info( "Running: %s" % cmd)
    # The NSIS script reads these environment variables.
    os.environ['GRAMPS_VER'] = self.gramps_version
    os.environ['GRAMPS_BUILD_DIR'] = os.path.abspath(self.build_src)
    os.environ['GRAMPS_OUT_DIR'] = os.path.abspath(self.out_dir)
    # NOTE(review): wait() before draining the pipes can deadlock on large
    # makensis output — consider communicate().
    proc = subprocess.Popen( cmd, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
    status = proc.wait()
    output = string.strip(proc.stdout.read())
    err = proc.stderr.read()
    proc.stderr.close()
    proc.stdout.close()
    del proc
    log.info( output )
    if err:
        log.error(err)
def getVersionFromConfigureIn(self, repository_path):
    """Read configure.in under *repository_path* and extract the version."""
    log.debug('========== read_config_in(%s)' % repository_path)
    with open('%s/configure.in' % repository_path, 'r') as fin:
        conf_lines = fin.readlines()
    return self.getVersionFromLines(conf_lines)
def getVersionFromLines(self, conf_lines):
    """Derive the Gramps version string "<version>-<release>" from the
    lines of configure.in.

    SVN snapshot builds get the working-copy revision as the release part;
    release builds from a modified working copy trigger a warning.  Any ':'
    (mixed-revision marker) is replaced with FULL_COLON_SUBST so the result
    is usable in a Windows filename.
    """
    log.debug('========== getVersionFromLines()')
    # Robustness: initialise so a malformed configure.in cannot raise
    # UnboundLocalError when building gversion below.
    ver = ''
    release = ''
    for line in conf_lines:
        if 'AC_INIT(gramps' in line:
            # AC_INIT(gramps, <version>, <bug-address>)
            junk, ver, junk2 = line.split(',')
        elif line[:7] == 'RELEASE':
            junk, release = line.split('=')
            if 'SVN$' in release:  # not a release version
                # Bug fix: these calls used the undefined bare names
                # "getSVNRevision"/"repository_path"; both live on self.
                release = self.getSVNRevision(self.repository_path)
            elif not self.bTarball:
                # This is a release; make sure the SVN working copy is pristine.
                test_num = self.getSVNRevision(self.repository_path)
                if test_num.endswith('M'):
                    log.warning('*==========================================================')
                    log.warning('* Building a Release from modified SVN Working Copy ')
                    log.warning('* ===> Creating %s-%s from %s-%s <==' % (ver.strip(), release.strip(), ver.strip(), test_num.strip()))
                    log.warning('*==========================================================')
    gversion = '%s-%s' % (ver.strip(), release.strip())
    # If it's a mixed version, replace the ':' with a filename-safe character.
    gversion = gversion.replace(":", FULL_COLON_SUBST)
    log.info('GrampsVersion: %s' % gversion)
    return gversion
def processPO(self):
    """Compile every po/*.po file into src/lang/<lan>/LC_MESSAGES/gramps.mo.

    Appends each language code to the module-level po_errs or po_oks list
    and emits progress signals for a GUI front-end.
    """
    log.debug('========== processPO( )')
    po_dir = os.path.join(self.build_root, "po")
    mo_dir = os.path.join(self.build_src, "lang")
    if not os.path.exists(mo_dir):
        os.makedirs(mo_dir)
    # TODO: find a better way to handle different platforms
    if sys.platform == 'win32':
        po_files = glob.glob(po_dir + "\*.po")
        # no longer using python msgfmt as it doesn't handle plurals (april 2010)
        # GetText Win 32 obtained from http://gnuwin32.sourceforge.net/packages/gettext.htm
        # ....\gettext\bin\msgfmt.exe needs to be on the path
        msgfmtCmd = 'msgfmt.exe'
    elif sys.platform == 'linux2':
        po_files = glob.glob(po_dir + "/*.po")
        msgfmtCmd = "%s/bin/msgfmt" % sys.prefix
    else:
        po_files = []  # empty list
        msgfmtCmd = "UNKNOWN_PLATFORM"
    log.debug(msgfmtCmd)
    log.info("Generating mo files")
    global po_errs, po_oks
    po_total = len(po_files)
    po_count = 0
    for po_file in po_files:
        po_count = po_count + 1
        self.emit("build_progresstext", 'compiling %s' % po_file)
        # Bug fix: po_count/po_total is integer division on Python 2, so
        # the reported fraction stayed 0 until the very last file; force
        # float division.
        self.emit("build_progressfraction", float(po_count) / po_total)
        lan = os.path.basename(po_file).replace(".po", "")
        lan_path = os.path.join(mo_dir, lan, "LC_MESSAGES")
        if not os.path.exists(lan_path):
            os.makedirs(lan_path)
        mo_file = os.path.join(lan_path, "gramps.mo")
        log.info(mo_file)
        cmd = "%s --statistics -o %s %s" % (msgfmtCmd, mo_file, po_file)
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        status = proc.wait()
        output = proc.stdout.read().strip()
        err = proc.stderr.read()
        proc.stderr.close()
        proc.stdout.close()
        del proc
        # msgfmt writes its statistics to stderr, so stderr output is not
        # necessarily a failure; original classification kept: any stderr
        # output marks the language in po_errs.
        if err:
            log.info(err)  # statistics coming out on stderr
            po_errs.append(lan)
        else:
            po_oks.append(lan)
def generateConstPy(self ):
    """Generate src/const.py from the const.py.in template.

    Replaces the @VERSIONSTRING@ placeholder with the real Gramps version
    (FULL_COLON_SUBST is mapped back to ':' because the stored version has
    colons substituted for filename safety).  All other lines, including
    the @prefix@ / @sysconfdir@ placeholders, are copied through unchanged;
    they do not appear to matter on Windows.
    """
    log.debug('========== generate_const.py()')
    src_in = os.path.join(self.build_src, 'const.py.in')
    src_out = os.path.join(self.build_src, 'const.py')
    # 'with' guarantees both handles are closed even if a read/write fails
    # (the original left them open on exception).
    with open(src_in, 'r') as fin:
        in_lines = fin.readlines()
    with open(src_out, 'w') as fout:
        for line in in_lines:
            if '@VERSIONSTRING@' in line:   # e.g. VERSION = "@VERSIONSTRING@"
                fout.write(line.replace('@VERSIONSTRING@',
                                        self.gramps_version.replace(FULL_COLON_SUBST, ":")))
            else:
                fout.write(line)
def cleanBuildDir(self):
    """Remove the build tree and any previously generated installer so a
    fresh build starts from a clean slate.  Failures are logged, not raised.
    """
    log.debug( '========== cleanBuildDir()' )
    log.info( 'Cleaning build and output directories' )
    # The two platforms raise different exceptions for the same failed file
    # operation; map the expected type here.  BUGFIX: the original only
    # handled win32/linux2 and left MY_EXCEPTION unbound on anything else;
    # WindowsError is a subclass of OSError, so OSError is a safe default.
    if sys.platform == 'win32':
        MY_EXCEPTION = WindowsError
    else:
        MY_EXCEPTION = OSError
    if os.path.exists(self.build_root):
        try:
            log.info('removing directory: %s' % self.build_root )
            shutil.rmtree(self.build_root)
        except MY_EXCEPTION as e:
            log.error( e )
    # Remove a stale installer from a previous run (build.log is kept).
    for name in ['gramps-%s.exe' % self.gramps_version]:
        fname = os.path.join(self.out_dir, name)
        if os.path.isfile(fname):
            try:
                log.info('removing file: %s' % fname )
                os.remove(fname)
            except MY_EXCEPTION as e:
                log.error( e )
def getNSISVersionNumber(self):
    """Return the installed NSIS version as a (major, minor) tuple of ints.

    Runs ``"<makensis>" -VERSION`` whose output looks like ``v2.42`` and
    parses it.  On win32, if the tool wrote to stderr (e.g. command not
    recognized) returns (0, 0) so the caller's minimum-version check fails
    gracefully instead of this method crashing on empty output.
    """
    cmd = '"%s" -VERSION' % (MAKENSIS_exe)
    log.debug(cmd)
    proc = subprocess.Popen( cmd, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
    status = proc.wait()
    output = proc.stdout.read().strip()
    err = proc.stderr.read()
    proc.stderr.close()
    proc.stdout.close()
    del proc
    if err:
        log.error(err)
        if sys.platform == 'win32':     # e.g. 'makensis' is not recognized
            return (0, 0)
    # Strip the leading 'v' and split "major.minor[suffix]".
    ver = output[1:].split('.')
    major = int(ver[0])
    try:
        minor = int(ver[1])
    except ValueError:
        # Minor component carries a suffix (e.g. "46-3"); keep leading digits.
        minor = int(ver[1][:2])
    return (major, minor)
def checkForBuildTools(self):
    """Locate the external build tools and record them in the module
    globals MAKENSIS_exe and SVN_exe.

    win32:  makensis is looked up in the registry, falling back to the
            PATH; msgfmt is checked under msg_dir; svn is looked up in the
            registry (only when not building from a tarball).  A missing
            makensis or msgfmt aborts via sys.exit(0).
    linux2: the tools are simply assumed to be on the PATH.
    Finally the NSIS version is compared against MIN_NSIS_VERSION and
    self.bBuildInstaller is set accordingly.
    """
    global MAKENSIS_exe, SVN_exe
    log.debug( '========== checkForBuildTools()' )
    if sys.platform == 'win32':
        import _winreg as winreg
        # Find NSIS on system via the registry
        try:
            with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\NSIS') as key:
                nsispath = winreg.QueryValue(key, '')
                makensisexe = path.join( nsispath, 'makensis.exe')
                if path.isfile( makensisexe ):
                    MAKENSIS_exe = makensisexe
        except WindowsError, e:
            # Registry lookup failed -- fall back to searching the PATH
            log.warning('NSIS not found, in registory')
            log.warning('..Testing if makensis is on the path')
            MAKENSIS_exe = 'makensis'
            #cmd = os.path.join(nsis_dir, MAKENSIS_exe)
            cmd = '%s /VERSION' % MAKENSIS_exe
            proc = subprocess.Popen( cmd, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
            status = proc.wait()
            output = string.strip(proc.stdout.read())
            err = proc.stderr.read()
            proc.stderr.close()
            proc.stdout.close()
            del proc
            if err:
                log.error(err)
                log.error('....makensis.exe not found on path')
                sys.exit(0)
            #else:
            #    log.info("makensis version %s" % output)
        # Find msgfmt on system.  NOTE(review): presumably msgfmt.exe run
        # with no arguments echoes its own command name on stderr -- hence
        # the startswith(cmd) test below; confirm against gettext behaviour.
        cmd = os.path.join(msg_dir, 'msgfmt.exe')
        proc = subprocess.Popen( cmd, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
        status = proc.wait()
        output = string.strip(proc.stdout.read())
        err = proc.stderr.read()
        proc.stderr.close()
        proc.stdout.close()
        del proc
        if not err.startswith(cmd):
            #log.error(err)
            log.error('msgfmt.exe not found on path')
            log.error(' try the -m DIR , --msgdir=DIR option to specify the directory or put it on the path')
            sys.exit(0)
        # Find SVN on system - optional, if building from tarball
        if not bTarball:
            try:
                with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\svn.exe') as key:
                    svnpath = winreg.QueryValue(key, '')
                    if path.isfile(svnpath):
                        SVN_exe = svnpath
            except WindowsError, e:
                log.warning('SVN not found, in registory')
                log.warning('... Hoping svn is on the path')
                SVN_exe = 'svn'
    elif sys.platform == 'linux2':
        #ASSUMPTION: these tools are on the path
        #TODO: check for svn on Linux
        log.info( 'TODO: Check for svn' )
        SVN_exe = 'svn'
        #TODO: check for nsis on Linux
        log.info( 'TODO: Check for nsis' )
        MAKENSIS_exe = 'makensis'
    # Check if we are running a compatible version of NSIS
    vers = self.getNSISVersionNumber()
    if vers < MIN_NSIS_VERSION:
        log.error( "Require NSIS version %d.%d or later ..... found NSIS version %d.%d" % (MIN_NSIS_VERSION[0],MIN_NSIS_VERSION[1], vers[0], vers[1]) )
        log.info("Disabling NSIS compilation ... Please upgrade your NSIS version")
        self.bBuildInstaller = False
    else:
        self.bBuildInstaller = True
        log.info( "NSIS version %d.%d" % vers )
def expandTarBall(self, tarball, expand_dir):
    """Extract the release tarball into out_dir and rename the extracted
    gramps-x.y.z directory to self.build_root.

    NOTE(review): the tarball / expand_dir parameters are ignored; the body
    operates on self.repository_path and self.out_dir instead.  The only
    caller (buildGRAMPS) passes the same values, but confirm before reuse.
    """
    # gramps-3.1.0.tar.gz
    log.info( 'expandTarBall(%s, %s)' % (tarball, expand_dir) )
    if tarfile.is_tarfile(self.repository_path):
        tar = tarfile.open(self.repository_path)
        tar.extractall(self.out_dir)
        tar.close()
        # The archive expands to e.g. out_dir/gramps-3.1.0; move that
        # directory to the expected build_root location.
        base = os.path.basename(self.repository_path)
        extractDir = os.path.join(self.out_dir, base.replace('.tar.gz', '') )
        try:
            os.rename( extractDir, self.build_root)
        except WindowsError, e:
            log.error("FAILED: extractDir=%s, build_root=%s" % (extractDir, self.build_root))
            raise WindowsError, e
    else:
        log.error( "Sorry %s is not a tar file" % self.repository_path )
def getVersionFromTarBall(self, tarball):
    """Read the Gramps version out of configure.in inside the tarball,
    without expanding the whole archive, via self.getVersionFromLines().

    NOTE(review): if the archive is not a tarball or contains no
    configure.in member, 'vers' is never bound and the final return raises
    NameError.  Also, like expandTarBall, the tarball parameter is ignored
    in favour of self.repository_path -- confirm intended.
    """
    log.debug( 'getVersionFromTarBall(%s)' % (tarball))
    if tarfile.is_tarfile(self.repository_path):
        tar = tarfile.open(self.repository_path)
        members = tar.getnames()
        for member in members:
            if 'configure.in' in member:
                log.debug('Reading version from: %s' % member)
                file = tar.extractfile(member)
                lines = file.readlines()
                vers = self.getVersionFromLines(lines)
        tar.close()
    log.debug( 'Version (%s)' % (vers) )
    return vers
def buildGRAMPS( base, out_dir, bTarball):
    """Top-level build driver.

    base     -- tarball path or SVN working-copy root
    out_dir  -- directory that receives the build tree and installer
    bTarball -- True to build from a tarball, False to build from SVN
    Also reads the module globals bBuildAll and bBuildInstaller set by the
    command-line parsing in __main__.
    """
    bo = buildbase()
    bo.repository_path = base
    bo.out_dir = out_dir
    bo.bTarball = bTarball
    bo.bBuildInstaller = bBuildInstaller
    if not bo.bTarball and not bo.isGrampsRoot(bo.repository_path):
        log.error( '$$$$ BAD Gramps Root specified $$$$')
    else:
        bo.checkForBuildTools()
        if bo.bTarball:
            # Tarball build: version comes from configure.in inside the archive
            bo.gramps_version = bo.getVersionFromTarBall( bo.repository_path )
            bo.build_root = path.normpath(os.path.join(bo.out_dir, 'gramps-%s' % bo.gramps_version))
            if bBuildAll:
                bo.cleanBuildDir()
                bo.expandTarBall(base, bo.out_dir)
                bo.copyExtraFilesToBuildDir(bo.build_root )
        else: #SVN Build
            bo.gramps_version = bo.getVersionFromConfigureIn( base )
            bo.build_root = path.normpath(os.path.join(bo.out_dir, 'gramps-%s' % bo.gramps_version))
            if bBuildAll:
                bo.cleanBuildDir()
                os.mkdir(bo.build_root)
                bo.exportSVN(os.path.join(base, 'src'), os.path.join(bo.build_root, 'src') )
                bo.exportSVN(os.path.join(base, 'po'), os.path.join(bo.build_root, 'po') )
                bo.exportSVN(os.path.join(base, 'example'), os.path.join(bo.build_root, 'examples') )
                bo.generateConstPy( )
                bo.copyExtraFilesToBuildDir(base)
        if bBuildAll:
            bo.processPO( )
        if bo.bBuildInstaller:
            bo.compileInstallScript()
if __name__ == '__main__':
    # Command-line entry point: parse options, validate the repository
    # path, pick an output directory, then hand off to buildGRAMPS().
    import getopt
    import os
    import sys
    import tarfile
    usage = '''Create Gramps Windows Installer.
Usage:
python build_GrampsWin32.py [options] [repository_path]
Arguments:
repository_path Path to the repository to build GRAMPS from, this can be either
- The root path of a SVN working copy
- A tarball that has been saved on local disk
- Left blank to build the SVN working copy this file is part of
Options:
-h, --help This help message.
-oDIR, --out=DIR Directory to build files (optional)
--nsis_only Build NSIS only (does not Clean & Build All)
-t --tarball Build release version from Tarball.
-mDIR, --msgdir=DIR Directory to msgfmt.exe
'''
    # TODO: nsis_dir option - a path to nsismake (for occasions script cannot work it out)
    # TODO: svn_dir option - a path to svn (for occasions script cannot work it out)
    # TODO: tortoise_dir Option - accommodate windows user who dont have svn but use tortoiseSVN
    repository_path = '.' # Repository - either SVN working copy dir or Tarball file
    out_dir = None
    bBuildAll = True          # False => only rerun the NSIS compile step
    bBuildInstaller = True
    bTarball = False
    msg_dir = ""              # optional directory containing msgfmt.exe
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:tm:",
                                   ["help", "out=", "nsis_only", "tarball", "msgdir="])
        for o, a in opts:
            if o in ("-h", "--help"):
                print usage
                sys.exit(0)
            if o in ("-o", "--out"):
                out_dir = a
            if o in ("--nsis_only"):
                bBuildAll = False
            if o in ('-t', "--tarball"):
                print 'This is a tarball build'
                bTarball = True
            if o in ("-m", "--msgdir"):
                if os.path.isdir( a ):
                    msg_dir = a
                else:
                    raise getopt.GetoptError, '\nERROR: msgfmt dir does not exist'
        if args: #got args use first one as base dir
            repository_path = path.normpath(args[0])
        else: # no base dir passed in, work out one from current working dir
            repository_path = path.normpath("%s/../.." % os.getcwd() )
            # raise getopt.GetoptError, '\nERROR: No base directory specified'
        if len(args) > 1:
            raise getopt.GetoptError, '\nERROR: Too many arguments'
    except getopt.GetoptError, msg:
        print msg
        print '\n %s' % usage
        sys.exit(2)
    # Validate the repository path for the chosen build mode.
    if bTarball:
        if not tarfile.is_tarfile(repository_path):
            print "Tarball %s not a valid Tarball" % repository_path
            sys.exit(1)
    else:
        if not os.path.isdir(repository_path):
            print "WC root directory not found; %s " % repository_path
            sys.exit(1)
    # No -o given: tarball builds go to the cwd, SVN builds to <wc>/windows.
    if out_dir == None:
        if bTarball:
            out_dir = path.normpath(os.getcwd())
        else:
            out_dir = path.normpath(os.path.join(repository_path, 'windows'))
        log.info("Setting outdir to %s", out_dir)
    # Echo the original command line for the build log.
    s_args = ''
    for value in sys.argv[1:]:
        s_args = s_args + ' %s'%value
    print "======= build_GrampsWin32.py %s ========" % s_args
    log.debug('Using %s to find python tools' % sys.prefix)
    log.info('Platform: %s' % sys.platform)
    #==========================
    sys.exit(buildGRAMPS(repository_path,out_dir, bTarball))

# Register buildbase with the GObject type system so its signals
# (build_progresstext / build_progressfraction) can be emitted.
gobject.type_register(buildbase)
|
rdp/legado
|
windows/builder/build_GrampsWin32.py
|
Python
|
gpl-2.0
| 25,910
|
[
"Brian"
] |
64ba3fc187b7d372a4fb29d411c5c577d1834aa9563594534f293b741f0a0f2a
|
"""
Migration script to create initial tables.
"""
from sqlalchemy import *
from migrate import *

import datetime
now = datetime.datetime.utcnow   # timestamp factory used by every table's create/update columns

# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *

import sys, logging

# Log to stdout so the migration's output is visible when run from the CLI.
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )

# All tables below bind to this shared MetaData; upgrade() attaches the
# engine and issues CREATE TABLE for everything at once.
metadata = MetaData()
# Registered user accounts; password stored as a 40-character string.
User_table = Table( "galaxy_user", metadata,
    Column( "id", Integer, primary_key=True),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "email", TrimmedString( 255 ), nullable=False ),
    Column( "username", String( 255 ), index=True ),
    Column( "password", TrimmedString( 40 ), nullable=False ),
    Column( "external", Boolean, default=False ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "purged", Boolean, index=True, default=False ) )

# Named groups of users.
Group_table = Table( "galaxy_group", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", String( 255 ), index=True, unique=True ),
    Column( "deleted", Boolean, index=True, default=False ) )

# Authorization roles, typed by the "type" column.
Role_table = Table( "role", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", String( 255 ), index=True, unique=True ),
    Column( "description", TEXT ),
    Column( "type", String( 40 ), index=True ),
    Column( "deleted", Boolean, index=True, default=False ) )

# Many-to-many: user <-> group.
UserGroupAssociation_table = Table( "user_group_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "group_id", Integer, ForeignKey( "galaxy_group.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ) )

# Many-to-many: user <-> role.
UserRoleAssociation_table = Table( "user_role_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ) )

# Many-to-many: group <-> role.
GroupRoleAssociation_table = Table( "group_role_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "group_id", Integer, ForeignKey( "galaxy_group.id" ), index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ) )

# Browser sessions; user_id is nullable so anonymous sessions are allowed.
GalaxySession_table = Table( "galaxy_session", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=True ),
    Column( "remote_host", String( 255 ) ),
    Column( "remote_addr", String( 255 ) ),
    Column( "referer", TEXT ),
    Column( "session_key", TrimmedString( 255 ), index=True, unique=True ), # unique 128 bit random number coerced to a string
    Column( "is_valid", Boolean, default=False ),
    Column( "prev_session_id", Integer ) # saves a reference to the previous session so we have a way to chain them together
    )

# Tools uploaded to the Tool Shed; newer_version_id is a self-referencing
# foreign key chaining successive versions of the same tool.
Tool_table = Table( "tool", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "guid", TrimmedString( 255 ), index=True, unique=True ),
    Column( "tool_id", TrimmedString( 255 ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "newer_version_id", Integer, ForeignKey( "tool.id" ), nullable=True ),
    Column( "name", TrimmedString( 255 ), index=True ),
    Column( "description" , TEXT ),
    Column( "user_description" , TEXT ),
    Column( "version", TrimmedString( 255 ) ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "external_filename" , TEXT ),
    Column( "deleted", Boolean, index=True, default=False ) )

# State-change events, linked to tools through the association below.
Event_table = Table( 'event', metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "state", TrimmedString( 255 ), index=True ),
    Column( "comment", TEXT ) )

# Many-to-many: tool <-> event.
ToolEventAssociation_table = Table( "tool_event_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "tool_id", Integer, ForeignKey( "tool.id" ), index=True ),
    Column( "event_id", Integer, ForeignKey( "event.id" ), index=True ) )

# Browsing categories for tools.
Category_table = Table( "category", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", TrimmedString( 255 ), index=True, unique=True ),
    Column( "description" , TEXT ),
    Column( "deleted", Boolean, index=True, default=False ) )

# Many-to-many: tool <-> category.
ToolCategoryAssociation_table = Table( "tool_category_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "tool_id", Integer, ForeignKey( "tool.id" ), index=True ),
    Column( "category_id", Integer, ForeignKey( "category.id" ), index=True ) )

# Tags; parent_id is a self-referencing foreign key, so tags form a tree.
Tag_table = Table( "tag", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "type", Integer ),
    Column( "parent_id", Integer, ForeignKey( "tag.id" ) ),
    Column( "name", TrimmedString(255) ),
    UniqueConstraint( "name" ) )

# Per-user tag applications on tools, keeping both the canonical and the
# user-entered forms of the tag name/value.
ToolTagAssociation_table = Table( "tool_tag_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "tool_id", Integer, ForeignKey( "tool.id" ), index=True ),
    Column( "tag_id", Integer, ForeignKey( "tag.id" ), index=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "user_tname", TrimmedString(255), index=True),
    Column( "value", TrimmedString(255), index=True),
    Column( "user_value", TrimmedString(255), index=True) )

# Free-text annotations written by users about tools.
ToolAnnotationAssociation_table = Table( "tool_annotation_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "tool_id", Integer, ForeignKey( "tool.id" ), index=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "annotation", TEXT ) )
def upgrade( migrate_engine ):
    """Create all of the initial Tool Shed tables defined above.

    Binds the shared metadata to the engine supplied by the migration
    framework and issues CREATE TABLE for every table at once.
    """
    # print() form is valid under both Python 2 and Python 3; echoes the
    # module docstring so console output identifies the running migration.
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.create_all()
    # The annotation column is TEXT, which MySQL cannot index unbounded;
    # mysql_length caps the indexed prefix at 767 bytes (InnoDB's
    # single-column index key limit).  Other dialects ignore the option.
    Index( 'ix_tool_annotation_association_annotation', ToolAnnotationAssociation_table.c.annotation, mysql_length=767 ).create()
def downgrade( migrate_engine ):
    """Reverse the upgrade -- intentionally a no-op for the initial schema."""
    # Operations to reverse the above upgrade go here.
    pass
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/tool_shed/model/migrate/versions/0001_initial_tables.py
|
Python
|
gpl-3.0
| 7,193
|
[
"Galaxy"
] |
42d0375fb430b1ac5176403cd7748243d319b32dceef08b37b0951e901c55522
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.