# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the Hubbard U off, just set "LDAU": False as a user_incar_setting
(see the example at the end of this docstring).
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
Improper overriding of the as_dict and from_dict protocols is a major
cause of implementation headaches. If you need an example, look at how
MPStaticSet and MPNonSCFSet are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings, user_kpoints_settings and user_<whatever>_settings are
ABSOLUTE. Any new sets you implement must obey this. If a user wants to
override your settings, assume they know what they are doing. Do not
magically override user-supplied settings. You can issue a warning if you
think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
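Example (a minimal sketch; "POSCAR" is a hypothetical structure file and the
pymatgen POTCAR directories are assumed to be configured)::

    from pymatgen.core import Structure
    from pymatgen.io.vasp.sets import MPRelaxSet

    structure = Structure.from_file("POSCAR")
    # Rule 2 in action: user settings are absolute, so this turns the Hubbard U off.
    vis = MPRelaxSet(structure, user_incar_settings={"LDAU": False})
    vis.write_input("relax_run")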
"""
import abc
import glob
import itertools
import os
import re
import shutil
import warnings
from copy import deepcopy
from itertools import chain
from pathlib import Path
from typing import List, Optional, Tuple, Union
from zipfile import ZipFile
import numpy as np
from monty.dev import deprecated
from monty.io import zopen
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.periodic_table import Element, Species
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar, VaspInput
from pymatgen.io.vasp.outputs import Outcar, Vasprun
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
MODULE_DIR = Path(__file__).resolve().parent
class VaspInputSet(MSONable, metaclass=abc.ABCMeta):
"""
Base class representing a set of Vasp input parameters with a structure
supplied as init parameters. Typically, you should not inherit from this
class. Start from DictSet or MPRelaxSet or MITRelaxSet.
"""
@property
@abc.abstractmethod
def incar(self):
"""Incar object"""
pass
@property
@abc.abstractmethod
def kpoints(self):
"""Kpoints object"""
pass
@property
@abc.abstractmethod
def poscar(self):
"""Poscar object"""
pass
@property
def potcar_symbols(self):
"""
List of POTCAR symbols.
"""
# pylint: disable=E1101
elements = self.poscar.site_symbols
potcar_symbols = []
settings = self._config_dict["POTCAR"]
if isinstance(settings[elements[-1]], dict):
for el in elements:
potcar_symbols.append(settings[el]["symbol"] if el in settings else el)
else:
for el in elements:
potcar_symbols.append(settings.get(el, el))
return potcar_symbols
@property
def potcar(self):
"""
Potcar object.
"""
# pylint: disable=E1101
potcar = Potcar(self.potcar_symbols, functional=self.potcar_functional)
# warn if the selected POTCARs do not correspond to the chosen
# potcar_functional
for psingle in potcar:
if self.potcar_functional not in psingle.identify_potcar()[0]:
warnings.warn(
"POTCAR data with symbol {} is not known by pymatgen to\
correspond with the selected potcar_functional {}. This POTCAR\
is known to correspond with functionals {}. Please verify that\
you are using the right POTCARs!".format(
psingle.symbol,
self.potcar_functional,
psingle.identify_potcar(mode="data")[0],
),
BadInputSetWarning,
)
return potcar
@property # type: ignore
@deprecated(message="Use the get_vasp_input() method instead.")
def all_input(self):
"""
Returns all input files as a dict of {filename: vasp object}
Returns:
dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
"""
return {
"INCAR": self.incar,
"KPOINTS": self.kpoints,
"POSCAR": self.poscar,
"POTCAR": self.potcar,
}
def get_vasp_input(self) -> VaspInput:
"""
Returns:
VaspInput
"""
return VaspInput(
incar=self.incar,
kpoints=self.kpoints,
poscar=self.poscar,
potcar=self.potcar,
)
def write_input(
self,
output_dir,
make_dir_if_not_present=True,
include_cif=False,
potcar_spec=False,
zip_output=False,
):
"""
Writes a set of VASP input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
This is intended to help share an input set with people who might
not have a license to specific Potcar files. Given a "POTCAR.spec",
the specific POTCAR file can be re-generated using pymatgen with the
"generate_potcar" function in the pymatgen CLI.
zip_output (bool): If True, output will be zipped into a file with the
same name as the InputSet (e.g., MPStaticSet.zip)
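Example (a brief sketch; ``vis`` stands for any instantiated input set, e.g.
the MPRelaxSet built in the module-level docstring example)::

    vis.write_input("./shared_run", potcar_spec=True)  # writes POTCAR.spec instead of POTCAR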
"""
if potcar_spec:
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
with zopen(os.path.join(output_dir, "POTCAR.spec"), "wt") as f:
f.write("\n".join(self.potcar_symbols))
for k, v in {
"INCAR": self.incar,
"POSCAR": self.poscar,
"KPOINTS": self.kpoints,
}.items():
if v is not None:
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
else:
vinput = self.get_vasp_input()
vinput.write_input(output_dir, make_dir_if_not_present=make_dir_if_not_present)
cifname = ""
if include_cif:
s = vinput["POSCAR"].structure
cifname = Path(output_dir) / ("%s.cif" % re.sub(r"\s", "", s.formula))
s.to(filename=cifname)
if zip_output:
filename = self.__class__.__name__ + ".zip"
with ZipFile(filename, "w") as zip:
for file in [
"INCAR",
"POSCAR",
"KPOINTS",
"POTCAR",
"POTCAR.spec",
cifname,
]:
try:
zip.write(file)
os.remove(file)
except FileNotFoundError:
pass
def as_dict(self, verbosity=2):
"""
Args:
verbosity: Verbosity for generated dict. If 1, structure is
excluded.
Returns:
MSONable dict
"""
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
def _load_yaml_config(fname):
config = loadfn(str(MODULE_DIR / ("%s.yaml" % fname)))
if "PARENT" in config:
parent_config = _load_yaml_config(config["PARENT"])
for k, v in parent_config.items():
if k not in config:
config[k] = v
elif isinstance(v, dict):
v_new = config.get(k, {})
v_new.update(v)
config[k] = v_new
return config
class DictSet(VaspInputSet):
"""
Concrete implementation of VaspInputSet that is initialized from a dict
settings. This allows arbitrary settings to be input. In general,
this is rarely used directly unless there is a source of settings in yaml
format (e.g., from a REST interface). It is typically used by other
VaspInputSets for initialization.
Special consideration should be paid to the way the MAGMOM initialization
for the INCAR is done. The initialization differs depending on the type of
structure and the configuration settings. The order in which the magmom is
determined is as follows:
1. If the site itself has a magmom setting, that is used.
2. If the species on the site has a spin setting, that is used.
3. If the species itself has a particular setting in the config file, that
is used, e.g., Mn3+ may have a different magmom than Mn4+.
4. Lastly, the element symbol itself is checked in the config file. If
there are no settings, VASP's default of 0.6 is used.
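Example (a minimal sketch of rule 1 above; the two-atom Fe cell and the
magmom values are purely illustrative)::

    from pymatgen.core import Lattice, Structure
    from pymatgen.io.vasp.sets import MPRelaxSet

    structure = Structure(
        Lattice.cubic(2.87), ["Fe", "Fe"], [[0, 0, 0], [0.5, 0.5, 0.5]]
    )
    # Per-site magmoms (rule 1) take precedence over the config-file defaults.
    structure.add_site_property("magmom", [5.0, -5.0])
    print(MPRelaxSet(structure).incar["MAGMOM"])  # [5.0, -5.0]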
"""
def __init__(
self,
structure,
config_dict,
files_to_transfer=None,
user_incar_settings=None,
user_kpoints_settings=None,
user_potcar_settings=None,
constrain_total_magmom=False,
sort_structure=True,
potcar_functional=None,
user_potcar_functional=None,
force_gamma=False,
reduce_structure=None,
vdw=None,
use_structure_charge=False,
standardize=False,
sym_prec=0.1,
international_monoclinic=True,
validate_magmom=True,
):
"""
Args:
structure (Structure): The Structure to create inputs for.
config_dict (dict): The config dictionary to use.
files_to_transfer (dict): A dictionary of {filename: filepath}. This
allows the transfer of files from a previous calculation.
user_incar_settings (dict): User INCAR settings. This allows a user
to override INCAR settings, e.g., setting a different MAGMOM for
various elements or species. Note that in the new scheme,
ediff_per_atom and hubbard_u are no longer args. Instead, the
config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
scales with # of atoms, the latter does not. If both are
present, EDIFF is preferred. To force such settings, just supply
user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
pymatgen defines different values depending on what anions are
present in the structure, so these keys can be defined in one
of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
5 regardless of the input structure.
If a None value is given, that key is unset. For example,
{"ENCUT": None} will remove ENCUT from the incar settings.
user_kpoints_settings (dict or Kpoints): Allow user to override kpoints
setting by supplying a dict E.g., {"reciprocal_density": 1000}.
User can also supply Kpoints object. Default is None.
user_potcar_settings (dict): Allow user to override POTCARs. E.g.,
{"Gd": "Gd_3"}. This is generally not recommended. Default is None.
constrain_total_magmom (bool): Whether to constrain the total magmom
(NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
species. Defaults to False.
sort_structure (bool): Whether to sort the structure (using the
default sort order of electronegativity) before generating input
files. Defaults to True, the behavior you would want most of the
time. This ensures that similar atomic species are grouped
together.
user_potcar_functional (str): Functional to use. Default (None) is to use
the functional in the config dictionary. Valid values:
"PBE", "PBE_52", "PBE_54", "LDA", "LDA_52", "LDA_54", "PW91",
"LDA_US", "PW91_US".
force_gamma (bool): Force gamma centered kpoint generation. Default
(False) is to use the Automatic Density kpoint scheme, which
will use the Gamma centered generation scheme for hexagonal
cells, and Monkhorst-Pack otherwise.
reduce_structure (None/str): Before generating the input files,
generate the reduced structure. Default (None), does not
alter the structure. Valid values: None, "niggli", "LLL".
vdw: Adds default parameters for van-der-Waals functionals supported
by VASP to INCAR. Supported functionals are: DFT-D2, undamped
DFT-D3, DFT-D3 with Becke-Jonson damping, Tkatchenko-Scheffler,
Tkatchenko-Scheffler with iterative Hirshfeld partitioning,
MBD@rSC, dDsC, Dion's vdW-DF, DF2, optPBE, optB88, optB86b and
rVV10.
use_structure_charge (bool): If set to True, then the public
variable used for setting the overall charge of the
structure (structure.charge) is used to set the NELECT
variable in the INCAR
Default is False (structure's overall charge is not used)
standardize (bool): Whether to standardize to a primitive standard
cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding.
international_monoclinic (bool): Whether to use international convention
(vs Curtarolo) for monoclinic. Defaults True.
validate_magmom (bool): Ensure that missing magmom values are filled
in with the VASP default value of 1.0.
"""
if reduce_structure:
structure = structure.get_reduced_structure(reduce_structure)
if sort_structure:
structure = structure.get_sorted_structure()
if validate_magmom:
get_valid_magmom_struct(structure, spin_mode="auto", inplace=True)
self._structure = structure
self._config_dict = deepcopy(config_dict)
self.files_to_transfer = files_to_transfer or {}
self.constrain_total_magmom = constrain_total_magmom
self.sort_structure = sort_structure
self.force_gamma = force_gamma
self.reduce_structure = reduce_structure
self.user_incar_settings = user_incar_settings or {}
self.user_kpoints_settings = user_kpoints_settings or {}
self.user_potcar_settings = user_potcar_settings
self.vdw = vdw.lower() if vdw is not None else None
self.use_structure_charge = use_structure_charge
self.standardize = standardize
self.sym_prec = sym_prec
self.international_monoclinic = international_monoclinic
if self.user_incar_settings.get("KSPACING") and user_kpoints_settings is not None:
warnings.warn(
"You have specified KSPACING and also supplied kpoints "
"settings. KSPACING only has effect when there is no "
"KPOINTS file. Since both settings were given, pymatgen"
"will generate a KPOINTS file and ignore KSPACING."
"Remove the `user_kpoints_settings` argument to enable KSPACING.",
BadInputSetWarning,
)
if self.vdw:
vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
try:
self._config_dict["INCAR"].update(vdw_par[self.vdw])
except KeyError:
raise KeyError(
"Invalid or unsupported van-der-Waals "
"functional. Supported functionals are "
"%s." % vdw_par.keys()
)
# read the POTCAR_FUNCTIONAL from the .yaml
self.potcar_functional = self._config_dict.get("POTCAR_FUNCTIONAL", "PBE")
if potcar_functional is not None and user_potcar_functional is not None:
raise ValueError(
"Received both 'potcar_functional' and "
"'user_potcar_functional arguments. 'potcar_functional "
"is deprecated."
)
if potcar_functional:
warnings.warn(
"'potcar_functional' argument is deprecated. Use " "'user_potcar_functional' instead.",
FutureWarning,
)
self.potcar_functional = potcar_functional
elif user_potcar_functional:
self.potcar_functional = user_potcar_functional
# warn if a user is overriding POTCAR_FUNCTIONAL
if self.potcar_functional != self._config_dict.get("POTCAR_FUNCTIONAL"):
warnings.warn(
"Overriding the POTCAR functional is generally not recommended "
" as it significantly affect the results of calculations and "
"compatibility with other calculations done with the same "
"input set. Note that some POTCAR symbols specified in "
"the configuration file may not be available in the selected "
"functional.",
BadInputSetWarning,
)
if self.user_potcar_settings:
warnings.warn(
"Overriding POTCARs is generally not recommended as it "
"significantly affect the results of calculations and "
"compatibility with other calculations done with the same "
"input set. In many instances, it is better to write a "
"subclass of a desired input set and override the POTCAR in "
"the subclass to be explicit on the differences.",
BadInputSetWarning,
)
for k, v in self.user_potcar_settings.items():
self._config_dict["POTCAR"][k] = v
@property
def structure(self) -> Structure:
"""
:return: Structure
"""
if self.standardize and self.sym_prec:
return standardize_structure(
self._structure,
sym_prec=self.sym_prec,
international_monoclinic=self.international_monoclinic,
)
return self._structure
@property
def incar(self) -> Incar:
"""
:return: Incar
"""
settings = dict(self._config_dict["INCAR"])
for k, v in self.user_incar_settings.items():
if v is None:
try:
del settings[k]
except KeyError:
settings[k] = v
elif k == "KSPACING" and self.user_kpoints_settings != {}:
pass # Ignore KSPACING if user_kpoints_settings are given
else:
settings[k] = v
structure = self.structure
incar = Incar()
comp = structure.composition
elements = sorted([el for el in comp.elements if comp[el] > 0], key=lambda e: e.X)
most_electroneg = elements[-1].symbol
poscar = Poscar(structure)
hubbard_u = settings.get("LDAU", False)
for k, v in settings.items():
if k == "MAGMOM":
mag = []
for site in structure:
if hasattr(site, "magmom"):
mag.append(site.magmom)
elif hasattr(site.specie, "spin"):
mag.append(site.specie.spin)
elif str(site.specie) in v:
if site.specie.symbol == "Co":
warnings.warn(
"Co without oxidation state is initialized low spin by default. If this is "
"not desired, please set the spin on the magmom on the site directly to "
"ensure correct initialization"
)
mag.append(v.get(str(site.specie)))
else:
if site.specie.symbol == "Co":
warnings.warn(
"Co without oxidation state is initialized low spin by default. If this is "
"not desired, please set the spin on the magmom on the site directly to "
"ensure correct initialization"
)
mag.append(v.get(site.specie.symbol, 0.6))
incar[k] = mag
elif k in ("LDAUU", "LDAUJ", "LDAUL"):
if hubbard_u:
if hasattr(structure[0], k.lower()):
m = {site.specie.symbol: getattr(site, k.lower()) for site in structure}
incar[k] = [m[sym] for sym in poscar.site_symbols]
# lookup specific LDAU if specified for most_electroneg atom
elif most_electroneg in v.keys() and isinstance(v[most_electroneg], dict):
incar[k] = [v[most_electroneg].get(sym, 0) for sym in poscar.site_symbols]
# else, use fallback LDAU value if it exists
else:
incar[k] = [
v.get(sym, 0) if isinstance(v.get(sym, 0), (float, int)) else 0
for sym in poscar.site_symbols
]
elif k.startswith("EDIFF") and k != "EDIFFG":
if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
incar["EDIFF"] = float(v) * structure.num_sites
else:
incar["EDIFF"] = float(settings["EDIFF"])
else:
incar[k] = v
has_u = hubbard_u and sum(incar["LDAUU"]) > 0
if has_u:
# modify LMAXMIX if LSDA+U and you have d or f electrons
# note that if the user explicitly sets LMAXMIX in settings it will
# override this logic.
if "LMAXMIX" not in settings.keys():
# contains f-electrons
if any(el.Z > 56 for el in structure.composition):
incar["LMAXMIX"] = 6
# contains d-electrons
elif any(el.Z > 20 for el in structure.composition):
incar["LMAXMIX"] = 4
else:
for key in list(incar.keys()):
if key.startswith("LDAU"):
del incar[key]
if self.constrain_total_magmom:
nupdown = sum([mag if abs(mag) > 0.6 else 0 for mag in incar["MAGMOM"]])
incar["NUPDOWN"] = nupdown
if self.use_structure_charge:
incar["NELECT"] = self.nelect
# Ensure adequate number of KPOINTS are present for the tetrahedron
# method (ISMEAR=-5). If KSPACING is in the INCAR file the number
# of kpoints is not known before calling VASP, but a warning is raised
# when the KSPACING value is > 0.5 (2 reciprocal Angstrom).
# An error handler in Custodian is available to
# correct overly large KSPACING values (small number of kpoints)
# if necessary.
# if "KSPACING" not in self.user_incar_settings.keys():
if self.kpoints is not None:
if np.product(self.kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
incar["ISMEAR"] = 0
if self.user_incar_settings.get("KSPACING", 0) > 0.5 and incar.get("ISMEAR", 0) == -5:
warnings.warn(
"Large KSPACING value detected with ISMEAR = -5. Ensure that VASP "
"generates an adequate number of KPOINTS, lower KSPACING, or "
"set ISMEAR = 0",
BadInputSetWarning,
)
if all(k.is_metal for k in structure.composition.keys()):
if incar.get("NSW", 0) > 0 and incar.get("ISMEAR", 1) < 1:
warnings.warn(
"Relaxation of likely metal with ISMEAR < 1 "
"detected. Please see VASP recommendations on "
"ISMEAR for metals.",
BadInputSetWarning,
)
return incar
@property
def poscar(self) -> Poscar:
"""
:return: Poscar
"""
return Poscar(self.structure)
@property
def nelect(self) -> float:
"""
Gets the default number of electrons for a given structure.
"""
nelectrons_by_element = {p.element: p.nelectrons for p in self.potcar}
nelect = sum(
[
num_atoms * nelectrons_by_element[str(el)]
for el, num_atoms in self.structure.composition.element_composition.items()
]
)
if self.use_structure_charge:
return nelect - self.structure.charge
return nelect
@property
def kpoints(self) -> Union[Kpoints, None]:
"""
Returns a KPOINTS file using the fully automated grid method. Uses
Gamma centered meshes for hexagonal cells and Monk grids otherwise.
If KSPACING is set in user_incar_settings (or the INCAR file), no
file is created because VASP will automatically generate the kpoints.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
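Example (a brief sketch; ``structure`` is assumed to be an existing pymatgen
Structure)::

    vis = MPRelaxSet(structure, user_kpoints_settings={"reciprocal_density": 200})
    kpts = vis.kpoints  # Kpoints object, or None if KSPACING controls the mesh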
"""
# Return None if KSPACING is present in the INCAR, because this will
# cause VASP to generate the kpoints automatically
if self.user_incar_settings.get("KSPACING") or self._config_dict["INCAR"].get("KSPACING"):
if self.user_kpoints_settings == {}:
return None
settings = self.user_kpoints_settings or self._config_dict.get("KPOINTS")
if isinstance(settings, Kpoints):
return settings
# Return None if KSPACING is present in the INCAR, because this will
# cause VASP to generate the kpoints automatically
if self.user_incar_settings.get("KSPACING") and self.user_kpoints_settings == {}:
return None
# If grid_density is in the kpoints_settings use
# Kpoints.automatic_density
if settings.get("grid_density"):
return Kpoints.automatic_density(self.structure, int(settings["grid_density"]), self.force_gamma)
# If reciprocal_density is in the kpoints_settings use
# Kpoints.automatic_density_by_vol
if settings.get("reciprocal_density"):
return Kpoints.automatic_density_by_vol(
self.structure, int(settings["reciprocal_density"]), self.force_gamma
)
# If length is in the kpoints_settings use Kpoints.automatic
if settings.get("length"):
return Kpoints.automatic(settings["length"])
# Raise error. Unsure of which kpoint generation to use
raise ValueError(
"Invalid KPoint Generation algo : Supported Keys are "
"grid_density: for Kpoints.automatic_density generation, "
"reciprocal_density: for KPoints.automatic_density_by_vol "
"generation, and length : for Kpoints.automatic generation"
)
def estimate_nbands(self) -> int:
"""
Estimate the number of bands that VASP will initialize a
calculation with by default. Note that in practice this
can depend on # of cores (if not set explicitly)
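As implemented below, the estimate is NBANDS = NELECT/2 + NIONS/2 for
non-spin-polarized runs and NBANDS = 0.6*NELECT + NMAG otherwise (rounded
up), where NMAG is the number of sites with a non-zero MAGMOM.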
"""
nions = len(self.structure)
# from VASP's point of view, the number of magnetic atoms are
# the number of atoms with non-zero magmoms, so use Incar as
# source of truth
nmag = len([m for m in self.incar["MAGMOM"] if not np.allclose(m, 0)])
# by definition, if non-spin polarized ignore nmag
if (not nmag) or (self.incar["ISPIN"] == 1):
nbands = np.ceil(self.nelect / 2 + nions / 2)
else:
nbands = np.ceil(0.6 * self.nelect + nmag)
return int(nbands)
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return self.__class__.__name__
def write_input(
self,
output_dir: str,
make_dir_if_not_present: bool = True,
include_cif: bool = False,
potcar_spec: bool = False,
zip_output: bool = False,
):
"""
Writes out all input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
This is intended to help share an input set with people who might
not have a license to specific Potcar files. Given a "POTCAR.spec",
the specific POTCAR file can be re-generated using pymatgen with the
"generate_potcar" function in the pymatgen CLI.
"""
super().write_input(
output_dir=output_dir,
make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif,
potcar_spec=potcar_spec,
zip_output=zip_output,
)
for k, v in self.files_to_transfer.items():
with zopen(v, "rb") as fin, zopen(str(Path(output_dir) / k), "wb") as fout:
shutil.copyfileobj(fin, fout)
def calculate_ng(self, max_prime_factor: int = 7, must_inc_2: bool = True) -> Tuple:
"""
Calculates the NGX, NGY, and NGZ values using the information available in the INCAR and POTCAR.
This is meant to help with making an initial guess for the FFT grid so we can interact with the Charge Density API.
Args:
max_prime_factor (int): the largest prime factor allowed in the grid size along each direction.
VASP has many different settings for this to handle many compiling options.
For typical MPI options all prime factors up to 7 are allowed.
must_inc_2 (bool): Whether 2 must be a prime factor of the grid size along each direction.
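Example (a brief sketch; "POSCAR" is a hypothetical structure file, and
configured POTCARs are assumed in case ENCUT has to fall back to the largest
POTCAR ENMAX)::

    vis = MPStaticSet(Structure.from_file("POSCAR"))
    ng_coarse, ng_fine = vis.calculate_ng(max_prime_factor=7)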
"""
# TODO throw error for Ultrasoft potentials
_RYTOEV = 13.605826
_AUTOA = 0.529177249
_PI = 3.141592653589793238
# TODO Only do this for VASP 6 for now. Older versions require more advanced logic
# get the ENCUT val
if "ENCUT" in self.incar and self.incar["ENCUT"] > 0:
encut = self.incar["ENCUT"]
else:
encut = max([i_species.enmax for i_species in self.all_input["POTCAR"]])
#
_CUTOF = [
np.sqrt(encut / _RYTOEV) / (2 * _PI / (anorm / _AUTOA)) for anorm in self.poscar.structure.lattice.abc
]
_PREC = "Normal" # VASP default
if "PREC" in self.incar:
_PREC = self.incar["PREC"]
if _PREC[0].lower() in {"l", "m", "h"}:
raise NotImplementedError(
"PREC = LOW/MEDIUM/HIGH from VASP 4.x and not supported, Please use NORMA/SINGLE/ACCURATE"
)
if _PREC[0].lower() in {"a", "s"}: # TODO This only works in VASP 6.x
_WFACT = 4
else:
_WFACT = 3
def next_g_size(cur_g_size):
g_size = int(_WFACT * cur_g_size + 0.5)
return next_num_with_prime_factors(g_size, max_prime_factor, must_inc_2)
ng_vec = [*map(next_g_size, _CUTOF)]
if _PREC[0].lower() in {"a", "n"}: # TODO This works for VASP 5.x and 6.x
finer_g_scale = 2
else:
finer_g_scale = 1
return ng_vec, [ng_ * finer_g_scale for ng_ in ng_vec]
# Helper functions to determine valid FFT grids for VASP
def next_num_with_prime_factors(n: int, max_prime_factor: int, must_inc_2: bool = True) -> int:
"""
Return the next number greater than or equal to n that only has the desired prime factors
Args:
n (int): Initial guess at the grid density
max_prime_factor (int): the maximum prime factor
must_inc_2 (bool): 2 must be a prime factor of the result
Returns:
int: first product of the prime factors that is >= n
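Example (illustrative values)::

    >>> next_num_with_prime_factors(31, 5)
    32
    >>> next_num_with_prime_factors(33, 3, must_inc_2=False)
    36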
"""
if max_prime_factor < 2:
raise ValueError("Must choose a maximum prime factor greater than 2")
prime_factors = primes_less_than(max_prime_factor)
for new_val in itertools.count(start=n):
if must_inc_2 and new_val % 2 != 0:
continue
cur_val_ = new_val
for j in prime_factors:
while cur_val_ % j == 0:
cur_val_ //= j
if cur_val_ == 1:
return new_val
raise ValueError("No factorable number found, not possible.")
def primes_less_than(max_val: int) -> List[int]:
"""
Get the primes less than or equal to the max value
"""
res = []
for i in range(2, max_val + 1):
for j in range(2, i):
if i % j == 0:
break
else:
res.append(i)
return res
class MITRelaxSet(DictSet):
"""
Standard implementation of VaspInputSet utilizing parameters in the MIT
High-throughput project.
The parameters are chosen specifically for a high-throughput project,
which means in general pseudopotentials with fewer electrons were chosen.
Please refer to::
A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
K. A. Persson, G. Ceder. A high-throughput infrastructure for density
functional theory calculations. Computational Materials Science,
2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
"""
CONFIG = _load_yaml_config("MITRelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, MITRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPRelaxSet(DictSet):
"""
Implementation of VaspInputSet utilizing parameters in the public
Materials Project. Typically, the pseudopotentials chosen contain more
electrons than the MIT parameters, and the k-point grid is ~50% more dense.
The LDAUU parameters are also different due to the different psps used,
which result in different fitted values.
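Example (a brief sketch; ``structure`` is assumed to be an existing pymatgen
Structure)::

    vis = MPRelaxSet(structure)
    incar, kpoints = vis.incar, vis.kpoints  # inspect the generated inputs without writing files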
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, MPRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPScanRelaxSet(DictSet):
"""
Class for writing a relaxation input set using the accurate and numerically
efficient r2SCAN variant of the Strongly Constrained and Appropriately Normed
(SCAN) metaGGA density functional.
Notes:
1. This functional is officially supported in VASP 6.0.0 and above. On older versions,
source code may be obtained by contacting the authors of the referenced manuscript.
The original SCAN functional, available from VASP 5.4.3 onwards, may be used instead
by passing `user_incar_settings={"METAGGA": "SCAN"}` when instantiating this InputSet.
r2SCAN and SCAN are expected to yield very similar results.
2. Meta-GGA calculations require POTCAR files that include
information on the kinetic energy density of the core-electrons,
i.e. "PBE_52" or "PBE_54". Make sure the POTCARs include the
following lines (see VASP wiki for more details):
$ grep kinetic POTCAR
kinetic energy-density
mkinetic energy-density pseudized
kinetic energy density (partial)
References:
James W. Furness, Aaron D. Kaplan, Jinliang Ning, John P. Perdew, and Jianwei Sun.
Accurate and Numerically Efficient r2SCAN Meta-Generalized Gradient Approximation.
The Journal of Physical Chemistry Letters 2020, 11, 8208-8215. DOI: 10.1021/acs.jpclett.0c02405
"""
CONFIG = _load_yaml_config("MPSCANRelaxSet")
def __init__(self, structure, bandgap=0, **kwargs):
"""
Args:
structure (Structure): Input structure.
bandgap (int): Bandgap of the structure in eV. The bandgap is used to
compute the appropriate k-point density and determine the
smearing settings.
Metallic systems (default, bandgap = 0) use a KSPACING value of 0.22
and Methfessel-Paxton order 2 smearing (ISMEAR=2, SIGMA=0.2).
Non-metallic systems (bandgap > 0) use the tetrahedron smearing
method (ISMEAR=-5, SIGMA=0.05). The KSPACING value is
calculated from the bandgap via Eqs. 25 and 29 of Wisesa, McGill,
and Mueller [1] (see References). Note that if 'user_incar_settings'
or 'user_kpoints_settings' override KSPACING, the calculation from
bandgap is not performed. A worked example of the KSPACING expression
is given at the end of this docstring.
vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
van der Waals density functional obtained by combining the SCAN functional
with the rVV10 non-local correlation functional. rVV10 is the only
dispersion correction available for SCAN at this time.
**kwargs: Same as those supported by DictSet.
References:
[1] P. Wisesa, K.A. McGill, T. Mueller, Efficient generation of
generalized Monkhorst-Pack grids through the use of informatics,
Phys. Rev. B. 93 (2016) 1–10. doi:10.1103/PhysRevB.93.155109.
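Worked example of the expressions implemented below (not a prescription):
for bandgap = 1 eV, rmin = 25.22 - 2.87*1 = 22.35 and
KSPACING = 2*pi*1.0265/(22.35 - 1.0183) ~= 0.30 1/Angstrom, which falls
inside the (0.22, 0.44) window and is therefore used as-is.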
"""
super().__init__(structure, MPScanRelaxSet.CONFIG, **kwargs)
self.bandgap = bandgap
self.kwargs = kwargs
if self.potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("SCAN calculations require PBE_52 or PBE_54!")
# self.kwargs.get("user_incar_settings", {
updates = {}
# select the KSPACING and smearing parameters based on the bandgap
if self.bandgap == 0:
updates["KSPACING"] = 0.22
updates["SIGMA"] = 0.2
updates["ISMEAR"] = 2
else:
rmin = 25.22 - 2.87 * bandgap # Eq. 25
kspacing = 2 * np.pi * 1.0265 / (rmin - 1.0183) # Eq. 29
# cap the KSPACING at a max of 0.44, per internal benchmarking
if 0.22 < kspacing < 0.44:
updates["KSPACING"] = kspacing
else:
updates["KSPACING"] = 0.44
updates["ISMEAR"] = -5
updates["SIGMA"] = 0.05
# Don't overwrite things the user has supplied
if self.user_incar_settings.get("KSPACING"):
del updates["KSPACING"]
if self.user_incar_settings.get("ISMEAR"):
del updates["ISMEAR"]
if self.user_incar_settings.get("SIGMA"):
del updates["SIGMA"]
if self.vdw:
if self.vdw != "rvv10":
warnings.warn(
"Use of van der waals functionals other than rVV10 " "with SCAN is not supported at this time. "
)
# delete any vdw parameters that may have been added to the INCAR
vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
for k, v in vdw_par[self.vdw].items():
try:
del self._config_dict["INCAR"][k]
except KeyError:
pass
self._config_dict["INCAR"].update(updates)
class MPMetalRelaxSet(MPRelaxSet):
"""
Implementation of VaspInputSet utilizing parameters in the public
Materials Project, but with tuning for metals. Key differences are a denser
k-point grid (reciprocal_density of 200) and Methfessel-Paxton smearing
(ISMEAR=1, SIGMA=0.2), which are more appropriate for metals.
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, **kwargs)
self._config_dict["INCAR"].update({"ISMEAR": 1, "SIGMA": 0.2})
self._config_dict["KPOINTS"].update({"reciprocal_density": 200})
self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
"""
Same as the MPRelaxSet, but with HSE parameters.
"""
CONFIG = _load_yaml_config("MPHSERelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, MPHSERelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
"""
Creates input files for a static calculation.
"""
def __init__(
self,
structure,
prev_incar=None,
prev_kpoints=None,
lepsilon=False,
lcalcpol=False,
reciprocal_density=100,
small_gap_multiply=None,
**kwargs,
):
"""
Args:
structure (Structure): Structure from previous run.
prev_incar (Incar): Incar file from previous run.
prev_kpoints (Kpoints): Kpoints from previous run.
lepsilon (bool): Whether to add static dielectric calculation
lcalcpol (bool): Whether to turn on evaluation of the Berry phase approximations
for electronic polarization
reciprocal_density (int): For static calculations, we usually set the
reciprocal density by volume. This is a convenience arg to change
that, rather than using user_kpoints_settings. Defaults to 100,
which is ~50% more than that of standard relaxation calculations.
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
**kwargs: kwargs supported by MPRelaxSet.
"""
super().__init__(structure, **kwargs)
if isinstance(prev_incar, str):
prev_incar = Incar.from_file(prev_incar)
if isinstance(prev_kpoints, str):
prev_kpoints = Kpoints.from_file(prev_kpoints)
self.prev_incar = prev_incar
self.prev_kpoints = prev_kpoints
self.reciprocal_density = reciprocal_density
self.kwargs = kwargs
self.lepsilon = lepsilon
self.lcalcpol = lcalcpol
self.small_gap_multiply = small_gap_multiply
@property
def incar(self):
"""
:return: Incar
"""
parent_incar = super().incar
incar = Incar(self.prev_incar) if self.prev_incar is not None else Incar(parent_incar)
incar.update(
{
"IBRION": -1,
"ISMEAR": -5,
"LAECHG": True,
"LCHARG": True,
"LORBIT": 11,
"LVHAR": True,
"LWAVE": False,
"NSW": 0,
"ALGO": "Normal",
}
)
if self.lepsilon:
incar["IBRION"] = 8
incar["LEPSILON"] = True
# LPEAD=T: numerical evaluation of overlap integral prevents
# LRF_COMMUTATOR errors and can lead to better expt. agreement
# but produces slightly different results
incar["LPEAD"] = True
# Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
# to output the ionic contribution.
incar.pop("NSW", None)
incar.pop("NPAR", None)
# tighter ediff for DFPT
incar["EDIFF"] = 1e-5
if self.lcalcpol:
incar["LCALCPOL"] = True
for k in ["MAGMOM", "NUPDOWN"] + list(self.user_incar_settings.keys()):
# For these parameters as well as user specified settings, override
# the incar settings.
if parent_incar.get(k, None) is not None:
incar[k] = parent_incar[k]
else:
incar.pop(k, None)
# use new LDAUU when possible b/c the Poscar might have changed
# representation
if incar.get("LDAU"):
u = incar.get("LDAUU", [])
j = incar.get("LDAUJ", [])
if sum([u[x] - j[x] for x, y in enumerate(u)]) > 0:
for tag in ("LDAUU", "LDAUL", "LDAUJ"):
incar.update({tag: parent_incar[tag]})
# ensure to have LMAXMIX for GGA+U static run
if "LMAXMIX" not in incar:
incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})
# Compare EDIFF between the previous run and the static input set values,
# choose the tighter ediff
incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
return incar
@property
def kpoints(self) -> Optional[Kpoints]:
"""
:return: Kpoints
"""
self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
kpoints = super().kpoints
# Prefer to use k-point scheme from previous run
# except for when lepsilon = True is specified
if kpoints is not None:
if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
if (self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst) and (not self.lepsilon):
k_div = [kp + 1 if kp % 2 == 1 else kp for kp in kpoints.kpts[0]] # type: ignore
kpoints = Kpoints.monkhorst_automatic(k_div)
else:
kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])
return kpoints
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self.prev_kpoints = vasprun.kpoints
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure."
)
self._structure = get_structure_from_prev_run(vasprun, outcar)
# multiply the reciprocal density if needed
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = self.reciprocal_density * self.small_gap_multiply[1]
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for static calculations from a
directory of previous Vasp run.
Args:
prev_calc_dir (str): Directory containing the outputs
(vasprun.xml and OUTCAR) of the previous VASP run.
**kwargs: All kwargs supported by MPStaticSet, other than prev_incar
and prev_structure and prev_kpoints which are determined from
the prev_calc_dir.
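Example (a sketch; "./relax" is a hypothetical directory holding the
vasprun.xml and OUTCAR of a finished relaxation, and configured POTCARs
are assumed)::

    static_set = MPStaticSet.from_prev_calc("./relax")
    static_set.write_input("./static")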
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPScanStaticSet(MPScanRelaxSet):
"""
Creates input files for a static calculation using the accurate and numerically
efficient r2SCAN variant of the Strongly Constrained and Appropriately Normed
(SCAN) metaGGA functional.
"""
def __init__(self, structure, bandgap=0, prev_incar=None, lepsilon=False, lcalcpol=False, **kwargs):
"""
Args:
structure (Structure): Structure from previous run.
bandgap (float): Bandgap of the structure in eV. The bandgap is used to
compute the appropriate k-point density and determine the
smearing settings.
prev_incar (Incar): Incar file from previous run.
lepsilon (bool): Whether to add static dielectric calculation
lcalcpol (bool): Whether to turn on evaluation of the Berry phase approximations
for electronic polarization.
**kwargs: kwargs supported by MPScanRelaxSet.
"""
super().__init__(structure, bandgap, **kwargs)
if isinstance(prev_incar, str):
prev_incar = Incar.from_file(prev_incar)
self.prev_incar = prev_incar
self.kwargs = kwargs
self.lepsilon = lepsilon
self.lcalcpol = lcalcpol
@property
def incar(self):
"""
:return: Incar
"""
parent_incar = super().incar
incar = Incar(self.prev_incar) if self.prev_incar is not None else Incar(parent_incar)
incar.update({"LREAL": False, "NSW": 0, "LORBIT": 11, "LVHAR": True, "ISMEAR": -5})
if self.lepsilon:
incar["IBRION"] = 8
incar["LEPSILON"] = True
# LPEAD=T: numerical evaluation of overlap integral prevents
# LRF_COMMUTATOR errors and can lead to better expt. agreement
# but produces slightly different results
incar["LPEAD"] = True
# Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
# to output the ionic contribution.
incar.pop("NSW", None)
incar.pop("NPAR", None)
if self.lcalcpol:
incar["LCALCPOL"] = True
for k in list(self.user_incar_settings.keys()):
# For user specified settings, override
# the incar settings.
if parent_incar.get(k, None) is not None:
incar[k] = parent_incar[k]
else:
incar.pop(k, None)
return incar
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self._structure = get_structure_from_prev_run(vasprun, outcar)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for static calculations from a
directory of previous Vasp run.
Args:
prev_calc_dir (str): Directory containing the outputs
(vasprun.xml and OUTCAR) of the previous VASP run.
**kwargs: All kwargs supported by MPScanStaticSet, other than prev_incar
which is determined from the prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPHSEBSSet(MPHSERelaxSet):
"""
Implementation of a VaspInputSet for HSE band structure computations.
Remember that HSE band structures must be self-consistent in VASP. A
band structure along symmetry lines for instance needs BOTH a uniform
grid with appropriate weights AND a path along the lines with weight 0.
Thus, the "Uniform" mode is just like regular static SCF but allows
adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
uniform grid that have zero weight (e.g., for better gap estimate).
The "Gap" mode behaves just like the "Uniform" mode, however, if starting
from a previous calculation, the VBM and CBM k-points will automatically
be added to ``added_kpoints``.
The "Line" mode is just like Uniform mode, but additionally adds
k-points along symmetry lines with zero weight.
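Example (a sketch; "./scf" is a hypothetical directory containing the
outputs of a previous static run used to seed the HSE calculation)::

    hse_set = MPHSEBSSet.from_prev_calc("./scf", mode="Gap")
    hse_set.write_input("./hse_gap")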
"""
def __init__(
self,
structure,
user_incar_settings=None,
added_kpoints=None,
mode="Gap",
reciprocal_density=None,
copy_chgcar=True,
kpoints_line_density=20,
**kwargs,
):
"""
Args:
structure (Structure): Structure to compute
user_incar_settings (dict): A dict specifying additional incar
settings
added_kpoints (list): a list of kpoints (list of 3 number list)
added to the run. The k-points are in fractional coordinates
mode (str): "Line" - generate k-points along symmetry lines for
bandstructure. "Uniform" - generate uniform k-points grid.
reciprocal_density (int): k-point density to use for uniform mesh.
copy_chgcar (bool): Whether to copy the CHGCAR of a previous run.
kpoints_line_density (int): k-point density for high symmetry lines
**kwargs (dict): Any other parameters to pass into DictSet.
"""
super().__init__(structure, **kwargs)
self.user_incar_settings = user_incar_settings or {}
self._config_dict["INCAR"].update(
{
"NSW": 0,
"ISMEAR": 0,
"SIGMA": 0.05,
"ISYM": 3,
"LCHARG": False,
"NELMIN": 5,
}
)
self.added_kpoints = added_kpoints if added_kpoints is not None else []
self.mode = mode
if not reciprocal_density or "reciprocal_density" not in self.user_kpoints_settings:
self.reciprocal_density = 50
else:
self.reciprocal_density = reciprocal_density or self.user_kpoints_settings["reciprocal_density"]
self.kpoints_line_density = kpoints_line_density
self.copy_chgcar = copy_chgcar
@property
def kpoints(self) -> Kpoints:
"""
:return: Kpoints
"""
kpts = [] # type: List[Union[int, float, None]]
weights = [] # type: List[Union[float, None]]
all_labels = [] # type: List[Union[str, None]]
structure = self.structure
# for both modes, include the Uniform mesh w/standard weights
grid = Kpoints.automatic_density_by_vol(structure, self.reciprocal_density).kpts
ir_kpts = SpacegroupAnalyzer(structure, symprec=0.1).get_ir_reciprocal_mesh(grid[0])
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
all_labels.append(None)
# for both modes, include any user-added kpoints w/zero weight
for k in self.added_kpoints:
kpts.append(k)
weights.append(0.0)
all_labels.append("user-defined")
# for line mode only, add the symmetry lines w/zero weight
if self.mode.lower() == "line":
kpath = HighSymmKpath(structure)
frac_k_points, labels = kpath.get_kpoints(
line_density=self.kpoints_line_density, coords_are_cartesian=False
)
for k, f in enumerate(frac_k_points):
kpts.append(f)
weights.append(0.0)
all_labels.append(labels[k])
comment = "HSE run along symmetry lines" if self.mode.lower() == "line" else "HSE run on uniform grid"
return Kpoints(
comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts),
kpts=kpts, # type: ignore
kpts_weights=weights,
labels=all_labels,
)
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self._structure = get_structure_from_prev_run(vasprun, outcar)
# note: recommend not standardizing the cell because we want to retain
# k-points
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_calc is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure."
)
if self.mode.lower() == "gap":
added_kpoints = []
bs = vasprun.get_band_structure()
vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
if vbm:
added_kpoints.append(vbm.frac_coords)
if cbm:
added_kpoints.append(cbm.frac_coords)
self.added_kpoints.extend(added_kpoints)
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for HSE calculations from a
directory of previous Vasp run.
Args:
prev_calc_dir (str): Directory containing the outputs
(vasprun.xml and OUTCAR) of previous vasp run.
**kwargs: All kwargs supported by MPHSEBSSet, other than
prev_structure which is determined from the previous calc dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPNonSCFSet(MPRelaxSet):
"""
Init a MPNonSCFSet. Typically, you would use the classmethod
from_prev_calc to initialize from a previous SCF run.
"""
def __init__(
self,
structure,
prev_incar=None,
mode="line",
nedos=2001,
dedos=0.005,
reciprocal_density=100,
sym_prec=0.1,
kpoints_line_density=20,
optics=False,
copy_chgcar=True,
nbands_factor=1.2,
small_gap_multiply=None,
**kwargs,
):
"""
Args:
structure (Structure): Structure to compute
prev_incar (Incar/string): Incar file from previous run.
mode (str): Line, Uniform or Boltztrap mode supported.
nedos (int): nedos parameter. Default to 2001.
dedos (float): If nedos=0 and Uniform mode are used with from_prev_calc,
NEDOS is calculated automatically as the total energy range
divided by the energy step dedos.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
sym_prec (float): Symmetry precision (for Uniform mode).
kpoints_line_density (int): Line density for Line mode.
optics (bool): whether to add dielectric function
copy_chgcar: Whether to copy the old CHGCAR when starting from a
previous calculation.
nbands_factor (float): Multiplicative factor for NBANDS when starting
from a previous calculation. Choose a higher number if you are
doing an LOPTICS calculation.
small_gap_multiply ([float, float]): When starting from a previous
calculation, if the gap is less than 1st index, multiply the default
reciprocal_density by the 2nd index.
**kwargs: kwargs supported by MPRelaxSet.
"""
super().__init__(structure, **kwargs)
if isinstance(prev_incar, str):
prev_incar = Incar.from_file(prev_incar)
self.prev_incar = prev_incar
self.kwargs = kwargs
self.nedos = nedos
self.dedos = dedos
self.reciprocal_density = reciprocal_density
self.sym_prec = sym_prec
self.kpoints_line_density = kpoints_line_density
self.optics = optics
self.mode = mode.lower()
self.copy_chgcar = copy_chgcar
self.nbands_factor = nbands_factor
self.small_gap_multiply = small_gap_multiply
if self.mode.lower() not in ["line", "uniform", "boltztrap"]:
raise ValueError("Supported modes for NonSCF runs are 'Line', " "'Uniform' and 'Boltztrap!")
if (self.mode.lower() != "uniform" or nedos < 2000) and optics:
warnings.warn("It is recommended to use Uniform mode with a high " "NEDOS for optics calculations.")
@property
def incar(self) -> Incar:
"""
:return: Incar
"""
incar = super().incar
if self.prev_incar is not None:
incar.update(self.prev_incar.items())
# Overwrite necessary INCAR parameters from previous runs
incar.update(
{
"IBRION": -1,
"LCHARG": False,
"LORBIT": 11,
"LWAVE": False,
"NSW": 0,
"ISYM": 0,
"ICHARG": 11,
}
)
if self.mode.lower() == "uniform":
# use tetrahedron method for DOS and optics calculations
incar.update({"ISMEAR": -5, "ISYM": 2})
else:
# if line mode, can't use ISMEAR=-5; also use small sigma to avoid
# partial occupancies for small band gap materials.
# finally, explicit k-point generation (needed for bolztrap mode)
# is incompatible with ISMEAR = -5.
incar.update({"ISMEAR": 0, "SIGMA": 0.01})
incar.update(self.user_incar_settings)
if self.mode.lower() == "uniform":
# Set smaller steps for DOS and optics output
incar["NEDOS"] = self.nedos
if self.optics:
incar["LOPTICS"] = True
incar.pop("MAGMOM", None)
return incar
@property
def kpoints(self) -> Optional[Kpoints]:
"""
:return: Kpoints
"""
# override pymatgen kpoints if provided
user_kpoints = self.user_kpoints_settings
if isinstance(user_kpoints, Kpoints):
return user_kpoints
if self.mode.lower() == "line":
kpath = HighSymmKpath(self.structure)
frac_k_points, k_points_labels = kpath.get_kpoints(
line_density=self.kpoints_line_density, coords_are_cartesian=False
)
kpoints = Kpoints(
comment="Non SCF run along symmetry lines",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(frac_k_points),
kpts=frac_k_points,
labels=k_points_labels,
kpts_weights=[1] * len(frac_k_points),
)
elif self.mode.lower() == "boltztrap":
kpoints = Kpoints.automatic_density_by_vol(self.structure, self.reciprocal_density)
mesh = kpoints.kpts[0]
ir_kpts = SpacegroupAnalyzer(self.structure, symprec=self.sym_prec).get_ir_reciprocal_mesh(mesh)
kpts = []
weights = []
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
kpoints = Kpoints(
comment="Non SCF run on uniform grid",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(ir_kpts),
kpts=kpts,
kpts_weights=weights,
)
else:
self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
return super().kpoints
return kpoints
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
# Get a Magmom-decorated structure
self._structure = get_structure_from_prev_run(vasprun, outcar)
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized"
" structure. copy_chgcar is enforced to be false."
)
self.copy_chgcar = False
# Turn off spin when magmom for every site is smaller than 0.02.
if outcar and outcar.magnetization:
site_magmom = np.array([i["tot"] for i in outcar.magnetization])
ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
elif vasprun.is_spin:
ispin = 2
else:
ispin = 1
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
self.prev_incar.update({"ISPIN": ispin, "NBANDS": nbands})
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
# multiply the reciprocal density if needed:
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = self.reciprocal_density * self.small_gap_multiply[1]
self.kpoints_line_density = self.kpoints_line_density * self.small_gap_multiply[1]
# automatic setting of nedos using the energy range and the energy step dedos
if self.nedos == 0:
emax = max([eigs.max() for eigs in vasprun.eigenvalues.values()])
emin = min([eigs.min() for eigs in vasprun.eigenvalues.values()])
self.nedos = int((emax - emin) / self.dedos)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for NonSCF calculations from a
directory of previous static Vasp run.
Args:
prev_calc_dir (str): Directory containing the outputs
(vasprun.xml and OUTCAR) of the previous VASP run.
**kwargs: All kwargs supported by MPNonSCFSet, other than structure,
prev_incar and prev_chgcar which are determined from the
prev_calc_dir.
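Example (a sketch; "./static" is a hypothetical directory with the outputs
of a previous static run, and configured POTCARs are assumed)::

    bs_set = MPNonSCFSet.from_prev_calc("./static", mode="line", kpoints_line_density=40)
    bs_set.write_input("./bandstructure")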
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPSOCSet(MPStaticSet):
"""
An input set for running spin-orbit coupling (SOC) calculations.
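Example (a brief sketch; ``structure`` is assumed to exist and the
three-component magmoms shown are purely illustrative)::

    structure.add_site_property("magmom", [[0, 0, 3]] * len(structure))
    soc_set = MPSOCSet(structure, saxis=(0, 0, 1), nbands_factor=1.2)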
"""
def __init__(
self,
structure,
saxis=(0, 0, 1),
copy_chgcar=True,
nbands_factor=1.2,
reciprocal_density=100,
small_gap_multiply=None,
magmom=None,
**kwargs,
):
"""
Args:
structure (Structure): the structure must have the 'magmom' site
property and each magnetic moment value must have 3
components. eg: ``magmom = [[0,0,2], ...]``
saxis (tuple): magnetic moment orientation
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
reciprocal_density (int): density of k-mesh by reciprocal volume.
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
magmom (list[list[float]]): Override for the structure magmoms.
**kwargs: kwargs supported by MPStaticSet.
"""
if not hasattr(structure[0], "magmom") or not isinstance(structure[0].magmom, list):
raise ValueError(
"The structure must have the 'magmom' site "
"property and each magnetic moment value must have 3 "
"components. eg:- magmom = [0,0,2]"
)
super().__init__(structure, reciprocal_density=reciprocal_density, **kwargs)
self.saxis = saxis
self.copy_chgcar = copy_chgcar
self.nbands_factor = nbands_factor
self.small_gap_multiply = small_gap_multiply
self.magmom = magmom
@property
def incar(self) -> Incar:
"""
:return: Incar
"""
incar = super().incar
if self.prev_incar is not None:
incar.update(self.prev_incar.items())
# Overwrite necessary INCAR parameters from previous runs
incar.update({"ISYM": -1, "LSORBIT": "T", "ICHARG": 11, "SAXIS": list(self.saxis)})
incar.update(self.user_incar_settings)
return incar
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
# Remove magmoms from previous INCAR, since we will prefer
# the final calculated magmoms
# TODO: revisit in context of MPStaticSet incar logic
if "MAGMOM" in self.prev_incar:
del self.prev_incar["magmom"]
# Get a magmom-decorated structure
self._structure = get_structure_from_prev_run(vasprun, outcar)
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized"
" structure. copy_chgcar is enforced to be false."
)
self.copy_chgcar = False
# override magmom if provided
if self.magmom:
self._structure = self._structure.copy(site_properties={"magmom": self.magmom})
# magmom has to be 3D for SOC calculation.
if hasattr(self._structure[0], "magmom"):
if not isinstance(self._structure[0].magmom, list):
self._structure = self._structure.copy(
site_properties={"magmom": [[0, 0, site.magmom] for site in self._structure]}
)
else:
raise ValueError("Neither the previous structure has magmom " "property nor magmom provided")
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
self.prev_incar.update({"NBANDS": nbands})
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
# multiply the reciprocal density if needed:
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = self.reciprocal_density * self.small_gap_multiply[1]
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
        Generate a set of Vasp input files for SOC calculations from a
        directory of a previous static VASP run. SOC calculations require all 3
        components of MAGMOM for each atom in the structure.
        Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous VASP run.
**kwargs: All kwargs supported by MPSOCSet, other than structure,
prev_incar and prev_chgcar which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPNMRSet(MPStaticSet):
"""
    Input set for NMR calculations (chemical shift or electric field gradient).
"""
def __init__(self, structure, mode="cs", isotopes=None, prev_incar=None, reciprocal_density=100, **kwargs):
"""
Args:
structure (Structure): Structure to compute
mode (str): The NMR calculation to run
"cs": for Chemical Shift
"efg" for Electric Field Gradient
isotopes (list): list of Isotopes for quadrupole moments
prev_incar (Incar): Incar file from previous run.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
**kwargs: kwargs supported by MPStaticSet.
"""
self.mode = mode
self.isotopes = isotopes if isotopes else []
super().__init__(structure, prev_incar=prev_incar, reciprocal_density=reciprocal_density, **kwargs)
@property
def incar(self):
"""
:return: Incar
"""
incar = super().incar
if self.mode.lower() == "cs":
incar.update(
{
"LCHIMAG": True,
"EDIFF": -1.0e-10,
"ISYM": 0,
"LCHARG": False,
"LNMR_SYM_RED": True,
"NELMIN": 10,
"NSLPLINE": True,
"PREC": "ACCURATE",
"SIGMA": 0.01,
}
)
elif self.mode.lower() == "efg":
isotopes = {ist.split("-")[0]: ist for ist in self.isotopes}
quad_efg = [Species(p).get_nmr_quadrupole_moment(isotopes.get(p, None)) for p in self.poscar.site_symbols]
incar.update(
{
"ALGO": "FAST",
"EDIFF": -1.0e-10,
"ISYM": 0,
"LCHARG": False,
"LEFG": True,
"QUAD_EFG": quad_efg,
"NELMIN": 10,
"PREC": "ACCURATE",
"SIGMA": 0.01,
}
)
incar.update(self.user_incar_settings)
return incar
class MVLElasticSet(MPRelaxSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research.
This input set is used to calculate elastic constants in VASP. It is used
in the following work::
Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
“Elastic Properties of Alkali Superionic Conductor Electrolytes
from First Principles Calculations”, J. Electrochem. Soc.
2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes
To read the elastic constants, you may use the Outcar class which parses the
elastic constants.
"""
def __init__(self, structure, potim=0.015, **kwargs):
"""
Args:
            potim (float): POTIM parameter. The default of 0.015 is usually fine,
                but some structures may require a smaller step.
user_incar_settings (dict): A dict specifying additional incar
settings.
kwargs:
Parameters supported by MPRelaxSet.
"""
super().__init__(structure, **kwargs)
self._config_dict["INCAR"].update({"IBRION": 6, "NFREE": 2, "POTIM": potim})
self._config_dict["INCAR"].pop("NPAR", None)
class MVLGWSet(DictSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research. This is a
flexible input set for GW calculations.
Note that unlike all other input sets in this module, the PBE_54 series of
functional is set as the default. These have much improved performance for
GW calculations.
A typical sequence is mode="STATIC" -> mode="DIAG" -> mode="GW" ->
mode="BSE". For all steps other than the first one (static), the
recommendation is to use from_prev_calculation on the preceding run in
the series.
"""
CONFIG = _load_yaml_config("MVLGWSet")
SUPPORTED_MODES = ("DIAG", "GW", "STATIC", "BSE")
def __init__(
self,
structure,
prev_incar=None,
nbands=None,
reciprocal_density=100,
mode="STATIC",
copy_wavecar=True,
nbands_factor=5,
ncores=16,
**kwargs,
):
r"""
Args:
structure (Structure): Input structure.
prev_incar (Incar/string): Incar file from previous run.
mode (str): Supported modes are "STATIC" (default), "DIAG", "GW",
and "BSE".
nbands (int): For subsequent calculations, it is generally
recommended to perform NBANDS convergence starting from the
NBANDS of the previous run for DIAG, and to use the exact same
NBANDS for GW and BSE. This parameter is used by
from_previous_calculation to set nband.
copy_wavecar: Whether to copy the old WAVECAR, WAVEDER and associated
files when starting from a previous calculation.
nbands_factor (int): Multiplicative factor for NBANDS when starting
from a previous calculation. Only applies if mode=="DIAG".
Need to be tested for convergence.
            ncores (int): Number of cores used for the calculation. VASP will alter
                NBANDS if it is not divisible by ncores. Only applies if
                mode=="DIAG".
**kwargs: All kwargs supported by DictSet. Typically,
user_incar_settings is a commonly used option.
"""
super().__init__(structure, MVLGWSet.CONFIG, **kwargs)
self.prev_incar = prev_incar
self.nbands = nbands
self.reciprocal_density = reciprocal_density
self.mode = mode.upper()
if self.mode not in MVLGWSet.SUPPORTED_MODES:
raise ValueError("%s not one of the support modes : %s" % (self.mode, MVLGWSet.SUPPORTED_MODES))
self.kwargs = kwargs
self.copy_wavecar = copy_wavecar
self.nbands_factor = nbands_factor
self.ncores = ncores
@property
def kpoints(self):
"""
        Generate a Gamma-centered k-point mesh, as required for GW calculations.
"""
return Kpoints.automatic_density_by_vol(self.structure, self.reciprocal_density, force_gamma=True)
@property
def incar(self):
"""
:return: Incar
"""
parent_incar = super().incar
incar = Incar(self.prev_incar) if self.prev_incar is not None else Incar(parent_incar)
if self.mode == "DIAG":
# Default parameters for diagonalization calculation.
incar.update({"ALGO": "Exact", "NELM": 1, "LOPTICS": True, "LPEAD": True})
elif self.mode == "GW":
# Default parameters for GW calculation.
incar.update({"ALGO": "GW0", "NELM": 1, "NOMEGA": 80, "ENCUTGW": 250})
incar.pop("EDIFF", None)
incar.pop("LOPTICS", None)
incar.pop("LPEAD", None)
elif self.mode == "BSE":
# Default parameters for BSE calculation.
incar.update({"ALGO": "BSE", "ANTIRES": 0, "NBANDSO": 20, "NBANDSV": 20})
if self.nbands:
incar["NBANDS"] = self.nbands
# Respect user set INCAR.
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self._structure = vasprun.final_structure
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure."
)
self.nbands = int(vasprun.parameters["NBANDS"])
if self.mode.upper() == "DIAG":
self.nbands = int(np.ceil(self.nbands * self.nbands_factor / self.ncores) * self.ncores)
# copy WAVECAR, WAVEDER (derivatives)
files_to_transfer = {}
if self.copy_wavecar:
for fname in ("WAVECAR", "WAVEDER", "WFULL"):
w = sorted(glob.glob(str(Path(prev_calc_dir) / (fname + "*"))))
if w:
if fname == "WFULL":
for f in w:
fname = Path(f).name
fname = fname.split(".")[0]
files_to_transfer[fname] = f
else:
files_to_transfer[fname] = str(w[-1])
self.files_to_transfer.update(files_to_transfer)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, mode="DIAG", **kwargs):
"""
        Generate a set of Vasp input files for GW or BSE calculations from a
        directory of a previous exact-diagonalization VASP run.
        Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml) of the previous VASP run.
mode (str): Supported modes are "STATIC", "DIAG" (default), "GW",
and "BSE".
**kwargs: All kwargs supported by MVLGWSet, other than structure,
prev_incar and mode, which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, mode=mode, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MVLSlabSet(MPRelaxSet):
"""
    Class for writing a set of slab VASP runs,
    including both slabs (along the c direction) and oriented unit cells (bulk),
    to ensure the same KPOINTS, POTCAR and INCAR criteria.
"""
def __init__(
self, structure, k_product=50, bulk=False, auto_dipole=False, set_mix=True, sort_structure=True, **kwargs
):
"""
:param structure: Structure
:param k_product: default to 50, kpoint number * length for a & b
directions, also for c direction in bulk calculations
        :param bulk: Set to True for the oriented bulk (unit cell) calculation.
            Defaults to False (slab calculation).
        :param auto_dipole: Whether to add dipole corrections (IDIPOL, LDIPOL,
            DIPOL) based on the weighted center of mass. Defaults to False.
        :param set_mix: Whether to set the density-mixing parameters (AMIN, AMIX,
            BMIX) for slab calculations. Defaults to True.
        :param sort_structure: Whether to sort the structure before generating
            the inputs. Defaults to True.
:param kwargs: Other kwargs supported by :class:`DictSet`.
"""
super().__init__(structure, **kwargs)
if sort_structure:
structure = structure.get_sorted_structure()
self.k_product = k_product
self.bulk = bulk
self.auto_dipole = auto_dipole
self.kwargs = kwargs
self.set_mix = set_mix
self.kpt_calc = None
slab_incar = {
"EDIFF": 1e-4,
"EDIFFG": -0.02,
"ENCUT": 400,
"ISMEAR": 0,
"SIGMA": 0.05,
"ISIF": 3,
}
if not self.bulk:
slab_incar["ISIF"] = 2
slab_incar["LVTOT"] = True
if self.set_mix:
slab_incar["AMIN"] = 0.01
slab_incar["AMIX"] = 0.2
slab_incar["BMIX"] = 0.001
slab_incar["NELMIN"] = 8
if self.auto_dipole:
weights = [s.species.weight for s in structure]
center_of_mass = np.average(structure.frac_coords, weights=weights, axis=0)
slab_incar["IDIPOL"] = 3
slab_incar["LDIPOL"] = True
slab_incar["DIPOL"] = center_of_mass
self._config_dict["INCAR"].update(slab_incar)
@property
def kpoints(self):
"""
k_product, default to 50, is kpoint number * length for a & b
directions, also for c direction in bulk calculations
Automatic mesh & Gamma is the default setting.
"""
        # To get input sets, the input structure has to have the same number
        # of required parameters as a Structure object (i.e. 4). Slab
        # attributes aren't going to affect the VASP inputs anyway, so
        # converting the slab into a structure should not matter.
kpt = super().kpoints
kpt.comment = "Automatic mesh"
kpt.style = "Gamma"
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
lattice_abc = self.structure.lattice.abc
kpt_calc = [
int(self.k_product / lattice_abc[0] + 0.5),
int(self.k_product / lattice_abc[1] + 0.5),
1,
]
self.kpt_calc = kpt_calc
# calculate kpts (c direction) for bulk. (for slab, set to 1)
if self.bulk:
kpt_calc[2] = int(self.k_product / lattice_abc[2] + 0.5)
kpt.kpts[0] = kpt_calc
return kpt
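    # Worked example of the k-mesh formula above (illustrative): for a slab with
    # in-plane lattice lengths a = b = 4.0 Å and k_product = 50, the generated
    # mesh is [int(50 / 4.0 + 0.5), int(50 / 4.0 + 0.5), 1] = [13, 13, 1].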
def as_dict(self, verbosity=2):
"""
:param verbosity: Verbosity of dict. E.g., whether to include Structure.
        :return: MSONable dict
"""
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
class MVLGBSet(MPRelaxSet):
"""
    Class for writing VASP input files for grain boundary calculations, slab
    or bulk.
"""
def __init__(self, structure, k_product=40, slab_mode=False, is_metal=True, **kwargs):
r"""
Args:
structure(Structure): provide the structure
k_product: Kpoint number * length for a & b directions, also for c
direction in bulk calculations. Default to 40.
slab_mode (bool): Defaults to False. Use default (False) for a
bulk supercell. Use True if you are performing calculations on a
slab-like (i.e., surface) of the GB, for example, when you are
calculating the work of separation.
is_metal (bool): Defaults to True. This determines whether an ISMEAR of
1 is used (for metals) or not (for insulators and semiconductors)
by default. Note that it does *not* override user_incar_settings,
which can be set by the user to be anything desired.
**kwargs:
Other kwargs supported by :class:`MPRelaxSet`.
"""
super().__init__(structure, **kwargs)
self.k_product = k_product
self.slab_mode = slab_mode
self.is_metal = is_metal
@property
def kpoints(self):
"""
k_product, default to 40, is kpoint number * length for a & b
directions, also for c direction in bulk calculations
Automatic mesh & Gamma is the default setting.
"""
        # To get input sets, the input structure has to have the same number
        # of required parameters as a Structure object.
kpt = super().kpoints
kpt.comment = "Generated by pymatgen's MVLGBSet"
kpt.style = "Gamma"
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
lengths = self.structure.lattice.abc
kpt_calc = [
int(self.k_product / lengths[0] + 0.5),
int(self.k_product / lengths[1] + 0.5),
int(self.k_product / lengths[2] + 0.5),
]
if self.slab_mode:
kpt_calc[2] = 1
kpt.kpts[0] = kpt_calc
return kpt
@property
def incar(self):
"""
:return: Incar
"""
incar = super().incar
        # The default INCAR settings assume a metallic system; for an
        # insulator or semiconductor, ISMEAR needs to be changed.
incar.update(
{
"LCHARG": False,
"NELM": 60,
"PREC": "Normal",
"EDIFFG": -0.02,
"ICHARG": 0,
"NSW": 200,
"EDIFF": 0.0001,
}
)
if self.is_metal:
incar["ISMEAR"] = 1
incar["LDAU"] = False
if self.slab_mode:
            # For clean grain boundary and bulk relaxation, full cell
            # optimization (ISIF=3) is used. For slab relaxation, ISIF=2 is used.
incar["ISIF"] = 2
incar["NELMIN"] = 8
incar.update(self.user_incar_settings)
return incar
class MVLRelax52Set(DictSet):
"""
Implementation of VaspInputSet utilizing the public Materials Project
parameters for INCAR & KPOINTS and VASP's recommended PAW potentials for
POTCAR.
Keynotes from VASP manual:
1. Recommended potentials for calculations using vasp.5.2+
2. If dimers with short bonds are present in the compound (O2, CO,
N2, F2, P2, S2, Cl2), it is recommended to use the h potentials.
Specifically, C_h, O_h, N_h, F_h, P_h, S_h, Cl_h
3. Released on Oct 28, 2018 by VASP. Please refer to VASP
Manual 1.2, 1.3 & 10.2.1 for more details.
"""
CONFIG = _load_yaml_config("MVLRelax52Set")
def __init__(self, structure, **kwargs):
"""
Args:
structure (Structure): input structure.
potcar_functional (str): choose from "PBE_52" and "PBE_54".
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
super().__init__(structure, MVLRelax52Set.CONFIG, **kwargs)
else:
super().__init__(structure, MVLRelax52Set.CONFIG, user_potcar_functional="PBE_52", **kwargs)
if self.potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("Please select from PBE_52 and PBE_54!")
self.kwargs = kwargs
class MITNEBSet(MITRelaxSet):
"""
Class for writing NEB inputs. Note that EDIFF is not on a per atom
basis for this input set.
"""
def __init__(self, structures, unset_encut=False, **kwargs):
"""
Args:
structures: List of Structure objects.
unset_encut (bool): Whether to unset ENCUT.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
if len(structures) < 3:
raise ValueError("You need at least 3 structures for an NEB.")
kwargs["sort_structure"] = False
super().__init__(structures[0], **kwargs)
self.structures = self._process_structures(structures)
self.unset_encut = False
if unset_encut:
self._config_dict["INCAR"].pop("ENCUT", None)
if "EDIFF" not in self._config_dict["INCAR"]:
self._config_dict["INCAR"]["EDIFF"] = self._config_dict["INCAR"].pop("EDIFF_PER_ATOM")
# NEB specific defaults
defaults = {
"IMAGES": len(structures) - 2,
"IBRION": 1,
"ISYM": 0,
"LCHARG": False,
"LDAU": False,
}
self._config_dict["INCAR"].update(defaults)
@property
def poscar(self):
"""
:return: Poscar for structure of first end point.
"""
return Poscar(self.structures[0])
@property
def poscars(self):
"""
:return: List of Poscars.
"""
return [Poscar(s) for s in self.structures]
@staticmethod
def _process_structures(structures):
"""
Remove any atom jumps across the cell
"""
input_structures = structures
structures = [input_structures[0]]
for s in input_structures[1:]:
prev = structures[-1]
for i, site in enumerate(s):
t = np.round(prev[i].frac_coords - site.frac_coords)
if np.any(np.abs(t) > 0.5):
s.translate_sites([i], t, to_unit_cell=False)
structures.append(s)
return structures
def write_input(
self,
output_dir,
make_dir_if_not_present=True,
write_cif=False,
write_path_cif=False,
write_endpoint_inputs=False,
):
"""
        NEB inputs have a special directory structure where inputs are in 00,
        01, 02, ....
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
write_cif (bool): If true, writes a cif along with each POSCAR.
write_path_cif (bool): If true, writes a cif for each image.
write_endpoint_inputs (bool): If true, writes input files for
running endpoint calculations.
"""
output_dir = Path(output_dir)
if make_dir_if_not_present and not output_dir.exists():
output_dir.mkdir(parents=True)
self.incar.write_file(str(output_dir / "INCAR"))
self.kpoints.write_file(str(output_dir / "KPOINTS"))
self.potcar.write_file(str(output_dir / "POTCAR"))
for i, p in enumerate(self.poscars):
d = output_dir / str(i).zfill(2)
if not d.exists():
d.mkdir(parents=True)
p.write_file(str(d / "POSCAR"))
if write_cif:
p.structure.to(filename=str(d / "{}.cif".format(i)))
if write_endpoint_inputs:
end_point_param = MITRelaxSet(self.structures[0], user_incar_settings=self.user_incar_settings)
for image in ["00", str(len(self.structures) - 1).zfill(2)]:
end_point_param.incar.write_file(str(output_dir / image / "INCAR"))
end_point_param.kpoints.write_file(str(output_dir / image / "KPOINTS"))
end_point_param.potcar.write_file(str(output_dir / image / "POTCAR"))
if write_path_cif:
sites = set()
lat = self.structures[0].lattice
for site in chain(*(s.sites for s in self.structures)):
sites.add(PeriodicSite(site.species, site.frac_coords, lat))
nebpath = Structure.from_sites(sorted(sites))
nebpath.to(filename=str(output_dir / "path.cif"))
class MITMDSet(MITRelaxSet):
"""
Class for writing a vasp md run. This DOES NOT do multiple stage
runs.
"""
def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2, spin_polarized=False, **kwargs):
r"""
Args:
structure (Structure): Input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps (int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
# MD default settings
defaults = {
"TEBEG": start_temp,
"TEEND": end_temp,
"NSW": nsteps,
"EDIFF_PER_ATOM": 0.000001,
"LSCALU": False,
"LCHARG": False,
"LPLANE": False,
"LWAVE": True,
"ISMEAR": 0,
"NELMIN": 4,
"LREAL": True,
"BMIX": 1,
"MAXMIX": 20,
"NELM": 500,
"NSIM": 4,
"ISYM": 0,
"ISIF": 0,
"IBRION": 0,
"NBLOCK": 1,
"KBLOCK": 100,
"SMASS": 0,
"POTIM": time_step,
"PREC": "Low",
"ISPIN": 2 if spin_polarized else 1,
"LDAU": False,
}
super().__init__(structure, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.time_step = time_step
self.spin_polarized = spin_polarized
self.kwargs = kwargs
# use VASP default ENCUT
self._config_dict["INCAR"].pop("ENCUT", None)
if defaults["ISPIN"] == 1:
self._config_dict["INCAR"].pop("MAGMOM", None)
self._config_dict["INCAR"].update(defaults)
@property
def kpoints(self):
"""
:return: Kpoints
"""
return Kpoints.gamma_automatic()
class MPMDSet(MPRelaxSet):
"""
    This is a modified version of the old MITMDSet pre 2018/03/12.
This set serves as the basis for the amorphous skyline paper.
(1) Aykol, M.; Dwaraknath, S. S.; Sun, W.; Persson, K. A. Thermodynamic
Limit for Synthesis of Metastable Inorganic Materials. Sci. Adv. 2018,
4 (4).
Class for writing a vasp md run. This DOES NOT do multiple stage runs.
Precision remains normal, to increase accuracy of stress tensor.
"""
def __init__(self, structure, start_temp, end_temp, nsteps, spin_polarized=False, **kwargs):
r"""
Args:
structure (Structure): Input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps (int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
# MD default settings
defaults = {
"TEBEG": start_temp,
"TEEND": end_temp,
"NSW": nsteps,
"EDIFF_PER_ATOM": 0.00001,
"LSCALU": False,
"LCHARG": False,
"LPLANE": False,
"LWAVE": True,
"ISMEAR": 0,
"NELMIN": 4,
"LREAL": True,
"BMIX": 1,
"MAXMIX": 20,
"NELM": 500,
"NSIM": 4,
"ISYM": 0,
"ISIF": 0,
"IBRION": 0,
"NBLOCK": 1,
"KBLOCK": 100,
"SMASS": 0,
"POTIM": 2,
"PREC": "Normal",
"ISPIN": 2 if spin_polarized else 1,
"LDAU": False,
"ADDGRID": True,
}
if Element("H") in structure.species:
defaults["POTIM"] = 0.5
defaults["NSW"] = defaults["NSW"] * 4
super().__init__(structure, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.spin_polarized = spin_polarized
self.kwargs = kwargs
# use VASP default ENCUT
self._config_dict["INCAR"].pop("ENCUT", None)
if defaults["ISPIN"] == 1:
self._config_dict["INCAR"].pop("MAGMOM", None)
self._config_dict["INCAR"].update(defaults)
@property
def kpoints(self):
"""
:return: Kpoints
"""
return Kpoints.gamma_automatic()
class MVLNPTMDSet(MITMDSet):
"""
Class for writing a vasp md run in NPT ensemble.
Notes:
        To eliminate Pulay stress, the default ENCUT is set to a rather large
        value, 1.5 * ENMAX.
"""
def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2, spin_polarized=False, **kwargs):
r"""
Args:
structure (Structure): input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps(int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
user_incar_settings = kwargs.get("user_incar_settings", {})
# NPT-AIMD default settings
defaults = {
"IALGO": 48,
"ISIF": 3,
"LANGEVIN_GAMMA": [10] * structure.ntypesp,
"LANGEVIN_GAMMA_L": 1,
"MDALGO": 3,
"PMASS": 10,
"PSTRESS": 0,
"SMASS": 0,
}
defaults.update(user_incar_settings)
kwargs["user_incar_settings"] = defaults
super().__init__(structure, start_temp, end_temp, nsteps, time_step, spin_polarized, **kwargs)
# Set NPT-AIMD ENCUT = 1.5 * VASP_default
enmax = [self.potcar[i].keywords["ENMAX"] for i in range(structure.ntypesp)]
encut = max(enmax) * 1.5
self._config_dict["INCAR"]["ENCUT"] = encut
class MVLScanRelaxSet(MPRelaxSet):
"""
Class for writing a relax input set using Strongly Constrained and
Appropriately Normed (SCAN) semilocal density functional.
Notes:
1. This functional is only available from VASP.5.4.3 upwards.
2. Meta-GGA calculations require POTCAR files that include
information on the kinetic energy density of the core-electrons,
i.e. "PBE_52" or "PBE_54". Make sure the POTCAR including the
following lines (see VASP wiki for more details):
$ grep kinetic POTCAR
kinetic energy-density
mkinetic energy-density pseudized
kinetic energy density (partial)
"""
def __init__(self, structure, **kwargs):
r"""
Args:
structure (Structure): input structure.
vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional obtained by combining the SCAN functional
with the rVV10 non-local correlation functional.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
# choose PBE_52 unless the user specifies something else
if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
super().__init__(structure, **kwargs)
else:
super().__init__(structure, user_potcar_functional="PBE_52", **kwargs)
if self.potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("SCAN calculations required PBE_52 or PBE_54!")
updates = {
"ADDGRID": True,
"EDIFF": 1e-05,
"EDIFFG": -0.05,
"LASPH": True,
"LDAU": False,
"METAGGA": "SCAN",
"NELM": 200,
}
if kwargs.get("vdw", "").lower() == "rvv10":
updates["BPARAM"] = 15.7 # This is the correct BPARAM for SCAN+rVV10
self._config_dict["INCAR"].update(updates)
class LobsterSet(MPRelaxSet):
"""
Input set to prepare VASP runs that can be digested by Lobster (See cohp.de)
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(
self,
structure: Structure,
isym: int = 0,
ismear: int = -5,
reciprocal_density: int = None,
address_basis_file: str = None,
user_supplied_basis: dict = None,
user_potcar_settings: dict = {"W": "W_sv"},
**kwargs,
):
"""
Args:
structure (Structure): input structure.
isym (int): ISYM entry for INCAR, only isym=-1 and isym=0 are allowed
ismear (int): ISMEAR entry for INCAR, only ismear=-5 and ismear=0 are allowed
reciprocal_density (int): density of k-mesh by reciprocal volume
user_supplied_basis (dict): dict including basis functions for all elements in structure,
e.g. {"Fe": "3d 3p 4s", "O": "2s 2p"}; if not supplied, a standard basis is used
address_basis_file (str): address to a file similar to "BASIS_PBE_54_standaard.yaml"
in pymatgen.io.lobster.lobster_basis
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
from pymatgen.io.lobster import Lobsterin
warnings.warn("Make sure that all parameters are okay! This is a brand new implementation.")
if isym not in (-1, 0):
raise ValueError("Lobster cannot digest WAVEFUNCTIONS with symmetry")
if ismear not in (-5, 0):
raise ValueError("Lobster usually works with ismear=-5 or ismear=0")
# newest potcars are preferred
# Choose PBE_54 unless the user specifies a different potcar_functional
if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
super().__init__(structure, **kwargs)
else:
super().__init__(structure, user_potcar_functional="PBE_54", **kwargs)
# reciprocal density
if self.user_kpoints_settings is not None:
if not reciprocal_density or "reciprocal_density" not in self.user_kpoints_settings:
# test, if this is okay
self.reciprocal_density = 310
else:
self.reciprocal_density = reciprocal_density or self.user_kpoints_settings["reciprocal_density"]
else:
if not reciprocal_density:
# test, if this is okay
self.reciprocal_density = 310
else:
self.reciprocal_density = reciprocal_density
self.isym = isym
self.ismear = ismear
self.user_supplied_basis = user_supplied_basis
self.address_basis_file = address_basis_file
# predefined basis! Check if the basis is okay! (charge spilling and bandoverlaps!)
if user_supplied_basis is None and address_basis_file is None:
basis = Lobsterin.get_basis(structure=structure, potcar_symbols=self.potcar_symbols)
elif address_basis_file is not None:
basis = Lobsterin.get_basis(
structure=structure,
potcar_symbols=self.potcar_symbols,
address_basis_file=address_basis_file,
)
elif user_supplied_basis is not None:
# test if all elements from structure are in user_supplied_basis
for atomtype in structure.symbol_set:
if atomtype not in user_supplied_basis:
raise ValueError("There are no basis functions for the atom type " + str(atomtype))
basis = [key + " " + value for key, value in user_supplied_basis.items()]
lobsterin = Lobsterin(settingsdict={"basisfunctions": basis})
nbands = lobsterin._get_nbands(structure=structure)
update_dict = {
"EDIFF": 1e-6,
"NSW": 0,
"LWAVE": True,
"ISYM": isym,
"NBANDS": nbands,
"IBRION": -1,
"ISMEAR": ismear,
"LORBIT": 11,
"ICHARG": 0,
"ALGO": "Normal",
}
self._config_dict["INCAR"].update(update_dict)
self._config_dict["KPOINTS"].update({"reciprocal_density": self.reciprocal_density})
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
"""
:param path: Path to get the vasprun.xml and OUTCAR.
:param parse_dos: Whether to parse dos. Defaults to True.
:param parse_eigen: Whether to parse eigenvalue. Defaults to True.
    :return: A tuple of (Vasprun, Outcar) objects parsed from the directory.
"""
path = Path(path)
vruns = list(glob.glob(str(path / "vasprun.xml*")))
outcars = list(glob.glob(str(path / "OUTCAR*")))
if len(vruns) == 0 or len(outcars) == 0:
raise ValueError("Unable to get vasprun.xml/OUTCAR from prev calculation in %s" % path)
vsfile_fullpath = str(path / "vasprun.xml")
outcarfile_fullpath = str(path / "OUTCAR")
vsfile = vsfile_fullpath if vsfile_fullpath in vruns else sorted(vruns)[-1]
outcarfile = outcarfile_fullpath if outcarfile_fullpath in outcars else sorted(outcars)[-1]
return (
Vasprun(vsfile, parse_dos=parse_dos, parse_eigen=parse_eigen),
Outcar(outcarfile),
)
def get_structure_from_prev_run(vasprun, outcar=None):
"""
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
"""
structure = vasprun.final_structure
site_properties = {}
# magmom
if vasprun.is_spin:
if outcar and outcar.magnetization:
site_properties.update({"magmom": [i["tot"] for i in outcar.magnetization]})
else:
site_properties.update({"magmom": vasprun.parameters["MAGMOM"]})
# ldau
if vasprun.parameters.get("LDAU", False):
for k in ("LDAUU", "LDAUJ", "LDAUL"):
vals = vasprun.incar[k]
m = {}
l_val = []
s = 0
for site in structure:
if site.specie.symbol not in m:
m[site.specie.symbol] = vals[s]
s += 1
l_val.append(m[site.specie.symbol])
if len(l_val) == len(structure):
site_properties.update({k.lower(): l_val})
else:
raise ValueError("length of list {} not the same as" "structure".format(l_val))
return structure.copy(site_properties=site_properties)
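# Illustrative usage of the two helpers above (the directory name is a placeholder):
#
#   vrun, outcar = get_vasprun_outcar("prev_static_run")
#   magmom_struct = get_structure_from_prev_run(vrun, outcar)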
def standardize_structure(structure, sym_prec=0.1, international_monoclinic=True):
"""
Get the symmetrically standardized structure.
Args:
structure (Structure): The structure.
sym_prec (float): Tolerance for symmetry finding for standardization.
international_monoclinic (bool): Whether to use international
convention (vs Curtarolo) for monoclinic. Defaults True.
Returns:
The symmetrized structure.
"""
sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
new_structure = sym_finder.get_primitive_standard_structure(international_monoclinic=international_monoclinic)
# the primitive structure finding has had several bugs in the past
# defend through validation
vpa_old = structure.volume / structure.num_sites
vpa_new = new_structure.volume / new_structure.num_sites
if abs(vpa_old - vpa_new) / vpa_old > 0.02:
raise ValueError("Standardizing cell failed! VPA old: {}, VPA new: {}".format(vpa_old, vpa_new))
sm = StructureMatcher()
if not sm.fit(structure, new_structure):
raise ValueError("Standardizing cell failed! Old structure doesn't match new.")
return new_structure
class BadInputSetWarning(UserWarning):
"""
Warning class for bad but legal inputs.
"""
pass
def batch_write_input(
structures,
vasp_input_set=MPRelaxSet,
output_dir=".",
make_dir_if_not_present=True,
subfolder=None,
sanitize=False,
include_cif=False,
potcar_spec=False,
zip_output=False,
**kwargs,
):
"""
Batch write vasp input for a sequence of structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
structures ([Structure]): Sequence of Structures.
vasp_input_set (VaspInputSet): VaspInputSet class that creates
vasp input files from structures. Note that a class should be
supplied. Defaults to MPRelaxSet.
output_dir (str): Directory to output files. Defaults to current
directory ".".
make_dir_if_not_present (bool): Create the directory if not present.
Defaults to True.
subfolder (callable): Function to create subdirectory name from
structure. Defaults to simply "formula_count".
sanitize (bool): Boolean indicating whether to sanitize the
structure before writing the VASP input files. Sanitized output
are generally easier for viewing and certain forms of analysis.
Defaults to False.
include_cif (bool): Whether to output a CIF as well. CIF files are
generally better supported in visualization programs.
potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
This is intended to help sharing an input set with people who might
not have a license to specific Potcar files. Given a "POTCAR.spec",
the specific POTCAR file can be re-generated using pymatgen with the
"generate_potcar" function in the pymatgen CLI.
zip_output (bool): If True, output will be zipped into a file with the
same name as the InputSet (e.g., MPStaticSet.zip)
**kwargs: Additional kwargs are passed to the vasp_input_set class
in addition to structure.
"""
output_dir = Path(output_dir)
for i, s in enumerate(structures):
formula = re.sub(r"\s+", "", s.formula)
if subfolder is not None:
subdir = subfolder(s)
d = output_dir / subdir
else:
d = output_dir / "{}_{}".format(formula, i)
if sanitize:
s = s.copy(sanitize=True)
v = vasp_input_set(s, **kwargs)
v.write_input(
str(d),
make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif,
potcar_spec=potcar_spec,
zip_output=zip_output,
)
_dummy_structure = Structure(
[1, 0, 0, 0, 1, 0, 0, 0, 1],
["I"],
[[0, 0, 0]],
site_properties={"magmom": [[0, 0, 1]]},
)
def get_valid_magmom_struct(structure, inplace=True, spin_mode="auto"):
"""
    Make sure that the structure has valid magmoms based on the kind of calculation.
    Fill in missing magmom values.
Args:
structure: The input structure
        inplace: True - edit the magmom of the input structure; False - return a new structure
spin_mode: "scalar"/"vector"/"none"/"auto" only first letter (s/v/n) is needed.
dictates how the spin configuration will be determined.
- auto: read the existing magmom values and decide
- scalar: use a single scalar value (for spin up/down)
- vector: use a vector value for spin-orbit systems
- none: Remove all the magmom information
Returns:
New structure if inplace == False
"""
default_values = {"s": 1.0, "v": [1.0, 1.0, 1.0], "n": None}
if spin_mode[0].lower() == "a":
mode = "n"
for isite in structure.sites:
if "magmom" not in isite.properties or isite.properties["magmom"] is None:
pass
elif isinstance(isite.properties["magmom"], float):
if mode == "v":
raise TypeError("Magmom type conflict")
mode = "s"
elif len(isite.properties["magmom"]) == 3:
if mode == "s":
raise TypeError("Magmom type conflict")
mode = "v"
else:
raise TypeError("Unrecognized Magmom Value")
else:
mode = spin_mode[0].lower()
if not inplace:
new_struct = structure.copy()
else:
new_struct = structure
for isite in new_struct.sites:
if mode == "n":
if "magmom" in isite.properties:
isite.properties.pop("magmom")
elif "magmom" not in isite.properties or isite.properties["magmom"] is None:
isite.properties["magmom"] = default_values[mode]
if not inplace:
return new_struct
return None
|
gmatteo/pymatgen
|
pymatgen/io/vasp/sets.py
|
Python
|
mit
| 118,728
|
[
"BoltzTrap",
"VASP",
"pymatgen"
] |
b5015cceff2c01db0a9bf653f731bf7607cf06cddb697af97bc3995e946b2cc0
|
# First we need to import the libraries that
# we need
# Import the time library so that we can make
# the program pause for a fixed amount of time
import time
# Import the Raspberry Pi GPIO libraries that
# allow us to connect the Raspberry Pi to
# other physical devices via the General
# Purpose Input-Output (GPIO) pins
import RPi.GPIO as GPIO
# Now we need to set-up the General Purpose
# Input-Output (GPIO) pins
# Set up the GPIO library to use Raspberry Pi
# board pin numbers
GPIO.setmode(GPIO.BOARD)
# Set up the pin numbers we are using for each LED
RED=11
AMBER=16
GREEN=7
# Define the pin for the switch
SWITCH=22
# Set Pin 11, 16 and 7 on the GPIO header to act as an output
GPIO.setup(RED,GPIO.OUT)
GPIO.setup(AMBER,GPIO.OUT)
GPIO.setup(GREEN,GPIO.OUT)
# Set up pin 22 (SWITCH) to act as an input
GPIO.setup(SWITCH,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
# This loop runs forever and runs the traffic lights sequence
while True:
# Turn on the green LED
GPIO.output(GREEN,GPIO.HIGH)
print "Green"
ButtonPressed = False
# Wait until a pedestrian presses the switch
print "Press button"
while not ButtonPressed:
        # Wait for 1 second before checking the button again
time.sleep(1)
ButtonPressed = GPIO.input(SWITCH)
print "Button pressed"
# Turn off the green LED
GPIO.output(GREEN,GPIO.LOW)
# Turn on the amber LED
GPIO.output(AMBER,GPIO.HIGH)
print "Amber"
# Wait for 2 seconds
time.sleep(2)
GPIO.output(AMBER,GPIO.LOW)
# Turn on the red LED
GPIO.output(RED,GPIO.HIGH)
print "Red"
# Wait for 4 seconds
time.sleep(4)
# Turn off the red LED
GPIO.output(RED,GPIO.LOW)
# Now flash the amber light
count = 5
while count > 0:
# Turn on the amber LED
print "Flash amber"
GPIO.output(AMBER,GPIO.HIGH)
time.sleep(1)
# Turn off the amber LED
GPIO.output(AMBER,GPIO.LOW)
time.sleep(1)
count = count - 1
# End of code
|
bobrathbone/pischools
|
traffic_led/PedestrianCrossing.py
|
Python
|
gpl-3.0
| 1,970
|
[
"Amber"
] |
2bf367fe985af7ec62becf77c07122dec9546856754fe0cd0be0596f86f7fa86
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
# import matplotlib.pyplot as plt
from PIL import Image
from difflib import SequenceMatcher
from PIL import *
from PIL import ImageEnhance
import time
from pytesseract import image_to_string, image_to_boxes
import os
def getNameArea(imageFolderPath):
    """
    @param imageFolderPath: the path to the folder that contains the answer sheets
    This function tries to access the scanned files and crop the name area, then saves each name area into a new image file.
    """
    allfile = os.listdir(imageFolderPath)
    allfile.remove('.DS_Store')
    print(allfile)
    for element in allfile:
        image = cv2.imread(imageFolderPath + element)
        thePositionOfE = findcoordinateOfName(imageFolderPath + element)
        cropNamePart(image, thePositionOfE, element)
# name = "sample/" + element
def findcoordinateOfName(path):
"""
    @param path: the path of the answer sheet
    @return: the coordinates of the letter "e" of the word "name" in the answer sheet
    This function finds the word "name" in the answer sheet and returns its coordinates.
"""
image = cv2.imread(path)
height, width = image.shape[:2]
crop_img = image[ 0:int(height/3), 0:width]
cv2.imwrite("temp.png", crop_img)
image = Image.open("temp.png")
box = image_to_boxes(image).split('\n')
width , height = image.size
coordinate = []
for i in range(len(box)):
flag = False
if (box[i][0] == 'n' and box[i + 1][0] == 'a' and box[i + 2][0] == 'm' and box[i + 3][0] == 'e'):
for j in range(0, 4):
flag = True
coordinate.append(box[i+j])
if(flag):
break
coorE = coordinate[3].split(" ")
return (( int(coorE[1]) , height - int(coorE[4])), ( int(coorE[3]), height - int(coorE[2])))
def cropNamePart(image, thePositionOfE,fileName):
"""
@param image: the answerSheet
    @param thePositionOfE: the coordinate of "e" in the answer sheet
    @param fileName: file name of the saved image
    This function tries to crop the name area in the answer sheet and saves the file to local disk.
"""
# calculate bounds of interest
dh = thePositionOfE[1][1] - thePositionOfE[0][1]
upper = thePositionOfE[0][1] - 2 * dh
lower = thePositionOfE[1][1] + int(3.5 * dh)
left = thePositionOfE[1][0]
right = left + 40 * (thePositionOfE[1][0] - thePositionOfE[0][0])
crop_img = image[ upper:lower, left:right]
fileName = "sample/" + fileName
cv2.imwrite(fileName, crop_img)
# image = Image.open("temp.png")
# return image
def findLengthAndHeight(contour):
"""
@param contour: the contour of one shape, maybe one letter, maybe two letters
@return: the length of this contour
"""
# contour = answerBox[0].getContour()
x_axis = []
y_axis = []
for point in contour:
x_axis.append(point[0][0])
y_axis.append(point[0][1])
x_axis.sort()
y_axis.sort()
length = x_axis[-1] - x_axis[0]
height = y_axis[-1] - y_axis[0]
return length
def checkSpecial(cnt, contours,smallestWide, crop_image):
"""
@param cnt: the contour of the letter
@param contours: all of contours in the image
    @param smallestWide: a one-element list which stores the smallest width (a list is used to pass by reference)
@param crop_image: the image contains the name
@return: the box of the letter
This function will check the i and j in the handwriting
"""
[x, y, w, h] = cv2.boundingRect(cnt)
if w < smallestWide[0]:
smallestWide[0] = w
for contour in contours:
[x1, y1, w1, h1] = cv2.boundingRect(contour)
'''
if x - 1.3 * w <x1 < x + 2 * w and \
0.1*w < w1 < 2 * w and \
cv2.contourArea(contour) < 0.5 * cv2.contourArea(cnt) and \
h1 > 0.1*h and \
w < 2*smallestWide[0] and \
y1 < y:
cv2.drawContours(crop_image, [contour], -1, (0, 255, 0), 2)
'''
if x - 1.3 * w <x1 < x + 2 * w :
if 0.1*w < w1 < 2 * w :
if cv2.contourArea(contour) < 0.7 * cv2.contourArea(cnt) :
if h1 > 0.1*h:
if w < 3*smallestWide[0] :
if y1 < y:
cv2.drawContours(crop_image, [contour], -1, (0, 255, 0), 2)
return x, y1,abs(x1-x+w1), y-y1+h
return x, y, w, h
if __name__ == '__main__':
# get the sample from original image
imagePath2 = "/Users/gengruijie/Desktop/未命名文件夹/OneDrive/学习" + \
"/cs/课外/Github/AutoGrading/learning/Ruijie/handwriting_recognization/original_file/"
# getNameArea(imagePath2)
imagePath = "/Users/gengruijie/Desktop/未命名文件夹/OneDrive/学习/cs/课外/Github/AutoGrading/learning/Ruijie/handwriting_recognization/sample/"
image = cv2.imread(imagePath+"temp3.png")
res = image
# convert image to grayscale
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
# blur the image slightly to remove noise.
# gray = cv2.bilateralFilter(gray, 11, 17, 17)
gray = cv2.GaussianBlur(gray, (5, 5), 0) # is an alternative way to blur the image
# canny edge detection
edged = cv2.Canny(gray, 30, 200)
# two threshold method.
# The first one is normal threshold method
# The second one is use Gaussian method which has better effect.
# ret,thresh1 = cv2.threshold(gray,150,150,cv2.THRESH_BINARY)
thresh=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
try:
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except:
(_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
cnts = sorted(cnts, key=lambda student: student[0][0][0], reverse=False)
    # Find the outer frame, i.e. the contour whose length is greater than 2/3 of the image
i = 0
lengthList = []
temp = res.copy()
for cnt in cnts:
lengthList.append(findLengthAndHeight(cnt))
# if True:
# temp = res.copy()
# cv2.drawContours(temp, [cnt], -1, (0, 255, 0), 2)
# cv2.imwrite(str(i) + ".png", temp)
# i += 1
largestCnt = cnts[lengthList.index(max(lengthList))]
x_axis = []
y_axis = []
for point in largestCnt:
x_axis.append(point[0][0])
y_axis.append(point[0][1])
x_axis.sort()
y_axis.sort()
maxX = x_axis[-1] - 0.03*(x_axis[-1] - x_axis[0])
minX = x_axis[0] + 0.03*(x_axis[-1] - x_axis[0])
maxY = y_axis[-1] - 0.05*(y_axis[-1] - y_axis[0])
minY = y_axis[0] + 0.03*(y_axis[-1] - y_axis[0])
crop_img = temp[ int(minY): int(maxY), int(minX): int(maxX)]
gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
# blur the image slightly to remove noise.
# gray = cv2.bilateralFilter(gray, 11, 17, 17)
gray = cv2.GaussianBlur(gray, (5, 5), 0) # is an alternative way to blur the image
# canny edge detection
edged = cv2.Canny(gray, 30, 200)
try:
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except:
(_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for cnt in contours:
# # print(cv2.contourArea(cnt))
# if cv2.contourArea(cnt) > 8 and cv2.contourArea(cnt) < 3000:
# [x, y, w, h] = cv2.boundingRect(cnt)
# print(cv2.contourArea(cnt))
# print(h)
# if h > 25:
# cv2.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 2)
# roi = thresh[y:y + h, x:x + w]
# roismall = cv2.resize(roi, (10, 10))
# cv2.imshow('norm', im)
# key = cv2.waitKey(0)
#
# if key == 27: # (escape to quit)
# sys.exit()
# elif key in keys:
# responses.append(int(chr(key)))
# sample = roismall.reshape((1, 100))
# samples = np.append(samples, sample, 0)
cnts = sorted(cnts, key=lambda cnts: cnts[0][0][0], reverse=False)
cv2.drawContours(crop_img, cnts, -1, (0, 255, 0), 2)
cv2.imshow("this is all contours", crop_img)
smallestWide = [10000]
for cnt in cnts:
[_, _, w, h] = cv2.boundingRect(cnt)
if not(h > (maxY - minY) * 0.2 and w < (maxX-minX)*0.5 ):
continue
# special is i and j
x,y,w,h = checkSpecial(cnt, cnts,smallestWide, crop_img)
cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
roi = thresh[y:y + h, x:x + w]
roismall = cv2.resize(roi, (10, 10))
cv2.imshow("norm" , crop_img)
key = cv2.waitKey(0)
if key == 50:
cv2.imwrite("sample/Indivisible"+ ".png", crop_img)
elif key == 49:
cv2.imwrite("sample/Indivisible"+ ".png", crop_img)
# cv2.drawContours(temp, [cnts[14]], -1, (0, 255, 0), 2)
# cv2.imwrite("1111"+ ".png", crop_img)
|
Hubert51/AutoGrading
|
learning/Ruijie/handwriting_recognization/main.py
|
Python
|
mit
| 9,146
|
[
"Gaussian"
] |
6b0f7796f9b38c88c61d62b8015686cd50f4653a13841e0bcd9358d05d20f339
|
"""
Acceptance tests for Studio related to the textbooks.
"""
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
from ...pages.studio.textbook_upload import TextbookUploadPage
from ...pages.lms.textbook_view import TextbookViewPage
from ...tests.helpers import disable_animations
from nose.plugins.attrib import attr
@attr('shard_2')
class TextbooksTest(StudioCourseTest):
"""
Test that textbook functionality is working properly on studio side
"""
def setUp(self, is_staff=True):
"""
Install a course with no content using a fixture.
"""
super(TextbooksTest, self).setUp(is_staff)
self.textbook_upload_page = TextbookUploadPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.textbook_upload_page.visit()
disable_animations(self)
self.textbook_view_page = TextbookViewPage(self.browser, self.course_id)
def test_create_first_book_message(self):
"""
Scenario: A message is displayed on the textbooks page when there are no uploaded textbooks
Given that I am viewing the Textbooks page in Studio
And I have not yet uploaded a textbook
Then I see a message stating that I have not uploaded any textbooks
"""
message = self.textbook_upload_page.get_element_text('.wrapper-content .no-textbook-content')
self.assertIn("You haven't added any textbooks", message)
def test_new_textbook_upload(self):
"""
Scenario: View Live link for textbook is correctly populated
Given that I am viewing the Textbooks page in Studio
And I have uploaded a PDF textbook and save the new textbook information
Then the "View Live" link contains a link to the textbook in the LMS
"""
self.textbook_upload_page.upload_new_textbook()
self.assertTrue(self.textbook_upload_page.is_view_live_link_worked())
@attr('a11y')
def test_textbook_page_a11y(self):
"""
Uploads a new textbook
Runs an accessibility test on the textbook page in lms
"""
self.textbook_upload_page.upload_new_textbook()
self.textbook_view_page.visit()
self.textbook_view_page.a11y_audit.config.set_rules({
'ignore': [
'color-contrast', # AC-500
'skip-link', # AC-501
'link-href', # AC-502
'section' # AC-503
],
})
self.textbook_view_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
def test_pdf_viewer_a11y(self):
"""
Uploads a new textbook
Runs an accessibility test on the pdf viewer frame in lms
"""
self.textbook_upload_page.upload_new_textbook()
self.textbook_view_page.visit()
self.textbook_view_page.switch_to_pdf_frame(self)
self.textbook_view_page.a11y_audit.config.set_rules({
'ignore': [
'color-contrast', # will always fail because pdf.js converts pdf to divs with transparent text
'html-lang', # AC-504
'meta-viewport', # AC-505
'skip-link', # AC-506
'link-href', # AC-507
],
})
self.textbook_view_page.a11y_audit.check_for_accessibility_errors()
|
shabab12/edx-platform
|
common/test/acceptance/tests/studio/test_studio_textbooks.py
|
Python
|
agpl-3.0
| 3,445
|
[
"VisIt"
] |
d0ae586c7bc2aeb9107886e1042415fec4ed5b1c1029757a20f5b6a4af4a5f2f
|
###
### This script can be run with pvpython rather than pvbatch, as it does not
### need mpi.
###
### Purpose:
###
### Read all timesteps of the halos point files (vtu) which have fewer
### halo points and the raw-particles (vtu) which have many more halo points.
### For the few halo points, we create a glyph filter, scaling the glyph size
### and coloring by the magnitude density. For the many halo points, we create
### 5 threshold filters, whose ranges are dynamically reset at each time step
### so that each threshold contains roughly the same number of points. These
### thresholds are also colored by velocity magnitude, which we have to compute
### specifically in this case because the data files don't have that array to
### begin with.
###
### Input Files:
###
### 1) DataExploration/Output/Cosmology/data/run-1/halos-%d.vtu
### 2) DataExploration/Data/Cosmology/data/analysis/raw-particles/499-%d.vtu
###
### Output Files:
###
### A cinema dataset into: DataExploration/Output/Cosmology/volume_time
###
import os, sys, math
from paraview.simple import *
from paraview import data_exploration as wx
from cinema_utilities import *
# Need this one to directly rescale transfer functions to data range
from vtkPVServerManagerRenderingPython import *
LoadDistributedPlugin('RGBZView', ns=globals())
# -----------------------------------------------------------------------------
# Helper methods
# -----------------------------------------------------------------------------
def buildSpectralLUT(name):
return GetLookupTableForArray( name,
1,
RGBPoints = [0.0, 0.368627, 0.309804, 0.635294, 90.00072000576006, 0.196078, 0.533333, 0.741176, 180.00144001152012, 0.4, 0.760784, 0.647059, 270.0021600172801, 0.670588, 0.866667, 0.643137, 360.00288002304023, 0.901961, 0.960784, 0.596078, 450.00360002880024, 1.0, 1.0, 0.74902, 540.0043200345602, 0.996078, 0.878431, 0.545098, 630.0050400403203, 0.992157, 0.682353, 0.380392, 720.0057600460805, 0.956863, 0.427451, 0.262745, 810.0064800518404, 0.835294, 0.243137, 0.309804, 900.0, 0.619608, 0.00392157, 0.258824],
NanColor = [0.500008, 0.0, 0.0],
ColorSpace = 'RGB',
ScalarRangeInitialized=1.0,
LockScalarRange=0)
# -----------------------------------------------------------------------------
# Output configuration
# -----------------------------------------------------------------------------
path_root = '/media/scott/CINEMA FAT'
output_working_dir = os.path.join(path_root, 'DataExploration/Output/Cosmology/point_thresholds/')
title = "499-2 - Probe the Cosmic Structure of the Dark Universe"
description = """
In the standard model of cosmology, dark energy and dark matter
together account for 95 percent of the mass energy of the universe;
however, their ultimate origin remains a mystery. The Argonne
Leadership Computing Facility (ALCF) will allocate significant
supercomputing resources towards unraveling one of the key
puzzles-the nature of the dark energy causing the universe to
accelerate its current expansion rate.
"""
analysis = wx.AnalysisManager( output_working_dir, title, description,
author="Salman Habib and Katrin Heitmann",
code_name="HACC",
code_version="HACC 0.1",
cores=128)
# -----------------------------------------------------------------------------
# Image size, camera angles, and view information
# -----------------------------------------------------------------------------
resolution = 500
#phi_angles = [ float(r) for r in range(0, 360, 15)]
#theta_angles = [ -60.0, -45.0, -30.0, -15.0, 0, 15.0, 30.0, 45.0, 60.0 ]
# A small number of camera angles for when we're testing our pipeline and such
phi_angles = [ 180.0, 270.0 ]
theta_angles = [ 15.0, 45.0 ]
distance = 420
rotation_axis = [0.0, 1.0, 0.0]
center_of_rotation = [64.69269952178001, 65.57341161370277, 65.48730944097042]
# -----------------------------------------------------------------------------
# Set up lists of files to process
# -----------------------------------------------------------------------------
# List of halo files to include
halo_file_format = 'DataExploration/Data/Cosmology/data/run-1/halos-%d.vtu'
halo_file_names = [ os.path.join(path_root, halo_file_format % i) for i in xrange(0, 451, 50) ]
# List of point density files to include
# Data/Cosmology/data/analysis/raw-particles
points_file_format = 'DataExploration/Data/Cosmology/data/analysis/raw-particles/499-%d.vtu'
points_file_names = [ os.path.join(path_root, points_file_format % i) for i in xrange(0, 451, 50) ]
# -----------------------------------------------------------------------------
# Create data exploration
# -----------------------------------------------------------------------------
id = 'composite'
title = '3D composite'
description = "Show Threshold Densities and Halos."
analysis.register_analysis(id, title, description, '{time}/{theta}/{phi}/{filename}', wx.CompositeImageExporter.get_data_type())
fng = analysis.get_file_name_generator(id)
# -----------------------------------------------------------------------------
# Set up pipelines
# -----------------------------------------------------------------------------
halos_reader = XMLUnstructuredGridReader( FileName=halo_file_names )
glyph = Glyph(Input = halos_reader, GlyphType="Sphere", GlyphTransform="Transform2" )
glyph.Scalars = ['POINTS', 'magnitude']
glyph.ScaleFactor = 0.005
glyph.ScaleMode = 'scalar'
glyph.GlyphMode = 'All Points'
glyph.GlyphType.ThetaResolution = 16
glyph.GlyphType.PhiResolution = 16
# Create reader for large halo particle files
point_reader = XMLUnstructuredGridReader( FileName=points_file_names )
outline = Outline(Input=point_reader)
outlineRep = Show(outline)
outlineRep.ColorArrayName = [None, '']
outlineRep.DiffuseColor = [0.0, 0.0, 0.0]
outlineRep.LineWidth = 1.0
# create a new 'Calculator'
calculator1 = Calculator(Input=point_reader)
calculator1.ResultArrayName = 'magnitude'
calculator1.Function = 'sqrt((vx*vx)+(vy*vy)+(vz*vz))'
# create some new 'Threshold' filters
#den1 = Threshold(Input = calculator1, Scalars=['POINTS', 'magnitude'], ThresholdRange=[900, 17000] )
den1 = Threshold(Input = calculator1, Scalars=['POINTS', 'magnitude'], ThresholdRange=[900, 3601] )
den2 = Threshold(Input = calculator1, Scalars=['POINTS', 'magnitude'], ThresholdRange=[700, 900] )
den3 = Threshold(Input = calculator1, Scalars=['POINTS', 'magnitude'], ThresholdRange=[500, 700] )
den4 = Threshold(Input = calculator1, Scalars=['POINTS', 'magnitude'], ThresholdRange=[300, 500] )
den5 = Threshold(Input = calculator1, Scalars=['POINTS', 'magnitude'], ThresholdRange=[100, 300] )
# -----------------------------------------------------------------------------
# Representations
# -----------------------------------------------------------------------------
###
### The second argument here is a View, but don't bother passing one in with
### your background color of choice set. The View you pass in will get clobbered
### with a custom one. Instead, use the camera handlers get_view() method to
### get your hands on the custom view created in the CompositeImageExporter, set
### your background color on that, then put it back using set_view(). See the
### "Customize view" section, below.
###
camera_handler = wx.ThreeSixtyCameraHandler(fng, None, phi_angles, theta_angles, center_of_rotation, rotation_axis, distance)
# Arguments: file_name_generator, view, focal_point, view_up, camera_position
#camera_handler = wx.FixCameraHandler(fng, None, [64.693, 65.573, 65.487], [0, 0, 1], [64.693, 65.573 + 420, 65.487] )
points_colors = [('POINT_DATA', 'magnitude')]
# These three arrays must be parallel to each other
composite_list = [ outline, glyph, den1, den2, den3, den4, den5 ]
composite_description = [ {'name': 'Outline'}, {'name': 'Halos'}, {'name': '[900, +]', 'parent':'Magnitude Thresholds'}, {'name': '[700, 900]', 'parent':'Magnitude Thresholds'}, {'name': '[500, 700]', 'parent':'Magnitude Thresholds'}, {'name': '[300, 500]', 'parent':'Magnitude Thresholds'}, {'name': '[100, 300]', 'parent':'Magnitude Thresholds'} ]
composite_colors = [ [('SOLID_COLOR', [0.0, 0.0, 0.0])], points_colors, points_colors, points_colors, points_colors, points_colors, points_colors ]
luts = {
"magnitude" : buildSpectralLUT('magnitude')
}
# -----------------------------------------------------------------------------
# Data exploration
# -----------------------------------------------------------------------------
exporter = wx.CompositeImageExporter(fng,
composite_list,
composite_colors,
luts,
camera_handler,
[resolution,resolution],
composite_description,
format='png') # 0, 0)
exporter.set_analysis(analysis)
# -----------------------------------------------------------------------------
# Customize view and some representations
# -----------------------------------------------------------------------------
exporter.view.Background = [1.0, 1.0, 1.0]
exporter.view.OrientationAxesVisibility = 0
exporter.view.CenterAxesVisibility = 0
calculatorRepr = Show(calculator1, exporter.view)
den1Repr = Show(den1, exporter.view)
den2Repr = Show(den2, exporter.view)
den3Repr = Show(den3, exporter.view)
den4Repr = Show(den4, exporter.view)
den5Repr = Show(den5, exporter.view)
# -----------------------------------------------------------------------------
# Processing
# -----------------------------------------------------------------------------
analysis.begin()
for time in range(0, len(halo_file_names), 1):
GetAnimationScene().TimeKeeper.Time = float(time)
fng.update_active_arguments(time=time)
print "moving to timestep ",time
# The point of these two lines is to allow the histogram filter to be
# updated for the time step and then to reset the ranges on the thresholds
# so that for this time step, each threshold contains roughly the same
# number of points.
UpdatePipeline(time)
#eachTimePipelineChanges(time, histogram1, calculator1, [den1, den2, den3, den4, den5])
# Now rescale the transfer function for the 'magnitude' array for the data
# range of the current step. You could argue this isn't what you'd want, but
# it results in more colorful data
#vtkSMPVRepresentationProxy.RescaleTransferFunctionToDataRange(calculatorRepr.SMProxy)
vtkSMTransferFunctionProxy.RescaleTransferFunction(den1Repr.SMProxy, 0.0, 900.0, False)
vtkSMTransferFunctionProxy.RescaleTransferFunction(den2Repr.SMProxy, 0.0, 900.0, False)
vtkSMTransferFunctionProxy.RescaleTransferFunction(den3Repr.SMProxy, 0.0, 900.0, False)
vtkSMTransferFunctionProxy.RescaleTransferFunction(den4Repr.SMProxy, 0.0, 900.0, False)
vtkSMTransferFunctionProxy.RescaleTransferFunction(den5Repr.SMProxy, 0.0, 900.0, False)
# Trigger the exporter to write out the next batch of images
exporter.UpdatePipeline(time)
analysis.end()
|
Kitware/cinema
|
scripts/data_generation/cosmology/hacc_composite_time.py
|
Python
|
bsd-3-clause
| 11,672
|
[
"ParaView"
] |
bde61b01933e131b2eacffb8721e7882a54b3c865aa62d73ccae2be711137880
|
#!/usr/bin/env python
import sys
lines_gaff = sys.stdin.readlines()
improper_style_name = 'cvff'
sys.stdout.write(' write_once("In Settings") {\n')
for i in range(0, len(lines_gaff)):
line = lines_gaff[i]
atypes = line[:11].split('-')
atype1 = atypes[0].strip()
atype2 = atypes[1].strip()
atype3 = atypes[2].strip()
atype4 = atypes[3].strip()
at1 = atype1.replace('X','*')
at2 = atype2.replace('X','*')
at3 = atype3.replace('X','*')
at4 = atype4.replace('X','*')
impropertype = '@improper:'+atype1+'-'+atype2+'-'+atype3+'-'+atype4
#sys.stdout.write(' '+impropertype+' @atom:'+at1+' @atom:'+at2+' @atom:'+at3+' @atom:'+at4+'\n')
# Oops. This is incorrect.
# In moltemplate, the central atom is the first atom,
# In "gaff.dat", the central atom is the third atom
# http://archive.ambermd.org/201307/0519.html
#impropertype = '@improper:'+atype3+'-'+atype1+'-'+atype2+'-'+atype4
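# For reference, an improper line in "gaff.dat" looks roughly like the following
# (an illustrative sketch only, not copied from any particular file):
#   X -X -c -o    1.1    180.    2.    JCC,7,(1986),230
# i.e. four dash-separated atom types in the first 11 columns, followed by the
# force constant Kn, the phase dn (0 or 180) and the periodicity n, which is
# what the parsing below assumes.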
tokens= line[11:].split()
Kn = float(tokens[0])
dn = float(tokens[1])
n = int(float(tokens[2]))
comments=' '.join(tokens[3:])
if len(comments.strip()) > 0:
comments = ' # ' + comments
if (dn < 0.001):
sys.stdout.write(' improper_coeff '+impropertype+' '+improper_style_name+' '+str(Kn)+' 1 '+str(n)+comments+'\n')
elif (179.999 < abs(dn) < 180.001):
sys.stdout.write(' improper_coeff '+impropertype+' '+improper_style_name+' '+str(Kn)+' -1 '+str(n)+comments+'\n')
else:
sys.stderr.write('Error: Illegal bondImproper parameters:\n'
' As of 2013-8-03, LAMMPS does not have an improper style\n'
' which can handle impropers with gamma != 0 or 180\n')
exit(-1)
sys.stdout.write(' } # (end of improper_coeffs)\n')
sys.stdout.write('\n')
sys.stdout.write(' write_once("Data Impropers By Type (gaff_imp.py)") {\n')
for i in range(0, len(lines_gaff)):
line = lines_gaff[i]
atypes = line[:11].split('-')
atype1 = atypes[0].strip()
atype2 = atypes[1].strip()
atype3 = atypes[2].strip()
atype4 = atypes[3].strip()
at1 = atype1.replace('X','*')
at2 = atype2.replace('X','*')
at3 = atype3.replace('X','*')
at4 = atype4.replace('X','*')
impropertype = '@improper:'+atype1+'-'+atype2+'-'+atype3+'-'+atype4
sys.stdout.write(' '+impropertype+' @atom:'+at1+' @atom:'+at2+' @atom:'+at3+' @atom:'+at4+'\n')
# The improper-angle is the angle between the planes
# defined by at1,at2,at3, and at2,at3,at4
# and we list the atoms in this order.
# NOTE: In "gaff.dat", the central atom is the third atom (at3)
# so we have to take this into account when matching atom order.
# http://archive.ambermd.org/201307/0519.html
sys.stdout.write(' } # (end of Impropers By Type)\n')
sys.stdout.write('\n')
# NOTE: AMBER documentation is not clear how the improper angle is defined.
# It's not clear if we should be using the dihedral angle between
# planes I-J-K and J-K-L. As of 2014-4, improper_style cvff does this.
# Even if we create improper interactions with the angle defined between
# the wrong planes, at least the minima should be the same
# (0 degrees or 180 degrees).
# So I'm not too worried we are getting this detail wrong long as
# we generate new impropers realizing that the 3rd atom (K) is the
# central atom (according to AMBER conventions).
#
# http://structbio.vanderbilt.edu/archives/amber-archive/2007/0408.php
#
# Currently, we only apply improper torsional angles for atoms
# in a planar conformations. Is it clear?
# Junmei
|
yidongxiainl/lammps
|
tools/moltemplate/moltemplate/force_fields/convert_AMBER_files_to_LT_files/amberparm_improper_to_lt.py
|
Python
|
gpl-2.0
| 3,681
|
[
"Amber",
"LAMMPS"
] |
32b11a134a5fb43ed01b10259228bf125a05432211d96037bf78f9d049c24745
|
#-*-coding:utf-8-*-
"""
@package butility.base
@brief Most fundamental base types
@author Sebastian Thiel
@copyright [GNU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
from __future__ import unicode_literals
from __future__ import division
from butility.future import (with_metaclass,
PY2)
__all__ = ['Error', 'Interface', 'Meta', 'abstractmethod',
'NonInstantiatable', 'is_mutable', 'smart_deepcopy', 'wraps', 'GraphIterator',
'Singleton', 'LazyMixin', 'capitalize', 'equals_eps', 'tagged_file_paths', 'TRACE',
'set_log_level', 'partial', 'parse_key_value_string', 'parse_string_value', 'size_to_int',
'frequncy_to_seconds', 'int_to_size_string', 'load_package', 'load_files', 'load_file',
'ProxyMeta']
from functools import (wraps,
partial)
import logging
import os
import sys
import imp
from abc import (abstractmethod,
ABCMeta)
from copy import deepcopy
from itertools import chain
from collections import deque
from inspect import isroutine
from .path import Path
log = logging.getLogger('butility.base')
# ==============================================================================
# @name Constants
# ------------------------------------------------------------------------------
# @{
container_types = (list, set, tuple)
# The TRACE log level, between DEBUG and INFO
TRACE = int((logging.INFO + logging.DEBUG) / 2)
# -- End Constants -- @}
# ==============================================================================
# @name Logging
# ------------------------------------------------------------------------------
# @{
# Adjust logging configuration
# It's basically setup that will be there whenever someone uses the basic parts of the core package.
# That's how it should be though: TRACE should be there, and code relies on it.
setattr(logging, 'TRACE', TRACE)
logging.addLevelName(TRACE, 'TRACE')
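# With the level registered, callers can emit TRACE records in the usual way, e.g.
# (a minimal usage sketch):
#   log.log(logging.TRACE, "very verbose message")
#   set_log_level(log, logging.TRACE)   # lower the logger and its handlers to TRACE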
def set_log_level(logger, level):
"""Set the loggers and its handlers log level to the given one"""
for handler in logger.handlers:
handler.setLevel(level)
logger.setLevel(level)
# -- End Logging -- @}
# ==============================================================================
# \name Exceptions
# ------------------------------------------------------------------------------
# Basic Exception Types
# \{
class Error(Exception):
"""Most foundational framework exception"""
__slots__ = ()
# end class Error
# -- End Exceptions -- \}
# ==============================================================================
# @name Routines
# ------------------------------------------------------------------------------
# @{
def is_mutable(value):
"""Recursively check if the given value is mutable.
A value is considered mutable if at least one contained value is mutable
@param value a possibly nested value of built-in types
@return true if value is mutable"""
if isinstance(value, (str, int, float, type(None))):
return False
# end check immutable
if isinstance(value, (list, dict)):
return True
# end check mutable
if isinstance(value, tuple):
for item in value:
if is_mutable(item):
return True
# end abort recursion if item is mutable
# end for each item to check in tuple
# end handle tuple value
return False
def smart_deepcopy(value):
"""Create a deep copy of value only if this is necessary as its value has mutable parts.
@return a deep copy of value if value was mutable
@note checking for its mutability will cost additional time - it's a trade-off between memory and
CPU cycles"""
if is_mutable(value):
return deepcopy(value)
return value
def capitalize(self):
"""@return self with first letter capitalized"""
return self[0].upper() + self[1:]
def equals_eps(float_left, float_right, epsilon=sys.float_info.epsilon):
"""@return True if float_left equals float_right within an error of epsilon"""
return abs(float_left - float_right) <= epsilon
def parse_string_value(string):
"""@return the actual numeric instance the value string represents. May be a list, if it starts
with '['."""
if string.startswith('['):
try:
return eval(string)
except Exception:
raise ValueError("Failed to parse '%s' as a list" % (string))
# end handle conversion
# end handle lists
if string in ('on', 'yes', 'true', 'True'):
return True
if string in ('off', 'no', 'false', 'False'):
return False
# more conversions are not required, as they are handled by the schema
return string
def parse_key_value_string(string, separator='='):
"""@return tuple(key, value), whereas key is what's on the left side of the separator, and value
is either a numerical value, string, or list of scalars.
If separator is not in string, value will be None
@param string the k{separator}v string to parse
@param separator
@throws ValueError if string is malformatted"""
if separator not in string:
return string, None
k, v = string.split(separator, 1)
return k, parse_string_value(v)
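# A short behavioural sketch of the two parsers above (illustrative values only):
#   parse_key_value_string('threads=4')       -> ('threads', '4')     # plain strings stay strings
#   parse_key_value_string('verbose=true')    -> ('verbose', True)
#   parse_key_value_string('sizes=[1, 2, 3]') -> ('sizes', [1, 2, 3])
#   parse_key_value_string('flag')            -> ('flag', None)       # no separator present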
# ==============================================================================
# \name Filesystem Utilities
# ------------------------------------------------------------------------------
# \{
def tagged_file_paths(directory, taglist, pattern=None):
"""Finds tagged files in given directories and return them.
The files retrieved can be files like "file.ext" or can be files that contain tags. Tags are '.'
separated tokens that are to be matched with the tags in taglist in order.
All tags must match to have it returned by this function.
Suppose you have two paths, one is a global one in a read-only location,
another is a local one in the user's home.
The list of file-paths (bapp.path instances) returned would be all matching files from the global path and
all matching files from the local one, sorted such that the file with the smallest amount
of tags come first, files with more tags (more specialized ones) will come after that.
@param directory iterable of directory paths to look in for files, or a single directory
@param taglist list or tuple of tags, like a tag for the operating system, or the user name, e.g.
('win', 'project', 'maya')
@param pattern simple fnmatch pattern as used for globs or a list of them (allowing to match several
different patterns at once)
@return list of matching file paths (as mrv Path)
"""
log.debug('obtaining tagged files from %s, tags = %s', directory, ', '.join(taglist))
# verify input
###############
directory_list = directory
if not isinstance(directory, container_types):
directory_list = [directory]
# end convert to type we require
pattern_list = pattern
if not isinstance(pattern, container_types):
pattern_list = [pattern]
# end convert pattern type
# GET ALL FILES IN THE GIVEN DIRECTORY_LIST
########################################
matched_files = list()
for folder in directory_list:
for pattern in pattern_list:
matched_files.extend(Path(folder).files(pattern))
# END for each pattern/glob
# end for each directory
# APPLY THE PATTERN SEARCH
############################
tag_match_list = list()
for tagged_file in sorted(matched_files):
filetags = os.path.split(tagged_file)[1].split('.')[1:-1]
# match the tags - take the file if all can be found
num_matched = 0
for tag in taglist:
if tag in filetags:
num_matched += 1
if num_matched == len(filetags):
tag_match_list.append((num_matched, tagged_file))
# end for each tagged file
out_files = list()
for _, tagged_file in sorted(tag_match_list):
out_files.append(tagged_file)
# end for each sorted tag
return out_files
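# A usage sketch with purely hypothetical paths and tags: given the files
# config.yaml, config.linux.yaml and config.linux.maya.yaml in /etc/myapp,
#   tagged_file_paths('/etc/myapp', ('linux', 'maya'), pattern='*.yaml')
# would return all three paths, least specific (fewest tags) first.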
def load_package(package_directory, module_name):
"""unconditionally Imports a package, which is described by a path to a directory
@param package_directory a folder containing an __init__.py[co] file
@param module_name the name of the module in sys.modules
@return the imported module object"""
imp.load_module(module_name, None, str(package_directory), ('', '', imp.PKG_DIRECTORY))
return sys.modules[module_name]
def _load_files(path, files, on_error):
"""load all python \a files from \a path
@return list of loaded files as full paths"""
res = list()
def py_filter(f):
return f.endswith('.py') and not \
f.startswith('__')
# end filter
for filename in filter(py_filter, files):
py_file = os.sep.join([path, filename])
(mod_name, _) = os.path.splitext(os.path.basename(py_file))
try:
load_file(py_file, mod_name)
except Exception:
log.error("Failed to load %s from %s", mod_name, py_file, exc_info=True)
on_error(py_file, mod_name)
else:
log.info("loaded %s into module %s", py_file, mod_name)
res.append(py_file)
# end handle result
# end for each file to load
return res
def load_files(path, recurse=False, on_error=lambda f, m: None):
"""Load all .py files found in the given directory, or load the file it points to
@param path either path to directory, or path to py file.
@param recurse if True, path will be searched for usable files recursively
@param on_error f(py_file, module_name) => None to perform an action when importing a module
fails. It may raise to abort the entire operation. Note that an exception is set when called.
@return a list of files loaded successfully"""
# if we should recurse, we just use the standard dirwalk.
# we use topdown so top directories should be loaded before their
# subdirectories and we follow symlinks, since it seems likely that's
# what people will expect
res = list()
path = Path(path)
if path.isfile():
res += _load_files(path.dirname(), [path.basename()], on_error)
else:
seen = None
for seen, (path, dirs, files) in enumerate(os.walk(path, topdown=True, followlinks=True)):
res += _load_files(path, files, on_error)
if not recurse:
break
# end handle recursion
# end for each directory to walk
if seen is None:
log.log(logging.TRACE, "Didn't find any plugin files at '%s'", path)
# end
# end handle file or directory
return res
def load_file(python_file, module_name):
"""Load the contents of the given python file into a module of the given name.
If the module is already loaded, it will be reloaded
@return the loaded module object
@throws Exception any exception raised when trying to load the module"""
imp.load_source(module_name, str(python_file))
return sys.modules[module_name]
# -- End Filesystem Utilities -- @}
# -- End Routines -- @}
# ==============================================================================
# \name Meta-Classes
# ------------------------------------------------------------------------------
# Our basic meta classes which allow us to manipulate all class level functions
# at will to automated otherwise tedious processes.
# \{
class Meta(ABCMeta):
"""A base class for all other meta-classes used in the @ref bapp package.
It provides facilities to automatically wrap methods into decorators which
perform certain tasks, like additional logging for improved debugging.
* All subclasses of Interface are put into bapp as well, allowing their access
through bapp.InterfaceName.
* Puts new types into bapp if the type itself (not its subtype) has the 'place_into_root_package' set to True
"""
# -------------------------
# @name Subclass Interface
# Methods for use by subclasses
# @{
@classmethod
def _class_attribute_value(cls, clsdict, bases, attribute):
"""@return value found at clsdict[attribute] or bases.mro().__dict__[attribute] in standard search
order, or None if nothing was found.
@note useful if you store information for digestion by your metaclass on the type itself, or
on base classes of that type. This method basically emulates inheritance.
@param cls
@param clsdict
@param bases
@param attribute string identifying the attribute in the class dicts to look at"""
def iterate_clsdicts():
for base in bases:
for mro_cls in base.mro():
yield mro_cls.__dict__
# end for each base
# end for each
# iterate top down
for cls_dict in reversed(list(chain(iterate_clsdicts(), (clsdict, )))):
rval = cls_dict.get(attribute)
if rval:
return rval
# end for each clsdict to iterate
return None
# -- End Subclass Interface -- @}
class ProxyMeta(Meta):
"""Redirect all calls as defined in first base class to the configured proxy member.
It allows you to aggregate existing implementations while overriding only specific methods, which is useful
to add or adjust behaviour generally, without having to alter existing implementations or create new ones.
@note this meta-class will only be active on the type using this metaclass. Therefore, subtypes will not be proxied
again, which makes no sense here"""
# -------------------------
# @name Configuration
# @{
# Member to which to redirect calls, such as getattr(self._proxy, name)(*args, **kwargs)
proxy_class_attr = '_proxy_attr'
# An attribute with an iterable of names of read-write methods
# Subclasses may then implement them differently
rw_methods_class_attr = '_rw_methods_'
# Class to use to obtain a list of methods to implement, as provided on the type using this meta-class
# If it is None, the first base (which is not a metaclass) will be used automatically
type_to_implement_attr = '_proxy_type'
# As above, but allows the meta-class to define such a type. It will only be used if the type we are building
# doesn't set its own type to implement, using the type_to_implement_attr
type_to_implement = None
# -- End Configuration -- @}
# -------------------------
# @name Subclass Interface
# @{
@classmethod
def _create_method(cls, method_name, is_readonly, proxy_attr):
"""@return a new method named method_name that does not alter it's instance
@note all additional arguments are mainly for your information
@param cls this metaclass instance
@param method_name name of method that is to be created
@param is_readonly if True, the method must not change the underlying object
@param proxy_attr the name of the attribute on instance that keeps the proxy instance."""
def func(instance, *args, **kwargs):
return getattr(getattr(instance, proxy_attr), method_name)(*args, **kwargs)
func.__name__ = method_name
return func
@classmethod
def _is_routine(cls, name, candidate):
"""@return True if this item is a routine we should proxy in the newly created type.
@note this method should implement all filtering needed, and will get all members of the
dict of the type we are to implement."""
return not name.startswith('_') and isroutine(candidate)
# -- End Subclass Interface -- @}
def __new__(metacls, clsname, bases, clsdict):
"""Create a proxy-method for every method we have to re-implement if it is not overridden in the
derived class"""
found = False
# In py3, metatypes are bases, in py2 they are sitting in a special class attribute
if PY2:
found |= issubclass(clsdict.get('__metaclass__', type), metacls)
else:
for base in bases:
if base and issubclass(base, metacls):
found = True
break
# end
# end for each base to check
# end
# If we are not creating a direct subtype of this meta-class, don't do anything as it makes no sense
# to have a multi-proxy
if not found:
return super(ProxyMeta, metacls).__new__(metacls, clsname, bases, clsdict)
# end
proxy_attr = metacls._class_attribute_value(clsdict, bases, metacls.proxy_class_attr)
assert proxy_attr, "A proxy attribute must be set in member %s" % metacls.proxy_class_attr
rw_method_names = metacls._class_attribute_value(clsdict, bases, metacls.rw_methods_class_attr) or tuple()
type_to_implement = metacls._class_attribute_value(clsdict, bases, metacls.type_to_implement_attr) or \
metacls.type_to_implement
if type_to_implement is None:
for base in bases:
if not issubclass(base, Meta):
type_to_implement = base
break
# end
# end for each base
# end search type to implement in bases
if type_to_implement is None:
msg = "Couldn't find type to implement, neither on new type '%s'" % clsname
msg += ", nor on the meta cls, or in the new type's bases"
raise AssertionError(msg)
# end
for name, value in type_to_implement.__dict__.items():
if not metacls._is_routine(name, value) or name in clsdict:
continue
# for now, just create a simple varargs method that allows everything
# Could use new.code|new.function to do it dynamically, or make code to eval ... its overkill though
clsdict[name] = metacls._create_method(name, name not in rw_method_names, proxy_attr)
# end for each method to check for
return super(ProxyMeta, metacls).__new__(metacls, clsname, bases, clsdict)
# end class ProxyMeta
# end class Meta
# -- End Meta-Classes -- \}
# ==============================================================================
# \name Mixins
# ------------------------------------------------------------------------------
# A category of classes from which you can derive to add a certain interface
# to your type. You might have to implement some protocol methods though,
# depending on the actual mixin.
# \{
class LazyMixin(object):
"""Base class providing an interface to lazily retrieve attribute values upon
first access. This is efficient as objects can be created without causing
overhead at creation time, delaying necessary overhead to the time the
respective attribute is actually used.
If slots are used, memory will only be reserved once the attribute
is actually accessed and retrieved the first time. All future accesses will
return the cached value as stored in the Instance's dict or slot.
Here is how you implement your subtype
@snippet bapp/tests/doc/test_examples.py LazyMixinExample Implementation
In code, you can use the lazy attributes natively, its entirely transparent
to the caller.
Ideally, this system is used for internal attributes which will be set on first
use, maybe by reading from a file or a slow device.
@snippet bapp/tests/doc/test_examples.py LazyMixinExample Example
"""
__slots__ = tuple()
def __getattr__(self, attr):
"""Whenever an attribute is requested that we do not know, we allow it
to be created and set. Next time the same attribute is requested, it is simply
returned from our dict/slots."""
self._set_cache_(attr)
# will raise in case the cache was not created
return object.__getattribute__(self, attr)
def _set_cache_(self, attr):
"""This method should be overridden in the derived class.
It should check whether the attribute named by `attr` can be created
and cached. Do nothing if you do not know the attribute or call your subclass'
_set_cache_ method
The derived class may create as many additional attributes as it deems
necessary."""
pass
def _clear_cache_(self, lazy_attributes):
"""Delete all of the given lazy_attributes from this instance.
This will force the respective cache to be recreated
@param lazy_attributes iterable of names of attributes which are to be deleted"""
for attr in lazy_attributes:
try:
delattr(self, attr)
except AttributeError:
pass
# end ignore non-existing keys
# end for each attribute
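# A minimal subclass sketch for LazyMixin (hypothetical type and helper, for
# illustration only):
# class Config(LazyMixin):
#     __slots__ = ('data',)
#     def _set_cache_(self, attr):
#         if attr == 'data':
#             self.data = read_config_from_disk()   # hypothetical expensive call, runs once
#         else:
#             super(Config, self)._set_cache_(attr)
# Afterwards, Config().data triggers _set_cache_ on first access and returns the
# cached slot value on every later access.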
# -- End Mixins -- \}
# ==============================================================================
# \name Basic Types
# ------------------------------------------------------------------------------
# Implementations for types suitable to serve as base for derived types
# \{
class Interface(with_metaclass(Meta, object)):
"""base class for all interfaces"""
# Slots help to protect against typos when assigning variables, keep instances small, and document the
# types member variables
__slots__ = tuple()
def supports(self, interface_type):
"""@return True if this instance supports the interface of the given type
@param interface_type type of the interface/class you require this instance to be derived from, or a
tuple of interfaces or classes
@note useful if you only have a weak reference of your interface instance
or proxy which is a case where the ordinary `isinstance(obj, iInterface)`
will not work"""
return isinstance(self, interface_type)
class NonInstantiatable(object):
"""A mixin which will makes it impossible to instantiate derived types
@throws TypeError if someone tries to create an instance"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
"""Prevents instantiation"""
raise TypeError("This type cannot be instantiated")
# end class NonInstantiatable
class Singleton(object):
""" Singleton classes can be derived from this class,
you can derive from other classes as long as Singleton comes first (and class doesn't override __new__) """
def __new__(cls, *p, **k):
# explicitly query the classes dict to allow subclassing of singleton types.
# Querying with hasattr would follow the inheritance graph
if '_the_instance' not in cls.__dict__:
cls._the_instance = super(Singleton, cls).__new__(cls)
return cls._the_instance
# end class Singleton
class GraphIterator(with_metaclass(Meta, object)):
"""A generic, none-recursive implementation of a graph-iterator, which is able to handle cycles.
Its meant to be subclassed to make the interface more approachable
@attention this approach is only useful if you don't care about the order or of your nodes are able
to provide all the information you like right away (like information about the parent)
@todo add a test for this type - its not even indirectly used yet. Alternatively, remove it if its not used
by anybody"""
__slots__ = ()
# -------------------------
# @name Constants
# @{
upstream = 'upstream' # a direction towards the root
downstream = 'downstream' # a direction towards children
directions = [upstream, downstream]
breadth_first = 'breadth_first' # visit every node in each level of a tree first
depth_first = 'depth_first' # traverse along each branch to each leaf node, and backtrack
traversal_types = [breadth_first, depth_first]
# -- End Constants -- @}
# -------------------------
# @name Configuration
# @{
# if True, the root of the iteration will not be returned, otherwise it will.
skip_root_node = False
# visit_once if True, items will only be returned once, although they might be encountered
# several times if there are loops, for instance, or cross-overs. If you have self-loops, this is the only
# way to prevent an endless loop.
# @note this costs time as a tracking set has to be kept and updated, so you should set it as required.
# It's enabled by default to prevent costly bugs - turn it off if you do cycle checks yourself
visit_once = True
# max_depth define at which level the iteration should not go deeper
# - if -1, there is no limit
# - if 0, you would only get root_item
# + e.g. if 1, you would only get the root_item and the first level of predecessors/successors
max_depth = -1
# -- End Configuration -- @}
# -------------------------
# @name Subclass Methods
# These methods are to be implemented or customized by subclasses
# @{
@abstractmethod
def _successors(self, node):
"""@return an iterable of successor nodes (i.e. output nodes) of the given node"""
@abstractmethod
def _predecessors(self, node):
"""@return an iterable of predecessor nodes (i.e. input nodes) of the given node"""
def _stop_iteration(self, node, depth):
"""
@return True for `node` at `depth` to stop the search
in that direction. The respective node will not be returned either."""
return False
def _prune_node(self, node, depth):
"""@return True if `node` at `depth` be pruned from result, so that it is not returned"""
return False
def _iter_(self, root_node, direction, traversal_type):
"""
@return iterator yielding tuples of (node, level), where the level indicates the number of nodes between
the root node and the returned `node`.
@param root_node the node with which to start the iteration
@param direction specifies search direction, either `upstream` or `downstream`, which are provided
as constants on this type.
@param traversal_type one of the constants in `traversal_types`, either `breadth_first` or `depth_first`
"""
# VERIFY INPUT
assert direction in self.directions, "invalid direction: %s" % direction
assert traversal_type in self.traversal_types, "invalid traversal type: %s" % traversal_type
# PREPARE ALGORITHM
visited = set()
stack = deque()
if traversal_type == self.breadth_first:
add_to_stack = lambda nlist, depth: stack.extendleft((depth, node) for node in reversed(nlist))
# end add_to_stack breadth first
else:
add_to_stack = lambda nlist, depth: stack.extend((depth, node) for node in nlist)
# end obtain add_to_stack function
# adjust function to define direction
if direction == self.downstream:
nodes_in_direction = self._successors
else:
nodes_in_direction = self._predecessors
# end obtain direction
if self.skip_root_node:
add_to_stack(nodes_in_direction(root_node), 1)
else:
stack.append((0, root_node))
# end skip root node from result
stop = self._stop_iteration
prune = self._prune_node
visit_once = self.visit_once
max_depth = self.max_depth
# NON-RECURSIVE SEARCH
while stack:
depth, node = stack.pop() # depth of node, node
if node in visited:
continue
# end handle visit_once
if visit_once:
visited.add(node)
# end update visited
if stop(node, depth):
continue
# end handle stop iteration
if not prune(node, depth):
yield node, depth
# end yield node
# only continue to next level if this is appropriate !
new_depth = depth + 1
if max_depth > -1 and new_depth > max_depth:
continue
# end skip node if depth level is reached
add_to_stack(nodes_in_direction(node), new_depth)
# END for each item on work stack
# -- End Subclass Methods -- @}
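# A minimal subclass sketch (hypothetical node model, for illustration only):
# class DictGraphIterator(GraphIterator):
#     __slots__ = ('graph',)
#     def __init__(self, graph):
#         self.graph = graph      # mapping of node -> list of successor nodes
#     def _successors(self, node):
#         return self.graph.get(node, ())
#     def _predecessors(self, node):
#         return [n for n, children in self.graph.items() if node in children]
# With visit_once left at its default, iterating {'a': ['b', 'c'], 'b': ['c']} downstream
# from 'a' yields ('a', 0) once, then 'b' and 'c' at depth 1.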
# -- End Basic Types -- \}
# ==============================================================================
# @name Unit Conversion Utilities
# ------------------------------------------------------------------------------
# @{
data_unit_multipliers = {
'k': 1024,
'm': 1024 ** 2,
'g': 1024 ** 3,
't': 1024 ** 4,
'p': 1024 ** 5,
'%': 1,
}
time_unit_multipliers = {
's': 1,
'h': 60 ** 2,
'd': 60 ** 2 * 24,
'w': 60 ** 2 * 24 * 7,
'm': 60 ** 2 * 24 * 30,
'y': 60 ** 2 * 24 * 365
}
def size_to_int(size):
"""Converts a size to the respective integer
@param size string like 1M or 2T, or 35.5K
"""
unit = size[-1].lower()
if unit in '0123456789':
return int(size)
# end handle no unit
try:
return int(data_unit_multipliers[unit] * float(size[:-1]))
except KeyError:
raise ValueError("Invalid unit: '%s'" % unit)
# end handle errors gracefully
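# Expected behaviour of size_to_int (illustrative values):
#   size_to_int('35.5K') -> 36352            (35.5 * 1024)
#   size_to_int('2T')    -> 2199023255552    (2 * 1024**4)
#   size_to_int('512')   -> 512              (no unit suffix)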
def frequncy_to_seconds(time_string):
"""@return seconds identified by the given time-string, like 14s, or 14w
@throw ValueError"""
try:
return int(time_string[:-1]) * time_unit_multipliers[time_string[-1].lower()]
except (KeyError, ValueError):
raise ValueError(
"Could not parse '%s' - should be something like <integer><unit>, like 14s, or 12d" % time_string)
# end handle frequency conversion
def int_to_size_string(size):
"""@return a string suitable for input into size_to_int(), scaling dynamically depending on the actual `size`"""
asize = abs(size)
if asize < 1024 ** 2:
divider, unit = 1024, 'K'
elif asize < 1024 ** 3:
divider, unit = 1024 ** 2, 'M'
elif asize < 1024 ** 4:
divider, unit = 1024 ** 3, 'G'
elif asize < 1024 ** 5:
divider, unit = 1024 ** 4, 'T'
else:
divider, unit = 1024 ** 5, 'P'
# end handle sizes
return '%.2f%s' % (size / float(divider), unit)
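# Round-trip sketch: int_to_size_string(36352) yields '35.50K', which size_to_int()
# parses back to 36352.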
# -- End Unit Conversion Utilities -- \}
|
Byron/bcore
|
src/python/butility/base.py
|
Python
|
lgpl-3.0
| 30,451
|
[
"VisIt"
] |
8c318a284f6b979d4e2097dddc77eba79204e4fa787d6e1e609e7662fbbe23f9
|
"""
SliceCompareViewerWidget
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkRenderer
from vtk import vtkInteractorStyleUser
from vtk import vtkCellPicker
from vtk import vtkImageMapToColors
from vtk import vtkColorTransferFunction
from vtk import vtkDataSetMapper
from vtk import vtkActor
from vtk import vtkImageMathematics
from PySide.QtGui import QGridLayout
from PySide.QtGui import QWidget
from PySide.QtCore import Signal
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from ui.Interactor import Interactor
from core.vtkDrawing import CreateSquare
from core.vtkDrawing import CreateLine
class SliceCompareViewerWidget(QWidget, Interactor):
"""
SliceCompareViewerWidget shows slices that you can scroll through. Slicing happens
in world coordinates. It can be synced to another slicer widget.
"""
slicePositionChanged = Signal(object)
mouseMoved = Signal(object)
def __init__(self):
super(SliceCompareViewerWidget, self).__init__()
self.renderer = vtkRenderer()
self.renderer.SetBackground(0.0, 0.0, 0.0)
self.renderer.SetLayer(0)
# Overlay renderer which is synced with the default renderer
self.rendererOverlay = vtkRenderer()
self.rendererOverlay.SetLayer(1)
self.rendererOverlay.SetInteractive(0)
self.renderer.GetActiveCamera().AddObserver("ModifiedEvent", self._syncCameras)
self.rwi = QVTKRenderWindowInteractor(parent=self)
self.rwi.SetInteractorStyle(vtkInteractorStyleUser())
self.rwi.GetRenderWindow().AddRenderer(self.renderer)
self.rwi.GetRenderWindow().AddRenderer(self.rendererOverlay)
self.rwi.GetRenderWindow().SetNumberOfLayers(2)
# Set camera to parallel
camera = self.renderer.GetActiveCamera()
camera.SetParallelProjection(1)
# Add new observers for mouse wheel
self.AddObserver(self.rwi, "CharEvent", self.charTyped)
self.AddObserver(self.rwi, "MouseWheelBackwardEvent", self.mouseWheelChanged)
self.AddObserver(self.rwi, "MouseWheelForwardEvent", self.mouseWheelChanged)
self.AddObserver(self.rwi, "MouseMoveEvent", self.mouseMovedEvent, 1)
self.picker = vtkCellPicker()
self.picker.SetTolerance(1e-6)
self.locator = []
self.setStyleSheet("background-color: #333")
layout = QGridLayout()
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.rwi)
self.setLayout(layout)
def _syncCameras(self, camera, ev):
"""
Camera modified event callback. Copies the parameters of
the renderer camera into the camera of the overlay so they
stay synced at all times.
"""
self.rendererOverlay.GetActiveCamera().ShallowCopy(camera)
def charTyped(self, arg1, arg2):
# print arg1.GetKeyCode()
pass
def setLocatorPosition(self, position):
for actor in self.locator:
actor.SetPosition(position[0], position[1], position[2])
def setFixedImageData(self, fixed):
self.fixedImagedata = fixed
def setSlicerWidget(self, fixed, moving):
self.fixedSlicerWidget = fixed
self.movingSlicerWidget = moving
self.slicer = None
def mouseWheelChanged(self, arg1, arg2):
pass
def mouseMovedEvent(self, arg1, arg2):
self.rwi.HideCursor()
x, y = arg1.GetEventPosition()
camera = self.renderer.GetActiveCamera()
cameraFP = list(camera.GetFocalPoint()) + [1.0]
self.renderer.SetWorldPoint(cameraFP[0], cameraFP[1], cameraFP[2], cameraFP[3])
self.renderer.WorldToDisplay()
# Convert the selection point into world coordinates.
self.renderer.SetDisplayPoint(x, y, 1)
self.renderer.DisplayToWorld()
worldCoords = self.renderer.GetWorldPoint()
pickPosition = map(lambda x: x / worldCoords[3], worldCoords[0:-1])
self.mouseMoved.emit(pickPosition)
def adjustTransferFunction(self, transferfunction, lower, upper):
transfer = vtkColorTransferFunction()
val1 = [0.0 for _ in range(6)]
val2 = [0.0 for _ in range(6)]
transferfunction.GetNodeValue(0, val1)
transferfunction.GetNodeValue(1, val2)
val1[0] = lower
val2[0] = upper
transfer.AddRGBPoint(val1[0], val1[1], val1[2], val1[3], val1[4], val1[5])
transfer.AddRGBPoint(val2[0], val2[1], val2[2], val2[3], val2[4], val2[5])
transfer.Build()
return transfer
def updateCompareView(self):
fixedSlice = self.fixedSlicerWidget.slicer.GetResliceOutput()
movingSlice = self.movingSlicerWidget.slicer.GetResliceOutput()
window = self.fixedSlicerWidget.slicer.GetWindow()
level = self.fixedSlicerWidget.slicer.GetLevel()
lower = level - window / 2.0
upper = level + window / 2.0
fixedTransfer = self.adjustTransferFunction(self.fixedSlicerWidget.transfer, lower, upper)
movingTransfer = self.adjustTransferFunction(self.movingSlicerWidget.transfer, lower, upper)
fixedMap = vtkImageMapToColors()
fixedMap.SetInputData(fixedSlice)
fixedMap.SetLookupTable(fixedTransfer)
movingMap = vtkImageMapToColors()
movingMap.SetInputData(movingSlice)
movingMap.SetLookupTable(movingTransfer)
maths = vtkImageMathematics()
maths.SetInputConnection(0, fixedMap.GetOutputPort())
maths.SetInputConnection(1, movingMap.GetOutputPort())
maths.SetOperationToAdd()
maths.Update()
# self.blender = vtkImageBlend()
# self.blender.SetOpacity(0, 0.5)
# self.blender.SetOpacity(1, 0.5)
# self.blender.AddInputConnection(fixedMap.GetOutputPort())
# self.blender.AddInputConnection(movingMap.GetOutputPort())
# redChannel = vtkImageExtractComponents()
# greenChannel = vtkImageExtractComponents()
# blueChannel = vtkImageExtractComponents()
# redChannel.SetInputConnection(self.blender.GetOutputPort())
# greenChannel.SetInputConnection(self.blender.GetOutputPort())
# blueChannel.SetInputConnection(self.blender.GetOutputPort())
# redChannel.SetComponents(0)
# greenChannel.SetComponents(1)
# blueChannel.SetComponents(2)
# redScale = vtkImageShiftScale()
# greenScale = vtkImageShiftScale()
# blueScale = vtkImageShiftScale()
# redScale.SetInputConnection(redChannel.GetOutputPort())
# greenScale.SetInputConnection(greenChannel.GetOutputPort())
# blueScale.SetInputConnection(blueChannel.GetOutputPort())
# redScale.SetScale(2.0)
# greenScale.SetScale(2.0)
# blueScale.SetScale(2.0)
# result = vtkImageAppendComponents()
# result.AddInputConnection(redScale.GetOutputPort())
# result.AddInputConnection(greenScale.GetOutputPort())
# result.AddInputConnection(blueScale.GetOutputPort())
# fixedMap.Update()
# otherMath = vtkImageMathematics()
# otherMath.SetOperationToMax()
# otherMath.AddInputData(maths.GetOutput())
# otherMath.SetConstantK(1.0)
if not hasattr(self, "dataSetMapper"):
self.dataSetMapper = vtkDataSetMapper()
# Do not establish a vtk pipeline connection!
# Otherwise the pipeline will be executed on every render call...
self.dataSetMapper.SetInputData(maths.GetOutput())
if not hasattr(self, "actor"):
self.actor = vtkActor()
self.actor.SetMapper(self.dataSetMapper)
self.renderer.AddViewProp(self.actor)
orig = self.fixedSlicerWidget.slicer.GetOrigin()
self.actor.SetPosition(orig[0], orig[1], orig[2])
if not self.locator:
bounds = self.fixedImagedata.GetBounds()
size = [bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]]
meanSize = sum(size) / len(size)
width = meanSize / 20.0
color = [1.0, 1.0, 1.0]
square = CreateSquare(width, color, 2)
square.GetProperty().SetLineWidth(2)
color = [0.2, 0.2, 0.2]
line1 = CreateLine([0, width / 2.0, 0], [0, 10000, 0], color)
line2 = CreateLine([0, -width / 2.0, 0], [0, -10000, 0], color)
line3 = CreateLine([width / 2.0, 0, 0], [10000, 0, 0], color)
line4 = CreateLine([-width / 2.0, 0, 0], [-10000, 0, 0], color)
self.locator = [square, line1, line2, line3, line4] # , otherSquare]
for actor in self.locator:
self.rendererOverlay.AddViewProp(actor)
def render(self):
self.renderer.Render()
self.rwi.GetRenderWindow().Render()
|
berendkleinhaneveld/Registrationshop
|
ui/widgets/SliceCompareViewerWidget.py
|
Python
|
mit
| 7,860
|
[
"VTK"
] |
1216a66d2c7699bd61d6c41c5338103a2f20d6701e679c304dd80fca48636f9b
|
import tempfile
import os
import subprocess
import shutil
from boto.s3.connection import S3Connection, Bucket, Key
def test_bwa():
work_dir = tempfile.mkdtemp()
create_config(work_dir)
create_manifest(work_dir)
# Call Pipeline
try:
subprocess.check_call(['toil-bwa', 'run',
os.path.join(work_dir, 'jstore'),
'--manifest', os.path.join(work_dir, 'manifest.txt'),
'--config', os.path.join(work_dir, 'config.txt'),
'--retryCount', '1'])
finally:
shutil.rmtree(work_dir)
conn = S3Connection()
b = Bucket(conn, 'cgl-driver-projects')
k = Key(b)
k.key = 'test/ci/ci_test.bam'
k.delete()
def create_config(path):
"""Creates manifest file for test at path"""
fpath = os.path.join(path, 'config.txt')
with open(fpath, 'w') as f:
f.write('ssec:\n'
'library: test\n'
'program_unit: 12345\n'
'platform: ILLUMINA\n'
'rg_line:\n'
'output-dir: s3://cgl-driver-projects/test/ci\n'
'ref: s3://cgl-pipeline-inputs/alignment/ci/hg38_chr6.fa\n'
'amb: s3://cgl-pipeline-inputs/alignment/ci/hg38_chr6.fa.amb\n'
'ann: s3://cgl-pipeline-inputs/alignment/ci/hg38_chr6.fa.ann\n'
'bwt: s3://cgl-pipeline-inputs/alignment/ci/hg38_chr6.fa.bwt\n'
'fai: s3://cgl-pipeline-inputs/alignment/ci/hg38_chr6.fa.fai\n'
'pac: s3://cgl-pipeline-inputs/alignment/ci/hg38_chr6.fa.pac\n'
'sa: s3://cgl-pipeline-inputs/alignment/ci/hg38_chr6.fa.sa\n'
'alt:\n'
'file_size: 1G\n'
'sort: True\n'
'trim: False\n'
'suffix:\n')
def create_manifest(path):
"""Creates config file for test at path"""
fpath = os.path.join(path, 'manifest.txt')
with open(fpath, 'w') as f:
f.write('ci_test\t'
's3://cgl-pipeline-inputs/alignment/ci/r1_trunc.fq.gz\t'
's3://cgl-pipeline-inputs/alignment/ci/r2_trunc.fq.gz\n')
|
jpfeil/toil-scripts
|
src/toil_scripts/bwa_alignment/test/test_bwa_alignment.py
|
Python
|
apache-2.0
| 2,209
|
[
"BWA"
] |
0da8642e5dcd6b94a86d19ccfd31a01fe8ef3c6f9b6e8ee83006ceee1db1ebc0
|
#!/usr/bin/env python
#
#
# This file is part of do_x3dna
#
# Author: Rajendra Kumar
# Copyright (C) 2014-2018 Rajendra Kumar
#
# do_x3dna uses 3DNA package (http://x3dna.org).
# Please cite the original publication of the 3DNA package:
# Xiang-Jun Lu & Wilma K. Olson (2003)
# 3DNA: a software package for the analysis, rebuilding and visualization of
# three-dimensional nucleic acid structures
# Nucleic Acids Res. 31(17), 5108-21.
#
# do_x3dna is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# do_x3dna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with do_x3dna. If not, see <http://www.gnu.org/licenses/>.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#============================================================================
import os
import sys
import argparse
import numpy as np
import dnaMD
description="""DESCRIPTION
===========
Global Deformation Energy of the DNA
This can be used to calculate the Global Deformation Energy of the DNA from the simulations. At first, elastic matrix from reference
DNA (most often free or unbound DNA) is calculated and subsequently this matrix is used to calculate deformation free
energy of probe DNA (most often bound DNA).
"""
inputRefFileHelp=\
"""Name of input reference file (hdf5 file).
File containing parameters of reference DNA for which global elastic properties will
be calculated. Most often it is free or unbound DNA.
This file should contain the required parameters. It should be hdf5 storage file.
"""
inputProbeFileHelp=\
"""Name of input probe file (hdf5 file).
File containing parameters of probe DNA for which global deformation energy will
be calculated. Most often it is bound DNA.
This file should contain the required parameters. It should be hdf5 storage file.
"""
outputFileHelp=\
"""Name of output file in csv format.
This file will contain the energy values as a function of time.
"""
energyTermHelp=\
"""Energy terms to be calculated.
For which motions the energy should be calculated.
Following keywords are available:
* all : (Default) All below listed energy terms will be calculated
* full : Use entire elastic matrix -- all motions with their coupling
* diag : Use diagonal of elastic matrix -- all motions but no coupling
* b1 : Only bending-1 motion
* b2 : Only bending-2 motion
* stretch : Only stretching motion
* twist : Only Twisting motions
* st_coupling : Only stretch-twist coupling motion
* bs_coupling : Only Bending and stretching coupling
* bt_coupling : Only Bending and Twisting coupling
* bb_coupling : Only bending-1 and bending-2 coupling
* bend : Both bending motions with their coupling
* st : Stretching and twisting motions with their coupling
* bs : Bending (b1, b2) and stretching motions with their coupling
* bt : Bending (b1, b2) and twisting motions with their coupling
In case of elasticity type "ST", only following four energy terms are available "all", "diag", "stretch", "twist" and
"st_coupling".
The terms should provided as comma separated values. e.g. -et "full,diag,b1,b2,stretch,twist".
"""
totalBpHelp=\
"""Total number of basepair in DNA/RNA.
It is an essential input.
"""
bpFirstHelp=\
"""Basepair number of first base-pair.
Usually it is one. Therefore, if this option is not provided, base-pair
numbering will start from one.
In rare cases, base-pair numbering might start with another number. In those
cases, use this option to start the numbering of basepairs from a number other
than one.
"""
esTypeHelp=\
"""Elastic Properties type.
Two keywords are accepted: "BST" or "ST".
* "BST" : Bending-Stretching-Twisting --- All motions are considered
" "ST" : Stretching-Twisting --- Bending motions are ignored.
WARNING: For accurate calculation of bending motions, DNA structures in trajectory must
be superimposed on a reference structure (See Publication's Method Section).
"""
bpStartHelp=\
"""First BP/BPS of DNA after which parameter will be extracted.
If it is not given, first basepair or base-step will be considered.
"""
bpEndHelp=\
"""Last BP/BPS of DNA upto which parameter will be extracted.
If it is not given, last basepair or base-step will be considered.
"""
paxisHelp=\
"""Principal axis parallel to global helical-axis
Three keywords are accepted: "X", "Y" and "Z". Only required when bending motions are
included in the calculation.
"""
errorHelp=\
""" Error of elastic modulus
If this option is used, elastic modulus will be calculated as a function of time. Therefore,
options such as frameGap will be essential.
Error methods are as following:
* "none" : No error calculation (Default).
* "acf": Using autocorrelation function to determine autocoprrelation time and used as time
to get the independent frame.
* "block": Block averaging error
* "std": standard deviation
In case of "acf" and "block", gromacs tool "g_analyze" or "gmx analyze" will be used. Either
of these tools should be in path for error calculation.
"""
toolsHelp=\
"""Tools to calculate autocorrelation time or bloack averaging error.
By default it is g_analyze (Gromacs-4.5.x/4.6.x versions). For newer versions, use "gmx analyze".
"""
enGlobalTypes = ['full', 'diag', 'stretch', 'twist', 'st_coupling', 'b1', 'b2',
'bend', 'bs_coupling', 'bt_coupling', 'bb_coupling', 'st', 'bs', 'bt' ]
def main():
parser, args = parseArguments()
# Input file
inputRefFile = None
if args.inputRefFile is not None:
inputRefFile = args.inputRefFile.name
args.inputRefFile.close()
else:
showErrorAndExit(parser, "No Input File for Reference DNA!!!\n")
# Input file
inputProbeFile = None
if args.inputProbeFile is not None:
inputProbeFile = args.inputProbeFile.name
args.inputProbeFile.close()
else:
showErrorAndExit(parser, "No Input File for Probe DNA!!!\n")
# Determine file-extension type
inputFileExtension = os.path.splitext(inputRefFile)[1]
if inputFileExtension not in ['.h5', '.hdf5', '.hdf']:
showErrorAndExit(parser, "Input file for Reference DNA should be in HDF5 (h5, hdf5 or hdf extension) format.\n")
inputFileExtension = os.path.splitext(inputProbeFile)[1]
if inputFileExtension not in ['.h5', '.hdf5', '.hdf']:
showErrorAndExit(parser, "Input file for probe DNA should be in HDF5 (h5, hdf5 or hdf extension) format.\n")
# Total number of base-pair
firstBP = args.firstBP
totalBP = None
if args.totalBP is None:
showErrorAndExit(parser, "No total number of BP!!!\n")
else:
totalBP = args.totalBP
# Determine start and end-bp
toMinusBP = 2
startBP = args.startBP
if startBP is None:
startBP = firstBP
endBP = args.endBP
if endBP is None:
endBP = firstBP + totalBP - toMinusBP
# Check consistency of start bp
if (startBP < firstBP) or (startBP > totalBP+firstBP-toMinusBP):
msg = 'The requested start bp {0} is out side of {1}-{2} range.'.format(startBP, firstBP, totalBP+firstBP-toMinusBP)
showErrorAndExit(parser, msg)
# Check consistency of end-bp
if endBP is not None:
if startBP > endBP:
msg = 'The requested end bp {0} is larger than requested start bp {1}!!!'.format(endBP, startBP)
showErrorAndExit(parser, msg)
if (endBP > totalBP+firstBP-toMinusBP) or (endBP < firstBP):
msg = 'The requested end bp {0} is out side of {1}-{2} range.'.format(endBP, firstBP, totalBP+firstBP-toMinusBP)
showErrorAndExit(parser, msg)
# Define DNA segement here
bp = [startBP, endBP]
if args.esType == 'BST' and args.paxis is None:
showErrorAndExit(parser, 'To calculate bending, principal axis parallel to helical axis is required.')
# Check energy terms and make a list
outEnergyTerms = checkEnergyTerms(args)
# initialize DNA object
dna = dnaMD.dnaEY(totalBP, esType=args.esType, filename=inputRefFile, startBP=firstBP)
complexDna = dnaMD.DNA(totalBP, filename=inputProbeFile, startBP=firstBP)
# Check if mask is in object
if dna.dna.mask is not None:
masked = True
else:
masked = False
time, energy = dna.getGlobalDeformationEnergy(bp, complexDna, paxis=args.paxis, which=outEnergyTerms, masked=masked,
outFile = args.outputFile)
if args.err_type is not None:
error = dnaMD.get_error(time, list(energy.values()), len(outEnergyTerms), err_type=args.err_type, tool=args.tool)
sys.stdout.write("==============================================\n")
sys.stdout.write('{0:<16}{1:>14}{2:>14}\n'.format('Energy(kJ/mol)', 'Average', 'Error'))
sys.stdout.write("----------------------------------------------\n")
for i in range(len(outEnergyTerms)):
sys.stdout.write('{0:<16}{1:>14.3f}{2:>14.3f}\n'.format(outEnergyTerms[i], np.mean(energy[outEnergyTerms[i]]),
error[i]))
sys.stdout.write("==============================================\n\n")
def checkEnergyTerms(args):
if args.esType == 'BST':
energyTerms = enGlobalTypes
else:
energyTerms = enGlobalTypes[:5]
outEnergyTerms = args.energyTerms
if 'all' in outEnergyTerms:
outEnergyTerms = energyTerms
else:
for key in outEnergyTerms:
if key not in energyTerms:
raise ValueError('{0} is not a supported keyword.\n Use from the following list: \n{1}'.format(
outEnergyTerms, energyTerms))
return outEnergyTerms
def parseArguments():
parser = argparse.ArgumentParser(prog='dnaMD globalEnergy',
description=description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-ir', '--input-ref', action='store',
type=argparse.FileType('rb'), metavar='ref_dna.h5',
dest='inputRefFile', required=False, help=inputRefFileHelp)
parser.add_argument('-ip', '--input-probe', action='store',
type=argparse.FileType('rb'), metavar='probe_dna.h5',
dest='inputProbeFile', required=False, help=inputProbeFileHelp)
parser.add_argument('-o', '--output', action='store',
type=str, metavar='output.dat',
dest='outputFile', help=outputFileHelp)
parser.add_argument('-et', '--energy-terms', action='store',
type=lambda s: [item.rstrip().lstrip() for item in s.split(',')],
metavar='"full,diag,strecth,twist"', default='all',
dest='energyTerms', help=energyTermHelp)
parser.add_argument('-tbp', '--total-bp', action='store',
type=int, metavar='total-bp-number',
dest='totalBP', help=totalBpHelp)
parser.add_argument('-estype', '--elasticity-type', action='store',
dest='esType', metavar='esType', default='ST',
choices = ['ST', 'BST'],
type=str, help=esTypeHelp)
parser.add_argument('-bs', '--bp-start', action='store',
type=int, metavar='bp/s-start-number',
default=1,
dest='startBP', help=bpStartHelp)
parser.add_argument('-be', '--bp-end', action='store',
type=int, dest='endBP',
metavar='bp/s-end-number',
help=bpEndHelp)
parser.add_argument('-paxis', '--principal-axis', action='store',
type=str, metavar='X', default=None,
choices=['X', 'Y', 'Z'],
dest='paxis', help=paxisHelp)
parser.add_argument('-em', '--error-method', action='store',
type=str, metavar='block', default='block',
choices=['std', 'acf', 'block'],
dest='err_type', help=errorHelp)
parser.add_argument('-gt', '--gromacs-tool', action='store',
type=str, metavar='gmx analyze', default='gmx analyze',
dest='tool', help=toolsHelp)
parser.add_argument('-fbp', '--first-bp', action='store',
type=int, metavar='1', default=1,
dest='firstBP', help=bpFirstHelp)
idx = sys.argv.index("globalEnergy") + 1
args = parser.parse_args(args=sys.argv[idx:])
return parser, args
def showErrorAndExit(parser, message):
parser.print_help()
print("\n===== ERROR =======")
print(message)
print("See Usage Above!!!")
sys.exit(False)
if __name__ == '__main__':
main()
|
rjdkmr/do_x3dna
|
dnaMD/dnaMD/commands/globalEnergy.py
|
Python
|
gpl-3.0
| 14,043
|
[
"Gromacs"
] |
aaebc0a0368259f62f49e4c48d1073291d1f47078242c3fe6c667dc8e4ea2c92
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
xarray with MetPy Tutorial
==========================
`xarray <http://xarray.pydata.org/>`_ is a powerful Python package that provides N-dimensional
labeled arrays and datasets following the Common Data Model. While the process of integrating
xarray features into MetPy is ongoing, this tutorial demonstrates how xarray can be used
within the current version of MetPy. MetPy's integration primarily works through accessors
which allow simplified projection handling and coordinate identification. Unit and calculation
support is currently available in a limited fashion, but should be improved in future
versions.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Any import of metpy will activate the accessors
import metpy.calc as mpcalc
from metpy.testing import get_test_data
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# While xarray can handle a wide variety of n-dimensional data (essentially anything that can
# be stored in a netCDF file), a common use case is working with model output. Such model
# data can be obtained from a THREDDS Data Server using the siphon package, but for this
# tutorial, we will use an example subset of GFS data from Hurrican Irma (September 5th,
# 2017).
# Open the netCDF file as a xarray Dataset
data = xr.open_dataset(get_test_data('irma_gfs_example.nc', False))
# View a summary of the Dataset
print(data)
#########################################################################
# Preparing Data
# --------------
#
# To make use of the data within MetPy, we need to parse the dataset for projection
# information following the CF conventions. For this, we use the
# ``data.metpy.parse_cf()`` method, which will return a new, parsed ``DataArray`` or
# ``Dataset``.
#
# Additionally, we rename our data variables for easier reference.
# To parse the full dataset, we can call parse_cf without an argument, and assign the returned
# Dataset.
data = data.metpy.parse_cf()
# If we instead want just a single variable, we can pass that variable name to parse_cf and
# it will return just that data variable as a DataArray.
data_var = data.metpy.parse_cf('Temperature_isobaric')
# If we want only a subset of variables, we can pass a list of variable names as well.
data_subset = data.metpy.parse_cf(['u-component_of_wind_isobaric',
'v-component_of_wind_isobaric'])
# To rename variables, supply a dictionary between old and new names to the rename method
data = data.rename({
'Vertical_velocity_pressure_isobaric': 'omega',
'Relative_humidity_isobaric': 'relative_humidity',
'Temperature_isobaric': 'temperature',
'u-component_of_wind_isobaric': 'u',
'v-component_of_wind_isobaric': 'v',
'Geopotential_height_isobaric': 'height'
})
#########################################################################
# Units
# -----
#
# MetPy's DataArray accessor has a ``unit_array`` property to obtain a ``pint.Quantity`` array
# of just the data from the DataArray (metadata is removed) and a ``convert_units`` method to
# convert the data from one unit to another (keeping it as a DataArray). For now, we'll
# just use ``convert_units`` to convert our temperature to ``degC``.
data['temperature'].metpy.convert_units('degC')
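# As a minimal sketch of the ``unit_array`` property described above (the
# variable name here is just illustrative), we can also pull the data out as a
# plain ``pint.Quantity`` and inspect its units directly:
temperature_quantity = data['temperature'].metpy.unit_array
print(temperature_quantity.units)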
#########################################################################
# Coordinates
# -----------
#
# You may have noticed how we directly accessed the vertical coordinates above using their
# names. However, in general, if we are working with a particular DataArray, we don't have to
# worry about that since MetPy is able to parse the coordinates and so obtain a particular
# coordinate type directly. There are two ways to do this:
#
# 1. Use the ``data_var.metpy.coordinates`` method
# 2. Use the ``data_var.metpy.x``, ``data_var.metpy.y``, ``data_var.metpy.longitude``,
# ``data_var.metpy.latitude``, ``data_var.metpy.vertical``, ``data_var.metpy.time``
# properties
#
# The valid coordinate types are:
#
# - x
# - y
# - longitude
# - latitude
# - vertical
# - time
#
# (Both approaches are shown below)
#
# The ``x``, ``y``, ``vertical``, and ``time`` coordinates cannot be multidimensional,
# however, the ``longitude`` and ``latitude`` coordinates can (which is often the case for
# gridded weather data in its native projection). Note that for gridded data on an
# equirectangular projection, such as the GFS data in this example, ``x`` and ``longitude``
# will be identical (as will ``y`` and ``latitude``).
# Get multiple coordinates (for example, in just the x and y direction)
x, y = data['temperature'].metpy.coordinates('x', 'y')
# If we want to get just a single coordinate from the coordinates method, we have to use
# tuple unpacking because the coordinates method returns a generator
vertical, = data['temperature'].metpy.coordinates('vertical')
# Or, we can just get a coordinate from the property
time = data['temperature'].metpy.time
# To verify, we can inspect all their names
print([coord.name for coord in (x, y, vertical, time)])
#########################################################################
# Indexing and Selecting Data
# ---------------------------
#
# MetPy provides wrappers for the usual xarray indexing and selection routines that can handle
# quantities with units. For DataArrays, MetPy also allows using the coordinate axis types
# mentioned above as aliases for the coordinates. And so, if we wanted 850 hPa heights,
# we could simply do the following:
print(data['height'].metpy.sel(vertical=850 * units.hPa))
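# MetPy's ``loc`` wrapper behaves analogously. As a minimal sketch (the wrapper
# is used without further introduction later in this tutorial), the same level
# can be selected through a dictionary of coordinate names and quantities:
print(data['height'].metpy.loc[{'vertical': 850. * units.hPa}])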
#########################################################################
# For full details on xarray indexing/selection, see
# `xarray's documentation <http://xarray.pydata.org/en/stable/indexing.html>`_.
#########################################################################
# Projections
# -----------
#
# Getting the cartopy coordinate reference system (CRS) of the projection of a DataArray is as
# straightforward as using the ``data_var.metpy.cartopy_crs`` property:
data_crs = data['temperature'].metpy.cartopy_crs
print(data_crs)
#########################################################################
# The cartopy ``Globe`` can similarly be accessed via the ``data_var.metpy.cartopy_globe``
# property:
data_globe = data['temperature'].metpy.cartopy_globe
print(data_globe)
#########################################################################
# Calculations
# ------------
#
# Most of the calculations in `metpy.calc` will accept DataArrays by converting them
# into their corresponding unit arrays. While this may often work without any issues, we must
# keep in mind that because the calculations are working with unit arrays and not DataArrays:
#
# - The calculations will return unit arrays rather than DataArrays
# - Broadcasting must be taken care of outside of the calculation, as it would only recognize
# dimensions by order, not name
#
# As an example, we calculate geostrophic wind at 500 hPa below:
lat, lon = xr.broadcast(y, x)
f = mpcalc.coriolis_parameter(lat)
dx, dy = mpcalc.lat_lon_grid_deltas(lon, lat, initstring=data_crs.proj4_init)
heights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]
u_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)
print(u_geo)
print(v_geo)
#########################################################################
# Also, a limited number of calculations directly support xarray DataArrays or Datasets (they
# can accept *and* return xarray objects). Right now, this includes
#
# - Derivative functions
# - ``first_derivative``
# - ``second_derivative``
# - ``gradient``
# - ``laplacian``
# - Cross-section functions
# - ``cross_section_components``
# - ``normal_component``
# - ``tangential_component``
# - ``absolute_momentum``
# - Smoothing functions
# - ``smooth_gaussian``
# - ``smooth_n_point``
# - ``smooth_window``
# - ``smooth_rectangular``
# - ``smooth_circular``
#
# More details can be found by looking at the documentation for the specific function of
# interest.
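# As a rough sketch of one of the xarray-aware functions listed above (exact
# behavior may vary between MetPy versions), we can smooth the 500 hPa height
# DataArray from the earlier geostrophic wind example with a 9-point smoother:
heights_smoothed = mpcalc.smooth_n_point(heights, 9)
print(heights_smoothed)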
#########################################################################
# There is also the special case of the helper function, ``grid_deltas_from_dataarray``, which
# takes a ``DataArray`` input, but returns unit arrays for use in other calculations. We could
# rewrite the above geostrophic wind example using this helper function as follows:
heights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]
lat, lon = xr.broadcast(y, x)
f = mpcalc.coriolis_parameter(lat)
dx, dy = mpcalc.grid_deltas_from_dataarray(heights)
u_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)
print(u_geo)
print(v_geo)
#########################################################################
# Plotting
# --------
#
# Like most meteorological data, we want to be able to plot these data. DataArrays can be used
# like normal numpy arrays in plotting code, which is the recommended process at the current
# point in time, or we can use some of xarray's plotting functionality for quick inspection of
# the data.
#
# (More detail beyond the following can be found at `xarray's plotting reference
# <http://xarray.pydata.org/en/stable/plotting.html>`_.)
# A very simple example of a plot of 500 hPa heights
data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}].plot()
plt.show()
#########################################################################
# Let's add a projection and coastlines to it
ax = plt.axes(projection=ccrs.LambertConformal())
data['height'].metpy.loc[{'time': time[0],
'vertical': 500. * units.hPa}].plot(ax=ax, transform=data_crs)
ax.coastlines()
plt.show()
#########################################################################
# Or, let's make a full 500 hPa map with heights, temperature, winds, and humidity
# Select the data for this time and level
data_level = data.metpy.loc[{time.name: time[0], vertical.name: 500. * units.hPa}]
# Create the matplotlib figure and axis
fig, ax = plt.subplots(1, 1, figsize=(12, 8), subplot_kw={'projection': data_crs})
# Plot RH as filled contours
rh = ax.contourf(x, y, data_level['relative_humidity'], levels=[70, 80, 90, 100],
colors=['#99ff00', '#00ff00', '#00cc00'])
# Plot wind barbs, but not all of them
wind_slice = slice(5, -5, 5)
ax.barbs(x[wind_slice], y[wind_slice],
data_level['u'].metpy.unit_array[wind_slice, wind_slice].to('knots'),
data_level['v'].metpy.unit_array[wind_slice, wind_slice].to('knots'),
length=6)
# Plot heights and temperature as contours
h_contour = ax.contour(x, y, data_level['height'], colors='k', levels=range(5400, 6000, 60))
h_contour.clabel(fontsize=8, colors='k', inline=1, inline_spacing=8,
fmt='%i', rightside_up=True, use_clabeltext=True)
t_contour = ax.contour(x, y, data_level['temperature'], colors='xkcd:deep blue',
levels=range(-26, 4, 2), alpha=0.8, linestyles='--')
t_contour.clabel(fontsize=8, colors='xkcd:deep blue', inline=1, inline_spacing=8,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Add geographic features
ax.add_feature(cfeature.LAND.with_scale('50m'), facecolor=cfeature.COLORS['land'])
ax.add_feature(cfeature.OCEAN.with_scale('50m'), facecolor=cfeature.COLORS['water'])
ax.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='#c7c783', zorder=0)
ax.add_feature(cfeature.LAKES.with_scale('50m'), facecolor=cfeature.COLORS['water'],
edgecolor='#c7c783', zorder=0)
# Set a title and show the plot
ax.set_title('500 hPa Heights (m), Temperature (\u00B0C), Humidity (%) at '
+ time[0].dt.strftime('%Y-%m-%d %H:%MZ').item())
plt.show()
#########################################################################
# What Could Go Wrong?
# --------------------
#
# Depending on your dataset and what you are trying to do, you might run into problems with
# xarray and MetPy. Below are examples of some of the most common issues:
#
# - Multiple coordinate conflict
# - An axis not being available
# - An axis not being interpretable
# - Arrays not broadcasting in calculations
#
# **Coordinate Conflict**
#
# Code:
#
# ::
#
# x = data['Temperature'].metpy.x
#
# Error Message:
#
# ::
#
# /home/user/env/MetPy/metpy/xarray.py:305: UserWarning: More than
# one x coordinate present for variable "Temperature".
#
# Fix:
#
# Manually assign the coordinates using the ``assign_coordinates()`` method on your DataArray,
# or by specifying the ``coordinates`` argument to the ``parse_cf()`` method on your Dataset,
# to map the ``time``, ``vertical``, ``y``, ``latitude``, ``x``, and ``longitude`` axes (as
# applicable to your data) to the corresponding coordinates.
#
# ::
#
# data['Temperature'].assign_coordinates({'time': 'time', 'vertical': 'isobaric',
# 'y': 'y', 'x': 'x'})
# x = data['Temperature'].metpy.x
#
# or
#
# ::
#
# temperature = data.metpy.parse_cf('Temperature',
# coordinates={'time': 'time', 'vertical': 'isobaric',
# 'y': 'y', 'x': 'x'})
# x = temperature.metpy.x
#
# **Axis Unavailable**
#
# Code:
#
# ::
#
# data['Temperature'].metpy.vertical
#
# Error Message:
#
# ::
#
# AttributeError: vertical attribute is not available.
#
# This means that your data variable does not have the coordinate that was requested, at
# least as far as the parser can recognize. Verify that you are requesting a
# coordinate that your data actually has, and if it still is not available,
# you will need to manually specify the coordinates as discussed above.
#
# **Axis Not Interpretable**
#
# Code:
#
# ::
#
# x, y, ensemble = data['Temperature'].metpy.coordinates('x', 'y', 'ensemble')
#
# Error Message:
#
# ::
#
# AttributeError: 'ensemble' is not an interpretable axis
#
# This means that you are requesting a coordinate that MetPy is (currently) unable to parse.
# While this means it cannot be recognized automatically, you can still obtain your desired
# coordinate directly by accessing it by name. If you have a need for systematic
# identification of a new coordinate type, we welcome pull requests for such new functionality
# on GitHub!
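#
# For example (purely illustrative), the coordinate can still be retrieved by
# its name with plain xarray indexing:
#
# ::
#
#     ensemble = data['Temperature']['ensemble']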
#
# **Broadcasting in Calculations**
#
# Code:
#
# ::
#
# theta = mpcalc.potential_temperature(data['isobaric3'], data['temperature'])
#
# Error Message:
#
# ::
#
# ValueError: operands could not be broadcast together with shapes (9,31,81,131) (31,)
#
# This is a symptom of the incomplete integration of xarray with MetPy's calculations; the
# calculations currently convert the DataArrays to unit arrays that do not recognize which
# coordinates match with which. And so, we must do some manipulations.
#
# Fix 1 (xarray broadcasting):
#
# ::
#
# pressure, temperature = xr.broadcast(data['isobaric3'], data['temperature'])
# theta = mpcalc.potential_temperature(pressure, temperature)
#
# Fix 2 (unit array broadcasting):
#
# ::
#
# theta = mpcalc.potential_temperature(
# data['isobaric3'].metpy.unit_array[None, :, None, None],
# data['temperature'].metpy.unit_array
# )
#
| ahaberlie/MetPy | tutorials/xarray_tutorial.py | Python | bsd-3-clause | 15,531 | ["NetCDF"] | 67c589af4c21b66812b824a38143557c95adf4fe9bf681499454195ea23ca1be |
|
# Copyright 2004 by Harry Zuzan. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Classes for accessing the information in Affymetrix cel files.
Functions:
read Read a cel file and store its contents in a Record
Classes:
Record Contains the information from a cel file
"""
# We use print in the doctests
from __future__ import print_function
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.Affy.CelFile")
__docformat__ = "restructuredtext en"
class Record(object):
"""Stores the information in a cel file
Example usage:
>>> from Bio.Affy import CelFile
>>> with open('Affy/affy_v3_example.CEL') as handle:
... c = CelFile.read(handle)
...
>>> print(c.ncols, c.nrows)
5 5
>>> print(c.intensities)
[[ 234. 170. 22177. 164. 22104.]
[ 188. 188. 21871. 168. 21883.]
[ 188. 193. 21455. 198. 21300.]
[ 188. 182. 21438. 188. 20945.]
[ 193. 20370. 174. 20605. 168.]]
>>> print(c.stdevs)
[[ 24. 34.5 2669. 19.7 3661.2]
[ 29.8 29.8 2795.9 67.9 2792.4]
[ 29.8 88.7 2976.5 62. 2914.5]
[ 29.8 76.2 2759.5 49.2 2762. ]
[ 38.8 2611.8 26.6 2810.7 24.1]]
>>> print(c.npix)
[[25 25 25 25 25]
[25 25 25 25 25]
[25 25 25 25 25]
[25 25 25 25 25]
[25 25 25 25 25]]
"""
def __init__(self):
self.version = None
self.GridCornerUL = None
self.GridCornerUR = None
self.GridCornerLR = None
self.GridCornerLL = None
self.DatHeader = None
self.Algorithm = None
self.AlgorithmParameters = None
self.NumberCells = None
self.intensities = None
self.stdevs = None
self.npix = None
self.nrows = None
self.ncols = None
self.nmask = None
self.mask = None
self.noutliers = None
self.outliers = None
self.modified = None
def read(handle):
"""
Read the information in a cel file, and store it in a Record.
"""
# Needs error handling.
# Needs to know the chip design.
record = Record()
section = ""
for line in handle:
if not line.strip():
continue
# Set current section
if line[:5] == "[CEL]":
section = "CEL"
elif line[:8] == "[HEADER]":
section = "HEADER"
elif line[:11] == "[INTENSITY]":
section = "INTENSITY"
record.intensities = numpy.zeros((record.nrows, record.ncols))
record.stdevs = numpy.zeros((record.nrows, record.ncols))
record.npix = numpy.zeros((record.nrows, record.ncols), int)
elif line[:7] == "[MASKS]":
section = "MASKS"
record.mask = numpy.zeros((record.nrows, record.ncols))
elif line[:10] == "[OUTLIERS]":
section = "OUTLIERS"
record.outliers = numpy.zeros((record.nrows, record.ncols))
elif line[:10] == "[MODIFIED]":
section = "MODIFIED"
record.modified = numpy.zeros((record.nrows, record.ncols))
elif line[0] == "[":
# This would be an unknown section
section = ""
elif section == "CEL":
keyword, value = line.split("=", 1)
if keyword == 'Version':
record.version = int(value)
elif section == "HEADER":
# Set record.ncols and record.nrows, remaining data goes into
# record.header dict
keyword, value = line.split("=", 1)
if keyword == "Cols":
record.ncols = int(value)
elif keyword == "Rows":
record.nrows = int(value)
elif keyword == 'GridCornerUL':
x, y = value.split()
record.GridCornerUL = (int(x), int(y))
elif keyword == 'GridCornerUR':
x, y = value.split()
record.GridCornerUR = (int(x), int(y))
elif keyword == 'GridCornerLR':
x, y = value.split()
record.GridCornerLR = (int(x), int(y))
elif keyword == 'GridCornerLL':
x, y = value.split()
record.GridCornerLL = (int(x), int(y))
elif keyword == 'DatHeader':
record.DatHeader = value.strip('\n\r')
elif keyword == 'Algorithm':
record.Algorithm = value.strip('\n\r')
elif keyword == 'AlgorithmParameters':
record.AlgorithmParameters = value.strip('\n\r')
elif section == "INTENSITY":
if "NumberCells" in line:
record.NumberCells = int(line.split("=", 1)[1])
elif "CellHeader" in line:
pass
else:
words = line.split()
y = int(words[0])
x = int(words[1])
record.intensities[x, y] = float(words[2])
record.stdevs[x, y] = float(words[3])
record.npix[x, y] = int(words[4])
elif section == "MASKS":
if "NumberCells" in line:
record.nmask = int(line.split("=", 1)[1])
elif "CellHeader" in line:
pass
else:
words = line.split()
y = int(words[0])
x = int(words[1])
record.mask[x, y] = int(1)
elif section == "OUTLIERS":
if "NumberCells" in line:
record.noutliers = int(line.split("=", 1)[1])
elif "CellHeader" in line:
pass
else:
words = line.split()
y = int(words[0])
x = int(words[1])
record.outliers[x, y] = int(1)
elif section == "MODIFIED":
if "NumberCells" in line:
record.nmodified = int(line.split("=", 1)[1])
elif "CellHeader" in line:
pass
else:
words = line.split()
y = int(words[0])
x = int(words[1])
record.modified[x, y] = float(words[2])
else:
continue
return record
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Affy/CelFile.py | Python | gpl-2.0 | 6,587 | ["Biopython"] | e76fc9bfd9a0fcb4adfad3c956272e595fac5d547b99e9d0558b68a24c3db109 |
|
"""
DIRAC.WorkloadManagementSystem.JobWrapper package
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
| yujikato/DIRAC | src/DIRAC/WorkloadManagementSystem/JobWrapper/__init__.py | Python | gpl-3.0 | 190 | ["DIRAC"] | 2dde212d336616d0d299c58a856e731303b74e89f6586b597d4ddd7ce72caaf4 |
|
#!/usr/bin/env python
import pysam
import argparse, sys
import math, time, re
from collections import Counter
from argparse import RawTextHelpFormatter
__author__ = "Colby Chiang (cc2qe@virginia.edu)"
__version__ = "$Revision: 0.0.1 $"
__date__ = "$Date: 2015-04-22 09:31 $"
# --------------------------------------
# define functions
def get_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="\
vcf_group_multiline.py\n\
author: " + __author__ + "\n\
version: " + __version__ + "\n\
description: Group multiline variants prior to vcf_paste.py")
parser.add_argument(metavar='vcf', dest='input_vcf', nargs='?', type=argparse.FileType('r'), default=None, help='VCF input (default: stdin)')
# parse the arguments
args = parser.parse_args()
# if no input, check if part of pipe and if so, read stdin.
if args.input_vcf == None:
if sys.stdin.isatty():
parser.print_help()
exit(1)
else:
args.input_vcf = sys.stdin
# send back the user input
return args
class Vcf(object):
def __init__(self):
self.file_format = 'VCFv4.2'
# self.fasta = fasta
self.reference = ''
self.sample_list = []
self.info_list = []
self.format_list = []
self.alt_list = []
self.add_format('GT', 1, 'String', 'Genotype')
def add_header(self, header):
for line in header:
if line.split('=')[0] == '##fileformat':
self.file_format = line.rstrip().split('=')[1]
elif line.split('=')[0] == '##reference':
self.reference = line.rstrip().split('=')[1]
elif line.split('=')[0] == '##INFO':
a = line[line.find('<')+1:line.find('>')]
r = re.compile(r'(?:[^,\"]|\"[^\"]*\")+')
self.add_info(*[b.split('=')[1] for b in r.findall(a)])
elif line.split('=')[0] == '##ALT':
a = line[line.find('<')+1:line.find('>')]
r = re.compile(r'(?:[^,\"]|\"[^\"]*\")+')
self.add_alt(*[b.split('=')[1] for b in r.findall(a)])
elif line.split('=')[0] == '##FORMAT':
a = line[line.find('<')+1:line.find('>')]
r = re.compile(r'(?:[^,\"]|\"[^\"]*\")+')
self.add_format(*[b.split('=')[1] for b in r.findall(a)])
elif line[0] == '#' and line[1] != '#':
self.sample_list = line.rstrip().split('\t')[9:]
# return the VCF header
def get_header(self, include_samples=True):
if include_samples:
header = '\n'.join(['##fileformat=' + self.file_format,
'##fileDate=' + time.strftime('%Y%m%d'),
'##reference=' + self.reference] + \
[i.hstring for i in self.info_list] + \
[a.hstring for a in self.alt_list] + \
[f.hstring for f in self.format_list] + \
['\t'.join([
'#CHROM',
'POS',
'ID',
'REF',
'ALT',
'QUAL',
'FILTER',
'INFO',
'FORMAT'] + \
self.sample_list
)])
else:
header = '\n'.join(['##fileformat=' + self.file_format,
'##fileDate=' + time.strftime('%Y%m%d'),
'##reference=' + self.reference] + \
[i.hstring for i in self.info_list] + \
[a.hstring for a in self.alt_list] + \
[f.hstring for f in self.format_list] + \
['\t'.join([
'#CHROM',
'POS',
'ID',
'REF',
'ALT',
'QUAL',
'FILTER',
'INFO']
)])
return header
def add_info(self, id, number, type, desc):
if id not in [i.id for i in self.info_list]:
inf = self.Info(id, number, type, desc)
self.info_list.append(inf)
def add_alt(self, id, desc):
if id not in [a.id for a in self.alt_list]:
alt = self.Alt(id, desc)
self.alt_list.append(alt)
def add_format(self, id, number, type, desc):
if id not in [f.id for f in self.format_list]:
fmt = self.Format(id, number, type, desc)
self.format_list.append(fmt)
def add_sample(self, name):
self.sample_list.append(name)
# get the VCF column index of a sample
# NOTE: this is zero-based, like python arrays
def sample_to_col(self, sample):
return self.sample_list.index(sample) + 9
class Info(object):
def __init__(self, id, number, type, desc):
self.id = str(id)
self.number = str(number)
self.type = str(type)
self.desc = str(desc)
# strip the double quotes around the string if present
if self.desc.startswith('"') and self.desc.endswith('"'):
self.desc = self.desc[1:-1]
self.hstring = '##INFO=<ID=' + self.id + ',Number=' + self.number + ',Type=' + self.type + ',Description=\"' + self.desc + '\">'
class Alt(object):
def __init__(self, id, desc):
self.id = str(id)
self.desc = str(desc)
# strip the double quotes around the string if present
if self.desc.startswith('"') and self.desc.endswith('"'):
self.desc = self.desc[1:-1]
self.hstring = '##ALT=<ID=' + self.id + ',Description=\"' + self.desc + '\">'
class Format(object):
def __init__(self, id, number, type, desc):
self.id = str(id)
self.number = str(number)
self.type = str(type)
self.desc = str(desc)
# strip the double quotes around the string if present
if self.desc.startswith('"') and self.desc.endswith('"'):
self.desc = self.desc[1:-1]
self.hstring = '##FORMAT=<ID=' + self.id + ',Number=' + self.number + ',Type=' + self.type + ',Description=\"' + self.desc + '\">'
class Variant(object):
def __init__(self, var_list, vcf):
self.chrom = var_list[0]
self.pos = int(var_list[1])
self.var_id = var_list[2]
self.ref = var_list[3]
self.alt = var_list[4]
if var_list[5] == '.':
self.qual = 0
else:
self.qual = float(var_list[5])
self.filter = var_list[6]
self.sample_list = vcf.sample_list
self.info_list = vcf.info_list
self.info = dict()
self.format_list = vcf.format_list
self.active_formats = list()
self.gts = dict()
# fill in empty sample genotypes
if len(var_list) < 8:
sys.stderr.write('\nError: VCF file must have at least 8 columns\n')
exit(1)
if len(var_list) < 9:
var_list.append("GT")
# make a genotype for each sample at variant
for s in self.sample_list:
try:
s_gt = var_list[vcf.sample_to_col(s)].split(':')[0]
self.gts[s] = Genotype(self, s, s_gt)
# import the existing fmt fields
for j in zip(var_list[8].split(':'), var_list[vcf.sample_to_col(s)].split(':')):
self.gts[s].set_format(j[0], j[1])
except IndexError:
self.gts[s] = Genotype(self, s, './.')
self.info = dict()
i_split = [a.split('=') for a in var_list[7].split(';')] # temp list of split info column
for i in i_split:
if len(i) == 1:
i.append(True)
self.info[i[0]] = i[1]
def set_info(self, field, value):
if field in [i.id for i in self.info_list]:
self.info[field] = value
else:
sys.stderr.write('\nError: invalid INFO field, \"' + field + '\"\n')
exit(1)
def get_info(self, field):
return self.info[field]
def get_info_string(self):
i_list = list()
for info_field in self.info_list:
if info_field.id in self.info.keys():
if info_field.type == 'Flag':
i_list.append(info_field.id)
else:
i_list.append('%s=%s' % (info_field.id, self.info[info_field.id]))
return ';'.join(i_list)
def get_format_string(self):
f_list = list()
for f in self.format_list:
if f.id in self.active_formats:
f_list.append(f.id)
return ':'.join(f_list)
def genotype(self, sample_name):
if sample_name in self.sample_list:
return self.gts[sample_name]
else:
sys.stderr.write('\nError: invalid sample name, \"' + sample_name + '\"\n')
def get_var_string(self):
s = '\t'.join(map(str,[
self.chrom,
self.pos,
self.var_id,
self.ref,
self.alt,
'%0.2f' % self.qual,
self.filter,
self.get_info_string(),
self.get_format_string(),
'\t'.join(self.genotype(s).get_gt_string() for s in self.sample_list)
]))
return s
class Genotype(object):
def __init__(self, variant, sample_name, gt):
self.format = dict()
self.variant = variant
self.set_format('GT', gt)
def set_format(self, field, value):
if field in [i.id for i in self.variant.format_list]:
self.format[field] = value
if field not in self.variant.active_formats:
self.variant.active_formats.append(field)
# sort it to be in the same order as the format_list in header
self.variant.active_formats.sort(key=lambda x: [f.id for f in self.variant.format_list].index(x))
else:
sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
exit(1)
def get_format(self, field):
return self.format[field]
def get_gt_string(self):
g_list = list()
for f in self.variant.active_formats:
if f in self.format:
if type(self.format[f]) == float:
g_list.append('%0.2f' % self.format[f])
else:
g_list.append(self.format[f])
else:
g_list.append('.')
return ':'.join(map(str,g_list))
# primary function
def sv_genotype(vcf_file):
in_header = True
header = []
breakend_dict = {} # cache to hold unmatched generic breakends for genotyping
vcf = Vcf()
vcf_out = sys.stdout
# read input VCF
for line in vcf_file:
if in_header:
if line[0] == '#':
header.append(line)
if line[1] != '#':
vcf_samples = line.rstrip().split('\t')[9:]
continue
else:
in_header = False
vcf.add_header(header)
# if detailed:
vcf.add_format('GQ', 1, 'Float', 'Genotype quality')
            vcf.add_format('SQ', 1, 'Float', 'Phred-scaled probability that this site is variant (non-reference in this sample)')
vcf.add_format('GL', 'G', 'Float', 'Genotype Likelihood, log10-scaled likelihoods of the data given the called genotype for each possible gen\
otype generated from the reference and alternate alleles given the sample ploidy')
vcf.add_format('DP', 1, 'Integer', 'Read depth')
vcf.add_format('RO', 1, 'Integer', 'Reference allele observation count, with partial observations recorded fractionally')
vcf.add_format('AO', 'A', 'Integer', 'Alternate allele observations, with partial observations recorded fractionally')
vcf.add_format('QR', 1, 'Integer', 'Sum of quality of reference observations')
vcf.add_format('QA', 'A', 'Integer', 'Sum of quality of alternate observations')
vcf.add_format('RS', 1, 'Integer', 'Reference allele split-read observation count, with partial observations recorded fractionally')
vcf.add_format('AS', 'A', 'Integer', 'Alternate allele split-read observation count, with partial observations recorded fractionally')
vcf.add_format('RP', 1, 'Integer', 'Reference allele paired-end observation count, with partial observations recorded fractionally')
vcf.add_format('AP', 'A', 'Integer', 'Alternate allele paired-end observation count, with partial observations recorded fractionally')
vcf.add_format('AB', 'A', 'Float', 'Allele balance, fraction of observations from alternate allele, QA/(QR+QA)')
# write the output header
if len(vcf_samples) > 0:
vcf_out.write(vcf.get_header(include_samples=True) + '\n')
else:
vcf_out.write(vcf.get_header(include_samples=False) + '\n')
v = line.rstrip().split('\t')
var = Variant(v, vcf)
# genotype generic breakends
if var.info['SVTYPE']=='BND':
if var.info['MATEID'] in breakend_dict:
var2 = var
var = breakend_dict[var.info['MATEID']]
chromA = var.chrom
chromB = var2.chrom
posA = var.pos
posB = var2.pos
# confidence intervals
ciA = [posA + ci for ci in map(int, var.info['CIPOS'].split(','))]
ciB = [posB + ci for ci in map(int, var2.info['CIPOS'].split(','))]
# infer the strands from the alt allele
if var.alt[-1] == '[' or var.alt[-1] == ']':
o1 = '+'
else: o1 = '-'
if var2.alt[-1] == '[' or var2.alt[-1] == ']':
o2 = '+'
else: o2 = '-'
else:
breakend_dict[var.var_id] = var
continue
else:
chromA = var.chrom
chromB = var.chrom
posA = var.pos
posB = int(var.get_info('END'))
# confidence intervals
ciA = [posA + ci for ci in map(int, var.info['CIPOS'].split(','))]
ciB = [posB + ci for ci in map(int, var.info['CIEND'].split(','))]
if var.get_info('SVTYPE') == 'DEL':
o1, o2 = '+', '-'
elif var.get_info('SVTYPE') == 'DUP':
o1, o2 = '-', '+'
elif var.get_info('SVTYPE') == 'INV':
o1, o2 = '+', '+'
# # increment the negative strand values (note position in VCF should be the base immediately left of the breakpoint junction)
# if o1 == '-': posA += 1
# if o2 == '-': posB += 1
# # if debug: print posA, posB
# # for i in xrange(len(bam_list)):
# for sample in sample_list:
# '''
# Breakend A
# '''
# # Count splitters
# ref_counter_a = Counter()
# spl_counter_a = Counter()
# ref_scaled_counter_a = Counter()
# spl_scaled_counter_a = Counter()
# for ref_read in sample.bam.fetch(chromA, max(posA - padding, 0), posA + padding + 1):
# if not ref_read.is_duplicate and not ref_read.is_unmapped:
# for p in xrange(ref_read.pos + 1, ref_read.aend + 1):
# if p - ref_read.pos >= splflank and ref_read.aend - p >= splflank:
# ref_counter_a[p] += 1
# ref_scaled_counter_a[p] += (1-10**(-ref_read.mapq/10.0))
# for spl_read in sample.spl_bam.fetch(chromA, max(posA - padding, 0), posA + padding + 1):
# if not spl_read.is_duplicate and not spl_read.is_unmapped:
# if o1 == '+' and spl_read.cigar[0][0] == 0:
# # if debug: print 'o1+', spl_read.aend
# spl_counter_a[spl_read.aend] += 1
# spl_scaled_counter_a[spl_read.aend] += (1-10**(-spl_read.mapq/10.0))
# elif o1 == '-' and spl_read.cigar[-1][0] == 0:
# # if debug: print 'o1-', spl_read.pos + 1
# spl_counter_a[spl_read.pos + 1] += 1
# spl_scaled_counter_a[spl_read.pos + 1] += (1-10**(-spl_read.mapq/10.0))
# # Count paired-end discordant and concordants
# (conc_counter_a,
# disc_counter_a,
# conc_scaled_counter_a,
# disc_scaled_counter_a) = count_pairedend(chromA, posA, ciA,
# chromB, posB, ciB,
# o1, o2,
# var.info['SVTYPE'],
# sample,
# z, discflank)
# '''
# Breakend B
# '''
# # Count splitters
# ref_counter_b = Counter()
# spl_counter_b = Counter()
# ref_scaled_counter_b = Counter()
# spl_scaled_counter_b = Counter()
# for ref_read in sample.bam.fetch(chromB, max(posB - padding, 0), posB + padding + 1):
# if not ref_read.is_duplicate and not ref_read.is_unmapped:
# for p in xrange(ref_read.pos + 1, ref_read.aend + 1):
# if p - ref_read.pos >= splflank and ref_read.aend - p >= splflank:
# ref_counter_b[p] += 1
# ref_scaled_counter_b[p] += (1-10**(-ref_read.mapq/10.0))
# for spl_read in sample.spl_bam.fetch(chromB, max(posB - padding, 0), posB + padding + 1):
# if not spl_read.is_duplicate and not spl_read.is_unmapped:
# if o2 == '+' and spl_read.cigar[0][0] == 0:
# spl_counter_b[spl_read.aend] += 1
# # if debug: print 'o2+', spl_read.aend
# spl_scaled_counter_b[spl_read.aend] += (1-10**(-spl_read.mapq/10.0))
# elif o2 == '-' and spl_read.cigar[-1][0] == 0:
# # if debug: print 'o2-', spl_read.pos + 1
# spl_counter_b[spl_read.pos + 1] += 1
# spl_scaled_counter_b[spl_read.pos + 1] += (1-10**(-spl_read.mapq/10.0))
# # tally up the splitters
# sr_ref_a = int(round(sum(ref_counter_a[p] for p in xrange(posA - split_slop, posA + split_slop + 1)) / float(2 * split_slop + 1)))
# sr_spl_a = sum(spl_counter_a[p] for p in xrange(posA-split_slop, posA+split_slop + 1))
# sr_ref_b = int(round(sum(ref_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1)) / float(2 * split_slop + 1)))
# sr_spl_b = sum(spl_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1))
# sr_ref_scaled_a = sum(ref_scaled_counter_a[p] for p in xrange(posA - split_slop, posA + split_slop + 1)) / float(2 * split_slop + 1)
# sr_spl_scaled_a = sum(spl_scaled_counter_a[p] for p in xrange(posA-split_slop, posA+split_slop + 1))
# sr_ref_scaled_b = sum(ref_scaled_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1)) / float(2 * split_slop + 1)
# sr_spl_scaled_b = sum(spl_scaled_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1))
# # Count paired-end discordants and concordants
# (conc_counter_b,
# disc_counter_b,
# conc_scaled_counter_b,
# disc_scaled_counter_b) = count_pairedend(chromB, posB, ciB,
# chromA, posA, ciA,
# o2, o1,
# var.info['SVTYPE'],
# sample,
# z, discflank)
# if debug:
# print '--------------------'
# print sample.name
# print 'sr_a', '(ref, alt)', sr_ref_a, sr_spl_a
# print 'pe_a', '(ref, alt)', conc_counter_a, disc_counter_a
# print 'sr_b', '(ref, alt)', sr_ref_b, sr_spl_b
# print 'pe_b', '(ref, alt)', conc_counter_b, disc_counter_b
# print 'sr_a_scaled', '(ref, alt)', sr_ref_scaled_a, sr_spl_scaled_a
# print 'pe_a_scaled', '(ref, alt)', conc_scaled_counter_a, disc_scaled_counter_a
# print 'sr_b_scaled', '(ref, alt)', sr_ref_scaled_b, sr_spl_scaled_b
# print 'pe_b_scaled', '(ref, alt)', conc_scaled_counter_b, disc_scaled_counter_b
# # merge the breakend support
# split_ref = 0 # set these to zero unless there are informative alt bases for the ev type
# disc_ref = 0
# split_alt = sr_spl_a + sr_spl_b
# if split_alt > 0:
# split_ref = sr_ref_a + sr_ref_b
# disc_alt = disc_counter_a + disc_counter_b
# if disc_alt > 0:
# disc_ref = conc_counter_a + conc_counter_b
# if split_alt == 0 and disc_alt == 0:
# split_ref = sr_ref_a + sr_ref_b
# disc_ref = conc_counter_a + conc_counter_b
# split_scaled_ref = 0 # set these to zero unless there are informative alt bases for the ev type
# disc_scaled_ref = 0
# split_scaled_alt = sr_spl_scaled_a + sr_spl_scaled_b
# if int(split_scaled_alt) > 0:
# split_scaled_ref = sr_ref_scaled_a + sr_ref_scaled_b
# disc_scaled_alt = disc_scaled_counter_a + disc_scaled_counter_b
# if int(disc_scaled_alt) > 0:
# disc_scaled_ref = conc_scaled_counter_a + conc_scaled_counter_b
# if int(split_scaled_alt) == 0 and int(disc_scaled_alt) == 0: # if no alt alleles, set reference
# split_scaled_ref = sr_ref_scaled_a + sr_ref_scaled_b
# disc_scaled_ref = conc_scaled_counter_a + conc_scaled_counter_b
# if split_scaled_alt + split_scaled_ref + disc_scaled_alt + disc_scaled_ref > 0:
# # get bayesian classifier
# if var.info['SVTYPE'] == "DUP": is_dup = True
# else: is_dup = False
# gt_lplist = bayes_gt(int(split_weight * split_scaled_ref) + int(disc_weight * disc_scaled_ref), int(split_weight * split_scaled_alt) + int(disc_weight * disc_scaled_alt), is_dup)
# gt_idx = gt_lplist.index(max(gt_lplist))
# # print log probabilities of homref, het, homalt
# if debug:
# print gt_lplist
# # set the overall variant QUAL score and sample specific fields
# var.genotype(sample.name).set_format('GL', ','.join(['%.0f' % x for x in gt_lplist]))
# var.genotype(sample.name).set_format('DP', int(split_scaled_ref + split_scaled_alt + disc_scaled_ref + disc_scaled_alt))
# var.genotype(sample.name).set_format('AO', int(split_scaled_alt + disc_scaled_alt))
# var.genotype(sample.name).set_format('RO', int(split_scaled_ref + disc_scaled_ref))
# # if detailed:
# var.genotype(sample.name).set_format('AS', int(split_scaled_alt))
# var.genotype(sample.name).set_format('RS', int(split_scaled_ref))
# var.genotype(sample.name).set_format('AP', int(disc_scaled_alt))
# var.genotype(sample.name).set_format('RP', int(disc_scaled_ref))
# # assign genotypes
# gt_sum = 0
# for gt in gt_lplist:
# try:
# gt_sum += 10**gt
# except OverflowError:
# gt_sum += 0
# if gt_sum > 0:
# gt_sum_log = math.log(gt_sum, 10)
# sample_qual = abs(-10 * (gt_lplist[0] - gt_sum_log)) # phred-scaled probability site is non-reference in this sample
# if 1 - (10**gt_lplist[gt_idx] / 10**gt_sum_log) == 0:
# phred_gq = 200
# else:
# phred_gq = abs(-10 * math.log(1 - (10**gt_lplist[gt_idx] / 10**gt_sum_log), 10))
# var.genotype(sample.name).set_format('GQ', phred_gq)
# var.genotype(sample.name).set_format('SQ', sample_qual)
# var.qual += sample_qual
# if gt_idx == 1:
# var.genotype(sample.name).set_format('GT', '0/1')
# elif gt_idx == 2:
# var.genotype(sample.name).set_format('GT', '1/1')
# elif gt_idx == 0:
# var.genotype(sample.name).set_format('GT', '0/0')
# else:
# var.genotype(sample.name).set_format('GQ', '.')
# var.genotype(sample.name).set_format('SQ', '.')
# var.genotype(sample.name).set_format('GT', './.')
# else:
# var.genotype(sample.name).set_format('GT', './.')
# var.qual = 0
# var.genotype(sample.name).set_format('GQ', '.')
# var.genotype(sample.name).set_format('GL', '.')
# var.genotype(sample.name).set_format('DP', 0)
# var.genotype(sample.name).set_format('AO', 0)
# var.genotype(sample.name).set_format('RO', 0)
# # if detailed:
# var.genotype(sample.name).set_format('AS', 0)
# var.genotype(sample.name).set_format('RS', 0)
# var.genotype(sample.name).set_format('AP', 0)
# var.genotype(sample.name).set_format('RP', 0)
# after all samples have been processed, write
vcf_out.write(var.get_var_string() + '\n')
if var.info['SVTYPE'] == 'BND':
var2.qual = var.qual
var2.active_formats = var.active_formats
var2.genotype = var.genotype
vcf_out.write(var2.get_var_string() + '\n')
vcf_out.close()
return
# --------------------------------------
# main function
def main():
# parse the command line args
args = get_args()
# call primary function
sv_genotype(args.input_vcf)
# close the files
args.input_vcf.close()
# initialize the script
if __name__ == '__main__':
try:
sys.exit(main())
    except IOError as e:
if e.errno != 32: # ignore SIGPIPE
raise
| abelhj/svtools | svtools/bin/svtyper/scripts/vcf_group_multiline.py | Python | mit | 27,563 | ["pysam"] | 0b0e0b4d9fa2fb61002abcd1e529e84daef249e8d6fc481c574c41ad1dbac7f7 |
|
#!/usr/bin/env python
"""
Python implementation of VoronoiDelaunayGrid, a class used to create and manage
unstructured, irregular grids for 2D numerical models.
Do NOT add new documentation here. Grid documentation is now built in a semi-
automated fashion. To modify the text seen on the web, edit the files
`docs/text_for_[gridfile].py.txt`.
"""
import numpy as np
from six.moves import range
from landlab.grid.base import (ModelGrid, CORE_NODE, BAD_INDEX_VALUE,
INACTIVE_LINK)
from landlab.core.utils import (as_id_array, sort_points_by_x_then_y,
argsort_points_by_x_then_y,
anticlockwise_argsort_points)
from .decorators import return_readonly_id_array
from scipy.spatial import Voronoi
def simple_poly_area(x, y):
"""Calculates and returns the area of a 2-D simple polygon.
Input vertices must be in sequence (clockwise or counterclockwise). *x*
and *y* are arrays that give the x- and y-axis coordinates of the
polygon's vertices.
Parameters
----------
x : ndarray
x-coordinates of of polygon vertices.
y : ndarray
y-coordinates of of polygon vertices.
Returns
-------
out : float
Area of the polygon
Examples
--------
>>> import numpy as np
>>> from landlab.grid.voronoi import simple_poly_area
>>> x = np.array([3., 1., 1., 3.])
>>> y = np.array([1.5, 1.5, 0.5, 0.5])
>>> simple_poly_area(x, y)
2.0
If the input coordinate arrays are 2D, calculate the area of each polygon.
Note that when used in this mode, all polygons must have the same
number of vertices, and polygon vertices are listed column-by-column.
>>> x = np.array([[ 3., 1., 1., 3.],
... [-2., -2., -1., -1.]]).T
>>> y = np.array([[1.5, 1.5, 0.5, 0.5],
... [ 0., 1., 2., 0.]]).T
>>> simple_poly_area(x, y)
array([ 2. , 1.5])
"""
# For short arrays (less than about 100 elements) it seems that the
# Python sum is faster than the numpy sum. Likewise for the Python
# built-in abs.
return .5 * abs(sum(x[:-1] * y[1:] - x[1:] * y[:-1]) +
x[-1] * y[0] - x[0] * y[-1])
def calculate_link_lengths(pts, link_from, link_to):
"""Calculates and returns length of links between nodes.
Parameters
----------
pts : Nx2 numpy array containing (x,y) values
link_from : 1D numpy array containing index numbers of nodes at starting
point ("from") of links
link_to : 1D numpy array containing index numbers of nodes at ending point
("to") of links
Returns
-------
out : ndarray
1D numpy array containing horizontal length of each link
Examples
--------
>>> import numpy as np
>>> from landlab.grid.voronoi import calculate_link_lengths
>>> pts = np.array([[0.,0.],[3.,0.],[3.,4.]]) # 3:4:5 triangle
>>> lfrom = np.array([0,1,2])
>>> lto = np.array([1,2,0])
>>> calculate_link_lengths(pts, lfrom, lto)
array([ 3., 4., 5.])
"""
dx = pts[link_to, 0] - pts[link_from, 0]
dy = pts[link_to, 1] - pts[link_from, 1]
link_length = np.sqrt(dx * dx + dy * dy)
return link_length
class VoronoiDelaunayGrid(ModelGrid):
"""
This inherited class implements an unstructured grid in which cells are
Voronoi polygons and nodes are connected by a Delaunay triangulation. Uses
scipy.spatial module to build the triangulation.
Create an unstructured grid from points whose coordinates are given
by the arrays *x*, *y*.
Parameters
----------
x : array_like
x-coordinate of points
y : array_like
y-coordinate of points
reorient_links (optional) : bool
whether to point all links to the upper-right quadrant
Returns
-------
VoronoiDelaunayGrid
A newly-created grid.
Examples
--------
>>> from numpy.random import rand
>>> from landlab.grid import VoronoiDelaunayGrid
>>> x, y = rand(25), rand(25)
>>> vmg = VoronoiDelaunayGrid(x, y) # node_x_coords, node_y_coords
>>> vmg.number_of_nodes
25
>>> import numpy as np
>>> x = [0, 0.1, 0.2, 0.3,
... 1, 1.1, 1.2, 1.3,
... 2, 2.1, 2.2, 2.3,]
>>> y = [0, 1, 2, 3,
... 0, 1, 2, 3,
... 0, 1, 2, 3]
>>> vmg = VoronoiDelaunayGrid(x, y)
>>> vmg.node_x # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 2. ,
0.1, 1.1, 2.1,
0.2, 1.2, 2.2,
0.3, 1.3, 2.3])
>>> vmg.node_y # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0.,
1., 1., 1.,
2., 2., 2.,
3., 3., 3.])
"""
def __init__(self, x=None, y=None, reorient_links=True, **kwds):
"""
Create a Voronoi Delaunay grid from a set of points.
Create an unstructured grid from points whose coordinates are given
by the arrays *x*, *y*.
Parameters
----------
x : array_like
x-coordinate of points
y : array_like
y-coordinate of points
reorient_links (optional) : bool
whether to point all links to the upper-right quadrant
Returns
-------
VoronoiDelaunayGrid
A newly-created grid.
Examples
--------
>>> from numpy.random import rand
>>> from landlab.grid import VoronoiDelaunayGrid
>>> x, y = rand(25), rand(25)
>>> vmg = VoronoiDelaunayGrid(x, y) # node_x_coords, node_y_coords
>>> vmg.number_of_nodes
25
"""
if (x is not None) and (y is not None):
self._initialize(x, y, reorient_links)
super(VoronoiDelaunayGrid, self).__init__(**kwds)
def _initialize(self, x, y, reorient_links=True):
"""
Creates an unstructured grid around the given (x,y) points.
"""
x = np.asarray(x, dtype=float).reshape((-1, ))
y = np.asarray(y, dtype=float).reshape((-1, ))
if x.size != y.size:
raise ValueError('x and y arrays must have the same size')
# Make a copy of the points in a 2D array (useful for calls to geometry
# routines, but takes extra memory space).
pts = np.zeros((len(x), 2))
pts[:, 0] = x
pts[:, 1] = y
self.pts = sort_points_by_x_then_y(pts)
x = self.pts[:, 0]
y = self.pts[:, 1]
# NODES AND CELLS: Set up information pertaining to nodes and cells:
# - number of nodes
# - node x, y coordinates
# - default boundary status
# - interior and boundary nodes
# - nodes associated with each cell and active cell
# - cells and active cells associated with each node
# (or BAD_VALUE_INDEX if none)
#
# Assumptions we make here:
# - all interior (non-perimeter) nodes have cells (this should be
# guaranteed in a Delaunay triangulation, but there may be
# special cases)
# - all cells are active (later we'll build a mechanism for the user
# specify a subset of cells as active)
#
self._node_x = x
self._node_y = y
[self._node_status, self._core_nodes, self._boundary_nodes] = \
self._find_perimeter_nodes_and_BC_set(pts)
[self._cell_at_node, self._node_at_cell] = \
self._node_to_cell_connectivity(self._node_status,
self.number_of_cells)
active_cell_at_node = self.cell_at_node[self.core_nodes]
# ACTIVE CELLS: Construct Voronoi diagram and calculate surface area of
# each active cell.
vor = Voronoi(self.pts)
self.vor = vor
self._area_of_cell = np.zeros(self.number_of_cells)
for node in self._node_at_cell:
xv = vor.vertices[vor.regions[vor.point_region[node]], 0]
yv = vor.vertices[vor.regions[vor.point_region[node]], 1]
self._area_of_cell[self.cell_at_node[node]] = (
simple_poly_area(xv, yv))
# LINKS: Construct Delaunay triangulation and construct lists of link
# "from" and "to" nodes.
(self._node_at_link_tail,
self._node_at_link_head,
_,
self._face_width) = \
self._create_links_and_faces_from_voronoi_diagram(vor)
self._status_at_link = np.full(len(self._node_at_link_tail),
INACTIVE_LINK, dtype=int)
# Sort them by midpoint coordinates
self._sort_links_by_midpoint()
# Optionally re-orient links so that they all point within upper-right
# semicircle
if reorient_links:
self._reorient_links_upper_right()
# LINKS: Calculate link lengths
self._link_length = calculate_link_lengths(self.pts,
self.node_at_link_tail,
self.node_at_link_head)
# LINKS: inlink and outlink matrices
# SOON TO BE DEPRECATED
self._setup_inlink_and_outlink_matrices()
# ACTIVE LINKS: Create list of active links, as well as "from" and "to"
# nodes of active links.
self._reset_link_status_list()
# NODES & LINKS: IDs and directions of links at each node
self._create_links_and_link_dirs_at_node()
# LINKS: set up link unit vectors and node unit-vector sums
self._create_link_unit_vectors()
# create link x, y:
self._create_link_face_coords()
self._create_neighbors()
@property
def number_of_patches(self):
"""Number of patches.
Returns the number of patches over the grid.
LLCATS: PINF
"""
try:
return self._number_of_patches
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._number_of_patches
@property
def nodes_at_patch(self):
"""Get the four nodes at the corners of each patch in a regular grid.
LLCATS: PINF NINF CONN
"""
try:
return self._nodes_at_patch
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._nodes_at_patch
@property
@return_readonly_id_array
def patches_at_node(self):
"""
Return a (nnodes, max_voronoi_polygon_sides) array of patches at nodes.
The patches are returned in LL standard order (ccw from E), with any
nonexistent patches recorded after the ids of existing faces.
Nonexistent patches are ID'ed as -1.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 3)
>>> mg.patches_at_node # doctest: +SKIP
array([[ 0, 2, -1, -1, -1, -1],
[ 1, 3, 0, -1, -1, -1],
[ 4, 1, -1, -1, -1, -1],
[ 5, 2, -1, -1, -1, -1],
[ 6, 8, 5, 2, 0, 3],
[ 7, 9, 6, 3, 1, 4],
[ 7, 4, -1, -1, -1, -1],
[ 5, 8, -1, -1, -1, -1],
[ 8, 6, 9, -1, -1, -1],
[ 9, 7, -1, -1, -1, -1]])
LLCATS: NINF PINF CONN
"""
try:
return self._patches_at_node
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._patches_at_node
@property
@return_readonly_id_array
def links_at_patch(self):
"""Returns the links forming each patch.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.links_at_patch
array([[ 3, 2, 0],
[ 5, 1, 2],
[ 6, 3, 4],
[ 8, 7, 5],
[10, 9, 6],
[11, 8, 9]])
LLCATS: LINF PINF CONN
"""
try:
return self._links_at_patch
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._links_at_patch
@property
@return_readonly_id_array
def patches_at_link(self):
"""Returns the patches adjoined to each link.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.patches_at_link
array([[ 0, -1],
[ 1, -1],
[ 0, 1],
[ 0, 2],
[ 2, -1],
[ 1, 3],
[ 2, 4],
[ 3, -1],
[ 3, 5],
[ 4, 5],
[ 4, -1],
[ 5, -1]])
LLCATS: PINF LINF CONN
"""
try:
return self._patches_at_link
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._patches_at_link
def _find_perimeter_nodes_and_BC_set(self, pts):
"""
Uses a convex hull to locate the perimeter nodes of the Voronoi grid,
then sets them as fixed value boundary nodes.
It then sets/updates the various relevant node lists held by the grid,
and returns *node_status*, *core_nodes*, *boundary_nodes*.
"""
# Calculate the convex hull for the set of points
from scipy.spatial import ConvexHull
        hull = ConvexHull(pts, qhull_options='Qc')  # see below why we use 'Qc'
# The ConvexHull object lists the edges that form the hull. We need to
# get from this list of edges the unique set of nodes. To do this, we
# first flatten the list of vertices that make up all the hull edges
# ("simplices"), so it becomes a 1D array. With that, we can use the
# set() function to turn the array into a set, which removes duplicate
# vertices. Then we turn it back into an array, which now contains the
# set of IDs for the nodes that make up the convex hull.
# The next thing to worry about is the fact that the mesh perimeter
# might contain nodes that are co-planar (that is, co-linear in our 2D
# world). For example, if you make a set of staggered points for a
# hexagonal lattice using make_hex_points(), there will be some
# co-linear points along the perimeter. The ones of these that don't
# form convex corners won't be included in convex_hull_nodes, but they
# are nonetheless part of the perimeter and need to be included in
        # the list of boundary_nodes. To deal with this, we pass the 'Qc'
# option to ConvexHull, which makes it generate a list of coplanar
# points. We include these in our set of boundary nodes.
convex_hull_nodes = np.array(list(set(hull.simplices.flatten())))
coplanar_nodes = hull.coplanar[:, 0]
boundary_nodes = as_id_array(np.concatenate(
(convex_hull_nodes, coplanar_nodes)))
# Now we'll create the "node_status" array, which contains the code
# indicating whether the node is interior and active (=0) or a
# boundary (=1). This means that all perimeter (convex hull) nodes are
# initially flagged as boundary code 1. An application might wish to
# change this so that, for example, some boundaries are inactive.
node_status = np.zeros(len(pts[:, 0]), dtype=np.int8)
node_status[boundary_nodes] = 1
# It's also useful to have a list of interior nodes
core_nodes = as_id_array(np.where(node_status == 0)[0])
# save the arrays and update the properties
self._node_status = node_status
        self._core_cells = np.arange(len(core_nodes), dtype=int)
self._node_at_cell = core_nodes
self._boundary_nodes = boundary_nodes
# Return the results
return node_status, core_nodes, boundary_nodes
def _create_cell_areas_array(self):
"""Set up an array of cell areas."""
self._cell_areas = self.active_cell_areas
return self._cell_areas
@staticmethod
def _node_to_cell_connectivity(node_status, ncells):
"""Set up node connectivity.
Creates and returns the following arrays:
* For each node, the ID of the corresponding cell, or
BAD_INDEX_VALUE if the node has no cell.
* For each cell, the ID of the corresponding node.
Parameters
----------
node_status : ndarray of ints
1D array containing the boundary status code for each node.
ncells : ndarray of ints
Number of cells (must equal the number of occurrences of CORE_NODE
in node_status).
Examples
--------
>>> from landlab import VoronoiDelaunayGrid as vdg
>>> import numpy as np
>>> from landlab.grid import BAD_INDEX_VALUE
>>> ns = np.array([1, 0, 0, 1, 0]) # 3 interior, 2 boundary nodes
>>> [node_cell, cell_node] = vdg._node_to_cell_connectivity(ns, 3)
>>> node_cell[1:3]
array([0, 1])
>>> node_cell[0] == BAD_INDEX_VALUE
True
>>> cell_node
array([1, 2, 4])
"""
assert ncells == np.count_nonzero(node_status == CORE_NODE), \
'ncells must equal number of CORE_NODE values in node_status'
cell = 0
node_cell = np.ones(len(node_status), dtype=int) * BAD_INDEX_VALUE
cell_node = np.zeros(ncells, dtype=int)
for node in range(len(node_cell)):
if node_status[node] == CORE_NODE:
node_cell[node] = cell
cell_node[cell] = node
cell += 1
return node_cell, cell_node
@staticmethod
def _create_links_from_triangulation(tri):
"""Create links from a Delaunay triangulation.
From a Delaunay Triangulation of a set of points, contained in a
scipy.spatial.Delaunay object "tri", creates and returns:
* a numpy array containing the ID of the "from" node for each link
* a numpy array containing the ID of the "to" node for each link
* the number of links in the triangulation
Examples
--------
>>> from scipy.spatial import Delaunay
>>> import numpy as np
>>> from landlab.grid import VoronoiDelaunayGrid as vdg
>>> pts = np.array([[ 0., 0.], [ 1., 0.], [ 1., 0.87],
... [-0.5, 0.87], [ 0.5, 0.87], [ 0., 1.73],
... [ 1., 1.73]])
>>> dt = Delaunay(pts)
>>> [myfrom,myto,nl] = vdg._create_links_from_triangulation(dt)
>>> print myfrom, myto, nl # doctest: +SKIP
[5 3 4 6 4 3 0 4 1 1 2 6] [3 4 5 5 6 0 4 1 0 2 4 2] 12
"""
# Calculate how many links there will be and create the arrays.
#
# The number of links equals 3 times the number of triangles minus
# half the number of shared links. Finding out the number of shared
# links is easy: for every shared link, there is an entry in the
# tri.neighbors array that is > -1 (indicating that the triangle has a
# neighbor opposite a given vertex; in other words, two triangles are
# sharing an edge).
num_shared_links = np.count_nonzero(tri.neighbors > -1)
num_links = 3 * tri.nsimplex - num_shared_links // 2
link_fromnode = np.zeros(num_links, dtype=int)
link_tonode = np.zeros(num_links, dtype=int)
# Sweep through the list of triangles, assigning "from" and "to" nodes
# to the list of links.
#
# The basic algorithm works as follows. For each triangle, we will add
# its 3 edges as links. However, we have to make sure that each shared
# edge is added only once. To do this, we keep track of whether or not
# each triangle has been processed yet using a boolean array called
# "tridone". When we look at a given triangle, we check each vertex in
# turn. If there is no neighboring triangle opposite that vertex, then
# we need to add the corresponding edge. If there is a neighboring
# triangle but we haven't processed it yet, we also need to add the
# edge. If neither condition is true, then this edge has already been
# added, so we skip it.
link_id = 0
tridone = np.zeros(tri.nsimplex, dtype=bool)
for t in range(tri.nsimplex): # loop over triangles
for i in range(0, 3): # loop over vertices & neighbors
if tri.neighbors[t, i] == -1 or not tridone[
tri.neighbors[t, i]]:
link_fromnode[link_id] = tri.simplices[
t, np.mod(i + 1, 3)]
link_tonode[link_id] = tri.simplices[
t, np.mod(i + 2, 3)]
link_id += 1
tridone[t] = True
# save the results
# self.node_at_link_tail = link_fromnode
# self.node_at_link_head = link_tonode
# Return the results
return link_fromnode, link_tonode, num_links
@staticmethod
def _is_valid_voronoi_ridge(vor, n):
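        """Check whether Voronoi ridge *n* is usable as a face: both of its
        vertices must be defined (scipy flags an undefined endpoint with -1)
        and neither vertex may sit at a suspiciously large coordinate, which
        screens out effectively unbounded ridges along the hull."""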
SUSPICIOUSLY_BIG = 40000000.0
return (vor.ridge_vertices[n][0] != -1 and
vor.ridge_vertices[n][1] != -1 and
np.amax(np.abs(vor.vertices[
vor.ridge_vertices[n]])) < SUSPICIOUSLY_BIG)
@staticmethod
def _create_links_and_faces_from_voronoi_diagram(vor):
"""
From a Voronoi diagram object created by scipy.spatial.Voronoi(),
builds and returns:
1. Arrays of link tail and head nodes
2. Array of link IDs for each active link
        3. Array containing the width of each face
Parameters
----------
vor : scipy.spatial.Voronoi
Voronoi object initialized with the grid nodes.
Returns
-------
out : tuple of ndarrays
- link_fromnode = "from" node for each link (len=num_links)
- link_tonode = "to" node for each link (len=num_links)
- active_links = link ID for each active link
(len=num_active_links)
            - face_width = width of each face (len=num_active_links)
Examples
--------
>>> import numpy as np
>>> from landlab.grid import VoronoiDelaunayGrid as vdg
>>> pts = np.array([[0., 0.], [1., 0.], [-0.5, 0.87], [0.5, 0.87],
... [1.5, 0.87], [0., 1.73], [1., 1.73]])
>>> from scipy.spatial import Voronoi
>>> vor = Voronoi(pts)
>>> [tn,hn,al,fw] = vdg._create_links_and_faces_from_voronoi_diagram(
... vor)
>>> tn
array([0, 0, 0, 1, 1, 2, 3, 2, 3, 6, 6, 6])
>>> hn
array([1, 2, 3, 3, 4, 3, 4, 5, 5, 3, 4, 5])
>>> al
array([2, 3, 5, 6, 8, 9])
>>> fw
array([ 0.57669199, 0.57669199, 0.575973 , 0.575973 , 0.57836419,
0.57836419])
"""
# Each Voronoi "ridge" corresponds to a link. The Voronoi object has an
# attribute ridge_points that contains the IDs of the nodes on either
# side (including ridges that have one of their endpoints undefined).
# So, we set the number of links equal to the number of ridges.
num_links = len(vor.ridge_points)
# Create the arrays for link from and to nodes
link_fromnode = -np.ones(num_links, dtype=int)
link_tonode = -np.ones(num_links, dtype=int)
# Ridges along the perimeter of the grid will have one of their
# endpoints undefined. The endpoints of each ridge are contained in
# vor.ridge_vertices, and an undefined vertex is flagged with -1.
# Ridges with both vertices defined correspond to faces and active
# links, while ridges with an undefined vertex correspond to inactive
# links. So, to find the number of active links, we subtract from the
# total number of links the number of occurrences of an undefined
# vertex.
num_active_links = num_links \
- np.count_nonzero(np.array(vor.ridge_vertices) == -1)
# Create arrays for active links and width of faces (which are Voronoi
# ridges).
active_links = -np.ones(num_active_links, dtype=int)
face_width = -np.ones(num_active_links)
# Find the order to sort by link midpoints
link_midpoints = np.zeros((num_links, 2))
for i in range(num_links):
link_midpoints[i][:] = (vor.points[vor.ridge_points[i, 0]] +
vor.points[vor.ridge_points[i, 1]])/2.
ind = argsort_points_by_x_then_y(link_midpoints)
# Loop through the list of ridges. For each ridge, there is a link, and
# its "from" and "to" nodes are the associated "points". In addition,
# if the ridge endpoints are defined, we have a face and an active
# link, so we add them to our arrays as well.
j = 0
for i in range(num_links):
link_fromnode[i] = vor.ridge_points[ind[i], 0]
link_tonode[i] = vor.ridge_points[ind[i], 1]
face_corner1 = vor.ridge_vertices[ind[i]][0]
face_corner2 = vor.ridge_vertices[ind[i]][1]
            # Add a face and an active link only if this is a valid
            # (finite) Voronoi ridge.
if VoronoiDelaunayGrid._is_valid_voronoi_ridge(vor, ind[i]):
dx = vor.vertices[face_corner2, 0] - \
vor.vertices[face_corner1, 0]
dy = vor.vertices[face_corner2, 1] - \
vor.vertices[face_corner1, 1]
face_width[j] = np.sqrt(dx * dx + dy * dy)
active_links[j] = i
j += 1
return link_fromnode, link_tonode, active_links, face_width
def _reorient_links_upper_right(self):
"""Reorient links to all point within the upper-right semi-circle.
Notes
-----
"Upper right semi-circle" means that the angle of the link with respect
to the vertical (measured clockwise) falls between -45 and +135. More
        precisely, if :math:`\theta` is the angle,
        :math:`-45 \le \theta < 135`.
For example, the link could point up and left as much as -45, but not
-46. It could point down and right as much as 134.9999, but not 135. It
will never point down and left, or up-but-mostly-left, or
right-but-mostly-down.
Examples
--------
>>> from landlab.grid import HexModelGrid
>>> hg = HexModelGrid(3, 2, 1., reorient_links=True)
>>> hg.node_at_link_tail
array([0, 0, 0, 1, 1, 2, 3, 2, 3, 3, 4, 5])
>>> hg.node_at_link_head
array([1, 2, 3, 3, 4, 3, 4, 5, 5, 6, 6, 6])
"""
# Calculate the horizontal (dx) and vertical (dy) link offsets
link_dx = self.node_x[self.node_at_link_head] - \
self.node_x[self.node_at_link_tail]
link_dy = self.node_y[self.node_at_link_head] - \
self.node_y[self.node_at_link_tail]
# Calculate the angle, clockwise, with respect to vertical, then rotate
# by 45 degrees counter-clockwise (by adding pi/4)
link_angle = np.arctan2(link_dx, link_dy) + np.pi / 4
# The range of values should be -180 to +180 degrees (but in radians).
# It won't be after the above operation, because angles that were
# > 135 degrees will now have values > 180. To correct this, we
# subtract 360 (i.e., 2 pi radians) from those that are > 180 (i.e.,
# > pi radians).
link_angle -= 2 * np.pi * (link_angle >= np.pi)
# Find locations where the angle is negative; these are the ones we
# want to flip
(flip_locs, ) = np.where(link_angle < 0.)
# If there are any flip locations, proceed to switch their fromnodes
# and tonodes; otherwise, we're done
if len(flip_locs) > 0:
            # Temporarily store the fromnodes for these
fromnode_temp = self.node_at_link_tail[flip_locs]
# The fromnodes now become the tonodes, and vice versa
self._node_at_link_tail[
flip_locs] = self.node_at_link_head[flip_locs]
self._node_at_link_head[flip_locs] = fromnode_temp
def _create_patches_from_delaunay_diagram(self, pts, vor):
"""
        Uses a Delaunay triangulation drawn from the provided points to
        generate an array of patches and patch-node-link connectivity.
        Builds the patch connectivity arrays in place; returns nothing.
DEJH, 10/3/14, modified May 16.
"""
from scipy.spatial import Delaunay
from landlab.core.utils import anticlockwise_argsort_points_multiline
from .cfuncs import find_rows_containing_ID, \
create_patches_at_element, create_links_at_patch
tri = Delaunay(pts)
assert np.array_equal(tri.points, vor.points)
nodata = -1
self._nodes_at_patch = as_id_array(tri.simplices)
# self._nodes_at_patch = np.empty_like(_nodes_at_patch)
self._number_of_patches = tri.simplices.shape[0]
# get the patches in order:
patches_xy = np.empty((self._number_of_patches, 2), dtype=float)
patches_xy[:, 0] = np.mean(self.node_x[self._nodes_at_patch],
axis=1)
patches_xy[:, 1] = np.mean(self.node_y[self._nodes_at_patch],
axis=1)
orderforsort = argsort_points_by_x_then_y(patches_xy)
self._nodes_at_patch = self._nodes_at_patch[orderforsort, :]
patches_xy = patches_xy[orderforsort, :]
# get the nodes around the patch in order:
nodes_xy = np.empty((3, 2), dtype=float)
# perform a CCW sort without a line-by-line loop:
patch_nodes_x = self.node_x[self._nodes_at_patch]
patch_nodes_y = self.node_y[self._nodes_at_patch]
anticlockwise_argsort_points_multiline(patch_nodes_x, patch_nodes_y,
out=self._nodes_at_patch)
        # Need to build a rectangular, padded array for patches_at_node.
        # The maximum number of patches at any node equals the number of
        # sides of the Voronoi region with the most sides.
max_dimension = len(max(vor.regions, key=len))
self._patches_at_node = np.full(
(self.number_of_nodes, max_dimension), nodata, dtype=int)
self._nodes_at_patch = as_id_array(self._nodes_at_patch)
self._patches_at_node = as_id_array(self._patches_at_node)
create_patches_at_element(self._nodes_at_patch,
self.number_of_nodes,
self._patches_at_node)
# build the patch-link connectivity:
self._links_at_patch = np.empty((self._number_of_patches, 3),
dtype=int)
create_links_at_patch(self._nodes_at_patch, self._links_at_node,
self._number_of_patches, self._links_at_patch)
patch_links_x = self.x_of_link[self._links_at_patch]
patch_links_y = self.y_of_link[self._links_at_patch]
anticlockwise_argsort_points_multiline(patch_links_x, patch_links_y,
out=self._links_at_patch)
self._patches_at_link = np.empty((self.number_of_links, 2),
dtype=int)
self._patches_at_link.fill(-1)
create_patches_at_element(self._links_at_patch, self.number_of_links,
self._patches_at_link)
# a sort of the links will be performed here once we have corners
self._patches_created = True
def _create_neighbors(self):
"""Create the _neighbors_at_node property.
"""
self._neighbors_at_node = self.links_at_node.copy()
nodes_at_link = np.empty((self.number_of_links, 2))
nodes_at_link[:, 0] = self.node_at_link_tail
nodes_at_link[:, 1] = self.node_at_link_head
both_nodes = nodes_at_link[self.links_at_node]
nodes = np.arange(self.number_of_nodes, dtype=int)
        # ^we have to do this, as for a hex grid it is possible that mg.nodes
        # is not returned in ID order.
for i in range(both_nodes.shape[1]):
centernottail = np.not_equal(both_nodes[:, i, 0], nodes)
centernothead = np.not_equal(both_nodes[:, i, 1], nodes)
self._neighbors_at_node[centernottail, i] = both_nodes[
centernottail, i, 0]
self._neighbors_at_node[centernothead, i] = both_nodes[
centernothead, i, 1]
# restamp the missing links:
self._neighbors_at_node[
self.links_at_node == BAD_INDEX_VALUE] = BAD_INDEX_VALUE
def save(self, path, clobber=False):
"""Save a grid and fields.
This method uses cPickle to save a Voronoi grid as a cPickle file.
At the time of coding, this is the only convenient output format
for Voronoi grids, but support for netCDF is likely coming.
All fields will be saved, along with the grid.
The recommended suffix for the save file is '.grid'. This will
be added to your save if you don't include it.
This method is equivalent to
:py:func:`~landlab.io.native_landlab.save_grid`, and
:py:func:`~landlab.io.native_landlab.load_grid` can be used to
load these files.
Caution: Pickling can be slow, and can produce very large files.
Caution 2: Future updates to Landlab could potentially render old
saves unloadable.
Parameters
----------
path : str
Path to output file.
clobber : bool (defaults to false)
Set to true to allow overwriting
Examples
--------
>>> from landlab import VoronoiDelaunayGrid
>>> import numpy as np
>>> import os
>>> x = np.random.rand(20)
>>> y = np.random.rand(20)
>>> vmg = VoronoiDelaunayGrid(x,y)
>>> vmg.save('./mytestsave.grid')
>>> os.remove('mytestsave.grid') #to remove traces of this test
LLCATS: GINF
"""
import os
from six.moves import cPickle
if os.path.exists(path) and not clobber:
raise ValueError('file exists')
(base, ext) = os.path.splitext(path)
if ext != '.grid':
ext = ext + '.grid'
path = base + ext
with open(path, 'wb') as fp:
cPickle.dump(self, fp)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
csherwood-usgs/landlab
|
landlab/grid/voronoi.py
|
Python
|
mit
| 34,968
|
[
"NetCDF"
] |
7a0763d37180aa2ac8443157f5f75df3e49603788cc1f6314ac55586b0947516
|
"""
This module supports simple linear one-port networks based on the
following ideal components:
V independent voltage source
I independent current source
R resistor
C capacitor
L inductor
These components are converted to s-domain models and so capacitor and
inductor components can be specified with an initial voltage and an
initial current, respectively, to model transient responses.
One-ports can either be connected in series (+) or parallel (|) to
create a new one-port.
Copyright 2014--2022 Michael Hayes, UCECE
"""
from __future__ import division
from .functions import Heaviside, cos, exp
from .sym import omega0sym, tsym, ksym, oo
from .symbols import j, t, s
from .network import Network
from .immittancemixin import ImmittanceMixin
from .impedance import impedance
from .admittance import admittance
from .voltage import voltage
from .current import current
from sympy import Derivative, Integral
from warnings import warn
__all__ = ('V', 'I', 'v', 'i', 'R', 'NR', 'L', 'C', 'G', 'Y', 'Z',
'Vdc', 'Vstep', 'Idc', 'Istep', 'Vac', 'sV', 'sI',
'Iac', 'Vnoise', 'Inoise',
'Par', 'Ser', 'Xtal', 'FerriteBead', 'CPE', 'series', 'parallel',
'ladder')
def _check_oneport_args(args):
for arg1 in args:
if not isinstance(arg1, OnePort):
raise ValueError('%s not a OnePort' % arg1)
class OnePort(Network, ImmittanceMixin):
"""One-port network
There are four major types of OnePort:
VoltageSource
CurrentSource
Impedance
Admittance
ParSer for combinations of OnePort
Attributes: Y, Z, Voc, Isc, y, z, voc, isc
Y = Y(s) admittance
Z = Z(s) impedance
Voc open-circuit voltage in appropriate transform domain
Isc short-circuit current in appropriate transform domain
y = y(t) impulse response of admittance
z = z(t) impulse response of impedance
voc = voc(t) open-circuit voltage time response
isc = isc(t) short-circuit current time response
"""
# Dimensions and separations of component with horizontal orientation.
height = 0.3
hsep = 0.5
width = 1
wsep = 0.5
_Z = None
_Y = None
_Voc = None
_Isc = None
@property
def impedance(self):
if self._Z is not None:
return self._Z
if self._Y is not None:
return 1 / self._Y
if self._Voc is not None:
return impedance(0)
if self._Isc is not None:
return 1 / admittance(0)
raise ValueError('_Isc, _Voc, _Y, or _Z undefined for %s' % self)
@property
def admittance(self):
if self._Y is not None:
return self._Y
return 1 / self.impedance
@property
def Voc(self):
"""Open-circuit voltage."""
if self._Voc is not None:
return self._Voc
if self._Isc is not None:
return self._Isc._mul(self.impedance)
if self._Z is not None or self._Y is not None:
return SuperpositionVoltage(0)
raise ValueError('_Isc, _Voc, _Y, or _Z undefined for %s' % self)
@property
def Isc(self):
"""Short-circuit current."""
if self._Isc is not None:
return self._Isc
return self.Voc._mul(self.admittance)
@property
def V(self):
"""Open-circuit voltage."""
return self.Voc
@property
def I(self):
"""Open-circuit current. Except for a current source this is zero."""
return SuperpositionCurrent(0)
@property
def i(self):
"""Open-circuit time-domain current. Except for a current source this
is zero."""
return self.I.time()
def __add__(self, OP):
"""Series combination"""
return Ser(self, OP)
def __or__(self, OP):
"""Parallel combination"""
return Par(self, OP)
@property
def has_series_I(self):
return self.is_current_source
@property
def has_parallel_V(self):
return self.is_voltage_source
def chain(self, TP):
"""Chain to a two-port. This is experimental."""
if isinstance(TP, OnePort):
raise ValueError('Cannot chain oneport with a oneport')
from .twoport import TwoPort
if not isinstance(TP, TwoPort):
raise ValueError('%s not a twoport' % TP)
return TP.source(self)
def series(self, OP):
"""Series combination"""
return Ser(self, OP)
def parallel(self, OP):
"""Parallel combination"""
return Par(self, OP)
def ladder(self, *args):
"""Create (unbalanced) ladder network"""
return Ladder(self, *args)
def lsection(self, OP2):
"""Create L section (voltage divider)"""
if not issubclass(OP2.__class__, OnePort):
raise TypeError('Argument not ', OnePort)
return LSection(self, OP2)
def tsection(self, OP2, OP3):
"""Create T section"""
if not issubclass(OP2.__class__, OnePort):
raise TypeError('Argument not ', OnePort)
if not issubclass(OP3.__class__, OnePort):
raise TypeError('Argument not ', OnePort)
return TSection(self, OP2, OP3)
def expand(self):
return self
def load(self, OP2):
if not issubclass(OP2.__class__, OnePort):
raise TypeError('Load argument not ', OnePort)
return LoadCircuit(self, OP2)
@property
def voc(self):
"""Open-circuit time-domain voltage."""
return self.Voc.time()
@property
def isc(self):
"""Short-circuit time-domain current."""
return self.Isc.time()
@property
def v(self):
"""Open-circuit time-domain voltage."""
return self.voc
@property
def z(self):
"""Impedance impulse-response."""
return self.impedance.time()
@property
def y(self):
"""Admittance impulse-response."""
return self.admittance.time()
def thevenin(self):
"""Simplify to a Thevenin network"""
new = self.simplify()
Voc = new.Voc
Z = new.impedance
if Voc.is_superposition and not Z.is_real:
warn('Detected superposition with reactive impedance, using s-domain.')
Z1 = Z
V1 = Voc.laplace()
elif Voc.is_ac:
Z1 = Z.subs(j * Voc.ac_keys()[0])
V1 = Voc.select(Voc.ac_keys()[0])
elif Voc.is_dc:
Z1 = Z.subs(0)
V1 = Voc(0)
else:
V1 = Voc
Z1 = Z
V1 = V1.cpt()
Z1 = Z1.cpt()
if Voc == 0:
return Z1
if Z == 0:
return V1
return Ser(Z1, V1)
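    # Example (sketch): (Vdc(10) + R(5) | R(5)).thevenin() reduces to roughly
    # Vdc(5) in series with R(2.5).  Note that Python's '+' binds tighter
    # than '|', so the expression parses as (Vdc(10) + R(5)) | R(5).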
def norton(self):
"""Simplify to a Norton network"""
new = self.simplify()
Isc = new.Isc
Y = new.admittance
if Isc.is_superposition and not Y.is_real:
warn('Detected superposition with reactive impedance, using s-domain.')
Y1 = Y
I1 = Isc.laplace()
elif Isc.is_ac:
Y1 = Y.subs(j * Isc.ac_keys()[0])
I1 = Isc.select(Isc.ac_keys()[0])
elif Isc.is_dc:
Y1 = Y.subs(0)
I1 = Isc(0)
else:
I1 = Isc
Y1 = Y
I1 = I1.cpt()
Y1 = Y1.cpt()
if Isc == 0:
return Y1
if Y == 0:
return I1
return Par(Y1, I1)
def s_model(self):
"""Convert to s-domain."""
if self._Voc is not None:
if self._Voc == 0:
return Z(self.impedance)
Voc = self._Voc.laplace()
if self.Z == 0:
return V(Voc)
return Ser(V(Voc), Z(self.impedance))
elif self._Isc is not None:
if self._Isc == 0:
return Y(self.admittance)
Isc = self._Isc.laplace()
if self.admittance == 0:
return I(Isc)
return Par(I(Isc), Y(self.admittance))
elif self._Z is not None:
return Z(self._Z)
elif self._Y is not None:
return Y(self._Y)
raise RuntimeError('Internal error')
def noise_model(self):
"""Convert to noise model."""
from .symbols import omega
if not isinstance(self, (R, G, Y, Z)):
return self
R1 = self.R
if R1 != 0:
Vn = Vnoise('sqrt(4 * k_B * T * %s)' % R1(j * omega))
return self + Vn
return self
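    # Example (sketch): R(100).noise_model() returns R(100) in series with a
    # noise voltage source of amplitude sqrt(4 * k_B * T * 100).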
def i_equation(self, v, kind='t'):
raise NotImplementedError('i_equation not defined')
def v_equation(self, i, kind='t'):
raise NotImplementedError('v_equation not defined')
def _Zkind(self, kind):
# This is for determining impedances
if not isinstance(kind, str):
# AC
domain = kind
elif kind in ('super', 'time', 't'):
domain = 't'
elif kind in ('laplace', 'ivp', 's'):
domain = 's'
elif kind == 'dc':
domain = 0
elif kind.startswith('n'):
domain = 'f'
else:
raise RuntimeError('Unhandled circuit kind ' + kind)
return self.Z(domain)
class ParSer(OnePort):
"""Parallel/serial class"""
def __str__(self):
str = ''
for m, arg in enumerate(self.args):
argstr = arg.__str__()
if isinstance(arg, ParSer) and arg.__class__ != self.__class__:
argstr = '(' + argstr + ')'
str += argstr
if m != len(self.args) - 1:
str += ' %s ' % self._operator
return str
def _repr_pretty_(self, p, cycle):
p.text(self.pretty())
def _repr_latex_(self):
return '$%s$' % self.latex()
def pretty(self, **kwargs):
str = ''
for m, arg in enumerate(self.args):
argstr = arg.pretty(**kwargs)
if isinstance(arg, ParSer) and arg.__class__ != self.__class__:
argstr = '(' + argstr + ')'
str += argstr
if m != len(self.args) - 1:
str += ' %s ' % self._operator
return str
def pprint(self):
print(self.pretty())
def latex(self):
str = ''
for m, arg in enumerate(self.args):
argstr = arg.latex()
if isinstance(arg, ParSer) and arg.__class__ != self.__class__:
argstr = '(' + argstr + ')'
str += argstr
if m != len(self.args) - 1:
str += ' %s ' % self._operator
return str
def _combine(self, arg1, arg2):
if arg1.__class__ != arg2.__class__:
if self.__class__ == Ser:
if isinstance(arg1, V) and arg1.Voc == 0:
return arg2
if isinstance(arg2, V) and arg2.Voc == 0:
return arg1
if isinstance(arg1, (R, Z)) and arg1.impedance == 0:
return arg2
if isinstance(arg2, (R, Z)) and arg2.impedance == 0:
return arg1
if self.__class__ == Par:
if isinstance(arg1, I) and arg1.Isc == 0:
return arg2
if isinstance(arg2, I) and arg2.Isc == 0:
return arg1
if isinstance(arg1, (Y, G)) and arg1.admittance == 0:
return arg2
if isinstance(arg2, (Y, G)) and arg2.admittance == 0:
return arg1
return None
if self.__class__ == Ser:
if isinstance(arg1, I):
return None
if isinstance(arg1, Vdc):
return Vdc(arg1.v0 + arg2.v0)
# Could simplify Vac here if same frequency
if isinstance(arg1, V):
return V(arg1 + arg2)
if isinstance(arg1, R):
return R(arg1._R + arg2._R)
if isinstance(arg1, L):
# The currents should be the same!
if arg1.i0 != arg2.i0 or arg1.has_ic != arg2.has_ic:
raise ValueError('Series inductors with different'
' initial currents!')
i0 = arg1.i0 if arg1.has_ic else None
return L(arg1.L + arg2.L, i0)
if isinstance(arg1, G):
return G(arg1._G * arg2._G / (arg1._G + arg2._G))
if isinstance(arg1, C):
v0 = arg1.v0 + arg2.v0 if arg1.has_ic or arg2.has_ic else None
return C(arg1.C * arg2.C / (arg1.C + arg2.C), v0)
return None
elif self.__class__ == Par:
if isinstance(arg1, V):
return None
if isinstance(arg1, Idc):
return Idc(arg1.i0 + arg2.i0)
# Could simplify Iac here if same frequency
if isinstance(arg1, I):
return I(arg1 + arg2)
if isinstance(arg1, G):
return G(arg1._G + arg2._G)
if isinstance(arg1, C):
# The voltages should be the same!
if arg1.v0 != arg2.v0 or arg1.has_ic != arg2.has_ic:
raise ValueError('Parallel capacitors with different'
' initial voltages!')
v0 = arg1.v0 if arg1.has_ic else None
return C(arg1.C + arg2.C, v0)
if isinstance(arg1, R):
return R(arg1._R * arg2._R / (arg1._R + arg2._R))
if isinstance(arg1, L):
i0 = arg1.i0 + arg2.i0 if arg1.has_ic or arg2.has_ic else None
return L(arg1.L * arg2.L / (arg1.L + arg2.L), i0)
return None
else:
raise TypeError('Undefined class')
def simplify(self, deep=True):
"""Perform simple simplifications, such as parallel resistors,
series inductors, etc., rather than collapsing to a Thevenin
or Norton network.
This does not expand compound components such as crystal
or ferrite bead models. Use expand() first.
"""
# Simplify args (recursively) and combine operators if have
# Par(Par(A, B), C) etc.
new = False
newargs = []
for m, arg in enumerate(self.args):
if isinstance(arg, ParSer):
arg = arg.simplify(deep)
new = True
if arg.__class__ == self.__class__:
newargs.extend(arg.args)
else:
newargs.append(arg)
else:
newargs.append(arg)
if new:
self = self.__class__(*newargs)
# Scan arg list looking for compatible combinations.
# Could special case the common case of two args.
new = False
args = list(self.args)
for n in range(len(args)):
arg1 = args[n]
if arg1 is None:
continue
if isinstance(arg1, ParSer):
continue
for m in range(n + 1, len(args)):
arg2 = args[m]
if arg2 is None:
continue
if isinstance(arg2, ParSer):
continue
# TODO, think how to simplify things such as
# Par(Ser(V1, R1), Ser(R2, V2)).
# Could do Thevenin/Norton transformations.
newarg = self._combine(arg1, arg2)
if newarg is not None:
# print('Combining', arg1, arg2, 'to', newarg)
args[m] = None
arg1 = newarg
new = True
args[n] = arg1
if new:
args = [arg for arg in args if arg is not None]
if len(args) == 1:
return args[0]
self = self.__class__(*args)
return self
def expand(self):
"""Expand compound components such as crystals or ferrite bead
models into R, L, G, C, V, I"""
newargs = []
for m, arg in enumerate(self.args):
newarg = arg.expand()
newargs.append(newarg)
return self.__class__(*newargs)
def s_model(self):
"""Convert to s-domain."""
args = [arg.s_model() for arg in self.args]
return (self.__class__(*args))
def noise_model(self):
"""Convert to noise model."""
args = [arg.noise_model() for arg in self.args]
return (self.__class__(*args))
@property
def Isc(self):
return self.cct.Isc(1, 0)
@property
def Voc(self):
return self.cct.Voc(1, 0)
class Par(ParSer):
"""Parallel class"""
_operator = '|'
is_parallel = True
def __init__(self, *args):
if len(args) < 2:
raise ValueError('Par requires at least two args')
_check_oneport_args(args)
super(Par, self).__init__()
self.args = args
for n, arg1 in enumerate(self.args):
for arg2 in self.args[n + 1:]:
if isinstance(arg1, V) and isinstance(arg2, V):
raise ValueError('Voltage sources connected in parallel'
' %s and %s' % (arg1, arg2))
elif isinstance(arg1, V):
print('Warn: redundant component %s in parallel with voltage source %s' % (arg2, arg1))
elif isinstance(arg2, V):
print('Warn: redundant component %s in parallel with voltage source %s' % (arg1, arg2))
@property
def width(self):
total = 0
for arg in self.args:
val = arg.width
if val > total:
total = val
return total + 2 * self.wsep
@property
def height(self):
total = 0
for arg in self.args:
total += arg.height
return total + (len(self.args) - 1) * self.hsep
def _net_make(self, netlist, n1=None, n2=None, dir='right'):
s = []
if n1 is None:
n1 = netlist._node
n3, n4 = netlist._node, netlist._node
H = [(arg.height + self.hsep) * 0.5 for arg in self.args]
N = len(H)
num_branches = N // 2
# Draw component in centre if have odd number in parallel.
if N & 1:
s.append(self.args[N // 2]._net_make(netlist, n3, n4, dir))
na, nb = n3, n4
s.append('W %s %s; %s=%s' % (n1, n3, dir, self.wsep))
if dir == 'right':
updir, downdir = 'up', 'down'
else:
updir, downdir = 'right', 'left'
# Draw components above centre
for n in range(num_branches):
if not (N & 1) and n == 0:
sep = H[N // 2 - 1]
else:
sep = H[N // 2 - n] + H[N // 2 - 1 - n]
nc, nd = netlist._node, netlist._node
s.append('W %s %s; %s=%s' % (na, nc, updir, sep))
s.append('W %s %s; %s=%s' % (nb, nd, updir, sep))
s.append(self.args[N // 2 - 1 - n]._net_make(netlist, nc, nd, dir))
na, nb = nc, nd
na, nb = n3, n4
# Draw components below centre
for n in range(num_branches):
if not (N & 1) and n == 0:
sep = H[(N + 1) // 2]
else:
sep = H[(N + 1) // 2 + n] + H[(N + 1) // 2 - 1 + n]
nc, nd = netlist._node, netlist._node
s.append('W %s %s; %s=%s' % (na, nc, downdir, sep))
s.append('W %s %s; %s=%s' % (nb, nd, downdir, sep))
s.append(self.args[(N + 1) // 2 + n]._net_make(netlist, nc, nd, dir))
na, nb = nc, nd
if n2 is None:
n2 = netlist._node
s.append('W %s %s; %s=%s' % (n4, n2, dir, self.wsep))
return '\n'.join(s)
@property
def has_parallel_V(self):
for cpt1 in self.args:
if cpt1.has_parallel_V:
return True
return False
@property
def admittance(self):
Y = 0
for arg in self.args:
Y += arg.admittance
return Y
@property
def impedance(self):
return 1 / self.admittance
@property
def Isc(self):
I = 0
for arg in self.args:
I += arg.Isc
return I
class Ser(ParSer):
"""Series class"""
_operator = '+'
is_series = True
def __init__(self, *args):
if len(args) < 2:
raise ValueError('Ser requires at least two args')
_check_oneport_args(args)
super(Ser, self).__init__()
self.args = args
for n, arg1 in enumerate(self.args):
for arg2 in self.args[n + 1:]:
if isinstance(arg1, I) and isinstance(arg2, I):
raise ValueError('Current sources connected in series'
' %s and %s' % (arg1, arg2))
elif isinstance(arg1, I):
print('Warn: redundant component %s in series with current source %s' % (arg2, arg1))
elif isinstance(arg2, I):
print('Warn: redundant component %s in series with current source %s' % (arg1, arg2))
@property
def height(self):
total = 0
for arg in self.args:
val = arg.height
if val > total:
total = val
return total
@property
def width(self):
total = 0
for arg in self.args:
total += arg.width
return total + (len(self.args) - 1) * self.wsep
def _net_make(self, netlist, n1=None, n2=None, dir='right'):
s = []
if n1 is None:
n1 = netlist._node
for arg in self.args[:-1]:
n3 = netlist._node
s.append(arg._net_make(netlist, n1, n3, dir))
n1 = netlist._node
s.append('W %s %s; %s=%s' % (n3, n1, dir, self.wsep))
if n2 is None:
n2 = netlist._node
s.append(self.args[-1]._net_make(netlist, n1, n2, dir))
return '\n'.join(s)
@property
def has_series_I(self):
for cpt1 in self.args:
if cpt1.has_series_I:
return True
return False
@property
def admittance(self):
return 1 / self.impedance
@property
def impedance(self):
Z = 0
for arg in self.args:
Z += arg.impedance
return Z
@property
def Voc(self):
V = 0
for arg in self.args:
V += arg.Voc
return V
class R(OnePort):
"""Resistor"""
is_resistor = True
is_noiseless = False
def __init__(self, Rval='R', **kwargs):
self.kwargs = kwargs
self.args = (Rval, )
self._R = cexpr(Rval)
self._Z = impedance(self._R, causal=True)
def i_equation(self, v, kind='t'):
return SuperpositionCurrent(SuperpositionVoltage(v).select(kind) / self._Z).select(kind)
def v_equation(self, i, kind='t'):
return SuperpositionVoltage(SuperpositionCurrent(i).select(kind) * self._Z).select(kind)
class NR(R):
"""Noiseless resistor"""
is_noiseless = True
class G(OnePort):
"""Conductor"""
is_conductor = True
is_noiseless = False
def __init__(self, Gval='G', **kwargs):
self.kwargs = kwargs
self.args = (Gval, )
self._G = cexpr(Gval)
self._Z = impedance(1 / self._G, causal=True)
def _net_make(self, netlist, n1=None, n2=None, dir='right'):
        if n1 is None:
            n1 = netlist._node
        if n2 is None:
            n2 = netlist._node
opts_str = self._opts_str(dir)
return 'R? %s %s {%s}; %s' % (n1, n2, 1 / self._G, opts_str)
def i_equation(self, v, kind='t'):
return SuperpositionCurrent(SuperpositionVoltage(v).select(kind) / self._Z).select(kind)
def v_equation(self, i, kind='t'):
return SuperpositionVoltage(SuperpositionCurrent(i).select(kind) * self._Z).select(kind)
class NG(G):
"""Noiseless conductor"""
is_noiseless = True
class L(OnePort):
"""Inductor
Inductance Lval, initial current i0"""
is_inductor = True
def __init__(self, Lval='L', i0=None, **kwargs):
self.kwargs = kwargs
self.has_ic = i0 is not None
if i0 is None:
i0 = 0
if self.has_ic:
self.args = (Lval, i0)
else:
self.args = (Lval, )
Lval = cexpr(Lval)
i0 = cexpr(i0)
self.L = Lval
self.i0 = i0
self._Z = impedance(s * Lval, causal=True)
self._Voc = SuperpositionVoltage(LaplaceDomainExpression(-i0 * Lval))
self.zeroic = self.i0 == 0
def i_equation(self, v, kind='t'):
from .sym import tausym
if kind in ('t', 'time', 'super'):
u = tausym
v = expr(v).subs(t, u)
return SuperpositionCurrent(expr(Integral(v.expr, (u, -oo, tsym))) / self.L).select(kind)
return SuperpositionCurrent(SuperpositionVoltage(v).select(kind) / self._Zkind(kind)).select(kind)
def v_equation(self, i, kind='t'):
if kind in ('t', 'time', 'super'):
return SuperpositionVoltage(self.L * expr(Derivative(i.expr, t))).select(kind)
return SuperpositionVoltage(SuperpositionCurrent(i).select(kind) * self._Zkind(kind)).select(kind)
class C(OnePort):
"""Capacitor
Capacitance Cval, initial voltage v0"""
is_capacitor = True
def __init__(self, Cval='C', v0=None, **kwargs):
self.kwargs = kwargs
self.has_ic = v0 is not None
if v0 is None:
v0 = 0
if self.has_ic:
self.args = (Cval, v0)
else:
self.args = (Cval, )
Cval = cexpr(Cval)
v0 = cexpr(v0)
self.C = Cval
self.v0 = v0
self._Z = impedance(1 / (s * Cval), causal=True)
self._Voc = SuperpositionVoltage(LaplaceDomainExpression(v0 / s))
self.zeroic = self.v0 == 0
def i_equation(self, v, kind='t'):
if kind in ('t', 'time', 'super'):
return SuperpositionCurrent(self.C * expr(Derivative(v.expr, t))).select(kind)
return SuperpositionCurrent(SuperpositionVoltage(v).select(kind) / self._Zkind(kind)).select(kind)
def v_equation(self, i, kind='t'):
from .sym import tausym
if kind in ('t', 'time', 'super'):
u = tausym
i = expr(i).subs(t, u)
return SuperpositionVoltage(expr(Integral(i.expr, (u, -oo, tsym))) / self.C).select(kind)
return SuperpositionVoltage(SuperpositionCurrent(i).select(kind) * self._Zkind(kind)).select(kind)
class CPE(OnePort):
"""Constant phase element
This has an impedance 1 / (s**alpha * K). When alpha == 0, the CPE is
equivalent to a resistor of resistance 1 / K. When alpha == 1, the CPE is
equivalent to a capacitor of capacitance K.
When alpha == 0.5 (default), the CPE is a Warburg element.
The phase of the impedance is -pi * alpha / 2.
Note, when alpha is non-integral, the impedance cannot be represented
as a rational function and so there are no poles or zeros. So
    don't be surprised if Lcapy throws an occasional wobbly."""
def __init__(self, K, alpha=0.5, **kwargs):
self.kwargs = kwargs
self.args = (K, alpha)
K = cexpr(K)
alpha = cexpr(alpha)
self.K = K
self.alpha = alpha
self._Z = impedance(1 / (s ** alpha * K), causal=True)
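# Example (sketch): CPE(2) is a Warburg element with Z = 1 / (2 * sqrt(s));
# CPE(2, 0) behaves as a 0.5 ohm resistor and CPE(2, 1) as a 2 F capacitor,
# consistent with the docstring above.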
class Y(OnePort):
"""General admittance."""
def __init__(self, Yval='Y', **kwargs):
self.kwargs = kwargs
self.args = (Yval, )
Yval = admittance(Yval)
self._Y = Yval
class Z(OnePort):
"""General impedance."""
def __init__(self, Zval='Z', **kwargs):
self.kwargs = kwargs
self.args = (Zval, )
Zval = impedance(Zval)
self._Z = Zval
class VoltageSourceBase(OnePort):
is_voltage_source = True
cpt_type = 'V'
is_noisy = False
def v_equation(self, i, kind='t'):
return SuperpositionVoltage(self.voc).select(kind)
class sV(VoltageSourceBase):
"""Arbitrary s-domain voltage source"""
netkeyword = 's'
def __init__(self, Vval, **kwargs):
self.kwargs = kwargs
self.args = (Vval, )
Vval = LaplaceDomainExpression(Vval)
self._Voc = SuperpositionVoltage(LaplaceDomainExpression(Vval))
class V(VoltageSourceBase):
"""Arbitrary voltage source"""
def __init__(self, Vval='V', **kwargs):
self.kwargs = kwargs
self.args = (Vval, )
self._Voc = SuperpositionVoltage(Vval)
class Vstep(VoltageSourceBase):
"""Step voltage source (s domain voltage of v / s)."""
netkeyword = 'step'
def __init__(self, v, **kwargs):
self.kwargs = kwargs
self.args = (v, )
v = cexpr(v)
self._Voc = SuperpositionVoltage(TimeDomainExpression(v) * Heaviside(t))
self.v0 = v
class Vdc(VoltageSourceBase):
"""DC voltage source (note a DC voltage source of voltage V has
an s domain voltage of V / s)."""
netkeyword = 'dc'
def __init__(self, Vval, **kwargs):
self.kwargs = kwargs
self.args = (Vval, )
Vval = cexpr(Vval)
self._Voc = SuperpositionVoltage(cexpr(Vval, dc=True))
self.v0 = Vval
@property
def voc(self):
return voltage(self.v0)
class Vac(VoltageSourceBase):
"""AC voltage source."""
netkeyword = 'ac'
def __init__(self, V, phi=None, omega=None, **kwargs):
self.kwargs = kwargs
if phi is None and omega is None:
self.args = (V, )
elif phi is not None and omega is None:
self.args = (V, phi)
elif phi is None and omega is not None:
self.args = (V, 0, omega)
else:
self.args = (V, phi, omega)
if phi is None:
phi = 0
if omega is None:
omega = omega0sym
omega = expr(omega)
V = cexpr(V)
phi = cexpr(phi)
# Note, cos(-pi / 2) is not quite zero.
self.omega = omega
self.v0 = V
self.phi = phi
self._Voc = SuperpositionVoltage(phasor(self.v0 * exp(j * self.phi),
omega=self.omega))
@property
def voc(self):
return voltage(self.v0 * cos(self.omega * t + self.phi))
class Vnoise(VoltageSourceBase):
"""Noise voltage source."""
netkeyword = 'noise'
is_noisy = True
def __init__(self, V, nid=None, **kwargs):
self.kwargs = kwargs
V1 = AngularFourierNoiseDomainVoltage(V, nid=nid)
self.args = (V, V1.nid)
self._Voc = SuperpositionVoltage(V1)
class v(VoltageSourceBase):
"""Arbitrary t-domain voltage source"""
def __init__(self, vval, **kwargs):
self.kwargs = kwargs
self.args = (vval, )
Vval = TimeDomainExpression(vval)
self._Voc = SuperpositionVoltage(Vval)
class CurrentSourceBase(OnePort):
is_current_source = True
cpt_type = 'I'
is_noisy = False
@property
def I(self):
"""Open-circuit current of a current source. To achieve this the
open-circuit voltage needs to be infinite."""
return self.Isc
def i_equation(self, v, kind='t'):
return SuperpositionCurrent(self.isc).select(kind)
class sI(CurrentSourceBase):
"""Arbitrary s-domain current source"""
netkeyword = 's'
def __init__(self, Ival, **kwargs):
if isinstance(Ival, str) and Ival == 'I':
warn('Current I is being considered as the imaginary number')
self.kwargs = kwargs
self.args = (Ival, )
Ival = LaplaceDomainExpression(Ival)
self._Isc = SuperpositionCurrent(LaplaceDomainExpression(Ival))
class I(CurrentSourceBase):
"""Arbitrary current source"""
def __init__(self, Ival='Is', **kwargs):
if isinstance(Ival, str) and Ival == 'I':
warn('Current I is being considered as the imaginary number')
self.kwargs = kwargs
self.args = (Ival, )
self._Isc = SuperpositionCurrent(Ival)
class Istep(CurrentSourceBase):
"""Step current source (s domain current of i / s)."""
netkeyword = 'step'
def __init__(self, Ival, **kwargs):
if isinstance(Ival, str) and Ival == 'I':
warn('Current I is being considered as the imaginary number')
self.kwargs = kwargs
self.args = (Ival, )
Ival = cexpr(Ival)
self._Isc = SuperpositionCurrent(TimeDomainExpression(Ival) * Heaviside(t))
self.i0 = Ival
class Idc(CurrentSourceBase):
"""DC current source (note a DC current source of current i has
an s domain current of i / s)."""
netkeyword = 'dc'
def __init__(self, Ival, **kwargs):
if isinstance(Ival, str) and Ival == 'I':
warn('Current I is being considered as the imaginary number')
self.kwargs = kwargs
self.args = (Ival, )
Ival = cexpr(Ival)
self._Isc = SuperpositionCurrent(cexpr(Ival, dc=True))
self.i0 = Ival
@property
def isc(self):
return current(self.i0)
class Iac(CurrentSourceBase):
"""AC current source."""
netkeyword = 'ac'
def __init__(self, I, phi=0, omega=None, **kwargs):
self.kwargs = kwargs
if phi is None and omega is None:
self.args = (I, )
elif phi is not None and omega is None:
self.args = (I, phi)
elif phi is None and omega is not None:
self.args = (I, 0, omega)
else:
self.args = (I, phi, omega)
if phi is None:
phi = 0
if omega is None:
omega = omega0sym
omega = cexpr(omega)
I = cexpr(I)
phi = cexpr(phi)
self.omega = omega
self.i0 = I
self.phi = phi
self._Isc = SuperpositionCurrent(phasor(self.i0 * exp(j * self.phi),
omega=self.omega))
@property
def isc(self):
return current(self.i0 * cos(self.omega * t + self.phi))
class Inoise(CurrentSourceBase):
"""Noise current source."""
netkeyword = 'noise'
is_noisy = True
def __init__(self, I, nid=None, **kwargs):
self.kwargs = kwargs
I1 = AngularFourierNoiseDomainCurrent(I, nid=nid)
self._Isc = SuperpositionCurrent(I1)
self.args = (I, I1.nid)
class i(CurrentSourceBase):
"""Arbitrary t-domain current source"""
def __init__(self, Ival, **kwargs):
self.kwargs = kwargs
self.args = (Ival, )
Ival = TimeDomainExpression(Ival)
self._Isc = SuperpositionCurrent(Ival)
class Xtal(OnePort):
"""Crystal
This is modelled as a series R, L, C circuit in parallel
with C0 (a Butterworth van Dyke model). Note,
harmonic resonances are not modelled.
"""
def __init__(self, C0, R1, L1, C1, **kwargs):
self.kwargs = kwargs
self.C0 = cexpr(C0)
self.R1 = cexpr(R1)
self.L1 = cexpr(L1)
self.C1 = cexpr(C1)
self._Z = self.expand().impedance
self.args = (C0, R1, L1, C1)
def expand(self):
return (R(self.R1) + L(self.L1) + C(self.C1)) | C(self.C0)
def _net_make(self, netlist, n1=None, n2=None, dir='right'):
# TODO: draw this with a symbol
net = self.expand()
return net._net_make(netlist, n1, n2, dir)
class FerriteBead(OnePort):
"""Ferrite bead (lossy inductor)
This is modelled as a series resistor (Rs) connected
to a parallel R, L, C network (Rp, Lp, Cp).
"""
def __init__(self, Rs, Rp, Cp, Lp, **kwargs):
self.kwargs = kwargs
self.Rs = cexpr(Rs)
self.Rp = cexpr(Rp)
self.Cp = cexpr(Cp)
self.Lp = cexpr(Lp)
self._Z = self.expand().impedance
self.args = (Rs, Rp, Cp, Lp)
def expand(self):
return R(self.Rs) + (R(self.Rp) + L(self.Lp) + C(self.Cp))
def _net_make(self, netlist, n1=None, n2=None, dir='right'):
# TODO: draw this with a symbol
net = self.expand()
return net._net_make(netlist, n1, n2, dir)
class LoadCircuit(Network):
"""Circuit comprised of a load oneport connected in parallel with a
source oneport."""
def __init__(self, source_OP, load_OP):
self.source_OP = source_OP
self.load_OP = load_OP
self.vnet = source_OP | load_OP
self.inet = source_OP + load_OP
self.args = (source_OP, load_OP)
@property
def V(self):
"""Voltage across load."""
return self.vnet.Voc
@property
def v(self):
"""Time-domain voltage across load."""
return self.vnet.voc
@property
def I(self):
"""Current into load."""
return self.inet.Isc
@property
def i(self):
"""Time-domain current into load."""
return self.inet.isc
def _net_make(self, netlist, n1=None, n2=None, dir='right'):
# TODO: draw this better rather than as a oneport.
return self.vnet._net_make(netlist, n1, n2, dir)
class ControlledSource(OnePort):
"""These components are controlled one-ports."""
pass
class CCVS(ControlledSource):
def __init__(self, control, value, **kwargs):
self.kwargs = kwargs
self.args = (control, value)
self._Voc = SuperpositionVoltage(0)
self._Z = impedance(0)
class CCCS(ControlledSource):
def __init__(self, control, value, **kwargs):
self.kwargs = kwargs
self.args = (control, value)
self._Isc = SuperpositionCurrent(0)
self._Y = admittance(0)
class VCVS(ControlledSource):
def __init__(self, value, **kwargs):
self.kwargs = kwargs
self.args = (value, )
self._Voc = SuperpositionVoltage(0)
self._Z = impedance(0)
class VCCS(ControlledSource):
def __init__(self, value, **kwargs):
self.kwargs = kwargs
self.args = (value, )
self._Isc = SuperpositionCurrent(0)
self._Y = admittance(0)
class Dummy(OnePort):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
self.args = args
class K(Dummy):
"""Coupling coefficient"""
def __init__(self, L1, L2, K, **kwargs):
        if K is ksym or (isinstance(K, Expr) and K.var is ksym):
warn("""
Coupling coefficient %s is the discrete Fourier domain variable.
You can override it using %s = symbol('%s', force=True).""" % (K, K, K))
self.kwargs = kwargs
self.args = (L1, L2, K)
self.K = cexpr(K)
class W(Dummy):
"""Wire (short)"""
def __init__(self, **kwargs):
self.kwargs = kwargs
self.args = ()
self._Z = impedance(0)
class O(Dummy):
"""Open circuit"""
def __init__(self, **kwargs):
self.kwargs = kwargs
self.args = ()
self._Y = admittance(0)
class P(O):
"""Port (open circuit)"""
pass
class Mass(L):
"""Mass
Mass mval, initial velocity v0"""
pass
class Spring(C):
"""Spring
Spring constant kval, initial force f0"""
pass
class Damper(R):
"""Damper
Friction coeff rval"""
pass
def series(*args):
"""Create a series combination of a number of components.
Args that are None are ignored. If there is only one
non-None component, return that component."""
args = [net for net in args if net is not None]
if args == []:
return None
if len(args) == 1:
return args[0]
return Ser(*args)
def parallel(*args):
"""Create a parallel combination of a number of components.
Args that are None are ignored. If there is only one
non-None component, return that component."""
args = [net for net in args if net is not None]
if args == []:
return None
if len(args) == 1:
return args[0]
return Par(*args)
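# Example (sketch): parallel(R(2), None, R(4)) returns Par(R(2), R(4)),
# parallel(R(2), None) returns R(2) and parallel(None, None) returns None.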
def ladder(*args, **kwargs):
"""Create a ladder oneport network with alternating series and shunt components.
If an arg is None, the component is ignored.
    ladder(R(1), C(2), R(3)) is equivalent to R(1) + (C(2) | R(3))
    ladder(None, R(1), C(2), R(3)) is equivalent to R(1) | (C(2) + R(3))
"""
start_series = kwargs.pop('start_series', True)
if len(args) == 0:
return None
elif len(args) == 1:
return args[0]
elif start_series:
return series(args[0], ladder(*args[1:], start_series = not start_series))
else:
return parallel(args[0], ladder(*args[1:], start_series = not start_series))
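# Example (sketch): ladder(R(1), C(2), R(3), C(4)) builds
# R(1) + (C(2) | (R(3) + C(4))), i.e. alternating series and shunt sections
# starting with a series element; pass start_series=False (or a leading None)
# to start with a shunt element instead.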
# Imports at end to circumvent circular dependencies
from .expr import Expr, expr
from .cexpr import cexpr
from .sexpr import LaplaceDomainExpression
from .texpr import TimeDomainExpression
from .phasor import phasor
from .noiseomegaexpr import AngularFourierNoiseDomainCurrent, AngularFourierNoiseDomainVoltage
from .superpositionvoltage import SuperpositionVoltage
from .superpositioncurrent import SuperpositionCurrent
from .twoport import Ladder, LSection, TSection
|
mph-/lcapy
|
lcapy/oneport.py
|
Python
|
lgpl-2.1
| 41,294
|
[
"CRYSTAL"
] |
ca3cdf0ae8127a7065cf80b8189255c466b1ae2d94bea02a31f64ddab47be579
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# expand - emulate shell wild card expansion
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Script to provide users with a means of expanding patterns to files and
directories in their home directories. This script tries to mimic basic shell
wild card expansion.
"""
import os
import glob
import shared.returnvalues as returnvalues
from shared.base import client_id_dir, invisible_path
from shared.functional import validate_input_and_cert
from shared.functionality.ls import select_all_javascript, \
selected_file_actions_javascript
from shared.init import initialize_main_variables, find_entry
from shared.parseflags import all, long_list, recursive
from shared.settings import load_settings
from shared.validstring import valid_user_path
def signature():
"""Signature of the main function"""
defaults = {'flags': [''], 'path': ['.'], 'with_dest': ['false']}
return ['dir_listings', defaults]
def handle_file(
listing,
filename,
file_with_dir,
actual_file,
flags='',
dest='',
show_dest=False,
):
"""handle a file"""
# Build entire line before printing to avoid newlines
# Recursion can get here when called without explicit invisible files
if invisible_path(file_with_dir):
return
file_obj = {
'object_type': 'direntry',
'type': 'file',
'name': filename,
'file_with_dir': file_with_dir,
'flags': flags,
}
if show_dest:
file_obj['file_dest'] = dest
listing.append(file_obj)
def handle_expand(
output_objects,
listing,
base_dir,
real_path,
flags='',
dest='',
depth=0,
show_dest=False,
):
"""Recursive function to expand paths in a way not unlike ls, but only
files are interesting in this context. The order of recursively expanded
paths is different from that in ls since it simplifies the code and
doesn't really matter to the clients.
"""
# Sanity check
if depth > 255:
output_objects.append({'object_type': 'error_text', 'text'
: 'Error: file recursion maximum exceeded!'
})
return (output_objects, returnvalues.SYSTEM_ERROR)
# references to '.' or similar are stripped by abspath
if real_path + os.sep == base_dir:
base_name = relative_path = '.'
else:
base_name = os.path.basename(real_path)
relative_path = real_path.replace(base_dir, '')
if invisible_path(relative_path):
return
if os.path.isfile(real_path):
handle_file(
listing,
relative_path,
relative_path,
real_path,
flags,
dest,
show_dest,
)
else:
try:
contents = os.listdir(real_path)
except Exception, exc:
output_objects.append({'object_type': 'error_text', 'text'
: 'Failed to list contents of %s: %s'
% (base_name, exc)})
return (output_objects, returnvalues.SYSTEM_ERROR)
# Filter out dot files unless '-a' is used
if not all(flags):
contents = [i for i in contents if not i.startswith('.')]
contents.sort()
if not recursive(flags) or depth < 0:
for name in contents:
path = real_path + os.sep + name
rel_path = path.replace(base_dir, '')
if os.path.isfile(path):
handle_file(
listing,
rel_path,
rel_path,
path,
flags,
os.path.join(dest, os.path.basename(rel_path)),
show_dest,
)
else:
# Force pure content listing first by passing a negative depth
handle_expand(
output_objects,
listing,
base_dir,
real_path,
flags,
dest,
-1,
show_dest,
)
for name in contents:
path = real_path + os.sep + name
rel_path = path.replace(base_dir, '')
if os.path.isdir(path):
handle_expand(
output_objects,
listing,
base_dir,
path,
flags,
os.path.join(dest, name),
depth + 1,
show_dest,
)
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
client_dir = client_id_dir(client_id)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
flags = ''.join(accepted['flags'])
pattern_list = accepted['path']
show_dest = accepted['with_dest'][0].lower() == 'true'
listing = []
# Please note that base_dir must end in slash to avoid access to other
# user dirs when own name is a prefix of another user name
base_dir = os.path.abspath(os.path.join(configuration.user_home,
client_dir)) + os.sep
status = returnvalues.OK
settings_dict = load_settings(client_id, configuration)
javascript = '%s\n%s' % (select_all_javascript(),
selected_file_actions_javascript())
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = '%s Files' % configuration.short_title
title_entry['javascript'] = javascript
output_objects.append({'object_type': 'header',
'text': '%s Files' % configuration.short_title
})
location_pre_html = \
"""
<div class='files'>
<table class='files'>
<tr class='title'><td class='centertext'>
Working directory:
</td></tr>
<tr><td class='centertext'>
"""
output_objects.append({'object_type': 'html_form', 'text'
: location_pre_html})
for pattern in pattern_list:
links = []
links.append({'object_type': 'link', 'text':
'%s HOME' % configuration.short_title,
'destination': 'ls.py?path=.'})
prefix = ''
parts = pattern.split(os.sep)
for i in parts:
prefix = os.path.join(prefix, i)
links.append({'object_type': 'link', 'text': i,
'destination': 'ls.py?path=%s' % prefix})
output_objects.append({'object_type': 'multilinkline', 'links'
: links})
location_post_html = """
</td></tr>
</table>
</div>
<br />
"""
output_objects.append({'object_type': 'html_form', 'text'
: location_post_html})
more_html = \
"""
<div class='files'>
<form method='post' name='fileform' onSubmit='return selectedFilesAction();'>
<table class='files'>
<tr class='title'><td class='centertext' colspan=2>
Advanced file actions
</td></tr>
<tr><td>
Action on paths selected below
(please hold mouse cursor over button for a description):
</td>
<td class='centertext'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='v' />
<input type='submit' title='Show concatenated contents (cat)' onClick='document.pressed=this.value' value='cat' />
<input type='submit' onClick='document.pressed=this.value' value='head' title='Show first lines (head)' />
<input type='submit' onClick='document.pressed=this.value' value='tail' title='Show last lines (tail)' />
<input type='submit' onClick='document.pressed=this.value' value='wc' title='Count lines/words/chars (wc)' />
<input type='submit' onClick='document.pressed=this.value' value='stat' title='Show details (stat)' />
<input type='submit' onClick='document.pressed=this.value' value='touch' title='Update timestamp (touch)' />
<input type='submit' onClick='document.pressed=this.value' value='truncate' title='truncate! (truncate)' />
<input type='submit' onClick='document.pressed=this.value' value='rm' title='delete! (rm)' />
<input type='submit' onClick='document.pressed=this.value' value='rmdir' title='Remove directory (rmdir)' />
<input type='submit' onClick='document.pressed=this.value' value='submit' title='Submit file (submit)' />
</td></tr>
</table>
</form>
</div>
"""
output_objects.append({'object_type': 'html_form', 'text'
: more_html})
dir_listings = []
output_objects.append({
'object_type': 'dir_listings',
'dir_listings': dir_listings,
'flags': flags,
'show_dest': show_dest,
})
first_match = None
for pattern in pattern_list:
# Check directory traversal attempts before actual handling to avoid
# leaking information about file system layout while allowing
# consistent error messages
unfiltered_match = glob.glob(base_dir + pattern)
match = []
for server_path in unfiltered_match:
real_path = os.path.abspath(server_path)
if not valid_user_path(real_path, base_dir, True):
logger.warning('%s tried to %s restricted path %s! (%s)'
% (client_id, op_name, real_path, pattern))
continue
match.append(real_path)
if not first_match:
first_match = real_path
# Now actually treat list of allowed matchings and notify if no
# (allowed) match
if not match:
output_objects.append({'object_type': 'file_not_found',
'name': pattern})
status = returnvalues.FILE_NOT_FOUND
for real_path in match:
if real_path + os.sep == base_dir:
relative_path = '.'
else:
relative_path = real_path.replace(base_dir, '')
entries = []
dir_listing = {
'object_type': 'dir_listing',
'relative_path': relative_path,
'entries': entries,
'flags': flags,
}
dest = ''
if show_dest:
if os.path.isfile(real_path):
dest = os.path.basename(real_path)
elif recursive(flags):
# references to '.' or similar are stripped by abspath
if real_path + os.sep == base_dir:
dest = ''
else:
# dest = os.path.dirname(real_path).replace(base_dir, "")
dest = os.path.basename(real_path) + os.sep
handle_expand(
output_objects,
entries,
base_dir,
real_path,
flags,
dest,
0,
show_dest,
)
dir_listings.append(dir_listing)
output_objects.append({'object_type': 'html_form', 'text'
: """
<div class='files'>
<table class='files'>
<tr class='title'><td class='centertext'>
Filter paths (wildcards like * and ? are allowed)
<form method='post' action='ls.py'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='%s' />
<input type='text' name='path' value='' />
<input type='submit' value='Filter' />
</form>
</td></tr>
</table>
</div>
"""
% flags})
# Short/long format buttons
htmlform = \
"""<table class='files'>
<tr class='title'><td class='centertext' colspan=4>
File view options
</td></tr>
<tr><td colspan=4><br /></td></tr>
<tr class='title'><td>Parameter</td><td>Setting</td><td>Enable</td><td>Disable</td></tr>
<tr><td>Long format</td><td>
%s</td><td>"""\
% long_list(flags)\
+ """
<form method='post' action='ls.py'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='%s' />"""\
% (flags + 'l')
for entry in pattern_list:
htmlform += "<input type='hidden' name='path' value='%s' />"\
% entry
htmlform += \
"""
<input type='submit' value='On' /><br />
</form>
</td><td>
<form method='post' action='ls.py'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='%s' />"""\
% flags.replace('l', '')
for entry in pattern_list:
htmlform += "<input type='hidden' name='path' value='%s' />"\
% entry
htmlform += \
"""
<input type='submit' value='Off' /><br />
</form>
</td></tr>
"""
# Recursive output
htmlform += \
"""
<!-- Non-/recursive list buttons -->
<tr><td>Recursion</td><td>
%s</td><td>"""\
% recursive(flags)
htmlform += \
"""
<form method='post' action='ls.py'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='%s' />"""\
% (flags + 'r')
for entry in pattern_list:
htmlform += " <input type='hidden' name='path' value='%s' />"\
% entry
htmlform += \
"""
<input type='submit' value='On' /><br />
</form>
</td><td>
<form method='post' action='ls.py'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='%s' />"""\
% flags.replace('r', '')
for entry in pattern_list:
htmlform += "<input type='hidden' name='path' value='%s' />"\
% entry
htmlform += \
"""
<input type='submit' value='Off' /><br />
</form>
</td></tr>
"""
htmlform += \
"""
<!-- Show dot files buttons -->
<tr><td>Show hidden files</td><td>
%s</td><td>"""\
% all(flags)
htmlform += \
"""
<form method='post' action='ls.py'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='%s' />"""\
% (flags + 'a')
for entry in pattern_list:
htmlform += "<input type='hidden' name='path' value='%s' />"\
% entry
htmlform += \
"""
<input type='submit' value='On' /><br />
</form>
</td><td>
<form method='post' action='ls.py'>
<input type='hidden' name='output_format' value='html' />
<input type='hidden' name='flags' value='%s' />"""\
% flags.replace('a', '')
for entry in pattern_list:
htmlform += "<input type='hidden' name='path' value='%s' />"\
% entry
htmlform += \
"""
<input type='submit' value='Off' /><br />
</form>
</td></tr>
</table>
"""
# show flag buttons after contents to avoid
output_objects.append({'object_type': 'html_form', 'text'
: htmlform})
# create upload file form
if first_match:
# use first match for current directory
# Note that base_dir contains an ending slash
if os.path.isdir(first_match):
dir_path = first_match
else:
dir_path = os.path.dirname(first_match)
if dir_path + os.sep == base_dir:
relative_dir = '.'
else:
relative_dir = dir_path.replace(base_dir, '')
output_objects.append({'object_type': 'html_form', 'text'
: """
<br />
<table class='files'>
<tr class='title'><td class='centertext' colspan=2>
Edit file
</td><td><br /></td></tr>
<tr><td>
Fill in the path of a file to edit and press 'edit' to open that file in the<br />
online file editor. Alternatively a file can be selected for editing through<br />
the listing of personal files.
</td><td colspan=2 class='righttext'>
<form name='editor' method='post' action='editor.py'>
<input type='hidden' name='output_format' value='html' />
<input name='current_dir' type='hidden' value='%(dest_dir)s' />
<input type='text' name='path' size=50 value='' />
<input type='submit' value='edit' />
</form>
</td></tr>
</table>
<br />
<table class='files'>
<tr class='title'><td class='centertext' colspan=4>
Create directory
</td></tr>
<tr><td>
Name of new directory to be created in current directory (%(dest_dir)s)
</td><td class='righttext' colspan=3>
<form action='mkdir.py' method=post>
<input name='path' size=50 />
<input name='current_dir' type='hidden' value='%(dest_dir)s' />
<input type='submit' value='Create' name='mkdirbutton' />
</form>
</td></tr>
</table>
<br />
<form enctype='multipart/form-data' action='textarea.py' method='post'>
<table class='files'>
<tr class='title'><td class='centertext' colspan=4>
Upload file
</td></tr>
<tr><td colspan=4>
Upload file to current directory (%(dest_dir)s)
</td></tr>
<tr><td colspan=2>
Extract package files (.zip, .tar.gz, .tar.bz2)
</td><td colspan=2>
<input type='checkbox' name='extract_0' />
</td></tr>
<tr><td colspan=2>
Submit mRSL files (also .mRSL files included in packages)
</td><td colspan=2>
<input type='checkbox' name='submitmrsl_0' checked />
</td></tr>
<tr><td>
File to upload
</td><td class='righttext' colspan=3>
<input name='fileupload_0_0_0' type='file' />
</td></tr>
<tr><td>
Optional remote filename (extra useful in windows)
</td><td class='righttext' colspan=3>
<input name='default_remotefilename_0' type='hidden' value='%(dest_dir)s' />
<input name='remotefilename_0' type='text' size='50' value='%(dest_dir)s' />
<input type='submit' value='Upload' name='sendfile' />
</td></tr>
</table>
</form>
"""
% {'dest_dir': relative_dir + os.sep}})
return (output_objects, status)
|
heromod/migrid
|
mig/shared/functionality/expand.py
|
Python
|
gpl-2.0
| 19,186
|
[
"Brian"
] |
0d1d078ebf9c6a319bddf26ba852d3b1b4dcb17706f40f7f780cc3ae8e570c3a
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import uuid
import mock
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import test
from nova.tests.compute.monitors import test_monitors
from nova.tests.objects import test_migration
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [{
'label': 'forza-napoli',
'dev_type': 'foo',
'compute_node_id': 1,
'address': '0000:00:00.1',
'product_id': 'p1',
'vendor_id': 'v1',
'status': 'available',
'extra_k1': 'v1'}] if self.pci_support else []
self.pci_stats = [{
'count': 1,
'vendor_id': 'v1',
'product_id': 'p1',
'extra_info': {'extra_k1': 'v1'}}] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"stats": {
"num_instances": "1",
},
"hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.updated = False
self.deleted = False
self.update_call_count = 0
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus
}
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
def test_update_usage_only_for_tracked(self):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
instance = self._fake_instance(flavor=flavor, task_state=None)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(1, 'current_workload')
def test_claim_and_audit(self):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0)
self.tracker.instance_claim(self.context, instance, self.limits)
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm tracker is adding in host_ip
self.assertIsNotNone(self.compute.get('host_ip'))
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute['free_ram_mb'])
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute['free_disk_gb'])
def test_claim_and_abort(self):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
def test_instance_claim_with_oversubscription(self):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
def test_additive_claims(self):
self.limits['vcpu'] = 2
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance = self._fake_instance(flavor=flavor)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(flavor=flavor)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
def test_context_claim_with_exception(self):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
def test_instance_context_claim(self):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
instance = self._fake_instance(flavor=flavor)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
def test_update_load_stats_for_instance(self):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
def test_cpu_stats(self):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
def _fake_migration_create(mig_self, ctxt):
self._migrations[mig_self.instance_uuid] = mig_self
mig_self.obj_reset_changes()
self.stubs.Set(objects.Migration, 'create',
_fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
def _fake_migration_create(self, context, values=None):
instance_uuid = str(uuid.uuid1())
mig_dict = test_migration.fake_db_migration()
mig_dict.update({
'id': 1,
'source_compute': 'host1',
'source_node': 'fakenode',
'dest_compute': 'host2',
'dest_node': 'fakenode',
'dest_host': '127.0.0.1',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'instance_uuid': instance_uuid,
'status': 'pre-migrating',
'updated_at': timeutils.utcnow()
})
if values:
mig_dict.update(values)
migration = objects.Migration()
migration.update(mig_dict)
# This hits the stub in setUp()
migration.create('fake')
def test_claim(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_abort(self):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_additive_claims(self):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
def test_claim_and_audit(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_same_host(self):
self.limits['vcpu'] = 3
src_dict = {
'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
src_type = self._fake_flavor_create(
id=10, name="srcflavor", **src_dict)
dest_type = self._fake_flavor_create(
id=11, name="destflavor", **dest_dict)
# make an instance of src_type:
instance = self._fake_instance(flavor=src_type)
instance['system_metadata'] = self._fake_instance_system_metadata(
dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
+ 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
+ dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
self.tracker.update_available_resource(self.context)
claim.abort()
# only the original instance should remain, not the migration:
self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'], 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_revert(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.drop_resize_claim(self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_revert_reserve_source(self):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
# migration:
dest = "desthost"
dest_tracker = self._tracker(host=dest)
dest_tracker.update_available_resource(self.context)
self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
values = {'source_compute': self.host, 'dest_compute': dest,
'old_instance_type_id': 1, 'new_instance_type_id': 1,
'status': 'post-migrating',
'instance_uuid': self.instance['uuid']}
self._fake_migration_create(self.context, values)
# attach an instance to the destination host tracker:
dest_tracker.instance_claim(self.context, self.instance)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# audit and recheck to confirm migration doesn't get double counted
# on dest:
dest_tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# apply the migration to the source host tracker:
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
# flag the instance and migration as reverting and re-audit:
self.instance['vm_state'] = vm_states.RESIZED
self.instance['task_state'] = task_states.RESIZE_REVERTING
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
def test_dupe_filter(self):
instance = self._fake_instance(host=self.host)
values = {'source_compute': self.host, 'dest_compute': self.host,
'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
self._fake_migration_create(self.context, values)
self._fake_migration_create(self.context, values)
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_set_instance_host_and_node(self):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
#) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
#) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
def test_get_instance_type_stash_false(self):
with (mock.patch.object(objects.Flavor, 'get_by_id',
return_value=self.instance_type)):
flavor = self.tracker._get_instance_type(self.context,
self.instance, "new_")
self.assertEqual(self.instance_type, flavor)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
| srajag/nova | nova/tests/compute/test_resource_tracker.py | Python | apache-2.0 | 49,365 | ["exciting"] | 9c0a156dab6766c605e8d1958b7086997130ff6b26e82cb6b8aa5a5a9afd0193 |
"""
Acceptance tests for Home Page (My Courses / My Libraries).
"""
from __future__ import absolute_import
import datetime
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.studio.index import DashboardPage
from common.test.acceptance.tests.helpers import AcceptanceTest, get_selected_option_text, select_option_by_text
from .base_studio_test import StudioCourseTest
class CreateLibraryTest(AcceptanceTest):
"""
Test that we can create a new content library on the studio home page.
"""
def setUp(self):
"""
Load the helper for the home page (dashboard page)
"""
super(CreateLibraryTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
class StudioLanguageTest(AcceptanceTest):
""" Test suite for the Studio Language """
shard = 21
def setUp(self):
super(StudioLanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.account_settings = AccountSettingsPage(self.browser)
AutoAuthPage(self.browser).visit()
def test_studio_language_change(self):
"""
Scenario: Ensure that language selection is working fine.
First I go to the user dashboard page in studio. I can see 'English' is selected by default.
Then I choose 'Dummy Language' from drop down (at top of the page).
Then I visit the student account settings page and I can see the language has been updated to 'Dummy Language'
in both drop downs.
"""
dummy_language = u'Dummy Language (Esperanto)'
self.dashboard_page.visit()
language_selector = self.dashboard_page.language_selector
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
select_option_by_text(language_selector, dummy_language)
self.dashboard_page.wait_for_ajax()
self.account_settings.visit()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), dummy_language)
self.assertEqual(
get_selected_option_text(language_selector),
u'Dummy Language (Esperanto)'
)
class ArchivedCourseTest(StudioCourseTest):
""" Tests that archived courses appear in their own list. """
def setUp(self, is_staff=True, test_xss=False):
"""
Load the helper for the home page (dashboard page)
"""
super(ArchivedCourseTest, self).setUp(is_staff=is_staff, test_xss=test_xss)
self.dashboard_page = DashboardPage(self.browser)
def populate_course_fixture(self, course_fixture):
current_time = datetime.datetime.now()
course_start_date = current_time - datetime.timedelta(days=60)
course_end_date = current_time - datetime.timedelta(days=90)
course_fixture.add_course_details({
'start_date': course_start_date,
'end_date': course_end_date
})
| ESOedX/edx-platform | common/test/acceptance/tests/studio/test_studio_home.py | Python | agpl-3.0 | 3,149 | ["VisIt"] | 4a0a4e92fec10efcfd06ae2b4956efe4d987ef52b6c941c8ff0a117006da549e |
"""
This is an example for an MD simulation of a simple Lennard-Jones fluid
with ESPResSo++. We will start with particles at random positions within
the simulation box interacting via a shifted Lennard-Jones type potential
with an interaction cutoff at 2.5.
Newtons equations of motion are integrated with a Velocity-Verlet integrator.
The canonical (NVT) ensemble is realized by using a Langevin thermostat.
In order to prevent explosion due to strongly overlapping volumes of
random particles the system needs to be warmed up first.
Warm-up is accomplished by using a repelling-only LJ interaction
(cutoff=1.12246, shift=0.25) with a force capping at radius 0.6
and initial small LJ epsilon value of 0.1.
During warmup epsilon is gradually increased to its final value 1.0.
After warm-up the system is equilibrated using the full uncapped LJ Potential.
If a system still explodes during warmup or equilibration, warmup time
could be increased by increasing warmup_nloops and the capradius could
be set to another value. Depending on the system (number of particles, density, ...)
it could also be necessary to vary sigma during warmup.
The simulation consists of the following steps:
1. specification of the main simulation parameters
2. setup of the system, random number generator and parallelisation
3. setup of the integrator and simulation ensemble
4. adding the particles
5. setting up interaction potential for the warmup
6. running the warmup loop
7. setting up interaction potential for the equilibration
8. running the equilibration loop
9. writing configuration to a file
"""
# import the ESPResSo++ python module
import espressopp
import chemlab
########################################################################
# 1. specification of the main simulation parameters #
########################################################################
# number of particles
Npart = 1500
# density of particles
rho = 0.8442
# length of simulation box
L = pow(Npart/rho, 1.0/3.0)
# cubic simulation box of size L
box = (L, L, L)
# cutoff of the short range potential
r_cutoff = 2.5
# VerletList skin size (also used for domain decomposition)
skin = 0.4
# the temperature of the system
temperature = 1.0
# time step for the velocity verlet integrator
dt = 0.005
# Lennard Jones epsilon during equilibration phase
epsilon = 1.0
# Lennard Jones sigma during warmup and equilibration
sigma = 1.0
# interaction cut-off used during the warm-up phase
warmup_cutoff = pow(2.0, 1.0/6.0)
# number of warm-up loops
warmup_nloops = 100
# number of integration steps performed in each warm-up loop
warmup_isteps = 200
# total number of integration steps of the warm-up phase
total_warmup_steps = warmup_nloops * warmup_isteps
# initial value for LJ epsilon at beginning of warmup
epsilon_start = 0.1
# final value for LJ epsilon at end of warmup
epsilon_end = 1.0
# increment epsilon by epsilon delta after each warmup_loop
epsilon_delta = (epsilon_end - epsilon_start) / warmup_nloops
# force capping radius
capradius = 0.6
# number of equilibration loops
equil_nloops = 1000
# number of integration steps performed in each equilibration loop
equil_isteps = 100
# print ESPResSo++ version and compile info
print espressopp.Version().info()
# print simulation parameters (useful to have them in a log file)
print "Npart = ", Npart
print "rho = ", rho
print "L = ", L
print "box = ", box
print "r_cutoff = ", r_cutoff
print "skin = ", skin
print "temperature = ", temperature
print "dt = ", dt
print "epsilon = ", epsilon
print "sigma = ", sigma
print "warmup_cutoff = ", warmup_cutoff
print "warmup_nloops = ", warmup_nloops
print "warmup_isteps = ", warmup_isteps
print "total_warmup_steps = ", total_warmup_steps
print "epsilon_start = ", epsilon_start
print "epsilon_end = ", epsilon_end
print "epsilon_delta = ", epsilon_delta
print "capradius = ", capradius
print "equil_nloops = ", equil_nloops
print "equil_isteps = ", equil_isteps
########################################################################
# 2. setup of the system, random number generator and parallelisation #
########################################################################
# create the basic system
system = espressopp.System()
# use the random number generator that is included within the ESPResSo++ package
system.rng = espressopp.esutil.RNG()
# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
# set the skin size used for verlet lists and cell sizes
system.skin = skin
# get the number of CPUs to use
NCPUs = espressopp.MPI.COMM_WORLD.size
# calculate a regular 3D grid according to the number of CPUs available
nodeGrid = espressopp.tools.decomp.nodeGrid(NCPUs)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, warmup_cutoff, skin)
# create a domain decomposition particle storage with the calculated nodeGrid and cellGrid
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
print "NCPUs = ", NCPUs
print "nodeGrid = ", nodeGrid
print "cellGrid = ", cellGrid
########################################################################
# 3. setup of the integrator and simulation ensemble #
########################################################################
# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt
# use a thermostat if the temperature is set
if (temperature != None):
    # create a Langevin thermostat
thermostat = espressopp.integrator.LangevinThermostat(system)
# set Langevin friction constant
thermostat.gamma = 1.0
# set temperature
thermostat.temperature = temperature
# tell the integrator to use this thermostat
integrator.addExtension(thermostat)
## steps 2. and 3. could be short-cut by the following expression:
## system, integrator = espressopp.standard_system.Default(box, warmup_cutoff, skin, dt, temperature)
########################################################################
# 4. adding the particles #
########################################################################
print "adding ", Npart, " particles to the system ..."
particle_ids = []
for pid in range(1, Npart+1):
# get a 3D random coordinate within the box
pos = system.bc.getRandomPos()
# add a particle with particle id pid and coordinate pos to the system
# coordinates are automatically folded according to periodic boundary conditions
# the following default values are set for each particle:
# (type=0, mass=1.0, velocity=(0,0,0), charge=0.0)
system.storage.addParticle(pid, pos)
particle_ids.append(pid)
system.storage.modifyParticle(pid, 'res_id', pid)
# distribute the particles to parallel CPUs
system.storage.decompose()
########################################################################
# 5. setting up interaction potential for the warmup #
########################################################################
# create a verlet list that uses a cutoff radius = warmup_cutoff
# the verlet radius is automatically increased by system.skin (see system setup)
verletlist = espressopp.VerletList(system, warmup_cutoff)
# create a force capped Lennard-Jones potential
# the potential is automatically shifted so that U(r=cutoff) = 0.0
LJpot = espressopp.interaction.LennardJonesCapped(epsilon=epsilon_start, sigma=sigma, cutoff=warmup_cutoff, caprad=capradius, shift='auto')
# create a force capped Lennard-Jones interaction that uses a verlet list
interaction = espressopp.interaction.VerletListLennardJonesCapped(verletlist)
# tell the interaction to use the above defined force capped Lennard-Jones potential
# between 2 particles of type 0
interaction.setPotential(type1=0, type2=0, potential=LJpot)
########################################################################
# 6. running the warmup loop
########################################################################
# make the force capping interaction known to the system
system.addInteraction(interaction)
print "starting warm-up ..."
# print some status information (time, measured temperature, pressure,
# pressure tensor (xy only), kinetic energy, potential energy, total energy, boxsize)
espressopp.tools.info(system, integrator)
for step in range(warmup_nloops):
    # perform warmup_isteps integration steps
integrator.run(warmup_isteps)
    # increase the Lennard-Jones epsilon of the potential
LJpot.epsilon += epsilon_delta
# update the type0-type0 interaction to use the new values of LJpot
interaction.setPotential(type1=0, type2=0, potential=LJpot)
# print status info
espressopp.tools.info(system, integrator)
print "warmup finished"
# remove the force capping interaction from the system
system.removeInteraction(0)
# the equilibration uses a different interaction cutoff, so the current
# verlet list is no longer needed and would only waste CPU time
verletlist.disconnect()
########################################################################
# 7. setting up interaction potential for the equilibration #
########################################################################
# create a new verlet list that uses a cutoff radius = r_cutoff
# the verlet radius is automatically increased by system.skin (see system setup)
verletlist = espressopp.VerletList(system, r_cutoff)
# define a Lennard-Jones interaction that uses a verlet list
interaction = espressopp.interaction.VerletListLennardJones(verletlist)
# use a Lennard-Jones potential between 2 particles of type 0
# the potential is automatically shifted so that U(r=cutoff) = 0.0
# if the potential should not be shifted set shift=0.0
potential = interaction.setPotential(type1=0, type2=0,
potential=espressopp.interaction.LennardJones(
epsilon=epsilon, sigma=sigma, cutoff=r_cutoff, shift=0.0))
########################################################################
# 8. running the equilibration loop #
########################################################################
# add the new interaction to the system
system.addInteraction(interaction)
# since the interaction cut-off changed, the size of the cells used to
# speed up verlet list builds should be adjusted accordingly
system.storage.cellAdjust()
# set all integrator timers to zero again (they were increased during warmup)
integrator.resetTimers()
# set integrator time step to zero again
integrator.step = 0
print "starting equilibration ..."
# print initial status information
espressopp.tools.info(system, integrator)
for step in range(equil_nloops):
# perform equilibration_isteps integration steps
integrator.run(equil_isteps)
# print status information
espressopp.tools.info(system, integrator)
print "equilibration finished"
########################################################################
# 9. writing configuration to file #
########################################################################
gro_file = chemlab.files_io.GROFile.load_data(
system,
'output.gro',
['A1', 'B1'],
['MOL'],
particle_ids)
gro_file.write(force=True)
print "finished."
| cgchemlab/chemlab | examples/chain_growth_catalytic/prepare_sample_system.py | Python | gpl-3.0 | 11,964 | ["ESPResSo"] | cfe70260e3f1c953c680f5b62db409f8e60659394caeef0a9996d6681e210af2 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import cgi
import re
import calendar
from invenio.dbquery import run_sql, Error
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENT, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.webinterface_handler_wsgi_utils import Field
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
from invenio.textmarc2xmlmarc import transform_file
from invenio.shellutils import run_shell_command
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, this useragent cannot use the service."
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, this useragent cannot use the service."
_log(msg)
return _write(req, msg)
arg_file = file_content
arg_mode = mode
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
return _write(req, msg)
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
return _write(req, msg)
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
return _write(req, msg)
if isinstance(arg_file, Field):
arg_file = arg_file.value
# write temporary file:
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]
if permitted_dbcollids != ['*']: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
return _write(req, msg)
# run upload command
bibupload_path = CFG_BINDIR + '/bibupload -u batchupload'
run_shell_command('%s %s %s' % (bibupload_path, arg_mode, filename))
msg = "[INFO] %s %s %s" % (bibupload_path, arg_mode, filename)
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1"):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
            error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1, 'invalid_marc': 2}
# write temporary file:
if filetype == 'marcxml':
metafile = metafile.value
else:
metafile = _transform_input_to_marcxml(file_input=metafile.value)
user_info = collect_user_info(req)
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# check MARCXML validity
if filetype == 'marcxml':
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
return (error_codes['invalid_marc'], msg)
# run upload command:
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
jobid = task_low_level_submission('bibupload', user_info['nickname'], mode, "--name=" + metafilename, "--priority=" + priority, "-t", date, filename)
else:
jobid = task_low_level_submission('bibupload', user_info['nickname'], mode, "--name=" + metafilename, "--priority=" + priority, filename)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG, priority="1"):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
1 - More than one possible recID, ambiguous behaviour
2 - No records match that file name
        3 - File already exists
        4 - A file with the same name and format already exists
        5 - No rights to upload to the collection
    """
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.bibdocfile import BibRecDocs, file_strip_ext
import shutil
from invenio.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError, error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists"),
5: _("No rights to upload to collection '%s'")}
# Create directory DONE/ if doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.fullname == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
if req is not None:
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = err_desc[5] % file_collection
errors.append((docfile, error_msg))
continue
tempfile.tempdir = CFG_TMPSHAREDDIR
# Move document to be uploaded to temporary folder
tmp_file = tempfile.mktemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
filename = tempfile.mktemp(prefix=identifier + '_')
filedesc = open(filename, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user = ""
if req is not None:
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
            # Execute bibupload with the appropriate mode
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
jobid = task_low_level_submission('bibupload', user, "--" + mode, "--name=" + docfile, "--priority=" + priority, date, filename)
else:
jobid = task_low_level_submission('bibupload', user, "--" + mode, "--name=" + docfile, "--priority=" + priority, filename)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
errors.append('MoveError')
return errors, info
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def check_date(date):
""" Check if date is correct
@return:
0 - Default or correct date
3 - Incorrect format
4 - Date does not exist
"""
if not date or date == "yyyy-mm-dd":
return 0
correct_format = re.match("2[01]\d\d-[01]?\d-[0-3]?\d", date)
if not correct_format:
return 3
#separate year, month, day
date = correct_format.group(0).split("-")
try:
calendar.weekday(int(date[0]), int(date[1]), int(date[2]))
except ValueError:
return 4
return 0
def check_time(time):
""" Check if time is correct
@return:
0 - Default or correct time
1 - Incorrect format
"""
if not time or time == "hh:mm:ss":
return 0
correct_format = re.match("[0-2]\d:[0-5]\d:[0-5]\d", time)
if not correct_format:
return 1
return 0
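# Illustrative sketch (not part of the original module): how the two validators
# above report problems via integer codes.  The input strings are hypothetical.
#
#   check_date("2011-02-30")  ->  4   (format is fine, but the date does not exist)
#   check_date("11-02-28")    ->  3   (incorrect format)
#   check_date("yyyy-mm-dd")  ->  0   (default placeholder is accepted)
#   check_time("12:61:00")    ->  1   (incorrect format)
#   check_time("23:59:59")    ->  0   (valid time)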
def user_authorization(req, ln):
""" Check user authorization to visit page """
_ = gettext_set_language(ln)
user_info = collect_user_info(req)
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
if user_info['email'] == 'guest':
error_msg = _("Guests are not authorized to run batchuploader")
else:
error_msg = _("The user '%s' is not authorized to run batchuploader" % \
(cgi.escape(user_info['nickname'])))
return page_not_authorized(req=req, referer=referer,
text=error_msg, navmenuid="batchuploader")
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _check_client_ip(req):
"""
Is this client permitted to use the service?
"""
client_ip = _get_client_ip(req)
if client_ip in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.keys():
return True
return False
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
user_info = collect_user_info(req)
client_useragent = user_info['agent']
if client_useragent in CFG_BATCHUPLOADER_WEB_ROBOT_AGENT:
return True
return False
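# Illustrative sketch (not part of the original module), assuming the usual shape
# of the two robot-access settings consulted above: a mapping from client IP to
# the 980__$a collection values it may touch (or ['*'] as a wildcard), and a
# container of accepted user agents checked with `in`.  The IP addresses, agent
# string and collection names below are hypothetical.
#
#   CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS = {
#       '127.0.0.1': ['*'],                  # wildcard: any collection allowed
#       '192.0.2.10': ['BOOK', 'REPORT'],    # restricted to two collections
#   }
#   CFG_BATCHUPLOADER_WEB_ROBOT_AGENT = ['invenio_webupload']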
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
Is this client able to upload such a FILENAME?
check 980 $a values and collection tags in the file to see if they are among the
permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
    Useful to make sure that the client does not overwrite other records by
mistake.
"""
from invenio.bibrecord import create_records
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return(1, "Invalid collection in tag 980")
if not webupload:
if not filename_tag980_value in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
if not filename_rec_id_collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.bibrecord import record_get_field_values
from invenio.search_engine import guess_collection_of_a_record
from invenio.bibupload import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
try:
records = find_records_from_extoaiid(tag_oaiid)
except Error:
records = []
if records:
record = records.pop()
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _transform_input_to_marcxml(file_input=""):
"""
Takes text-marc as input and transforms it
to MARCXML.
"""
# Create temporary file to read from
tmp_fd, filename = tempfile.mkstemp(dir=CFG_TMPSHAREDDIR)
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
|
Markus-Goetz/CDS-Invenio-Authorlist
|
modules/bibupload/lib/batchuploader_engine.py
|
Python
|
gpl-2.0
| 26,870
|
[
"VisIt"
] |
2367e528665cf59ab18d40f5954356e9d57ce2de9123d2a3e3899dc9d83418bc
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import mdtraj as md
import numpy as np
from pyemma.util.log import getLogger
from pyemma.coordinates.data.util.reader_utils import copy_traj_attributes as _copy_traj_attributes, \
preallocate_empty_trajectory as _preallocate_empty_trajectory, enforce_top as _enforce_top
__all__ = ['frames_from_file']
log = getLogger(__name__)
def frames_from_file(file_name, top, frames, chunksize=100,
stride=1, verbose=False, copy_not_join=False):
r"""Reads one "file_name" molecular trajectory and returns an mdtraj trajectory object
containing only the specified "frames" in the specified order.
Extracts the specified sequence of time/trajectory indexes from the input loader
and saves it in a molecular dynamics trajectory. The output format will be determined
by the outfile name.
Parameters
----------
file_name: str.
Absolute path to the molecular trajectory file, ex. trajout.xtc
top : str, mdtraj.Trajectory, or mdtraj.Topology
        Topology information to load the molecular trajectory file in :py:obj:`file_name`
frames : ndarray of shape (n_frames, ) and integer type
Contains the frame indices to be retrieved from "file_name". There is no restriction as to what
this array has to look like other than:
- positive integers
- <= the total number of frames in "file_name".
"frames" need not be monotonous or unique, i.e, arrays like
[3, 1, 4, 1, 5, 9, 9, 9, 9, 3000, 0, 0, 1] are welcome
verbose: boolean.
Level of verbosity while looking for "frames". Useful when using "chunksize" with large trajectories.
It provides the no. of frames accumulated for every chunk.
stride : integer, default is 1
This parameter informs :py:func:`save_traj` about the stride used in :py:obj:`indexes`. Typically, :py:obj:`indexes`
contains frame-indexes that match exactly the frames of the files contained in :py:obj:`traj_inp.trajfiles`.
However, in certain situations, that might not be the case. Examples are cases in which a stride value != 1
was used when reading/featurizing/transforming/discretizing the files contained in :py:obj:`traj_inp.trajfiles`.
copy_not_join : boolean, default is False
This parameter decides how geometry objects are appended onto one another. If left to False, mdtraj's own
:py:obj:`join` method will be used, which is the recommended method. However, for some combinations of
        :py:obj:`chunksize` and :py:obj:`frames` this might not be very effective. If one sets :py:obj:`copy_not_join`
to True, the returned :py:obj:`traj` is preallocated and the important attributes (currently traj.xyz, traj.time,
traj.unit_lengths, traj.unit_angles) are broadcasted onto it.
Returns
-------
traj : an md trajectory object containing the frames specified in "frames",
in the order specified in "frames".
"""
    assert isinstance(frames, np.ndarray), "input frames must be a numpy ndarray, got %s instead "%type(frames)
    assert np.ndim(frames) == 1, "input frames must have ndim = 1, got np.ndim = %u instead "%np.ndim(frames)
assert isinstance(file_name, str), "input file_name must be a string, got %s instead"%type(file_name)
assert isinstance(top, (str, md.Trajectory, md.Topology)), "input topology must of one of type: " \
"str, mdtraj.Trajectory, or mdtraj.Topology. " \
"Got %s instead" % type(top)
# Enforce topology to be a md.Topology object
top = _enforce_top(top)
# Prepare the trajectory object
if copy_not_join:
traj = _preallocate_empty_trajectory(top, frames.shape[0])
else:
traj = None
# Prepare the running number of accumulated frames
cum_frames = 0
# Because the trajectory is streamed "chronologically", but "frames" can have any arbitrary order
# we store that order in "orig_order" to reshuffle the traj at the end
orig_order = frames.argsort().argsort()
sorted_frames = np.sort(frames)
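    # Worked example (illustrative, not in the original source): for
    # frames = [5, 1, 3] we get sorted_frames = [1, 3, 5] and
    # orig_order = frames.argsort().argsort() = [2, 0, 1].  The chunks below are
    # collected in sorted frame order (1, 3, 5); indexing the assembled trajectory
    # with orig_order at the very end restores the caller's order (5, 1, 3).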
for jj, traj_chunk in enumerate(md.iterload(file_name, top=top,
chunk=chunksize, stride=stride)):
# Create an indexing array for this trajchunk
i_idx = jj*chunksize
f_idx = i_idx+chunksize
chunk_frames = np.arange(i_idx, f_idx)[:traj_chunk.n_frames]
# Frames that appear more than one time will be kept
good_frames = np.hstack([np.argwhere(ff == chunk_frames).squeeze() for ff in sorted_frames])
# Keep the good frames of this chunk
if np.size(good_frames) > 0:
if copy_not_join: # => traj has been already preallocated, see above
traj = _copy_traj_attributes(traj, traj_chunk[good_frames], cum_frames)
elif traj is None: # => copy_not_join is False AND 1st run
traj = traj_chunk[good_frames]
else: # => copy_not_join is False AND we're not on the 1st run
traj = traj.join(traj_chunk[good_frames])
cum_frames += np.size(good_frames)
if verbose:
log.info('chunk %u of traj has size %u, indices %6u...%6u. Accumulated frames %u'
% (jj, traj_chunk.n_frames, chunk_frames[0], chunk_frames[-1], cum_frames))
# Check if we can already stop iterating
if chunk_frames[-1] >= frames.max():
break
# Make sure that "frames" did not contain impossible frames
if (frames > chunk_frames[-1]).any():
raise Exception('Cannot provide frames %s for trajectory %s with n_frames = %u'
% (frames[frames > chunk_frames[-1]], file_name, chunk_frames[-1]))
if stride != 1 and verbose:
        log.info('A stride value of %u was parsed, interpreting "indexes" accordingly.'%stride)
    # The trajectory is returned "reshuffled" back into the order given in "frames"
return traj[orig_order]
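# Minimal usage sketch (illustrative only; the file names below are hypothetical):
#
#   import numpy as np
#   traj = frames_from_file('trajout.xtc', 'topology.pdb',
#                           np.array([10, 0, 10, 5]), chunksize=50)
#   # traj now holds frames 10, 0, 10 and 5 of trajout.xtc, in that order.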
|
trendelkampschroer/PyEMMA
|
pyemma/coordinates/data/frames_from_file.py
|
Python
|
bsd-2-clause
| 7,557
|
[
"MDTraj"
] |
f3d47b75f0de73b1149bf0ff42be505b3f985dc9246743ec43a923b96ffb072e
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import platform, os, re
import subprocess
from mooseutils import colorText
from collections import OrderedDict
import json
TERM_COLS = int(os.getenv('MOOSE_TERM_COLS', '110'))
TERM_FORMAT = os.getenv('MOOSE_TERM_FORMAT', 'njcst')
MOOSE_OPTIONS = {
'ad_mode' : { 're_option' : r'#define\s+MOOSE_SPARSE_AD\s+(\d+)',
'default' : 'NONSPARSE',
'options' :
{ 'SPARSE' : '1',
'NONSPARSE' : '0'
}
},
'ad_indexing_type' : { 're_option' : r'#define\s+MOOSE_GLOBAL_AD_INDEXING\s+(\d+)',
'default' : 'LOCAL',
'options' :
{ 'GLOBAL' : '1',
'LOCAL' : '0'
}
},
'ad_size' : { 're_option' : r'#define\s+MOOSE_AD_MAX_DOFS_PER_ELEM\s+(\d+)',
'default' : '50'
},
'libpng' : { 're_option' : r'#define\s+MOOSE_HAVE_LIBPNG\s+(\d+)',
'default' : 'FALSE',
'options' :
{ 'TRUE' : '1',
'FALSE' : '0'
}
}
}
LIBMESH_OPTIONS = {
'mesh_mode' : { 're_option' : r'#define\s+LIBMESH_ENABLE_PARMESH\s+(\d+)',
'default' : 'REPLICATED',
'options' :
{
'DISTRIBUTED' : '1',
'REPLICATED' : '0'
}
},
'unique_ids' : { 're_option' : r'#define\s+LIBMESH_ENABLE_UNIQUE_ID\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'dtk' : { 're_option' : r'#define\s+LIBMESH_TRILINOS_HAVE_DTK\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'boost' : { 're_option' : r'#define\s+LIBMESH_HAVE_EXTERNAL_BOOST\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'vtk' : { 're_option' : r'#define\s+LIBMESH_HAVE_VTK\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'tecplot' : { 're_option' : r'#define\s+LIBMESH_HAVE_TECPLOT_API\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'petsc_major' : { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_MAJOR\s+(\d+)',
'default' : '1'
},
'petsc_minor' : { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_MINOR\s+(\d+)',
'default' : '1'
},
'petsc_subminor' : { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_SUBMINOR\s+(\d+)',
'default' : '1'
},
'petsc_version_release' : { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_RELEASE\s+(\d+)',
'default' : 'TRUE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'slepc_major' : { 're_option' : r'#define\s+LIBMESH_DETECTED_SLEPC_VERSION_MAJOR\s+(\d+)',
'default' : '1'
},
'slepc_minor' : { 're_option' : r'#define\s+LIBMESH_DETECTED_SLEPC_VERSION_MINOR\s+(\d+)',
'default' : '1'
},
'slepc_subminor' : { 're_option' : r'#define\s+LIBMESH_DETECTED_SLEPC_VERSION_SUBMINOR\s+(\d+)',
'default' : '1'
},
'dof_id_bytes' : { 're_option' : r'#define\s+LIBMESH_DOF_ID_BYTES\s+(\d+)',
'default' : '4'
},
'petsc_debug' : { 're_option' : r'#define\s+LIBMESH_PETSC_USE_DEBUG\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'curl' : { 're_option' : r'#define\s+LIBMESH_HAVE_CURL\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'threads' : { 're_option' : r'#define\s+LIBMESH_USING_THREADS\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'tbb' : { 're_option' : r'#define\s+LIBMESH_HAVE_TBB_API\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'openmp' : { 're_option' : r'#define\s+LIBMESH_HAVE_OPENMP\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'superlu' : { 're_option' : r'#define\s+LIBMESH_PETSC_HAVE_SUPERLU_DIST\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'mumps' : { 're_option' : r'#define\s+LIBMESH_PETSC_HAVE_MUMPS\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'strumpack' : { 're_option' : r'#define\s+LIBMESH_PETSC_HAVE_STRUMPACK\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'parmetis' : { 're_option' : r'#define\s+LIBMESH_(?:PETSC_){0,1}HAVE_PARMETIS\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'chaco' : { 're_option' : r'#define\s+LIBMESH_PETSC_HAVE_CHACO\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'party' : { 're_option' : r'#define\s+LIBMESH_PETSC_HAVE_PARTY\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'ptscotch' : { 're_option' : r'#define\s+LIBMESH_PETSC_HAVE_PTSCOTCH\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'slepc' : { 're_option' : r'#define\s+LIBMESH_HAVE_SLEPC\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'cxx11' : { 're_option' : r'#define\s+LIBMESH_HAVE_CXX11\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'unique_id' : { 're_option' : r'#define\s+LIBMESH_ENABLE_UNIQUE_ID\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'fparser_jit' : { 're_option' : r'#define\s+LIBMESH_HAVE_FPARSER_JIT\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
}
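# Illustrative note (not part of the original file): each entry above is consumed
# by getConfigOption() further below.  For a hypothetical libmesh_config.h line
#
#   #define LIBMESH_HAVE_SLEPC 1
#
# the 'slepc' entry's regular expression captures "1", which is mapped back through
# its 'options' table to the key 'TRUE'; if the define is absent, the entry's
# 'default' value ('FALSE') is used instead.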
## Run a command and return the output, or ERROR: + output if retcode != 0
def runCommand(cmd, cwd=None):
# On Windows it is not allowed to close fds while redirecting output
should_close = platform.system() != "Windows"
p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=should_close, shell=True)
output = p.communicate()[0].decode('utf-8')
if (p.returncode != 0):
output = 'ERROR: ' + output
return output
## method to return current character count with given results_dictionary
def resultCharacterCount(results_dict):
# { formatted_result_key : ( text, color ) }
printable_items = []
for result_key, printable in results_dict.items():
if printable:
printable_items.append(printable[0])
return len(' '.join(printable_items))
## convert the incoming message tuple to the same case, as the case of format_key
## store this information to the same cased key in formatted_results dict.
def formatCase(format_key, message, formatted_results):
if message and format_key.isupper():
formatted_results[format_key] = (message[0].upper(), message[1])
elif message:
formatted_results[format_key] = (message[0], message[1])
def formatStatusMessage(job, status, message, options):
# If there is no message, use status as message
if not message:
message = status
# Add caveats if requested
if job.isPass() and options.extra_info:
for check in list(options._checks.keys()):
if job.specs.isValid(check) and not 'ALL' in job.specs[check]:
job.addCaveats(check)
# Format the failed message to list a big fat FAILED in front of the status
elif job.isFail():
return 'FAILED (%s)' % (message)
return message
## print an optionally colorified test result
#
# The test will not be colored if
# 1) options.colored is False,
# 2) the color parameter is False.
def formatResult(job, options, result='', color=True, **kwargs):
# Support only one instance of a format identifier, but obey the order
terminal_format = list(OrderedDict.fromkeys(list(TERM_FORMAT)))
status, message, message_color, exit_code = job.getJointStatus()
color_opts = {'code' : options.code, 'colored' : options.colored}
# container for every printable item
formatted_results = dict.fromkeys(terminal_format)
# Populate formatted_results for those we support, with requested items
# specified by the user. Caveats and justifications are parsed outside of
# loop as these two items change based on character count consumed by others.
caveat_index = None
justification_index = None
for i, f_key in enumerate(terminal_format):
# Store the caveat request. We will use this later.
if str(f_key).lower() == 'c':
caveat_index = terminal_format[i]
# Store the justification request. We will use this later.
if str(f_key).lower() == 'j':
justification_index = terminal_format[i]
if str(f_key).lower() == 'p':
pre_result = ' '*(8-len(status)) + status
formatCase(f_key, (pre_result, message_color), formatted_results)
if str(f_key).lower() == 's':
if not result:
result = formatStatusMessage(job, status, message, options)
# refrain from printing a duplicate pre_result if it will match result
if 'p' in [x.lower() for x in terminal_format] and result == status:
formatCase(f_key, None, formatted_results)
else:
formatCase(f_key, (result, message_color), formatted_results)
if str(f_key).lower() == 'n':
formatCase(f_key, (job.getTestName(), None), formatted_results)
# Adjust the precision of time, so we can justify the length. The higher the
# seconds, the lower the decimal point, ie: [0.000s] - [100.0s]. Max: [99999s]
if str(f_key).lower() == 't' and options.timing:
actual = float(job.getTiming())
int_len = len(str(int(actual)))
precision = min(3, max(0,(4-int_len)))
f_time = '[' + '{0: <6}'.format('%0.*fs' % (precision, actual)) + ']'
formatCase(f_key, (f_time, None), formatted_results)
# Decorate Caveats
if job.getCaveats() and caveat_index is not None and 'caveats' in kwargs and kwargs['caveats']:
caveats = ','.join(job.getCaveats())
caveat_color = message_color
if not job.isFail():
caveat_color = 'CYAN'
f_caveats = '[' + caveats + ']'
# +1 space created later by join
character_count = resultCharacterCount(formatted_results) + len(f_caveats) + 1
# If caveats are the last items the user wants printed, or -e (extra_info) is
# called, allow caveats to consume available character count beyond TERM_COLS.
# Else, we trim caveats:
if terminal_format[-1].lower() != 'c' \
and not options.extra_info \
and character_count > TERM_COLS:
over_by_amount = character_count - TERM_COLS
f_caveats = '[' + caveats[:len(caveats) - (over_by_amount + 3)] + '...]'
formatCase(caveat_index, (f_caveats, caveat_color), formatted_results)
# Fill the available space left, with dots
if justification_index is not None:
j_dot = None
# +1 space created later by join
character_count = resultCharacterCount(formatted_results) + 1
if character_count < TERM_COLS:
j_dot = ('.'*max(0, (TERM_COLS - character_count)), 'GREY')
elif character_count == TERM_COLS:
j_dot = ('', 'GREY')
formatCase(justification_index, j_dot, formatted_results)
# If color, decorate those items which support it
if color:
for format_rule, printable in formatted_results.items():
if printable and (printable[0] and printable[1]):
formatted_results[format_rule] = (colorText(printable[0], printable[1], **color_opts), printable[1])
# Do special coloring for first directory
if format_rule == 'n' and options.color_first_directory:
formatted_results[format_rule] = (colorText(job.specs['first_directory'], 'CYAN', **color_opts) +\
                    formatted_results[format_rule][0].replace(job.specs['first_directory'], '', 1), 'CYAN') # Strip out first occurrence only
# join printable results in the order in which the user asked
final_results = ' '.join([formatted_results[x][0] for x in terminal_format if formatted_results[x]])
return final_results
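# Illustrative note (not part of the original file): the characters in TERM_FORMAT
# select and order the printable fields handled above.  With the default 'njcst'
# the line is built, in order, from the test name (n), the justification dots (j),
# any caveats (c), the status message (s) and the timing field (t); 'p' would add
# the short right-aligned status prefix.  An upper-case letter prints the same
# field in upper case (see formatCase()).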
## Color the error messages if the options permit, also do not color in bitten scripts because
# it messes up the trac output.
# supports weirded html for more advanced coloring schemes. \verbatim<r>,<g>,<y>,<b>\endverbatim All colors are bolded.
def getPlatforms():
# We'll use uname to figure this out. platform.uname() is available on all platforms
# while os.uname() is not (See bugs.python.org/issue8080).
# Supported platforms are LINUX, DARWIN, ML, MAVERICKS, YOSEMITE, or ALL
platforms = set(['ALL'])
raw_uname = platform.uname()
if raw_uname[0].upper() == 'DARWIN':
platforms.add('DARWIN')
if re.match("12\.", raw_uname[2]):
platforms.add('ML')
if re.match("13\.", raw_uname[2]):
platforms.add("MAVERICKS")
if re.match("14\.", raw_uname[2]):
platforms.add("YOSEMITE")
else:
platforms.add(raw_uname[0].upper())
return platforms
def runExecutable(libmesh_dir, location, bin, args):
# Installed location of libmesh executable
libmesh_installed = libmesh_dir + '/' + location + '/' + bin
# Uninstalled location of libmesh executable
libmesh_uninstalled = libmesh_dir + '/' + bin
# Uninstalled location of libmesh executable
libmesh_uninstalled2 = libmesh_dir + '/contrib/bin/' + bin
# The eventual variable we will use to refer to libmesh's executable
libmesh_exe = ''
if os.path.exists(libmesh_installed):
libmesh_exe = libmesh_installed
elif os.path.exists(libmesh_uninstalled):
libmesh_exe = libmesh_uninstalled
elif os.path.exists(libmesh_uninstalled2):
libmesh_exe = libmesh_uninstalled2
else:
print(("Error! Could not find '" + bin + "' in any of the usual libmesh's locations!"))
exit(1)
return runCommand(libmesh_exe + " " + args).rstrip()
def getCompilers(libmesh_dir):
# Supported compilers are GCC, INTEL or ALL
compilers = set(['ALL'])
mpicxx_cmd = str(runExecutable(libmesh_dir, "bin", "libmesh-config", "--cxx"))
# Account for usage of distcc or ccache
if "distcc" in mpicxx_cmd or "ccache" in mpicxx_cmd:
mpicxx_cmd = mpicxx_cmd.split()[-1]
# If mpi is in the command, run -show to get the compiler
if "mpi" in mpicxx_cmd:
raw_compiler = runCommand(mpicxx_cmd + " -show")
else:
raw_compiler = mpicxx_cmd
if re.match('\S*icpc\s', raw_compiler) != None:
compilers.add("INTEL")
elif re.match('\S*clang\+\+\s', raw_compiler) != None:
compilers.add("CLANG")
elif re.match('\S*[cg]\+\+\s', raw_compiler) != None:
compilers.add("GCC")
return compilers
def getLibMeshThreadingModel(libmesh_dir):
threading_models = set(['ALL'])
have_threads = 'TRUE' in getLibMeshConfigOption(libmesh_dir, 'threads');
if have_threads:
have_tbb = 'TRUE' in getLibMeshConfigOption(libmesh_dir, 'tbb')
have_openmp = 'TRUE' in getLibMeshConfigOption(libmesh_dir, 'openmp')
if have_openmp:
threading_models.add("OPENMP")
elif have_tbb:
threading_models.add("TBB")
else:
threading_models.add("PTHREADS")
else:
threading_models.add("NONE")
return threading_models
def getPetscVersion(libmesh_dir):
major_version = getLibMeshConfigOption(libmesh_dir, 'petsc_major')
minor_version = getLibMeshConfigOption(libmesh_dir, 'petsc_minor')
subminor_version = getLibMeshConfigOption(libmesh_dir, 'petsc_subminor')
if len(major_version) != 1 or len(minor_version) != 1:
print("Error determining PETSC version")
exit(1)
return major_version.pop() + '.' + minor_version.pop() + '.' + subminor_version.pop()
def getSlepcVersion(libmesh_dir):
major_version = getLibMeshConfigOption(libmesh_dir, 'slepc_major')
minor_version = getLibMeshConfigOption(libmesh_dir, 'slepc_minor')
subminor_version = getLibMeshConfigOption(libmesh_dir, 'slepc_subminor')
if len(major_version) != 1 or len(minor_version) != 1 or len(major_version) != 1:
return None
return major_version.pop() + '.' + minor_version.pop() + '.' + subminor_version.pop()
def checkLogicVersionSingle(checks, iversion, package):
logic, version = re.search(r'(.*?)(\d\S+)', iversion).groups()
if logic == '' or logic == '=':
if version == checks[package]:
return True
else:
return False
# Logical match
if logic == '>' and list(map(int, checks[package].split("."))) > list(map(int, version.split("."))):
return True
elif logic == '>=' and list(map(int, checks[package].split("."))) >= list(map(int, version.split("."))):
return True
elif logic == '<' and list(map(int, checks[package].split("."))) < list(map(int, version.split("."))):
return True
elif logic == '<=' and list(map(int, checks[package].split("."))) <= list(map(int, version.split("."))):
return True
return False
def checkVersion(checks, test, package):
# This is a cheap tokenizer that will split apart the logic into logic groups separated by && and ||
split_versions_and_logic = re.findall(r".*?(?:(?:&&)|(?:\|\|)|(?:\s*$))", test)
for group in split_versions_and_logic:
m = re.search(r'\s*([^\d]*[\d.]*)\s*(\S*)', group)
if m:
version, logic_op = m.group(1, 2)
result = checkLogicVersionSingle(checks, version, package)
if logic_op == '||':
if result:
return True
elif logic_op == '&&':
if not result:
return False
else:
return result
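# Illustrative sketch (not in the original file): a hypothetical requirement string
# such as
#
#   '>=3.11.0 && <3.15.0'
#
# is split by checkVersion() into '>=3.11.0' and '<3.15.0'; each piece is passed to
# checkLogicVersionSingle(), which compares the installed version stored in `checks`
# against the requested one component-wise, as lists of integers.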
# Break down petsc version logic in a new define
# TODO: find a way to eval() logic instead
def checkPetscVersion(checks, test):
# If any version of petsc works, return true immediately
if 'ALL' in set(test['petsc_version']):
return (True, None)
version_string = ' '.join(test['petsc_version'])
return (checkVersion(checks, version_string, 'petsc_version'), version_string)
# Break down slepc version logic in a new define
def checkSlepcVersion(checks, test):
# User does not require anything
if len(test['slepc_version']) == 0:
return (False, None)
# SLEPc is not installed
if checks['slepc_version'] == None:
return (False, None)
# If any version of SLEPc works, return true immediately
if 'ALL' in set(test['slepc_version']):
return (True, None)
version_string = ' '.join(test['slepc_version'])
return (checkVersion(checks, version_string, 'slepc_version'), version_string)
def getIfAsioExists(moose_dir):
option_set = set(['ALL'])
if os.path.exists(moose_dir+"/framework/contrib/asio/include/asio.hpp"):
option_set.add('TRUE')
else:
option_set.add('FALSE')
return option_set
def getConfigOption(config_files, option, options):
# Some tests work differently with parallel mesh enabled
# We need to detect this condition
option_set = set(['ALL'])
success = 0
for config_file in config_files:
if success == 1:
break
try:
f = open(config_file)
contents = f.read()
f.close()
info = options[option]
m = re.search(info['re_option'], contents)
if m != None:
if 'options' in info:
for value, option in info['options'].items():
if m.group(1) == option:
option_set.add(value)
else:
option_set.clear()
option_set.add(m.group(1))
else:
option_set.add(info['default'])
success = 1
except IOError:
pass
if success == 0:
print("Error! Could not find libmesh_config.h in any of the usual locations!")
exit(1)
return option_set
def getMooseConfigOption(moose_dir, option):
filenames = [
moose_dir + '/framework/include/base/MooseConfig.h',
moose_dir + '/include/moose/MooseConfig.h',
];
return getConfigOption(filenames, option, MOOSE_OPTIONS)
def getLibMeshConfigOption(libmesh_dir, option):
filenames = [
libmesh_dir + '/include/base/libmesh_config.h', # Old location
libmesh_dir + '/include/libmesh/libmesh_config.h' # New location
];
return getConfigOption(filenames, option, LIBMESH_OPTIONS)
def getSharedOption(libmesh_dir):
# Some tests may only run properly with shared libraries on/off
# We need to detect this condition
shared_option = set(['ALL'])
libtool = os.path.join(libmesh_dir, "contrib", "bin", "libtool")
f = open(libtool, "r")
found = False
for line in f:
try:
(key, value) = line.rstrip().split("=", 2)
except Exception as e:
continue
if key == 'build_libtool_libs':
if value == 'yes':
shared_option.add('DYNAMIC')
found = True
break
if value == 'no':
shared_option.add('STATIC')
found = True
break
f.close()
if not found:
# Neither no nor yes? Not possible!
print("Error! Could not determine whether shared libraries were built.")
exit(1)
return shared_option
def getInitializedSubmodules(root_dir):
"""
Gets a list of initialized submodules.
Input:
root_dir[str]: path to execute the git command. This should be the root
directory of the app so that the submodule names are correct
Return:
        list[str]: List of initialized submodule names or an empty list if there was an error.
"""
output = str(runCommand("git submodule status", cwd=root_dir))
if output.startswith("ERROR"):
return []
# This ignores submodules that have a '-' at the beginning which means they are not initialized
return re.findall(r'^[ +]\S+ (\S+)', output, flags=re.MULTILINE)
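# Illustrative sketch (not in the original file) of the `git submodule status`
# output parsed above; the hashes and paths are hypothetical:
#
#    c1a2b3 libmesh (v1.0.0)        <- leading space: initialized, clean
#   +d4e5f6 petsc (heads/main)      <- leading '+':   initialized, modified
#   -a7b8c9 large_media             <- leading '-':   not initialized (skipped)
#
# The regular expression keeps only the second whitespace-separated token (the
# submodule path) of lines starting with ' ' or '+'.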
def addObjectsFromBlock(objs, node, block_name):
"""
Utility function that iterates over a dictionary and adds keys
to the executable object name set.
"""
data = node.get(block_name, {})
if data: # could be None so we can't just iterate over items
for name, block in data.items():
objs.add(name)
addObjectNames(objs, block)
def addObjectNames(objs, node):
"""
Add object names that reside in this node.
"""
if not node:
return
addObjectsFromBlock(objs, node, "subblocks")
addObjectsFromBlock(objs, node, "subblock_types")
addObjectsFromBlock(objs, node, "types")
star = node.get("star")
if star:
addObjectNames(objs, star)
def getExeJSON(exe):
"""
Extracts the JSON from the dump
"""
output = runCommand("%s --json" % exe)
output = output.split('**START JSON DATA**\n')[1]
output = output.split('**END JSON DATA**\n')[0]
return json.loads(output)
def getExeObjects(exe):
"""
Gets a set of object names that are in the executable JSON dump.
"""
data = getExeJSON(exe)
obj_names = set()
addObjectsFromBlock(obj_names, data, "blocks")
return obj_names
def getExeRegisteredApps(exe):
"""
Gets a list of registered applications
"""
data = getExeJSON(exe)
return data.get('global', {}).get('registered_apps', [])
def checkOutputForPattern(output, re_pattern):
"""
Returns boolean of pattern match
"""
if re.search(re_pattern, output, re.MULTILINE | re.DOTALL) == None:
return False
else:
return True
def checkOutputForLiteral(output, literal):
"""
Returns boolean of literal match
"""
if output.find(literal) == -1:
return False
else:
return True
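# Illustrative examples (hypothetical strings, not in the original file):
#
#   checkOutputForPattern("Solve Converged!\n", r"Solve\s+Converged")  -> True
#   checkOutputForLiteral("ERROR: mesh not found", "ERROR")            -> True
#   checkOutputForLiteral("All tests passed", "ERROR")                 -> False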
def deleteFilesAndFolders(test_dir, paths, delete_folders=True):
"""
Delete specified files
test_dir: The base test directory
    paths: A list containing files to delete
delete_folders: Attempt to delete any folders created
"""
for file in paths:
full_path = os.path.join(test_dir, file)
if os.path.exists(full_path):
try:
os.remove(full_path)
except:
print(("Unable to remove file: " + full_path))
# Now try to delete directories that might have been created
if delete_folders:
for file in paths:
path = os.path.dirname(file)
while path != '':
(path, tail) = os.path.split(path)
try:
os.rmdir(os.path.join(test_dir, path, tail))
except:
# There could definitely be problems with removing the directory
# because it might be non-empty due to checkpoint files or other
# files being created on different operating systems. We just
# don't care for the most part and we don't want to error out.
# As long as our test boxes clean before each test, we'll notice
# the case where these files aren't being generated for a
# particular run.
#
# TL;DR; Just pass...
pass
# Check if test has any redirected output, and if its ready to be read
def checkOutputReady(tester, options):
checked_files = []
for redirected_file in tester.getRedirectedOutputFiles(options):
file_path = os.path.join(tester.getTestDir(), redirected_file)
if os.access(file_path, os.R_OK):
checked_files.append(file_path)
return checked_files
# return concatenated output from tests with redirected output
def getOutputFromFiles(tester, options):
file_output = ''
output_files = checkOutputReady(tester, options)
for file_path in output_files:
with open(file_path, 'r+b') as f:
file_output += "#"*80 + "\nOutput from " + file_path \
+ "\n" + "#"*80 + "\n" + readOutput(f, None, tester)
return file_output
# Read stdout and stderr file objects, append error and return the string
def readOutput(stdout, stderr, tester):
output = ''
try:
if stdout:
stdout.seek(0)
output += stdout.read().decode('utf-8')
if stderr:
stderr.seek(0)
output += stderr.read().decode('utf-8')
except UnicodeDecodeError:
tester.setStatus(tester.fail, 'non-unicode characters in output')
except:
tester.setStatus(tester.fail, 'error while attempting to read output files')
return output
# Trimming routines for job output
def trimOutput(job, options):
output = job.getOutput()
if ((job.isFail() and options.no_trimmed_output_on_error)
or (job.specs.isValid('max_buffer_size') and job.specs['max_buffer_size'] == -1)
or options.no_trimmed_output):
return output
elif job.specs.isValid('max_buffer_size'):
max_size = int(job.specs['max_buffer_size'])
else:
max_size = 100000
if len(output) <= max_size:
return output
first_part = int(max_size*(2.0/3.0))
second_part = int(max_size*(1.0/3.0))
return "%s\n%s\n\nOutput trimmed\n\n%s\n%s" % (output[:first_part],
"#"*80,
"#"*80,
output[-second_part:])
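# Illustrative note (not in the original file): with the default max_buffer_size of
# 100000 characters, a longer output is reduced to roughly its first 66666 and last
# 33333 characters, separated by a banner marking the trimmed middle section.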
|
harterj/moose
|
python/TestHarness/util.py
|
Python
|
lgpl-2.1
| 30,177
|
[
"MOOSE",
"VTK"
] |
4489d18301fdf4fd913a8b1c93ba7fae05694dd80333085b37f91185b0baf72f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains classes to wrap Python VTK to make nice molecular plots.
"""
import itertools
import math
import os
import subprocess
import time
import numpy as np
try:
import vtk
from vtk import vtkInteractorStyleTrackballCamera
except ImportError:
    # VTK not present. vtkInteractorStyleTrackballCamera is set to `object` to avoid errors in unittests.
vtk = None
vtkInteractorStyleTrackballCamera = object
from monty.dev import requires
from monty.serialization import loadfn
from pymatgen.core.periodic_table import Species
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.util.coord import in_coord_list
module_dir = os.path.dirname(os.path.abspath(__file__))
EL_COLORS = loadfn(os.path.join(module_dir, "ElementColorSchemes.yaml"))
class StructureVis:
"""
Provides Structure object visualization using VTK.
"""
@requires(vtk, "Visualization requires the installation of VTK with " "Python bindings.")
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=True,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
):
"""
Constructs a Structure Visualization.
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
            show_bonds: Set to True to show bonds. Defaults to False.
            show_polyhedron: Set to True to show polyhedrons. Defaults to
                True.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
Useful keyboard shortcuts implemented.
h : Show help
A/a : Increase/decrease cell by one unit vector in a-direction
B/b : Increase/decrease cell by one unit vector in b-direction
C/c : Increase/decrease cell by one unit vector in c-direction
# : Toggle showing of polyhedrons
- : Toggle showing of bonds
[ : Decrease poly_radii_tol_factor by 0.05
] : Increase poly_radii_tol_factor by 0.05
r : Reset camera direction
o : Orthogonalize structure
Up/Down : Rotate view along Up direction by 90 clock/anticlockwise
Left/right : Rotate view along camera direction by 90
clock/anticlockwise
"""
# create a rendering window and renderer
self.ren = vtk.vtkRenderer()
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.ren.SetBackground(1, 1, 1)
self.title = "Structure Visualizer"
# create a renderwindowinteractor
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
self.mapper_map = {}
self.structure = None
if element_color_mapping:
self.el_color_mapping = element_color_mapping
else:
self.el_color_mapping = EL_COLORS["VESTA"]
self.show_unit_cell = show_unit_cell
self.show_bonds = show_bonds
self.show_polyhedron = show_polyhedron
self.poly_radii_tol_factor = poly_radii_tol_factor
self.excluded_bonding_elements = excluded_bonding_elements if excluded_bonding_elements else []
self.show_help = True
self.supercell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.redraw()
style = StructureInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.ren.parent = self
def rotate_view(self, axis_ind=0, angle=0):
"""
Rotate the camera view.
Args:
axis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.
angle: Angle to rotate by. Defaults to 0.
"""
camera = self.ren.GetActiveCamera()
if axis_ind == 0:
camera.Roll(angle)
elif axis_ind == 1:
camera.Azimuth(angle)
else:
camera.Pitch(angle)
self.ren_win.Render()
def write_image(self, filename="image.png", magnification=1, image_format="png"):
"""
Save render window to an image.
Arguments:
filename:
filename to save to. Defaults to image.png.
magnification:
magnification. Use it to render high res images.
image_format:
choose between jpeg, png. Png is the default.
"""
render_large = vtk.vtkRenderLargeImage()
render_large.SetInput(self.ren)
if image_format == "jpeg":
writer = vtk.vtkJPEGWriter()
writer.SetQuality(80)
else:
writer = vtk.vtkPNGWriter()
render_large.SetMagnification(magnification)
writer.SetFileName(filename)
writer.SetInputConnection(render_large.GetOutputPort())
self.ren_win.Render()
writer.Write()
del render_large
def redraw(self, reset_camera=False):
"""
Redraw the render window.
Args:
reset_camera: Set to True to reset the camera to a
pre-determined default for each structure. Defaults to False.
"""
self.ren.RemoveAllViewProps()
self.picker = None
self.add_picker_fixed()
self.helptxt_mapper = vtk.vtkTextMapper()
tprops = self.helptxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 0)
if self.structure is not None:
self.set_structure(self.structure, reset_camera)
self.ren_win.Render()
def orthongonalize_structure(self):
"""
Orthogonalize the structure.
"""
if self.structure is not None:
self.set_structure(self.structure.copy(sanitize=True))
self.ren_win.Render()
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a," " b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor " "by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 " "clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by " "90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def set_structure(self, structure, reset_camera=True, to_unit_cell=True):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
to_unit_cell: Whether or not to fall back sites into the unit cell.
"""
self.ren.RemoveAllViewProps()
has_lattice = hasattr(structure, "lattice")
if has_lattice:
s = Structure.from_sites(structure, to_unit_cell=to_unit_cell)
s.make_supercell(self.supercell, to_unit_cell=to_unit_cell)
else:
s = structure
inc_coords = []
for site in s:
self.add_site(site)
inc_coords.append(site.coords)
count = 0
labels = ["a", "b", "c"]
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
if has_lattice:
matrix = s.lattice.matrix
if self.show_unit_cell and has_lattice:
# matrix = s.lattice.matrix
self.add_text([0, 0, 0], "o")
for vec in matrix:
self.add_line((0, 0, 0), vec, colors[count])
self.add_text(vec, labels[count], colors[count])
count += 1
for (vec1, vec2) in itertools.permutations(matrix, 2):
self.add_line(vec1, vec1 + vec2)
for (vec1, vec2, vec3) in itertools.permutations(matrix, 3):
self.add_line(vec1 + vec2, vec1 + vec2 + vec3)
if self.show_bonds or self.show_polyhedron:
elements = sorted(s.composition.elements, key=lambda a: a.X)
anion = elements[-1]
def contains_anion(site):
for sp in site.species.keys():
if sp.symbol == anion.symbol:
return True
return False
anion_radius = anion.average_ionic_radius
for site in s:
exclude = False
max_radius = 0
color = np.array([0, 0, 0])
for sp, occu in site.species.items():
if sp.symbol in self.excluded_bonding_elements or sp == anion:
exclude = True
break
max_radius = max(max_radius, sp.average_ionic_radius)
color = color + occu * np.array(self.el_color_mapping.get(sp.symbol, [0, 0, 0]))
if not exclude:
max_radius = (1 + self.poly_radii_tol_factor) * (max_radius + anion_radius)
nn = structure.get_neighbors(site, float(max_radius))
nn_sites = []
for neighbor in nn:
if contains_anion(neighbor):
nn_sites.append(neighbor)
if not in_coord_list(inc_coords, neighbor.coords):
self.add_site(neighbor)
if self.show_bonds:
self.add_bonds(nn_sites, site)
if self.show_polyhedron:
color = [i / 255 for i in color]
self.add_polyhedron(nn_sites, site, color)
if self.show_help:
self.helptxt_actor = vtk.vtkActor2D()
self.helptxt_actor.VisibilityOn()
self.helptxt_actor.SetMapper(self.helptxt_mapper)
self.ren.AddActor(self.helptxt_actor)
self.display_help()
camera = self.ren.GetActiveCamera()
if reset_camera:
if has_lattice:
# Adjust the camera for best viewing
lengths = s.lattice.abc
pos = (matrix[1] + matrix[2]) * 0.5 + matrix[0] * max(lengths) / lengths[0] * 3.5
camera.SetPosition(pos)
camera.SetViewUp(matrix[2])
camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5)
else:
origin = s.center_of_mass
max_site = max(s, key=lambda site: site.distance_from_point(origin))
camera.SetPosition(origin + 5 * (max_site.coords - origin))
camera.SetFocalPoint(s.center_of_mass)
self.structure = structure
self.title = s.composition.formula
def zoom(self, factor):
"""
Zoom the camera view by a factor.
"""
camera = self.ren.GetActiveCamera()
camera.Zoom(factor)
self.ren_win.Render()
def show(self):
"""
Display the visualizer.
"""
self.iren.Initialize()
self.ren_win.SetSize(800, 800)
self.ren_win.SetWindowName(self.title)
self.ren_win.Render()
self.iren.Start()
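    # A minimal usage sketch (assumes a pymatgen Structure `struct` is already
    # available; the keyword values shown are illustrative, not required):
    #
    #     vis = StructureVis(show_polyhedron=True, show_bonds=False)
    #     vis.set_structure(struct)
    #     vis.show()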
def add_site(self, site):
"""
        Add a site to the render window. The site is displayed as a sphere, the
        color of which is determined based on the element. Partially occupied
        sites are displayed as spheres made up of one colored wedge per species
        (plus a white wedge for any vacancy), and the site info still shows the
        partial occupancy.
Args:
site: Site to add.
"""
start_angle = 0
radius = 0
total_occu = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
total_occu += occu
vis_radius = 0.2 + 0.002 * radius
for specie, occu in site.species.items():
            if not specie:
                color = (1, 1, 1)
            elif specie.symbol in self.el_color_mapping:
                color = [i / 255 for i in self.el_color_mapping[specie.symbol]]
            else:
                # Fall back to white for species without an entry in the color
                # mapping so that color is always defined below.
                color = (1, 1, 1)
mapper = self.add_partial_sphere(site.coords, vis_radius, color, start_angle, start_angle + 360 * occu)
self.mapper_map[mapper] = [site]
start_angle += 360 * occu
if total_occu < 1:
mapper = self.add_partial_sphere(
site.coords,
vis_radius,
(1, 1, 1),
start_angle,
start_angle + 360 * (1 - total_occu),
)
self.mapper_map[mapper] = [site]
def add_partial_sphere(self, coords, radius, color, start=0, end=360, opacity=1.0):
"""
        Add a partial sphere (used to display partial occupancies).
Args:
coords (nd.array): Coordinates
radius (float): Radius of sphere
color (): Color of sphere.
start (float): Starting angle.
end (float): Ending angle.
opacity (float): Opacity.
"""
sphere = vtk.vtkSphereSource()
sphere.SetCenter(coords)
sphere.SetRadius(radius)
sphere.SetThetaResolution(18)
sphere.SetPhiResolution(18)
sphere.SetStartTheta(start)
sphere.SetEndTheta(end)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphere.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetOpacity(opacity)
self.ren.AddActor(actor)
return mapper
def add_text(self, coords, text, color=(0, 0, 0)):
"""
Add text at a coordinate.
Args:
coords: Coordinates to add text at.
text: Text to place.
color: Color for text as RGB. Defaults to black.
"""
source = vtk.vtkVectorText()
source.SetText(text)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor(color)
follower.SetPosition(coords)
follower.SetScale(0.5)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
"""
Adds a line.
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
            color: Color of the line as RGB. Defaults to grey.
width: Width of line. Defaults to 1.
"""
source = vtk.vtkLineSource()
source.SetPoint1(start)
source.SetPoint2(end)
vertexIDs = vtk.vtkStringArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
# Set the vertex labels
vertexIDs.InsertNextValue("a")
vertexIDs.InsertNextValue("b")
source.GetOutput().GetPointData().AddArray(vertexIDs)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetLineWidth(width)
self.ren.AddActor(actor)
def add_polyhedron(
self,
neighbors,
center,
color,
opacity=1.0,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a polyhedron.
Args:
neighbors: Neighbors of the polyhedron (the vertices).
center: The atom in the center of the polyhedron.
            color: Color of the polyhedron as RGB, or "element" to color it by
                the element of the central atom.
            opacity: Opacity of the polyhedron
            draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
conv = vtk.vtkConvexPointSet()
for i, n in enumerate(neighbors):
x, y, z = n.coords
points.InsertPoint(i, x, y, z)
conv.GetPointIds().InsertId(i, i)
grid = vtk.vtkUnstructuredGrid()
grid.Allocate(1, 1)
grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
grid.SetPoints(points)
dsm = vtk.vtkDataSetMapper()
polysites = [center]
polysites.extend(neighbors)
self.mapper_map[dsm] = polysites
if vtk.VTK_MAJOR_VERSION <= 5:
dsm.SetInputConnection(grid.GetProducerPort())
else:
dsm.SetInputData(grid)
ac = vtk.vtkActor()
# ac.SetMapper(mapHull)
ac.SetMapper(dsm)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_triangle(
self,
neighbors,
color,
center=None,
opacity=0.4,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a triangular surface between three atoms.
Args:
            neighbors: Atoms between which a triangle will be drawn.
            color: Color for triangle as RGB, or "element" to color it by the
                element of the central atom.
            center: The "central atom" of the triangle
            opacity: Opacity of the triangle
            draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(neighbors[ii].x, neighbors[ii].y, neighbors[ii].z)
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
# polydata object
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
        # mapper
        mapper = vtk.vtkPolyDataMapper()
        if vtk.VTK_MAJOR_VERSION <= 5:
            mapper.SetInputConnection(trianglePolyData.GetProducerPort())
        else:
            mapper.SetInputData(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
if center is None:
raise ValueError(
"Color should be chosen according to the central atom, " "and central atom is not provided"
)
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_faces(self, faces, color, opacity=0.35):
"""
Adding face of polygon.
Args:
faces (): Coordinates of the faces.
color (): Color.
opacity (float): Opacity
"""
for face in faces:
if len(face) == 3:
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
elif len(face) > 3:
center = np.zeros(3, np.float_)
for site in face:
center += site
center /= np.float_(len(face))
for ii, f in enumerate(face):
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
points.InsertNextPoint(f[0], f[1], f[2])
ii2 = np.mod(ii + 1, len(face))
points.InsertNextPoint(face[ii2][0], face[ii2][1], face[ii2][2])
points.InsertNextPoint(center[0], center[1], center[2])
for ii in range(3):
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
else:
raise ValueError("Number of points for a face should be >= 3")
def add_edges(self, edges, type="line", linewidth=2, color=[0.0, 0.0, 0.0]):
"""
Args:
            edges (list): List of edges, each given as a pair of endpoint
                coordinates.
            type (str): Type of edge to draw. Only straight lines are drawn at
                present, so this argument is currently unused.
            linewidth (float): Width of the lines.
            color (nd.array/tuple): RGB color.
"""
points = vtk.vtkPoints()
lines = vtk.vtkCellArray()
for iedge, edge in enumerate(edges):
points.InsertPoint(2 * iedge, edge[0])
points.InsertPoint(2 * iedge + 1, edge[1])
lines.InsertNextCell(2)
lines.InsertCellPoint(2 * iedge)
lines.InsertCellPoint(2 * iedge + 1)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(polydata.GetProducerPort())
else:
mapper.SetInputData(polydata)
# mapper.SetInput(polydata)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetColor(color)
ac.GetProperty().SetLineWidth(linewidth)
self.ren.AddActor(ac)
def add_bonds(self, neighbors, center, color=None, opacity=None, radius=0.1):
"""
Adds bonds for a site.
Args:
neighbors: Neighbors of the site.
center: The site in the center for all bonds.
color: Color of the tubes representing the bonds
opacity: Opacity of the tubes representing the bonds
            radius: Radius of the tubes representing the bonds
"""
points = vtk.vtkPoints()
points.InsertPoint(0, center.x, center.y, center.z)
n = len(neighbors)
lines = vtk.vtkCellArray()
for i in range(n):
points.InsertPoint(i + 1, neighbors[i].coords)
lines.InsertNextCell(2)
lines.InsertCellPoint(0)
lines.InsertCellPoint(i + 1)
pd = vtk.vtkPolyData()
pd.SetPoints(points)
pd.SetLines(lines)
tube = vtk.vtkTubeFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
tube.SetInputConnection(pd.GetProducerPort())
else:
tube.SetInputData(pd)
tube.SetRadius(radius)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if opacity is not None:
actor.GetProperty().SetOpacity(opacity)
if color is not None:
actor.GetProperty().SetColor(color)
self.ren.AddActor(actor)
def add_picker_fixed(self):
"""
        Create a cell picker and set it on the render window interactor.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
def annotate_pick(obj, event):
if picker.GetCellId() < 0 and not self.show_help:
self.helptxt_actor.VisibilityOff()
else:
mapper = picker.GetMapper()
if mapper in self.mapper_map:
output = []
for site in self.mapper_map[mapper]:
row = [
"{} - ".format(site.species_string),
", ".join(["{:.3f}".format(c) for c in site.frac_coords]),
"[" + ", ".join(["{:.3f}".format(c) for c in site.coords]) + "]",
]
output.append("".join(row))
self.helptxt_mapper.SetInput("\n".join(output))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
self.show_help = False
self.picker = picker
picker.AddObserver("EndPickEvent", annotate_pick)
self.iren.SetPicker(picker)
def add_picker(self):
"""
Create a cell picker.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
source = vtk.vtkVectorText()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor((0, 0, 0))
follower.SetScale(0.2)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
follower.VisibilityOff()
def annotate_pick(obj, event):
if picker.GetCellId() < 0:
follower.VisibilityOff()
else:
pick_pos = picker.GetPickPosition()
mapper = picker.GetMapper()
if mapper in self.mapper_map:
site = self.mapper_map[mapper]
output = [
site.species_string,
"Frac. coords: " + " ".join(["{:.4f}".format(c) for c in site.frac_coords]),
]
source.SetText("\n".join(output))
follower.SetPosition(pick_pos)
follower.VisibilityOn()
picker.AddObserver("EndPickEvent", annotate_pick)
self.picker = picker
self.iren.SetPicker(picker)
class StructureInteractorStyle(vtkInteractorStyleTrackballCamera):
"""
A custom interactor style for visualizing structures.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
self.parent = parent
self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
self.AddObserver("MouseMoveEvent", self.mouseMoveEvent)
self.AddObserver("LeftButtonReleaseEvent", self.leftButtonReleaseEvent)
self.AddObserver("KeyPressEvent", self.keyPressEvent)
def leftButtonPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 0
self.OnLeftButtonDown()
def mouseMoveEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 1
self.OnMouseMove()
def leftButtonReleaseEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
ren = obj.GetCurrentRenderer()
iren = ren.GetRenderWindow().GetInteractor()
if self.mouse_motion == 0:
pos = iren.GetEventPosition()
iren.GetPicker().Pick(pos[0], pos[1], 0, ren)
self.OnLeftButtonUp()
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym in "ABCabc":
if sym == "A":
parent.supercell[0][0] += 1
elif sym == "B":
parent.supercell[1][1] += 1
elif sym == "C":
parent.supercell[2][2] += 1
elif sym == "a":
parent.supercell[0][0] = max(parent.supercell[0][0] - 1, 1)
elif sym == "b":
parent.supercell[1][1] = max(parent.supercell[1][1] - 1, 1)
elif sym == "c":
parent.supercell[2][2] = max(parent.supercell[2][2] - 1, 1)
parent.redraw()
elif sym == "numbersign":
parent.show_polyhedron = not parent.show_polyhedron
parent.redraw()
elif sym == "minus":
parent.show_bonds = not parent.show_bonds
parent.redraw()
elif sym == "bracketleft":
parent.poly_radii_tol_factor -= 0.05 if parent.poly_radii_tol_factor > 0 else 0
parent.redraw()
elif sym == "bracketright":
parent.poly_radii_tol_factor += 0.05
parent.redraw()
elif sym == "h":
parent.show_help = not parent.show_help
parent.redraw()
elif sym == "r":
parent.redraw(True)
elif sym == "s":
parent.write_image("image.png")
elif sym == "Up":
parent.rotate_view(1, 90)
elif sym == "Down":
parent.rotate_view(1, -90)
elif sym == "Left":
parent.rotate_view(0, -90)
elif sym == "Right":
parent.rotate_view(0, 90)
elif sym == "o":
parent.orthongonalize_structure()
parent.redraw()
self.OnKeyPress()
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20, bitrate="10000k", quality=1, **kwargs):
r"""
Generate a movie from a sequence of structures using vtk and ffmpeg.
Args:
structures ([Structure]): sequence of structures
output_filename (str): filename for structure output. defaults to
movie.mp4
zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
fps (int): Frames per second for the movie. Defaults to 20.
        bitrate (str): Video bitrate. Defaults to "10000k" (fairly high
quality).
quality (int): A quality scale. Defaults to 1.
\\*\\*kwargs: Any kwargs supported by StructureVis to modify the images
generated.
"""
vis = StructureVis(**kwargs)
vis.show_help = False
vis.redraw()
vis.zoom(zoom)
sigfig = int(math.floor(math.log10(len(structures))) + 1)
filename = "image{0:0" + str(sigfig) + "d}.png"
for i, s in enumerate(structures):
vis.set_structure(s)
vis.write_image(filename.format(i), 3)
filename = "image%0" + str(sigfig) + "d.png"
args = [
"ffmpeg",
"-y",
"-i",
filename,
"-q:v",
str(quality),
"-r",
str(fps),
"-b:v",
str(bitrate),
output_filename,
]
with subprocess.Popen(args) as p:
p.communicate()
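# A minimal usage sketch for make_movie (hypothetical trajectory; assumes ffmpeg
# is available on the PATH and `traj` is a list of pymatgen Structures):
#
#     from pymatgen.vis.structure_vtk import make_movie
#     make_movie(traj, output_filename="relaxation.mp4", fps=10, zoom=1.2)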
class MultiStructuresVis(StructureVis):
"""
Visualization for multiple structures.
"""
DEFAULT_ANIMATED_MOVIE_OPTIONS = {
"time_between_frames": 0.1,
"looping_type": "restart",
"number_of_loops": 1,
"time_between_loops": 1.0,
}
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=False,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
animated_movie_options=DEFAULT_ANIMATED_MOVIE_OPTIONS,
):
"""
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
            show_bonds: Set to True to show bonds. Defaults to False.
show_polyhedron: Set to True to show polyhedrons. Defaults to
False.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
            animated_movie_options (dict): Options for the animated movie (see
                DEFAULT_ANIMATED_MOVIE_OPTIONS).
"""
super().__init__(
element_color_mapping=element_color_mapping,
show_unit_cell=show_unit_cell,
show_bonds=show_bonds,
show_polyhedron=show_polyhedron,
poly_radii_tol_factor=poly_radii_tol_factor,
excluded_bonding_elements=excluded_bonding_elements,
)
self.warningtxt_actor = vtk.vtkActor2D()
self.infotxt_actor = vtk.vtkActor2D()
self.structures = None
style = MultiStructuresInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.istruct = 0
self.current_structure = None
self.set_animated_movie_options(animated_movie_options=animated_movie_options)
def set_structures(self, structures, tags=None):
"""
Add list of structures to the visualizer.
Args:
structures (List of Structures):
tags (): List of tags.
"""
self.structures = structures
self.istruct = 0
self.current_structure = self.structures[self.istruct]
self.tags = tags if tags is not None else []
self.all_radii = []
self.all_vis_radii = []
for struct in self.structures:
struct_radii = []
struct_vis_radii = []
for site in struct:
radius = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
vis_radius = 0.2 + 0.002 * radius
struct_radii.append(radius)
struct_vis_radii.append(vis_radius)
self.all_radii.append(struct_radii)
self.all_vis_radii.append(struct_vis_radii)
self.set_structure(self.current_structure, reset_camera=True, to_unit_cell=False)
def set_structure(self, structure, reset_camera=True, to_unit_cell=False):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
            to_unit_cell: Whether or not to map sites back into the unit cell.
"""
super().set_structure(structure=structure, reset_camera=reset_camera, to_unit_cell=to_unit_cell)
self.apply_tags()
def apply_tags(self):
"""
Apply tags.
"""
tags = {}
for tag in self.tags:
istruct = tag.get("istruct", "all")
if istruct != "all":
if istruct != self.istruct:
continue
site_index = tag["site_index"]
color = tag.get("color", [0.5, 0.5, 0.5])
opacity = tag.get("opacity", 0.5)
if site_index == "unit_cell_all":
struct_radii = self.all_vis_radii[self.istruct]
for isite, site in enumerate(self.current_structure):
vis_radius = 1.5 * tag.get("radius", struct_radii[isite])
tags[(isite, (0, 0, 0))] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
continue
cell_index = tag["cell_index"]
if "radius" in tag:
vis_radius = tag["radius"]
elif "radius_factor" in tag:
vis_radius = tag["radius_factor"] * self.all_vis_radii[self.istruct][site_index]
else:
vis_radius = 1.5 * self.all_vis_radii[self.istruct][site_index]
tags[(site_index, cell_index)] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
for site_and_cell_index, tag_style in tags.items():
isite, cell_index = site_and_cell_index
site = self.current_structure[isite]
if cell_index == (0, 0, 0):
coords = site.coords
else:
fcoords = site.frac_coords + np.array(cell_index)
site_image = PeriodicSite(
site.species,
fcoords,
self.current_structure.lattice,
to_unit_cell=False,
coords_are_cartesian=False,
properties=site.properties,
)
self.add_site(site_image)
coords = site_image.coords
vis_radius = tag_style["radius"]
color = tag_style["color"]
opacity = tag_style["opacity"]
self.add_partial_sphere(
coords=coords,
radius=vis_radius,
color=color,
start=0,
end=360,
opacity=opacity,
)
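    # A sketch of the tag dict format consumed by apply_tags (values are
    # illustrative only; "radius_factor" may be used instead of "radius"):
    #
    #     {"istruct": 0,                # structure index, or "all"
    #      "site_index": 3,             # site index, or "unit_cell_all"
    #      "cell_index": (0, 0, 0),     # periodic image of the tagged site
    #      "color": [1.0, 0.0, 0.0],    # RGB, defaults to [0.5, 0.5, 0.5]
    #      "opacity": 0.7,              # defaults to 0.5
    #      "radius": 1.2}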
def set_animated_movie_options(self, animated_movie_options=None):
"""
Args:
animated_movie_options ():
"""
if animated_movie_options is None:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
else:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
for key in animated_movie_options:
if key not in self.DEFAULT_ANIMATED_MOVIE_OPTIONS.keys():
raise ValueError("Wrong option for animated movie")
self.animated_movie_options.update(animated_movie_options)
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a," " b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor " "by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 " "clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by " "90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
"n: Move to next structure",
"p: Move to previous structure",
"m: Animated movie of the structures",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def display_warning(self, warning):
"""
Args:
warning (str): Warning
"""
self.warningtxt_mapper = vtk.vtkTextMapper()
tprops = self.warningtxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(1, 0, 0)
tprops.BoldOn()
tprops.SetJustificationToRight()
self.warningtxt = "WARNING : {}".format(warning)
self.warningtxt_actor = vtk.vtkActor2D()
self.warningtxt_actor.VisibilityOn()
self.warningtxt_actor.SetMapper(self.warningtxt_mapper)
self.ren.AddActor(self.warningtxt_actor)
self.warningtxt_mapper.SetInput(self.warningtxt)
winsize = self.ren_win.GetSize()
self.warningtxt_actor.SetPosition(winsize[0] - 10, 10)
self.warningtxt_actor.VisibilityOn()
def erase_warning(self):
"""
Remove warnings.
"""
self.warningtxt_actor.VisibilityOff()
def display_info(self, info):
"""
Args:
info (str): Information.
"""
self.infotxt_mapper = vtk.vtkTextMapper()
tprops = self.infotxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 1)
tprops.BoldOn()
tprops.SetVerticalJustificationToTop()
self.infotxt = "INFO : {}".format(info)
self.infotxt_actor = vtk.vtkActor2D()
self.infotxt_actor.VisibilityOn()
self.infotxt_actor.SetMapper(self.infotxt_mapper)
self.ren.AddActor(self.infotxt_actor)
self.infotxt_mapper.SetInput(self.infotxt)
winsize = self.ren_win.GetSize()
self.infotxt_actor.SetPosition(10, winsize[1] - 10)
self.infotxt_actor.VisibilityOn()
def erase_info(self):
"""
Erase all info.
"""
self.infotxt_actor.VisibilityOff()
class MultiStructuresInteractorStyle(StructureInteractorStyle):
"""
Interactor for MultiStructureVis.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
StructureInteractorStyle.__init__(self, parent=parent)
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym == "n":
if parent.istruct == len(parent.structures) - 1:
parent.display_warning("LAST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct += 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "p":
if parent.istruct == 0:
parent.display_warning("FIRST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct -= 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "m":
parent.istruct = 0
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
nloops = parent.animated_movie_options["number_of_loops"]
tstep = parent.animated_movie_options["time_between_frames"]
tloops = parent.animated_movie_options["time_between_loops"]
if parent.animated_movie_options["looping_type"] == "restart":
loop_istructs = range(len(parent.structures))
elif parent.animated_movie_options["looping_type"] == "palindrome":
                loop_istructs = list(range(len(parent.structures))) + list(range(len(parent.structures) - 2, -1, -1))
else:
raise ValueError('"looping_type" should be "restart" or "palindrome"')
for iloop in range(nloops):
for istruct in loop_istructs:
time.sleep(tstep)
parent.istruct = istruct
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.display_info(
"Animated movie : structure {:d}/{:d} "
"(loop {:d}/{:d})".format(istruct + 1, len(parent.structures), iloop + 1, nloops)
)
parent.ren_win.Render()
time.sleep(tloops)
parent.erase_info()
parent.display_info("Ended animated movie ...")
parent.ren_win.Render()
StructureInteractorStyle.keyPressEvent(self, obj, event)
|
gmatteo/pymatgen
|
pymatgen/vis/structure_vtk.py
|
Python
|
mit
| 48,211
|
[
"Jmol",
"VTK",
"pymatgen"
] |
291be964d8a81b0b0158fe3a671f0e1ad9fd8548cf5b7bbf84df534114d3766a
|
# Copyright (c) 2012 - 2014 the GPy Authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import GP
from .. import likelihoods
from .. import kern
from .. import util
class GPCoregionalizedRegression(GP):
"""
Gaussian Process model for heteroscedastic multioutput regression
This is a thin wrapper around the models.GP class, with a set of sensible defaults
:param X_list: list of input observations corresponding to each output
:type X_list: list of numpy arrays
:param Y_list: list of observed values related to the different noise models
:type Y_list: list of numpy arrays
:param kernel: a GPy kernel, defaults to RBF ** Coregionalized
:type kernel: None | GPy.kernel defaults
    :param likelihoods_list: a list of likelihoods, defaults to a list of Gaussian likelihoods
    :type likelihoods_list: None | a list of GPy.likelihoods
:param name: model name
:type name: string
    :param W_rank: rank of the coregionalization parameters 'W' (see coregionalize kernel documentation)
:type W_rank: integer
:param kernel_name: name of the kernel
:type kernel_name: string
"""
def __init__(self, X_list, Y_list, kernel=None, likelihoods_list=None, name='GPCR',W_rank=1,kernel_name='coreg'):
#Input and Output
X,Y,self.output_index = util.multioutput.build_XY(X_list,Y_list)
Ny = len(Y_list)
#Kernel
if kernel is None:
            kernel = util.multioutput.ICM(input_dim=X.shape[1]-1, num_outputs=Ny, kernel=kern.RBF(X.shape[1]-1), W_rank=W_rank, name=kernel_name)
#Likelihood
likelihood = util.multioutput.build_likelihood(Y_list,self.output_index,likelihoods_list)
super(GPCoregionalizedRegression, self).__init__(X,Y,kernel,likelihood, Y_metadata={'output_index':self.output_index})
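# A minimal usage sketch (made-up data; assumes GPy is installed and importable):
#
#     import numpy as np
#     import GPy
#
#     X1 = np.random.rand(20, 1) * 10.
#     X2 = np.random.rand(30, 1) * 10.
#     Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
#     Y2 = np.cos(X2) + np.random.randn(*X2.shape) * 0.10
#
#     m = GPy.models.GPCoregionalizedRegression([X1, X2], [Y1, Y2])
#     m.optimize()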
|
gusmaogabriels/GPy
|
GPy/models/gp_coregionalized_regression.py
|
Python
|
bsd-3-clause
| 1,876
|
[
"Gaussian"
] |
f940612c60aaac54afee42abff3191d2e5c5aa799e5ba13fa983ab7745f2ba7e
|
#!/usr/bin/python
import _mysql as mysql
import _mysql_exceptions as mysql_exceptions
import MySQLdb.cursors
import os
import pickle
import numpy as np
import sys
import StringIO
from scipy.misc import comb
import getopt
import csv
import re
import subprocess
import random
import glob
def usage():
print "\n-----------------------------------------------------------------"
print "Usage: "
print " sifter_prepare.py [options] <families_data_folder> <output_folder>"
print "-----------------------------------------------------------------\n"
print "Examples:"
print " sifter_prepare.py -p C0JYY2_HUMAN ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -s 9823 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -f PF03818 -a ../example/fam_data ../example/queries\n"
print " sifter_prepare.py --ip ../example/protein_list.txt -r ../example/fam_data ../example/queries\n"
print " sifter_prepare.py --if ../example/family_list.txt ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -p C0JYY2_HUMAN -x 1e5 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -p C0JYY2_HUMAN -t 2 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -s 9823 --dbaddr www.example.org --dbuser jack --dbpass 1234 ../example/fam_data ../example/queries\n"
print " sifter_prepare.py -A --hit_file ../example/pfam_res.txt ../example/fam_data ../example/queries\n"
print "This function prepares necessary files for your query to run SIFTER on."
print "@author Sayed Mohammad Ebrahim Sahraeian (mohammad@compbio.berkeley.edu)"
print "Please cite new paper:"
print "-Sahraeian SME, Luo KR, Brenner SE (2015)"
print "\nThe SIFTER algorithm presented in the following paper:"
print "- Engelhardt BE, Jordan MI, Srouji JR, Brenner SE. 2011. Genome-scale phylogenetic function annotation of large and diverse protein families. Genome Research 21:1969-1980. \n"
print "inputs:"
print " <families_data_folder> Path to the folder where the"
print " families data are placed. You can"
print " download the precomputed data"
print " or build it using the"
print " 'sifter_gather_family_data.py' script."
print " <output_folder> Path to the output folder where"
print " the necessary query files and"
print " results will be written to."
print "options: (you should only use one of '-p -s -f --ip -A' options.)"
print " -p STRING List of query proteins (use Uniprot ID"
print " or Accession) in comma seperated format."
print " -s STRING NCBI taxonomy ID for input species."
print " -f STRING List of Pfam families for which you"
print " want to prepare data."
print " (in comma seperated format)"
print " --ip STRING Path to the input file where the list"
print " of proteins are placed."
print " --if STRING Path to the input file where the list"
print " of families are placed."
print " --hit_file STRING Output of pfam_scan.pl file on the "
print " novel genome. This file consists of"
print " the list of pfam hits for the genome."
print " If this option is uded, we will"
print " look in this file to find Pfams"
print " instead of the SQL database."
print " -A Prepare for all Pfam families of queried"
print " novel genome. (hit_file should be provided)"
print " -a Include all experimental and"
print " non-experimental evidence"
print " in the inference. (Defualt [if"
print " this option is not used]: only"
print " experimental evidence will be used)."
print " -r Remove all query files already prepared"
print " and rebuild the queries."
print " -x INT Maximum number of nonzero elements"
print " in the transition matrix. Should be"
print " a number in [1e5,1e7] for reasonable"
print " time and accuracy balance (Default=2250000)"
print " Smaller value leads to faster running time."
print " -t INT Number of functions to truncate"
print " to in approximation [Default:"
print " adaptive based on -x option]"
print " Smaller value leads to faster running time."
print " --dbaddr STRING Address of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: localhost]"
print " --dbname STRING Name of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: sifter_db]"
print " --dbuser STRING Name of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: root]"
print " --dbpass STRING Password of the user for the MySQL"
print " database that has neccessary data"
print " for SIFTER [Default: '']"
print " -h Help. Print Usage."
def msql(query, db):
c = db.cursor()
c.execute(query)
results = c.fetchall()
c.close()
return results
def find_pfam_for_genes(genes):
sql="""SELECT
pfamseq.pfamseq_acc,
pfamseq.pfamseq_id,
pfamA.pfamA_acc
FROM pfamseq
INNER JOIN pfamA_reg_full_significant on (pfamA_reg_full_significant.auto_pfamseq=pfamseq.auto_pfamseq)
INNER JOIN pfamA on (pfamA.auto_pfamA=pfamA_reg_full_significant.auto_pfamA)
WHERE
pfamseq.pfamseq_acc in ('%s')
OR pfamseq.pfamseq_id in ('%s')
"""%("','".join(genes),"','".join(genes))
seq_anns = msql(sql, db_mysql)
return seq_anns
def find_pfam_for_taxid(taxid):
sql="""SELECT
pfamseq.pfamseq_acc,
pfamseq.pfamseq_id,
pfamA.pfamA_acc
FROM pfamseq
INNER JOIN pfamA_reg_full_significant on (pfamA_reg_full_significant.auto_pfamseq=pfamseq.auto_pfamseq)
INNER JOIN pfamA on (pfamA.auto_pfamA=pfamA_reg_full_significant.auto_pfamA)
WHERE
pfamseq.ncbi_taxid = '%s'
"""%(taxid)
seq_anns = msql(sql, db_mysql)
return seq_anns
def find_genes_for_pfams(pfam_ids):
sql="""SELECT
pfamseq.pfamseq_acc,
pfamA.pfamA_acc
FROM pfamseq
INNER JOIN pfamA_reg_full_significant on (pfamA_reg_full_significant.auto_pfamseq=pfamseq.auto_pfamseq)
INNER JOIN pfamA on (pfamA.auto_pfamA=pfamA_reg_full_significant.auto_pfamA)
WHERE
pfamA.pfamA_acc in ('%s')
"""%("','".join(pfam_ids))
seq_anns = msql(sql, db_mysql)
return seq_anns
def find_pfam_2_gene(res):
pfam_2_gene={}
gene_2_pfam={}
for w in res:
my_pfam=w['pfamA_acc']
my_gene=w['pfamseq_acc']
if my_pfam not in pfam_2_gene:
pfam_2_gene[my_pfam]=set([])
pfam_2_gene[my_pfam].add(my_gene)
if my_gene not in gene_2_pfam:
gene_2_pfam[my_gene]=set([])
gene_2_pfam[my_gene].add(my_pfam)
return pfam_2_gene,gene_2_pfam
def find_pfam_2_gene_from_file(hit_file):
pfam_2_gene={}
gene_2_pfam={}
with open(hit_file, 'rb') as infile:
for line in infile:
line=line.strip()
if not line:
continue
if len(line)<3:
continue
if line[0]=="#" and not line[2]=="<":
continue
if line[0]=="#" and line[2]=="<":
keys=line.split('> <')
keys[0]=keys[0].split('<')[1]
keys[-1]=keys[-1].split('>')[0]
continue
row=line.split()
if not len(row)==15:
print "ERR"
break
r={k:row[i] for i,k in enumerate(keys)}
if r['significance']=='1':
pfam_id=r['hmm acc'][0:r['hmm acc'].find('.')]
my_gene=r['seq id']
if pfam_id not in pfam_2_gene.keys():
pfam_2_gene[pfam_id]=set([])
pfam_2_gene[pfam_id].add(my_gene)
if my_gene not in gene_2_pfam.keys():
gene_2_pfam[my_gene]=set([])
gene_2_pfam[my_gene].add(pfam_id)
print "Your queried novel genome has:"
print len(gene_2_pfam), "genes in pfam"
print len(pfam_2_gene), "pfam families\n"
return pfam_2_gene,gene_2_pfam
# See how many sequences are in each family, add it to pfds
def get_pfds(pfams):
pfds={}
for p in pfams:
sql_q="""select pfamA.num_full, pfamA.number_species, pfamA.pfamA_id, pfamA.description, group_concat(go_id) as go_ids, group_concat(term) as go_terms from pfamA left join gene_ontology on gene_ontology.auto_pfamA = pfamA.auto_pfamA where pfamA.pfamA_acc='%s' group by pfamA_acc
"""%(p)
#AND Locus.type=1
#AND Synonym.type=2
r = msql(sql_q, db_mysql)
if r:
pfds[p]={}
for w in r[0].keys():
pfds[p][w]=r[0][w]
return pfds
# ##Process Evidence
def parse_GO_OBO(obo_file):
go_dict={}
new_is_comming=-1
with open(obo_file, "r") as infile:
currentGOTerm = None
for line in infile:
line = line.strip()
if not line: continue #Skip empty
if new_is_comming==1:
key, sep, val = line.partition(":")
key=key.strip()
val=val.strip()
currentGOTerm=val
go_dict[currentGOTerm]={}
new_is_comming=0
continue
if line == "[Term]":
new_is_comming=1
elif line == "[Typedef]":
#Skip [Typedef sections]
new_is_comming=-1
elif new_is_comming==0:
#Only process if we're inside a [Term] environment
key, sep, val = line.partition(":")
key=key.strip()
val=val.strip()
if key not in go_dict[currentGOTerm]:
go_dict[currentGOTerm][key]=[]
go_dict[currentGOTerm][key].append(val.strip())
#Add last term
#remove obsoletes
obseletes=[]
for term in go_dict:
if 'is_obsolete' in go_dict[term]:
if go_dict[term]['is_obsolete'][0]== 'true':
obseletes.append(term)
continue
for term in obseletes:
del go_dict[term]
ontologies=['biological_process','molecular_function','cellular_component']
DAGs={w:{} for w in ontologies}
DAGs_r={w:{} for w in ontologies}
roots={w:{} for w in ontologies}
for term in go_dict.keys():
ont=go_dict[term]['namespace'][0]
DAGs[ont][term]=[]
DAGs_r[ont][term]=[]
for term in go_dict.keys():
ont=go_dict[term]['namespace'][0]
if 'is_a' in go_dict[term]:
for pa in go_dict[term]['is_a']:
term_2=pa.split(' ! ')[0]
DAGs[ont][term].append(term_2)
DAGs_r[ont][term_2].append(term)
else:
roots[ont]=term
return go_dict,DAGs,DAGs_r,roots
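# A sketch of how the returned structures are laid out (term IDs shown are
# illustrative): go_dict maps a GO ID to its parsed OBO fields, DAGs[ontology]
# maps each term to its 'is_a' parents, DAGs_r is the reverse mapping, and
# roots[ontology] is the root term of that namespace, e.g.
#
#     go_dict, DAGs, DAGs_r, roots = parse_GO_OBO('go.obo')
#     DAGs['molecular_function']['GO:0016301']   # -> list of parent GO IDs
#     roots['molecular_function']                # -> 'GO:0003674'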
def trace_to_ontology_root(cur_node):
"""
Generator to recursively visit all nodes on each path
from a node up to the root node.
"""
#print "Graph node:", cur_node
yield cur_node
for pa in DAGs[ont][cur_node]:
for n in trace_to_ontology_root(pa):
yield n
def get_ontology_subdag(annotated_term_nodes):
"""
Given evidence_set, returns a filtered subgraph of evidence_ontology
that only contains those nodes or their ancestors.
"""
# For each annotated node, traverse to the root node of the ontology
# to include all its less-specific terms
all_term_nodes = set([])
for go_term in annotated_term_nodes:
traced=trace_to_ontology_root(go_term)
all_term_nodes.update(set(traced))
sub_dag = all_term_nodes
return sub_dag
def get_leaves_from_node(sub_dag, top_node):
descendant_leaves = set()
#print "Top node is: %s"%str(top_node)
#print "Successors: %s"%str(godag.successors(top_node))
for ch in set(DAGs_r[ont][top_node])&set(sub_dag):
if not set(DAGs_r[ont][ch])&set(sub_dag):
descendant_leaves.add(ch)
else:
descendant_leaves.update(get_leaves_from_node(sub_dag,ch))
return descendant_leaves
def find_candidate_fcns(unique_terms):
    '''
Using the parsed evidence, this places the evidence set
and modifies the gene ontology graph in the SIFTER 2.0 way.
'''
# For each protein in the evidence set, store the annotation
# into the evidence graph
annotated_term_nodes = []
for go_term in unique_terms:
if go_term not in DAGs[ont]:
print "GO term, %s doesn't seem to be named in your ontology."%go_term
continue
annotated_term_nodes.append(go_term)
go_subdag = get_ontology_subdag(annotated_term_nodes=annotated_term_nodes)
root_node = roots[ont]
candidate_fcns=get_leaves_from_node(go_subdag, root_node)
return candidate_fcns
def max_fun_possible(i,thr):
max_f=0
for j in range(1,i+1):
max_f_temp=max_f+comb(i,j,exact=0)
if max_f_temp>thr:
return [j-1,max_f]
else:
max_f=max_f_temp
return [i,max_f]
def calc_numel(numTerms, maxFun):
return pow(sum([float(comb(numTerms, i, exact=0)) for i in range(1, maxFun + 1)]), 2)
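# A small worked example of calc_numel (purely illustrative numbers): with
# numTerms=5 candidate functions and maxFun=2, the number of allowed function
# sets is C(5,1) + C(5,2) = 5 + 10 = 15, so the transition matrix has
# 15 ** 2 = 225 nonzero elements.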
def get_criteria(numTerms, maxFun):
if numTerms > 8:
if maxFun > 1:
return 1
else:
return 2
else:
return 3
def get_category(numel, famSize):
# List of dividers based upon the number of elements NUMEL in transition matrix
numelDivs = [65025.0, 330625.0, 1046529.0]
# List of dividers based upon the family size FAMSIZE
famSizeDivs = [567.0, 1637.0, 4989.0]
n = sum(map(lambda x: numel > x, numelDivs))
s = sum(map(lambda x: famSize > x, famSizeDivs))
return (n, s)
def est_processing_time(numTerms, famSize, maxFun,numel):
paramsDict = {1: [-6.6940979152046394, 1.2175437752942884, 0.61437156459022535],
2: [-3.6107074614976109, 0.91343454244972999, 0.45521131812635984],
3: [-2.7026843343076519, 0.052132418536663394, 0.93755721899494526]}
crit = get_criteria(numTerms, maxFun)
line = paramsDict[crit]
return pow(10, line[0]) * pow(numel, line[1]) * pow(famSize, line[2])
def get_upper_bound(eTime, cat, per):
percentileDict={(0, 0): {'95': 8.3435056315411593, '99.9': 10.953643510480756},
(0, 1): {'95': 9.4040189875556379, '99.9': 10.175590194144538},
(0, 2): {'95': 7.0857310513064657, '99.9': 10.031292126553355},
(0, 3): {'95': 4.3471755740354761, '99.9': 8.7766092407283836},
(1, 0): {'95': 4.0445760101251587, '99.9': 9.5270816900332136},
(1, 1): {'95': 2.3310236959309329, '99.9': 3.4547033474036422},
(1, 2): {'95': 1.8195072570575042, '99.9': 2.9109043732685018},
(1, 3): {'95': 2.0892177205927638, '99.9': 7.8978069638688924},
(2, 0): {'95': 2.2542718513558571, '99.9': 2.9746194223225029},
(2, 1): {'95': 2.6775509810516125, '99.9': 4.4976310858312294},
(2, 2): {'95': 2.9809620961392786, '99.9': 4.8087748272548554},
(2, 3): {'95': 4.4914777165287258, '99.9': 6.7709753345612205},
(3, 0): {'95': 2.6439743599924892, '99.9': 3.3485478896514702},
(3, 1): {'95': 2.883955861280195, '99.9': 3.9323761482164077},
(3, 2): {'95': 3.156846158873563, '99.9': 3.904755873693849},
(3, 3): {'95': 3.898056279279821, '99.9': 4.4261063907623219}}
percentiles = percentileDict[(cat[0],cat[1])][per]
return eTime*percentiles
def format_times(times):
if not times:
return times
t = times[0]
if t < 1:
return ['%.1f seconds' % (60 * t) for t in times]
elif t < 60:
return ['%.1f minutes' % t for t in times]
elif t < 60 * 24:
return ['%.1f hours' % (t / 60) for t in times]
elif t < 60 * 24 * 365:
return ['%.1f days' % (t / 60 / 24) for t in times]
else:
return ['%.1f years' % (t / 60 / 24 / 365) for t in times]
def estimate_time(numTerms, famSize,t_lev):
tableBody = []
pers = ['95','99.9']
maxFun=min(t_lev,numTerms)
numel = calc_numel(numTerms, maxFun)
eTime = est_processing_time(numTerms, famSize, maxFun,numel)
eTime = max(eTime, 1.0) # set minimum estimated time to 1 minute
cat = get_category(numel, famSize)
row = [maxFun]
times = [eTime]
for j in range(len(pers)):
upper = get_upper_bound(eTime, cat, pers[j])
times.append(upper)
row.extend(times)
row.extend(format_times(times))
return row
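# The row returned by estimate_time is laid out as (a sketch, indices inferred
# from the code above): [truncation_level, eTime, upper_95, upper_99.9,
# formatted_eTime, formatted_upper_95, formatted_upper_99.9], with the raw
# times in minutes and the formatted entries as human-readable strings, e.g.
#
#     row = estimate_time(12, 800, 4)
#     row[0]          # truncation level actually used (min(4, 12) here)
#     row[4], row[5]  # e.g. '1.3 hours', '3.0 hours'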
def store_run_data(pfam_id):
data={}
data['pfam_id']=pfam_id
data['query_proteins']=[]#pplacer_queries[pfam_id]
data['query_protein_accs']={}#{k['id']:pfamseq_acc_for_id[k['id']] for k in pplacer_queries[pfam_id]}
data['tree_size']=tree_sizes[pfam_id]
data['evidence_constraints']=evidence_allowed
data['tree_loc']=reconciled_folder+'/%s'%pfam_id+"_reconciled.xml"
data['tree_format']='phyloxml',
data['annotation_loc']=evidence_folder+'/%s.pli'%pfam_id
data['annotation_loc_pickle']=evidence_folder+'/%s.pickle'%pfam_id
data['annotation_format']='pli'
print "Loading goa annotations for %s..."%pfam_id
evidence_pickle_file = evidence_folder+'/%s.pickle'%pfam_id # file with annotations
rand_id_1=random.randint(1000000,9999999)
if os.path.exists(evidence_pickle_file+'.gz'):
if os.path.exists('%s.%d'%(evidence_pickle_file,rand_id_1)):
subprocess.check_call("rm %s"%(evidence_pickle_file),shell=True)
subprocess.check_call("gunzip -c %s.gz > %s.%d"%(evidence_pickle_file,evidence_pickle_file,rand_id_1),shell=True)
[evidence_file2,pfam_anns, pp, seq_lookup] = pickle.load(open('%s.%d'%(evidence_pickle_file,rand_id_1), 'rb'))
if os.path.exists('%s.%d'%(evidence_pickle_file,rand_id_1)):
subprocess.check_call("rm %s.%d"%(evidence_pickle_file,rand_id_1),shell=True)
# Filter for only experimental annotations.
unique_terms=set([])
num_ev = 0
for prot_id, anns in pfam_anns.iteritems():
alwd_ev = [a['acc'] for a in anns if a['code'] in evidence_allowed]
unique_terms=unique_terms.union(set(alwd_ev))
# a is an annotation with a function and a code that says where the function came from
# keep this annotation if it was gotten through experiments
if len(alwd_ev) > 0:
num_ev += 1
print pfam_id,'has' ,num_ev, "annotated proteins with allowed evidence type"
data['num_ev_prots']=num_ev
data['num_any_ev_prots']=len(pfam_anns)
if len(unique_terms)==1 and ('GO:0003674' in unique_terms):
num_ev=0
if num_ev>0:
# Input evidence
candidate_fcns=find_candidate_fcns(unique_terms)
evidence_format = 'pli'
data['n_terms'] = len(candidate_fcns)
data['candids'] = candidate_fcns
thr=max_fun_possible(data['n_terms'],np.sqrt((mx_numel)))[0]
if truncation_level:
thr=min(thr,truncation_level)
row=estimate_time(data['n_terms'],data['tree_size'],thr)
data['e_time']=row
print "Number of functions:",data['n_terms']
print "We will use truncation level = %s"%row[0]
print "Estimated running time for family %s = %s (95%% confidence upper bound = %s)"%(pfam_id,row[4],row[5])
pickle.dump(data, open(queries_folder+'/%s_query.pickle'%pfam_id, 'wb'))
print "Processed evidence from:", pfam_id
else:
print "No candidate functions: SIFTER will not be run on this family."
data['n_terms'] = 0
pickle.dump(data, open(queries_folder+'/NQ/%s_query.pickle'%pfam_id, 'wb'))
def prepare_for_each_family(pfam_id):
reconciled_fname = reconciled_folder+'/%s'%pfam_id
evidence_file = evidence_folder+'/%s.pli'%pfam_id
evidence_pickle_file = evidence_folder+'/%s.pickle'%pfam_id
queries_to_process=[]
skip_flag=0
if not(os.path.isfile(reconciled_fname+"_reconciled.xml.gz")):
print "\nERROR: No tree file %s. Skip this family.\n"%(reconciled_fname+"_reconciled.xml.gz")
skip_flag=1
if not(os.path.isfile(evidence_file+'.gz')):
print "\nERROR: No evidence file %s.gz. Skip this family.\n"%(evidence_file)
skip_flag=1
if not(os.path.isfile(evidence_pickle_file+'.gz')):
print "\nERROR: No evidence file %s.gz. Skip this family.\n"%(evidence_pickle_file)
skip_flag=1
q_flag=0
if (skip_flag==0):
if not(os.path.isfile(queries_folder+'/%s_query.pickle'%pfam_id)) and not(os.path.isfile(queries_folder+'/NQ/%s_query.pickle'%pfam_id)):
store_run_data(pfam_id)
else:
print "Family %s already prepared."%(pfam_id)
if (os.path.isfile(queries_folder+'/%s_query.pickle'%pfam_id)):
q_flag=1
return q_flag
if __name__=="__main__":
# Initialization
params_mysql = {\
'db_address': 'localhost',
'db_username': 'root',
'db_password': '',
'db_name': 'sifter_db'
}
evidence_constraints_exp = [
# Experimental
'EXP', # Experiment
'IDA', # Direct Assay
'IPI', # Physical Interaction
'IMP', # Mutant Phenotype
'IGI', # Genetic Interaction
'IEP', # Expression Pattern
# Author Statements
'TAS', # Traceable Author Statement
'NAS', # Non-traceable Author Statement
]
evidence_constraints_all = [
# Experimental
'EXP', # Experiment
'IDA', # Direct Assay
'IPI', # Physical Interaction
'IMP', # Mutant Phenotype
'IGI', # Genetic Interaction
'IEP', # Expression Pattern
# Author Statements
'TAS', # Traceable Author Statement
'NAS', # Non-traceable Author Statement
# Computational Analysis Evidence Codes
'ISS', # Sequence/Structural Similarity
'ISO', # Sequence Orthology
'ISA', # Sequence Alignment
'ISM', # Sequence Model
'IGC', # Genomic Context
'IBA', # Biological aspect of ancestor
'IBD', # Biological aspect of descendant
'IKR', # Key Residues
'IRD', # Rapid Divergence
'RCA', # Reviews Computational Analysis
# Curator Statement
'IC', # Curator
'ND', # No biological data available
# Automatically assigned
'IEA', # Electronic Annotation
# Obsolete
'NR' # Not recorded
]
main_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
obo_file=main_dir+'/data/go.obo'
evidence_allowed = evidence_constraints_exp
taxid=''
query_families=[]
query_proteins=[]
p_input_file=''
f_input_file=''
truncation_level=0
pfams=[]
remove_query_files=0
mx_numel=2250000
hit_file=''
all_fams=0
# Check for options
opts, args = getopt.getopt(sys.argv[1:], "hraAp:s:f:t:x:",['ip=','if=','dbname=','dbpass=','dbuser=','dbaddr=','hit_file='])
if len(args) != 2:
usage()
sys.exit()
choices=[]
if len(opts)>0:
for o, a in opts:
if o == "-p":
splited =a.strip().split(',')
query_proteins=list(set([w for w in splited if w]))
choices.append('p')
elif o == "-s":
taxid = a
choices.append('s')
elif o == "-f":
splited =a.strip().split(',')
query_families=list(set([w for w in splited if w]))
choices.append('f')
elif o == "--ip":
p_input_file = a
choices.append('ip')
elif o == "--if":
f_input_file = a
choices.append('if')
elif o == "-A":
all_fams = 1
choices.append('A')
elif o == "--hit_file":
hit_file = a
elif o == "-a":
evidence_allowed = evidence_constraints_all
elif o == "-r":
remove_query_files=1
elif o == "-x":
mx_numel=int(float(a))
elif o == "-t":
truncation_level=int(a)
elif o == "--dbname":
params_mysql['db_name']= a
elif o == "--dbaddr":
params_mysql['db_address']= a
elif o == "--dbpass":
params_mysql['db_password']= a
elif o == "--dbuser":
params_mysql['db_username']= a
else:
usage()
sys.exit()
if len(choices)==0:
print "\nERROR: No queries are entered."
print "Please use one of the '-p -s -f --ip --if -A' options to enter your query.\n"
sys.exit()
elif len(choices)>1:
print "\nERROR: Please use ONLY one of the '-p -s -f --ip --if -A' options to enter your query.\n"
sys.exit()
families_data_path=args[0]
if not os.path.exists(families_data_path):
print "\nERROR: families_data directory ( %s ) does not exist\n"%families_data_path
sys.exit()
evidence_folder=families_data_path+'/annotations'
if not os.path.exists(evidence_folder):
print "\nERROR: annotations directory ( %s ) not exists\n"%evidence_folder
sys.exit()
reconciled_folder=families_data_path+'/reconciled_trees'
if not os.path.exists(reconciled_folder):
print "\nERROR: reconciled_trees directory ( %s ) not exists\n"%reconciled_folder
sys.exit()
alignment_folder=families_data_path+'/alignments'
if not os.path.exists(alignment_folder):
print "\nERROR: alignment directory( %s ) not exists\n"%alignment_folder
sys.exit()
###
output_path=args[1]
queries_folder=output_path
if remove_query_files==1:
os.system('rm -rf %s'%queries_folder)
if not os.path.exists(output_path):
os.mkdir(output_path)
db_mysql = MySQLdb.connect(host=params_mysql['db_address'],
user=params_mysql['db_username'],
passwd=params_mysql['db_password'],
db=params_mysql['db_name'],
cursorclass=MySQLdb.cursors.DictCursor)
queries_folder_NQ=output_path+'/NQ'
if not os.path.exists(queries_folder_NQ):
os.mkdir(queries_folder_NQ)
prepared_queries=glob.glob(output_path+'/*.pickle')+glob.glob(queries_folder_NQ+'/*.pickle')
prepared_queries=[(w.split('/')[-1]).split('_query')[0] for w in prepared_queries]
already_prepared_fams=[]
print "\n\n--------------Reading the query information------------"
if hit_file:
if not os.path.exists(hit_file):
print "\nERROR: No Pfam hit file at %s.\n"%hit_file
sys.exit()
else:
pfam_2_gene_hit,gene_2_pfam_hit=find_pfam_2_gene_from_file(hit_file)
if query_families or f_input_file:
if f_input_file:
if not os.path.exists(f_input_file):
print "\nERROR: No file exists at %s\n"%f_input_file
sys.exit()
f = open(f_input_file, 'r')
a=f.read()
splited =re.split(' |,|;|\n',a.strip())
query_families=list(set([w for w in splited if w]))
already_prepared_fams=list(set(query_families)&set(prepared_queries))
toprep_families=list(set(query_families)-set(prepared_queries))
print "%s out of %s Families have already prepared. We will Check %s others."%(len(already_prepared_fams),len(query_families),len(toprep_families))
query_families=toprep_families
res=find_genes_for_pfams(query_families)
pfam_2_gene,gene_2_pfam=find_pfam_2_gene(res)
pfams=pfam_2_gene.keys()
for f in set(query_families)-set(pfams):
print "Family %s is not in the SQL database."%f
if query_families:
print "Run SIFTER for Pfam families: %s"%','.join(query_families)
elif query_proteins or p_input_file:
if p_input_file:
if not os.path.exists(p_input_file):
print "\nERROR: No file exists at %s\n"%p_input_file
sys.exit()
f = open(p_input_file, 'r')
a=f.read()
splited =re.split(' |,|;|\n',a.strip())
query_proteins=list(set([w for w in splited if w]))
if not hit_file:
res=find_pfam_for_genes(query_proteins)
pfam_2_gene,gene_2_pfam=find_pfam_2_gene(res)
pfams=pfam_2_gene.keys()
else:
gene_2_pfam={p:gene_2_pfam_hit[p] for p in query_proteins if p in gene_2_pfam_hit}
pfam_2_gene={}
for g,fs in gene_2_pfam.iteritems():
for f in fs:
if not f in pfam_2_gene:
pfam_2_gene[f]=set([])
pfam_2_gene[f].add(g)
pfams=pfam_2_gene.keys()
print "Run SIFTER for %s Pfam families for %s query proteins"%(len(pfams),len(query_proteins))
elif taxid:
if not hit_file:
res=find_pfam_for_taxid(taxid)
pfam_2_gene,gene_2_pfam=find_pfam_2_gene(res)
pfams=pfam_2_gene.keys()
print "Run SIFTER for %s Pfam families for query species (taxid=%s) with %s proteins"%(len(pfams),taxid,len(gene_2_pfam))
else:
gene_2_pfam=gene_2_pfam_hit;
pfam_2_gene=pfam_2_gene_hit;
pfams=pfam_2_gene.keys()
print "-s will be ignored. We will run on all %s Pfam families in the hit-file"%(len(pfams))
elif all_fams==1:
if not hit_file:
print "\nERROR: -A option can only used for novel genomes (hit_file should be provided)\n"
sys.exit()
else:
gene_2_pfam=gene_2_pfam_hit;
pfam_2_gene=pfam_2_gene_hit;
pfams=pfam_2_gene.keys()
print "We will run on all %s Pfam families in the hit-file"%(len(pfams))
if (not pfams):
if (not already_prepared_fams):
print "\nERROR: There are no pfam families for your input query."
print "Please use one of the '-p -s -f --ip -A --hit-file' options to enter your query.\n"
sys.exit()
else:
print "-------------------Preperation is Done----------------------"
print "All of your %s query families have been already prepared."%(len(already_prepared_fams))
print "\nNext step is to run 'sifter_run.py'."
print "You may exclude some of the more complex families there.\n"
else:
pfds=get_pfds(pfams)
tree_sizes = {}
for p in pfds.keys():
tree_sizes[p] = pfds[p]['num_full']
sorted_fams = sorted(pfds.keys(), key=lambda k:pfds[k]['num_full'])
print "Number of families:" ,len(sorted_fams)
print "\n-----------------Reading the ontology file----------------"
ont='molecular_function'
go_dict,DAGs,DAGs_r,roots=parse_GO_OBO(obo_file)
print "\n------------Prepare the necessary query files-------------"
pfams_to_process = []
for i,pfam_id in enumerate(sorted_fams):
q_flag=prepare_for_each_family(pfam_id)
if q_flag==1:
pfams_to_process.append(pfam_id)
print "Input file prepared for %s (%d out of %d families)"%(pfam_id,i+1,len(sorted_fams))
nqs=0
for pfam_id in sorted_fams:
nqf=queries_folder_NQ+'/%s_query.pickle'%pfam_id
if (os.path.isfile(nqf)):
nqs+=1
errors=len(sorted_fams)-len(pfams_to_process)-nqs
if len(pfams_to_process)>0:
e_times = []
total_e=0
total_95=0
with open(output_path+'/running_estimation.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
                spamwriter.writerow(['Family','number of candidate functions','Family size','Truncation level','Estimated running time','95% confidence upper bound','99.9% confidence upper bound'])
for pfam_id in pfams_to_process:
qfile=queries_folder+'/%s_query.pickle'%pfam_id
query_data = pickle.load(open(qfile, "rb" ))
row=query_data['e_time']
spamwriter.writerow([pfam_id,query_data['n_terms'],query_data['tree_size'],row[0],row[4],row[5],row[6]])
total_e +=(row[1])
total_95 +=(row[2])
fe=format_times([total_e,total_95])
if already_prepared_fams:
print "%s of your query families have been already prepared."%(len(already_prepared_fams))
print "Here is the statistics for the rest of queries."
print "\nFiles are prepared for %d out of %d families. (%s missed due to errors, %s are skipped duo to no candidate functions)"%(len(pfams_to_process),len(sorted_fams),errors,nqs)
print "-------------------Preperation is Done----------------------"
print "There are %s families to run SIFTER on."%(len(pfams_to_process))
print "\nTotal estimated time for your query is %s (95%% confidence upper bound = %s)."%(fe[0],fe[1])
print "Details for individual families are written in '%s/running_estimation.csv'"%output_path
print "\nNext step is to run 'sifter_run.py'."
print "You may exclude some of the more complex families there.\n"
else:
print "\nFiles are prepared for %d out of %d families. (%s missed due to errors, %s are skipped duo to no candidate functions)"%(len(pfams_to_process),len(sorted_fams),errors,nqs)
|
juliandev/SIFTER
|
src/scripts/sifter_prepare.py
|
Python
|
gpl-3.0
| 35,833
|
[
"VisIt"
] |
78ae62a352e71114a3bd39000a9dd474aa3f5696df0b3d46dab5da85110d8400
|
#
# iqcalc.py -- image quality calculations on FITS data
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) 2011-2012, Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import logging
import numpy
import threading
try:
import scipy.optimize as optimize
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
have_scipy = True
except ImportError:
have_scipy = False
from ginga.misc import Bunch
def get_mean(data_np):
mdata = numpy.ma.masked_array(data_np, numpy.isnan(data_np))
return numpy.mean(mdata)
def get_median(data_np):
mdata = numpy.ma.masked_array(data_np, numpy.isnan(data_np))
return numpy.median(mdata)
class IQCalcError(Exception):
"""Base exception for raising errors in this module."""
pass
class IQCalc(object):
def __init__(self, logger=None):
if not logger:
logger = logging.getLogger('IQCalc')
self.logger = logger
# for mutex around scipy.optimize, which seems to be non-threadsafe
self.lock = threading.RLock()
# for adjustments to background level
self.skylevel_magnification = 1.05
self.skylevel_offset = 40.0
# FWHM CALCULATION
def gaussian(self, x, p):
"""Gaussian fitting function in 1D. Makes a sine function with
amplitude determined by maxv. See calc_fwhm().
p[0]==mean, p[1]==sdev, p[2]=maxv
"""
y = (1.0 / (p[1] * numpy.sqrt(2*numpy.pi)) *
numpy.exp(-(x - p[0])**2 / (2*p[1]**2))) * p[2]
return y
def calc_fwhm(self, arr1d, medv=None, gauss_fn=None):
"""FWHM calculation on a 1D array by using least square fitting of
a gaussian function on the data. arr1d is a 1D array cut in either
X or Y direction on the object.
"""
if not gauss_fn:
gauss_fn = self.gaussian
N = len(arr1d)
X = numpy.array(list(range(N)))
Y = arr1d
# Fitting works more reliably if we do the following
# a. subtract sky background
if medv is None:
medv = numpy.median(Y)
Y = Y - medv
maxv = Y.max()
# b. clamp to 0..max (of the sky subtracted field)
Y = Y.clip(0, maxv)
# Fit a gaussian
p0 = [0, N-1, maxv] # Initial guess
# Distance to the target function
errfunc = lambda p, x, y: gauss_fn(x, p) - y
# Least square fit to the gaussian
with self.lock:
# NOTE: without this mutex, optimize.leastsq causes a fatal error
# sometimes--it appears not to be thread safe.
# The error is:
# "SystemError: null argument to internal routine"
# "Fatal Python error: GC object already tracked"
p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
if not success:
raise IQCalcError("FWHM gaussian fitting failed")
mu, sdev, maxv = p1
self.logger.debug("mu=%f sdev=%f maxv=%f" % (mu, sdev, maxv))
# Now that we have the sdev from fitting, we can calculate FWHM
# (fwhm = sdev * sqrt(8*log(2)))
fwhm = 2.0 * numpy.sqrt(2.0 * numpy.log(2.0)) * sdev
#return (fwhm, mu, sdev, maxv)
return (float(fwhm), float(mu), float(sdev), maxv)
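# Illustrative check (comment only, not part of the original routine):
# for a pure Gaussian profile with sdev = 2.0 the relation above gives
#   fwhm = 2*sqrt(2*ln 2)*2.0 ~= 2.3548 * 2.0 ~= 4.71 pixels,
# i.e. the factor 2*sqrt(2*ln 2) is the same as sqrt(8*ln 2).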
def get_fwhm(self, x, y, radius, data, medv=None):
"""
"""
if medv is None:
medv = numpy.median(data)
# Get two cuts of the data, one in X and one in Y
x0, y0, xarr, yarr = self.cut_cross(x, y, radius, data)
# Calculate FWHM in each direction
fwhm_x, cx, sdx, maxx = self.calc_fwhm(xarr, medv=medv)
fwhm_y, cy, sdy, maxy = self.calc_fwhm(yarr, medv=medv)
ctr_x = x0 + cx
ctr_y = y0 + cy
self.logger.debug("fwhm_x,fwhm_y=%f,%f center=%f,%f" % (
fwhm_x, fwhm_y, ctr_x, ctr_y))
return (fwhm_x, fwhm_y, ctr_x, ctr_y, sdx, sdy, maxx, maxy)
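# starsize: convert the pixel FWHMs to degrees using the per-axis plate
# scales (deg/pixel), average the two axes and express the result in
# arcseconds (1 degree = 3600 arcsec).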
def starsize(self, fwhm_x, deg_pix_x, fwhm_y, deg_pix_y):
cdelta1 = math.fabs(deg_pix_x)
cdelta2 = math.fabs(deg_pix_y)
fwhm = (fwhm_x * cdelta1 + fwhm_y * cdelta2) / 2.0
fwhm = fwhm * 3600.0
return fwhm
def centroid(self, data, xc, yc, radius):
x0, y0, arr = self.cut_region(xc, yc, radius, data)
cy, cx = ndimage.center_of_mass(arr)
return (cx, cy)
# FINDING BRIGHT PEAKS
def get_threshold(self, data, sigma=5.0):
median = numpy.median(data)
# NOTE: for this method a good default sigma is 5.0
dist = numpy.fabs(data - median).mean()
threshold = median + sigma * dist
# NOTE: for this method a good default sigma is 2.0
## std = numpy.std(data - median)
## threshold = median + sigma * std
self.logger.debug("calc threshold=%f" % (threshold))
return threshold
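# Worked example (illustrative numbers only): if the median of the data is
# 100.0 and the mean absolute deviation from the median is 3.0, then with
# the default sigma=5.0 the threshold returned above is 100.0 + 5.0*3.0 = 115.0.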
def find_bright_peaks(self, data, threshold=None, sigma=5, radius=5):
"""
Find bright peak candidates in (data). (threshold) specifies a
threshold value below which an object is not considered a candidate.
If threshold is None, a default is calculated using (sigma).
(radius) defines a pixel radius for determining local maxima--if the
desired objects are larger in size, specify a larger radius.
The routine returns a list of candidate object coordinate tuples
(x, y) in data.
"""
if threshold is None:
# set threshold to default if none provided
threshold = self.get_threshold(data, sigma=sigma)
self.logger.debug("threshold defaults to %f (sigma=%f)" % (
threshold, sigma))
data_max = filters.maximum_filter(data, radius)
maxima = (data == data_max)
diff = data_max > threshold
maxima[diff == 0] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
peaks = []
for dy, dx in slices:
xc = (dx.start + dx.stop - 1)/2.0
yc = (dy.start + dy.stop - 1)/2.0
# This is only an approximate center; use FWHM or centroid
# calculation to refine further
peaks.append((xc, yc))
return peaks
def cut_region(self, x, y, radius, data):
"""Return a cut region (radius) pixels away from (x, y) in (data).
"""
n = radius
ht, wd = data.shape
x0, x1 = max(0, x-n), min(wd-1, x+n)
y0, y1 = max(0, y-n), min(ht-1, y+n)
arr = data[y0:y1+1, x0:x1+1]
return (x0, y0, arr)
def cut_cross(self, x, y, radius, data):
"""Cut two data subarrays that have a center at (x, y) and with
radius (radius) from (data). Returns the starting pixel (x0, y0)
of each cut and the respective arrays (xarr, yarr).
"""
n = radius
ht, wd = data.shape
x0, x1 = max(0, x-n), min(wd-1, x+n)
y0, y1 = max(0, y-n), min(ht-1, y+n)
xarr = data[y, x0:x1+1]
yarr = data[y0:y1+1, x]
return (x0, y0, xarr, yarr)
def brightness(self, x, y, radius, medv, data):
"""Return the brightness value found in a region (radius) pixels away
from (x, y) in (data).
"""
x0, y0, arr = self.cut_region(x, y, radius, data)
arr2 = numpy.sort(arr.flat)
idx = int(len(arr2) * 0.8)
res = arr2[idx] - medv
return float(res)
def fwhm_data(self, x, y, data, radius=15):
return self.get_fwhm(x, y, radius, data)
# EVALUATION ON A FIELD
def evaluate_peaks(self, peaks, data, bright_radius=2, fwhm_radius=15,
fwhm_method=1, cb_fn=None, ev_intr=None):
height, width = data.shape
hh = float(height) / 2.0
ht = float(height)
h4 = float(height) * 4.0
wh = float(width) / 2.0
wd = float(width)
w4 = float(width) * 4.0
# Find the median (sky/background) level
median = float(numpy.median(data))
#skylevel = median
# Old SOSS qualsize() applied this calculation to skylevel
skylevel = median * self.skylevel_magnification + self.skylevel_offset
# Form a list of objects and their characteristics
objlist = []
for x, y in peaks:
if ev_intr and ev_intr.isSet():
raise IQCalcError("Evaluation interrupted!")
# Find the fwhm in x and y
try:
if fwhm_method == 1:
(fwhm_x, fwhm_y, ctr_x, ctr_y,
sdx, sdy, maxx, maxy) = self.fwhm_data(x, y, data,
radius=fwhm_radius)
## # Average the X and Y gaussian fitting near the peak
bx = self.gaussian(round(ctr_x), (ctr_x, sdx, maxx))
by = self.gaussian(round(ctr_y), (ctr_y, sdy, maxy))
## ## bx = self.gaussian(ctr_x, (ctr_x, sdx, maxx))
## ## by = self.gaussian(ctr_y, (ctr_y, sdy, maxy))
bright = float((bx + by)/2.0)
else:
raise IQCalcError("Method (%d) not supported for fwhm calculation!" %(
fwhm_method))
except Exception as e:
# Error doing FWHM, skip this object
self.logger.debug("Error doing FWHM on object at %.2f,%.2f: %s" % (
x, y, str(e)))
continue
self.logger.debug("orig=%f,%f ctr=%f,%f fwhm=%f,%f bright=%f" % (
x, y, ctr_x, ctr_y, fwhm_x, fwhm_y, bright))
# overall measure of fwhm as a single value
#fwhm = math.sqrt(fwhm_x*fwhm_x + fwhm_y*fwhm_y)
#fwhm = (math.fabs(fwhm_x) + math.fabs(fwhm_y)) / 2.0
fwhm = (math.sqrt(fwhm_x*fwhm_x + fwhm_y*fwhm_y) *
(1.0 / math.sqrt(2.0)) )
# calculate a measure of ellipticity
elipse = math.fabs(min(fwhm_x, fwhm_y) / max(fwhm_x, fwhm_y))
# calculate a measure of distance from center of image
dx = wh - ctr_x
dy = hh - ctr_y
dx2 = dx*dx / wd / w4
dy2 = dy*dy / ht / h4
if dx2 > dy2:
pos = 1.0 - dx2
else:
pos = 1.0 - dy2
obj = Bunch.Bunch(objx=ctr_x, objy=ctr_y, pos=pos,
fwhm_x=fwhm_x, fwhm_y=fwhm_y,
fwhm=fwhm, fwhm_radius=fwhm_radius,
brightness=bright, elipse=elipse,
x=int(x), y=int(y),
skylevel=skylevel, background=median)
objlist.append(obj)
if cb_fn is not None:
cb_fn(obj)
return objlist
# def _compare(self, obj1, obj2):
# val1 = obj1.brightness * obj1.pos/math.sqrt(obj1.fwhm)
# val2 = obj2.brightness * obj2.pos/math.sqrt(obj2.fwhm)
# if val1 > val2:
# return -1
# elif val2 > val1:
# return 1
# else:
# return 0
def _sortkey(self, obj):
val = obj.brightness * obj.pos/math.sqrt(obj.fwhm)
return val
def objlist_select(self, objlist, width, height,
minfwhm=2.0, maxfwhm=150.0, minelipse=0.5,
edgew=0.01):
results = []
count = 0
for obj in objlist:
count += 1
self.logger.debug("%d obj x,y=%.2f,%.2f fwhm=%.2f bright=%.2f" % (
count, obj.objx, obj.objy, obj.fwhm, obj.brightness))
# If peak has a minfwhm < fwhm < maxfwhm and the object
# is inside the frame by edgew pct
if ((minfwhm < obj.fwhm) and (obj.fwhm < maxfwhm) and
(minelipse < obj.elipse) and (width*edgew < obj.x) and
(height*edgew < obj.y) and (width*(1.0-edgew) > obj.x) and
(height*(1.0-edgew) > obj.y)):
results.append(obj)
#results.sort(cmp=self._compare)
results.sort(key=self._sortkey, reverse=True)
return results
def pick_field(self, data, peak_radius=5, bright_radius=2, fwhm_radius=15,
threshold=None,
minfwhm=2.0, maxfwhm=50.0, minelipse=0.5,
edgew=0.01):
height, width = data.shape
# Find the bright peaks in the image
peaks = self.find_bright_peaks(data, radius=peak_radius,
threshold=threshold)
#print "peaks=", peaks
self.logger.info("peaks=%s" % str(peaks))
if len(peaks) == 0:
raise IQCalcError("Cannot find bright peaks")
# Evaluate those peaks
objlist = self.evaluate_peaks(peaks, data,
bright_radius=bright_radius,
fwhm_radius=fwhm_radius)
if len(objlist) == 0:
raise IQCalcError("Error evaluating bright peaks")
results = self.objlist_select(objlist, width, height,
minfwhm=minfwhm, maxfwhm=maxfwhm,
minelipse=minelipse, edgew=edgew)
if len(results) == 0:
raise IQCalcError("No object matches selection criteria")
return results[0]
def qualsize(self, image, x1=None, y1=None, x2=None, y2=None,
radius=5, bright_radius=2, fwhm_radius=15, threshold=None,
minfwhm=2.0, maxfwhm=50.0, minelipse=0.5,
edgew=0.01):
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
data = image.cutout_data(x1, y1, x2, y2, astype='float32')
qs = self.pick_field(data, peak_radius=radius,
bright_radius=bright_radius,
fwhm_radius=fwhm_radius,
threshold=threshold,
minfwhm=minfwhm, maxfwhm=maxfwhm,
minelipse=minelipse, edgew=edgew)
# Add back in offsets into image to get correct values with respect
# to the entire image
qs.x += x1
qs.y += y1
qs.objx += x1
qs.objy += y1
self.logger.debug("obj=%f,%f fwhm=%f sky=%f bright=%f" % (
qs.objx, qs.objy, qs.fwhm, qs.skylevel, qs.brightness))
return qs
#END
|
bsipocz/ginga
|
ginga/util/iqcalc.py
|
Python
|
bsd-3-clause
| 14,707
|
[
"Gaussian"
] |
bdcf289f899fcc3d6a553c8beed9040fcc7ab7203436d52bad8e529e3f76557c
|
#!/usr/bin/env python
from Bio import SeqIO
import tempfile
import subprocess
import shutil
import os
import re
"""
FASTA statting tool. Usable from the shell or imported as a module.
"""
entrez_CFSAN_genera = '("Campylobacter"[Organism]) OR ("Erwinia"[Organism]) OR ("Listeria"[Organism]) OR ("Escherichia"[Organism]) OR ("Vibrio"[Organism]) OR ("Salmonella"[Organism]) OR ("Bacillus"[Organism]) OR ("Achromobacter"[Organism]) OR ("Citrobacter"[Organism]) OR ("Proteus"[Organism]) OR ("Serratia"[Organism]) OR ("Brenneria"[Organism]) OR ("Paenibacillus"[Organism]) OR ("Brucella"[Organism]) OR ("Enterobacter"[Organism]) OR ("Clostridium"[Organism]) OR ("Cronobacter"[Organism]) OR ("Mycoplasma"[Organism]) OR ("Lymphocryptovirus"[Organism]) OR ("Klebsiella"[Organism]) OR ("Shigella"[Organism])'
def stat_fasta(fasta_file):
"Basic FASTA statistics. Can't determine average coverage on its own."
d = {'num_contigs':'',
'n50':'',
'num_bases':'',
'fasta_file':os.path.basename(fasta_file)
}
#print "statting ", fasta_file
with open(fasta_file, 'r') as f:
fasta = list(SeqIO.parse(f, "fasta"))
if not len(fasta):
raise ValueError("No contigs or improperly formatted fasta.")
print "Tried to read:\n{}".format(open(fasta_file, 'rU').read())
#determine number of contigs
d['num_contigs'] = len(fasta)
#determine N50, see http://en.wikipedia.org/wiki/N50_statistic
f_prime = list()
for contig in fasta:
for i in range(len(contig)):
f_prime.append(len(contig)) #"Create another list L' , which is identical to L, except that every element n in L has been replaced with n copies of itself"
f_prime.sort()
if len(f_prime) % 2 == 0: #is even:
d['n50'] = (f_prime[len(f_prime) / 2] + f_prime[len(f_prime) / 2 + 1]) / 2
else:
d['n50'] = f_prime[len(f_prime) / 2]
d['num_bases'] = sum([len(contig) for contig in fasta])
return d
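# Worked example of the N50 computation above (illustrative only): for contig
# lengths [2, 2, 2, 3, 3, 4, 8, 8] the expanded list L' has 32 entries (each
# length n repeated n times); the length is even, so the code averages
# L'[16] and L'[17], which are both 8, giving n50 = 8 -- the length such that
# contigs of that size or larger contain at least half of the 32 total bases.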
def stat_abyss(fasta_file, k_value=64):
"FASTA statistics using information in ABySS headers, including average coverage."
d = stat_fasta(fasta_file)
contig_cov = list()
with open(fasta_file, 'r') as f:
for contig in SeqIO.parse(f, "fasta"):
raw_cov = float(contig.description.split(" ")[2])
contig_cov.append(raw_cov / (float(len(contig)) - float(k_value) + 1))
avg_cov = sum(contig_cov) / float(len(contig_cov))
d['average_coverage'] = "{}X".format(int(avg_cov))
return d
def stat_velvet(fasta_file, k_value=171):
"FASTA statistics using information in Velvet headers, including average coverage."
d = stat_fasta(fasta_file)
#print "statting (velvet headers)", fasta_file
#>NODE_1_length_71394_cov_21.306412
cov_exp = re.compile(r"length_(?P<length>\d*)_cov_(?P<cov>\d*\.\d*)")
with open(fasta_file, 'r') as f:
covs = list()
for contig in SeqIO.parse(f, "fasta"):
m = cov_exp.search(contig.description)
if m:
cov=(float(m.groupdict()['cov']))
length=(float(m.groupdict()['length']))
covs.append(cov * (length - int(k_value) + 1) / length)
avg_cov = sum(covs) / float(len(covs))
d['average_coverage'] = "{}X".format(int(avg_cov))
return d
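# Note (assumption): the (length - k_value + 1) / length factor applied in the
# loop above has the same form as Velvet's documented relation between k-mer
# coverage and nucleotide coverage, Ck = C * (L - k + 1) / L; the reported
# average_coverage should be interpreted with that scaling in mind.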
def stat_blast(fasta_file, callback=None, organism=entrez_CFSAN_genera, num_threads=4):
"Experimental method to determine assembly 'realness' by blasting against reference sequences."
print ""
import datetime
if not callback:
def callback(s):
print "[{}]".format(datetime.datetime.today().ctime()), s
callback("Importing modules...")
from Bio.Blast import NCBIWWW
import xml.etree.ElementTree as xml
callback("Statting fasta...")
d = stat_fasta(fasta_file)
results = list()
callback("Loading {}...".format(fasta_file))
# with open(fasta_file, 'r') as f:
# os.chdir("/data/blast_db")
# for contig in sorted(list(SeqIO.parse(f, "fasta")), key=lambda c: -len(c)):
# if len(contig) >= d['n50']:
# callback("BLASTing {} ({} bases)...".format(contig.description, len(contig)))
# result = xml.parse(subprocess.check_output("blastn -db refseq_genomic -query {} -task blastn -num_threads 8 -outfmt 5 -max_target_seqs 1".format(contig.seq), shell=True)
# #results.append(result.findall(".//Hsp_identity")[0:1])
# realness = float(result.findall(".//Hsp_identity")[0].text) / float(len(contig)) * 100.0
# print "{}% identity to something real".format(realness)
with open(fasta_file, 'r') as f:
os.chdir("/data/blast_db")
#contigs = filter(lambda c: len(c) >= d['n50'], sorted(list(SeqIO.parse(f, "fasta")), key=lambda c: -len(c)))
contigs = filter(lambda c: len(c) < 10000, sorted(list(SeqIO.parse(f, "fasta")), key=lambda c: -len(c)))
p = 0
results = list()
while p < len(contigs):
q = p + num_threads
#make a query file
with tempfile.NamedTemporaryFile("w") as query_file:
query = "\n".join([">{}\n{}".format(c.description, c.seq) for c in contigs[p:q]])
query_file.write(query)
callback("Made contigs file {}".format(query_file.name))
callback("BLASTing {}-{} of {} contigs...".format(p + 1, min(q, len(contigs)), len(contigs)))
r = subprocess.check_output("blastn -db refseq_genomic -query {} -task blastn -num_threads 8 -outfmt 5 -max_target_seqs 1".format(query_file.name), shell=True)
callback("BLAST complete. Parsing...")
result = xml.ElementTree(xml.fromstring(r))  # r is an XML string, not a file path
#result.write("/home/justin.payne/blast{}.xml".format(min(q, len(contigs))))
results.append(result)
p = q
callback("Done.")
realnesses = list()
for r in results:
for iteration in r.findall(".//Iteration"):
realness = float(iteration.find("/Hit/Hit_hsps/Hsp/Hsp_identity").text) / float(iteration.find("/Iteration_query-len").text) * 100.0
realnesses.append(realness)
total_realness = sum(realnesses) / float(len(realnesses))
return total_realness
def find_closest_ref(fasta_file, callback=None, update_callback=lambda d: None, organism=entrez_CFSAN_genera):
"Find closest match in NCBI Refseq to longest contig, then collect URL for it"
if not callback:
import datetime
def callback(s):
print "[{}]".format(datetime.datetime.today().ctime()), s
callback("Importing modules...")
from Bio.Blast import NCBIWWW
import xml.etree.ElementTree as xml
callback("Loading fasta ({})...".format(fasta_file))
with open(fasta_file, 'r') as f:
contigs = iter(sorted(list(SeqIO.parse(f, 'fasta')), lambda a,b: cmp(len(a), len(b))))
contig = contigs.next()
while len(contig) < 1500:
try:
contig = contigs.next()
except StopIteration:
break
callback("Longest contig is {} bases. BLASTing...".format(len(contig)))
r = NCBIWWW.qblast("blastn", "chromosome", ">{}\n{}".format(contig.description, contig.seq),
alignments=1,
entrez_query="{}".format(organism),
hitlist_size=1,
filter='L')
callback("BLAST finished.")
result = xml.parse(r)
refseq = result.find(".//Iteration/Iteration_hits/Hit/Hit_id").text.split("|")[1]
refseq_url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id={}&rettype=fasta&retmode=text'.format(refseq)
update_callback({'ref_file':refseq, 'ref_url':refseq_url})
return refseq
#
# with open(fasta_file, 'r') as f:
# contigs = filter(lambda c: len(c) >= d['n50'], sorted(list(SeqIO.parse(f, "fasta")), key=lambda c: -len(c)))
# p = 0
# results = list()
# while p < len(contigs):
# q = p + num_threads
# callback("BLASTing {}-{} of {} contigs...".format(p, min(q, len(contigs)), len(contigs)))
# r = NCBIWWW.qblast("blastn", "refseq_genomic", "\n".join([">{}\n{}".format(c.description, c.seq) for c in contigs[p:q]]), alignments=1, entrez_query="{}[Organism]".format(organism))
# callback("BLAST complete. Parsing...")
# result = xml.parse(r)
# result.write("/home/justin.payne/blast{}.xml".format(min(q, len(contigs))))
# results.append(result)
# p = q
# callback("Done.")
def quast_compare(path, fastas, gi=None, callback=None, update_callback=lambda d: None, debug=False, **kwargs):
"Find best reference, then use Quast to compare assemblies and return best one."
if not callback:
import datetime
def callback(s):
print "\n[{}] ".format(datetime.datetime.today().ctime()) + s
import urllib
import tempfile
import subprocess
import glob
import csv
def rank(a, b):
"Rank two assemblers based on a priority list"
#our own intuition about which assemblies are better, from best to worst
assemblers = ['SPAdes', 'ABySS', 'Velvet', 'CLC']
if a['assembler'] in assemblers and b['assembler'] in assemblers:
return cmp(assemblers.index(a['assembler']),
assemblers.index(b['assembler']))
elif a['assembler'] in assemblers:
return -1
else:
return 1
temp_dir = tempfile.mkdtemp()
def cb(a, b, c):
if int(a) % 100 == 0:
callback("Downloaded block {} of {}\r".format(a, int(c) / int(b)))
try:
fastas.sort(rank)
if "WORST" in fastas[0]['assembler']:
raise ValueError("MISASSEMBLY DO NOT USE")
callback("Getting closest reference.")
if not gi:
gi = find_closest_ref(os.path.join(path, fastas[0]['fasta_file']), callback=callback, update_callback=update_callback, **kwargs)
urllib.urlretrieve('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id={}&rettype=fasta&retmode=text'.format(gi),
os.path.join(temp_dir, "reference.fasta"),
cb)
callback("Getting gene annotations.")
urllib.urlretrieve('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id={}&rettype=ft&retmode=text'.format(gi),
os.path.join(temp_dir, "reference.genes"),
cb)
with open(os.path.join(temp_dir, "reference.fasta"), 'r') as ref_fasta:
update_callback({'ref_url':ref_fasta.readline()})
if os.path.exists("{}quast/".format(path)):
shutil.rmtree("{}quast/".format(path))
for r in fastas:
if not os.path.exists(os.path.join(path, r['fasta_file'])):
callback("Symlinking {}".format(r['fasta_file']))
try:
os.symlink(os.path.join(path, r['fasta_file'].split('.')[0]+".fasta"), os.path.join(path, r['fasta_file']))
except OSError:
try:
shutil.copyfile(os.path.join(path, r['fasta_file'].split('.')[0]+".fasta"), os.path.join(path, r['fasta_file']))
except OSError:
pass
callback("Running QUAST with reference gi{}...".format(gi))
subprocess.check_call("quast {} -R {}/reference.fasta -G {}/reference.genes -o {} --labels {} --gene-finding".format(
" ".join([os.path.join(path, r['fasta_file']) for r in fastas]),
temp_dir,
temp_dir,
os.path.join(path, "quast"),
",".join([r['assembler'] for r in fastas])), shell=True)
except OSError:
pass
finally:
callback("Cleaning {}...".format(temp_dir))
shutil.rmtree(temp_dir)
with open("{}/quast/report.tsv".format(path), 'rU') as report:
#load generated report
r = list(csv.DictReader(report, dialect='excel', delimiter='\t'))
#QUAST reports come in with the assemblers as the columns; need to pivot this table so assemblers are as rows
tbl = [] #table
#get assemblers from column headers
for h in r[0].keys():
if 'Assembly' not in h:
tbl.append({'assembler':h})
for row in r:
#these are the specific fields we want, in a row-dict whose key is "Assembly" (I know, right?)
for h in row.keys():
if 'Assembly' not in h:
tr = filter(lambda t: h in t['assembler'], tbl)[0]
tr[row['Assembly']] = row[tr['assembler']]
for r in tbl:
for h in r.keys():
#cast to number values if possible
try:
r[h] = int(r[h])
except ValueError:
try:
r[h] = float(r[h])
except ValueError:
if r[h] == '-':
r[h] = 0
for h in ('Genome fraction (%)', 'NA50', 'Reference length', 'Reference GC (%)', '# misassemblies'):
if h not in r:
r[h] = 0
try:
#"goodness" model
r['goodness'] = ((r['Genome fraction (%)'] / 100) or 0) * ( (r['NA50'] or r['N50']) * min(r['Total length'], r['Reference length']) ) / ( pow( r['GC (%)'] - r['Reference GC (%)'], 2) * max(r['# misassemblies'], 1) * r['# contigs'])
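# Reading of the "goodness" model above: the score grows with genome
# fraction and NA50 (falling back to N50) scaled by the smaller of the
# assembly and reference lengths, and shrinks with the squared GC%
# deviation from the reference, the misassembly count (floored at 1)
# and the number of contigs.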
except KeyError as e:
if debug:
print tbl
raise e
return tbl
def read_map(assembly, read1, read2=None, callback=lambda s: None):
"Remap raw reads to assembly using Bowtie to determine average library insert length."
temp = tempfile.mkdtemp()
index = os.path.join(temp, "index")
#alignment = os.path.join(temp, "alignment.map")
try:
callback("statting: building index")
results = subprocess.check_output("bowtie-build {assembly} {index}".format(assembly=assembly, index=index), shell=True)
callback("statting: mapping reads to assembly")
if read2:
results = subprocess.check_output("bowtie -p 4 -v 1 -s 25 {index} -1 {read1} -2 {read2} -I 50 -X 3000 --quiet --suppress 1,2,6,7,8".format(index=index, read1=read1, read2=read2), shell=True)
else:
results = subprocess.check_output("bowtie -p 4 -v 1 -s 25 {index} -1 {read1} --quiet --suppress 1,2,6,7,8".format(index=index, read1=read1), shell=True)
#capture results
results = results.split("\n") #turn into lines
inserts = list()
for i in range(0, len(results), 2):
try:
if results[i].split("\t")[0] not in results[i+1].split("\t")[0]:
print results[i].split("\t")[0], results[i+1].split("\t")[0]
raise ValueError("Paired-end reads didn't map to same contigs.")
coord1 = float(results[i].split("\t")[1])
coord2 = float(results[i+1].split("\t")[1]) + float(len(results[i+1].split("\t")[2]))
inserts.append(coord2 - coord1)
except IndexError:
pass
avg_insert = sum(inserts) / float(len(inserts))
finally:
shutil.rmtree(temp)
return avg_insert
#fasta_statter.read_map("/shared/gn2/CFSANgenomes/CFSAN002091/asm/CFSAN002091_01.fasta", "/shared/gn2/CFSANgenomes/CFSAN002091/CFSAN002091_01/CFSAN002091-01_S6_L001_R1_001.fastq", "/shared/gn2/CFSANgenomes/CFSAN002091/CFSAN002091_01/CFSAN002091-01_S6_L001_R2_001.fastq")
#fasta_statter.stat_blast("/shared/gn2/CFSANgenomes/CFSAN002091/asm/CFSAN002091_01.fasta")
if __name__ == "__main__":
import sys
import csv
import traceback
def ucb(d):
for (k,v) in d.items():
print k, ':', v
debug = '-debug' in sys.argv
if '-test-quast' in sys.argv:
fastas = list(csv.DictReader(open('/shared/gn2/CFSANgenomes/CFSAN004357/asm/comparative_assembly_stats.txt', 'r'), delimiter='\t'))
print quast_compare('/shared/gn2/CFSANgenomes/CFSAN004357/asm/',
fastas,
#gi = '523917454',
update_callback=ucb,
debug=debug)
elif '-redo-quast' in sys.argv:
try:
accession = sys.argv.pop(sys.argv.index('-redo-quast') + 1)
fastas = list(csv.DictReader(open('/shared/gn2/CFSANgenomes/{}/asm/comparative_assembly_stats.txt'.format(accession), 'r'), delimiter='\t'))
quast_results = quast_compare('/shared/gn2/CFSANgenomes/{}/asm/'.format(accession),
fastas,
update_callback=ucb,
debug=debug)
headers = ('assembler',
'n50',
'num_contigs',
'Total length',
'Reference length',
'Genome fraction (%)',
'# misassemblies',
'goodness')
widths = [max([max(len(str(r.get(h, ''))), len(str(h))) for r in quast_results]) + 2 for h in headers] #get longest string in each field in quast_results
print "".join([str(h).ljust(w) for (w, h) in zip(widths, headers)])
for r in sorted(quast_results, key=lambda r: r['assembler']):
print "".join([str(r.get(h, '-')).ljust(w) for (w, h) in zip(widths, headers)])
except IOError as e:
print "{}: Supersembler not performed on this isolate.".format(e)
try:
subprocess.check_call("quast /shared/gn2/CFSANgenomes/{0}/asm/*.fasta -o /shared/gn2/CFSANgenomes/{0}/asm/quast/ -R /shared/gn2/CFSANgenomes/CFSAN002060/asm/CFSAN002060.fasta --gene-finding -t 8".format(accession), shell=True)
except subprocess.CalledProcessError as e:
print e.output
except IndexError:
print "Specify FDA CFSAN accession number (i.e. 'CFSAN001250')"
quit()
elif '-scan-assemblies' in sys.argv:
import glob
for path in glob.glob('/shared/gn2/CFSANgenomes/*/asm/'):
try:
fastas = list(csv.DictReader(open(os.path.join(path, 'comparative_assembly_stats.txt'), 'r'), delimiter='\t'))
quast_compare(path, fastas, update_callback=ucb)
except IOError:
try:
subprocess.check_call("quast {0}*.fasta -o {0}quast/ --gene-finding -t 8".format(path), shell=True)
except subprocess.CalledProcessError:
pass
except Exception:
traceback.print_exc(file=sys.stderr)
else:
ucb(stat_fasta(os.path.normpath(sys.argv[1])))
|
crashfrog/Dispatch
|
fasta_statter.py
|
Python
|
unlicense
| 16,556
|
[
"BLAST",
"Bowtie"
] |
2e5a907f75b147d1cf4365f7e3015bb3a047344c3655548692077c382964f506
|
#!/usr/bin/env python
''' Example use :
cat file.fq | ./fastq2wm33.py | ./bwa pssm ... -
Significant speedup can be achieved by using the pypy interpreter: http://pypy.org/
After installation, change the interpreter from python to pypy in the first line, or run
cat file.fq | pypy fastq2wm33.py | ./bwa pssm ... - '''
import array
import sys
import math
from optparse import OptionParser
# ------------------------------------------------------------
# Global parameters
# ------------------------------------------------------------
# Genome base composition
Q = array.array( 'f', [0.25, 0.25, 0.25, 0.25] )
# Damage
# Set the probability of a C->T and G->A at any position in the
# 5'end of the read up to position $Ndamage.
# Cytosine to thymine misincorporation rates are highest (30.7%) at
# the first position of the sequences and decrease by approximately
# twofold per position as the read progresses (Fig. 4, bottom).
# This rate was reduced to 3.2% at the fifth nucleotide.
Ndamage = 6
CT5 = array.array( 'f', [0.307,0.16,0.067,0.043,0.032,0.024] )
GA5 = array.array( 'f', [0.,0.,0.,0.,0.,0.] )
# Similar for the 3' end of the read (starting with last base of read)
CT3 = array.array( 'f', [0.,0.,0.,0.,0.,0.] )
GA3 = array.array( 'f', [0.307,0.16,0.067,0.043,0.032,0.024] )
# Mutations
# If we have an overall mutation frequency A and assume that
# transversions and transitions have same prob,
# P(a|g)=A/3 for a different from g
ptransition = 0.005/3
ptransversion = ptransition
# Alphabet
alph = array.array( 'c', ["A", "C", "G", "T"] )
logConstant = 1.0/math.log(2.0);
# ------------------------------------------------------------
# define matrix functions
# ------------------------------------------------------------
# ------------------------------------------------------------
# Define mutation matrix
# This is the prob P(a|g) for sample base a and genome g.
def mutation_matrix() :
#Create a matrix (list of lists)
mut = [ [ptransversion for i in xrange(4)] for j in range(4) ]
# Fill transitions
mut[0][2] = mut[2][0] = mut[1][3] = mut[3][1] = ptransition
# Fill diagonals
mut[0][0] = mut[1][1] = mut[2][2] = mut[3][3] = 1.0 - 2.0*ptransversion - ptransition
return mut
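# With the module defaults (ptransition = ptransversion = 0.005/3) every
# off-diagonal entry of mut equals 0.005/3 and every diagonal entry equals
# 1 - 2*ptransversion - ptransition = 0.995, so each column sums to 1.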
# ------------------------------------------------------------
# Define the damage matrix
# This is the product matrix P(b|g) = \sum_a P(b|a)P(a|g)
# for damaged b, sample a and genome g.
# For each position in the 5' and 3' end it differs
#
# For a given set of parameters, it returns a reference to a matrix
def damage_matrix( ct, ga, mut ) :
# Construct as identity matrix (list of lists)
dam = [ [1.0 if i == j else 0 for i in xrange(4)] for j in range(4) ]
# C -> T
dam[3][1] = ct # P(T|C)
dam[1][1] = 1.0 - ct # P(C|C)
# G -> A
dam[0][2] = ga # P(A|G)
dam[2][2] = 1.0 - ga # P(G|G)
# Product \sum_a P(b|a)P(a|g) where P(a|g) is $mut[$a][$g]
r = [ [0.0 for i in xrange(4)] for j in range(4) ]
for b in xrange( 4 ) :
for g in xrange( 4 ) :
for a in xrange( 4 ) :
r[b][g] += dam[b][a] * mut[a][g]
return r
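# Illustrative special case: if mut were the identity matrix and ct = 0.307,
# ga = 0.0, the product above reduces to the damage matrix itself, i.e.
# P(T|C) = 0.307, P(C|C) = 0.693, with all other columns unchanged.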
# ------------------------------------------------------------
# Define correction matrix
# Correction to qual score probability
# A(b,g) = P(b,g)/\sum_g' P(b,g')
# where P(b,g)=P(b|g)P(g)
def correction_matrix( r, q ) : # arguments ( P(b|g), P(g) )
A = [ [0.0 for i in xrange(4)] for j in range(4) ]
for b in xrange( 4 ) :
# Sum over r(b|g) * q(g) for g = 0,1,2,3
s = sum( iter(r[b][g] * q[g] for g in xrange(4) ) )
for g in xrange(4) :
A[b][g] = r[b][g] * q[g] / s
return A
# ------------------------------------------------------------
# Define geno probabilities
# The actual genomic probabilities for x - the called base and qual
# Returns the 4 base probs P(g|x)
def genome_probs( A, p ) :
r = [0.0 for i in xrange(4)]
for g in xrange(4) :
for b in xrange(4) :
r[g] += p[b] * A[b][g]
return r
# ------------------------------------------------------------
# Calculate adjusted scores
# For a given qual score (number) and called base (number),
# return the scores of the four bases using the correction matrix $A
def adjusted_scores( A, base, qual, q ) :
e = math.pow(10, -0.1 * qual )
if e > 0.75 : e = 0.75
p = [e/3, e/3, e/3, e/3]
p[base] = 1.0 - e
r = genome_probs(A, p)
for g in xrange(4) :
r[g] = logConstant * math.log( r[g] / q[g] )
# Convert output to str and return
return [ '%.2f'%ir for ir in r ]
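# Worked example (illustrative only): for qual = 20 the error probability is
# e = 10**(-0.1*20) = 0.01, so the called base gets p = 0.99 and each other
# base gets p = 0.01/3, before the correction matrix A and the final
# log2(prob/background) conversion are applied.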
def main():
usage = """
Example use :
cat file.fq | ./fastq2wm33.py | ./bwa pssm ... -
Significant speedup can be achieved by using the pypy interpreter: http://pypy.org/
After installation, change the interpreter from python to pypy in the first line, or run
cat file.fq | pypy fastq2wm33.py | ./bwa pssm ... -
"""
num_args= 0
parser = OptionParser(usage=usage)
parser.add_option('-q', '--quality-base', dest='quality_base', default=33,
help="The base quality score (33 or 64, usually)", type='int')
parser.add_option('-m', '--quality-max', dest='quality_max', default=40,
help="The maximum possible quality score", type='int')
parser.add_option('-l', '--max-length', dest='max_length', default=76,
help="The maximum possible length", type='int')
#parser.add_option('-u', '--useless', dest='uselesss', default=False, action='store_true', help='Another useless option')
(options, args) = parser.parse_args()
if len(args) < num_args:
parser.print_help()
sys.exit(1)
# ScoreHash will be a dictionary of string lists : [key][base] = str(value)
scoreHash = {}
mut = mutation_matrix()
A = correction_matrix( mut, Q )
# scoreHash away from ends: No damage; Rate = mutation matrix
for i in xrange( options.quality_max + 1) :
for x in xrange( 4 ) :
key = alph[x] + chr( options.quality_base + i )
scoreHash[key] = adjusted_scores( A,x,i,Q )
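# Key scheme: interior positions use called-base + ASCII quality character
# (e.g. "A" + chr(quality_base + q)); positions within Ndamage of the 5' end
# append the position index, and positions within Ndamage of the 3' end append
# "-" plus the distance from the read end, matching the lookups in the read
# loop below.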
# Do the same for 5' positions
for k in xrange( Ndamage ) :
# Damage and correction matrices for this 5' position
R = damage_matrix( CT5[k], GA5[k], mut )
A = correction_matrix( R, Q )
for i in xrange( options.quality_max + 1) :
for x in xrange( 4 ) :
key = alph[x] + chr( options.quality_base + i ) + str(k)
scoreHash[key] = adjusted_scores( A, x, i, Q )
# Do the same for 3' positions
for k in xrange( Ndamage ) :
# Damage and correction matrices for this 3' position
R = damage_matrix( CT3[k], GA3[k], mut )
A = correction_matrix( R,Q )
for i in xrange( options.quality_max + 1 ) :
for x in xrange( 4 ) :
key = alph[x] + chr( options.quality_base + i ) + '-' + str(k)
scoreHash[key] = adjusted_scores(A, x, i, Q)
# ------------------------------------------------------------
# Read from STDIN, write to STDOUT
# ------------------------------------------------------------
count = 0
seq = array.array('c')
qual = array.array('c')
for line in sys.stdin :
# New read
if line[0] == '@' :
count = 0
if count < 2 :
print line,
if count == 1 :
seq = array.array('c', list( line.strip() ) )
elif count == 3 :
print '&'
qual = array.array('c', list( line.strip() ) )
length = len( qual )
if length < 2*Ndamage + 2 : continue
score_arr = [[] for x in xrange(length)]
for i, (iseq, iqual) in enumerate( zip( seq, qual ) ) :
# Not recognised base
if iseq not in 'ACGT' :
iseq = 'A'
iqual = chr(options.quality_base)
key = iseq + iqual
# If in beginning of read
if i < Ndamage : key += str(i)
# If in end of read
elif length < options.max_length :
j = length-i-1
if j < Ndamage :
key += '-'+str(j)
score_arr[i] = scoreHash[key]
# Transpose list-list
score_arr = map(list, zip(*score_arr))
print '\n'.join( [' '.join( irow ) for irow in score_arr ] )
# Increment where in read line is
count+=1
if __name__ == '__main__':
sys.exit( main() )
|
pkerpedjiev/bwa-pssm
|
scripts/fastq2wm.py
|
Python
|
gpl-3.0
| 8,621
|
[
"BWA"
] |
91431afda2dda1506cb3f4339430d1f70a7aed2774e03e7895c21e72b8f52db6
|
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss (jonasgliss@gmail.com)
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pyplis module for DOAS calibration including FOV search engines."""
from __future__ import (absolute_import, division)
from numpy import (min, arange, asarray, zeros, column_stack,
ones, nan, float64)
from scipy.stats.stats import pearsonr
from scipy.sparse.linalg import lsmr
from datetime import datetime
from pandas import Series
from copy import deepcopy
from astropy.io import fits
from traceback import format_exc
import six
from pyplis import logger
from matplotlib.pyplot import subplots
from matplotlib.patches import Circle, Ellipse
from matplotlib.cm import RdBu
from matplotlib.dates import DateFormatter
from .glob import SPECIES_ID
from .helpers import (shifted_color_map, mesh_from_img, get_img_maximum,
sub_img_to_detector_coords, map_coordinates_sub_img,
exponent, rotate_xtick_labels)
from .optimisation import gauss_fit_2d, GAUSS_2D_PARAM_INFO
from .image import Img
from .inout import get_camera_info
from .setupclasses import Camera
from .calib_base import CalibData
from .helpers import make_circular_mask
class DoasCalibData(CalibData):
"""Class containing DOAS calibration data.
Parameters
----------
tau_vec : ndarray
tau data vector for calibration data
cd_vec : ndarray
DOAS-CD data vector for calibration data
cd_vec_err : ndarray
Fit errors of DOAS-CDs
time_stamps : ndarray
array with datetime objects containing time stamps
(e.g. start acquisition) of calibration data
calib_fun : function
optimisation function used for fitting of calibration data
calib_coeffs : :obj:`list`, optional
optimisation parameters for calibration curve.
senscorr_mask : :obj:`ndarray` or :obj:`Img`, optional
sensitivity correction mask that was normalised relative to the
pixel position where the calibration data was retrieved (i.e.
position of DOAS FOV in case of DOAS calibration data, or image pixel
position, where cell calibration data was retrieved)
calib_id : str
calibration ID (e.g. "aa", "tau_on", "tau_off")
camera : Camera
camera object (not necessarily required). A camera can be assigned
in order to convert the FOV extend from pixel coordinates into
decimal degrees
fov : DoasFOV
information about position and shape of the FOV of the DOAS within
the camera images
"""
def __init__(self, tau_vec=None, cd_vec=None, cd_vec_err=None, time_stamps=None,
calib_fun=None, calib_coeffs=None, senscorr_mask=None,
polyorder=1, calib_id="", camera=None, fov=None):
super(DoasCalibData, self).__init__(tau_vec, cd_vec, cd_vec_err,
time_stamps, calib_fun,
calib_coeffs, senscorr_mask,
polyorder, calib_id, camera)
if tau_vec is None:
tau_vec = []
if cd_vec is None:
cd_vec = []
if cd_vec_err is None:
cd_vec_err = []
if time_stamps is None:
time_stamps = []
if calib_coeffs is None:
calib_coeffs = []
self.type = "doas"
if not isinstance(fov, DoasFOV):
fov = DoasFOV(camera)
self.fov = fov
def save_as_fits(self, save_dir=None, save_name=None,
overwrite_existing=True):
"""Save calibration data as FITS file.
Parameters
----------
save_dir : str
save directory, if None, the current working directory is used
save_name : str
filename of the FITS file (if None, use pyplis default naming)
"""
# hdulist containing calibration data and senscorr_mask
hdulist = self._prep_fits_save()
# add DOAS FOV information (if applicable)
hdulist.extend(self.fov.prep_hdulist())
# returns abspath of current wkdir if None
hdulist.writeto(self._prep_fits_savepath(save_dir, save_name),
clobber=overwrite_existing)
def load_from_fits(self, file_path):
"""Load stack object (fits).
Parameters
----------
file_path : str
file path of calibration data
"""
# loads senscorr_mask and calibration data (tau and cd vectors,
# timestamps)
hdu = super(DoasCalibData, self).load_from_fits(file_path)
self.fov.import_from_hdulist(hdu, first_idx=2)
hdu.close()
def plot_data_tseries_overlay(self, date_fmt=None, ax=None):
"""Plot overlay of tau and DOAS time series."""
if ax is None:
fig, ax = subplots(1, 1)
s1 = self.tau_tseries
s2 = self.cd_tseries
p1 = ax.plot(s1.index.to_pydatetime(), s1.values, "--xb",
label=r"$\tau$")
ax.set_ylabel("tau")
ax2 = ax.twinx()
p2 = ax2.plot(s2.index.to_pydatetime(), s2.values, "--xr",
label="DOAS CDs")
ax2.set_ylabel(r"$S_{%s}$ [cm$^{-2}$]" % SPECIES_ID)
ax.set_title("Time series overlay DOAS calib data")
try:
if date_fmt is not None:
ax.xaxis.set_major_formatter(DateFormatter(date_fmt))
except BaseException:
pass
ps = p1 + p2
labs = [l.get_label() for l in ps]
ax.legend(ps, labs, loc="best", fancybox=True, framealpha=0.5)
ax.grid()
rotate_xtick_labels(ax)
return (ax, ax2)
class DoasFOV(object):
"""Class for storage of FOV information."""
def __init__(self, camera=None):
self.search_settings = {}
self.img_prep = {}
self.roi_abs = None
self.img_shape_orig = None
self.camera = None
self.start_search = datetime(1900, 1, 1)
self.stop_search = datetime(1900, 1, 1)
self.corr_img = None
self.fov_mask_rel = None
self.result_pearson = {"cx_rel": nan,
"cy_rel": nan,
"rad_rel": nan,
"corr_curve": None}
self.result_ifr = {"popt": None,
"pcov": None}
if isinstance(camera, Camera):
self.camera = camera
self.img_shape_orig = (camera.pixnum_y, camera.pixnum_x)
@property
def method(self):
"""Return search method."""
try:
return self.search_settings["method"]
except KeyError:
raise ValueError("No information about FOV search available")
@property
def pyrlevel(self):
"""Return pyramide level at which FOV search was performed."""
try:
return self.img_prep["pyrlevel"]
except KeyError:
raise KeyError("Image preparation data is not available: %s"
% format_exc())
@property
def cx_rel(self):
"""Return center x coordinate of FOV (in relative coords)."""
if self.method == "ifr":
return self.result_ifr["popt"][1]
else:
return self.result_pearson["cx_rel"]
@property
def cy_rel(self):
"""Return center x coordinate of FOV (in relative coords)."""
if self.method == "ifr":
return self.result_ifr["popt"][2]
else:
return self.result_pearson["cy_rel"]
@property
def radius_rel(self):
"""Return radius of FOV (in relative coords).
:raises: TypeError if method == "ifr"
"""
if self.method == "ifr":
raise TypeError("Invalid value: method IFR does not have FOV "
"parameter radius, call self.popt for relevant "
"parameters")
return self.result_pearson["rad_rel"]
@property
def popt(self):
"""Return super gauss optimisation parameters (in relative coords).
:raises: TypeError if method == "pearson"
"""
if self.method == "pearson":
raise TypeError("Invalid value: method pearson does not have "
"FOV shape parameters, call self.radius to "
"retrieve disk radius")
return self.result_ifr["popt"]
@property
def x_abs(self):
return self.pos_abs[0]
@property
def y_abs(self):
return self.pos_abs[1]
@property
def sigma_x_abs(self):
if self.method == "pearson":
return self.radius_rel * 2**self.pyrlevel
return self.popt[3] * 2**self.pyrlevel
@property
def sigma_y_abs(self):
if self.method == "pearson":
return self.radius_rel * 2**self.pyrlevel
return (self.popt[3] / self.popt[4]) * 2 ** self.pyrlevel
@property
def pos_abs(self):
"""Return center coordinates of FOV (in absolute detector coords)."""
return self.pixel_position_center(True)
def _max_extend_rel(self):
"""Return maximum pixel extend of FOV.
For method pearson this is the radius (trivial), for an elliptical
super gauss (i.e. method IFR) this is the longer axis
"""
if self.method == "pearson":
return self.radius_rel
else:
return max([self.popt[3], self.popt[3] / self.popt[4]])
def pixel_extend(self, abs_coords=True):
"""Return pixel extend of FOV on image.
:param bool abs_coords: return value in absolute or relative
coordinates (considering pyrlevel and roi)
"""
ext_rel = self._max_extend_rel()
if not abs_coords:
return ext_rel
return ext_rel * 2**self.pyrlevel
def pixel_position_center(self, abs_coords=False):
"""Return pixel position of center of FOV.
:param bool abs_coords: return position in absolute or relative
coordinates (considering pyrlevel and roi)
:return:
- tuple, ``(cx, cy)``
"""
try:
cx, cy = self.cx_rel, self.cy_rel
except BaseException:
logger.warning("Could not access information about FOV position")
if not abs_coords:
return (cx, cy)
return map_coordinates_sub_img(cx, cy, self.roi_abs, self.pyrlevel,
inverse=True)
def fov_mask_abs(self, img_shape_orig=(), cam_id=""):
"""Convert the FOV mask to absolute detector coordinates.
The shape of the FOV mask (and the represented pixel coordinates)
depends on the image preparation settings of the :class:`ImgStack`
object which was used to identify the FOV.
Parameters
----------
img_shape_orig : tuple
image shape of original image data (can be extracted from an
unedited image)
cam_id : str
string ID of pyplis default camera (e.g. "ecII")
"""
if not len(img_shape_orig) == 2:
try:
info = get_camera_info(cam_id)
img_shape_orig = (int(info["pixnum_y"]), int(info["pixnum_x"]))
except BaseException:
raise IOError("Image shape could not be retrieved...")
mask = self.fov_mask_rel.astype(float64)
return sub_img_to_detector_coords(mask, img_shape_orig,
self.pyrlevel,
self.roi_abs).astype(bool)
# ==============================================================================
#
# def fov_mask(self, abs_coords = False):
# """Returns FOV mask for data access
#
# :param bool abs_coords: if False, mask is created in stack
# coordinates (i.e. corresponding to ROI and pyrlevel of stack).
# If True, the FOV parameters are converted into absolute
# detector coordinates such that they can be used for original
# images.
#
# """
# raise NotImplementedError
# =============================================================================
def import_from_hdulist(self, hdu, first_idx=0):
"""Import FOV information from FITS HDU list.
Parameters
----------
hdu : HDUList
HDU list containing a list of HDUs created using
:func:`prep_hdulist` starting at index :param:`first_idx`
(e.g. first_idx==2 if the method :func:`save_as_fits` from
the :class:`DoasCalibData` class is used, since the first 2
indices are used for saving the actual calibration data)
first_idx : int
index specifying the first entry of the FOV info in the
provided HDU list
"""
i = first_idx
try:
self.fov_mask_rel = hdu[i].data.byteswap().newbyteorder()
except BaseException:
logger.info("(Warning loading DOAS calib data): FOV mask not "
"available")
prep_keys = Img().edit_log.keys()
search_keys = DoasFOVEngine()._settings.keys()
for key, val in six.iteritems(hdu[i].header):
k = key.lower()
if k in prep_keys:
self.img_prep[k] = val
elif k in search_keys:
self.search_settings[k] = val
elif k in self.result_pearson.keys():
self.result_pearson[k] = val
try:
self.corr_img = Img(hdu[i + 1].data.byteswap().newbyteorder())
except BaseException:
logger.info("(Warning loading DOAS calib data): FOV search correlation "
"image not available")
self.roi_abs = hdu[i + 2].data["roi"].byteswap().newbyteorder()
try:
self.result_ifr["popt"] =\
hdu[i + 3].data["ifr_res"].byteswap().newbyteorder()
except BaseException:
logger.info("Failed to import array containing IFR optimisation "
" results from FOV search")
def prep_hdulist(self):
"""Prepare and return :class:`HDUList` object for saving as FITS."""
fov_mask = fits.ImageHDU(self.fov_mask_rel)
fov_mask.header.update(self.img_prep)
fov_mask.header.update(self.search_settings)
ifr_res = []
if self.method == "pearson":
rd = self.result_pearson
try:
fov_mask.header.update(cx_rel=rd["cx_rel"],
cy_rel=rd["cy_rel"],
rad_rel=rd["rad_rel"])
except BaseException:
logger.warning("Position of FOV (pearson method) not available")
elif self.method == "ifr":
ifr_res = self.result_ifr["popt"]
try:
hdu_cim = fits.ImageHDU(data=self.corr_img.img)
except BaseException:
hdu_cim = fits.ImageHDU()
logger.warning("FOV search correlation image not available")
roi = fits.BinTableHDU.from_columns([fits.Column(name="roi",
format="I",
array=self.roi_abs)])
col_ifr = fits.Column(name="ifr_res", format="D", array=ifr_res)
res_ifr = fits.BinTableHDU.from_columns([col_ifr])
return fits.HDUList([fov_mask, hdu_cim, roi, res_ifr])
def save_as_fits(self, **kwargs):
"""Save the fov as fits file.
Saves this object as DoasCalibData::
d = DoasCalibData(fov = self)
d.save_as_fits(**kwargs)
"""
d = DoasCalibData(fov=self)
d.save_as_fits(**kwargs)
def __str__(self):
s = "DoasFOV information\n------------------------\n"
s += "\nImg stack preparation settings\n............................\n"
for k, v in six.iteritems(self.img_prep):
s += "%s: %s\n" % (k, v)
s += "\nFOV search settings\n............................\n"
for k, v in six.iteritems(self.search_settings):
s += "%s: %s\n" % (k, v)
if self.method == "ifr":
s += "\nIFR search results \n.........................\n"
s += "\nSuper gauss fit optimised params\n"
popt = self.popt
for k in range(len(popt)):
s += "%s: %.3f\n" % (GAUSS_2D_PARAM_INFO[k], popt[k])
elif self.method == "pearson":
s += "\nPearson search results \n.......................\n"
for k, v in six.iteritems(self.result_pearson):
if not k == "corr_curve":
s += "%s: %s\n" % (k, v)
return s
def plot(self, ax=None):
"""Draw the current FOV position into the current correlation img."""
if ax is None:
fig, ax = subplots(1, 1, figsize=(12, 8))
else:
fig = ax.figure
img = self.corr_img.img
vmin, vmax = img.min(), img.max()
cmap = shifted_color_map(vmin, vmax, cmap=RdBu)
h, w = img.shape
disp = ax.imshow(img, vmin=vmin, vmax=vmax, cmap=cmap)
cb = fig.colorbar(disp, ax=ax, shrink=0.9)
cx, cy = self.pixel_position_center(1)
if self.method == "ifr":
popt = self.popt
cb.set_label(r"FOV fraction [$10^{-2}$ pixel$^{-1}$]")
xgrid, ygrid = mesh_from_img(img)
if len(popt) == 7:
ell = Ellipse(xy=(popt[1], popt[2]), width=popt[3],
height=popt[3] / popt[4], color="k", lw=2,
fc="lime", alpha=.5)
else:
ell = Ellipse(xy=(popt[1], popt[2]), width=popt[3],
height=popt[3] / popt[4], angle=popt[7],
color="k", lw=2, fc="lime", alpha=.5)
ax.add_artist(ell)
ax.axhline(self.cy_rel, ls="--", color="k")
ax.axvline(self.cx_rel, ls="--", color="k")
ax.get_xaxis().set_ticks([0, self.cx_rel, w])
ax.get_yaxis().set_ticks([0, self.cy_rel, h])
# ax.set_axis_off()
ax.set_title(r"Corr img (IFR), pos abs (x,y): (%d, %d), "
"lambda=%.1e"
% (cx, cy, self.search_settings["ifrlbda"]))
elif self.method == "pearson":
cb.set_label(r"Pearson corr. coeff.")
ax.autoscale(False)
c = Circle((self.cx_rel, self.cy_rel), self.radius_rel, ec="k",
lw=2, fc="lime", alpha=.5)
ax.add_artist(c)
ax.set_title("Corr img (pearson), pos abs (x,y): (%d, %d)"
% (cx, cy))
ax.get_xaxis().set_ticks([0, self.cx_rel, w])
ax.get_yaxis().set_ticks([0, self.cy_rel, h])
ax.axhline(self.cy_rel, ls="--", color="k")
ax.axvline(self.cx_rel, ls="--", color="k")
ax.set_xlabel("Pixel row")
ax.set_ylabel("Pixel column")
return ax
class DoasFOVEngine(object):
"""Engine to perform DOAS FOV search."""
def __init__(self, img_stack=None, doas_series=None, method="pearson",
**settings):
self._settings = {"method": "pearson",
"maxrad": 80,
"ifrlbda": 1e-6, # lambda val IFR
"g2dasym": True, # elliptic FOV
"g2dsuper": True, # super gauss fit (IFR)
"g2dcrop": True,
"g2dtilt": False,
"blur": 4,
"mergeopt": "average"}
self.data_merged = False
self.img_stack = img_stack
self.doas_series = doas_series
self.calib_data = DoasCalibData() # includes DoasFOV class
self.update_search_settings(**settings)
self.method = method
@property
def maxrad(self):
"""For Pearson method: maximum expected disk radius of FOV.
Note
----
this radius is considered independent of the current pyramid level
of the image stack, hence, if it is set 20 and the pyramid level of
the stack is 2, then, the FOV disk radius (in detector coords) may
be 80.
"""
return self._settings["maxrad"]
@maxrad.setter
def maxrad(self, val):
logger.info("Updating seeting for maximum radius of FOV, new value: %s"
% val)
self._settings["maxrad"] = int(val)
@property
def ifrlbda(self):
"""For IFR method: allow asymmetric 2d gauss fit."""
return self._settings["ifrlbda"]
@ifrlbda.setter
def ifrlbda(self, val):
self._settings["ifrlbda"] = val
@property
def g2dasym(self):
"""For IFR method: allow asymmetric 2d gauss fit."""
return self._settings["g2dasym"]
@g2dasym.setter
def g2dasym(self, val):
if val not in [True, False]:
raise ValueError("Invalid input value: require boolean")
self._settings["g2dasym"] = val
@property
def g2dsuper(self):
"""For IFR method: use supergauss parametrisation."""
return self._settings["g2dsuper"]
@g2dsuper.setter
def g2dsuper(self, val):
if val not in [True, False]:
raise ValueError("Invalid input value: require boolean")
self._settings["g2dsuper"] = val
@property
def g2dcrop(self):
"""For IFR method: crop gaussian FOV parametrisation at sigma."""
return self._settings["g2dcrop"]
@g2dcrop.setter
def g2dcrop(self, val):
if val not in [True, False]:
raise ValueError("Invalid input value: require boolean")
self._settings["g2dcrop"] = val
@property
def g2dtilt(self):
"""For IFR method: allow supergauss-fit to be tilted."""
return self._settings["g2dtilt"]
@g2dtilt.setter
def g2dtilt(self, val):
if val not in [True, False]:
raise ValueError("Invalid input value: require boolean")
self._settings["g2dtilt"] = val
@property
def blur(self):
"""Sigma of gaussian blurring filter applied to correlation image.
The filter is applied to the correlation image before finding the
position of the maximum correlation. This is only relevant for
method IFR, since this method parameterises the FOV by fitting a
2D Gaussian to the correlation image. Defaults to 4.
"""
return self._settings["blur"]
@blur.setter
def blur(self, val):
self._settings["blur"] = val
@property
def mergeopt(self):
"""Option for temporal merging of stack and DOAS vector.
Choose from ``average, nearest, interpolation``
"""
return self._settings["mergeopt"]
@mergeopt.setter
def mergeopt(self, val):
if val not in ["average", "nearest", "interpolation"]:
raise ValueError("Invalid method: choose from average, "
"nearest or interpolation")
self._settings["mergeopt"] = val
@property
def method(self):
"""Return method used for FOV search (e.g. pearson, ifr)."""
return self._settings["method"]
@method.setter
def method(self, val):
if val not in ["pearson", "ifr"]:
raise ValueError("Invalid method: choose from pearson or ifr")
self._settings["method"] = val
def update_search_settings(self, **settings):
"""Update current search settings.
:param **settings: keyword args to be updated (only
valid keys will be updated)
"""
for k, v in six.iteritems(settings):
if k in self._settings:
logger.info("Updating FOV search setting %s, new value: %s"
% (k, v))
self._settings[k] = v
@property
def doas_data_vec(self):
"""Return DOAS CD vector (values of ``self.doas_series``)."""
return self.doas_series.values
@property
def method(self):
"""Return current FOV search method."""
return self._settings["method"]
@method.setter
def method(self, value):
"""Return current FOV search method."""
if value not in ["ifr", "pearson"]:
raise ValueError("Invalid search method: choose from ifr or"
" pearson")
self._settings["method"] = value
def perform_fov_search(self, **settings):
"""High level method for automatic FOV search.
Uses the current settings (``self._settings``) to perform the
following steps:
1. Call :func:`merge_data`: Time merging of stack and DOAS
vector. This step is skipped if data was already merged within
this engine, i.e. if ``self.data_merged == True``
#. Call :func:`det_correlation_image`: Determination of
correlation image using ``self.method`` ('ifr' or 'pearson')
#. Call :func:`get_fov_shape`: Identification of FOV shape /
extend on image detector either using circular disk approach
(if ``self.method == 'pearson'``) or 2D (super) Gauss fit
(if ``self.method == 'ifr'``).
All relevant results are written into ``self.calib_data`` (which
includes :class:`DoasFOV` object)
"""
self.calib_data = DoasCalibData() # includes DoasFOV class
self.update_search_settings(**settings)
self.merge_data(merge_type=self._settings["mergeopt"])
self.det_correlation_image(search_type=self.method)
self.get_fov_shape()
self.calib_data.fov.search_settings = deepcopy(self._settings)
return self.calib_data
def run_fov_fine_search(self, img_list, doas_series, extend_fac=3,
**settings):
"""Get FOV position in full resolution.
Note
----
1. Only works if FOV search (i.e. :func:`perform_fov_search`) was
already performed.
#. This method requires some time as it needs to
recompute a cropped image stack in full resolution from the
provided img_list.
#. This method deletes the current image stack in this object.
#. Uses the same search settings as set in this class (i.e. method,
etc.)
Parameters
----------
img_list : BaseImgList
image list used to calculate cropped stack
doas_series : DoasResults
original DOAS time series (i.e. not merged in time with image
data, needs to be provided since the one stored within this
class is modified during the first FOV search)
extend_fac : int
factor determining crop ROI based on the current pixel extend
of the FOV
Returns
-------
DoasFOVEngine
new instance containing results from fine search
"""
self.update_search_settings(**settings)
try:
extend = self.calib_data.fov.pixel_extend(abs_coords=True)
(pos_x, pos_y) = self.calib_data.fov.pixel_position_center(abs_coords=True) # noqa: E501
self.img_stack = None # make space for new stack
# create ROI around center position of FOV
roi = [pos_x - extend_fac * extend, pos_y - extend_fac * extend,
pos_x + extend_fac * extend, pos_y + extend_fac * extend]
self.img_stack = stack = img_list.make_stack(pyrlevel=0,
roi_abs=roi)
s = DoasFOVEngine(stack, self.doas_series, **self._settings)
calib = s.perform_fov_search()
calib.fit_calib_data()
return s
except Exception as e:
raise Exception("Failed to perform fine search: %s" % repr(e))
def merge_data(self, merge_type=None):
"""Merge stack data and DOAS vector in time.
Wrapper for :func:`merge_with_time_series` of :class:`ImgStack`
Note
----
Current data (i.e. ``self.img_stack`` and ``self.doas_series``)
will be overwritten if merging succeeds.
Parameters
----------
merge_type : :obj:`str`, optional
one of the available merge types (``average``, ``interpolation``,
``nearest``), see :attr:`mergeopt` for valid options
Raises
------
RuntimeError
if merging of data fails
"""
if self.data_merged:
logger.info("Data merging unncessary, img stack and DOAS vector are "
"already merged in time")
return
if merge_type is None:
merge_type = self._settings["mergeopt"]
new_stack, new_doas_series = self.img_stack.merge_with_time_series(
self.doas_series,
method=merge_type)
if len(new_doas_series) == new_stack.shape[0]:
self.img_stack = new_stack
self.doas_series = new_doas_series
self._settings["mergeopt"] = merge_type
self.data_merged = True
return
raise RuntimeError("Temporal merging of image and DOAS data failed...")
def det_correlation_image(self, search_type="pearson", **kwargs):
"""Determine correlation image.
Determines correlation image either using IFR or Pearson method.
Results are written into ``self.calib_data.fov`` (:class:`DoasFOV`)
:param str search_type: updates current search type, available types
``["pearson", "ifr"]``
"""
if not self.img_stack.shape[0] == len(self.doas_series):
raise ValueError("DOAS correlation image object could not be "
"determined: inconsistent array lengths, please "
"perform timemerging first")
self.update_search_settings(method=search_type, **kwargs)
if search_type == "pearson":
corr_img, _ = self._det_correlation_image_pearson(
**self._settings)
elif search_type == "ifr":
corr_img, _ = self._det_correlation_image_ifr_lsmr(
**self._settings)
else:
raise ValueError("Invalid search type %s: choose from "
"pearson or ifr" % search_type)
corr_img = Img(corr_img,
pyrlevel=self.img_stack.img_prep["pyrlevel"])
corr_img.add_gaussian_blurring(self._settings["blur"])
# corr_img.pyr_up(self.img_stack.img_prep["pyrlevel"])
self.calib_data.fov.corr_img = corr_img
self.calib_data.fov.img_prep = self.img_stack.img_prep
self.calib_data.fov.roi_abs = self.img_stack.roi_abs
self.calib_data.fov.start_search = self.img_stack.start
self.calib_data.fov.stop_search = self.img_stack.stop
if self.img_stack.img_prep.get("is_aa", False):
cid = "AA"
else:
cid = self.img_stack.stack_id
self.calib_data.calib_id = cid
return corr_img
def _det_correlation_image_pearson(self, **kwargs):
"""Determine correlation image based on pearson correlation.
:returns: - correlation image (pix wise value of pearson corr coeff)
"""
h, w = self.img_stack.shape[1:]
corr_img = zeros((h, w), dtype=float64)
corr_img_err = zeros((h, w), dtype=float64)
cd_vec = self.doas_series.values
exp = int(10**exponent(h) / 4.0)
for i in range(h):
try:
if i % exp == 0:
logger.info("FOV search: current img row (y): " + str(i))
except BaseException:
pass
for j in range(w):
# get series from stack at current pixel
corr_img[i, j], corr_img_err[i, j] = pearsonr(
self.img_stack.stack[:, i, j], cd_vec)
self._settings["method"] = "pearson"
return corr_img, corr_img_err
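# ------------------------------------------------------------------------
# Minimal standalone sketch (assumptions only, not pyplis code) of the
# pixel-wise Pearson approach used above: every pixel of the stack provides
# a time series that is correlated with the DOAS CD vector.
#
#     import numpy as np
#     from scipy.stats import pearsonr
#
#     nimg, h, w = 50, 8, 10
#     stack = np.random.rand(nimg, h, w)                    # toy stack (t, y, x)
#     cd_vec = stack[:, 3, 4] + 0.1 * np.random.rand(nimg)  # toy DOAS series
#
#     corr_img = np.zeros((h, w))
#     for i in range(h):
#         for j in range(w):
#             corr_img[i, j], _ = pearsonr(stack[:, i, j], cd_vec)
#     # the maximum of corr_img marks the candidate FOV centre
# ------------------------------------------------------------------------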
def _det_correlation_image_ifr_lsmr(self, ifrlbda=1e-6, **kwargs):
"""Apply LSMR algorithm to identify the FOV.
:param float ifrlbda: tolerance parameter lambda
"""
# some input data size checking
(m,) = self.doas_data_vec.shape
(m2, ny, nx) = self.img_stack.shape
if m != m2:
raise ValueError("Inconsistent array lengths, please perform time "
"merging of image stack and doas vector first")
# construct H-matrix through reshaping image stack
# h_matrix = transpose(self.img_stack.stack, (2,0,1)).reshape(m, nx*ny)
h_matrix = self.img_stack.stack.reshape(m, nx * ny)
# and one-vector
h_vec = ones((m, 1), dtype=h_matrix.dtype)
# and stacking in the end
h = column_stack((h_vec, h_matrix))
# solve using LSMR regularisation
a = lsmr(h, self.doas_data_vec, atol=ifrlbda, btol=ifrlbda)
c = a[0]
# separate offset and image
lsmr_offset = c[0]
lsmr_image = c[1:].reshape(ny, nx) / max(c[1:])
# THIS NORMALISATION IS NEW
# lsmr_image = lsmr_image / abs(lsmr_image).max()
self._settings["method"] = "ifr"
self._settings["ifrlbda"] = ifrlbda
return lsmr_image, lsmr_offset
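# ------------------------------------------------------------------------
# Standalone sketch (assumptions only, not pyplis code) of the IFR / LSMR
# formulation used above: each DOAS CD value is modelled as an offset plus
# a weighted sum of all stack pixels; the weights, reshaped to an image,
# localise the FOV.
#
#     import numpy as np
#     from scipy.sparse.linalg import lsmr
#
#     m, ny, nx = 60, 8, 10
#     stack = np.random.rand(m, ny, nx)
#     cd_vec = 2.0 * stack[:, 4, 5] + 0.3           # toy DOAS vector
#
#     h_matrix = stack.reshape(m, ny * nx)
#     h = np.column_stack((np.ones(m), h_matrix))   # prepend offset column
#     c = lsmr(h, cd_vec, atol=1e-6, btol=1e-6)[0]
#     offset, weights = c[0], c[1:].reshape(ny, nx)
#     weights = weights / weights.max()             # normalised FOV weight image
# ------------------------------------------------------------------------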
def get_fov_shape(self, **settings):
"""Find shape of FOV based on correlation image.
Search pixel coordinate of highest correlation in
``self.calib_data.fov.corr_img`` (using :func:`get_img_maximum`) and
based on that finds FOV shape either using disk approach (if
``self.method == 'pearson'``) calling :func:`fov_radius_search` or
using 2D Gauss fit (if ``self.method == 'ifr'``) calling
:func:`fov_gauss_fit`. Results are written into ``self.calib_data.fov``
(:class:`DoasFOV` object)
:param **settings: update current settings (keyword args passed
to :func:`update_search_settings`)
"""
if not isinstance(self.calib_data.fov.corr_img, Img):
raise Exception("Could not access correlation image")
if self.method == "pearson":
cy, cx = get_img_maximum(self.calib_data.fov.corr_img.img)
logger.info("Start radius search in stack around x/y: %s/%s" % (cx, cy))
(radius,
corr_curve,
tau_vec,
cd_vec,
fov_mask) = self.fov_radius_search(cx, cy)
if not radius > 0:
raise ValueError("Pearson FOV search failed")
self.calib_data.fov.result_pearson["cx_rel"] = cx
self.calib_data.fov.result_pearson["cy_rel"] = cy
self.calib_data.fov.result_pearson["rad_rel"] = radius
self.calib_data.fov.result_pearson["corr_curve"] = corr_curve
self.calib_data.fov.fov_mask_rel = fov_mask
self.calib_data.tau_vec = tau_vec.astype(float64)
self.calib_data.cd_vec = cd_vec.astype(float64)
try:
self.calib_data.cd_vec_err = self.doas_series.fit_errs
except BaseException:
pass
self.calib_data.time_stamps = self.img_stack.time_stamps
return
elif self.method == "ifr":
# the fit is performed in absolute detector coordinates
# corr_img_abs = Img(self.fov.corr_img.img).pyr_up(pyrlevel).img
popt, pcov, fov_mask = self._fov_gauss_fit(
self.calib_data.fov.corr_img,
**self._settings)
tau_vec = self.convolve_stack_fov(fov_mask)
self.calib_data.fov.result_ifr["popt"] = popt
self.calib_data.fov.result_ifr["pcov"] = pcov
self.calib_data.fov.fov_mask_rel = fov_mask
self.calib_data.tau_vec = tau_vec
self.calib_data.cd_vec = self.doas_data_vec
try:
self.calib_data.cd_vec_err = self.doas_series.fit_errs
except BaseException:
pass
self.calib_data.time_stamps = self.img_stack.time_stamps
else:
raise ValueError("Invalid search method...")
def fov_radius_search(self, cx, cy):
"""Search the FOV disk radius around center coordinate.
The search varies the radius around the center coordinate and
extracts image data time series from average values of all pixels
falling into the current disk. These time series are correlated
with spectrometer data to find the radius showing highest
correlation.
:param int cx: pixel x coordinate of center position
:param int cy: pixel y coordinate of center position
"""
stack = self.img_stack
cd_vec = self.doas_series.values
if not len(cd_vec) == stack.shape[0]:
raise ValueError("Mismatch in lengths of input arrays")
h, w = stack.shape[1:]
pyrlevel = stack.pyrlevel
# find maximum radius (around CFOV pos) which still fits into the image
# shape of the stack used to find the best radius
max_rad = min([cx, cy, w - cx, h - cy])
crad = int(self.maxrad * 2**(-pyrlevel))
if crad < max_rad:
max_rad_search = crad
else:
max_rad_search = max_rad
self.maxrad = int(max_rad * 2**(pyrlevel))
# radius array
radii = arange(1, max_rad_search + 1, 1)
logger.info("Maximum radius at pyramid level %d: %s"
% (pyrlevel, max_rad_search))
# some variable initialisations
coeffs, coeffs_err = [], []
max_corr = 0
tau_vec = None
mask = zeros((h, w)).astype(float64)
radius = 0
# loop over all radii, get tauSeries at each, (merge) and determine
# correlation coefficient
for r in radii:
# now get mean values of all images in stack in circular ROI around
# CFOV
tau_series, m = stack.get_time_series(cx, cy, radius=r)
tau_dat = tau_series.values
coeff, err = pearsonr(tau_dat, cd_vec)
logger.info("Rad: {} (R: {:.4f})".format(r, coeff))
coeffs.append(coeff)
coeffs_err.append(err)
# and append correlation coefficient to results
if coeff > max_corr:
radius = r
mask = m.astype(float64)
max_corr = coeff
tau_vec = tau_dat
corr_curve = Series(asarray(coeffs, dtype=float), radii)
return radius, corr_curve, tau_vec, cd_vec, mask
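# ------------------------------------------------------------------------
# Minimal sketch (assumptions only, not pyplis code) of the disk-radius
# search above: for increasing radii around the candidate centre, average
# all pixels inside the disk and keep the radius whose mean time series
# correlates best with the DOAS vector.
#
#     import numpy as np
#     from scipy.stats import pearsonr
#
#     def disk_mask(h, w, cx, cy, r):
#         yy, xx = np.ogrid[0:h, 0:w]
#         return (xx - cx) ** 2 + (yy - cy) ** 2 <= r ** 2
#
#     nimg, h, w = 60, 16, 16
#     stack = np.random.rand(nimg, h, w)
#     cd_vec = stack[:, 8, 8] + 0.05 * np.random.rand(nimg)
#     cx, cy = 8, 8
#
#     best_r, best_coeff = 0, -1.0
#     for r in range(1, min(cx, cy, w - cx, h - cy) + 1):
#         mask = disk_mask(h, w, cx, cy, r)
#         tau = stack[:, mask].mean(axis=1)         # mean series inside the disk
#         coeff, _ = pearsonr(tau, cd_vec)
#         if coeff > best_coeff:
#             best_r, best_coeff = r, coeff
# ------------------------------------------------------------------------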
# define IFR model function (Super-Gaussian)
def _fov_gauss_fit(self, corr_img, g2dasym=True, g2dsuper=True,
g2dcrop=True, g2dtilt=False, blur=4, **kwargs):
"""Apply 2D gauss fit to correlation image.
Parameters
----------
corr_img : Img
correlation image
g2dasym : bool
allow for asymmetric shape (sigmax != sigmay), defaults to True
g2dsuper: bool
allow for super-Gauss fit, defaults to True
g2dcrop : bool
if True, set data points outside the 1/e amplitude contour to 0,
defaults to True
g2dtilt : bool
allow the Gaussian to be tilted with respect to the x/y axes
blur : int
width of gaussian smoothing kernel convolved with correlation
image in order to identify position of maximum
Returns
-------
tuple
3-element tuple containing
- array (popt): optimised multi-gauss parameters
- 2d array (pcov): estimated covariance of popt
- 2d array (fov_mask): FOV mask obtained from the fit
"""
img = corr_img.img
h, w = img.shape
xgrid, ygrid = mesh_from_img(img)
# apply maximum of filtered image to initialise 2D gaussian fit
(cy, cx) = get_img_maximum(img)
maxrad = self.maxrad * 2**(-corr_img.pyrlevel)
mask = make_circular_mask(h, w, cx, cy, maxrad).astype(float)
img = img * mask
# constrain fit, if requested
(popt, pcov, fov_mask) = gauss_fit_2d(img, cx, cy, g2dasym,
g2d_super_gauss=g2dsuper,
g2d_crop=g2dcrop,
g2d_tilt=g2dtilt, **kwargs)
return (popt, pcov, fov_mask)
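# ------------------------------------------------------------------------
# Self-contained sketch (assumptions only; the model function, synthetic
# data and initial guesses below are illustrative and not the gauss_fit_2d
# implementation used above) of fitting a 2D super-Gaussian to a
# correlation image with scipy.optimize.curve_fit.
#
#     import numpy as np
#     from scipy.optimize import curve_fit
#
#     def supergauss_2d(xy, amp, x0, y0, sx, sy, expo, offset):
#         x, y = xy
#         r = ((x - x0) / sx) ** 2 + ((y - y0) / sy) ** 2
#         return (amp * np.exp(-r ** expo) + offset).ravel()
#
#     ny, nx = 64, 64
#     y, x = np.mgrid[0:ny, 0:nx]
#     truth = (1.0, 30.0, 35.0, 6.0, 4.0, 1.5, 0.05)
#     img = supergauss_2d((x, y), *truth).reshape(ny, nx)
#     img = img + np.random.normal(scale=0.01, size=img.shape)
#
#     p0 = (img.max(), nx / 2.0, ny / 2.0, 5.0, 5.0, 1.0, 0.0)
#     popt, pcov = curve_fit(supergauss_2d, (x, y), img.ravel(), p0=p0)
# ------------------------------------------------------------------------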
# function convolving the image stack with the obtained FOV distribution
def convolve_stack_fov(self, fov_mask):
"""Normalize fov image and convolve stack.
:returns: - stack time series vector within FOV
"""
# normalize fov_mask
normsum = fov_mask.sum()
fov_mask_norm = fov_mask / normsum
# convolve with image stack
# stack_data_conv = transpose(self.stac, (2,0,1)) * fov_fitted_norm
stack_data_conv = self.img_stack.stack * fov_mask_norm
return stack_data_conv.sum((1, 2))
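# ------------------------------------------------------------------------
# Tiny sketch (assumptions only, not pyplis code) of the FOV convolution
# above: normalise the FOV mask to unit sum and take the weighted spatial
# average of every stack image, yielding one tau value per time stamp.
#
#     import numpy as np
#
#     nimg, h, w = 20, 8, 10
#     stack = np.random.rand(nimg, h, w)
#     fov_mask = np.zeros((h, w))
#     fov_mask[3:6, 4:7] = 1.0
#     fov_mask_norm = fov_mask / fov_mask.sum()
#     tau_vec = (stack * fov_mask_norm).sum(axis=(1, 2))   # one value per image
# ------------------------------------------------------------------------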
# OLD STUFF
# =============================================================================
# class DoasCalibDataOLD(object):
# """Class containing DOAS calibration data
#
# Parameters
# ----------
# tau_vec : ndarray
# tau data vector for calibration data
# cd_vec : ndarray
# DOAS-CD data vector for calibration data
# cd_vec_err : ndarray
# Fit errors of DOAS-CDs
# time_stamps : ndarray
# array with datetime objects containing time stamps
# (e.g. start acquisition) of calibration data
# calib_id : str
# calibration ID (e.g. "aa", "tau_on", "tau_off")
# camera : Camera
# camera object (not necessarily required). A camera can be assigned
# in order to convert the FOV extend from pixel coordinates into
# decimal degrees
#
# """
# def __init__(self, tau_vec=[], cd_vec=[], cd_vec_err=[],
# time_stamps=[], calib_id="", fov=None, camera=None,
# polyorder=1):
#
# #tau data vector within FOV
# self.tau_vec = asarray(tau_vec).astype(float64)
# #doas data vector
# self.cd_vec = asarray(cd_vec).astype(float64)
# self.cd_vec_err = asarray(cd_vec_err).astype(float64)
#
# self._calib_funs = CalibFuns()
# self.time_stamps = time_stamps
# self.calib_id = calib_id
#
# self.camera = None
#
# if not isinstance(fov, DoasFOV):
# fov = DoasFOV(camera)
# self.fov = fov
#
# self._poly = None
# self._cov = None
# self._polyorder = None
# self._allowed_polyorders = [1,2,3]
# self.polyorder = polyorder
#
# if isinstance(camera, Camera):
# self.camera = Camera
#
# @property
# def start(self):
# """Start time of calibration data (datetime)"""
# try:
# return self.time_stamps[0]
# except TypeError:
# return self.fov.start_search
#
# @property
# def stop(self):
# """Stop time of calibration data (datetime)"""
# try:
# return self.time_stamps[-1]
# except TypeError:
# return self.fov.stop_search
#
# @property
# def calib_id_str(self):
# """String for calibration ID"""
# idx=0
# try:
# if self.calib_id.split("_")[1].lower() == "aa":
# idx=1
# try:
# return CALIB_ID_STRINGS[self.calib_id.split("_")[idx]]
# except:
# return self.calib_id.split("_")[idx]
# except:
# return ""
#
# @property
# def polyorder(self):
# """Current order of fit polynomial"""
# return self._polyorder
#
# @polyorder.setter
# def polyorder(self, val):
# if not val in self._allowed_polyorders:
# raise ValueError("Invalid value for polyorder: %.1f. "
# "Choose from %s"
# % (val, self._allowed_polyorders))
# self._polyorder = val
# if isinstance(self._poly, poly1d):
# logger.warning("Polynomial order was changed and changes were not yet "
# "applied. Please call "
# "fit_calib_polynomial to retrieve the calibration "
# "polynomial for the new settings")
#
# @property
# def poly(self):
# """Calibration polynomial"""
# if not isinstance(self._poly, poly1d):
# self.fit_calib_polynomial()
# return self._poly
#
# @poly.setter
# def poly(self, value):
# if not isinstance(value, poly1d):
# raise ValueError("Need numpy poly1d object...")
# self._poly=value
#
# @property
# def cov(self):
# """Covariance matriy of calibration polynomial"""
# if not isinstance(self._cov, ndarray):
# self.fit_calib_polynomial()
# return self._cov
#
# @cov.setter
# def cov(self, value):
# raise IOError("Covariance matrix of calibration polynomial cannot "
# "be set manually, please call function "
# "fit_calib_polynomial")
#
# @property
# def coeffs(self):
# """Coefficients of current calibration polynomial"""
# return self.poly.coeffs
#
# @property
# def slope(self):
# """Slope of current calib curve"""
# if self.polyorder > 1:
# logger.warning("Order of calibration polynomial > 1: use value of slope "
# "with care (i.e. also check curvature coefficients of "
# "polynomial")
#
# return self.coeffs[-2]
#
# @property
# def slope_err(self):
# """Slope error of current calib curve"""
# if self.polyorder > 1:
# logger.warning("Order of calibration polynomial > 1: use slope error with "
# "care")
# return sqrt(self.cov[-2][-2])
#
# @property
# def y_offset(self):
# """Y-axis offset of calib curve"""
# return self.coeffs[-1]
#
# @property
# def y_offset_err(self):
# """Error of y axis offset of calib curve"""
# return sqrt(self.cov[-1][-1])
#
# @property
# def cd_tseries(self):
# """Pandas Series object of doas data"""
# return Series(self.cd_vec, self.time_stamps)
#
# @property
# def tau_tseries(self):
# """Pandas Series object of tau data"""
# return Series(self.tau_vec, self.time_stamps)
#
# @property
# def tau_range(self):
# """Range of tau values extended by 5%
#
# Returns
# -------
# tuple
# 2-element tuple, containing
#
# - float, tau_min: lower end of tau range
# - float, tau_max: upper end of tau range
# """
# tau = self.tau_vec
# taumin, taumax = tau.min(), tau.max()
# if taumin > 0:
# taumin = 0
# add = (taumax - taumin) * 0.05
# return taumin - add, taumax + add
#
# @property
# def cd_range(self):
# """Range of DOAS cd values extended by 5%"""
# cds = self.cd_vec
# cdmin, cdmax = cds.min(), cds.max()
# if cdmin > 0:
# cdmin = 0
# add = (cdmax - cdmin) * 0.05
# return cdmin - add, cdmax + add
#
# @property
# def residual(self):
# """Residual of calibration curve"""
# return self.poly(self.tau_vec) - self.tau_vec
#
# def has_calib_data(self):
# """Checks if calibration data is available"""
# if not all([len(x) > 0 for x in [self.cd_vec, self.tau_vec]]):
# return False
# if not len(self.tau_vec) == len(self.cd_vec):
# return False
# return True
#
# def fit_calib_polynomial(self, polyorder=None, weighted=True,
# weights_how="abs",
# through_origin=False,
# plot=False):
# """Fit calibration polynomial to current data
#
# Parameters
# ----------
# polyorder : :obj:`int`, optional
# update current polyorder
# weighted : bool
# performs weighted fit based on DOAS errors in ``cd_vec_err``
# (if available), defaults to True
# weights_how : str
# use "rel" if relative errors are supposed to be used (i.e.
# w=CD_sigma / CD) or "abs" if absolute error is supposed to be
# used (i.e. w=CD_sigma).
# through_origin : bool
# if True, the fit is forced to cross the coordinate origin (
# done by adding data points)
# plot : bool
# If True, the calibration curve and the polynomial are plotted
#
# Returns
# -------
# poly1d
# calibration polynomial
# """
# if not weights_how in ["rel", "abs"]:
# raise IOError("Invalid input for parameter weights_how:"
# "Use rel for relative errors or abs for absolute"
# "errors for calculation of weights")
# if not self.has_calib_data():
# raise ValueError("Calibration data is not available")
# try:
# self.polyorder = polyorder
# except:
# pass
# # ======================================================================
# # if polyorder is None:
# # polyorder = self.polyorder
# #
# # ======================================================================
# if sum(isnan(self.tau_vec)) + sum(isnan(self.cd_vec)) > 0:
# raise ValueError("Encountered nans in data")
#
# exp = exponent(self.cd_vec.max())
# yerr = ones(len(self.cd_vec))
# yerr_abs = True
# if weighted:
# if not len(self.cd_vec) == len(self.cd_vec_err):
# logger.warning("Could not perform weighted calibration fit: "
# "Length mismatch between DOAS data vector"
# " and corresponding error vector")
# elif sum(self.cd_vec_err) == 0:
# logger.warning("Could not performed weighted calibration fit: "
# "Values of DOAS fit errors are 0. Do you have pydoas "
# "installed?")
# else:
# try:
# if weights_how == "abs":
# yerr = self.cd_vec_err / 10**exp
# else:
# yerr = self.cd_vec_err / self.cd_vec
# yerr_abs = False
# #ws = ws / max(ws)
# except:
# logger.warning("Failed to calculate weights")
# tau_vals = self.tau_vec
# cds = self.cd_vec / 10**exp
#
# fun = self._calib_funs.get_poly(self.polyorder, through_origin)
#
# coeffs, cov = curve_fit(fun, tau_vals.astype(float64),
# cds.astype(float64),
# sigma=yerr.astype(float64),
# absolute_sigma=yerr_abs)
# if through_origin:
# coeffs = append(coeffs, 0.0)
# # ======================================================================
# # if through_origin:
# # num = len(tau_vals)
# # tau_vals = concatenate([tau_vals, zeros(num)])
# # cds = concatenate([cds, zeros(num)])
# # ws = concatenate([ws, ones(num)])
# #
# # ======================================================================
# # ======================================================================
# # coeffs, cov = polyfit(tau_vals, cds,
# # polyorder, w=ws, cov=True)
# # ======================================================================
# #self.polyorder = polyorder
# #return (fun, coeffs, cov, tau_vals, cds, yerr, yerr_abs)
# self.poly = poly1d(coeffs * 10**exp)
# self._cov = cov * 10**(2*exp)
# if plot:
# self.plot()
# return self.poly
#
# def save_as_fits(self, save_dir=None, save_name=None):
# """Save calibration data as FITS file
#
# Parameters
# ----------
# save_dir : str
# save directory, if None, the current working directory is used
# save_name : str
# filename of the FITS file (if None, use pyplis default naming)
# """
# if not len(self.cd_vec) == len(self.tau_vec):
# raise ValueError("Could not save calibration data, mismatch in "
# " lengths of data arrays")
# if not len(self.time_stamps) == len(self.cd_vec):
# self.time_stamps = asarray([datetime(1900,1,1)] *\
# len(self.cd_vec))
# #returns abspath of current wkdir if None
# save_dir = abspath(save_dir)
# if not isdir(save_dir): #save_dir is a file path
# save_name = basename(save_dir)
# save_dir = dirname(save_dir)
# if save_name is None:
# save_name = "doascalib_id_%s_%s_%s_%s.fts" %(\
# self.calib_id, self.start.strftime("%Y%m%d"),\
# self.start.strftime("%H%M"), self.stop.strftime("%H%M"))
# else:
# save_name = save_name.split(".")[0] + ".fts"
# fov_mask = fits.PrimaryHDU()
# fov_mask.data = self.fov.fov_mask_rel
# fov_mask.header.update(self.fov.img_prep)
# fov_mask.header.update(self.fov.search_settings)
# fov_mask.header["calib_id"] = self.calib_id
# fov_mask.header.append()
#
# ifr_res = []
# if self.fov.method == "pearson":
# rd = self.fov.result_pearson
# try:
# fov_mask.header.update(cx_rel=rd["cx_rel"],
# cy_rel=rd["cy_rel"],
# rad_rel=rd["rad_rel"])
# except:
# logger.warning("Position of FOV (pearson method) not available")
#
# elif self.fov.method == "ifr":
# ifr_res = self.fov.result_ifr["popt"]
#
# try:
# hdu_cim = fits.ImageHDU(data = self.fov.corr_img.img)
# except:
# hdu_cim = fits.ImageHDU()
# logger.warning("FOV search correlation image not available")
#
# tstamps = [x.strftime("%Y%m%d%H%M%S%f") for x in self.time_stamps]
# col1 = fits.Column(name="time_stamps", format="25A", array=tstamps)
# col2 = fits.Column(name="tau_vec", format="D", array=self.tau_vec)
# col3 = fits.Column(name="cd_vec", format="D", array=self.cd_vec)
# col4 = fits.Column(name="cd_vec_err", format="D",
# array=self.cd_vec_err)
#
#
# cols = fits.ColDefs([col1, col2, col3, col4])
# arrays = fits.BinTableHDU.from_columns(cols)
#
# roi = fits.BinTableHDU.from_columns([fits.Column(name="roi",
# format="I",
# array=self.fov.roi_abs)])
# col_ifr = fits.Column(name="ifr_res", format="D", array=ifr_res)
# res_ifr = fits.BinTableHDU.from_columns([col_ifr])
# #==============================================================================
# # col1 = fits.Column(name = 'target', format = '20A', array=a1)
# # col2 = fits.Column(name = 'V_mag', format = 'E', array=a2)
# #==============================================================================
#
# hdulist = fits.HDUList([fov_mask, hdu_cim, arrays, roi, res_ifr])
# fpath = join(save_dir, save_name)
# try:
# remove(fpath)
# except:
# pass
# hdulist.writeto(fpath)
#
# def load_from_fits(self, file_path):
# """Load stack object (fits)
#
# Parameters
# ----------
# file_path : str
# file path of calibration data
# """
# if not exists(file_path):
# raise IOError("DoasCalibData object could not be loaded, "
# "path does not exist")
# hdu = fits.open(file_path)
# try:
# self.fov.fov_mask_rel = hdu[0].data.byteswap().newbyteorder()
# except:
# print ("(Warning loading DOAS calib data): FOV mask not "
# "available")
#
# prep_keys = Img().edit_log.keys()
# search_keys = DoasFOVEngine()._settings.keys()
# self.calib_id = hdu[0].header["calib_id"]
# for key, val in hdu[0].header.iteritems():
# k = key.lower()
# if k in prep_keys:
# self.fov.img_prep[k] = val
# elif k in search_keys:
# self.fov.search_settings[k] = val
# elif k in self.fov.result_pearson.keys():
# self.fov.result_pearson[k] = val
#
# try:
# self.fov.corr_img = Img(hdu[1].data.byteswap().newbyteorder())
# except:
# print ("(Warning loading DOAS calib data): FOV search "
# "correlation image not available")
# try:
# times = hdu[2].data["time_stamps"].byteswap().newbyteorder()
# self.time_stamps = [datetime.strptime(x, "%Y%m%d%H%M%S%f")
# for x in times]
# except:
# print ("(Warning loading DOAS calib data): Failed to import "
# "time stamps")
# try:
# self.tau_vec = hdu[2].data["tau_vec"].byteswap().newbyteorder()
# except:
# print "Failed to import calibration tau data vector"
# try:
# self.cd_vec = hdu[2].data["cd_vec"].byteswap().newbyteorder()
# except:
# print "Failed to import calibration doas data vector"
# try:
# self.cd_vec_err =\
# hdu[2].data["cd_vec_err"].byteswap().newbyteorder()
# except:
# print "Failed to import DOAS fit error information in calib data"
# try:
# self.fov.result_ifr["popt"] =\
# hdu[4].data["ifr_res"].byteswap().newbyteorder()
# except:
# print ("Failed to import array containing IFR optimisation "
# " results from FOV search")
# self.fov.roi_abs = hdu[3].data["roi"].byteswap().newbyteorder()
#
# @property
# def _poly_str(self):
# """Return custom string representation of polynomial"""
# exp = exponent(self.poly.coeffs[0])
# p = poly1d(round(self.poly / 10**(exp - 2))/10**2)
# s = "(%s)E%+d" %(p, exp)
# return s.replace("x", r"$\tau$")
#
# def plot(self, add_label_str="", shift_yoffset=False, ax=None,
# **kwargs):
# """Plot calibration data and fit result
#
# Parameters
# ----------
# add_label_str : str
# additional string added to label of plots for legend
# shift_yoffset : bool
# if True, the data is plotted without y-offset
# ax :
# matplotlib axes object, if None, a new one is created
# """
# if not "color" in kwargs:
# kwargs["color"] = "b"
#
# if ax is None:
# fig, ax = subplots(1,1, figsize=(10,8))
#
# taumin, taumax = self.tau_range
# x = linspace(taumin, taumax, 100)
#
# cds = self.cd_vec
# cds_poly = self.poly(x)
# if shift_yoffset:
# try:
# cds -= self.y_offset
# cds_poly -= self.y_offset
# except:
# logger.warning("Failed to subtract y offset")
#
# ax.plot(self.tau_vec, cds, ls="", marker=".",
# label="Data %s" %add_label_str, **kwargs)
# try:
# ax.errorbar(self.tau_vec, cds, yerr=self.cd_vec_err,
# marker="None", ls=" ", c="#b3b3b3")
# except:
# logger.warning("No DOAS-CD errors available")
# try:
# ax.plot(x, cds_poly, ls="-", marker="",
# label="Fit result", **kwargs)
#
# except TypeError:
# print "Calibration poly probably not fitted"
#
# ax.set_title("DOAS calibration data, ID: %s" %self.calib_id_str)
# ax.set_ylabel(r"$S_{%s}$ [cm$^{-2}$]" %SPECIES_ID)
# ax.set_xlabel(r"$\tau_{%s}$" %self.calib_id_str)
# ax.grid()
# ax.legend(loc='best', fancybox=True, framealpha=0.7)
# return ax
#
# def plot_poly(self, add_label_str="", shift_yoffset=False, ax=None,
# **kwargs):
# """Plot calibration fit result
#
# Parameters
# ----------
# add_label_str : str
# additional string added to label of plots for legend
# shift_yoffset : bool
# if True, the data is plotted without y-offset
# ax :
# matplotlib axes object, if None, a new one is created
# """
# if not "color" in kwargs:
# kwargs["color"] = "b"
#
# if ax is None:
# fig, ax = subplots(1,1, figsize=(10,8))
#
# taumin, taumax = self.tau_range
# x = linspace(taumin, taumax, 100)
#
# cds_poly = self.poly(x)
# if shift_yoffset:
# try:
# cds_poly -= self.y_offset
# except:
# logger.warning("Failed to subtract y offset")
#
# try:
# ax.plot(x, cds_poly, ls="-", marker="",
# label="Fit result %s" %add_label_str, **kwargs)
#
# except TypeError:
# print "Calibration poly probably not fitted"
#
# ax.grid()
# ax.legend(loc='best', fancybox=True, framealpha=0.7)
# return ax
#
# def plot_data_tseries_overlay(self, date_fmt=None, ax=None):
# """Plot overlay of tau and DOAS time series"""
# if ax is None:
# fig, ax = subplots(1,1)
# s1 = self.tau_tseries
# s2 = self.cd_tseries
# p1 = ax.plot(s1.index.to_pydatetime(), s1.values, "--xb",
# label = r"$\tau$")
# ax.set_ylabel("tau")
# ax2 = ax.twinx()
#
# p2 = ax2.plot(s2.index.to_pydatetime(), s2.values,"--xr",
# label="DOAS CDs")
# ax2.set_ylabel(r"$S_{%s}$ [cm$^{-2}$]" %SPECIES_ID)
# ax.set_title("Time series overlay DOAS calib data")
#
# try:
# if date_fmt is not None:
# ax.xaxis.set_major_formatter(DateFormatter(date_fmt))
# except:
# pass
#
# ps = p1 + p2
# labs = [l.get_label() for l in ps]
# ax.legend(ps, labs, loc="best",fancybox=True, framealpha=0.5)
# ax.grid()
# rotate_xtick_labels(ax)
# return (ax, ax2)
#
# def err(self, value):
# """Returns measurement error of tau value based on slope error"""
# val = self(value)
# r = self.slope_err / self.slope
# return val * r
#
# def __call__(self, value, **kwargs):
# """Define call function to apply calibration
#
# :param float value: tau or AA value
# :return: corresponding column density
# """
# if not isinstance(self.poly, poly1d):
# self.fit_calib_polynomial()
# if isinstance(value, Img):
# calib_im = value.duplicate()
# calib_im.img = self.poly(calib_im.img) - self.y_offset
# calib_im.edit_log["gascalib"] = True
# return calib_im
# elif isinstance(value, ImgStack):
# try:
# value = value.duplicate()
# except MemoryError:
# logger.warning("Stack cannot be duplicated, applying calibration to "
# "input stack")
# value.stack = self.poly(value.stack) - self.y_offset
# value.img_prep["gascalib"] = True
# return value
# return self.poly(value) - self.y_offset
# =============================================================================
|
jgliss/pyplis
|
pyplis/doascalib.py
|
Python
|
gpl-3.0
| 65,370
|
[
"Gaussian"
] |
a4484d90b88f33ff8cf7ba76b9ba9bfed6b95d52c8c8a2a90101ea7f30a72f39
|
'''
Authors: Ivan E. Cao-Berg (icaoberg@scs.cmu.edu)
Created: April 24, 2012
Copyright (C) 2012 Murphy Lab
Lane Center for Computational Biology
School of Computer Science
Carnegie Mellon University
April 24, 2012
* I. Cao-Berg Added getNominalMagnification method that queries OMERO and
tries to retrieve the nominal magnification associated with an image.
If the result is empty, then it returns the DEFAULT_MAGNIFICATION
* I. Cao-Berg Added getResolution method that gets the pixel size in [x,y,z]
* I. Cao-Berg Added getScale method that calculate the image scale
May 2, 2012
* I. Cao-Berg Added method for retrieving a list of image ids for the current user
try:
query = conn.getQueryService()
me = conn.getAdminService().getEventContext().userId
string = "select i.id from Image i where i.details.owner.id = :id"
params = omero.sys.ParametersI().addId(me)
iids = omero.rtypes.unwrap(query.projection(string,params))
return iids
except:
return []
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
For additional information visit http://murphylab.web.cmu.edu or
send email to murphy@cmu.edu
'''
import omero, pyslic, pyslid.utilities
from utilities import PyslidException
import omero.util.script_utils as utils
from omero.rtypes import *
from omero.gateway import BlitzGateway
def getNomimalMagnification( conn, iid, debug=False ):
'''
Gets the nominal magnification associated with an image given an image id (iid). If
the query is empty (meaning the field containing the nominal magnification was empty),
then this method will return the value of DEFAULT_MAGNIFICATION. The out-of-the-box value
of DEFAULT_MAGNIFICATION is set to 40, which corresponds to the 3D HeLa dataset from
the Murphy Lab.
If the method is unable to connect to the OMERO.server, it raises a PyslidException.
If the method doesn't find an image associated with the given image id (iid), it also
raises a PyslidException.
For detailed outputs, set debug flag to True.
@param connection
@param image id (iid)
@param debug
@return nominal magnification
'''
DEFAULT_MAGNIFICATION = 40
if not conn.isConnected():
raise PyslidException("Unable to connect to OMERO.server")
if not pyslid.utilities.hasImage( conn, iid ):
raise PyslidException("No image found with the give image id")
#create and populate parameter
params = omero.sys.ParametersI()
params.addLong( "iid", iid )
#hql string query
string = "select i from Image i join fetch i.objectiveSettings as objS join fetch objS.objective as ob where i.id=:iid"
#database query
query = conn.getQueryService()
try:
result = query.findByQuery(string, params.page(0,1), conn.SERVICE_OPTS)
except:
raise PyslidException("Unable to run query")
if not result:
if debug:
print "Query was empty. Setting magnification to default value"
return DEFAULT_MAGNIFICATION
else:
return result
def getResolution( conn, iid, debug=False ):
'''
Gets the image resolution.
If the method is unable to connect to the OMERO.server, it raises a PyslidException.
If the method doesn't find an image associated with the given image id (iid), it also
raises a PyslidException.
For detailed outputs, set debug flag to True.
@param connection
@param image id (iid)
@param debug flag
@return resolution
'''
if not conn.isConnected():
raise PyslidException("Unable to connect to OMERO.server")
if not pyslid.utilities.hasImage( conn, iid ):
raise PyslidException("No image found with the give image id")
img = pyslid.utilities.getImage( conn, iid )
resolution = [ img.getPixelSizeX(), img.getPixelSizeY(), img.getPixelSizeZ() ]
return resolution
def getScale( conn, iid, debug=False ):
'''
Get image scale. Image scale is defined as resolution over nominal magnification.
If the method is unable to connect to the OMERO.server, it raises a PyslidException.
If the method doesn't find an image associated with the given image id (iid), it also
raises a PyslidException.
For detailed outputs, set debug flag to True.
@param connection
@param image id (iid)
@param debug flag
'''
if not conn.isConnected():
raise PyslidException("Unable to connect to OMERO.server")
if not pyslid.utilities.hasImage( conn, iid ):
raise PyslidException("No image found with the give image id")
try:
resolution = pyslid.image.getResolution( conn, iid )
#magnification = pyslid.image.getNomimalMagnification( conn, iid )
magnification = 1.0
print('Ignoring magnification, setting to %f' % magnification)
scale = [resolution[0]/magnification, resolution[1]/magnification, resolution[2]/magnification]
except:
raise PyslidException("Unable to calculate scale")
return scale
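# Worked example (numbers are assumptions, not taken from a real dataset):
# with a pixel size (resolution) of [0.1, 0.1, 0.5] micrometres and a nominal
# magnification of 40, the scale would be
# [0.1/40, 0.1/40, 0.5/40] = [0.0025, 0.0025, 0.0125].
# Note that the implementation above currently forces magnification = 1.0,
# so the returned scale equals the resolution.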
def getList( conn, debug=False ):
'''
Returns a list of image ids (iids) from images owned by the user making the connection.
@param conn
@returns image ids (iids) list
'''
if not conn.isConnected():
raise PyslidException("Unable to connect to OMERO.server")
try:
query = conn.getQueryService()
me = conn.getAdminService().getEventContext().userId
string = "select i.id from Image i where i.details.owner.id = :id"
params = omero.sys.ParametersI().addId(me)
iids = omero.rtypes.unwrap(query.projection(
string, params, conn.SERVICE_OPTS))
return iids
except:
raise PyslidException("Unable to run query")
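# Hypothetical usage sketch (host, credentials and the row layout of the
# projection result are assumptions; BlitzGateway is imported above and the
# calls shown are standard omero.gateway usage, given here only as an
# illustration):
#
#     conn = BlitzGateway("username", "password", host="omero.example.org", port=4064)
#     conn.connect()
#     iids = getList(conn)            # projection rows, e.g. [[1], [2], ...]
#     if iids:
#         scale = getScale(conn, iids[0][0])
#     conn.close()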
|
icaoberg/pyslid
|
pyslid/image.py
|
Python
|
gpl-3.0
| 6,345
|
[
"VisIt"
] |
93225dbcfb8d4241b74c1fbeeefba3aa9e389fa123e1151b78b7dd35be780387
|
'''@package docstring
@author: Jyh-Miin Lin (Jimmy), Cambridge University
@address: jyhmiinlin@gmail.com
Created on 2013/1/21
================================================================================
This file is part of pynufft.
pynufft is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pynufft is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pynufft. If not, see <http://www.gnu.org/licenses/>.
================================================================================
First, see the test_1D(), test_2D() and test_3D() examples
'''
try:
from good_3d_nufft import *
import numpy
import scipy.fftpack
import numpy.random
import matplotlib.pyplot
import matplotlib.cm
# import matplotlib.numerix
# import matplotlib.numerix.random_array
import sys
# import utils
except:
print('failed to import modules')
print('numpy, scipy, matplotlib are required')
raise
#
# try:
# import llvm
# except:
# print('llvm not supported')
# from cx import *
# Add the ptdraft folder path to the sys.path list
# sys.path.append('..')
#import CsTransform.pynufft as pf
# try:
# import pycuda.gpuarray as gpuarray
# import pycuda.driver as cuda
# import pycuda.autoinit
# import pycuda.cumath as cumath
# gpu_flag = 1
# except:
# print "No PyOpenCL/PyFFT detected"
# gpu_flag = 0
# import utils
cmap=matplotlib.cm.gray
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
try:
from numba import autojit
except:
print('numba not supported')
# def create_krnl(self,u): # create the negative 3D laplacian __kernel of size u.shape[0:3]
#
# krnl = numpy.zeros(numpy.shape(u)[0:3],dtype=numpy.complex64)
# krnl[0,0,0]=6
# krnl[1,0,0]=-1
# krnl[0,1,0]=-1
# krnl[0,0,1]=-1
# krnl[-1,0,0]=-1
# krnl[0,-1,0]=-1
# krnl[0,0,-1]=-1
# krnl = self.ifft_kkf(krnl)
#
# return krnl # (256*256*16)
# @autojit
def tailor_fftn(X):
X = scipy.fftpack.fftshift(scipy.fftpack.fftn(scipy.fftpack.fftshift((X))))
return X
def tailor_ifftn(X):
X = scipy.fftpack.fftshift(scipy.fftpack.ifftn(scipy.fftpack.ifftshift(X)))
return X
def output(cc):
print('max',numpy.max(numpy.abs(cc[:])))
def Normalize(D):
return D/numpy.max(numpy.abs(D[:]))
def checkmax(x,dbg):
max_val = numpy.max(numpy.abs(x[:]))
if dbg ==0:
pass
else:
print( max_val)
return max_val
def appendmat(input_array,L):
if numpy.ndim(input_array) == 1:
input_shape = numpy.size(input_array)
input_shape = (input_shape,)
else:
input_shape = input_array.shape
Lprod= numpy.prod(input_shape)
output_array=numpy.copy(input_array)
output_array=numpy.reshape(output_array,(Lprod,1),order='F')
output_array=numpy.tile(output_array,(1,L))
output_array=numpy.reshape(output_array,input_shape+(L,),order='F')
return output_array
def freq_gradient(x):# zero frequency at centre
grad_x = numpy.copy(x)
dim_x=numpy.shape(x)
# print('freq_gradient shape',dim_x)
for pp in xrange(0,dim_x[2]):
grad_x[...,pp,:]=grad_x[...,pp,:] * (-2.0*numpy.pi*(pp -dim_x[2]/2.0 )) / dim_x[2]
return grad_x
def freq_gradient_H(x):
return -freq_gradient(x)
# def shrink_core(s,LMBD):
# # LMBD = LMBD + 1.0e-15
# s = numpy.sqrt(s).real
# ss = numpy.maximum(s-LMBD , 0.0)/(s+1e-7) # shrinkage
# return ss
# def shrink(dd, bb,LMBD):
#
# n_dims=numpy.shape(dd)[0]
#
# xx=()
#
# s = numpy.zeros(dd[0].shape)
# for pj in xrange(0,n_dims):
# s = s+ (dd[pj] + bb[pj])*(dd[pj] + bb[pj]).conj()
# s = numpy.sqrt(s).real
# ss = numpy.maximum(s-LMBD*1.0 , 0.0)/(s+1e-7) # shrinkage
# for pj in xrange(0,n_dims):
#
# xx = xx+ (ss*(dd[pj]+bb[pj]),)
#
# return xx
def shrink2(dd,bb,ss,n_dims):
xx = tuple(ss*(dd[pj]+bb[pj]) for pj in xrange(0,n_dims))
return xx
def shrink1(dd,bb,n_dims):
# s = numpy.zeros(numpy.shape(dd[0]),dtype = numpy.float)
# c = numpy.empty_like(s) # only real
# for pj in xrange(0,n_dims):
# c = (dd[pj] + bb[pj]).real
# s = s+ c**2
s = sum((dd[pj] + bb[pj]).real**2 for pj in xrange(0,n_dims))
s = s**0.5
return s.real
def shrink(dd, bb,LMBD):
# n_dims=numpy.shape(dd)[0]
n_dims = len(dd)
s = shrink1(dd,bb,n_dims)
ss = numpy.maximum(s-LMBD*1.0 , 0.0)/(s+1e-15)# shrinkage
xx = shrink2(dd,bb,ss,n_dims)
return xx
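# Minimal numerical sketch (assumptions only) of the isotropic shrinkage above:
# s = sqrt(sum_j (d_j + b_j)**2), scale = max(s - LMBD, 0) / s, and every
# component (d_j + b_j) is multiplied by that scale.
#
#     import numpy as np
#
#     dd = (np.array([3.0]), np.array([4.0]))       # gradient components
#     bb = (np.array([0.0]), np.array([0.0]))       # Bregman variables
#     LMBD = 2.0
#     s = np.sqrt(sum((d + b) ** 2 for d, b in zip(dd, bb)))   # -> 5.0
#     scale = np.maximum(s - LMBD, 0.0) / (s + 1e-15)          # -> 0.6
#     xx = tuple(scale * (d + b) for d, b in zip(dd, bb))      # -> (1.8, 2.4)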
def TVconstraint(xx, bb):
# sum the adjoint difference operator applied to (x - b) along each axis;
# warn early on a size mismatch instead of failing with an undefined 'cons'
if len(xx) != len(bb):
print('xx and bb size wrong!')
cons = sum(get_Diff_H(xx[jj] - bb[jj], jj) for jj in xrange(0, len(xx)))
return cons
# def Dx(u):
# shapes = numpy.shape(u)
# rows=shapes[0]
# ind1 = xrange(0,rows)
# ind2 = numpy.roll(ind1,1,axis=0)
# u2= u[ind2,...]
# u2[...]= u[...] - u2[...]
# return u2#u[ind1,...]-u[ind2,...]
def Dx(u):
u2=numpy.concatenate((u,u[0:1,...]),axis=0)
u2=numpy.roll(u2,1,axis=0)
u2=numpy.diff(u2,n=1,axis=0)
return u2
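# Worked example for Dx (values follow directly from the implementation above):
#     u                      = [1, 2, 4, 7]
#     concatenate(u, u[0:1]) -> [1, 2, 4, 7, 1]
#     roll(..., 1, axis=0)   -> [1, 1, 2, 4, 7]
#     diff(..., n=1, axis=0) -> [0, 1, 2, 3]
# i.e. Dx returns backward differences u[k] - u[k-1] along axis 0, with the
# first entry set to 0.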
def get_Diff_H(x,axs): # hermitian operator of get_Diff(x,axs)
if axs > 0:
# transpose the specified axs to 0
# and use the case when axs == 0
# then transpose back
mylist=list(xrange(0,x.ndim))
(mylist[0], mylist[axs])=(mylist[axs],mylist[0])
tlist=tuple(mylist[:])
#=======================================================================
dcxt=numpy.transpose(
get_Diff_H(numpy.transpose(x,tlist),0),
tlist)
elif axs == 0:
# x=x[::-1,...]
#x=numpy.flipud(x)
dcxt=-get_Diff(x, 0)
#dcxt=numpy.flipud(dcxt)# flip along axes
# dcxt=dcxt[::-1,...]
dcxt=numpy.roll(dcxt, axis=0, shift=-1)
# dcxt=-get_Diff(x,0)
# dcxt=numpy.roll(dcxt,shift=2, axis=0)
return dcxt
def get_Diff(x,axs):
#calculate the 1D gradient of images
if axs > 0:
# transpose the specified axs to 0
# and use the case when axs == 0
# then transpose back
mylist=list(xrange(0,x.ndim))
(mylist[0], mylist[axs])=(mylist[axs],mylist[0])
tlist=tuple(mylist[:])
#=======================================================================
dcx=numpy.transpose(
get_Diff(numpy.transpose(x,tlist),0),
tlist)
elif axs == 0:
# xshape=numpy.shape(x)
# dcy=numpy.empty(numpy.shape(y),dtype=numpy.complex64)
# ShapeProd=numpy.prod(xshape[1:])
# x = numpy.reshape(x,xshape[0:1]+(ShapeProd,),order='F')
# dcx=numpy.empty(numpy.shape(x),dtype=x.dtype)
# dcx=Dx(x)
# for ss in xrange(0,ShapeProd):
# dcx[:,ss] = Dx(x[:,ss]) # Diff operators
dcx = Dx(x)
# dcy[:,:,ll] = Dyt(y[:,:,ll]-by[:,:,ll]) # Hermitian of Diff operators
# dcx=numpy.reshape(dcx, xshape ,order='F')
return dcx
def CombineMulti(multi_coil_data,axs):
U=numpy.mean(multi_coil_data,axs)
U = appendmat(U,multi_coil_data.shape[axs])
return U
def CopySingle2Multi(single_coil_data,n_tail):
U=numpy.copy(single_coil_data)
U = appendmat(U, n_tail)
return U
class pynufft(nufft):
def __init__(self,om, Nd, Kd,Jd):
nufft.__init__(self,om, Nd, Kd,Jd)
self.st['q'] = self.st['p']
self.st['q'] = self.st['q'].conj().multiply(self.st['q'])
self.st['q'] = self.st['q'].sum(0)
self.st['q'] = numpy.array(self.st['q'] )
self.st['q']=numpy.reshape(self.st['q'],(numpy.prod(self.st['Kd']),1),order='F').real
# self.st['q']=self.st['p'].getH().dot(self.st['p']).diagonal() # slow version
#
# self.st['q']=numpy.reshape(self.st['q'],(numpy.prod(self.st['Kd']),1),order='F')
#
def gpu_k_modulate(self):
try:
self.myfft(self.data_dev, self.data_dev,inverse=False)
self.data_dev=self.W_dev*self.data_dev
self.myfft(self.data_dev, self.data_dev,inverse=True)
return 0
except:
return 1
def Nd2KdWKd2Nd_gpu(self,x, weight_flag):
'''
Transform Nd grids to Kd grids (not reshaped), apply the k-space
weighting and transform back to Nd grids.
'''
#print('661 x.shape',x.shape)
# x is Nd Lprod
st=self.st
Nd = st['Nd']
Kd = st['Kd']
# dims = numpy.shape(x)
# dd = numpy.size(Nd)
Lprod = numpy.shape(x)[-1]
if self.debug==0:
pass
else:
checker(x,Nd)
snc = st['sn']
output_x=numpy.zeros(Kd, dtype=numpy.complex64)
# self.W_dev = self.thr.to_device(self.W.T.astype(dtype))
for ll in xrange(0,Lprod):
if weight_flag == 0:
pass
else:
x[...,ll] = x[...,ll] * snc # scaling factors
output_x=output_x*0.0
output_x[crop_slice_ind(x[...,ll].shape)] = x[...,ll]
self.data_dev = self.thr.to_device(output_x.astype(dtype))
if self.gpu_k_modulate()==0:
pass
else:
print('gpu_k_modulate error')
break
x[...,ll]=self.data_dev.get()[crop_slice_ind(Nd)]
if weight_flag == 0:
pass
else: #weight_flag =1 scaling factors
x[...,ll] = x[...,ll]*snc.conj() #% scaling factors
if self.debug==0:
pass # turn off checker
else:
checker(x,Nd) # checking size of x divisible by Nd
return x
def forwardbackward_gpu(self,x):
# print('inside forwardbackward_gpu ')
st=self.st
Nd = st['Nd']
# Kd = st['Kd'] # unused
# dims = numpy.shape(x) #unused
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# checker
checker(x,Nd)
if numpy.ndim(x) == dd:
Lprod = 1
elif numpy.ndim(x) > dd: # multi-channel data
Lprod = numpy.size(x)/numpy.prod(Nd)
Lprod = Lprod.astype(int)
x = numpy.reshape(x,Nd+(Lprod,),order='F')
'''
Now transform Nd grids to Kd grids (not reshaped)
'''
x = self.Nd2KdWKd2Nd_gpu(x,0) #
# for ii in xrange(0,Lprod):
# # tmp_Xk = self.Nd2Kd_gpu(x[...,ii],0)
# Xk[...,ii] = st['q'][...,0]*Xk[...,ii]
# x[...,ii]= self.Kd2Nd_gpu(tmp_Xk,0)
'''
Now transform Kd grids to Nd grids (not reshaped)
'''
# x= self.Kd2Nd(Xk,0) #
checker(x,Nd) # check output
return x
def forwardbackward(self,x):
if self.cuda_flag == 0:
st=self.st
Nd = st['Nd']
# Kd = st['Kd'] # unused
# dims = numpy.shape(x) #unused
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# checker
checker(x,Nd)
if numpy.ndim(x) == dd:
Lprod = 1
x = numpy.reshape(x,Nd+(1,),order='F')
elif numpy.ndim(x) > dd: # multi-channel data
Lprod = numpy.size(x)/numpy.prod(Nd)
Lprod = Lprod.astype(int)
'''
Now transform Nd grids to Kd grids (not reshaped)
'''
Xk = self.Nd2Kd(x,0) #
for ii in range(0,Lprod):
Xk[...,ii] = st['q'][...,0]*Xk[...,ii]
# Xk[...,ii] = st['T'].dot(Xk[...,ii])
'''
Now transform Kd grids to Nd grids (not reshaped)
'''
x= self.Kd2Nd(Xk,0) #
checker(x,Nd) # check output
return x
elif self.cuda_flag == 1:
return self.forwardbackward_gpu(x)
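# ------------------------------------------------------------------------
# Standalone sketch (assumptions only; q below is a toy weight array, not
# st['q']) of the forward-backward pattern used above: zero-pad the image
# to the oversampled grid Kd, go to k-space, multiply by the precomputed
# weights, transform back and crop to Nd.
#
#     import numpy as np
#
#     Nd, Kd = (8, 8), (16, 16)
#     x = np.random.rand(*Nd) + 0j
#     q = np.random.rand(*Kd)                        # toy k-space weights
#
#     xpad = np.zeros(Kd, dtype=complex)
#     xpad[:Nd[0], :Nd[1]] = x
#     Xk = np.fft.fftn(xpad)
#     y = np.fft.ifftn(q * Xk)[:Nd[0], :Nd[1]]       # forward-backward result
# ------------------------------------------------------------------------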
def inverse(self,data, mu, LMBD, gamma, nInner, nBreg): # main function of solver
self.f = data
self.mu = mu
self.LMBD = LMBD
self.gamma = gamma
self.nInner= nInner
self.nBreg= nBreg
# print(numpy.size(data) , self.st['M'] )
if numpy.size(data) == self.st['M']:
self.st['senseflag'] = 0
# print(numpy.size(data) )
print('turn-off sense recon')
try:
if self.st['senseflag']==0:
self.st = self._create_mask()
pass
else:
raise
except:
self.LMBD=self.LMBD*1.0
self.st['senseflag']=0 # turn-off sense, to get sensemap
#precompute highly constrainted images to guess the sensitivity maps
(u0,dump)=self._kernel(self.f, self.st , self.mu, self.LMBD, self.gamma, 1,2)
#===============================================================================
# mask
#===============================================================================
self.st = self._create_mask()
if numpy.size(u0.shape) > numpy.size(self.st['Nd']):
for pp in range(2,numpy.size(u0.shape)):
self.st['mask'] = appendmat(self.st['mask'],u0.shape[pp] )
self.st['mask2'] = appendmat(self.st['mask2'],u0.shape[pp] )
#===============================================================================
#estimate sensitivity maps by divided by rms images
self.st = self._make_sense(u0) # setting up sense map in st['sensemap']
self.sense_fun = numpy.sum(self.st['sense'],-1)
self.st['senseflag']=1 # turn-on sense, to get sensemap
#scale back the _constrainted factor LMBD
self.LMBD=self.LMBD/1.0
#CS reconstruction
(self.u, self.u_stack)=self._kernel(self.f, self.st , self.mu, self.LMBD, self.gamma,
self.nInner,self.nBreg)
# for jj in range(0,self.u.shape[-1]):
# self.u[...,jj] = self.u[...,jj]*(self.st['sn']**0.7)# rescale the final image intensity
#
if self.u.shape[-1] == 1:
if numpy.ndim(self.u) != numpy.ndim(self.st['Nd']): # always true?
self.u = self.u[...,0]
# self.u = Normalize(self.u)
return self.u
def _kernel(self, f, st , mu, LMBD, gamma, nInner, nBreg):
L= numpy.size(f)/st['M']
image_dim=st['Nd']+(L,)
if numpy.ndim(f) == 1:# preventing row vector
f=numpy.reshape(f,(numpy.shape(f)[0],1),order='F')
# f0 = numpy.copy(f) # deep copy to prevent scope f0 to f
# unused
# u = numpy.zeros(image_dim,dtype=numpy.complex64)
#===========================================================================
# check whether sense is used
# if senseflag == 0, create an all-ones mask
# if sensemap size is wrong, create an all-ones mask (shouldn't occur)
#===========================================================================
if st['senseflag'] == 0:
st['sensemap'] = numpy.ones(image_dim,dtype=numpy.complex64)
elif numpy.shape(st['sensemap']) != image_dim: #(shouldn't occur)
st['sensemap'] = numpy.ones(image_dim,dtype=numpy.complex64)
else:
pass # correct, use existing sensemap
#=========================================================================
# check whether mask is used
#=========================================================================
# if st.has_key('mask'):
if 'mask' in st:
if numpy.shape(st['mask']) != image_dim:
st['mask'] = numpy.ones(image_dim,dtype=numpy.complex64)
else:
st['mask'] = numpy.ones(image_dim,dtype=numpy.complex64)
if 'mask2' in st:
if numpy.shape(st['mask2']) != image_dim:
st['mask2'] = numpy.reshape(st['mask2'],image_dim,order='F')
else:
st['mask2'] = numpy.ones(image_dim,dtype=numpy.complex64)
#===========================================================================
# update sensemap so we don't need to add ['mask'] in the iteration
#===========================================================================
st['sensemap'] = st['sensemap']*st['mask']
#=======================================================================
# RTR: k-space sampled density
# only diagonal elements are relevant (on k-space grids)
#=======================================================================
RTR=self._create_kspace_sampling_density()
#===============================================================================
# # # Laplacian operator, convolution kernel in spatial domain
# # related to _constraint
#===============================================================================
uker = self._create_laplacian_kernel()
#=======================================================================
# uker: deconvolution kernel in k-space,
# which will be divided in k-space in iterations
#=======================================================================
#===========================================================================
# initial estimation u, u0, uf
#===========================================================================
u = self.adjoint(f)
# c = numpy.max(numpy.abs(u.flatten())) # Rough coefficient
for jj in range(0,u.shape[-1]):
u[...,jj] = u[...,jj]/self.st['sn'] # remove scaling factor in the first place
if self.debug ==0:
pass
else:
print('senseflag',st['senseflag'])
if st['senseflag'] == 1:
u=CombineMulti(u,-1)[...,0:1] # summation of multicoil images
u0 = numpy.copy(u)
self.thresh_scale= numpy.max(numpy.abs(u0[:]))
self.u0=numpy.copy(u0)
# else:
# print('existing self.u, so we use previous u and u0')
# u=numpy.copy(self.u) # using existing initial values
# u0=numpy.copy(self.u0)
# if st['senseflag'] == 1:
# print('u.shape line 305',u.shape)
# u == u[...,0:1]
# print('u.shape line 307',u.shape)
#===============================================================================
# Now repeat the uker to L slices e.g. uker=512x512x8 (if L=8)
# useful for later calculation
#===============================================================================
#expand 2D/3D kernel to desired dimension of kspace
uker = self._expand_deconv_kernel_dimension(uker,u.shape[-1])
RTR = self._expand_RTR(RTR,u.shape[-1])
uker = self.mu*RTR - LMBD*uker + gamma
if self.debug ==0:
pass
else:
print('uker.shape line 319',uker.shape)
(xx,bb,dd)=self._make_split_variables(u)
uf = numpy.copy(u0) # only used for ISRA, written here for generality
u = u*1.0/numpy.max(st['q'][:]) # initial values
murf = numpy.copy(u) # initial values
# #===============================================================================
u_stack = numpy.empty(st['Nd']+(nBreg,),dtype=numpy.complex)
self.err =1.0e+13
u_k_1=0
for outer in numpy.arange(0,nBreg):
for inner in numpy.arange(0,nInner):
# update u
if self.debug==0:
pass
else:
print('iterating',[inner,outer])
#===============================================================
# update u # simple k-space deconvolution to guess initial u
u = self._update_u(murf,u,uker,xx,bb)
for jj in range(0,u.shape[-1]):
u[...,jj] = u[...,jj]*(self.st['sn']**1)
# Temporally scale the image for softthresholding
c = numpy.max(numpy.abs(u.flatten())) # Rough coefficient
# to correct threshold of nonlinear shrink
#===================================================================
# # update d
#===================================================================
#===================================================================
# Shrinkage: remove tiny values where the solution is expected
# to be sparse (dx + bx should be sparse)
#===================================================================
# shrinkage
#===================================================================
dd=self._update_d(u,dd)
xx=self._shrink( dd, bb, c/LMBD/numpy.sqrt(numpy.prod(st['Nd'])))
#===============================================================
#===================================================================
# # update b
#===================================================================
bb=self._update_b(bb, dd, xx)
for jj in range(0,u.shape[-1]):
u[...,jj] = u[...,jj]/(self.st['sn']**1)
# Temporally scale the image for softthresholding
# if outer < nBreg: # do not update in the last loop
if st['senseflag']== 1:
u = appendmat(u[...,0],L)
(u, murf, uf, u_k_1)=self._external_update(u, uf, u0, u_k_1, outer) # update outer Split_bregman
if st['senseflag']== 1:
u = u[...,0:1]
murf = murf[...,0:1]
u_stack[...,outer] = (u[...,0]*(self.st['sn']**1))
# u_stack[...,outer] =u[...,0]
fermi = scipy.fftpack.fftshift( self.st['mask'][...,0] )
for jj in range(0,u.shape[-1]):
# u[...,jj] = u[...,jj]*(self.st['sn']**1)# rescale the final image intensity
u[...,jj] = scipy.fftpack.ifftn(scipy.fftpack.fftn(u[...,jj])*fermi ) # apply GE's fermi filter
u[...,jj] = u[...,jj]*(self.st['sn'])*self.st['mask2'][...,jj]# rescale the final image intensity
if st['senseflag']== 1:
u[...,jj] = self.final_sense(u[...,jj],self.sense_fun)
return (u,u_stack)
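# ------------------------------------------------------------------------
# Simplified, self-contained 1D analogue (assumptions only, not the pynufft
# implementation) of the split-Bregman iteration in _kernel above: TV
# denoising of a 1D signal with a periodic difference operator D,
# alternating a Fourier-domain u-update, a shrinkage d-update and a
# Bregman b-update.
#
#     import numpy as np
#
#     def D(u):  return np.roll(u, -1) - u           # forward difference (periodic)
#     def Dt(v): return np.roll(v, 1) - v            # its adjoint
#
#     n, mu, lam = 128, 5.0, 1.0
#     f = np.repeat([0.0, 1.0], n // 2) + 0.1 * np.random.randn(n)  # noisy step
#     u = f.copy(); d = np.zeros(n); b = np.zeros(n)
#     lap_eig = 2.0 - 2.0 * np.cos(2.0 * np.pi * np.arange(n) / n)  # eig of Dt(D(.))
#
#     for _ in range(50):
#         rhs = mu * f + lam * Dt(d - b)
#         u = np.real(np.fft.ifft(np.fft.fft(rhs) / (mu + lam * lap_eig)))
#         w = D(u) + b
#         d = np.sign(w) * np.maximum(np.abs(w) - 1.0 / lam, 0.0)   # shrinkage
#         b = b + D(u) - d
# ------------------------------------------------------------------------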
def final_sense(self,u,sense_fun):
tmp_u = u
# for j in xrange(0,5):
# tmp_u = tmp_u + u - tmp_u*sense_fun*sense_fun.conj()
return tmp_u
def _update_u(self,murf,u,uker,xx,bb):
#print('inside _update_u')
# checkmax(u)
# checkmax(murf)
# rhs = self.mu*murf + self.LMBD*self.get_Diff(x,y,bx,by) + self.gamma
#=======================================================================
# Trick: make "llist" for numpy.transpose
mylist = tuple(numpy.arange(0,numpy.ndim(xx[0])))
tlist = mylist[1::-1]+mylist[2:]
#=======================================================================
# update the right-head side terms
rhs = (self.mu*murf +
self.LMBD*self._constraint(xx,bb) +
self.gamma * u)
rhs = rhs * self.st['mask'][...,0:u.shape[-1]]
# rhs=Normalize(rhs)
#=======================================================================
# Trick: make "flist" for fftn
flist = mylist[:-1:1]
u = self._k_deconv(rhs, uker,self.st,flist,mylist)
# print('max rhs u',numpy.max(numpy.abs(rhs[:])),numpy.max(numpy.abs(u[:])))
# print('max,q',numpy.max(numpy.abs(self.st['q'][:])))
# for jj in range(0,1):
# u = u - 0.1*(self.k_deconv(u, 1.0/(RTR+self.LMBD*uker+self.gamma),self.st,flist,mylist) - rhs
# )
# checkmax(u)
# checkmax(rhs)
# checkmax(murf)
#print('leaving _update_u')
return u # normalization
def _k_deconv(self, u,uker,st,flist,mylist):
u0=numpy.copy(u)
u=u*st['mask'][...,0:u.shape[-1]]
# u=scipy.fftpack.fftn(u, st['Kd'],flist)
U=numpy.empty(st['Kd']+(u.shape[-1],),dtype=u.dtype)
for pj in range(0,u.shape[-1]):
U[...,pj]=self.emb_fftn(u[...,pj], st['Kd'], range(0,numpy.size(st['Kd']))) / uker[...,pj] # deconvolution
U[...,pj]=self.emb_ifftn(U[...,pj], st['Kd'], range(0,numpy.size(st['Kd'])))
u = U[[slice(0, st['Nd'][_ss]) for _ss in mylist[:-1]]]
# optional: one- additional Conjugated step to ensure the quality
# for pp in range(0,3):
# u = self._cg_step(u0,u,uker,st,flist,mylist)
#
u=u*st['mask'][...,0:u.shape[-1]]
return u
def _cg_step(self, rhs, u, uker, st,flist,mylist):
u=u#*st['mask'][...,0:u.shape[-1]]
# u=scipy.fftpack.fftn(u, st['Kd'],flist)
AU=numpy.empty(st['Kd']+(u.shape[-1],),dtype=u.dtype)
# print('U.shape. line 446',U.shape)
# print('u.shape. line 447',u.shape)
for pj in range(0,u.shape[-1]):
AU[...,pj]=self.emb_fftn(u[...,pj], st['Kd'], range(0,numpy.size(st['Kd']))) * uker[...,pj] # deconvolution
AU[...,pj]=self.emb_ifftn(AU[...,pj], st['Kd'], range(0,numpy.size(st['Kd'])))
ax0 = AU[[slice(0, st['Nd'][_ss]) for _ss in mylist[:-1]]]
u=u#*st['mask'][...,0:u.shape[-1]]
r = rhs - ax0
p = r
for running_count in range(0,1):
upper_inner = r.conj()*r
upper_inner = numpy.sum(upper_inner[:])
AU=numpy.empty(st['Kd']+(u.shape[-1],),dtype=u.dtype)
# print('U.shape. line 446',U.shape)
# print('u.shape. line 447',u.shape)
for pj in range(0,u.shape[-1]):
AU[...,pj]=self.emb_fftn(p[...,pj], st['Kd'], range(0,numpy.size(st['Kd']))) * uker[...,pj] # deconvolution
AU[...,pj]=self.emb_ifftn(AU[...,pj], st['Kd'], range(0,numpy.size(st['Kd'])))
Ap = AU[[slice(0, st['Nd'][_ss]) for _ss in mylist[:-1]]]
lower_inner = p.conj()*Ap
lower_inner = numpy.sum(lower_inner[:])
alfa_k = upper_inner/ lower_inner
# alfa_k = alfa_k*0.6
u = u + alfa_k * p
r2 = r - alfa_k *Ap
beta_k = numpy.sum((r2.conj()*r2)[:])/numpy.sum((r.conj()*r)[:])
r = r2
p = r + beta_k*p
return u
def _constraint(self,xx,bb):
'''
include TVconstraint and others
'''
cons = TVconstraint(xx[:],bb[:])
return cons
def _shrink(self,dd,bb,thrsld):
'''
soft-thresholding the edges
'''
output_xx=shrink( dd[:], bb[:], thrsld)# 3D thresholding
return output_xx
def _make_split_variables(self,u):
n_dims = len(self.st['Nd'])
xx = ()
bb = ()
dd = ()
for jj in range(0,n_dims):
x=numpy.zeros(u.shape)
bx=numpy.zeros(u.shape)
dx=numpy.zeros(u.shape)
xx = xx + (x,)
bb = bb + (bx,)
dd = dd + (dx,)
# x=numpy.zeros(u.shape)
# y=numpy.zeros(u.shape)
# bx=numpy.zeros(u.shape)
# by=numpy.zeros(u.shape)
# dx=numpy.zeros(u.shape)
# dy=numpy.zeros(u.shape)
# xx= (x,y)
# bb= (bx,by)
# dd= (dx,dy)
return(xx,bb,dd)
def _extract_svd(self,input_stack,L):
C= numpy.copy(input_stack) # temporary array
print('size of input_stack', numpy.shape(input_stack))
C=C/numpy.max(numpy.abs(C))
reps_acs = 16
mysize = 16
        K = 5 # initial rank guess; too high a rank can introduce singular artifacts (refined from the singular values below)
        half_mysize = mysize // 2
dimension = numpy.ndim(C) -1 # collapse coil dimension
if dimension == 1:
tmp_stack = numpy.empty((mysize,),dtype = numpy.complex64)
svd_size = mysize
C_size = numpy.shape(C)[0]
data = numpy.empty((svd_size,L*reps_acs),dtype=numpy.complex64)
# for jj in xrange(0,L):
# C[:,jj]=tailor_fftn(C[:,jj])
# for kk in xrange(0,reps_acs):
# tmp_stack = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
# data[:,jj] = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
elif dimension == 2:
tmp_stack = numpy.empty((mysize,mysize,),dtype = numpy.complex64)
svd_size = mysize**2
data = numpy.empty((svd_size,L*reps_acs),dtype=numpy.complex64)
C_size = numpy.shape(C)[0:2]
            for jj in range(0,L):
# matplotlib.pyplot.imshow(C[...,jj].real)
# matplotlib.pyplot.show()
# tmp_pt=(C_size[0]-reps_acs)/2
C[:,:,jj]=tailor_fftn(C[:,:,jj])
                for kk in range(0,reps_acs):
                    acs_w = int(reps_acs**0.5) # width of the calibration block (assumes reps_acs is a perfect square)
                    a = kk % acs_w
                    b = kk // acs_w
                    tmp_stack = C[C_size[0]//2-half_mysize-acs_w//2+a : C_size[0]//2+half_mysize-acs_w//2+a,
                                  C_size[1]//2-half_mysize-acs_w//2+b : C_size[1]//2+half_mysize-acs_w//2+b,jj]
                    data[:,jj*reps_acs+kk] = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
elif dimension == 3:
tmp_stack = numpy.empty((mysize,mysize,mysize),dtype = numpy.complex64)
svd_size = mysize**3
data = numpy.empty((svd_size,L),dtype=numpy.complex64)
C_size = numpy.shape(C)[0:3]
            for jj in range(0,L):
                C[:,:,:,jj]=tailor_fftn(C[:,:,:,jj])
                tmp_stack= C[C_size[0]//2-half_mysize:C_size[0]//2+half_mysize,
                             C_size[1]//2-half_mysize:C_size[1]//2+half_mysize,
                             C_size[2]//2-half_mysize:C_size[2]//2+half_mysize,
                             jj]
                data[:,jj] = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
# OK, data is the matrix of size (mysize*n, L) for SVD
import scipy.linalg
(s_blah,vh_blah) = scipy.linalg.svd(data)[1:3]
        for jj in range(0,numpy.size(s_blah)): # loop over the singular values
if s_blah[jj] > 0.1*s_blah[0]: # 10% of maximum singular value to decide the rank
K = jj+1
# pass
else:
break
v_blah =vh_blah.conj().T
C = C*0.0 # now C will be used as the output stack
V_para = v_blah[:,0:K]
print('shape of V_para',numpy.shape(V_para))
        V_para = numpy.reshape(V_para,(int(reps_acs**0.5),int(reps_acs**0.5),L, K),order='F')
C2 = numpy.zeros((C.shape[0],C.shape[1],L,K),dtype=numpy.complex64)
        acs_w = int(reps_acs**0.5)
        for jj in range(0,L): # coils
            for kk in range(0,K): # rank
                C2[C.shape[0]//2-acs_w//2:C.shape[0]//2+acs_w//2,
                   C.shape[1]//2-acs_w//2:C.shape[1]//2+acs_w//2,
                   jj,kk]=V_para[:,:,jj,kk]
                C2[:,:,jj,kk]=tailor_fftn(C2[:,:,jj,kk])
        # C_value = numpy.empty_like(C)
        for mm in range(0,C.shape[0]): # dim 0
            for nn in range(0,C.shape[1]): # dim 1
G = C2[mm,nn,:,:].T # Transpose (non-conjugated) of G
Gh = G.conj().T # hermitian
g = numpy.dot(Gh,G) #construct g matrix for eigen-decomposition
w,v = numpy.linalg.eig(g) # eigen value:w, eigen vector: v
ind = numpy.argmax(numpy.abs(w)) # find the maximum
the_eig = numpy.abs(w[ind]) # find the abs of maximal eigen value
ref_angle=(numpy.sum(v[:,ind])/(numpy.abs(numpy.sum(v[:,ind]))))
v[:,ind] = v[:,ind]/ref_angle # correct phase by summed value
C[mm,nn,:] = v[:,ind]*the_eig
# for jj in xrange(0,L):
# matplotlib.pyplot.subplot(2,2,jj+1)
# matplotlib.pyplot.imshow((C[...,jj].real))
# matplotlib.pyplot.show()
# for jj in xrange(0,L):
# matplotlib.pyplot.subplot(2,2,jj+1)
# matplotlib.pyplot.imshow((input_stack[...,jj].real))
# matplotlib.pyplot.show()
C = C
return C/numpy.max(numpy.abs(C)) # normalize the coil sensitivities
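    # Worked example of the rank-selection rule above (illustrative): with
    # singular values s_blah = [1.0, 0.6, 0.25, 0.08, ...] every value above
    # 0.1 * s_blah[0] = 0.1 is kept, so K = 3 and only the first three right
    # singular vectors enter the eigen-decomposition step.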
def _make_sense(self,u0):
st=self.st
L=numpy.shape(u0)[-1]
u0dims= numpy.ndim(u0)
# st=self.st
L=numpy.shape(u0)[-1]
try:
st['sensemap'] = self._extract_svd(u0,L)
print('run svd')
# st['sensemap']=u0
# for jj in xrange(0,L):
# matplotlib.pyplot.subplot(2,2,jj+1)
# matplotlib.pyplot.imshow((st['sensemap'][...,jj].real))
# matplotlib.pyplot.show()
return st
except:
            print('not running svd')
if u0dims-1 >0:
rows=numpy.shape(u0)[0]
dpss_rows = numpy.kaiser(rows, 100)
dpss_rows = numpy.fft.fftshift(dpss_rows)
dpss_rows[3:-3] = 0.0
dpss_fil = dpss_rows
if self.debug==0:
pass
else:
print('dpss shape',dpss_fil.shape)
if u0dims-1 > 1:
cols=numpy.shape(u0)[1]
dpss_cols = numpy.kaiser(cols, 100)
dpss_cols = numpy.fft.fftshift(dpss_cols)
dpss_cols[3:-3] = 0.0
dpss_fil = appendmat(dpss_fil,cols)
dpss_cols = appendmat(dpss_cols,rows)
dpss_fil=dpss_fil*numpy.transpose(dpss_cols,(1,0))
if self.debug==0:
pass
else:
print('dpss shape',dpss_fil.shape)
if u0dims-1 > 2:
zag = numpy.shape(u0)[2]
dpss_zag = numpy.kaiser(zag, 100)
dpss_zag = numpy.fft.fftshift(dpss_zag)
dpss_zag[3:-3] = 0.0
dpss_fil = appendmat(dpss_fil,zag)
dpss_zag = appendmat(dpss_zag,rows)
dpss_zag = appendmat(dpss_zag,cols)
dpss_fil=dpss_fil*numpy.transpose(dpss_zag,(1,2,0)) # low pass filter
if self.debug==0:
pass
else:
print('dpss shape',dpss_fil.shape)
#dpss_fil=dpss_fil / 10.0
            rms=numpy.sqrt(numpy.mean(u0*u0.conj(),-1)) # root mean square across the coil dimension
st['sensemap']=numpy.ones(numpy.shape(u0),dtype=numpy.complex64)
if self.debug==0:
pass
else:
print('sensemap shape',st['sensemap'].shape, L)
print('u0shape',u0.shape,rms.shape)
# print('L',L)
# print('rms',numpy.shape(rms))
for ll in numpy.arange(0,L):
st['sensemap'][...,ll]=(u0[...,ll]+1e-16)/(rms+1e-16)
if self.debug==0:
pass
else:
print('sensemap shape',st['sensemap'].shape, L)
print('rmsshape', rms.shape)
if self.pyfftw_flag == 1:
if self.debug==0:
pass
else:
print('USING pyfftw and thread is = ',self.threads)
st['sensemap'][...,ll] = pyfftw.interfaces.scipy_fftpack.fftn(st['sensemap'][...,ll],
st['sensemap'][...,ll].shape,
range(0,numpy.ndim(st['sensemap'][...,ll])),
threads=self.threads)
st['sensemap'][...,ll] = st['sensemap'][...,ll] * dpss_fil
st['sensemap'][...,ll] = pyfftw.interfaces.scipy_fftpack.ifftn(st['sensemap'][...,ll],
st['sensemap'][...,ll].shape,
range(0,numpy.ndim(st['sensemap'][...,ll])),
threads=self.threads)
else:
st['sensemap'][...,ll] = scipy.fftpack.fftn(st['sensemap'][...,ll],
st['sensemap'][...,ll].shape,
range(0,numpy.ndim(st['sensemap'][...,ll])))
st['sensemap'][...,ll] = st['sensemap'][...,ll] * dpss_fil
st['sensemap'][...,ll] = scipy.fftpack.ifftn(st['sensemap'][...,ll],
st['sensemap'][...,ll].shape,
range(0,numpy.ndim(st['sensemap'][...,ll])))
# st['sensemap'][...,ll]=scipy.fftpack.ifftn(scipy.fftpack.fftn(st['sensemap'][...,ll])*dpss_fil)
# st['sensemap'] = Normalize(st['sensemap'])
return st
def _create_kspace_sampling_density(self):
#=======================================================================
# RTR: k-space sampled density
# only diagonal elements are relevant (on k-space grids)
#=======================================================================
RTR=self.st['q'] # see __init__() in class "nufft"
return RTR
def _create_laplacian_kernel(self):
#===============================================================================
# # # Laplacian oeprator, convolution kernel in spatial domain
# # related to constraint
#===============================================================================
uker = numpy.zeros(self.st['Kd'][:],dtype=numpy.complex64)
n_dims= numpy.size(self.st['Nd'])
if n_dims == 1:
uker[0] = -2.0
uker[1] = 1.0
uker[-1] = 1.0
elif n_dims == 2:
uker[0,0] = -4.0
uker[1,0] = 1.0
uker[-1,0] = 1.0
uker[0,1] = 1.0
uker[0,-1] = 1.0
elif n_dims == 3:
uker[0,0,0] = -6.0
uker[1,0,0] = 1.0
uker[-1,0,0] = 1.0
uker[0,1,0] = 1.0
uker[0,-1,0] = 1.0
uker[0,0,1] = 1.0
uker[0,0,-1] = 1.0
uker =self.emb_fftn(uker, self.st['Kd'][:], range(0,numpy.ndim(uker)))
return uker
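    # Worked 1D example (illustrative): the circular kernel [-2, 1, 0, ..., 0, 1]
    # has the discrete Fourier transform 2*cos(2*pi*k/Kd) - 2, the familiar
    # frequency response of the second-difference (Laplacian) operator; this
    # spectrum is the building block of the k-space denominator used in _k_deconv.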
def _expand_deconv_kernel_dimension(self, uker, L):
# if numpy.size(self.st['Kd']) > 2:
# for dd in range(2,numpy.size(self.st['Kd'])):
# uker = appendmat(uker,self.st['Kd'][dd])
uker = appendmat(uker,L)
return uker
def _expand_RTR(self,RTR,L):
# if numpy.size(self.st['Kd']) > 2:
# for dd in range(2,numpy.size(self.st['Kd'])):
# RTR = appendmat(RTR,self.st['Kd'][dd])
RTR= numpy.reshape(RTR,self.st['Kd'],order='F')
RTR = appendmat(RTR,L)
return RTR
def _update_d(self,u,dd):
out_dd = ()
for jj in range(0,len(dd)) :
out_dd = out_dd + (get_Diff(u,jj),)
return out_dd
def _update_b(self, bb, dd, xx):
ndims=len(bb)
cc=numpy.empty(bb[0].shape)
out_bb=()
for pj in range(0,ndims):
cc=bb[pj]+dd[pj]-xx[pj]
out_bb=out_bb+(cc,)
return out_bb
def _create_mask(self):
st=self.st
# st['mask']=numpy.ones(st['Nd'],dtype=numpy.float32)
# st['mask2']=numpy.ones(st['Nd'],dtype=numpy.float32)
n_dims= numpy.size(st['Nd'])
sp_rat =0.0
        for di in range(0,n_dims):
            sp_rat = sp_rat + (st['Nd'][di]/2)**2
        sp_rat = sp_rat**0.5
        x = numpy.ogrid[[slice(0, st['Nd'][_ss]) for _ss in range(0,n_dims)]]
        tmp = 0
        for di in range(0,n_dims):
            tmp = tmp + ( (x[di] - st['Nd'][di]/2.0)/(st['Nd'][di]/2.0) )**2
tmp = (1.0*tmp)**0.5
# indx = tmp/sp_rat >=1.1
# st['mask'][indx] =0.0
st['mask'] = 1.0/(1.0+numpy.exp( (tmp-1.05)/0.005))
st['mask2'] =1.0/(1.0+numpy.exp( (tmp-1.025)/0.005))
# matplotlib.pyplot.imshow( st['mask'].real)
# matplotlib.pyplot.show()
return st
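    # Worked numbers for the mask above (illustrative): at normalized radius
    # tmp = 1.05 the logistic term is 1/(1 + exp(0)) = 0.5, at tmp = 1.0 it is
    # ~1.0 (exp(-10) is negligible) and at tmp = 1.1 it is ~0.0, so the mask
    # rolls off smoothly just outside the nominal field of view.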
# def _create_mask(self):
# st=self.st
#
# st['mask']=numpy.ones(st['Nd'],dtype=numpy.float64)
# n_dims= numpy.size(st['Nd'])
#
# sp_rat =0.0
# for di in range(0,n_dims):
# sp_rat = sp_rat + (st['Nd'][di]/2)**2
#
# x = numpy.ogrid[[slice(0, st['Nd'][_ss]) for _ss in range(0,n_dims)]]
#
# tmp = 0
# for di in range(0,n_dims):
# tmp = tmp + ( x[di] - st['Nd'][di]/2 )**2
# indx = tmp/sp_rat >=1.0/n_dims
#
# st['mask'][indx] =0.0
#
#
# return st
def forwardbackward2(self,x): # pseudo_inverse second order
st=self.st
Nd = st['Nd']
# Kd = st['Kd'] # unused
# dims = numpy.shape(x) #unused
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# checker
checker(x,Nd)
if numpy.ndim(x) == dd:
Lprod = 1
x = numpy.reshape(x,Nd+(1,),order='F')
elif numpy.ndim(x) > dd: # multi-channel data
Lprod = numpy.size(x)/numpy.prod(Nd)
Lprod = Lprod.astype(int)
'''
Now transform Nd grids to Kd grids(not be reshaped)
'''
# Xk = self.Nd2Kd(x,0) #
#
# if Lprod > 1:
# Xk = numpy.reshape(st['p'].dot(Xk),(st['M'],)+( Lprod,),order='F')
# else:
# Xk = numpy.reshape(st['p'].dot(Xk),(st['M'],1),order='F')
for ii in range(0,Lprod):
# Xk[...,ii] = Xk[...,ii]*st['q'][...,0]
x[...,ii] = x[...,ii]/st['sn'][...,0]
Xk = self.forward(x)
# Xk = self.Nd2Kd(x,0)
Xk = self.f - Xk
err2 = checkmax(Xk, self.debug)/checkmax(self.f, self.debug)
'''
Now transform Kd grids to Nd grids(not be reshaped)
'''
Xk = st['p'].getH().dot(Xk)
x1= self.Kd2Nd(Xk,0) #
for ii in range(0,Lprod):
# Xk[...,ii] = Xk[...,ii]*st['q'][...,0]
Xk[...,ii] = Xk[...,ii]*(st['q'][...,0]**0.5+1e-1)/(st['q'][...,0]+1e-1)
x2= self.Kd2Nd(Xk,0) #
for ii in range(0,Lprod):
# Xk[...,ii] = Xk[...,ii]*st['q'][...,0]
# x1[...,ii] = x1[...,ii]/st['sn'][...,0]
x2[...,ii] = x2[...,ii]/(st['sn'][...,0])
# x= x1 - x2
# checker(x,Nd) # check output
return x1,x2,err2
def adjoint(self,f):
'''
        adjoint operator to calculate A^H * y (conjugate transpose applied to the data)
'''
st = self.st
return self.backward(f)*st['sensemap'].conj()#/(st['sensemap'].conj()*st['sensemap'] + 1e-2)
# self.u=1.5*self.u/numpy.max(numpy.real(self.u[:]))
def _external_update(self,u, uf, u0, u_k_1, outer): # overload the update function
tmpuf=self.forwardbackward(
u*self.st['sensemap'])*self.st['sensemap'].conj()
if self.st['senseflag'] == 1:
tmpuf=CombineMulti(tmpuf,-1)
err = (checkmax(tmpuf,self.debug) -checkmax(u0,self.debug) )/checkmax(u0,self.debug)
if outer < 1:
x1, x2, err2=self.forwardbackward2(u*self.st['sensemap'])
x1 = x1*self.st['sensemap'].conj()
x2 = x2*self.st['sensemap'].conj()/(self.st['sensemap'].conj()*self.st['sensemap']+1e-2)
tmpuf_order2 = x1 - x2
if self.st['senseflag'] == 1:
tmpuf_order2 = CombineMulti(tmpuf_order2,-1)
self.err2 = 1.0#/(1.0+numpy.exp(0.4*(1.0e-7+0.3 - uf)))
# matplotlib.pyplot.imshow(self.err2[...,0].real,norm = norm)
# matplotlib.pyplot.show()
# matplotlib.pyplot.imshow(tmpuf_order2[...,0].real,norm = norm)
# matplotlib.pyplot.show()
# matplotlib.pyplot.imshow(tmpuf_order2[...,0].real*self.err2[...,0].real,norm = norm)
# matplotlib.pyplot.show()
else:
tmpuf_order2 = 0.0
self.err2 = 0.0
err2 = 0.0
# if outer <= 1:
# self.err2 = 1.0
# if err2 > self.err2:
# self.err2 = 0.0
# if err2 > 1:
# err2 = 1.0/err2
print('err2 = ',err2)
# r = (u0 - tmpuf)*(1.0-self.err2) - tmpuf_order2*self.err2#/(outer+1)
r = (u0 - tmpuf) - tmpuf_order2*self.err2#/(outer+1)
# self.err2 = err2
# r = u0 - tmpuf
p = r
# err = (checkmax(tmpuf)- checkmax(u0))/checkmax(u0)
err= numpy.abs(err)
if self.debug==0:
pass
else:
print('err',err,self.err)
# if (err < self.err):
# uf = uf+p*err*0.1
if numpy.abs(err) < numpy.abs(self.err):
uf = uf + p#*err*(outer+1)
self.err = err
u_k_1 = u
else:
err = self.err
if self.debug==0:
pass
else:
print('no function')
u = u_k_1
murf = uf
if self.debug==0:
pass
else:
print('leaving ext_update')
return (u, murf, uf, u_k_1)
def show_3D():
import mayavi.mlab
raw = numpy.load('phantom_3D_128_128_128.npy')
reconreal = numpy.load('reconreal.npy')
blurreal = numpy.load('blurreal.npy')
reconreal[0:,0:80,0:64]=0
raw[0:,0:80,0:64]=0
blurreal[0:,0:80,0:64]=0
mayavi.mlab.contour3d(raw, contours=4, transparent=True)
mayavi.mlab.show()
mayavi.mlab.contour3d(blurreal, contours=4, transparent=True)
mayavi.mlab.show()
mayavi.mlab.contour3d(reconreal, contours=4, transparent=True)
mayavi.mlab.show()
def test_3D():
cm = matplotlib.cm.gray
    # load raw data, which is a 3D Shepp-Logan phantom
raw = numpy.load('phantom_3D_128_128_128.npy')*2.0
# numpy.save('testfile.npy',raw)
# raw = numpy.load('testfile.npy')
# demonstrate the 64th slice
matplotlib.pyplot.imshow(raw[:,:,64],cmap=cm)
matplotlib.pyplot.show()
print('max.image',numpy.max(raw[:]))
# load 3D k-space trajectory (sparse)
om = numpy.loadtxt('om3D2.txt')
print('omshape',numpy.shape(om))
# image dimension is 3D isotropic
Nd=(128,128,128)
Kd=(128,128,128)
# Note: sparse sampling works best for Jd = 1
Jd=(1,1,1)
# create Nufft Object
MyNufftObj = pynufft(om, Nd, Kd, Jd)
# create data
K_data=MyNufftObj.forward(raw)
# regridding and blurred images
image_blur = MyNufftObj.backward(K_data)[...,0]
    # sense recon is left enabled here (senseflag = 1); set it to 0 to disable it
MyNufftObj.st['senseflag']=1
# Now doing the reconstruction
# import pp
# job_server = pp.Server()
#
# f1=job_server.submit(MyNufftObj.inverse,(K_data, 1.0, 0.1, 0.01,3, 5),
# modules = ('numpy','pyfftw','pynufft'),globals=globals())
# # f2=job_server.submit(MyNufftObj.inverse,(numpy.sqrt(K_data)*10+(0.0+0.1j), 1.0, 0.05, 0.01,3, 20),
# # modules = ('numpy','pyfftw','pynufft'),globals=globals())
#
# image1 = f1()
# # image2 = f2()
image1 = MyNufftObj.inverse(K_data, 1.0, 0.1, 0.01,10, 10)
# image1 = MyNufftObj.inverse(K_data,1.0, 0.05, 0.001, 1,10)
# matplotlib.pyplot.subplot(2,3,1)
# matplotlib.pyplot.imshow(raw[:,:,64],cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,2)
# matplotlib.pyplot.imshow(image_blur[:,:,64].real,cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,3)
# matplotlib.pyplot.imshow((image2[:,:,64].real),cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,4)
# matplotlib.pyplot.imshow(raw[:,:,96],cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,5)
# matplotlib.pyplot.imshow(image_blur[:,:,96].real,cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,6)
# matplotlib.pyplot.imshow((image2[:,:,96].real),cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.show()
matplotlib.pyplot.subplot(2,3,1)
matplotlib.pyplot.imshow(raw[:,:,64],cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,2)
matplotlib.pyplot.imshow(image_blur[:,:,64].real,cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,3)
matplotlib.pyplot.imshow((image1[:,:,64].real),cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,4)
matplotlib.pyplot.imshow(raw[:,:,96],cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,5)
matplotlib.pyplot.imshow(image_blur[:,:,96].real,cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,6)
matplotlib.pyplot.imshow((image1[:,:,96].real),cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.show()
numpy.save('blurreal.npy',image_blur.real)
numpy.save('reconreal.npy',image1.real)
# mayavi.mlab.imshow()
def test_2D():
import numpy
import matplotlib#.pyplot
cm = matplotlib.cm.gray
# load example image
image = numpy.loadtxt('phantom_256_256.txt')
image[128,128]= 1.0
Nd =(256,256) # image space size
Kd =(512,512) # k-space size
Jd =(6,6) # interpolation size
# load k-space points
om = numpy.loadtxt('om.txt')
# om = numpy.loadtxt('om.gold')
#create object
NufftObj = pynufft(om, Nd,Kd,Jd)
# simulate "data"
data= NufftObj.forward(image )
# now get the original image
    # reconstruct the image with constraint1 = 2.0, constraint2 = 0.01,
    # 2 inner iterations and 10 outer iterations (matching the inverse() call below)
NufftObj.st['senseflag'] = 1
image_recon = NufftObj.inverse(data, 1.0, 2.0, 0.01,2, 10)
image_blur = NufftObj.backward(data)
image_recon = Normalize(image_recon)
matplotlib.pyplot.plot(om[:,0],om[:,1],'x')
matplotlib.pyplot.show()
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
norm2=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0e-1)
# display images
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.imshow(image,
norm = norm,cmap =cm,interpolation = 'nearest')
matplotlib.pyplot.title('true image')
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.imshow(image_recon.real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('recovered image')
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.imshow(image_blur[:,:,0].real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('blurred image')
matplotlib.pyplot.subplot(2,2,4)
matplotlib.pyplot.imshow(image_recon.real-image,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('residual error')
matplotlib.pyplot.show()
def test_1D():
# import several modules
import numpy
import matplotlib#.pyplot
#create 1D curve from 2D image
image = numpy.loadtxt('phantom_256_256.txt')
image = image[:,128]
#determine the location of samples
om = numpy.loadtxt('om1D.txt')[0:192]
om = numpy.reshape(om,(numpy.size(om),1),order='F')
# reconstruction parameters
Nd =(256,) # image space size
Kd =(256,) # k-space size
Jd =(1,) # interpolation size
# initiation of the object
NufftObj = pynufft(om, Nd,Kd,Jd)
# simulate "data"
data= NufftObj.forward(image )
#adjoint(reverse) of the forward transform
image_blur= NufftObj.backward(data)[:,0]
#inversion of data
image_recon = NufftObj.inverse(data, 1.0, 0.1, 0.001,10,10)
#Showing histogram of sampling locations
# matplotlib.pyplot.hist(om,20)
# matplotlib.pyplot.title('histogram of the sampling locations')
# matplotlib.pyplot.show()
#show reconstruction
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.plot(image)
matplotlib.pyplot.title('original')
matplotlib.pyplot.ylim([0,1])
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.plot(image_recon.real)
matplotlib.pyplot.title('recon')
matplotlib.pyplot.ylim([0,1])
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.plot(image_blur.real)
matplotlib.pyplot.title('blurred')
matplotlib.pyplot.subplot(2,2,4)
matplotlib.pyplot.plot(image_recon.real - image)
matplotlib.pyplot.axis([0 ,256 , -0.1, 0.1])
matplotlib.pyplot.title('residual')
# matplotlib.pyplot.subplot(2,2,4)
# matplotlib.pyplot.plot(numpy.abs(data))
matplotlib.pyplot.show()
# def test_Dx():
# u = numpy.ones((128,128,128,1),dtype = numpy.complex64)
def test_2D_multiprocessing():
import numpy
import matplotlib.pyplot
import copy
cm = matplotlib.cm.gray
# load example image
image = numpy.loadtxt('phantom_256_256.txt')
image[128,128]= 1.0
Nd =(256,256) # image space size
Kd =(512,512) # k-space size
Jd =(6,6) # interpolation size
# load k-space points
om = numpy.loadtxt('om.txt')
#create object
NufftObj = pynufft(om, Nd,Kd,Jd)
NewObj = copy.deepcopy(NufftObj)
# simulate "data"
data= NufftObj.forward(image )
# data2=data.copy()
# data2 =numpy.sqrt(data2)*10+(0.0+0.1j)
# now get the original image
    # reconstruct the image with constraint1 = 0.05, constraint2 = 0.01,
    # 3 inner iterations and 20 outer iterations (matching the inverse() calls below)
import pp
job_server = pp.Server()
f1=job_server.submit(NewObj.inverse,(data, 1.0, 0.05, 0.01,3, 20),
modules = ('numpy','pyfftw','pynufft'),globals=globals())
f2=job_server.submit(NewObj.inverse,(numpy.sqrt(data)*10+(0.0+0.1j), 1.0, 0.05, 0.01,3, 20),
modules = ('numpy','pyfftw','pynufft'),globals=globals())
image_recon = f1()
image_recon2 = f2()
# image_recon = NewObj.inverse(data, 1.0, 0.05, 0.01,3, 20)
image_blur = NufftObj.backward(data)
image_recon = Normalize(image_recon)
matplotlib.pyplot.plot(om[:,0],om[:,1],'x')
matplotlib.pyplot.show()
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
norm2=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0e-1)
# display images
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.imshow(image,
norm = norm,cmap =cm,interpolation = 'nearest')
matplotlib.pyplot.title('true image')
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.imshow(image_recon.real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('recovered image')
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.imshow(image_blur[:,:,0].real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('blurred image')
matplotlib.pyplot.subplot(2,2,4)
matplotlib.pyplot.imshow(image_recon.real-image,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('residual error')
matplotlib.pyplot.show()
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.imshow(image,
norm = norm,cmap =cm,interpolation = 'nearest')
matplotlib.pyplot.title('true image')
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.imshow(image_recon2.real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('recovered image')
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.imshow(image_blur[:,:,0].real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('blurred image')
matplotlib.pyplot.subplot(2,2,4)
    matplotlib.pyplot.imshow(image_recon2.real-image,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('residual error')
matplotlib.pyplot.show()
if __name__ == '__main__':
import cProfile
test_1D()
# test_2D()
test_3D()
# show_3D()
# test_Dx()
# cProfile.run('test_3D()')
# cProfile.run('test_2D()')
# cProfile.run('test_2D_multiprocessing()')
|
jyhmiinlin/cineFSE
|
CsTransform/good_3d_pynufft.py
|
Python
|
gpl-3.0
| 59,360
|
[
"Mayavi"
] |
8f78a9d9a481b9ed115a6978b99d977a3bf75a98828d12d9a6af30afa8214b78
|
from setuptools import setup, find_packages
setup(
name = 'fqn',
version = '0.7.1',
description = 'Functions that can retrieve objects using Fully Qualified Names',
author = 'Brian Lauber',
author_email = 'constructible.truth@gmail.com',
packages = find_packages(exclude = ["tests", "tests.*"]),
test_suite = 'tests',
tests_require = ["mock>=1.0.0"]
)
|
briandamaged/fqn
|
setup.py
|
Python
|
mit
| 386
|
[
"Brian"
] |
1b33df633c30444c5e31ff76db7da05071775d147f2d5f3726f4e8210cba8463
|
# Created by DrLecter, based on DraX' scripts
# This script is part of the L2J Official Datapack Project
# Visit us at http://www.l2jdp.com/
# See readme-dp.txt and gpl.txt for license and distribution details
# Let us know if you did not receive a copy of such files.
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "elven_human_buffers_2"
#print "Elven human buffers 2"
#Quest items
MARK_OF_PILGRIM = 2721
MARK_OF_TRUST = 2734
MARK_OF_HEALER = 2820
MARK_OF_REFORMER = 2821
MARK_OF_LIFE = 3140
#MAXIMILIAN, HOLLINT,ORVEN,SQUILLARI,BERNHARD,SIEGMUND,GREGORY,HALASTER,BARYL,MARIE,RAHORAKI
NPCS=[30120,30191,30857,30905,31276,31321,31279,31755,31968,32095,31336]
#event:[newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item]
#low_ni : level too low and you don't have the quest items
#low_i  : level too low, even though you have the items
#ok_ni  : level ok, but you don't have the quest items
#ok_i   : level ok and you have the items, so the class change takes place
CLASSES = {
"BI":[16,15,0,"16","17","18","19",[MARK_OF_PILGRIM,MARK_OF_TRUST,MARK_OF_HEALER]],
"PH":[17,15,0,"20","21","22","23",[MARK_OF_PILGRIM,MARK_OF_TRUST,MARK_OF_REFORMER]],
"EE":[30,29,1,"12","13","14","15",[MARK_OF_PILGRIM,MARK_OF_LIFE,MARK_OF_HEALER]],
}
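#Reading one entry (illustrative note, not from the original script): the "BI" row
#means a human (race 0) cleric (class id 15) at level 40+ holding the three listed
#marks is changed to class id 16 (presumably Bishop); otherwise page
#30120-16/17/18.htm is shown depending on whether the level or the items are missing.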
#Messages
default = "No Quest"
def change(st,player,newclass,items) :
for item in items :
st.takeItems(item,1)
st.playSound("ItemSound.quest_fanfare_2")
player.setClassId(newclass)
player.setBaseClass(newclass)
player.broadcastUserInfo()
return
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAdvEvent (self,event,npc,player) :
npcId = npc.getNpcId()
htmltext = default
suffix = ''
st = player.getQuestState(qn)
if not st : return
race = player.getRace().ordinal()
classid = player.getClassId().getId()
level = player.getLevel()
if npcId not in NPCS : return
if not event in CLASSES.keys() :
return event
else :
newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item=CLASSES[event]
if race == req_race and classid == req_class :
item = True
for i in req_item :
if not st.getQuestItemsCount(i):
item = False
if level < 40 :
suffix = low_i
if not item :
suffix = low_ni
else :
if not item :
suffix = ok_ni
else :
suffix = ok_i
change(st,player,newclass,req_item)
st.exitQuest(1)
htmltext = "30120-"+suffix+".htm"
return htmltext
def onTalk (self,npc,player):
st = player.getQuestState(qn)
npcId = npc.getNpcId()
race = player.getRace().ordinal()
classId = player.getClassId()
id = classId.getId()
htmltext = default
if player.isSubClassActive() :
st.exitQuest(1)
return htmltext
        # Humans and Elves only
if npcId in NPCS :
htmltext = "30120"
if race in [0,1] :
if id == 29 : # oracle
return htmltext+"-01.htm"
elif id == 15 : # cleric
return htmltext+"-05.htm"
elif classId.level() == 0 : # first occupation change not made yet
htmltext += "-24.htm"
elif classId.level() >= 2 : # second/third occupation change already made
htmltext += "-25.htm"
else :
htmltext += "-26.htm" # other conditions
else :
htmltext += "-26.htm" # other races
st.exitQuest(1)
return htmltext
QUEST = Quest(99992,qn,"village_master")
CREATED = State('Start', QUEST)
QUEST.setInitialState(CREATED)
for npc in NPCS:
QUEST.addStartNpc(npc)
QUEST.addTalkId(npc)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/village_master/elven_human_buffers_2/__init__.py
|
Python
|
gpl-3.0
| 3,935
|
[
"VisIt"
] |
fbd4ce6b2ed8d8112e993b90c97beb7706a1271c42e207c6fb002d4d281ca77a
|
""" DIRAC.Resources.Catalog package """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
yujikato/DIRAC
|
src/DIRAC/Resources/Catalog/__init__.py
|
Python
|
gpl-3.0
| 149
|
[
"DIRAC"
] |
7d1cf88bf28f4a52dcdd801bac73b8e04983cdc6afbe47440de4abff07344d43
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s
short_description: Manage Kubernetes (K8s) objects
version_added: "2.6"
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates.
- Access to the full range of K8s APIs.
- Use the M(k8s_facts) module to obtain a list of items about an object of type C(kind)
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
options:
merge_type:
description:
    - Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
- For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
want to use C(merge) if you see "strategic merge patch format is not supported"
- See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
- Requires openshift >= 0.6.2
- If more than one merge_type is given, the merge_types will be tried in order
- If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
is simply C(strategic-merge).
choices:
- json
- merge
- strategic-merge
type: list
version_added: "2.7"
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Create a Service object by reading the definition from a file
k8s:
state: present
src: /testing/service.yml
- name: Remove an existing Service object
k8s:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s:
state: present
src: /testing/deployment.yml
- name: Read definition file from the Ansible controller file system
k8s:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') }}"
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when multiple yaml documents are passed to src or resource_definition
returned: when resource_definition or src contains list of objects
type: list
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
KubernetesRawModule().execute_module()
if __name__ == '__main__':
main()
|
caphrim007/ansible
|
lib/ansible/modules/clustering/k8s/k8s.py
|
Python
|
gpl-3.0
| 4,928
|
[
"Galaxy"
] |
50cc0c9eef3931b255b33106242adcff4b1bc563409f084f9e7c03ea48b4373a
|
# -*- coding: utf-8 -*-
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2014 nemunaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Progressive display of very long messages"""
import logging
from message import TextMessage, DirectAsk
from hooks import hook
nemubotversion = 3.4
logger = logging.getLogger("nemubot.response")
class Response:
def __init__(self, message=None, channel=None, nick=None, server=None,
nomore="No more message", title=None, more="(suite) ",
count=None, shown_first_count=-1, line_treat=None):
self.nomore = nomore
self.more = more
self.line_treat = line_treat
self.rawtitle = title
self.server = server
self.messages = list()
self.alone = True
if message is not None:
self.append_message(message, shown_first_count=shown_first_count)
self.elt = 0 # Next element to display
self.channel = channel
self.nick = nick
self.count = count
@property
def receivers(self):
if self.channel is None:
if self.nick is not None:
return [self.nick]
return list()
elif isinstance(self.channel, list):
return self.channel
else:
return [self.channel]
def append_message(self, message, title=None, shown_first_count=-1):
if type(message) is str:
message = message.split('\n')
if len(message) > 1:
for m in message:
self.append_message(m)
return
else:
message = message[0]
if message is not None and len(message) > 0:
if shown_first_count >= 0:
self.messages.append(message[:shown_first_count])
message = message[shown_first_count:]
self.messages.append(message)
self.alone = self.alone and len(self.messages) <= 1
if isinstance(self.rawtitle, list):
self.rawtitle.append(title)
elif title is not None:
rawtitle = self.rawtitle
self.rawtitle = list()
for osef in self.messages:
self.rawtitle.append(rawtitle)
self.rawtitle.pop()
self.rawtitle.append(title)
def append_content(self, message):
if message is not None and len(message) > 0:
if self.messages is None or len(self.messages) == 0:
self.messages = list(message)
self.alone = True
else:
self.messages[len(self.messages)-1] += message
self.alone = self.alone and len(self.messages) <= 1
@property
def empty(self):
return len(self.messages) <= 0
@property
def title(self):
if isinstance(self.rawtitle, list):
return self.rawtitle[0]
else:
return self.rawtitle
def pop(self):
self.messages.pop(0)
self.elt = 0
if isinstance(self.rawtitle, list):
self.rawtitle.pop(0)
if len(self.rawtitle) <= 0:
self.rawtitle = None
def accept(self, visitor):
visitor.visit(self.next_response())
def next_response(self, maxlen=440):
if self.nick:
return DirectAsk(self.nick,
self.get_message(maxlen - len(self.nick) - 2),
server=None, to=self.receivers)
else:
return TextMessage(self.get_message(maxlen),
server=None, to=self.receivers)
def get_message(self, maxlen):
if self.alone and len(self.messages) > 1:
self.alone = False
if self.empty:
if hasattr(self.nomore, '__call__'):
res = self.nomore(self)
if res is None:
return "No more message"
elif isinstance(res, Response):
self.__dict__ = res.__dict__
elif isinstance(res, list):
self.messages = res
elif isinstance(res, str):
self.messages.append(res)
else:
raise Exception("Type returned by nomore (%s) is not "
"handled here." % type(res))
                return self.get_message(maxlen)
else:
return self.nomore
if self.line_treat is not None and self.elt == 0:
self.messages[0] = (self.line_treat(self.messages[0])
.replace("\n", " ").strip())
msg = ""
if self.title is not None:
if self.elt > 0:
msg += self.title + " " + self.more + ": "
else:
msg += self.title + ": "
elif self.elt > 0:
msg += "[…] "
elts = self.messages[0][self.elt:]
if isinstance(elts, list):
for e in elts:
if len(msg) + len(e) > maxlen - 3:
msg += "[…]"
self.alone = False
return msg
else:
msg += e + ", "
self.elt += 1
self.pop()
return msg[:len(msg)-2]
else:
if len(elts.encode()) <= maxlen:
self.pop()
if self.count is not None:
return msg + elts + (self.count % len(self.messages))
else:
return msg + elts
else:
words = elts.split(' ')
if len(words[0].encode()) > maxlen - len(msg.encode()):
self.elt += maxlen - len(msg.encode())
return msg + elts[:self.elt] + "[…]"
for w in words:
if len(msg.encode()) + len(w.encode()) >= maxlen:
msg += "[…]"
self.alone = False
return msg
else:
msg += w + " "
self.elt += len(w) + 1
self.pop()
return msg
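# Usage sketch (illustrative, not from the original module): a long reply can be
# wrapped in a Response and handed out chunk by chunk; each get_message() call
# consumes up to maxlen characters and the "more" command fetches the rest.
#
#     res = Response("some very long text ...", channel="#chan", title="wiki")
#     res.get_message(440)   # -> "wiki: some very long text [...]"
#     res.get_message(440)   # -> "wiki (suite) : ...remaining words..."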
SERVERS = dict()
@hook("all_post")
def parseresponse(res):
# TODO: handle inter-bot communication NOMORE
# TODO: check that the response is not the one already saved
if isinstance(res, Response):
if res.server not in SERVERS:
SERVERS[res.server] = dict()
for receiver in res.receivers:
if receiver in SERVERS[res.server]:
nw, bk = SERVERS[res.server][receiver]
else:
nw, bk = None, None
if nw != res:
SERVERS[res.server][receiver] = (res, bk)
return res
@hook("cmd_hook", "more")
def cmd_more(msg):
"""Display next chunck of the message"""
res = list()
if msg.server in SERVERS:
for receiver in msg.to_response:
if receiver in SERVERS[msg.server]:
nw, bk = SERVERS[msg.server][receiver]
if nw is not None and not nw.alone:
bk = nw
SERVERS[msg.server][receiver] = None, bk
if bk is not None:
res.append(bk)
return res
@hook("cmd_hook", "next")
def cmd_next(msg):
"""Display the next information include in the message"""
res = list()
if msg.server in SERVERS:
for receiver in msg.to_response:
if receiver in SERVERS[msg.server]:
nw, bk = SERVERS[msg.server][receiver]
if nw is not None and not nw.alone:
bk = nw
SERVERS[msg.server][receiver] = None, bk
bk.pop()
if bk is not None:
res.append(bk)
return res
|
Bobobol/nemubot-1
|
modules/more.py
|
Python
|
agpl-3.0
| 8,516
|
[
"VisIt"
] |
2f27f7b334fba49ee178e3c014f9d21540fa52f9b8c8f9b57fa8bba581a2c2e5
|
##########################################################################
# This illustrates some of the capabilities for spine placement.
# It has spines whose size increase with distance from the soma.
# Further, the angular direction of the spines spirals around the dendrite.
##########################################################################
import moose
import rdesigneur as rd
rdes = rd.rdesigneur(
cellProto = [['ballAndStick', 'elec', 10e-6, 10e-6, 2e-6, 300e-6, 50]],
spineProto = [['makePassiveSpine()', 'spine']],
spineDistrib = [['spine', '#dend#', '3e-6', '-1e-6', '1+p*2e4', '0', 'p*6.28e7', '0']],
stimList = [['soma', '1', '.', 'inject', '(t>0.02) * 1e-9' ]],
moogList = [['#', '1', '.', 'Vm', 'Soma potential']]
)
rdes.buildModel()
moose.reinit()
rdes.displayMoogli( 0.0002, 0.025, 0.02 )
|
BhallaLab/moose-examples
|
tutorials/Rdesigneur/ex9.3_spiral_spines.py
|
Python
|
gpl-2.0
| 838
|
[
"MOOSE"
] |
99089720df5e69449881ae004840a31a7b7418d3fd213a447b44e811d02e503f
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import ray
import ray._private.services
import uuid
import random
from packaging import version
from bigdl.orca.data import XShards
from bigdl.orca.ray import RayContext
import logging
logger = logging.getLogger(__name__)
class LocalStore:
def __init__(self):
self.partitions = {}
def upload_shards(self, part_shard_id, shard_ref_list):
partition_idx, shard_idx = part_shard_id
if partition_idx not in self.partitions:
self.partitions[partition_idx] = {}
shard_ref = shard_ref_list[0]
self.partitions[partition_idx][shard_idx] = shard_ref
return 0
def upload_partition(self, partition_id, partition_ref_list):
self.partitions[partition_id] = partition_ref_list[0]
return 0
def get_shards(self, part_shard_id):
partition_idx, shard_idx = part_shard_id
        return self.partitions[partition_idx][shard_idx]
def get_partition_ref(self, partition_id):
"""
return a list of shard_refs or a part_ref
"""
part = self.partitions[partition_id]
if isinstance(part, dict):
partition = []
for shard_idx in range(len(part)):
shard = part[shard_idx]
partition.append(shard)
return partition
else:
return part
def get_partition(self, partition_id):
"""
return partition_data
"""
return ray.get(self.get_partition_ref(partition_id))
def get_partitions_refs(self):
"""
return a dictionary of partitions, each partition is a list of shard_refs or a part_ref
"""
result = {}
for k in self.partitions.keys():
result[k] = self.get_partition_ref(k)
return result
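# Usage sketch (illustrative, not from the original source): LocalStore is meant
# to be created as a named Ray actor pinned to one node, mirroring how
# write_to_ray and _from_spark_xshards_ray_api use it below:
#
#     store = ray.remote(num_cpus=0)(LocalStore).options(name="partition:<uuid>:<node>").remote()
#     ray.get(store.upload_partition.remote(0, [ray.put(partition_data)]))
#     partition_data = ray.get(store.get_partition.remote(0))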
def init_ray_if_not(redis_address, redis_password):
if not ray.is_initialized():
init_params = dict(
address=redis_address,
_redis_password=redis_password,
ignore_reinit_error=True
)
if version.parse(ray.__version__) >= version.parse("1.4.0"):
init_params["namespace"] = "az"
ray.init(**init_params)
def write_to_ray(idx, partition, redis_address, redis_password, partition_store_names):
init_ray_if_not(redis_address, redis_password)
ip = ray._private.services.get_node_ip_address()
local_store_name = None
for name in partition_store_names:
if name.endswith(ip):
local_store_name = name
break
if local_store_name is None:
local_store_name = random.choice(partition_store_names)
local_store = ray.get_actor(local_store_name)
    # Directly calling ray.put would make this driver the owner of the object;
    # when the spark job finishes, the driver might exit and make the object
    # eligible for deletion, so the local store actor is set as the owner instead.
result = []
for shard_id, shard in enumerate(partition):
shard_ref = ray.put(shard, _owner=local_store)
result.append(local_store.upload_shards.remote((idx, shard_id), [shard_ref]))
is_empty = len(result) == 0
if is_empty:
partition_ref = ray.put([], _owner=local_store)
result.append(local_store.upload_partition.remote(idx, [partition_ref]))
logger.warning(f"Partition {idx} is empty.")
ray.get(result)
return [(idx, local_store_name.split(":")[-1], local_store_name)]
def get_from_ray(idx, redis_address, redis_password, idx_to_store_name):
init_ray_if_not(redis_address, redis_password)
local_store_handle = ray.get_actor(idx_to_store_name[idx])
partition = ray.get(local_store_handle.get_partition.remote(idx))
return partition
class RayXShards(XShards):
def __init__(self, uuid, id_ip_store_rdd, partition_stores):
self.uuid = uuid
self.rdd = id_ip_store_rdd
self.partition_stores = partition_stores
self.id_ip_store = self.rdd.collect()
self.partition2store_name = {idx: store_name for idx, _, store_name in self.id_ip_store}
self.partition2ip = {idx: ip for idx, ip, _ in self.id_ip_store}
def transform_shard(self, func, *args):
raise Exception("Transform is not supported for RayXShards")
def num_partitions(self):
return len(self.partition2ip)
def collect(self):
# return a list of shards
partitions = self.collect_partitions()
data = [item for part in partitions for item in part]
return data
def get_partition_refs(self):
"""
Get a list of partition_refs, each partition_ref is a list of shard_refs or a partition_ref
"""
# part_shard_refs is a list of partitions, each partition is a dictionary,
# with key of partition index and value of (list of shard_refs or part_ref)
part_shard_refs = ray.get([local_store.get_partitions_refs.remote()
for local_store in self.partition_stores.values()])
result = {}
for part in part_shard_refs:
result.update(part)
return [result[idx] for idx in range(self.num_partitions())]
def get_refs(self):
"""
Flatten get_partition_refs. Get a list of partition_refs or shard_refs
"""
partition_refs = self.get_partition_refs()
return [ref for partition_ref in partition_refs for ref in partition_ref]
def collect_partitions(self):
part_refs = self.get_partition_refs()
return [ray.get(part_ref) for part_ref in part_refs]
def to_spark_xshards(self):
from bigdl.orca.data import SparkXShards
ray_ctx = RayContext.get()
sc = ray_ctx.sc
address = ray_ctx.redis_address
password = ray_ctx.redis_password
num_parts = self.num_partitions()
partition2store = self.partition2store_name
rdd = self.rdd.mapPartitionsWithIndex(
lambda idx, _: get_from_ray(idx, address, password, partition2store))
# the reason why we trigger computation here is to ensure we get the data
# from ray before the RayXShards goes out of scope and the data get garbage collected
from pyspark.storagelevel import StorageLevel
rdd = rdd.cache()
result_rdd = rdd.map(lambda x: x) # sparkxshards will uncache the rdd when gc
spark_xshards = SparkXShards(result_rdd)
return spark_xshards
def _get_multiple_partition_refs(self, ids):
refs = []
for idx in ids:
local_store_handle = self.partition_stores[self.partition2store_name[idx]]
partition_ref = local_store_handle.get_partition.remote(idx)
refs.append(partition_ref)
return refs
def transform_shards_with_actors(self, actors, func):
"""
Assign each partition_ref (referencing a list of shards) to an actor,
and run func for each actor and partition_ref pair.
Actors should have a `get_node_ip` method to achieve locality scheduling.
The `get_node_ip` method should call ray._private.services.get_node_ip_address()
to return the correct ip address.
The `func` should take an actor and a partition_ref as argument and
invoke some remote func on that actor and return a new partition_ref.
Note that if you pass partition_ref directly to actor method, ray
will resolve that partition_ref to the actual partition object, which
is a list of shards. If you pass partition_ref indirectly through other
object, say [partition_ref], ray will send the partition_ref itself to
actor, and you may need to use ray.get(partition_ref) on actor to retrieve
the actor partition objects.
"""
assigned_partitions, actor_ips, assigned_actors = self.assign_partitions_to_actors(actors)
assigned_partition_refs = [(part_ids, self._get_multiple_partition_refs(part_ids))
for part_ids in assigned_partitions]
new_part_id_refs = {
part_id: func(actor, part_ref)
for actor, (part_ids, part_refs) in zip(assigned_actors, assigned_partition_refs)
for part_id, part_ref in zip(part_ids, part_refs)}
actor_ip2part_id = defaultdict(list)
for actor_ip, part_ids in zip(actor_ips, assigned_partitions):
actor_ip2part_id[actor_ip].extend(part_ids)
return RayXShards.from_partition_refs(actor_ip2part_id, new_part_id_refs, self.rdd)
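    # Usage sketch (illustrative; the actor method name is hypothetical): the
    # `func` passed in typically wraps a remote call on the actor, e.g.
    #
    #     def transform_func(actor, partition_ref):
    #         # ray resolves partition_ref to the list of shards on the actor side
    #         return actor.transform.remote(partition_ref)
    #
    #     new_xshards = ray_xshards.transform_shards_with_actors(actors, transform_func)
    #
    # where each actor exposes a get_node_ip() remote method as described above.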
def reduce_partitions_for_actors(self, actors, reduce_partitions_func, return_refs=False):
"""
Evenly allocate partitions for actors and run `reduce_partitions_func` on partitions of each
worker.
Return a list of results, where one result corresponds to one worker.
:param actors: ray actors
:param reduce_partitions_func: Function to run on each ray actor which reduces the
partition refs on the actor to one result_ref. It should take an actor and a list of
partition_refs as argument return a result_ref
        :param return_refs: Whether to return ray object refs or ray objects. If True, return a
            list of ray object refs, otherwise return a list of ray objects. Defaults to False.
"""
assert self.num_partitions() >= len(actors), \
f"Get number of partitions ({self.num_partitions()}) smaller than " \
f"number of actors ({len(actors)}). Please submit an issue to analytics zoo."
assigned_partitions, _, _ = self.assign_partitions_to_actors(actors)
result_refs = []
for actor, part_ids in zip(actors, assigned_partitions):
assigned_partition_refs = self._get_multiple_partition_refs(part_ids)
result_ref = reduce_partitions_func(actor, assigned_partition_refs)
result_refs.append(result_ref)
if return_refs:
return result_refs
results = ray.get(result_refs)
return results
def zip_reduce_shards_with_actors(self, xshards, actors, reduce_partitions_func,
return_refs=False):
assert self.num_partitions() == xshards.num_partitions(),\
"the rdds to be zipped must have the same number of partitions"
assert self.num_partitions() >= len(actors), \
f"Get number of partitions ({self.num_partitions()}) smaller than " \
f"number of actors ({len(actors)}). Please submit an issue to analytics zoo."
assigned_partitions, _, _ = self.assign_partitions_to_actors(actors)
result_refs = []
for actor, part_ids in zip(actors, assigned_partitions):
assigned_partition_refs = self._get_multiple_partition_refs(part_ids)
assigned_partition_refs_other = xshards._get_multiple_partition_refs(part_ids)
result_ref = reduce_partitions_func(actor, assigned_partition_refs,
assigned_partition_refs_other)
result_refs.append(result_ref)
if return_refs:
return result_refs
results = ray.get(result_refs)
return results
def assign_partitions_to_actors(self, actors):
num_parts = self.num_partitions()
if num_parts < len(actors):
logger.warning(f"this rdd has {num_parts} partitions, which is smaller "
f"than actor number ({len(actors)} actors). That could cause "
f"unbalancing workload on different actors. We recommend you to "
f"repartition the rdd for better performance.")
avg_part_num = num_parts // len(actors)
remainder = num_parts % len(actors)
part_id2ip = self.partition2ip.copy()
# the assigning algorithm
# 1. calculate the average partition number per actor avg_part_num, and the number
# of remaining partitions remainder. So there are remainder number of actors got
# avg_part_num + 1 partitions and other actors got avg_part_num partitions.
# 2. loop partitions and assign each according to ip, if no actor with this ip or
# all actors with this ip have been full, this round of assignment failed.
# 3. assign the partitions that failed to be assigned to actors that has full
# todo extract this algorithm to other functions for unit tests.
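        # Worked example (illustrative): with 7 partitions and 3 actors,
        # avg_part_num = 2 and remainder = 1, so one actor ends up with 3
        # partitions and the other two with 2 each; partitions are first matched
        # to actors on the same node ip, and the leftovers are then spread over
        # actors that still have spare capacity.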
actor_ips = []
for actor in actors:
assert hasattr(actor, "get_node_ip"), "each actor should have a get_node_ip method"
actor_ip = actor.get_node_ip.remote()
actor_ips.append(actor_ip)
actor_ips = ray.get(actor_ips)
actor2assignments = [[] for i in range(len(actors))]
ip2actors = {}
for idx, ip in enumerate(actor_ips):
if ip not in ip2actors:
ip2actors[ip] = []
ip2actors[ip].append(idx)
unassigned = []
for part_idx, ip in part_id2ip.items():
assigned = False
if ip in ip2actors:
ip_actors = ip2actors[ip]
for actor_id in ip_actors:
current_assignments = actor2assignments[actor_id]
if len(current_assignments) < avg_part_num:
current_assignments.append(part_idx)
assigned = True
break
elif len(current_assignments) == avg_part_num and remainder > 0:
current_assignments.append(part_idx)
remainder -= 1
assigned = True
break
if not assigned:
unassigned.append((part_idx, ip))
for part_idx, ip in unassigned:
for current_assignments in actor2assignments:
if len(current_assignments) < avg_part_num:
current_assignments.append(part_idx)
break
elif len(current_assignments) == avg_part_num and remainder > 0:
current_assignments.append(part_idx)
remainder -= 1
break
if num_parts < len(actors):
# filter assigned actors
assigned_actors = []
assigned_actor2assignments = []
assigned_actor_ips = []
for actor, assignment, ip in zip(actors, actor2assignments, actor_ips):
if assignment:
assigned_actors.append(actor)
assigned_actor2assignments.append(assignment)
assigned_actor_ips.append(ip)
return assigned_actor2assignments, assigned_actor_ips, assigned_actors
else:
return actor2assignments, actor_ips, actors
@staticmethod
def from_partition_refs(ip2part_id, part_id2ref, old_rdd):
ray_ctx = RayContext.get()
uuid_str = str(uuid.uuid4())
id2store_name = {}
partition_stores = {}
part_id2ip = {}
result = []
for node, part_ids in ip2part_id.items():
name = f"partition:{uuid_str}:{node}"
store = ray.remote(num_cpus=0, resources={f"node:{node}": 1e-4})(LocalStore) \
.options(name=name).remote()
partition_stores[name] = store
for idx in part_ids:
result.append(store.upload_partition.remote(idx, [part_id2ref[idx]]))
id2store_name[idx] = name
part_id2ip[idx] = node
ray.get(result)
new_id_ip_store_rdd = old_rdd.mapPartitionsWithIndex(
lambda idx, _: [(idx, part_id2ip[idx], id2store_name[idx])]).cache()
return RayXShards(uuid_str, new_id_ip_store_rdd, partition_stores)
@staticmethod
def from_spark_xshards(spark_xshards):
return RayXShards._from_spark_xshards_ray_api(spark_xshards)
@staticmethod
def _from_spark_xshards_ray_api(spark_xshards):
ray_ctx = RayContext.get()
address = ray_ctx.redis_address
password = ray_ctx.redis_password
driver_ip = ray._private.services.get_node_ip_address()
uuid_str = str(uuid.uuid4())
resources = ray.cluster_resources()
nodes = []
for key, value in resources.items():
if key.startswith("node:"):
# if running in cluster, filter out driver ip
if key != f"node:{driver_ip}":
nodes.append(key)
# for the case of local mode and single node spark standalone
if not nodes:
nodes.append(f"node:{driver_ip}")
partition_stores = {}
for node in nodes:
name = f"partition:{uuid_str}:{node}"
if version.parse(ray.__version__) >= version.parse("1.4.0"):
store = ray.remote(num_cpus=0, resources={node: 1e-4})(LocalStore)\
.options(name=name, lifetime="detached").remote()
else:
store = ray.remote(num_cpus=0, resources={node: 1e-4})(LocalStore) \
.options(name=name).remote()
partition_stores[name] = store
        # actor creation is async; this is to make sure they have all been started
ray.get([v.get_partitions_refs.remote() for v in partition_stores.values()])
partition_store_names = list(partition_stores.keys())
result_rdd = spark_xshards.rdd.mapPartitionsWithIndex(lambda idx, part: write_to_ray(
idx, part, address, password, partition_store_names)).cache()
result = result_rdd.collect()
id2ip = {}
id2store_name = {}
for idx, ip, local_store_name in result:
id2ip[idx] = ip
id2store_name[idx] = local_store_name
return RayXShards(uuid_str, result_rdd, partition_stores)
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/data/ray_xshards.py
|
Python
|
apache-2.0
| 18,454
|
[
"ORCA"
] |
30ac2d54ddeac51eb25938d3d330d4254ad794b7ea8d0c592dae27117aeea33d
|
import sys
from collections import namedtuple
from ua_parser import user_agent_parser
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
MOBILE_DEVICE_FAMILIES = (
'iPhone',
'iPod',
'Generic Smartphone',
'Generic Feature Phone',
)
MOBILE_OS_FAMILIES = (
'Windows Phone',
'Windows Phone OS', # Earlier versions of ua-parser returns Windows Phone OS
'Symbian OS',
)
TABLET_DEVICE_FAMILIES = (
'iPad',
'BlackBerry Playbook',
'Blackberry Playbook', # Earlier versions of ua-parser returns "Blackberry" instead of "BlackBerry"
'Kindle',
'Kindle Fire',
)
TOUCH_CAPABLE_OS_FAMILIES = (
'iOS',
'Android',
'Windows Phone',
'Windows Phone OS',
'Windows RT',
)
TOUCH_CAPABLE_DEVICE_FAMILIES = (
'BlackBerry Playbook',
'Blackberry Playbook',
'Kindle Fire',
)
def parse_version(major=None, minor=None, patch=None, patch_minor=None):
# Returns version number tuple, attributes will be integer if they're numbers
if major is not None and isinstance(major, string_types):
major = int(major) if major.isdigit() else major
if minor is not None and isinstance(minor, string_types):
minor = int(minor) if minor.isdigit() else minor
if patch is not None and isinstance(patch, string_types):
patch = int(patch) if patch.isdigit() else patch
if patch_minor is not None and isinstance(patch_minor, string_types):
patch_minor = int(patch_minor) if patch_minor.isdigit() else patch_minor
if patch_minor:
return (major, minor, patch, patch_minor)
elif patch:
return (major, minor, patch)
elif minor:
return (major, minor)
elif major:
return (major,)
else:
return tuple()
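# Illustrative examples (not part of the original source):
#     parse_version('12', '1')       -> (12, 1)
#     parse_version('4', '2', '2')   -> (4, 2, 2)
#     parse_version()                -> ()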
Browser = namedtuple('Browser', ['family', 'version', 'version_string'])
def parse_browser(family, major=None, minor=None, patch=None, patch_minor=None):
# Returns a browser object
version = parse_version(major, minor, patch)
version_string = '.'.join([str(v) for v in version])
return Browser(family, version, version_string)
OperatingSystem = namedtuple('OperatingSystem', ['family', 'version', 'version_string'])
def parse_operating_system(family, major=None, minor=None, patch=None, patch_minor=None):
version = parse_version(major, minor, patch)
version_string = '.'.join([str(v) for v in version])
return OperatingSystem(family, version, version_string)
Device = namedtuple('Device', ['family'])
def parse_device(family):
return Device(family)
class UserAgent(object):
def __init__(self, user_agent_string):
ua_dict = user_agent_parser.Parse(user_agent_string)
self.ua_string = user_agent_string
self.os = parse_operating_system(**ua_dict['os'])
self.browser = parse_browser(**ua_dict['user_agent'])
self.device = parse_device(**ua_dict['device'])
def _is_android_tablet(self):
        # Newer Android tablets don't have "Mobile" in their user agent string;
        # older ones like the Galaxy Tab still have "Mobile" even though they are tablets
if 'Mobile Safari' not in self.ua_string:
return True
if 'SCH-' in self.ua_string:
return True
return False
def _is_blackberry_touch_capable_device(self):
# A helper to determine whether a BB phone has touch capabilities
# Blackberry Bold Touch series begins with 99XX
if 'Blackberry 99' in self.device.family:
return True
if 'Blackberry 95' in self.device.family: # BB Storm devices
return True
        if 'Blackberry 96' in self.device.family: # BB Torch devices
return True
return False
@property
def is_tablet(self):
if self.device.family in TABLET_DEVICE_FAMILIES:
return True
if (self.os.family == 'Android' and self._is_android_tablet()):
return True
if self.os.family == 'Windows RT':
return True
return False
@property
def is_mobile(self):
# First check for mobile device families
if self.device.family in MOBILE_DEVICE_FAMILIES:
return True
        # The device is considered mobile if the OS is Android and it is not a tablet.
        # This is not foolproof, but it will have to suffice for now.
if self.os.family == 'Android' and not self.is_tablet:
return True
if self.os.family == 'BlackBerry OS' and self.device.family != 'Blackberry Playbook':
return True
if self.os.family in MOBILE_OS_FAMILIES:
return True
# TODO: remove after https://github.com/tobie/ua-parser/issues/126 is closed
if 'J2ME' in self.ua_string or 'MIDP' in self.ua_string:
return True
return False
@property
def is_touch_capable(self):
# TODO: detect touch capable Nokia devices
if self.os.family in TOUCH_CAPABLE_OS_FAMILIES:
return True
if self.device.family in TOUCH_CAPABLE_DEVICE_FAMILIES:
return True
if self.os.family == 'Windows 8' and 'Touch' in self.ua_string:
return True
if 'BlackBerry' in self.os.family and self._is_blackberry_touch_capable_device():
return True
return False
@property
def is_pc(self):
# Returns True for "PC" devices (Windows, Mac and Linux)
if 'Windows NT' in self.ua_string:
return True
# TODO: remove after https://github.com/tobie/ua-parser/issues/127 is closed
if self.os.family == 'Mac OS X' and 'Silk' not in self.ua_string:
return True
if 'Linux' in self.ua_string and 'X11' in self.ua_string:
return True
return False
@property
def is_bot(self):
return True if self.device.family == 'Spider' else False
def parse(user_agent_string):
return UserAgent(user_agent_string)
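# Hedged usage sketch (not part of the original module): the exact family names
# depend on the ua-parser regex database installed, so the expected output below
# is only illustrative.
if __name__ == "__main__":
    _iphone_ua = ("Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) "
                  "AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 "
                  "Mobile/9B179 Safari/7534.48.3")
    ua = parse(_iphone_ua)
    print(ua.browser.family, ua.os.family, ua.device.family)
    # An iPhone UA is expected to be mobile and touch capable, not a tablet or a PC.
    print(ua.is_mobile, ua.is_tablet, ua.is_touch_capable, ua.is_pc)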
|
natewinck/surfstat
|
user_agents/parsers.py
|
Python
|
gpl-2.0
| 6,000
|
[
"Galaxy"
] |
ec3b42af7701922819b2732a8aedc28368115352314838a71d945c110fcc3ab5
|
"""
Missing module doc.
"""
import subprocess
import os
import numpy as np
from pymatgen.io import atat
def run_mcsqs(structure, clusters, supercell=None, total_atoms=None, search_time=0.01):
"""
Helper function for calling mcsqs with different arguments
Args:
clusters (dict): dictionary of cluster interactions with entries in the form
# atoms: cutoff in angstroms
supercell (list): dimensions of the supercell in units of the original unit cell
total_atoms(int): total number of atoms in the final SQS. Choose either
this OR supercell
search_time (int): The time spent looking for the ideal SQS in minutes
Returns:
Pymatgen structure which is an SQS of the input structure
"""
num_atoms = len(structure)
if total_atoms is None:
total_atoms = num_atoms
    if supercell is not None and total_atoms != num_atoms:
        raise ValueError("Pick either a supercell OR a total number of atoms, not both")
# Set supercell
cell = np.eye(3)
text_file = open("sqscell.out", "w")
text_file.write("1\n")
for i in range(len(cell)):
text_file.write("\n")
for j in range(len(cell[i])):
text_file.write(str(cell[i][j]) + " ")
text_file.close()
struccopy = structure.copy()
if supercell is not None:
struccopy.make_supercell(supercell)
struc = atat.Mcsqs(struccopy)
text_file = open("rndstr.in", "w")
text_file.write(struc.to_string())
text_file.close()
# Generate Clusters
command = ["mcsqs"]
for num in clusters:
command.append("-" + str(num) + "=" + str(clusters[num]))
p = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
p.communicate()
command = ["mcsqs", "-rc", "-n {}".format(len(structure))]
p = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
try:
p.communicate(timeout=search_time * 60)
except subprocess.TimeoutExpired:
p.kill()
p.communicate()
if os.path.exists("bestsqs.out"):
text_file = open("bestsqs.out", "r")
bestsqs = text_file.read()
text_file.close()
return atat.Mcsqs.structure_from_string(bestsqs)
else:
            raise TimeoutError("mcsqs did not produce bestsqs.out within the allotted search time.")
else:
struc = atat.Mcsqs(struccopy)
text_file = open("rndstr.in", "w")
text_file.write(struc.to_string())
text_file.close()
# Generate Clusters
command = ["mcsqs"]
for num in clusters:
command.append("-" + str(num) + "=" + str(clusters[num]))
p = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
p.communicate()
command = ["mcsqs", "-n {}".format(total_atoms)]
p = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
try:
p.communicate(timeout=search_time * 60)
except subprocess.TimeoutExpired:
p.kill()
p.communicate()
if os.path.exists("bestsqs.out"):
text_file = open("bestsqs.out", "r")
bestsqs = text_file.read()
text_file.close()
return atat.Mcsqs.structure_from_string(bestsqs)
else:
            raise TimeoutError("mcsqs did not produce bestsqs.out within the allotted search time.")
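# Hedged usage sketch (not part of the original module): requires the ATAT "mcsqs"
# executable on the PATH, and the Cu-Au composition and cluster cutoffs below are
# purely illustrative.
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    # A 50/50 disordered single-site cubic cell as a toy input.
    disordered = Structure(Lattice.cubic(3.8), [{"Cu": 0.5, "Au": 0.5}], [[0, 0, 0]])
    # Pair clusters up to 5 A and triplet clusters up to 3 A; request an 8-atom SQS.
    sqs = run_mcsqs(disordered, clusters={2: 5.0, 3: 3.0}, total_atoms=8)
    print(sqs)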
|
fraricci/pymatgen
|
pymatgen/command_line/mcsqs_caller.py
|
Python
|
mit
| 3,925
|
[
"pymatgen"
] |
f4ab6c858698b1ef9ec1fabad199f1a6b2705b6cf16b6e91516bfa8ab9c8bcad
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestCommand(Testing.vtkTest):
def _test(self, fname):
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(VTK_DATA_ROOT + fname)
elev = vtk.vtkElevationFilter()
elev.SetInputConnection(reader.GetOutputPort())
elev.SetLowPoint(-0.05, 0.05, 0)
elev.SetHighPoint(0.05, 0.05, 0)
grad = vtk.vtkGradientFilter()
grad.SetInputConnection(elev.GetOutputPort())
grad.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, "Elevation")
grad.Update()
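        # The elevation scalar ramps from 0 to 1 over the 0.1-long segment between the
        # low and high points, so the expected gradient magnitude is 10 along the
        # low-to-high direction (x here, y in the second check below).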
vals = (10, 0, 0)
for i in range(3):
r = grad.GetOutput().GetPointData().GetArray("Gradients").GetRange(i)
self.assertTrue(abs(r[0] - vals[i]) < 1E-4)
self.assertTrue(abs(r[1] - vals[i]) < 1E-4)
elev.SetLowPoint(0.05, -0.05, 0)
elev.SetHighPoint(0.05, 0.05, 0)
grad.Update()
vals = (0, 10, 0)
for i in range(3):
r = grad.GetOutput().GetPointData().GetArray("Gradients").GetRange(i)
self.assertTrue(abs(r[0] - vals[i]) < 1E-4)
self.assertTrue(abs(r[1] - vals[i]) < 1E-4)
def testQuadraticQuad(self):
self._test("/Data/Disc_QuadraticQuads_0_0.vtu")
def testBiQuadraticQuad(self):
self._test("/Data/Disc_BiQuadraticQuads_0_0.vtu")
if __name__ == "__main__":
Testing.main([(TestCommand, 'test')])
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Common/DataModel/Testing/Python/quadraticQuadDeriv.py
|
Python
|
bsd-3-clause
| 1,642
|
[
"VTK"
] |
f2c3c873635955cfc95718580594c2580321d5b33ce7f94bdcd26a29f0b4da88
|
#!/usr/bin/env python
import sys, argparse
import numpy as np
def print_line(items):
print(items)
def read_profiles(inputfile, outputfile, params, dimension,depth1,depth2, reverse=False, format="%g"):
from netCDF4 import Dataset, num2date, date2num
rootgrp = Dataset(inputfile, 'r', format='NETCDF4')
f= open(outputfile,'wb');
# loop over time steps
ntime = rootgrp.variables['time'].size;
nz = rootgrp.variables['z'].size;
print(ntime)
for timestep in range(0,ntime-1):
# print header
table = np.zeros( [nz , len(params)] );
meanval = np.zeros([1,len(params)+1])
zvec = rootgrp.variables['z'];
# find dimension
index = 0
for param in params:
try:
if 'time' in rootgrp.variables[param].dimensions:
table[:,index] = rootgrp.variables[param][timestep].flatten()
else:
table[:,index] = rootgrp.variables[param][:].flatten()
index+=1
except KeyError:
print("Variable '{0}' does not exist".format(param))
sys.exit(64)
invert = 2 if reverse else 1
#print("reverse " )
#print(reverse)
header = ' '.join( ( str(num2date(rootgrp.variables['time'][timestep],rootgrp.variables['time'].units)) , str(1) , str(invert) ) )
# calculate the mean
try:
if reverse:
temp1 = next(zvec for zvec in zvec if abs(zvec) >= depth1);
temp2= np.where(zvec==temp1)
startmean = temp2[0]
if depth2>abs(zvec[-1]):
endmean = -1
else:
temp1 = next(zvec for zvec in zvec if abs(zvec) >= depth2);
temp2= np.where(zvec==temp1)
endmean = temp2[0]
#print("startmean " )
#print(startmean)
#print("endmean ")
#print(endmean)
else:
if depth1< abs(zvec[-1]):
endmean = -1
else:
temp1 =next(zvec for zvec in zvec if abs(zvec) <= depth1);
temp2 = np.where(zvec==temp1)
endmean = temp2[0]
temp1 = next(zvec for zvec in zvec if abs(zvec) <= depth2);
temp2 = np.where(zvec==temp1)
startmean = temp2[0]
#print(depth1)
#print(abs(zvec[-1]))
#print("startmean " )
#print(startmean)
#print("endmean ")
#print(endmean)
meanval[0,0]=depth2-depth1
index = 1
for param in params:
meanval[0,index] = sum(table[startmean:endmean,index-1])/len(table[startmean:endmean,index-1])
index+=1
except ValueError as e:
print("Error in calculation of mean: {0}".format(e))
try:
np.savetxt(f, meanval, delimiter=" ", fmt=format , header=header, comments='')
except OSError as e:
print("Cannot write to file '{0}': {1}".format(outputfile, e.strerror))
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
except ValueError as e:
print("Error: {0}".format(e))
f.close()
rootgrp.close()
def main(argv):
parser = argparse.ArgumentParser(description='''
Extract a profile from the provided NetCDF GOTM output file
'''
)
parser.add_argument('-d','--dimension', type=str, default='z' ,help='get profile along this dimension')
parser.add_argument('--variables', type=str , metavar='V1(,V2)', required=True ,help='variables')
parser.add_argument('--depth1', type=float, default='0.0', help='Starting depth of mean from surface [m]')
parser.add_argument('--depth2', type=float, default='15.0', help='Ending depth of mean from surface [m]')
parser.add_argument('--reverse', action='store_true',help='reverse axis')
parser.add_argument('--format', type=str, default="%g", help='format specifier')
parser.add_argument('inputfile', type=str, help='input netcdf file')
parser.add_argument('outputfile', type=str, help='output profile file')
args = parser.parse_args()
read_profiles(args.inputfile, args.outputfile, str.split(args.variables,',') , args.dimension,args.depth1,args.depth2, reverse=args.reverse, format=args.format)
if __name__ == "__main__":
main(sys.argv[1:])
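# Hedged usage sketch (hypothetical file and variable names): compute the mean of
# temperature and salinity over the top 0-15 m of a GOTM NetCDF output file and
# write one line per time step to a text file.
#
#   python profilemean.py --variables temp,salt --depth1 0.0 --depth2 15.0 \
#       gotm_output.nc mean_profiles.txt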
|
OpenDA-Association/OpenDA
|
model_gotm/tests/gotm_simple_kalman/stochModel/bin/profilemean.py
|
Python
|
lgpl-3.0
| 4,485
|
[
"NetCDF"
] |
aa34c0b1f4f917ca40ed111d0809fc654addf419a8077322823ed68a61df09f0
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# Test butterfly subdivision of point data
#
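# The three viewports compare, from left to right: the original elevation-colored
# sphere, linear subdivision, and butterfly subdivision (each subdivision filter
# uses 3 levels).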
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(11)
sphere.SetThetaResolution(11)
colorIt = vtk.vtkElevationFilter()
colorIt.SetInputConnection(sphere.GetOutputPort())
colorIt.SetLowPoint(0,0,-.5)
colorIt.SetHighPoint(0,0,.5)
butterfly = vtk.vtkButterflySubdivisionFilter()
butterfly.SetInputConnection(colorIt.GetOutputPort())
butterfly.SetNumberOfSubdivisions(3)
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
lut.Build()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(butterfly.GetOutputPort())
mapper.SetLookupTable(lut)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
linear = vtk.vtkLinearSubdivisionFilter()
linear.SetInputConnection(colorIt.GetOutputPort())
linear.SetNumberOfSubdivisions(3)
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(linear.GetOutputPort())
mapper2.SetLookupTable(lut)
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
mapper3 = vtk.vtkPolyDataMapper()
mapper3.SetInputConnection(colorIt.GetOutputPort())
mapper3.SetLookupTable(lut)
actor3 = vtk.vtkActor()
actor3.SetMapper(mapper3)
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
ren3 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
renWin.AddRenderer(ren3)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(actor)
ren1.SetBackground(1,1,1)
ren2.AddActor(actor2)
ren2.SetBackground(1,1,1)
ren3.AddActor(actor3)
ren3.SetBackground(1,1,1)
renWin.SetSize(600,200)
aCamera = vtk.vtkCamera()
aCamera.Azimuth(70)
aLight = vtk.vtkLight()
aLight.SetPosition(aCamera.GetPosition())
aLight.SetFocalPoint(aCamera.GetFocalPoint())
ren1.SetActiveCamera(aCamera)
ren1.AddLight(aLight)
ren1.ResetCamera()
aCamera.Dolly(1.4)
ren1.ResetCameraClippingRange()
ren2.SetActiveCamera(aCamera)
ren2.AddLight(aLight)
ren3.SetActiveCamera(aCamera)
ren3.AddLight(aLight)
ren3.SetViewport(0,0,.33,1)
ren2.SetViewport(.33,0,.67,1)
ren1.SetViewport(.67,0,1,1)
iren.Initialize()
def flat (__vtk__temp0=0,__vtk__temp1=0):
actor.GetProperty().SetInterpolationToFlat()
actor2.GetProperty().SetInterpolationToFlat()
actor3.GetProperty().SetInterpolationToFlat()
renWin.Render()
def smooth (__vtk__temp0=0,__vtk__temp1=0):
actor.GetProperty().SetInterpolationToGouraud()
actor2.GetProperty().SetInterpolationToGouraud()
actor3.GetProperty().SetInterpolationToGouraud()
renWin.Render()
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Modeling/Testing/Python/subdividePointData.py
|
Python
|
gpl-3.0
| 2,598
|
[
"VTK"
] |
336d3b338831d25796c9f9a64f275248db733cf4972bfba9753f3a2d3379f6ac
|
import numpy as np
from scipy.special import erf
#--> The lnlikes
#-> The lnlike calculated with generalized chi square
#------------------------------------------------------------------------------#
# Note:
# When I compare the two different methods of treating the upperlimits, the
# results are surprisingly consistent with each other. Both of them can
# recover a reasonable posterior probability distribution and I cannot tell
# which one is better than the other.
#------------------------------------------------------------------------------#
#The generalized chi-square function with Sawicki (2012)'s method.
def ChiSq_0(data, model, unct=None, flag=None):
'''
This is a generalized chi-square function that allows y to be upperlimits. The
    upper limits are properly dealt with using the method described by Sawicki (2012).
Parameters
----------
data : float array
The observed data and upperlimits.
model : float array
The model.
    unct : float array or None by default
The uncertainties.
flag : float array or None by default
The flag of upperlimits, 0 for detection and 1 for upperlimits.
Returns
-------
chsq : float
The Chi square
Notes
-----
    This chi-square form considers the x and y asymmetrically except for some special
situations.
'''
if unct is None:
unct = np.ones_like(data)
if flag is None:
flag = np.zeros_like(data)
fltr_dtc = flag == 0
fltr_non = flag == 1
if np.sum(fltr_dtc)>0:
wrsd_dtc = (data[fltr_dtc] - model[fltr_dtc])/unct[fltr_dtc] #The weighted residual
chsq_dtc = np.sum(wrsd_dtc**2) + np.sum( np.log(2 * np.pi * unct[fltr_dtc]**2.0) )
else:
chsq_dtc = 0.
if np.sum(fltr_non)>0:
unct_non = unct[fltr_non]
wrsd_non = (data[fltr_non] - model[fltr_non])/(unct_non * 2**0.5)
chsq_non = np.sum( -2.* np.log( 0.5 * (1 + erf(wrsd_non)) ) )
else:
chsq_non = 0.
chsq = chsq_dtc + chsq_non
return chsq
#The generalized chi-square function with simple Gaussian method.
def ChiSq_1(data, model, unct=None, flag=None):
'''
This is a generalized chi-square function that allows y to be upperlimits.
    A point contributes zero to the chi square when the model is below its upperlimit,
    and contributes like a normal detected point when the model is above
    the upperlimit.
Parameters
----------
data : float array
The observed data and upperlimits.
model : float array
The model.
    unct : float array or None by default
The uncertainties.
flag : float array or None by default
The flag of upperlimits, 0 for detection and 1 for upperlimits.
Returns
-------
chsq : float
The Chi square
Notes
-----
    This chi-square form considers the x and y asymmetrically except for some special
situations.
'''
if unct is None:
unct = np.ones_like(data)
if flag is None:
flag = np.zeros_like(data)
fltr_dtc = flag == 0
fltr_non = flag == 1
if np.sum(fltr_dtc)>0:
wrsd_dtc = (data[fltr_dtc] - model[fltr_dtc])/unct[fltr_dtc] #The weighted residual
chsq_dtc = np.sum(wrsd_dtc**2) + np.sum( np.log(2 * np.pi * unct[fltr_dtc]**2) )
else:
chsq_dtc = 0.
if np.sum(fltr_non)>0:
data_non = data[fltr_non]
model_non = model[fltr_non]
unct_non = unct[fltr_non]
wrsd_non = np.zeros_like(data_non)
        # Only when the model is above the upperlimit does it contribute to the chi square.
fltr = model_non > data_non
wrsd_non[fltr] = (model_non[fltr] - data_non[fltr]) / unct_non[fltr]
chsq_non = np.sum(wrsd_non**2) + np.sum( np.log(2 * np.pi * unct_non[fltr]**2) )
else:
chsq_non = 0.
chsq = chsq_dtc + chsq_non
return chsq
def lnlike_gcs(theta, x, y, xerr, yerr, fix_m=None, fix_b=None, *args, **kwargs):
"""
    The ln of the likelihood function using the generalized chi-square function. The y
of the data could be upperlimits.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
x : float array
The data of x.
y : float array
The data of y.
xerr : float array
The uncertainty of the x data.
yerr : float array
The uncertainty of the y data.
fix_m : (Optional) float
Fix the value of m into the given value.
fix_b : (Optional) float
Fix the value of b into the given value.
args and kwargs : for the ChiSq function.
Returns
-------
The ln likelihood.
Notes
-----
    The lnf parameter is used here mainly as the epsy0 of the Nukers method rather
    than as a fractional model uncertainty. With this treatment, the best-fit result
    is close to the Nukers result and the upperlimits can still be handled by this
    function.
"""
lenPar = len(theta)
parDict = {}
nFix = 0
if not fix_m is None:
parDict["m"] = fix_m
nFix += 1
if not fix_b is None:
parDict["b"] = fix_b
nFix += 1
fixList = parDict.keys()
nUse = 0
if (lenPar + nFix) == 2:
if "m" in fixList:
m = parDict["m"]
else:
m = theta[nUse]
nUse += 1
if "b" in fixList:
b = parDict["b"]
else:
b = theta[nUse]
model = m * x + b
s = np.sqrt(yerr**2 + (m*xerr)**2)
elif (lenPar + nFix) == 3:
if "m" in fixList:
m = parDict["m"]
else:
m = theta[nUse]
nUse += 1
if "b" in fixList:
b = parDict["b"]
else:
b = theta[nUse]
nUse += 1
model = m * x + b
lnf = theta[nUse]
#s = np.sqrt(yerr**2 + (m*xerr)**2 + model**2*np.exp(2*lnf))
s = np.sqrt(yerr**2 + (m*xerr)**2 + np.exp(2*lnf))
lnL = -0.5 * ChiSq_0(y, model, s, *args, **kwargs)
return lnL
#-> The Nukers' lnlike
def lnlike_Nukers(theta, x, y, epsx, epsy):
"""
This is the ln likelihood function resembling the Nukers' estimate from
Tremaine et al. (2002). One of the merits of this form is that the x and y
are symmetric (see the paper for the details). The symbols of the parameters
also follow the paper.
Parameters
----------
theta : list
The list of model parameters, [beta, alpha, epsy0 (optional)].
beta is the slope, alpha is the intercept and epsy0 is the intrinsic
scatter along the y direction.
x : float array
The x data.
y : float array
The y data.
epsx : float array
The uncertainty of x data.
epsy : float array
The uncertainty of y data.
Returns
-------
lnL : float
The ln of the likelihood.
Notes
-----
    The lnlike penalizes a very broad intrinsic dispersion by assuming it follows a
    Gaussian distribution. Therefore, the optimization seeks the maximum of
    the lnlike rather than driving the Nukers' estimate to ~1.
"""
if len(theta) == 2:
beta, alpha = theta
inv_sigma2 = 1.0/(epsy**2 + (beta * epsx)**2)
if len(theta) == 3:
beta, alpha, epsy0 = theta
inv_sigma2 = 1.0/(epsy**2 + epsy0**2 + (beta * epsx)**2)
lnL = -0.5*(np.sum((y - alpha - beta * x)**2*inv_sigma2 - np.log(inv_sigma2)))
return lnL
#-> The lnlike calculated from the distance perpendicular to the line following
#Hogg et al. (2010; arXiv:1008.4686)
def lnlike_perp(theta, x, y, sigx, sigy):
"""
This is the ln likelihood function considering the 2-dimensional uncertainties
and calculated based on the distance of the points perpendicular to the line.
It follows the equation (35) of Hogg et al. (2010; arXiv:1008.4686).
Parameters
----------
theta : list
The list of model parameters, [t, bv, V (optional)]. t (in radian) is
the angle (theta = arctan slope), bv is the perpendicular distance of
the line from the origin and V is intrinsic Gaussian variance orthogonal
to the line.
x : float array
The x data.
y : float array
The y data.
sigx : float array
The uncertainty of x data.
sigy : float array
The uncertainty of y data.
Returns
-------
lnL : float
The ln likelihood.
Notes
-----
None.
"""
if len(theta) == 2:
t, bv = theta
V = 0
if len(theta) == 3:
t, bv, V = theta
delta = y * np.cos(t) - x * np.sin(t) - bv
Sigma2 = (sigx * np.sin(t))**2 + (sigy * np.cos(t))**2
lnL = -0.5 * np.sum(delta**2 / (Sigma2 + V) + np.log(Sigma2 + V))
return lnL
def lnlike_perp2(theta, x, y, sigx, sigy):
"""
This is the ln likelihood function considering the 2-dimensional uncertainties
and calculated based on the distance of the points perpendicular to the line.
It follows the equation (35) of Hogg et al. (2010; arXiv:1008.4686).
Parameters
----------
theta : list
The list of model parameters, [t, b, V (optional)]. t (in radian) is
the angle (theta = arctan slope), b is the intercept and V is intrinsic
Gaussian variance orthogonal
to the line.
x : float array
The x data.
y : float array
The y data.
sigx : float array
The uncertainty of x data.
sigy : float array
The uncertainty of y data.
Returns
-------
lnL : float
The ln likelihood.
Notes
-----
None.
"""
if len(theta) == 2:
t, b = theta
V = 0
if len(theta) == 3:
t, b, V = theta
delta = (y - b) * np.cos(t) - x * np.sin(t)
Sigma2 = (sigx * np.sin(t))**2 + (sigy * np.cos(t))**2
lnL = -0.5 * np.sum(delta**2 / (Sigma2 + V) + np.log(Sigma2 + V))
return lnL
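# Hedged helper (not part of the original module): convert the (t, bv) angle and
# perpendicular-offset parameters used by lnlike_perp back to the usual slope and
# intercept of y = m*x + b, i.e. m = tan(t) and b = bv / cos(t), following the
# parameterization of Hogg et al. (2010).
def perp_to_slope_intercept(t, bv):
    m = np.tan(t)
    b = bv / np.cos(t)
    return m, b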
#-> The lnlike that considers the model imperfectness naively as a fraction of
#the model values.
def lnlike_naive(theta, x, y, xerr, yerr):
"""
The ln of likelihood function using all detected data.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
x : float array
The data of x.
y : float array
The data of y.
xerr : float array
The uncertainty of the x data.
yerr : float array
The uncertainty of the y data.
Returns
-------
The ln likelihood.
Notes
-----
None.
"""
if len(theta) == 2:
m, b = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + (m*xerr)**2)
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
if len(theta) == 3:
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + (m*xerr)**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
else:
raise ValueError("[linfit]: The length of parameters ({0}) is incorrect!".format(len(theta)))
def lnprior(theta, pRanges):
"""
The ln of prior function.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
pRanges : list
The list of the parameter prior ranges.
Returns
-------
The ln prior.
Notes
-----
None.
"""
assert len(theta) == len(pRanges)
if len(theta) == 2:
m, b = theta
mR, bR = pRanges
if mR[0] < m < mR[1] and bR[0] < b < bR[1]:
return 0.0
return -np.inf
if len(theta) == 3:
m, b, lnf = theta
mR, bR, lnfR = pRanges
if mR[0] < m < mR[1] and bR[0] < b < bR[1] and lnfR[0] < lnf < lnfR[1]:
return 0.0
return -np.inf
else:
raise ValueError("[linfit]: The length of parameters ({0}) is incorrect!".format(len(theta)))
def lnprob(theta, x, y, xerr, yerr, pRanges, *args, **kwargs):
"""
The ln of probability function.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
x : float array
The data of x.
y : float array
The data of y.
xerr : float array
The uncertainty of the x data.
yerr : float array
The uncertainty of the y data.
pRanges : list
The list of the parameter prior ranges.
args and kwargs : for the ChiSq function.
Returns
-------
The ln probability.
Notes
-----
None.
"""
print "args", args
print "kwargs", kwargs
lp = lnprior(theta, pRanges)
if not np.isfinite(lp):
return -np.inf
    # lnlike is not defined in this module; the generalized chi-square likelihood
    # (lnlike_gcs) is assumed to be the intended function here.
    return lp + lnlike_gcs(theta, x, y, xerr, yerr, *args, **kwargs)
if __name__ == "__main__":
m_true = -0.9594
b_true = 4.294
data = np.loadtxt("examples/data_lnf.txt")
#data = np.loadtxt("examples/data_upp.txt")
x = data[:, 0]
y = data[:, 1]
xerr = data[:, 2]
yerr = data[:, 3]
flag = data[:, 4]
model = m_true * x + b_true
sigma = np.sqrt(yerr**2 + (m_true * xerr)**2)
    print(ChiSq_0(y, model, sigma, flag))
    print(ChiSq_1(y, model, sigma, flag))
|
darkbear9494/LinFit
|
linfit/likelihoods.py
|
Python
|
mit
| 13,199
|
[
"Gaussian"
] |
1a477b8949b49e822ebb5f99a6c4718c8d108a2178d7e6b53641f5c1075fd33b
|
import sys
import argparse
import methylpy
def parse_args():
# create the top-level parser
parser = argparse.ArgumentParser(
description = "You are using methylpy "
+ methylpy.__version__
+ " version ("
+ methylpy.__file__[:methylpy.__file__.rfind("/")]+"/"
+ ")"
)
subparsers = parser.add_subparsers(
title="functions",
dest="command",metavar="")
add_build_ref_subparser(subparsers)
add_se_pipeline_subparser(subparsers)
add_pe_pipeline_subparser(subparsers)
add_DMRfind_subparser(subparsers)
add_merge_DMS_subparser(subparsers)
add_get_methylation_level_subparser(subparsers)
add_bam_filter_subparser(subparsers)
add_call_mc_subparser(subparsers)
add_allc2bw_subparser(subparsers)
add_merge_allc_subparser(subparsers)
add_index_allc_subparser(subparsers)
add_filter_allc_subparser(subparsers)
add_test_allc_subparser(subparsers)
if len(sys.argv) > 1:
## print out version
if (sys.argv[1] == '--version' or sys.argv[1] == '-v'):
print(methylpy.__version__)
exit()
## all functions
args = parser.parse_args()
else:
args = parser.parse_args(["-h"])
exit()
if args.command == "build-reference":
from methylpy.call_mc_se import build_ref
build_ref(input_files=args.input_files,
output=args.output_prefix,
aligner=args.aligner,
path_to_aligner=args.path_to_aligner,
num_procs=args.num_procs,
buffsize=args.buffsize)
elif args.command == "DMRfind":
from methylpy.DMRfind import DMRfind
DMRfind(allc_files = args.allc_files,
samples=args.samples,
mc_type=args.mc_type,
chroms=args.chroms,
num_procs=args.num_procs,
output_prefix=args.output_prefix,
min_cov=args.min_cov,
dmr_max_dist=args.dmr_max_dist,
sig_cutoff=args.sig_cutoff,
num_sims=args.num_sims,
num_sig_tests=args.min_tests,
min_num_dms=args.min_num_dms,
sample_category=args.sample_category,
mc_max_dist=args.mc_max_dist,
resid_cutoff=args.resid_cutoff,
keep_temp_files=args.keep_temp_files,
min_cluster=args.min_cluster,
seed=args.seed)
elif args.command == "reidentify-DMR":
from methylpy.DMRfind import merge_DMS_to_DMR
merge_DMS_to_DMR(input_rms_file=args.input_rms_file,
output_file=args.output_file,
collapse_samples=args.collapse_samples,
sample_category=args.sample_category,
min_cluster=args.min_cluster,
sig_cutoff=args.sig_cutoff,
dmr_max_dist=args.dmr_max_dist,
min_num_dms=args.min_num_dms,
resid_cutoff=args.resid_cutoff,
num_sims=args.num_sims,
num_sig_tests=args.min_tests)
elif args.command == "single-end-pipeline":
from methylpy.call_mc_se import run_methylation_pipeline
run_methylation_pipeline(read_files=args.read_files,
sample=args.sample,
forward_reference=args.forward_ref,
reverse_reference=args.reverse_ref,
reference_fasta=args.ref_fasta,
libraries=args.libraries,
path_to_output=args.path_to_output,
pbat=args.pbat,
check_dependency=args.check_dependency,
num_procs=args.num_procs,
sort_mem=args.sort_mem,
num_upstr_bases=args.num_upstream_bases,
num_downstr_bases=args.num_downstream_bases,
generate_allc_file=args.generate_allc_file,
generate_mpileup_file=args.generate_mpileup_file,
compress_output=args.compress_output,
bgzip=args.bgzip,
path_to_bgzip=args.path_to_bgzip,
path_to_tabix=args.path_to_tabix,
trim_reads=args.trim_reads,
path_to_cutadapt=args.path_to_cutadapt,
path_to_aligner=args.path_to_aligner,
aligner=args.aligner,
aligner_options=args.aligner_options,
merge_by_max_mapq=args.merge_by_max_mapq,
min_mapq=args.min_mapq,
remove_clonal=args.remove_clonal,
path_to_picard=args.path_to_picard,
keep_clonal_stats=args.keep_clonal_stats,
java_options=args.java_options,
path_to_samtools=args.path_to_samtools,
remove_chr_prefix=args.remove_chr_prefix,
add_snp_info=args.add_snp_info,
adapter_seq=args.adapter_seq,
unmethylated_control=args.unmethylated_control,
binom_test=args.binom_test,
sig_cutoff=args.sig_cutoff,
min_cov=args.min_cov,
max_adapter_removal=args.max_adapter_removal,
overlap_length=args.overlap_length,
zero_cap=args.zero_cap,
error_rate=args.error_rate,
min_qual_score=args.min_qual_score,
min_read_len=args.min_read_len,
min_base_quality=args.min_base_quality,
keep_temp_files=args.keep_temp_files)
elif args.command == "paired-end-pipeline":
from methylpy.call_mc_pe import run_methylation_pipeline_pe
run_methylation_pipeline_pe(read1_files=args.read1_files,
read2_files=args.read2_files,
sample=args.sample,
forward_reference=args.forward_ref,
reverse_reference=args.reverse_ref,
reference_fasta=args.ref_fasta,
libraries=args.libraries,
path_to_output=args.path_to_output,
pbat=args.pbat,
check_dependency=args.check_dependency,
num_procs=args.num_procs,
sort_mem=args.sort_mem,
num_upstr_bases=args.num_upstream_bases,
num_downstr_bases=args.num_downstream_bases,
generate_allc_file=args.generate_allc_file,
generate_mpileup_file=args.generate_mpileup_file,
compress_output=args.compress_output,
bgzip=args.bgzip,
path_to_bgzip=args.path_to_bgzip,
path_to_tabix=args.path_to_tabix,
trim_reads=args.trim_reads,
path_to_cutadapt=args.path_to_cutadapt,
path_to_aligner=args.path_to_aligner,
aligner=args.aligner,
aligner_options=args.aligner_options,
merge_by_max_mapq=args.merge_by_max_mapq,
min_mapq=args.min_mapq,
remove_clonal=args.remove_clonal,
path_to_picard=args.path_to_picard,
keep_clonal_stats=args.keep_clonal_stats,
java_options=args.java_options,
path_to_samtools=args.path_to_samtools,
remove_chr_prefix=args.remove_chr_prefix,
add_snp_info=args.add_snp_info,
adapter_seq_read1=args.adapter_seq_read1,
adapter_seq_read2=args.adapter_seq_read2,
unmethylated_control=args.unmethylated_control,
binom_test=args.binom_test,
sig_cutoff=args.sig_cutoff,
min_cov=args.min_cov,
max_adapter_removal=args.max_adapter_removal,
overlap_length=args.overlap_length,
zero_cap=args.zero_cap,
error_rate=args.error_rate,
min_qual_score=args.min_qual_score,
min_read_len=args.min_read_len,
min_base_quality=args.min_base_quality,
keep_temp_files=args.keep_temp_files)
elif args.command == "bam-quality-filter":
from methylpy.call_mc_se import bam_quality_mch_filter
bam_quality_mch_filter(inputf=args.input_file,
outputf=args.output_file,
reference_fasta=args.ref_fasta,
min_mapq=args.min_mapq,
min_ch=args.min_num_ch,
max_mch_level=args.max_mch_level,
buffer_line_number=args.buffer_line_number)
elif args.command == "call-methylation-state":
if args.paired_end:
from methylpy.call_mc_pe import call_methylated_sites_pe
call_methylated_sites_pe(inputf=args.input_file,
sample=args.sample,
reference_fasta=args.ref_fasta,
unmethylated_control=args.unmethylated_control,
sig_cutoff=args.sig_cutoff,
num_procs=args.num_procs,
num_upstr_bases=args.num_upstream_bases,
num_downstr_bases=args.num_downstream_bases,
generate_mpileup_file=args.generate_mpileup_file,
compress_output=args.compress_output,
bgzip=args.bgzip,
path_to_bgzip=args.path_to_bgzip,
path_to_tabix=args.path_to_tabix,
min_mapq=args.min_mapq,
min_cov=args.min_cov,
binom_test=args.binom_test,
path_to_samtools=args.path_to_samtools,
remove_chr_prefix=args.remove_chr_prefix,
add_snp_info=args.add_snp_info,
path_to_files=args.path_to_output,
min_base_quality=args.min_base_quality,
keep_temp_files=args.keep_temp_files)
else:
from methylpy.call_mc_se import call_methylated_sites
call_methylated_sites(inputf=args.input_file,
sample=args.sample,
reference_fasta=args.ref_fasta,
unmethylated_control=args.unmethylated_control,
sig_cutoff=args.sig_cutoff,
num_procs=args.num_procs,
num_upstr_bases=args.num_upstream_bases,
num_downstr_bases=args.num_downstream_bases,
generate_mpileup_file=args.generate_mpileup_file,
compress_output=args.compress_output,
bgzip=args.bgzip,
path_to_bgzip=args.path_to_bgzip,
path_to_tabix=args.path_to_tabix,
min_mapq=args.min_mapq,
min_cov=args.min_cov,
binom_test=args.binom_test,
path_to_samtools=args.path_to_samtools,
remove_chr_prefix=args.remove_chr_prefix,
add_snp_info=args.add_snp_info,
path_to_files=args.path_to_output,
min_base_quality=args.min_base_quality)
elif args.command == "add-methylation-level":
if args.extra_info:
from methylpy.DMRfind import get_c_info_DMRfind
get_c_info_DMRfind(input_tsv_file=args.input_tsv_file,
output=args.output_file,
input_allc_files=args.allc_files,
samples=args.samples,
mc_type=args.mc_type,
num_procs=args.num_procs,
min_cov=args.min_cov,
max_cov=args.max_cov,
buffer_line_number=args.buffer_line_number,
input_no_header=args.input_no_header)
else:
from methylpy.DMRfind import get_methylation_levels_DMRfind
get_methylation_levels_DMRfind(input_tsv_file=args.input_tsv_file,
output=args.output_file,
input_allc_files=args.allc_files,
samples=args.samples,
mc_type=args.mc_type,
num_procs=args.num_procs,
min_cov=args.min_cov,
max_cov=args.max_cov,
buffer_line_number=args.buffer_line_number,
input_no_header=args.input_no_header)
elif args.command == "merge-allc":
from methylpy.utilities import merge_allc_files
merge_allc_files(allc_files=args.allc_files,
output_file=args.output_file,
num_procs=args.num_procs,
mini_batch=args.mini_batch,
compress_output=args.compress_output,
skip_snp_info=args.skip_snp_info)
elif args.command == "index-allc":
from methylpy.utilities import index_allc_file_batch
index_allc_file_batch(allc_files=args.allc_files,
num_procs=args.num_procs,
reindex=args.reindex)
elif args.command == "filter-allc":
from methylpy.utilities import filter_allc_files
filter_allc_files(allc_files=args.allc_files,
output_files=args.output_files,
num_procs=args.num_procs,
mc_type=args.mc_type,
chroms=args.chroms,
compress_output=args.compress_output,
max_mismatch=args.max_mismatch,
max_mismatch_frac=args.max_mismatch_frac,
min_cov=args.min_cov,
max_cov=args.max_cov)
elif args.command == "test-allc":
from methylpy.call_mc_se import perform_binomial_test
perform_binomial_test(allc_file=args.allc_file,
sample=args.sample,
path_to_output=args.path_to_output,
unmethylated_control=args.unmethylated_control,
min_cov=args.min_cov,
sig_cutoff=args.sig_cutoff,
num_procs=args.num_procs,
sort_mem=args.sort_mem,
compress_output=args.compress_output,
remove_chr_prefix=args.remove_chr_prefix)
elif args.command == "allc-to-bigwig":
from methylpy.utilities import convert_allc_to_bigwig
convert_allc_to_bigwig(input_allc_file=args.allc_file,
output_file=args.output_file,
reference_fasta=args.ref_fasta,
mc_type=args.mc_type,
bin_size=args.bin_size,
path_to_wigToBigWig=args.path_to_wigToBigWig,
path_to_samtools=args.path_to_samtools,
min_bin_sites=args.min_bin_sites,
min_bin_cov=args.min_bin_cov,
min_site_cov=args.min_site_cov,
max_site_cov=args.max_site_cov,
remove_chr_prefix=args.remove_chr_prefix,
add_chr_prefix=args.add_chr_prefix)
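# Hedged usage sketch (hypothetical file names, assuming the "methylpy" console
# entry point installed by the package): a minimal DMRfind run comparing two
# samples in two categories.
#
#   methylpy DMRfind --allc-files allc_A.tsv.gz allc_B.tsv.gz \
#       --samples A B --sample-category 0 1 --mc-type CGN \
#       --output-prefix A_vs_B --num-procs 4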
def add_DMRfind_subparser(subparsers):
# create the parser for the "DMRfind" command
parser_dmrfind = subparsers.add_parser(
"DMRfind",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Identify differentially methylated regions")
# add options
parser_dmrfind_req = parser_dmrfind.add_argument_group("required inputs")
parser_dmrfind_req.add_argument("--allc-files",
type=str,
nargs="+",
required=True,
help="List of allc files.")
parser_dmrfind_req.add_argument("--output-prefix",
type=str,
required=True,
help="String indicating the prefix for output files")
parser_dmrfind_opt = parser_dmrfind.add_argument_group("optional inputs")
parser_dmrfind_opt.add_argument("--samples",
type=str,
nargs="+",
default=None,
help="List of space separated samples matching allc files. By default "
+"sample names will be inferred from allc filenames")
parser_dmrfind_opt.add_argument("--chroms",
type=str,
nargs="+",
required=False,
default=None,
help="Space separated listing of chromosomes where DMRs will "
+"be called. If not specified, DMRs will be called across the chromosomes/contigs "
+"that contained any data in all allc files.")
parser_dmrfind_opt.add_argument("--mc-type",
type=str,
nargs="+",
default=["CGN"],
help="List of space separated mc nucleotide contexts for "
+ "which you want to look for DMRs. These classifications "
+ "may use the wildcards H (indicating anything but a G) and "
+ "N (indicating any nucleotide).")
parser_dmrfind_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
parser_dmrfind_opt.add_argument("--min-cov",
type=int,
default=0,
help="Minimum number of reads that must cover a site for it to be "
+ "considered.")
parser_dmrfind_opt.add_argument("--dmr-max-dist",
type=int,
default=250,
help="Maximum distance two significant sites can be to be included "
+ "in the same block.")
parser_dmrfind_opt.add_argument("--sig-cutoff",
type=float,
default=.01,
help="Float indicating at what FDR you want to consider a result "
+ "significant.")
parser_dmrfind_opt.add_argument("--num-sims",
type=int,
default=3000,
help="Number of permutation tests you would like to run to estimate "
+ "the p-values of the differential methylation tests")
parser_dmrfind_opt.add_argument("--min-tests",
type=int,
default=100,
help="Minimum number of permuation tests you\ would d like to run "
+ "for each mC")
parser_dmrfind_opt.add_argument("--min-num-dms",
type=int,
default=0,
help="The minimum number of differentially methylated sites "
+ "that a differentially methylated region needs to contain to be "
+ "reported")
parser_dmrfind_opt.add_argument("--sample-category",
type=str,
nargs="+",
default=False,
help="A list of categories that each respective sample belongs "
+ "to; the categories must begin at 0 and increase by 1 for "
+ "each category added. ex: samples [A,B,C] categories [0,1,2] "
+ "or categories [0, 1, 0] ")
parser_dmrfind_opt.add_argument("--mc-max-dist",
type=int,
default=0,
help="Integer indicating the maximum distance two sites can be "
+ "from one another for their methylation counts to be combined. "
+ "This option helps with low coverage experiments where you may "
+ "want to leverage the correlation of methylation between sites "
+ "to get more statistical power.")
parser_dmrfind_opt.add_argument("--resid-cutoff",
                                    type=float,
default=0.01,
help="Results will have to show deviations in the contingency "
+ "table in the same direction as the rest of the window")
parser_dmrfind_opt.add_argument("--keep-temp-files",
type=str2bool,
default=False,
help="Boolean indicating that you would like to keep the intermediate files "
+"generated by this function. This can be useful for debugging, but in general "
+"should be left False.")
parser_dmrfind_opt.add_argument("--min-cluster",
type=int,
default=2,
help="The minimum number of each sample category that must be "
+ "present in every block that is output.")
parser_dmrfind_opt.add_argument("--seed",
type=int,
default=-1,
help="A seed to provide to the random number generator for "
+ "permutation testing. Only change this if you are debugging "
+ "and want to make sure the permutation output is consistent")
def add_merge_DMS_subparser(subparsers):
# create the parser for the "merge_DMS" command
parser_mergedms = subparsers.add_parser(
"reidentify-DMR",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Re-call DMRs from existing DMRfind result")
# add options
parser_mergedms_req = parser_mergedms.add_argument_group("required inputs")
parser_mergedms_req.add_argument("--input-rms-file",
type=str,
required=True,
help="File storing the results of RMS tests (from DMRfind function.")
parser_mergedms_req.add_argument("--output-file",
type=str,
required=True,
help="String indicating the name of output file")
parser_mergedms_opt = parser_mergedms.add_argument_group("optional inputs")
parser_mergedms_opt.add_argument("--collapse-samples",
type=str,
nargs="+",
default=False,
help="A list of samples for collapsing blocks")
parser_mergedms_opt.add_argument("--sample-category",
type=str,
nargs="+",
default=False,
help="A list of categories that each respective sample belongs "
+ "to; the categories must begin at 0 and increase by 1 for "
+ "each category added. ex: samples [A,B,C] categories [0,1,2] "
+ "or categories [0, 1, 0] ")
parser_mergedms_opt.add_argument("--min-cluster",
type=int,
default=2,
help="The minimum number of each sample category that must be "
+ "present in every block that is output.")
parser_mergedms_opt.add_argument("--sig-cutoff",
type=float,
default=.01,
help="Float indicating at what FDR you want to consider a result "
+ "significant.")
parser_mergedms_opt.add_argument("--dmr-max-dist",
type=int,
default=250,
help="Maximum distance two significant sites can be to be included "
+ "in the same block.")
parser_mergedms_opt.add_argument("--min-num-dms",
type=int,
default=0,
help="The minimum number of differentially methylated sites "
+ "that a differentially methylated region needs to contain to be "
+ "reported")
parser_mergedms_opt.add_argument("--resid-cutoff",
                                     type=float,
default=0.01,
help="Results will have to show deviations in the contingency "
+ "table in the same direction as the rest of the window")
parser_mergedms_opt.add_argument("--num-sims",
type=int,
default=3000,
help="Number of permutation tests you would like to run to estimate "
+ "the p-values of the differential methylation tests")
parser_mergedms_opt.add_argument("--min-tests",
type=int,
default=100,
help="Minimum number of permuation tests you\ would d like to run "
+ "for each mC")
def add_se_pipeline_subparser(subparsers):
parser_se = subparsers.add_parser("single-end-pipeline",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Methylation pipeline for single-end data")
parser_se_req = parser_se.add_argument_group("required inputs")
parser_se_req.add_argument("--read-files",
type=str,
nargs="+",
required=True,
help="list of all the fastq files you would like to run through "
+ "the pipeline. Note that globbing is supported here (i.e., you "
+ "can use * in your paths)")
parser_se_req.add_argument("--sample",
type=str,
required=True,
help="String indicating the name of the sample you are processing. "
+ "It will be included in the output files.")
parser_se_req.add_argument("--forward-ref",
type=str, required=True, help="string indicating the path to the "
+ "forward strand reference created by build_ref")
parser_se_req.add_argument("--reverse-ref",
type=str,
required=True,
help="string indicating the path to the reverse strand reference "
+ "created by build_ref")
parser_se_req.add_argument("--ref-fasta",
type=str,
required=True,
help="string indicating the path to a fasta file containing the "
+ "sequences you used for mapping")
parser_se_opt = parser_se.add_argument_group("optional inputs")
parser_se_opt.add_argument("--libraries",
type=str,
nargs="+",
default=["libA"],
help="list of library IDs (in the same order as the files list) "
+ "indiciating which libraries each set of fastq files belong to. "
+ "If you use a glob, you only need to indicate the library ID for "
+ "those fastqs once (i.e., the length of files and libraries should "
+ "be the same)")
parser_se_opt.add_argument("--path-to-output",
type=str,
default="",
help="Path to a directory where you would like the output to be stored. "
+ "The default is the same directory as the input fastqs.")
parser_se_opt.add_argument("--pbat",
type=str2bool,
default=False,
help="Boolean indicating whether to process data in PBAT (Post-Bisulfite "
+"Adaptor Tagging) mode, in which reads will be mapped to opposite strand "
+"of C-T converted genome and the forward strand of G-A converted genome.")
parser_se_opt.add_argument("--check-dependency",
type=str2bool,
default=False,
help="Boolean indicating whether to check dependency requirements are met.")
parser_se_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
parser_se_opt.add_argument("--sort-mem",
type=str,
default="500M",
help="Parameter to pass to unix sort with -S/--buffer-size command")
parser_se_opt.add_argument("--num-upstream-bases",
type=int,
default=0,
help="Number of base(s) upstream of each cytosine that you wish to include "
+ "in output file. Recommend value 1 for NOMe-seq processing since the "
+ "upstream base is required to tell apart cytosine at GC context.")
parser_se_opt.add_argument("--num-downstream-bases",
type=int,
default=2,
help="Number of base(s) downstream of each cytosine that you wish to include "
+ "in output file. Recommend value to be at least 1 to separate cytosines at "
+ "different sequence context.")
parser_se_opt.add_argument("--generate-allc-file",
type=str2bool,
default=True,
help="Boolean indicating whether to generate the final output file that "
+" contains the methylation state of each cytosine. If set to be false, "
+"only alignment file (in BAM format) will be generated.")
parser_se_opt.add_argument("--generate-mpileup-file",
type=str2bool,
default=True,
help="Boolean indicating whether to generate intermediate mpileup file to save "
+"space. However, skipping mpileup step may cause problem due to the nature of "
+"python. Not skipping this step is recommended.")
parser_se_opt.add_argument("--compress-output",
type=str2bool,
default=True,
help="Boolean indicating whether to compress (by gzip) the final output "
+ "(allc file(s)).")
parser_se_opt.add_argument("--bgzip",
type=str2bool,
default=False,
help="Boolean indicating whether to bgzip compressed allc files and tabix index.")
parser_se_opt.add_argument("--path-to-bgzip",
type=str,
default="",
help="Path to bgzip installation")
parser_se_opt.add_argument("--path-to-tabix",
type=str,
default="",
help="Path to tabix installation")
parser_se_opt.add_argument("--trim-reads",
type=str2bool,
default=True,
help="Boolean indicating whether to trim reads using cutadapt.")
parser_se_opt.add_argument("--path-to-cutadapt",
type=str,
default="",
help="Path to cutadapt installation")
parser_se_opt.add_argument("--path-to-aligner",
type=str,
default="",
help="Path to bowtie/bowtie2 installation")
parser_se_opt.add_argument("--aligner",
type=str,
default="bowtie2",
help="Aligner to use. Currently, methylpy supports bowtie, bowtie2 and minimap2. ")
parser_se_opt.add_argument("--aligner-options",
type=str,
nargs="+",
help="list of strings indicating options you would like passed "
+ "to bowtie (e.g., \"-k 1 -l 2\")")
parser_se_opt.add_argument("--merge-by-max-mapq",
type=str2bool,
default=False,
help="Boolean indicates whether to merge alignment results from two "
+"converted genomes by MAPQ score. Be default, we only keep reads that "
+"are mapped to only one of the two converted genomes. If this option "
+"is set to True, for a read that could be mapped to both converted "
+"genomes, the alignment that achieves larger MAPQ score will be kept.")
parser_se_opt.add_argument("--remove-clonal",
type=str2bool,
default=False,
help="Boolean indicates whether to remove clonal reads or not")
parser_se_opt.add_argument("--path-to-picard",
type=str,
default="",
help="The path to the picard.jar in picard tools. The jar file can "
+ "be downloaded from https://broadinstitute.github.io/picard/index.html "
+ "(default is current dir)")
parser_se_opt.add_argument("--keep-clonal-stats",
type=str2bool,
default=True,
help="Boolean indicates whether to store the metric file from picard.")
parser_se_opt.add_argument("--java-options",
type=str,
default="-Xmx20g",
help="String indicating the option pass the java when running picard.")
parser_se_opt.add_argument("--path-to-samtools",
type=str,
default="",
help="Path to samtools installation")
parser_se_opt.add_argument("--adapter-seq",
type=str,
default="AGATCGGAAGAGCACACGTCTG",
help="sequence of an adapter that was ligated to the 3\' end. The "
+"adapter itself and anything that follows is trimmed.")
parser_se_opt.add_argument("--remove-chr-prefix",
type=str2bool,
default=True,
help="Boolean indicates whether to remove in the final output the \"chr\" prefix "
+"in the chromosome name")
parser_se_opt.add_argument("--add-snp-info",
type=str2bool,
default=False,
help="Boolean indicates whether to add extra two columns in the output (allc) file "
+"regarding the genotype information of each site. The first (second) column contain "
+"the number of basecalls that support the reference gentype (variant) for nucleotides "
"in the sequence context.")
parser_se_opt.add_argument("--unmethylated-control",
type=str,
default=None,
help="name of the chromosome/region that you want to use to estimate "
+ "the non-conversion rate of your sample, or the non-conversion rate "
+ "you would like to use. Consequently, control is either a string, or "
+ "a decimal. If control is a string then it should be in the following "
+ "format: \"chrom:start-end\". If you would like to specify an entire "
+ "chromosome simply use \"chrom:\"")
parser_se_opt.add_argument("--binom-test",
type=str2bool,
default=False,
help="Indicates that you would like to perform a binomial test on each cytosine "
+"to delineate cytosines that are significantly methylated than noise due to "
+"the failure of bisulfite conversion.")
parser_se_opt.add_argument("--sig-cutoff",
type=float,
default=.01,
help="float indicating the adjusted p-value cutoff you wish to use for "
+ "determining whether or not a site is methylated")
parser_se_opt.add_argument("--min-mapq",
type=int,
default=30,
help="Minimum MAPQ for reads to be included.")
parser_se_opt.add_argument("--min-cov",
type=int,
default=0,
help="Integer indicating the minimum number of reads for a site to be tested.")
parser_se_opt.add_argument("--max-adapter-removal",
type=int,
help="Indicates the maximum number of times to try to remove adapters. Useful "
+"when an adapter gets appended multiple times.")
parser_se_opt.add_argument("--overlap-length",
type=int,
help="Minimum overlap length. If the overlap between the read and the adapter "
+"is shorter than LENGTH, the read is not modified. This reduces the no. of "
+"bases trimmed purely due to short random adapter matches.")
parser_se_opt.add_argument("--zero-cap",
type=str2bool,
help="Flag that causes negative quality values to be set to zero (workaround "
+"to avoid segmentation faults in BWA)")
parser_se_opt.add_argument("--error-rate",
type=float,
help="maximum allowed error rate (no. of errors divided by the length of "
+"the matching region)")
parser_se_opt.add_argument("--min-qual-score",
type=int,
default=10,
help="allows you to trim low-quality ends from reads before adapter removal. "
+"The algorithm is the same as the one used by BWA (Subtract CUTOFF from all "
+"qualities; compute partial sums from all indices to the end of the sequence; "
+" cut sequence at the index at which the sum is minimal).")
parser_se_opt.add_argument("--min-read-len",
type=int,
default=30,
help="indicates the minimum length a read must be to be kept. Reads that "
+"are too short even before adapter removal are also discarded. In colorspace, "
"an initial primer is not counted.")
parser_se_opt.add_argument("--min-base-quality",
type=int,
default=1,
help="Integer indicating the minimum PHRED quality score for a base to be "
+"included in the mpileup file (and subsequently to be considered for "
+"methylation calling).")
parser_se_opt.add_argument("--keep-temp-files",
type=str2bool,
default=False,
help="Boolean indicating that you would like to keep the intermediate files "
+"generated by this function. This can be useful for debugging, but in general "
+"should be left False.")
def add_pe_pipeline_subparser(subparsers):
parser_pe = subparsers.add_parser("paired-end-pipeline",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Methylation pipeline for paired-end data")
parser_pe_req = parser_pe.add_argument_group("required inputs")
parser_pe_req.add_argument("--read1-files",
type=str,
nargs="+",
required=True,
help="list of all the read 1 fastq files you would like to run through "
+ "the pipeline. Note that globbing is supported here (i.e., you "
+ "can use * in your paths)")
parser_pe_req.add_argument("--read2-files",
type=str,
nargs="+",
required=True,
help="list of all the read 2 fastq files you would like to run through "
+ "the pipeline. Note that globbing is supported here (i.e., you "
+ "can use * in your paths)")
parser_pe_req.add_argument("--sample",
type=str,
required=True,
help="String indicating the name of the sample you are processing. "
+ "It will be included in the output files.")
parser_pe_req.add_argument("--forward-ref",
type=str, required=True, help="string indicating the path to the "
+ "forward strand reference created by build_ref")
parser_pe_req.add_argument("--reverse-ref",
type=str,
required=True,
help="string indicating the path to the reverse strand reference "
+ "created by build_ref")
parser_pe_req.add_argument("--ref-fasta",
type=str,
required=True,
help="string indicating the path to a fasta file containing the "
+ "sequences you used for mapping")
parser_pe_opt = parser_pe.add_argument_group("optional inputs")
parser_pe_opt.add_argument("--libraries",
type=str,
nargs="+",
default=["libA"],
help="list of library IDs (in the same order as the files list) "
+ "indiciating which libraries each set of fastq files belong to. "
+ "If you use a glob, you only need to indicate the library ID for "
+ "those fastqs once (i.e., the length of files and libraries should "
+ "be the same)")
parser_pe_opt.add_argument("--path-to-output",
type=str,
default="",
help="Path to a directory where you would like the output to be stored. "
+ "The default is the same directory as the input fastqs.")
parser_pe_opt.add_argument("--pbat",
type=str2bool,
default=False,
help="Boolean indicating whether to process data in PBAT (Post-Bisulfite "
+"Adaptor Tagging) mode, in which reads will be mapped to opposite strand "
+"of C-T converted genome and the forward strand of G-A converted genome.")
parser_pe_opt.add_argument("--check-dependency",
type=str2bool,
default=False,
help="Boolean indicating whether to check dependency requirements are met.")
parser_pe_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
parser_pe_opt.add_argument("--sort-mem",
type=str,
default="500M",
help="Parameter to pass to unix sort with -S/--buffer-size command")
parser_pe_opt.add_argument("--num-upstream-bases",
type=int,
default=0,
help="Number of base(s) upstream of each cytosine that you wish to include "
+ "in output file. Recommend value 1 for NOMe-seq processing since the "
+ "upstream base is required to tell apart cytosine at GC context.")
parser_pe_opt.add_argument("--num-downstream-bases",
type=int,
default=2,
help="Number of base(s) downstream of each cytosine that you wish to include "
+ "in output file. Recommend value to be at least 1 to separate cytosines at "
+ "different sequence contexts.")
parser_pe_opt.add_argument("--generate-allc-file",
type=str2bool,
default=True,
help="Boolean indicating whether to generate the final output file that "
+" contains the methylation state of each cytosine. If set to be false, "
+"only alignment file (in BAM format) will be generated.")
parser_pe_opt.add_argument("--generate-mpileup-file",
type=str2bool,
default=True,
help="Boolean indicating whether to generate intermediate mpileup file to save "
+"space. However, skipping mpileup step may cause problem due to the nature of "
+"python. Not skipping this step is recommended.")
parser_pe_opt.add_argument("--compress-output",
type=str2bool,
default=True,
help="Boolean indicating whether to compress (by gzip) the final output "
+ "(allc file(s)).")
parser_pe_opt.add_argument("--bgzip",
type=str2bool,
default=False,
help="Boolean indicating whether to bgzip compressed allc files and tabix index.")
parser_pe_opt.add_argument("--path-to-bgzip",
type=str,
default="",
help="Path to bgzip installation")
parser_pe_opt.add_argument("--path-to-tabix",
type=str,
default="",
help="Path to tabix installation")
parser_pe_opt.add_argument("--trim-reads",
type=str2bool,
default=True,
help="Boolean indicating whether to trim reads using cutadapt.")
parser_pe_opt.add_argument("--path-to-cutadapt",
type=str,
default="",
help="Path to cutadapt installation")
parser_pe_opt.add_argument("--path-to-aligner",
type=str,
default="",
help="Path to bowtie/bowtie2 installation")
parser_pe_opt.add_argument("--aligner",
type=str,
default="bowtie2",
help="Aligner to use. Currently, methylpy supports bowtie, bowtie2 and minimap2. ")
parser_pe_opt.add_argument("--aligner-options",
type=str,
nargs="+",
help="list of strings indicating options you would like passed "
+ "to bowtie (e.g., \"-k 1 -l 2\")")
parser_pe_opt.add_argument("--merge-by-max-mapq",
type=str2bool,
default=False,
help="Boolean indicates whether to merge alignment results from two "
+"converted genomes by MAPQ score. Be default, we only keep read pairs "
"that are mapped to only one of the two converted genomes. If this option "
+"is set to True, for a read pair that could be mapped to both converted "
+"genomes, the alignment that achieves larger MAPQ score will be kept.")
parser_pe_opt.add_argument("--remove-clonal",
type=str2bool,
default=False,
help="Boolean indicates whether to remove clonal reads or not")
parser_pe_opt.add_argument("--path-to-picard",
type=str,
default="",
help="The path to the picard.jar in picard tools. The jar file can "
+ "be downloaded from https://broadinstitute.github.io/picard/index.html "
+ "(default is current dir)")
parser_pe_opt.add_argument("--keep-clonal-stats",
type=str2bool,
default=True,
help="Boolean indicates whether to store the metric file from picard.")
parser_pe_opt.add_argument("--java-options",
type=str,
default="-Xmx20g",
help="String indicating the option pass the java when running picard.")
parser_pe_opt.add_argument("--path-to-samtools",
type=str,
default="",
help="Path to samtools installation")
parser_pe_opt.add_argument("--adapter-seq-read1",
type=str,
default="AGATCGGAAGAGCACACGTCTGAAC",
help="sequence of an adapter that was ligated to the 3\' end of read 1. The "
+"adapter itself and anything that follows is trimmed.")
parser_pe_opt.add_argument("--adapter-seq-read2",
type=str,
default="AGATCGGAAGAGCGTCGTGTAGGGA",
help="sequence of an adapter that was ligated to the 3\' end of read 2. The "
+"adapter itself and anything that follows is trimmed.")
parser_pe_opt.add_argument("--remove-chr-prefix",
type=str2bool,
default=True,
help="Boolean indicates whether to remove in the final output the \"chr\" prefix "
+"in the chromosome name")
parser_pe_opt.add_argument("--add-snp-info",
type=str2bool,
default=False,
help="Boolean indicates whether to add extra two columns in the output (allc) file "
+"regarding the genotype information of each site. The first (second) column contain "
+"the number of basecalls that support the reference gentype (variant) for nucleotides "
"in the sequence context.")
parser_pe_opt.add_argument("--unmethylated-control",
type=str,
default=None,
help="name of the chromosome/region that you want to use to estimate "
+ "the non-conversion rate of your sample, or the non-conversion rate "
+ "you would like to use. Consequently, control is either a string, or "
+ "a decimal. If control is a string then it should be in the following "
+ "format: \"chrom:start-end\". If you would like to specify an entire "
+ "chromosome simply use \"chrom:\"")
parser_pe_opt.add_argument("--binom-test",
type=str2bool,
default=False,
help="Indicates that you would like to perform a binomial test on each cytosine "
+"to delineate cytosines that are significantly methylated than noise due to "
+"the failure of bisulfite conversion.")
parser_pe_opt.add_argument("--sig-cutoff",
type=float,
default=.01,
help="float indicating the adjusted p-value cutoff you wish to use for "
+ "determining whether or not a site is methylated")
parser_pe_opt.add_argument("--min-mapq",
type=int,
default=30,
help="Minimum MAPQ for reads to be included.")
parser_pe_opt.add_argument("--min-cov",
type=int,
default=0,
help="Integer indicating the minimum number of reads for a site to be tested.")
parser_pe_opt.add_argument("--max-adapter-removal",
type=int,
help="Indicates the maximum number of times to try to remove adapters. Useful "
+"when an adapter gets appended multiple times.")
parser_pe_opt.add_argument("--overlap-length",
type=int,
help="Minimum overlap length. If the overlap between the read and the adapter "
+"is shorter than LENGTH, the read is not modified. This reduces the no. of "
+"bases trimmed purely due to short random adapter matches.")
parser_pe_opt.add_argument("--zero-cap",
type=str2bool,
help="Flag that causes negative quality values to be set to zero (workaround "
+"to avoid segmentation faults in BWA)")
parser_pe_opt.add_argument("--error-rate",
type=float,
help="maximum allowed error rate (no. of errors divided by the length of "
+"the matching region)")
parser_pe_opt.add_argument("--min-qual-score",
type=int,
default=10,
help="allows you to trim low-quality ends from reads before adapter removal. "
+"The algorithm is the same as the one used by BWA (Subtract CUTOFF from all "
+"qualities; compute partial sums from all indices to the end of the sequence; "
+" cut sequence at the index at which the sum is minimal).")
parser_pe_opt.add_argument("--min-read-len",
type=int,
default=30,
help="indicates the minimum length a read must be to be kept. Reads that "
+"are too short even before adapter removal are also discarded. In colorspace, "
"an initial primer is not counted.")
parser_pe_opt.add_argument("--min-base-quality",
type=int,
default=1,
help="Integer indicating the minimum PHRED quality score for a base to be "
+"included in the mpileup file (and subsequently to be considered for "
+"methylation calling).")
parser_pe_opt.add_argument("--keep-temp-files",
type=str2bool,
default=False,
help="Boolean indicating that you would like to keep the intermediate files "
+"generated by this function. This can be useful for debugging, but in general "
+"should be left False.")
def add_build_ref_subparser(subparsers):
# create the parser for the "DMRfind" command
parser_build = subparsers.add_parser(
"build-reference",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Building reference for bisulfite sequencing data")
# add options
parser_build_req = parser_build.add_argument_group("required inputs")
parser_build_req.add_argument("--input-files",
type=str,
nargs="+",
required=True,
help="List of genome fasta files to build a reference from.")
parser_build_req.add_argument("--output-prefix",
type=str,
required=True,
help="the prefix of the two output reference files that will be created.")
parser_build_opt = parser_build.add_argument_group("optional inputs")
parser_build_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
parser_build_opt.add_argument("--aligner",
type=str,
default="bowtie2",
help="Aligner to use. Currently, methylpy supports bowtie, bowtie2 and minimap2. ")
parser_build_opt.add_argument("--path-to-aligner",
type=str,
default="",
help="Path to bowtie/bowtie2 installation")
parser_build_opt.add_argument("--buffsize",
type=int,
default=100,
help="The number of bytes that will be read in from the reference at once.")
def add_bam_filter_subparser(subparsers):
parser_filter = subparsers.add_parser(
"bam-quality-filter",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Filter out single-end reads by mapping quality and mCH level")
# add options
parser_filter_req = parser_filter.add_argument_group("required inputs")
parser_filter_req.add_argument("--input-file",
type=str,
required=True,
help="BAM file to filter.")
parser_filter_req.add_argument("--output-file",
type=str,
required=True,
help="Name of output file")
parser_filter_req.add_argument("--ref-fasta",
type=str,
required=True,
help="string indicating the path to a fasta file containing the "
+"sequences you used for mapping")
parser_filter_opt = parser_filter.add_argument_group("optional inputs")
parser_filter_opt.add_argument("--min-mapq",
type=int,
default=30,
help="Minimum MAPQ for reads to be included.")
parser_filter_opt.add_argument("--min-num-ch",
type=int,
default=30,
help="Minimum number of CH sites for mCH level filter to be applied.")
parser_filter_opt.add_argument("--max-mch-level",
type=float,
default=0.7,
help="Maximum mCH level for reads to be included.")
parser_filter_opt.add_argument("--buffer-line-number",
type=int,
default=100000,
help="size of buffer for reads to be written on hard drive.")
def add_call_mc_subparser(subparsers):
call_mc = subparsers.add_parser("call-methylation-state",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Call cytosine methylation state from BAM file")
call_mc_req = call_mc.add_argument_group("required inputs")
call_mc_req.add_argument("--input-file",
type=str,
help="bam file that contains mapped bisulfite sequencing reads.")
call_mc_req.add_argument("--sample",
type=str,
required=True,
help="String indicating the name of the sample you are processing. "
+ "It will be included in the output files.")
call_mc_req.add_argument("--ref-fasta",
type=str,
required=True,
help="string indicating the path to a fasta file containing the "
+ "sequences you used for mapping")
call_mc_req.add_argument("--paired-end",
type=str2bool,
required=True,
default=False,
help="Boolean indicating whether the input BAM file is from paired-end "
+"data.")
call_mc_opt = call_mc.add_argument_group("optional inputs")
call_mc_opt.add_argument("--path-to-output",
type=str,
default="",
help="Path to a directory where you would like the output to be stored. "
+ "The default is the same directory as the input fastqs.")
call_mc_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
call_mc_opt.add_argument("--num-upstream-bases",
type=int,
default=0,
help="Number of base(s) upstream of each cytosine that you wish to include "
+ "in output file. Recommend value 1 for NOMe-seq processing since the "
+ "upstream base is required to tell apart cytosine at GC context.")
call_mc_opt.add_argument("--num-downstream-bases",
type=int,
default=2,
help="Number of base(s) downstream of each cytosine that you wish to include "
+ "in output file. Recommend value to be at least 1 to separate cytosines at "
+ "different sequence contexts.")
call_mc_opt.add_argument("--generate-allc-file",
type=str2bool,
default=True,
help="Boolean indicating whether to generate the final output file that "
+" contains the methylation state of each cytosine. If set to be false, "
+"only alignment file (in BAM format) will be generated.")
call_mc_opt.add_argument("--generate-mpileup-file",
type=str2bool,
default=True,
help="Boolean indicating whether to generate intermediate mpileup file to save "
+"space. However, skipping mpileup step may cause problem due to the nature of "
+"python. Not skipping this step is recommended.")
call_mc_opt.add_argument("--compress-output",
type=str2bool,
default=True,
help="Boolean indicating whether to compress (by gzip) the final output "
+ "(allc file(s)).")
call_mc_opt.add_argument("--bgzip",
type=str2bool,
default=False,
help="Boolean indicating whether to bgzip compressed allc files and tabix index.")
call_mc_opt.add_argument("--path-to-bgzip",
type=str,
default="",
help="Path to bgzip installation")
call_mc_opt.add_argument("--path-to-tabix",
type=str,
default="",
help="Path to tabix installation")
call_mc_opt.add_argument("--path-to-samtools",
type=str,
default="",
help="Path to samtools installation")
call_mc_opt.add_argument("--remove-chr-prefix",
type=str2bool,
default=True,
help="Boolean indicates whether to remove in the final output the \"chr\" prefix "
+"in the chromosome name")
call_mc_opt.add_argument("--add-snp-info",
type=str2bool,
default=False,
help="Boolean indicates whether to add extra two columns in the output (allc) file "
+"regarding the genotype information of each site. The first (second) column contain "
+"the number of basecalls that support the reference gentype (variant) for nucleotides "
"in the sequence context.")
call_mc_opt.add_argument("--unmethylated-control",
type=str,
default=None,
help="name of the chromosome/region that you want to use to estimate "
+ "the non-conversion rate of your sample, or the non-conversion rate "
+ "you would like to use. Consequently, control is either a string, or "
+ "a decimal. If control is a string then it should be in the following "
+ "format: \"chrom:start-end\". If you would like to specify an entire "
+ "chromosome simply use \"chrom:\"")
call_mc_opt.add_argument("--binom-test",
type=str2bool,
default=False,
help="Indicates that you would like to perform a binomial test on each cytosine "
+"to delineate cytosines that are significantly methylated than noise due to "
+"the failure of bisulfite conversion.")
call_mc_opt.add_argument("--sig-cutoff",
type=float,
default=.01,
help="float indicating the adjusted p-value cutoff you wish to use for "
+ "determining whether or not a site is methylated")
call_mc_opt.add_argument("--min-mapq",
type=int,
default=30,
help="Minimum MAPQ for reads to be included.")
call_mc_opt.add_argument("--min-cov",
type=int,
default=0,
help="Integer indicating the minimum number of reads for a site to be tested.")
call_mc_opt.add_argument("--min-base-quality",
type=int,
default=1,
help="Integer indicating the minimum PHRED quality score for a base to be "
+"included in the mpileup file (and subsequently to be considered for "
+"methylation calling).")
call_mc_opt.add_argument("--keep-temp-files",
type=str2bool,
default=False,
help="Boolean indicating that you would like to keep the intermediate files "
+"generated by this function. This can be useful for debugging, but in general "
+"should be left False.")
def add_get_methylation_level_subparser(subparsers):
add_mc = subparsers.add_parser("add-methylation-level",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Get methylation level of genomic regions")
add_mc_req = add_mc.add_argument_group("required inputs")
add_mc_req.add_argument("--input-tsv-file",
type=str,
help="A tab-separate file that specifies genomic intervals. The file contains a header."
+"First three columns are required to be chromosome, start and end, which are "
+"1-based cooridates. It may contain additional column(s). ")
add_mc_req.add_argument("--output-file",
type=str,
required=True,
help="Name of output file")
add_mc_req.add_argument("--allc-files",
type=str,
nargs="+",
required=True,
help="List of allc files.")
add_mc_opt = add_mc.add_argument_group("optional inputs")
add_mc_opt.add_argument("--samples",
type=str,
nargs="+",
default=None,
help="List of space separated samples matching allc files. By default "
+"sample names will be inferred from allc filenames")
add_mc_opt.add_argument("--mc-type",
type=str,
nargs="+",
default=["CGN"],
help="List of space separated mc nucleotide contexts for "
+ "which you want to look for DMRs. These classifications "
+ "may use the wildcards H (indicating anything but a G) and "
+ "N (indicating any nucleotide).")
add_mc_opt.add_argument("--extra-info",
type=str2bool,
default=False,
help="Boolean to indicate whether to generate two output extra files with "
+"the total basecalls and covered sites in each of the regions.")
add_mc_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
add_mc_opt.add_argument("--min-cov",
type=int,
default=0,
help="Minimum coverage for a site to be included")
add_mc_opt.add_argument("--max-cov",
type=int,
default=None,
help="Maximum coverage for a site to be included. By default this cutoff is not applied.")
add_mc_opt.add_argument("--buffer-line-number",
type=int,
default=100000,
help="size of buffer for reads to be written on hard drive.")
add_mc_opt.add_argument("--input-no-header",
type=str2bool,
default=False,
help="Indicating whether input tsv file contains a header. If this is set to "
+"True, a header will be automatically generated in the output file.")
def add_allc2bw_subparser(subparsers):
allc2bw = subparsers.add_parser("allc-to-bigwig",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Get bigwig file from allc file")
allc2bw_req = allc2bw.add_argument_group("required inputs")
allc2bw_req.add_argument("--allc-file",
type=str,
help="input allc file to be converted to bigwig format")
allc2bw_req.add_argument("--output-file",
type=str,
required=True,
help="Name of output file")
allc2bw_req.add_argument("--ref-fasta",
type=str,
required=True,
help="string indicating the path to a fasta file containing the "
+ "genome sequences")
allc2bw_opt = allc2bw.add_argument_group("optional inputs")
allc2bw_opt.add_argument("--mc-type",
type=str,
nargs="+",
default=["CGN"],
help="List of space separated mc nucleotide contexts for "
+ "which you want to look for DMRs. These classifications "
+ "may use the wildcards H (indicating anything but a G) and "
+ "N (indicating any nucleotide).")
allc2bw_opt.add_argument("--bin-size",
type=int,
default=100,
help="Genomic bin size for calculating methylation level")
allc2bw_opt.add_argument("--min-bin-sites",
type=int,
default=0,
help="Minimum sites in a bin for it to be included.")
allc2bw_opt.add_argument("--min-bin-cov",
type=int,
default=0,
help="Minimum total coverage of all sites in a bin for methylation level "
+"to be calculated.")
allc2bw_opt.add_argument("--min-site-cov",
type=int,
default=0,
help="Minimum total coverage of a site for it to be included.")
allc2bw_opt.add_argument("--max-site-cov",
type=int,
default=None,
help="Maximum total coverage of a site for it to be included.")
allc2bw_opt.add_argument("--path-to-wigToBigWig",
type=str,
default="",
help="Path to wigToBigWig executable ")
allc2bw_opt.add_argument("--path-to-samtools",
type=str,
default="",
help="Path to samtools installation")
allc2bw_opt.add_argument("--remove-chr-prefix",
type=str2bool,
default=True,
help="Boolean indicates whether to remove \"chr\" in the chromosome names in "
+"genome sequence file to match chromosome names in input allc file.")
allc2bw_opt.add_argument("--add-chr-prefix",
type=str2bool,
default=False,
help="Boolean indicates whether to add \"chr\" in the chromosome names in "
+"input allc file to match chromosome names in genome sequence file. This option "
+"overrides --remove-chr-prefix.")
def add_merge_allc_subparser(subparsers):
merge_allc = subparsers.add_parser("merge-allc",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Merge allc files")
merge_allc_req = merge_allc.add_argument_group("required inputs")
merge_allc_req.add_argument("--allc-files",
type=str,
nargs="+",
required=True,
help="List of allc files to merge.")
merge_allc_req.add_argument("--output-file",
type=str,
required=True,
help="String indicating the name of output file")
merge_allc_opt = merge_allc.add_argument_group("optional inputs")
merge_allc_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors to use")
merge_allc_opt.add_argument("--compress-output",
type=str2bool,
default=True,
help="Boolean indicating whether to compress (by gzip) the final output")
merge_allc_opt.add_argument("--skip-snp-info",
type=str2bool,
default=True,
help="Boolean indicating whether to skip the merging of SNP information")
merge_allc_opt.add_argument("--mini-batch",
type=int,
default=100,
help="The maximum number of allc files to be merged at the same time. Since "
+"OS or python may limit the number of files that can be open at once, value "
+"larger than 200 is not recommended")
def add_index_allc_subparser(subparsers):
index_allc = subparsers.add_parser("index-allc",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Index allc files")
index_allc_req = index_allc.add_argument_group("required inputs")
index_allc_req.add_argument("--allc-files",
type=str,
nargs="+",
required=True,
help="List of allc files to index.")
index_allc_opt = index_allc.add_argument_group("optional inputs")
index_allc_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors to use")
index_allc_opt.add_argument("--reindex",
type=str2bool,
default=True,
help="Boolean indicating whether to index allc files whose "
+"index files already exist.")
def add_filter_allc_subparser(subparsers):
filter_allc = subparsers.add_parser("filter-allc",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Filter allc file")
filter_allc_req = filter_allc.add_argument_group("required inputs")
filter_allc_req.add_argument("--allc-files",
type=str,
required=True,
nargs="+",
help="allc files to filter.")
filter_allc_req.add_argument("--output-files",
type=str,
required=True,
nargs="+",
help="Name of output files. Each output file matches each allc file.")
filter_allc_opt = filter_allc.add_argument_group("optional inputs")
filter_allc_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
filter_allc_opt.add_argument("--mc-type",
type=str,
nargs="+",
default=None,
help="List of space separated cytosine nucleotide contexts for "
+ "sites to be included in output file. These classifications "
+ "may use the wildcards H (indicating anything but a G) and "
+ "N (indicating any nucleotide).")
filter_allc_opt.add_argument("--min-cov",
type=int,
default=0,
help="Minimum number of reads that must cover a site for it to be "
+ "included in the output file.")
filter_allc_opt.add_argument("--max-cov",
type=int,
default=None,
help="Maximum number of reads that must cover a site for it to be "
+ "included in the output file. By default this cutoff is not applied.")
filter_allc_opt.add_argument("--max-mismatch",
type=int,
nargs="+",
default=None,
help="Maximum numbers of mismatch basecalls allowed in each nucleotide in "
+"the sequence context of a site for it to be included in output file. If "
+"the sequence context has three nucleotides, an example of this option is \"0 1 2\". "
+"It requires no mismatch basecall at the first nucleotide, at most one mismatch "
+"basecall at the second nucleotide, and at most two at the third nucleotide for a site "
+"to be reported.")
filter_allc_opt.add_argument("--max-mismatch-frac",
type=float,
nargs="+",
default=None,
help="Maximum fraction of mismatch basecalls out of unambiguous basecalls allowed "
+"in each nucleotide in the sequence context of a site for it to be included "
+" in output file. If the sequence context has three nucleotides, an example "
+"of this option is \"0 0 0.1\". It requires no mismatch basecall at the first "
+"and second nucleotide, and at most 10%% mismatches out of unambiguous basecalls "
+"at the third nucleotide for a site to be reported.")
filter_allc_opt.add_argument("--compress-output",
type=str2bool,
default=True,
help="Boolean indicating whether to compress (by gzip) the final output")
filter_allc_opt.add_argument("--chroms",
type=str,
nargs="+",
default=None,
help="Space separated listing of chromosomes to be included in the output. "
+"By default, data of all chromosomes in input allc file will be included.")
def add_test_allc_subparser(subparsers):
test_allc = subparsers.add_parser("test-allc",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Binomial test on allc file")
test_allc_req = test_allc.add_argument_group("required inputs")
test_allc_req.add_argument("--allc-file",
type=str,
required=True,
help="allc file to be tested.")
test_allc_req.add_argument("--sample",
type=str,
required=True,
help="sample name")
test_allc_req.add_argument("--unmethylated-control",
type=str,
help="name of the chromosome/region that you want to use to estimate "
+ "the non-conversion rate of your sample, or the non-conversion rate "
+ "you would like to use. Consequently, control is either a string, or "
+ "a decimal. If control is a string then it should be in the following "
+ "format: \"chrom:start-end\". If you would like to specify an entire "
+ "chromosome simply use \"chrom:\"")
test_allc_opt = test_allc.add_argument_group("optional inputs")
test_allc_opt.add_argument("--path-to-output",
type=str,
default="",
help="Path to a directory where you would like the output to be stored. "
+ "The default is the same directory as the input fastqs.")
test_allc_opt.add_argument("--num-procs",
type=int,
default=1,
help="Number of processors you wish to use to parallelize this function")
test_allc_opt.add_argument("--min-cov",
type=int,
default=2,
help="Minimum number of reads that must cover a site for it to be "
+ "tested.")
test_allc_opt.add_argument("--compress-output",
type=str2bool,
default=True,
help="Boolean indicating whether to compress (by gzip) the final output")
test_allc_opt.add_argument("--sig-cutoff",
type=float,
default=.01,
help="Float indicating at what FDR you want to consider a result "
+ "significant.")
test_allc_opt.add_argument("--sort-mem",
type=str,
default="500M",
help="Parameter to pass to unix sort with -S/--buffer-size command")
test_allc_opt.add_argument("--remove-chr-prefix",
type=str2bool,
default=True,
help="Boolean indicates whether to remove in the final output the \"chr\" prefix "
+"in the chromosome name")
def str2bool(v):
## adapted from the answer by Maxim at
## https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return(True)
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return(False)
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
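# Illustrative sketch, not part of the original methylpy code: how str2bool is
# meant to be wired into argparse so that values like "yes"/"false" parse cleanly.
# The parser and invocation below are hypothetical and shown commented out.
# demo_parser = argparse.ArgumentParser()
# demo_parser.add_argument("--pbat", type=str2bool, default=False)
# demo_args = demo_parser.parse_args(["--pbat", "yes"])  # demo_args.pbat -> True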
if __name__ == "__main__":
parse_args()
|
yupenghe/methylpy
|
methylpy/parser.py
|
Python
|
apache-2.0
| 97,419
|
[
"BWA",
"Bowtie"
] |
83b1f5299b8cc4f81f939b483b2dfa1f39956072c7fcda8443765faf3acf7264
|
import sys
import argparse
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib as mp
import os
from scipy import interpolate
import xes_energies
import pdb
from scipy.ndimage.filters import gaussian_filter as filt
data_extractor = np.genfromtxt
# backsatter energy for graphite 002
e0 = 1848.303414
# distance in mm between HOPG crystal and the spectrometer's
# source-to-detector axis; i.e., the crystal's curvature radius
hopg_radius = 103.4
# Dict of emission line energies.
emission = xes_energies.emission_dict()
# Maps emission line keys to plot labels
lineidforplot = {'ka1': "$K\\alpha_1$", 'ka2': "$K\\alpha_2$", 'kb': "$K\\beta_{1,3}$", 'Ef': "$E_F$"}
def center_col(data):
"""
Return the peak index of the CSPAD data summed along the zeroth axis
(perpendicular to the energy-dispersive direction).
"""
summed = np.sum(data, axis = 0)
return np.argmax(summed)
def lineout(data, cencol, pxwidth = 3):
"""
Return a 1d lineout summed over a window of 2 * pxwidth + 1 columns centered at cencol.
"""
spectrum_num_points = len(data)
spectrum_intensities = np.array([ sum( [data[i][j] for j in range(cencol-pxwidth,cencol+pxwidth+1)] ) for i in range(spectrum_num_points) ])
return spectrum_intensities
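# Illustrative sketch, not part of the original module: center_col() picks the
# brightest detector column and lineout() sums a 2*pxwidth + 1 column window
# around it. The toy frame below is hypothetical and shown commented out.
# demo_frame = np.zeros((391, 370))
# demo_frame[:, 180] = 1.0
# demo_cencol = center_col(demo_frame)              # -> 180
# demo_spectrum = lineout(demo_frame, demo_cencol)  # 1d lineout, 7 columns wide by default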
def get_normalization(x, intensities, sumwidth = 150):
n_ref1 = np.argmax(intensities)
x_ref1 = x[n_ref1]
filtered = intensities[np.logical_and(x > x_ref1 - sumwidth, x < x_ref1 + sumwidth)]
#print "peak index: ", n_ref1
return np.sum(filtered)
#def save_calib(fname, energies):
# with open(fname, 'wb') as f:
# np.savetxt(f, np.array([range(len(energies)), energies]).T, header = 'row index\tenergy(eV)')
def save_calib(spectrum_num_points, save_path, energy_ref1, energy_ref2, n_ref1, n_ref2):
"""
energy_ref1: first (lower) reference energy
energy_ref2: second (higher) reference energy
n_ref1: array index corresponding to energy_ref1
n_ref2: array index corresponding to energy_ref2
Saves the provided constants to save_path
"""
with open(save_path, 'wb') as f:
np.savetxt(f, np.array([[spectrum_num_points], [energy_ref1], [energy_ref2], [n_ref1], [n_ref2]]).T, '%d\t %f\t %f\t %d\t %d', header = 'Number of points in spectrum\tEnergy 1 (eV)\tEnergy 2 (eV)\tpixel index 1\tpixel index 2')
#def load_calib(fname):
# with open(fname, 'rb') as f:
# energies =(np.genfromtxt(f).T)[1]
# return energies
def load_calib(fname):
def parse_config_objs(spectrum_num_points, energy_ref1, energy_ref2, n_ref1, n_ref2):
return int(spectrum_num_points), float(energy_ref1), float(energy_ref2), int(n_ref1), int(n_ref2)
with open(fname, 'rb') as f:
spectrum_num_points, energy_ref1, energy_ref2, n_ref1, n_ref2 = parse_config_objs(*np.genfromtxt(f).T)
return energies_from_two_points(spectrum_num_points, energy_ref1, energy_ref2, n_ref1, n_ref2)
def get_k_energies_and_positions(eltname, spectrum):
"""
Return the energies and indices of the k alpha and k beta peaks in
spectrum.
Arguments:
spectrum: a 1d-array
It is assumed that the two largest peaks in spectrum are the k alpha
and k beta peaks of a single element.
"""
try:
energy_kalpha = emission[eltname]['ka1']
energy_kbeta = emission[eltname]['kb']
except KeyError:
raise KeyError("element identifier not found: " + eltname)
n_kalpha = np.argmax(spectrum)
offset = n_kalpha + 20
n_kbeta = np.argmax(spectrum[offset:]) + offset
return energy_kalpha, energy_kbeta, n_kalpha, n_kbeta
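# Illustrative sketch, not part of the original module: the two largest peaks in the
# spectrum are taken as K-alpha and K-beta, with K-beta searched at least 20 bins past
# K-alpha. The toy spectrum below is hypothetical, and 'Fe' is assumed to be present in
# the emission dictionary; shown commented out.
# demo_spec = np.zeros(200)
# demo_spec[50], demo_spec[120] = 100.0, 20.0
# e_ka, e_kb, n_ka, n_kb = get_k_energies_and_positions('Fe', demo_spec)
# # n_ka -> 50, n_kb -> 120; e_ka and e_kb are looked up from the emission table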
def energies_from_two_points(spectrum_num_points, energy_ref1, energy_ref2, n_ref1, n_ref2):
"""
Calculate an array of energy values corresponding to pixel indices using
two reference points
"""
# calculate positions of the reference peaks on the spectrometer
thalpha = math.asin(e0/energy_ref1)
posalpha = hopg_radius/(math.tan(thalpha))
thbeta = math.asin(e0/energy_ref2)
posbeta = hopg_radius/(math.tan(thbeta))
# calculate pixel size
pxsize = (posbeta - posalpha)/(n_ref1 - n_ref2)
# calculate pixel horizontal positions relative to source point
pixels = range(n_ref1-spectrum_num_points, n_ref1)
pixels = [ posalpha + pxsize*n for n in pixels ]
# calculate Bragg angles and energies for graphite 002
thetalist = [ math.atan(hopg_radius/p) for p in pixels ]
elist = [ e0/(math.sin(theta)) for theta in thetalist ]
return elist
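# Illustrative sketch, not part of the original module: given two reference energies
# and their pixel indices, every pixel is mapped to an energy through the HOPG
# backscatter geometry (E = e0 / sin(theta)). The reference values below are
# hypothetical placeholders, not measured calibration constants; shown commented out.
# demo_energies = energies_from_two_points(spectrum_num_points=391,
#                                          energy_ref1=6404.0, energy_ref2=7058.0,
#                                          n_ref1=120, n_ref2=300)
# # demo_energies[i] is the energy (eV) assigned to spectrum bin i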
def energies_from_data(data, cencol, save_path = None, eltname = '', calibration_mode = 'k alpha k beta', **kwargs):
"""
Return 1d array of energies corresponding to rows on the detector
based on a calibration from the ka and kb peak positions in the given XES data.
If a string is assigned to save_path, the calibration constants are
saved to that path.
Returns a 1d array of energies.
"""
spectrum = lineout(data, cencol)[::-1]
spectrum_num_points = len(spectrum)
if calibration_mode == 'k alpha k beta':
energy_ref1, energy_ref2, n_ref1, n_ref2 = get_k_energies_and_positions(eltname, spectrum)
# else:....
# TODO: add other energy calibration modes
energies = energies_from_two_points(spectrum_num_points, energy_ref1, energy_ref2, n_ref1, n_ref2)
nrm = np.sum(spectrum[max(n_ref1-40,0):min(n_ref1+40, spectrum_num_points)])
#energies = elist[::-1]
if save_path:
dirname = os.path.dirname(save_path)
if dirname and (not os.path.exists(dirname)):
os.system('mkdir -p ' + os.path.dirname(save_path))
#save_calib(save_path, energies)
save_calib(spectrum_num_points, save_path, energy_ref1, energy_ref2, n_ref1, n_ref2)
return np.array(energies)
# TODO: allow masking out bad data ranges (probably something to put into
# a config file) for background subtraction purposes
def get_spectrum(data, dark = None, cencol_calibration_data = None, cold_calibration_data = None,
pxwidth = 3, bg_sub = True, calib_load_path = None, calib_save_path = None,
energy_ref1_energy_ref2_calibration = True, eltname = ''):
"""
Return the XES spectrum corresponding to the given data
and element
Inputs:
eltname: string of the abbreviated element name
data: 2d array of CSPAD data
cold_calibration_data: data to use for determination of the energy
scale. If None (and calib_load_path is not given), no energy calibration is performed.
cencol_calibration_data: data to use for location of the
spectrometer's line of focus. If None, the first argument is used
for this.
pxwidth: width of the CSPAD lineout from which the spectrum is
constructed
peak_width:
TODO: deprecate or not?
bg_sub: if True, perform a constant subtraction. The subtracted
constant is the 5th percentile of the spectrum after smoothing
with a gaussian kernel of standard deviation 5
calib_load_path: path to a file with an energy calibration to load
calib_save_path: File to which to save an energy calibration if
calib_load_path is None
energy_ref1_energy_ref2_calibration: If calib_load_path is None, use k alpha
and k beta peak locations to determine an energy scale. If False
and calib_load_path is also None, do not perform an energy
calibration at all.
Output: array, array -> energy or index, normalized intensity
"""
if np.shape(data) != (391, 370):
print "WARNING: array dimensions differ from those of CSPAD140k"
if energy_ref1_energy_ref2_calibration or calib_load_path:
peak_width = 150
else:
peak_width = 15
if dark is not None:
# copy without mutating the original array
data = np.array(data, copy = True) - dark
if cencol_calibration_data is None:
cencol_calibration_data = data
cencol = center_col(cencol_calibration_data)
intensities = lineout(data, cencol, pxwidth = pxwidth)
# if calib_load_path:
# x = load_calib(calib_load_path)
if calib_load_path:
x = load_calib(calib_load_path)
elif energy_ref1_energy_ref2_calibration and eltname and (cold_calibration_data is not None):
x = energies_from_data(cold_calibration_data, cencol, save_path = calib_save_path, eltname = eltname)
else:
if energy_ref1_energy_ref2_calibration and not eltname:
print "No element identifier provided; skipping energy calibration."
elif energy_ref1_energy_ref2_calibration and not cold_calibration_data:
print "No file for calibration specified; skipping energy calibration"
x = np.array(range(len(intensities)))
if bg_sub:
smoothed = filt(intensities, 5)
floor = np.percentile(smoothed, 5)
intensities -= floor
norm = get_normalization(x, intensities, peak_width)
return x, intensities / norm
def main(paths, cold_calibration_path = None, pxwidth = 3,
calib_load_path = None, calib_save_path = None,
dark_path = None, energy_ref1_energy_ref2_calibration = True,
eltname = ''):
spectrumList = []
scale_ev = (energy_ref1_energy_ref2_calibration or calib_load_path)
if not os.path.exists('xes_spectra/'):
os.makedirs('xes_spectra')
if cold_calibration_path:
cold_calibration_data = data_extractor(cold_calibration_path)
else:
cold_calibration_data = None
if dark_path:
dark = data_extractor(dark_path)
else:
dark = None
data_arrays = map(data_extractor, paths)
labels = map(os.path.basename, paths)
for data, label in zip(data_arrays, labels):
energies, intensities = get_spectrum(data,
cencol_calibration_data = data,
dark = dark, cold_calibration_data = cold_calibration_data,
pxwidth = pxwidth, calib_load_path = calib_load_path,
calib_save_path = calib_save_path,
energy_ref1_energy_ref2_calibration = energy_ref1_energy_ref2_calibration, eltname = eltname)
spectrumList.append([energies, intensities])
if eltname:
np.savetxt('xes_spectra/' + label + '_' + eltname,
[energies, intensities], header = 'energy (eV)\tintensity (arb)')
else:
np.savetxt('xes_spectra/' + label,
[energies, intensities], header = 'energy (eV)\tintensity (arb)')
if eltname:
name = 'plots_xes/' + '_'.join(labels) + '_' + eltname
else:
name = 'plots_xes/' + '_'.join(labels)
plot_spectra(spectrumList, labels, scale_ev, name = name, eltname = eltname)
def plot_spectra(spectrumList, labels, scale_ev, name = None, eltname = ''):
if not os.path.exists('plots_xes/'):
os.makedirs('plots_xes/')
elist = spectrumList[0][0]
max_intensity = np.max(map(lambda x: x[1], spectrumList))
plt.plot(elist, spectrumList[0][1], label = labels[0])
plt.axhline(y=0, color = "black")
plt.title(eltname + " XES")
#add vertical lines to identify peaks within observed range
txtshift = {'ka1': 0, 'ka2': -20, 'kb': -25, 'Ef': 0}
txtheight = {'ka1': 1.1, 'ka2': 1.1, 'kb': 0.5, 'Ef': 0.5}
if eltname:
lines = emission[eltname].keys()
for line in lines:
if elist[-1] - 50 < emission[eltname][line] < elist[0] + 50:
plt.plot( [emission[eltname][line], emission[eltname][line]],
[(-0.05)*max_intensity, (txtheight[line])*max_intensity],
color = "gray")
plt.text(emission[eltname][line]+txtshift[line], (txtheight[line])*max_intensity, lineidforplot[line], size="large")
colorlist = 4 * ["orange", "green", "purple", "red", "brown", "black"]
ncolors = len(colorlist)
for spectrum, label, n in zip(spectrumList[1:], labels[1:], range(len(labels[1:]))):
plt.plot(spectrum[0], spectrum[1], label = label, color = colorlist[(n-1)])
plt.legend()
if scale_ev:
plt.xlabel("Energy (eV)", size="large")
else:
plt.xlabel("CSPAD index", size="large")
plt.ylabel("Counts", size="large")
plt.ylim((0, 1.15 * max_intensity))
if name:
plt.savefig(name + '.png', bbox_inches='tight')
plt.savefig(name + '.svg', bbox_inches='tight')
plt.show()
|
hoidn/LCLS
|
xes/xes/xes_process.py
|
Python
|
gpl-3.0
| 12,428
|
[
"CRYSTAL",
"Gaussian"
] |
5179bec7ba93cee141a616be2b4b20e89dda2d735b0b3df4a57750fc95416196
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
name = 'java'
if 'posix' in _names:
_name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
_name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
_name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
_name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
_name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
elif 'ibmi' in _names:
_name = 'ibmi'
linesep = '\n'
from ibmi import *
try:
from ibmi import _exit
except ImportError:
pass
import posixpath as path
import ibmi
__all__.extend(_get_exports_list(ibmi))
del ibmi
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
name=name.replace("\r","\\r")
if path.exists(name):
return
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def _exists(name):
# CPython eval's the name, whereas looking in __all__ works for
# Jython and is much faster
return name in __all__
if _exists('execv'):
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if _name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif _name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
import UserDict
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
def __delitem__(self, key):
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if sys.platform.startswith('java') or _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += read(_urandomfd, n - len(bytes))
close(_urandomfd)
return bytes
# Supply os.popen()
def popen(cmd, mode='r', bufsize=-1):
"""popen(command [, mode='r' [, bufsize]]) -> pipe
Open a pipe to/from a command returning a file object.
"""
if not isinstance(cmd, (str, unicode)):
raise TypeError('invalid cmd type (%s, expected string)' % type(cmd))
if mode not in ('r', 'w'):
raise ValueError("invalid mode %r" % mode)
import subprocess
if mode == 'r':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdout=subprocess.PIPE)
fp = proc.stdout
elif mode == 'w':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdin=subprocess.PIPE)
fp = proc.stdin
# files from subprocess are in binary mode but popen needs text mode
fp = fdopen(fp.fileno(), mode, bufsize)
return _wrap_close(fp, proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close(object):
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
# The return value is the same on all platforms in this version,
# so no platform-specific handling is needed here.
return returncode
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
|
rzabini/gradle-sphinx
|
src/main/jython/os.py
|
Python
|
apache-2.0
| 25,197
|
[
"VisIt"
] |
634207e03c5735a58c20ef702d206d0b5efac98090b6a4f0e1cd2867dc1eb65f
|
#!/usr/bin/env python
#######################
# Script to take center RA/DEC location & generate dithered offsets
# for baseline project exposures
########################
### History:
## 2014/07/21 rgm: take out "coordinates": "absolute",
## which we are told is invalid.
## 2014/07/25 rgm: fix --disableoutrigger; add argv to end of config
import sys, argparse, random, numpy as np
import json, math
########################
def generateMainDithers(nexp, dithersize):
if nexp == 0:
return []
exp_offsets = [(0., 0.)]
if nexp == 1:
return exp_offsets
OneOffsetPositive = True
OneOffsetCount = 0
IndexPositive = True
IndexCount = 1
IndexOffsetLeft = True
for i in range(1, nexp):
IndexOffset = i
if not IndexPositive:
IndexOffset = -i
OneOffset = 1
if not OneOffsetPositive:
OneOffset = -1
if IndexOffsetLeft:
exp_offsets.append((IndexOffset*dithersize, OneOffset*dithersize))
else:
exp_offsets.append((OneOffset*dithersize, IndexOffset*dithersize))
IndexOffsetLeft = not IndexOffsetLeft
OneOffsetCount += 1
if OneOffsetCount == 2:
OneOffsetPositive = not OneOffsetPositive
OneOffsetCount = 0
IndexCount += 1
if IndexCount == 2:
IndexPositive = not IndexPositive
IndexCount = 0
return exp_offsets
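# Illustrative trace (not part of the original source): generateMainDithers(4, 60.)
# returns [(0., 0.), (60., 60.), (60., -120.), (-180., -60.)]; each tuple is a
# relative step in arcsec that absolutePointings() below accumulates into RA/Dec.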
########################
def absolutePointings(ra, dec, exp_offsets):
cum_offsets = []
ra = ra
dec = dec
for offset in exp_offsets:
ra += (offset[0]/3600.)/np.cos(dec*np.pi/180.)
dec += (offset[1]/3600.)
cum_offsets.append((ra, dec))
return cum_offsets
########################
def generateOutriggers(ra, dec, outriggersize):
exp_positions = []
posangle = random.uniform(0, 360)
for i in range(3):
angle = (posangle + i*(360/3.))*(np.pi/180.)
exp_positions.append( (ra + outriggersize*np.cos(angle)/np.cos(dec*np.pi/180.),
dec + outriggersize*np.sin(angle)) )
return exp_positions
########################
def writeExposures(output, exposures):
json_string = json.dumps(exposures, sort_keys = True, indent=4)
output.write(json_string)
#######################
def countTime(exposures, overhead):
nexp = len(exposures)
if nexp == 0:
return 0, 0
    imagetime = sum(x['expTime'] for x in exposures if 'break' not in x)
totaltime = imagetime + nexp*overhead
return imagetime, totaltime
########################
def createScript(args):
output = open(args.scriptname, 'w')
### deal with stuff always at the start of the exposure sequence
start_exposures = []
if args.breakstart:
breakKeys = {'break' : True}
start_exposures.append(breakKeys)
#short test exposure
if not args.noshortexp:
seqid = '%s_%s_%d_short' % (args.object, args.filter, args.visit)
keywords = {'expType' : 'object', # 'coordinates' : 'absolute',
'RA' : args.ra, 'dec' : args.dec,
'filter' : args.filter, 'object' : args.object,
'expTime' : 10,
'seqid' : seqid, 'seqnum' : 0, 'seqtot' : 2}
start_exposures.append(keywords)
exptime = math.ceil(10**((np.log10(10.) + \
np.log10(args.singletime))/2.))
keywords = {'expType' : 'object', # 'coordinates' : 'absolute',
'RA' : args.ra, 'dec' : args.dec,
'filter' : args.filter, 'object' : args.object,
'expTime' : exptime,
'seqid' : seqid, 'seqnum' : 0, 'seqtot' : 2}
start_exposures.append(keywords)
breakKeys = {'break' : True}
start_exposures.append(breakKeys)
#deal with dithers
seqid = '%s_%s_%d_dither' % (args.object, args.filter, args.visit)
nexp = args.nexp
science_exposures = []
exposure_offsets = generateMainDithers(nexp = nexp, dithersize=args.dithersize)
abs_pointings = absolutePointings(args.ra, args.dec, exposure_offsets)
expIDoffset = args.startwithexpnum
for seqnum, pointing in enumerate(abs_pointings[expIDoffset:]):
# keywords = {'expType' : 'object', 'coordinates' : 'absolute', 'RA' : pointing[0], 'DEC' : pointing[1],
keywords = {'expType' : 'object', 'RA' : pointing[0], 'DEC' : pointing[1],
'filter' : args.filter, 'object' : args.object, 'expTime' : args.singletime,
'seqid' : seqid, 'seqnum' : expIDoffset+seqnum, 'seqtot' : nexp}
science_exposures.append(keywords)
#deal with outriggers
outrigger_exposures = []
if not args.disableoutrigger:
outrigger_positions = generateOutriggers(ra = args.ra, dec = args.dec, outriggersize = args.outriggersize)
# make sure we grab the central exposure if we aren't taking science images
outriggerid = '%s_%s_%d_outrigger' % (args.object, args.filter, args.visit)
outriggernum = 3
if args.nexp == 0:
outrigger_positions.insert(0,(args.ra, args.dec))
outriggernum = 4
for seqnum, exp_pos in enumerate(outrigger_positions):
# keywords = {'expType' : 'object', 'coordinates' : 'absolute',
keywords = {'expType' : 'object',
'RA' : exp_pos[0], 'DEC' : exp_pos[1],
'filter' : args.filter, 'object' : args.object, 'expTime' : args.outriggertime,
'seqid' : outriggerid, 'seqnum' : seqnum, 'seqtot' : outriggernum}
outrigger_exposures.append(keywords)
if args.outriggerfirst:
exposures = start_exposures + outrigger_exposures + science_exposures
else:
exposures = start_exposures + science_exposures + outrigger_exposures
if not args.nobreakend:
breakKeys = {'break' : True}
exposures.append(breakKeys)
writeExposures(output, exposures)
output.close()
calibImage, calibTotal = countTime(start_exposures + outrigger_exposures, args.overhead)
sciImage, sciTotal = countTime(science_exposures, args.overhead)
return sciImage, calibImage, sciTotal + calibTotal
########################
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--ra', type=float)
parser.add_argument('--dec', type=float)
parser.add_argument('--nexp', type=int,
help = 'Total number of full-length science exposures')
parser.add_argument('--singletime', type=int,
help = 'Exposure time per image')
parser.add_argument('--dithersize', type=float, default=60.0,
help = 'Basic unit size for dither; arcseconds')
parser.add_argument('--disableoutrigger', default=False, action='store_true')
parser.add_argument('--outriggersize', type=float, default = 0.5,
help = 'Step size from center for outrigger exp; degrees')
parser.add_argument('--outriggertime', type=int, default=-1,
help = 'Exposure time for each outrigger')
parser.add_argument('--outriggerfirst', default=False, action='store_true')
parser.add_argument('--filter', type=str)
parser.add_argument('--object', type=str)
parser.add_argument('--visit', type=int, default=0)
parser.add_argument('--breakstart', default=False, action='store_true')
parser.add_argument('--nobreakend', default=False, action='store_true')
parser.add_argument('--startwithexpnum', type=int, default = 0, help='Start at a different initial dither; 0-indexed')
parser.add_argument('--noshortexp', default=False, action='store_true')
parser.add_argument('--overhead', type=int, default = 30)
parser.add_argument('--offset', type=float, default = -4.5,
help = 'Offset in DEC direction to place center of cluster in the middle of a chip (in arcmin)')
parser.add_argument('--scriptname', type=str, default='')
args = parser.parse_args(argv)
args.offset = args.offset/60.
args.dec = args.dec + args.offset
if args.outriggertime == -1:
args.outriggertime = args.singletime / 2.
outriggerFlag = 1
if args.disableoutrigger:
outriggerFlag = 0
if args.scriptname == '':
args.scriptname = '%s_%s_v%d_sci%d-%d_out%d.script' % (args.object, args.filter, args.visit, args.startwithexpnum, args.nexp, outriggerFlag)
configfile = '%s.config' % args.scriptname
print 'Called with configuration:'
print 'RA: %f' % args.ra
print 'DEC: %f' % (args.dec - args.offset)
print 'DEC Offset: %f arcmin' % (60*args.offset)
print 'Number of Science Exposures: %d' % args.nexp
print 'Single Exposure: %d' % args.singletime
print 'Dither Size: %f' % args.dithersize
print 'Disable Outrigger?: %s' % args.disableoutrigger
print 'Outrigger First?: %s' % args.outriggerfirst
print 'Outrigger Size: %f' % args.outriggersize
print 'Outrigger time: %f' % args.outriggertime
print 'Filter: %s' % args.filter
print 'Object: %s' % args.object
print 'Break Start? : %s' % args.breakstart
print 'Break End? : %s' % (not args.nobreakend)
print 'Overhead: %d' % args.overhead
print 'First Exposure : %d' % args.startwithexpnum
print 'Script Name: %s' % args.scriptname
scitime, calibtime, totaltime = createScript(args)
print
print 'Science Time: %d' % scitime
print 'Calib Time: %d' % calibtime
print 'Total Time: %d' % totaltime
with open(configfile, 'w') as output:
output.write('Called with configuration:\n' )
output.write('RA: %f\n' % args.ra )
output.write('DEC: %f\n' % (args.dec - args.offset) )
output.write('DEC Offset: %f arcmin\n' % (60*args.offset))
output.write('Number of Science Exposures: %d\n' % args.nexp )
output.write('Single Exposure: %d\n' % args.singletime )
output.write('Dither Size: %f\n' % args.dithersize )
output.write('Disable Outrigger?: %s\n' % args.disableoutrigger )
output.write('Outrigger First?: %s\n' % args.outriggerfirst )
output.write('Outrigger Size: %f\n' % args.outriggersize )
        output.write('Outrigger Time: %f\n' % args.outriggertime )
output.write('Filter: %s\n' % args.filter )
output.write('Object: %s\n' % args.object )
output.write('Break Start? : %s\n' % args.breakstart )
output.write('Break End? : %s\n' % (not args.nobreakend) )
output.write('Overhead : %d\n' % args.overhead )
output.write('First Exposure : %d\n' % args.startwithexpnum )
output.write('Script Name: %s\n' % args.scriptname )
output.write('Science Time: %d\n' % scitime )
output.write('Calib Time: %d\n' % calibtime )
output.write('Total Time: %d\n' % totaltime )
output.write('Arguments: %s\n' % ' '.join(map(str,sys.argv[1:])))
#########################
if __name__ == '__main__':
main(argv = sys.argv[1:])
|
deapplegate/wtgpipeline
|
dithermaker.py
|
Python
|
mit
| 11,793
|
[
"VisIt"
] |
c3b3f96bd21a985c136ac68271db25abc822bc8a59ea28cef7814d36cd437dc8
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''This module defines the ScrollableLayer and ScrollingManager classes.
Controlling Scrolling
---------------------
You have two options for scrolling:
1. automatically scroll the map but stop at the map edges, and
2. scroll the map and allow the edge of the map to be displayed.
The ScrollingManager has a concept of "focus" which is the pixel
position of the player's view focus (*usually* the center of the
player sprite itself, but the player may be allowed to
move the view around, or you may move it around for them to highlight
something else in the scene). The ScrollingManager is clever enough to
manage many layers and handle scaling them.
Two methods are available for setting the map focus:
**set_focus(x, y)**
Attempt to set the focus to the pixel coordinates given. The layer(s)
contained in the ScrollingManager are moved accordingly. If a layer
would be moved outside of its defined px_width, px_height then the
scrolling is restricted. The resultant restricted focal point is stored
on the ScrollingManager as restricted_fx and restricted_fy.
**force_focus(x, y)**
Force setting the focus to the pixel coordinates given. The layer(s)
contained in the ScrollingManager are moved accordingly regardless of
whether any out-of-bounds cells would be displayed. The .fx and .fy
attributes are still set, but they'll *always* be set to the supplied
x and y values.
'''
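# A minimal usage sketch (hypothetical layer/player names; not part of the
# original module):
#
#   scroller = ScrollingManager()
#   scroller.add(my_scrollable_layer)        # e.g. a tiled MapLayer
#   scroller.set_focus(player.x, player.y)   # clamped to the layers' px bounds
#   scroller.force_focus(player.x, player.y) # ignores bounds entirely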
__docformat__ = 'restructuredtext'
from cocos.director import director
from cocos.layer.base_layers import Layer
import pyglet
from pyglet.gl import *
class ScrollableLayer(Layer):
'''A Cocos Layer that is scrollable in a Scene.
A layer may have a "parallax" value which is used to scale the position
(and not the dimensions) of the view of the layer - the layer's view
(x, y) coordinates are calculated as::
my_view_x = parallax * passed_view_x
my_view_y = parallax * passed_view_y
Scrollable layers have a view which identifies the section of the layer
currently visible.
The scrolling is usually managed by a ScrollingManager.
'''
view_x, view_y = 0, 0
view_w, view_h = 0, 0
origin_x = origin_y = origin_z = 0
def __init__(self, parallax=1):
super(ScrollableLayer,self).__init__()
self.parallax = parallax
# force (cocos) transform anchor to be 0 so we don't OpenGL
# glTranslate() and screw up our pixel alignment on screen
self.transform_anchor_x = 0
self.transform_anchor_y = 0
# XXX batch eh?
self.batch = pyglet.graphics.Batch()
def on_enter(self):
director.push_handlers(self.on_cocos_resize)
super(ScrollableLayer, self).on_enter()
def on_exit(self):
super(ScrollableLayer, self).on_exit()
director.pop_handlers()
def set_view(self, x, y, w, h, viewport_ox=0, viewport_oy=0):
x *= self.parallax
y *= self.parallax
self.view_x, self.view_y = x, y
self.view_w, self.view_h = w, h
#print self, 'set_view - x, y, w, h:', self.view_x, self.view_y, self.view_w, self.view_h
x -= self.origin_x
y -= self.origin_y
x -= viewport_ox
y -= viewport_oy
self.position = (-x, -y)
def draw(self):
# invoked by Cocos machinery
super(ScrollableLayer, self).draw()
# XXX overriding draw eh?
glPushMatrix()
self.transform()
self.batch.draw()
glPopMatrix()
def set_dirty(self):
'''The viewport has changed in some way.
'''
pass
def on_cocos_resize(self, usable_width, usable_height):
self.set_dirty()
class ScrollingManager(Layer):
'''Manages scrolling of Layers in a Cocos Scene.
Each ScrollableLayer that is added to this manager (via standard list
methods) may have pixel dimensions .px_width and .px_height. Tile
    module MapLayers have these attributes. The manager will limit scrolling
to stay within the pixel boundary of the most limiting layer.
If a layer has no dimensions it will scroll freely and without bound.
The manager is initialised with the viewport (usually a Window) which has
the pixel dimensions .width and .height which are used during focusing.
A ScrollingManager knows how to convert pixel coordinates from its own
pixel space to the screen space.
'''
def __init__(self, viewport=None, do_not_scale=None):
if do_not_scale is None:
do_not_scale = director.do_not_scale_window
self.autoscale = not do_not_scale and not director.do_not_scale_window
self.viewport = viewport
# These variables define the Layer-space pixel view which is mapping
# to the viewport. If the Layer is not scrolled or scaled then this
# will be a one to one mapping.
self.view_x, self.view_y = 0, 0
self.view_w, self.view_h = 1, 1
self.childs_ox = 0
self.childs_oy = 0
# Focal point on the Layer
self.fx = self.fy = 0
super(ScrollingManager, self).__init__()
# always transform about 0,0
self.transform_anchor_x = 0
self.transform_anchor_y = 0
def on_enter(self):
super(ScrollingManager, self).on_enter()
director.push_handlers(self.on_cocos_resize)
self.update_view_size()
self.refresh_focus()
def on_exit(self):
director.pop_handlers()
super(ScrollingManager, self).on_exit()
def update_view_size(self):
if self.viewport is not None:
self.view_w, self.view_h = self.viewport.width, self.viewport.height
self.view_x, self.view_y = getattr(self.viewport, 'position', (0,0))
if director.do_not_scale_window:
self._scissor_flat = (self.view_x, self.view_y,
self.view_w, self.view_h)
else:
w, h = director.get_window_size()
sx = director._usable_width/float(w)
sy = director._usable_height/float(h)
self._scissor_flat = (int(self.view_x * sx), int(self.view_y * sy),
int(self.view_w * sx), int(self.view_h * sy))
elif self.autoscale:
self.view_w, self.view_h = director.get_window_size()
else:
self.view_w = director._usable_width
self.view_h = director._usable_height
def on_cocos_resize(self, usable_width, usable_height):
        # when using an explicit viewport you should adjust the viewport for
        # resize changes here, before the lines that follow.
        # Also, if your app performs other changes to the viewport, it should
        # use the lines that follow to update viewport-related internal state
self.update_view_size()
self.refresh_focus()
def refresh_focus(self):
if self.children:
self._old_focus = None # disable NOP check
self.set_focus(self.fx, self.fy)
_scale = 1.0
def set_scale(self, scale):
self._scale = 1.0*scale
self.refresh_focus()
scale = property(lambda s: s._scale, set_scale)
def add(self, child, z=0, name=None):
'''Add the child and then update the manager's focus / viewport.
'''
super(ScrollingManager, self).add(child, z=z, name=name)
# set the focus again and force it so we don't just skip because the
# focal point hasn't changed
self.set_focus(self.fx, self.fy, force=True)
def pixel_from_screen(self, x, y):
'''Look up the Layer-space pixel matching the screen-space pixel.
Account for viewport, layer and screen transformations.
'''
# director display scaling
if not director.do_not_scale_window:
x, y = director.get_virtual_coordinates(x, y)
# normalise x,y coord
ww, wh = director.get_window_size()
sx = x / float(self.view_w)
sy = y / float(self.view_h)
# get the map-space dimensions
vx, vy = self.childs_ox, self.childs_oy
# get our scaled view size
w = int(self.view_w / self.scale)
h = int(self.view_h / self.scale)
#print (int(x), int(y)), (vx, vy, w, h), int(vx + sx * w), int(vy + sy * h)
# convert screen pixel to map pixel
return int(vx + sx * w), int(vy + sy * h)
def pixel_to_screen(self, x, y):
'''Look up the screen-space pixel matching the Layer-space pixel.
Account for viewport, layer and screen transformations.
'''
screen_x = self.scale*(x-self.childs_ox)
screen_y = self.scale*(y-self.childs_oy)
return int(screen_x), int(screen_y)
_old_focus = None
def set_focus(self, fx, fy, force=False):
'''Determine the viewport based on a desired focus pixel in the
Layer space (fx, fy) and honoring any bounding restrictions of
child layers.
The focus will always be shifted to ensure no child layers display
out-of-bounds data, as defined by their dimensions px_width and px_height.
'''
# if no child specifies dimensions then just force the focus
if not [l for z,l in self.children if hasattr(l, 'px_width')]:
return self.force_focus(fx, fy)
# This calculation takes into account the scaling of this Layer (and
# therefore also its children).
        # The result is that all children will have their viewport set, defining
# which of their pixels should be visible.
fx, fy = int(fx), int(fy)
self.fx, self.fy = fx, fy
a = (fx, fy, self.scale)
# check for NOOP (same arg passed in)
if not force and self._old_focus == a:
return
self._old_focus = a
# collate children dimensions
x1 = []; y1 = []; x2 = []; y2 = []
for z, layer in self.children:
if not hasattr(layer, 'px_width'): continue
x1.append(layer.origin_x)
y1.append(layer.origin_y)
x2.append(layer.origin_x + layer.px_width)
y2.append(layer.origin_y + layer.px_height)
        # figure the child layer min/max bounds (min over the max extents too,
        # so the most limiting layer restricts scrolling)
b_min_x = min(x1)
b_min_y = min(y1)
b_max_x = min(x2)
b_max_y = min(y2)
# get our viewport information, scaled as appropriate
w = int(self.view_w / self.scale)
h = int(self.view_h / self.scale)
w2, h2 = w//2, h//2
if (b_max_x - b_min_x)<=w:
            # this branch gives a nicely centered view and no view jump when
            # crossing the center; used when world width <= view width
restricted_fx = (b_max_x + b_min_x)/2
else:
if (fx - w2) < b_min_x:
restricted_fx = b_min_x + w2 # hit minimum X extent
elif (fx + w2) > b_max_x:
restricted_fx = b_max_x - w2 # hit maximum X extent
else:
restricted_fx = fx
if (b_max_y - b_min_y)<=h:
            # this branch gives a nicely centered view and no view jump when
            # crossing the center; used when world height <= view height
restricted_fy = (b_max_y + b_min_y)/2
else:
if (fy - h2) < b_min_y:
restricted_fy = b_min_y + h2 # hit minimum Y extent
elif (fy + h2) > b_max_y:
restricted_fy = b_max_y - h2 # hit maximum Y extent
else:
restricted_fy = fy
# ... and this is our focus point, center of screen
self.restricted_fx = int(restricted_fx)
self.restricted_fy = int(restricted_fy)
# determine child view bounds to match that focus point
x, y = int(restricted_fx - w2), int(restricted_fy - h2)
childs_scroll_x = x #- self.view_x/self.scale
childs_scroll_y = y #- self.view_y/self.scale
self.childs_ox = childs_scroll_x - self.view_x/self.scale
self.childs_oy = childs_scroll_y - self.view_y/self.scale
for z, layer in self.children:
layer.set_view(childs_scroll_x, childs_scroll_y, w, h,
self.view_x/self.scale, self.view_y/self.scale)
def force_focus(self, fx, fy):
'''Force the manager to focus on a point, regardless of any managed layer
visible boundaries.
'''
# This calculation takes into account the scaling of this Layer (and
# therefore also its children).
        # The result is that all children will have their viewport set, defining
# which of their pixels should be visible.
        self.fx, self.fy = fx, fy
# get our scaled view size
w = int(self.view_w / self.scale)
h = int(self.view_h / self.scale)
w2, h2 = w//2, h//2
        # bottom-left corner of the view in layer-space pixels
x, y = fx - w2, fy - h2
childs_scroll_x = x #- self.view_x/self.scale
childs_scroll_y = y #- self.view_y/self.scale
self.childs_ox = childs_scroll_x - self.view_x/self.scale
self.childs_oy = childs_scroll_y - self.view_y/self.scale
for z, layer in self.children:
layer.set_view(childs_scroll_x, childs_scroll_y, w, h,
self.view_x/self.scale, self.view_y/self.scale)
def set_state(self):
# preserve gl scissors info
self._scissor_enabled = glIsEnabled(GL_SCISSOR_TEST)
self._old_scissor_flat = (GLint * 4)() #4-tuple
glGetIntegerv(GL_SCISSOR_BOX, self._old_scissor_flat)
# set our scissor
if not self._scissor_enabled:
glEnable(GL_SCISSOR_TEST)
glScissor(*self._scissor_flat)
def unset_state(self):
# restore gl scissors info
glScissor(*self._old_scissor_flat)
if not self._scissor_enabled:
glDisable(GL_SCISSOR_TEST)
def visit(self):
if self.viewport is not None:
self.set_state()
super(ScrollingManager, self).visit()
self.unset_state()
else:
super(ScrollingManager, self).visit()
|
eevee/cocos2d-mirror
|
cocos/layer/scrolling.py
|
Python
|
bsd-3-clause
| 16,009
|
[
"VisIt"
] |
fe6493ca83325b3dce64e4499d59a540c9574f72a6cfb85e1dc5491199f17d94
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neighborlist functions for tensorflow."""
from collections import namedtuple
import tensorflow as tf
import numpy as np
def get_distances(config, positions, cell, atom_mask=None):
"""Get distances to neighboring atoms with periodic boundary conditions.
The way this function works is it tiles a volume with unit cells to at least
fill a sphere with a radius of cutoff_radius. That means some atoms will be
outside the cutoff radius. Those are included in the results. Then we get
distances to all atoms in the tiled volume. This is always the same number for
  every atom, so we have consistently sized arrays.
Args:
config: A dictionary containing 'cutoff_radius' with a float value.
positions: array-like or Tensor shape=(numatoms, 3)
Array of cartesian coordinates of atoms in a unit cell.
cell: array-like shape=(3, 3)
Array of unit cell vectors in cartesian basis. Each row is a unit cell
vector.
    atom_mask: array-like (numatoms,)
      ones for atoms, zero for padded positions. If None, defaults to all ones.
      The cutoff radius itself is read from config['cutoff_radius'].
Returns:
distances: shape=(maxnatoms, maxnatoms, nunitcells) containing the distances
between all pairs of atoms in the tiled volume.
Related
-------
pydoc:pymatgen.core.lattice.Lattice.get_points_in_sphere
"""
with tf.name_scope('get_distances'):
positions = tf.convert_to_tensor(positions)
cell = tf.convert_to_tensor(cell)
if atom_mask is None:
natoms = positions.get_shape()[0]
atom_mask = tf.ones((natoms, 1), dtype=cell.dtype)
else:
atom_mask = tf.convert_to_tensor(atom_mask, dtype=cell.dtype)
cutoff_radius = tf.convert_to_tensor(
config['cutoff_radius'], dtype=cell.dtype)
# Next we get the inverse unit cell, which will be used to compute the
# unit cell offsets required to tile space inside the sphere.
inverse_cell = tf.matrix_inverse(cell)
fractional_coords = tf.mod(
tf.matmul(positions, inverse_cell), tf.ones_like(positions))
num_cell_repeats = cutoff_radius * tf.norm(inverse_cell, axis=0)
mins = tf.reduce_min(tf.floor(fractional_coords - num_cell_repeats), axis=0)
maxs = tf.reduce_max(tf.ceil(fractional_coords + num_cell_repeats), axis=0)
# Now we generate a set of cell offsets. We start with the repeats in each
# unit cell direction.
v0_range = tf.range(mins[0], maxs[0])
v1_range = tf.range(mins[1], maxs[1])
v2_range = tf.range(mins[2], maxs[2])
# Then we expand them in each dimension
xhat = tf.constant([1.0, 0.0, 0.0], dtype=inverse_cell.dtype)
yhat = tf.constant([0.0, 1.0, 0.0], dtype=inverse_cell.dtype)
zhat = tf.constant([0.0, 0.0, 1.0], dtype=inverse_cell.dtype)
v0_range = v0_range[:, None] * xhat[None, :]
v1_range = v1_range[:, None] * yhat[None, :]
v2_range = v2_range[:, None] * zhat[None, :]
# And combine them to get an offset vector for each cell
offsets = (
v0_range[:, None, None] + v1_range[None, :, None] +
v2_range[None, None, :])
offsets = tf.reshape(offsets, (-1, 3))
# Now we have a vector of unit cell offsets (offset_index, 3) in the inverse
# unit cell basis. We convert that to cartesian coordinate offsets here.
cart_offsets = tf.matmul(offsets, cell)
# we need to offset each atom coordinate by each offset.
# This array is (atom_index, offset, 3)
shifted_cart_coords = positions[:, None] + cart_offsets[None, :]
# Next, we subtract each position from the array of positions.
# This leads to (atom_i, atom_j, positionvector, xhat)
relative_positions = shifted_cart_coords - positions[:, None, None]
# This is the distance squared. This leads to (atom_i, atom_j, distance2)
distances2 = tf.reduce_sum(relative_positions**2, axis=3)
# We zero out masked distances.
distances2 *= atom_mask
distances2 *= atom_mask[:, None]
# We do not mask out the values greater than cutoff_radius here. That is
# done later in the energy function. The zero masking here is due to the
    # fact that the gradient of the square root at x=0 is nan, so we have to
# avoid the zeros. Here we replace the zeros temporarily with ones, take the
# sqrt, and then return the right parts.
zeros = tf.equal(distances2, 0.0)
adjusted = tf.where(zeros, tf.ones_like(distances2), distances2)
distance = tf.sqrt(adjusted)
return tf.where(zeros, tf.zeros_like(distance), distance)
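# Hypothetical usage sketch (assumes TF 1.x graph mode; the toy 2-atom cubic
# cell below is made up for illustration, not taken from this repository):
#
#   config = {'cutoff_radius': 6.0}
#   positions = np.array([[0., 0., 0.], [1.8, 1.8, 1.8]])
#   cell = np.eye(3) * 3.6
#   d = get_distances(config, positions, cell)
#   with tf.Session() as sess:
#       print(sess.run(d).shape)   # (natoms, natoms, n_tiled_cells)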
def get_neighbors_oneway(positions,
cell,
cutoff_distance,
skin=0.01,
strain=np.zeros((3, 3)),
debug=False):
"""Oneway neighborlist.
Returns
-------
  indices: int32 tensor of shape (n_pairs, 2); each row is (atom_index,
  neighbor_index), i.e. the atom at neighbor_index is a neighbor of the atom at
  atom_index.
  distances: tensor of shape (n_pairs,) with the corresponding pair distances.
  displacements: int32 tensor of shape (n_pairs, 3) with the integer unit-cell
  offset of each neighbor.
Adapted from
https://wiki.fysik.dtu.dk/ase/_modules/ase/neighborlist.html#NeighborList.
"""
positions = tf.convert_to_tensor(positions)
cell = tf.convert_to_tensor(cell)
strain = tf.convert_to_tensor(strain, dtype=cell.dtype)
strain_tensor = tf.eye(3, dtype=cell.dtype) + strain
positions = tf.transpose(tf.matmul(strain_tensor, tf.transpose(positions)))
cell = tf.transpose(tf.matmul(strain_tensor, tf.transpose(cell)))
inverse_cell = tf.matrix_inverse(cell)
h = 1 / tf.norm(inverse_cell, axis=0)
N = tf.floor(cutoff_distance / h) + 1
if debug:
N = tf.Print(N, [N], ' tf N: ')
scaled = tf.matmul(positions, inverse_cell)
scaled0 = tf.matmul(positions, inverse_cell) % 1.0
offsets = tf.round(scaled0 - scaled)
if debug:
offsets = tf.Print(offsets, [offsets], ' tf offsets:', summarize=100)
positions0 = positions + tf.matmul(offsets, cell)
if debug:
positions0 = tf.Print(
positions0, [positions0], ' tf positions: ', summarize=100)
v0_range = tf.range(0, N[0] + 1)
v1_range = tf.range(-N[1], N[1] + 1)
v2_range = tf.range(-N[2], N[2] + 1)
xhat = tf.constant([1, 0, 0], dtype=cell.dtype)
yhat = tf.constant([0, 1, 0], dtype=cell.dtype)
zhat = tf.constant([0, 0, 1], dtype=cell.dtype)
v0_range = v0_range[:, None] * xhat[None, :]
v1_range = v1_range[:, None] * yhat[None, :]
v2_range = v2_range[:, None] * zhat[None, :]
N = (
v0_range[:, None, None] + v1_range[None, :, None] +
v2_range[None, None, :])
N = tf.reshape(N, (-1, 3))
n1 = N[:, 0]
n2 = N[:, 1]
n3 = N[:, 2]
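  # Keep only the "positive" half of the offsets (n1 > 0, or n1 == 0 with
  # n2 > 0, or n1 == n2 == 0 with n3 >= 0) so that each pair of atoms is
  # visited once; this is what makes the neighbor list one-way.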
mask = tf.logical_not(
tf.logical_and(
tf.equal(n1, 0.0),
tf.logical_or(
tf.less(n2, 0.0),
tf.logical_and(tf.equal(n2, 0.0), tf.less(n3, 0.0)))))
N = tf.boolean_mask(N, mask)
if debug:
N = tf.Print(N, [N], 'tf offsets', summarize=20)
noffsets = tf.shape(N)[0]
natoms = tf.shape(positions)[0]
indices = tf.range(natoms)
# Finally, we have to run two loops, one over the offsets, and one over the
# positions. We will accumulate the neighbors as we go. I like to save all the
# loop vars in one place.
# n is a counter for offsets
# a is a counter for atom index
# k is a counter for neighbors
# indices contains a list of (a, index): the index of the neighbor of atom a.
# displacements is a list of (n1, n2, n3) corresponding to displacements for
# each neighbor.
LV = namedtuple('LoopVariables', 'n, a, k, indices, distances, displacements')
lv0 = LV(
tf.constant(0, dtype=tf.int32), # n, unit cell offset counter
tf.constant(0, dtype=tf.int32), # a, counter for atom index
tf.constant(0, dtype=tf.int32), # k, neighbor counter
tf.Variable(tf.zeros((0, 2), dtype=tf.int32), dtype=tf.int32), # indices
# distances
tf.Variable(tf.zeros((0,), dtype=positions.dtype), dtype=positions.dtype),
tf.Variable(tf.zeros((0, 3), dtype=tf.int32),
dtype=tf.int32) # displacements
)
shiv = LV(
tf.TensorShape(None), tf.TensorShape(None), tf.TensorShape(None),
tf.TensorShape([None, 2]), tf.TensorShape(None), tf.TensorShape([None,
3]))
def outer_cond(nt):
return tf.less(nt.n, noffsets)
def outer_body(nt):
"""This is the loop over the offsets."""
n1, n2, n3 = tf.unstack(N[nt.n])
displacement = tf.matmul(tf.cast(N[nt.n][None, :], dtype=cell.dtype), cell)
if debug:
      displacement = tf.Print(displacement, [nt.n, displacement],
                              'tf displacement: ')
# Now we loop over each atom
def inner_cond(nt):
return tf.less(nt.a, natoms)
def inner_body(nt):
"""This is a loop over each atom."""
_p = positions0 + displacement - positions0[nt.a]
_p2 = tf.reduce_sum(_p**2, axis=1)
_m0 = tf.equal(_p2, 0.0)
_mp = tf.where(_m0, tf.ones_like(_p2), _p2)
_d = tf.sqrt(_mp)
# These are the distances to the neighbors
d = tf.where(_m0, tf.zeros_like(_p2), _d)
# get indices where the distance is within the cutoff distance
# skip self (d == 0).
neighbor_mask = tf.logical_and(d > 0.0, d < (cutoff_distance + skin))
i = tf.boolean_mask(indices, neighbor_mask)
d = tf.boolean_mask(d, neighbor_mask)
      # Ugh: you have to specify the shape here since i, and hence m, is not
      # known in advance. Without it you get:
      # ValueError: Number of mask dimensions must be specified, even if some
      # dimensions are None. E.g. shape=[None] is ok, but shape=None is not.
def self_interaction():
m = tf.greater(i, nt.a)
m.set_shape([None])
return tf.boolean_mask(i, m), tf.boolean_mask(d, m)
i, d = tf.cond(
tf.reduce_all([tf.equal(n1, 0),
tf.equal(n2, 0),
tf.equal(n3, 0)]),
true_fn=self_interaction,
false_fn=lambda: (i, d))
# Now we need to add tuples of (nt.a, ind) for ind in i if there is
# anything in i, and also the index of the offset.
n_inds = tf.shape(i)[0]
disp = N[nt.n][None, :]
disp += tf.gather(offsets, i)
disp -= offsets[nt.a]
def nind_cond(nt):
return tf.less(nt.k, n_inds)
def nind_body(nt):
tups = tf.concat(
[
nt.indices,
[(
nt.a, # atom to get neighbors for
i[nt.k], # index of neighbor equivalent atom.
)]
],
axis=0)
dists = tf.concat([nt.distances, [d[nt.k]]], axis=0)
disps = tf.concat(
[nt.displacements, [tf.cast(disp[nt.k], tf.int32)]], axis=0)
return LV(nt.n, nt.a, nt.k + 1, tups, dists, disps),
nt, = tf.while_loop(nind_cond, nind_body, [nt], [shiv])
return LV(nt.n, nt.a + 1, 0, nt.indices, nt.distances, nt.displacements),
nt, = tf.while_loop(inner_cond, inner_body, [nt], [shiv])
return LV(nt.n + 1, 0, 0, nt.indices, nt.distances, nt.displacements),
lv1, = tf.while_loop(outer_cond, outer_body, [lv0], [shiv])
return lv1.indices, lv1.distances, lv1.displacements
|
google/differentiable-atomistic-potentials
|
dap/tf/neighborlist.py
|
Python
|
apache-2.0
| 11,919
|
[
"ASE",
"pymatgen"
] |
ca256bfdec4ae846b45231dc4fbc978b563ce9e56728e1d0732bcf0c31fdca73
|
#!/usr/bin/env python
"""
Converter to generate 3 (or 4) column base-pair coverage from an interval file.
usage: %prog bed_file out_file
-1, --cols1=N,N,N,N: Columns for chrom, start, end, strand in interval file
-2, --cols2=N,N,N,N: Columns for chrom, start, end, strand in coverage file
"""
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.intervals import io
from bx.cookbook import doc_optparse
import psyco_full
import commands
import os
from os import environ
import tempfile
from bisect import bisect
INTERVAL_METADATA = ('chromCol',
'startCol',
'endCol',
'strandCol',)
COVERAGE_METADATA = ('chromCol',
'positionCol',
'forwardCol',
'reverseCol',)
def main( interval, coverage ):
"""
Uses a sliding window of partitions to count coverages.
Every interval record adds its start and end to the partitions. The result
    is a list of partitions: every position where the number of covered base
    pairs may change. We don't worry about merging because we pop
as the sorted intervals are read in. As the input start positions exceed
the partition positions in partitions, coverages are kicked out in bulk.
"""
partitions = []
forward_covs = []
reverse_covs = []
offset = 0
chrom = None
lastchrom = None
for record in interval:
chrom = record.chrom
if lastchrom and not lastchrom == chrom and partitions:
for partition in xrange(0, len(partitions)-1):
forward = forward_covs[partition]
reverse = reverse_covs[partition]
if forward+reverse > 0:
coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
forward=forward, reverse=reverse)
partitions = []
forward_covs = []
reverse_covs = []
start_index = bisect(partitions, record.start)
forward = int(record.strand == "+")
reverse = int(record.strand == "-")
forward_base = 0
reverse_base = 0
if start_index > 0:
forward_base = forward_covs[start_index-1]
reverse_base = reverse_covs[start_index-1]
partitions.insert(start_index, record.start)
forward_covs.insert(start_index, forward_base)
reverse_covs.insert(start_index, reverse_base)
end_index = bisect(partitions, record.end)
for index in xrange(start_index, end_index):
forward_covs[index] += forward
reverse_covs[index] += reverse
partitions.insert(end_index, record.end)
forward_covs.insert(end_index, forward_covs[end_index-1] - forward )
reverse_covs.insert(end_index, reverse_covs[end_index-1] - reverse )
if partitions:
for partition in xrange(0, start_index):
forward = forward_covs[partition]
reverse = reverse_covs[partition]
if forward+reverse > 0:
coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
forward=forward, reverse=reverse)
partitions = partitions[start_index:]
forward_covs = forward_covs[start_index:]
reverse_covs = reverse_covs[start_index:]
lastchrom = chrom
# Finish the last chromosome
if partitions:
for partition in xrange(0, len(partitions)-1):
forward = forward_covs[partition]
reverse = reverse_covs[partition]
if forward+reverse > 0:
coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
forward=forward, reverse=reverse)
class CoverageWriter( object ):
def __init__( self, out_stream=None, chromCol=0, positionCol=1, forwardCol=2, reverseCol=3 ):
self.out_stream = out_stream
self.reverseCol = reverseCol
self.nlines = 0
positions = {str(chromCol):'%(chrom)s',
str(positionCol):'%(position)d',
str(forwardCol):'%(forward)d',
str(reverseCol):'%(reverse)d'}
if reverseCol < 0:
self.template = "%(0)s\t%(1)s\t%(2)s\n" % positions
else:
self.template = "%(0)s\t%(1)s\t%(2)s\t%(3)s\n" % positions
def write(self, **kwargs ):
if self.reverseCol < 0: kwargs['forward'] += kwargs['reverse']
posgen = kwargs['position']
for position in posgen:
kwargs['position'] = position
self.out_stream.write(self.template % kwargs)
def close(self):
self.out_stream.flush()
self.out_stream.close()
if __name__ == "__main__":
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = [int(x)-1 for x in options.cols1.split(',')]
chr_col_2, position_col_2, forward_col_2, reverse_col_2 = [int(x)-1 for x in options.cols2.split(',')]
in_fname, out_fname = args
except:
doc_optparse.exception()
# Sort through a tempfile first
temp_file = tempfile.NamedTemporaryFile(mode="r")
environ['LC_ALL'] = 'POSIX'
commandline = "sort -f -n -k %d -k %d -k %d -o %s %s" % (chr_col_1+1,start_col_1+1,end_col_1+1, temp_file.name, in_fname)
errorcode, stdout = commands.getstatusoutput(commandline)
coverage = CoverageWriter( out_stream = open(out_fname, "a"),
chromCol = chr_col_2, positionCol = position_col_2,
forwardCol = forward_col_2, reverseCol = reverse_col_2, )
temp_file.seek(0)
interval = io.NiceReaderWrapper( temp_file,
chrom_col=chr_col_1,
start_col=start_col_1,
end_col=end_col_1,
strand_col=strand_col_1,
fix_strand=True )
main( interval, coverage )
temp_file.close()
coverage.close()
|
volpino/Yeps-EURAC
|
lib/galaxy/datatypes/converters/interval_to_coverage.py
|
Python
|
mit
| 6,336
|
[
"Galaxy"
] |
7a8ce0b05847149a68a95d5bcc6bf727b3188e8986cdc2a0025d3cb54418627a
|
from __future__ import print_function
import numpy as np
import pylab as plt
import fitsio
from legacypipe.survey import *
from astrometry.util.util import Tan
from astrometry.util.fits import *
from astrometry.util.resample import *
from astrometry.util.plotutils import *
from wise.forcedphot import unwise_tiles_touching_wcs
from wise.unwise import get_unwise_tractor_image
from legacypipe.catalog import read_fits_catalog
from tractor.ellipses import EllipseE
from tractor import Tractor, NanoMaggies, LinearPhotoCal, ConstantFitsWcs, ConstantSky, NCircularGaussianPSF, Image
# UGH, copy-n-pasted below...
#from decals_web.map.views import _unwise_to_rgb
def wise_cutouts(ra, dec, radius, ps, pixscale=2.75, survey_dir=None,
unwise_dir=None):
'''
radius in arcsec.
pixscale: WISE pixel scale in arcsec/pixel;
make this smaller than 2.75 to oversample.
'''
if unwise_dir is None:
unwise_dir = os.environ.get('UNWISE_COADDS_DIR')
npix = int(np.ceil(radius / pixscale))
print('Image size:', npix)
W = H = npix
pix = pixscale / 3600.
wcs = Tan(ra, dec, (W+1)/2., (H+1)/2., -pix, 0., 0., pix,float(W),float(H))
# Find DECaLS bricks overlapping
survey = LegacySurveyData(survey_dir=survey_dir)
B = bricks_touching_wcs(wcs, survey=survey)
print('Found', len(B), 'bricks overlapping')
TT = []
for b in B.brickname:
fn = survey.find_file('tractor', brick=b)
T = fits_table(fn)
print('Read', len(T), 'from', b)
primhdr = fitsio.read_header(fn)
TT.append(T)
T = merge_tables(TT)
print('Total of', len(T), 'sources')
T.cut(T.brick_primary)
print(len(T), 'primary')
margin = 20
ok,xx,yy = wcs.radec2pixelxy(T.ra, T.dec)
I = np.flatnonzero((xx > -margin) * (yy > -margin) *
(xx < W+margin) * (yy < H+margin))
T.cut(I)
print(len(T), 'within ROI')
#return wcs,T
# Pull out DECaLS coadds (image, model, resid).
dwcs = wcs.scale(2. * pixscale / 0.262)
dh,dw = dwcs.shape
print('DECaLS resampled shape:', dh,dw)
tags = ['image', 'model', 'resid']
coimgs = [np.zeros((dh,dw,3), np.uint8) for t in tags]
for b in B.brickname:
fn = survey.find_file('image', brick=b, band='r')
bwcs = Tan(fn, 1) # ext 1: .fz
try:
Yo,Xo,Yi,Xi,nil = resample_with_wcs(dwcs, bwcs)
except ResampleError:
continue
if len(Yo) == 0:
continue
print('Resampling', len(Yo), 'pixels from', b)
xl,xh,yl,yh = Xi.min(), Xi.max(), Yi.min(), Yi.max()
#print('python legacypipe/runbrick.py -b %s --zoom %i %i %i %i --outdir cluster --pixpsf --splinesky --pipe --no-early-coadds' %
# (b, xl-5, xh+5, yl-5, yh+5) + ' -P \'pickles/cluster-%(brick)s-%%(stage)s.pickle\'')
for i,tag in enumerate(tags):
fn = survey.find_file(tag+'-jpeg', brick=b)
img = plt.imread(fn)
img = np.flipud(img)
coimgs[i][Yo,Xo,:] = img[Yi,Xi,:]
tt = dict(image='Image', model='Model', resid='Resid')
for img,tag in zip(coimgs, tags):
plt.clf()
dimshow(img, ticks=False)
plt.title('DECaLS grz %s' % tt[tag])
ps.savefig()
# Find unWISE tiles overlapping
tiles = unwise_tiles_touching_wcs(wcs)
print('Cut to', len(tiles), 'unWISE tiles')
# Here we assume the targetwcs is axis-aligned and that the
# edge midpoints yield the RA,Dec limits (true for TAN).
r,d = wcs.pixelxy2radec(np.array([1, W, W/2, W/2]),
np.array([H/2, H/2, 1, H ]))
# the way the roiradec box is used, the min/max order doesn't matter
roiradec = [r[0], r[1], d[2], d[3]]
ra,dec = T.ra, T.dec
srcs = read_fits_catalog(T)
wbands = [1,2,3,4]
wanyband = 'w'
for band in wbands:
f = T.get('flux_w%i' % band)
f *= 10.**(primhdr['WISEAB%i' % band] / 2.5)
coimgs = [np.zeros((H,W), np.float32) for b in wbands]
comods = [np.zeros((H,W), np.float32) for b in wbands]
con = [np.zeros((H,W), np.uint8) for b in wbands]
for iband,band in enumerate(wbands):
print('Photometering WISE band', band)
wband = 'w%i' % band
for i,src in enumerate(srcs):
#print('Source', src, 'brightness', src.getBrightness(), 'params', src.getBrightness().getParams())
#src.getBrightness().setParams([T.wise_flux[i, band-1]])
src.setBrightness(NanoMaggies(**{wanyband: T.get('flux_w%i'%band)[i]}))
# print('Set source brightness:', src.getBrightness())
# The tiles have some overlap, so for each source, keep the
# fit in the tile whose center is closest to the source.
for tile in tiles:
print('Reading tile', tile.coadd_id)
tim = get_unwise_tractor_image(unwise_dir, tile.coadd_id, band,
bandname=wanyband,
roiradecbox=roiradec)
if tim is None:
print('Actually, no overlap with tile', tile.coadd_id)
continue
print('Read image with shape', tim.shape)
# Select sources in play.
wisewcs = tim.wcs.wcs
H,W = tim.shape
ok,x,y = wisewcs.radec2pixelxy(ra, dec)
x = (x - 1.).astype(np.float32)
y = (y - 1.).astype(np.float32)
margin = 10.
I = np.flatnonzero((x >= -margin) * (x < W+margin) *
(y >= -margin) * (y < H+margin))
print(len(I), 'within the image + margin')
subcat = [srcs[i] for i in I]
tractor = Tractor([tim], subcat)
mod = tractor.getModelImage(0)
# plt.clf()
# dimshow(tim.getImage(), ticks=False)
# plt.title('WISE %s %s' % (tile.coadd_id, wband))
# ps.savefig()
# plt.clf()
# dimshow(mod, ticks=False)
# plt.title('WISE %s %s' % (tile.coadd_id, wband))
# ps.savefig()
try:
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, tim.wcs.wcs)
except ResampleError:
continue
if len(Yo) == 0:
continue
print('Resampling', len(Yo), 'pixels from WISE', tile.coadd_id,
band)
coimgs[iband][Yo,Xo] += tim.getImage()[Yi,Xi]
comods[iband][Yo,Xo] += mod[Yi,Xi]
con [iband][Yo,Xo] += 1
for img,mod,n in zip(coimgs, comods, con):
img /= np.maximum(n, 1)
mod /= np.maximum(n, 1)
for band,img,mod in zip(wbands, coimgs, comods):
lo,hi = np.percentile(img, [25,99])
plt.clf()
dimshow(img, vmin=lo, vmax=hi, ticks=False)
plt.title('WISE W%i Data' % band)
ps.savefig()
plt.clf()
dimshow(mod, vmin=lo, vmax=hi, ticks=False)
plt.title('WISE W%i Model' % band)
ps.savefig()
resid = img - mod
mx = np.abs(resid).max()
plt.clf()
dimshow(resid, vmin=-mx, vmax=mx, ticks=False)
plt.title('WISE W%i Resid' % band)
ps.savefig()
#kwa = dict(mn=-0.1, mx=2., arcsinh = 1.)
kwa = dict(mn=-0.1, mx=2., arcsinh=None)
rgb = _unwise_to_rgb(coimgs[:2], **kwa)
plt.clf()
dimshow(rgb, ticks=False)
plt.title('WISE W1/W2 Data')
ps.savefig()
rgb = _unwise_to_rgb(comods[:2], **kwa)
plt.clf()
dimshow(rgb, ticks=False)
plt.title('WISE W1/W2 Model')
ps.savefig()
kwa = dict(mn=-1, mx=1, arcsinh=None)
rgb = _unwise_to_rgb([img-mod for img,mod in list(zip(coimgs,comods))[:2]], **kwa)
plt.clf()
dimshow(rgb, ticks=False)
plt.title('WISE W1/W2 Resid')
ps.savefig()
return wcs, T
def _unwise_to_rgb(imgs, bands=[1,2], mn=-1, mx=100, arcsinh=1.):
import numpy as np
img = imgs[0]
H,W = img.shape
## FIXME
w1,w2 = imgs
rgb = np.zeros((H, W, 3), np.uint8)
scale1 = 50.
scale2 = 50.
#mn,mx = -3.,30.
#arcsinh = None
img1 = w1 / scale1
img2 = w2 / scale2
print('W1 99th', np.percentile(img1, 99))
print('W2 99th', np.percentile(img2, 99))
if arcsinh is not None:
def nlmap(x):
return np.arcsinh(x * arcsinh) / np.sqrt(arcsinh)
#img1 = nlmap(img1)
#img2 = nlmap(img2)
mean = (img1 + img2) / 2.
I = nlmap(mean)
img1 = img1 / mean * I
img2 = img2 / mean * I
mn = nlmap(mn)
mx = nlmap(mx)
img1 = (img1 - mn) / (mx - mn)
img2 = (img2 - mn) / (mx - mn)
rgb[:,:,2] = (np.clip(img1, 0., 1.) * 255).astype(np.uint8)
rgb[:,:,0] = (np.clip(img2, 0., 1.) * 255).astype(np.uint8)
rgb[:,:,1] = rgb[:,:,0]/2 + rgb[:,:,2]/2
return rgb
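# The helper below checks whether two RA intervals overlap by comparing 2-D
# cross products of the endpoints' unit vectors, which handles wrap-around at
# RA = 0/360 deg without special-casing.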
def ra_ranges_overlap(ralo, rahi, ra1, ra2):
import numpy as np
x1 = np.cos(np.deg2rad(ralo))
y1 = np.sin(np.deg2rad(ralo))
x2 = np.cos(np.deg2rad(rahi))
y2 = np.sin(np.deg2rad(rahi))
x3 = np.cos(np.deg2rad(ra1))
y3 = np.sin(np.deg2rad(ra1))
x4 = np.cos(np.deg2rad(ra2))
y4 = np.sin(np.deg2rad(ra2))
cw32 = x2*y3 - x3*y2
cw41 = x1*y4 - x4*y1
return np.logical_and(cw32 <= 0, cw41 >= 0)
def galex_rgb(imgs, bands, **kwargs):
import numpy as np
from scipy.ndimage.filters import uniform_filter, gaussian_filter
nuv,fuv = imgs
h,w = nuv.shape
red = nuv * 0.206 * 2297
blue = fuv * 1.4 * 1525
#blue = uniform_filter(blue, 3)
blue = gaussian_filter(blue, 1.)
green = (0.2*blue + 0.8*red)
red *= 0.085
green *= 0.095
blue *= 0.08
nonlinearity = 2.5
radius = red + green + blue
val = np.arcsinh(radius * nonlinearity) / nonlinearity
with np.errstate(divide='ignore', invalid='ignore'):
red = red * val / radius
green = green * val / radius
blue = blue * val / radius
mx = np.maximum(red, np.maximum(green, blue))
mx = np.maximum(1., mx)
red /= mx
green /= mx
blue /= mx
rgb = np.clip(np.dstack((red, green, blue)), 0., 1.)
return rgb
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--ra', type=float, default=329.0358)
parser.add_argument('-d', '--dec', type=float, default= 1.3909)
parser.add_argument('--radius', type=float, default=90., help='Cutout radius (arcsec)')
parser.add_argument('--survey-dir', help='Legacy Survey base directory')
parser.add_argument('--base', help='Base filename for output plots', default='cutouts')
parser.add_argument('--galex-dir', help='Try making GALEX cutouts too?')
opt = parser.parse_args()
#ra,dec = 203.522, 20.232
#ra,dec = 329.0358,1.3909 # horrible fit
#ra,dec = 244.0424,6.9179
# arcsec
radius = opt.radius
ps = PlotSequence(opt.base)
plt.figure(figsize=(4,4))
plt.subplots_adjust(left=0.005, right=0.995, bottom=0.005, top=0.995)
wcs,T = wise_cutouts(opt.ra, opt.dec, radius, ps,
pixscale=2.75 / 2.,
survey_dir=opt.survey_dir)
H,W = wcs.shape
ralo,declo = wcs.pixelxy2radec(W,1)
rahi,dechi = wcs.pixelxy2radec(1,H)
print('RA', ralo,rahi)
print('Dec', declo,dechi)
if opt.galex_dir:
fn = os.path.join(opt.galex_dir, 'galex-images.fits')
print('Reading', fn)
# galex "bricks" (actually just GALEX tiles)
galex = fits_table(fn)
galex.rename('ra_cent', 'ra')
galex.rename('dec_cent', 'dec')
galex.rename('have_n', 'has_n')
galex.rename('have_f', 'has_f')
cosd = np.cos(np.deg2rad(galex.dec))
galex.ra1 = galex.ra - 3840*1.5/3600./2./cosd
galex.ra2 = galex.ra + 3840*1.5/3600./2./cosd
galex.dec1 = galex.dec - 3840*1.5/3600./2.
galex.dec2 = galex.dec + 3840*1.5/3600./2.
bricknames = []
for tile,subvis in zip(galex.tilename, galex.subvis):
if subvis == -999:
bricknames.append(tile.strip())
else:
bricknames.append('%s_sg%02i' % (tile.strip(), subvis))
galex.brickname = np.array(bricknames)
# bricks_touching_radec_box(self, ralo, rahi, declo, dechi, scale=None):
I, = np.nonzero((galex.dec1 <= dechi) * (galex.dec2 >= declo))
ok = ra_ranges_overlap(ralo, rahi, galex.ra1[I], galex.ra2[I])
I = I[ok]
galex.cut(I)
print('-> bricks', galex.brickname)
gbands = ['n','f']
coimgs = []
comods = []
srcs = read_fits_catalog(T)
for src in srcs:
src.freezeAllBut('brightness')
for band in gbands:
J = np.flatnonzero(galex.get('has_'+band))
print(len(J), 'GALEX tiles have coverage in band', band)
coimg = np.zeros((H,W), np.float32)
comod = np.zeros((H,W), np.float32)
cowt = np.zeros((H,W), np.float32)
for src in srcs:
src.setBrightness(NanoMaggies(**{band: 1}))
for j in J:
brick = galex[j]
fn = os.path.join(opt.galex_dir, brick.tilename.strip(),
'%s-%sd-intbgsub.fits.gz' % (brick.brickname, band))
print(fn)
gwcs = Tan(*[float(f) for f in
[brick.crval1, brick.crval2, brick.crpix1, brick.crpix2,
brick.cdelt1, 0., 0., brick.cdelt2, 3840., 3840.]])
img = fitsio.read(fn)
print('Read', img.shape)
try:
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, gwcs, [], 3)
except OverlapError:
continue
K = np.flatnonzero(img[Yi,Xi] != 0.)
if len(K) == 0:
continue
Yo = Yo[K]
Xo = Xo[K]
Yi = Yi[K]
Xi = Xi[K]
rimg = np.zeros((H,W), np.float32)
rimg[Yo,Xo] = img[Yi,Xi]
plt.clf()
plt.imshow(rimg, interpolation='nearest', origin='lower')
ps.savefig()
wt = brick.get(band + 'exptime')
coimg[Yo,Xo] += wt * img[Yi,Xi]
cowt [Yo,Xo] += wt
x0 = min(Xi)
x1 = max(Xi)
y0 = min(Yi)
y1 = max(Yi)
subwcs = gwcs.get_subimage(x0, y0, x1-x0+1, y1-y0+1)
twcs = ConstantFitsWcs(subwcs)
timg = img[y0:y1+1, x0:x1+1]
tie = np.ones_like(timg) ## HACK!
#hdr = fitsio.read_header(fn)
#zp = hdr['
zps = dict(n=20.08, f=18.82)
zp = zps[band]
photocal = LinearPhotoCal(NanoMaggies.zeropointToScale(zp),
band=band)
tsky = ConstantSky(0.)
# HACK -- circular Gaussian PSF of fixed size...
# in arcsec
#fwhms = dict(NUV=6.0, FUV=6.0)
# -> sigma in pixels
#sig = fwhms[band] / 2.35 / twcs.pixel_scale()
sig = 6.0 / 2.35 / twcs.pixel_scale()
tpsf = NCircularGaussianPSF([sig], [1.])
tim = Image(data=timg, inverr=tie, psf=tpsf, wcs=twcs,
sky=tsky, photocal=photocal,
name='GALEX ' + band + brick.brickname)
tractor = Tractor([tim], srcs)
mod = tractor.getModelImage(0)
print('Tractor image', tim.name)
plt.clf()
plt.imshow(timg, interpolation='nearest', origin='lower')
ps.savefig()
print('Tractor model', tim.name)
plt.clf()
plt.imshow(mod, interpolation='nearest', origin='lower')
ps.savefig()
tractor.freezeParam('images')
print('Params:')
tractor.printThawedParams()
tractor.optimize_forced_photometry(priors=False, shared_params=False)
mod = tractor.getModelImage(0)
print('Tractor model (forced phot)', tim.name)
plt.clf()
plt.imshow(mod, interpolation='nearest', origin='lower')
ps.savefig()
comod[Yo,Xo] += wt * mod[Yi-y0,Xi-x0]
coimg /= np.maximum(cowt, 1e-18)
comod /= np.maximum(cowt, 1e-18)
coimgs.append(coimg)
comods.append(comod)
print('Coadded image')
plt.clf()
plt.imshow(coimg, interpolation='nearest', origin='lower')
ps.savefig()
print('Coadded model')
plt.clf()
plt.imshow(comod, interpolation='nearest', origin='lower')
ps.savefig()
rgb = galex_rgb(coimgs, gbands)
plt.clf()
plt.imshow(rgb, interpolation='nearest', origin='lower')
ps.savefig()
print('Model RGB')
rgb = galex_rgb(comods, gbands)
plt.clf()
plt.imshow(rgb, interpolation='nearest', origin='lower')
ps.savefig()
|
legacysurvey/pipeline
|
py/legacyanalysis/wise-cutouts.py
|
Python
|
gpl-2.0
| 17,422
|
[
"Gaussian"
] |
43e8477810b84f4654f43d735e8622b9a4e665cb61c4a189d387e01d55ed4493
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from espressopp import *
class Test0Real3D(unittest.TestCase) :
def test0Create(self) :
'Test the creation of Real3D instances.'
x = Real3D()
self.assertEqual(x[0], 0.0)
self.assertEqual(x[1], 0.0)
self.assertEqual(x[2], 0.0)
x = Real3D(1.0, 2.0, 3.0)
self.assertEqual(x[0], 1.0)
self.assertEqual(x[1], 2.0)
self.assertEqual(x[2], 3.0)
x = Real3D([1.0, 2.0, 3.0])
self.assertEqual(x[0], 1.0)
self.assertEqual(x[1], 2.0)
self.assertEqual(x[2], 3.0)
x = Real3D((1.0, 2.0, 3.0))
self.assertEqual(x[0], 1.0)
self.assertEqual(x[1], 2.0)
self.assertEqual(x[2], 3.0)
x = Real3D(3.141)
self.assertEqual(x[0], 3.141)
self.assertEqual(x[1], 3.141)
self.assertEqual(x[2], 3.141)
x = Real3D(10)
self.assertEqual(x[0], 10.0)
self.assertEqual(x[1], 10.0)
self.assertEqual(x[2], 10.0)
self.assertRaises(TypeError, Real3D, 1.0, 2.0)
self.assertRaises(TypeError, Real3D, 1.0, 2.0, 3.0, 4.0)
self.assertRaises(TypeError, Real3D, (1.0, 2.0))
self.assertRaises(TypeError, Real3D, (1.0, 2.0, 3.0, 4.0))
def test1OutOfRange(self) :
'Test out-of-range Real3D element access.'
v = Real3D()
self.assertRaises(IndexError, v.__getitem__, -1)
self.assertRaises(IndexError, v.__getitem__, 3)
def test2SetItem(self) :
'Test setting Real3D elements.'
x = Real3D();
x[0] = 1.0;
x[1] = 2.0;
x[2] = 3.0;
self.assertEqual(x[0], 1.0)
self.assertEqual(x[1], 2.0)
self.assertEqual(x[2], 3.0)
def test3Properties(self) :
'Test Real3D properties.'
v = Real3D(1.0, 2.0, 3.0)
self.assertEqual(v.x, 1.0)
self.assertEqual(v.y, 2.0)
self.assertEqual(v.z, 3.0)
v = Real3D()
v.x = 1.0
v.y = 2.0
v.z = 3.0
self.assertEqual(v[0], 1.0)
self.assertEqual(v[1], 2.0)
self.assertEqual(v[2], 3.0)
def test4Conversion(self) :
'Test conversion of Real3D to other types.'
v = Real3D(1.0, 2.0, 3.0)
self.assertEqual(tuple(v), (1.0, 2.0, 3.0))
self.assertEqual(list(v), [1.0, 2.0, 3.0])
self.assertEqual(str(v), '(1.0, 2.0, 3.0)')
self.assertEqual(repr(v), 'Real3D(1.0, 2.0, 3.0)')
def test5Comparison(self) :
'Test Real3D comparison operations.'
v = Real3D(1.0, 2.0, 3.0)
v2 = Real3D(1.0, 2.0, 3.0)
self.assertEqual(v, v2)
self.assertFalse(v != v2)
self.assert_(v is not v2)
def test6Numerics(self) :
'Test various numeric operations of Real3D.'
v = Real3D(1.0, 2.0, 3.0)
r = v * 2.0
self.assertEqual(type(r), Real3D)
self.assertEqual(r, Real3D(2.0, 4.0, 6.0))
r = 2.0 * v
self.assertEqual(type(r), Real3D)
self.assertEqual(r, Real3D(2.0, 4.0, 6.0))
r = v*v
self.assertEqual(r, 14.0)
r = v.sqr()
self.assertEqual(r, 14.0)
r = v.cross(v)
self.assertEqual(r, Real3D(0.0, 0.0, 0.0))
v2 = Real3D(3.0, 2.0, 1.0)
r = v.cross(v2)
self.assertEqual(r, Real3D(-4.0, 8.0, -4.0))
def test7Pickle(self) :
'Test pickling Real3D.'
import pickle
v = Real3D(1.0, 2.0, 3.0)
# pickle
s = pickle.dumps(v)
# unpickle
v2 = pickle.loads(s)
self.assert_(v is not v2)
self.assertEqual(v, v2)
if __name__ == "__main__":
unittest.main()
|
govarguz/espressopp
|
testsuite/unittest/TestReal3D.py
|
Python
|
gpl-3.0
| 4,531
|
[
"ESPResSo"
] |
740996c799a6732c60a933c183ed183d6699f95f23242d2d2f683bbee2f5fd3f
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import sys
import os
import os.path as op
import shutil
import numpy as np
from scipy import linalg
from .fixes import partial
from .utils import (verbose, logger, run_subprocess, deprecated,
get_subjects_dir)
from .transforms import _ensure_trans, apply_trans
from .io.constants import FIFF
from .io.write import (start_file, start_block, write_float, write_int,
write_float_matrix, write_int_matrix, end_block,
end_file)
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.open import fiff_open
from .externals.six import string_types
# ############################################################################
# Compute BEM solution
# The following approach is based on:
#
# de Munck JC: "A linear discretization of the volume conductor boundary
# integral equation using analytically integrated elements",
# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990
#
class ConductorModel(dict):
"""BEM or sphere model"""
def __repr__(self):
if self['is_sphere']:
center = ', '.join('%0.1f' % (x * 1000.) for x in self['r0'])
pl = '' if len(self['layers']) == 1 else 's'
rad = self.radius
if rad is None: # no radius / MEG only
extra = 'Sphere (no layers): r0=[%s] mm' % center
else:
extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm'
% (len(self['layers']) - 1, pl, center, rad * 1000.))
else:
pl = '' if len(self['surfs']) == 1 else 's'
extra = ('BEM (%s layer%s)' % (len(self['surfs']), pl))
return '<ConductorModel | %s>' % extra
@property
def radius(self):
if not self['is_sphere']:
raise RuntimeError('radius undefined for BEM')
return None if len(self['layers']) == 0 else self['layers'][-1]['rad']
def _calc_beta(rk, rk_norm, rk1, rk1_norm):
"""These coefficients are used to calculate the magic vector omega"""
rkk1 = rk1[0] - rk[0]
size = np.sqrt(np.dot(rkk1, rkk1))
rkk1 /= size
num = rk_norm + np.dot(rk, rkk1)
den = rk1_norm + np.dot(rk1, rkk1)
res = np.log(num / den) / size
return res
def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
"""The linear potential matrix element computations"""
from .source_space import _fast_cross_nd_sum
omega = np.zeros((len(fros), 3))
# we replicate a little bit of the _get_solids code here for speed
v1 = tri_rr[np.newaxis, 0, :] - fros
v2 = tri_rr[np.newaxis, 1, :] - fros
v3 = tri_rr[np.newaxis, 2, :] - fros
triples = _fast_cross_nd_sum(v1, v2, v3)
l1 = np.sqrt(np.sum(v1 * v1, axis=1))
l2 = np.sqrt(np.sum(v2 * v2, axis=1))
l3 = np.sqrt(np.sum(v3 * v3, axis=1))
ss = (l1 * l2 * l3 +
np.sum(v1 * v2, axis=1) * l3 +
np.sum(v1 * v3, axis=1) * l2 +
np.sum(v2 * v3, axis=1) * l1)
solids = np.arctan2(triples, ss)
# We *could* subselect the good points from v1, v2, v3, triples, solids,
# l1, l2, and l3, but there are *very* few bad points. So instead we do
# some unnecessary calculations, and then omit them from the final
# solution. These three lines ensure we don't get invalid values in
# _calc_beta.
bad_mask = np.abs(solids) < np.pi / 1e6
l1[bad_mask] = 1.
l2[bad_mask] = 1.
l3[bad_mask] = 1.
# Calculate the magic vector vec_omega
beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],
_calc_beta(v2, l2, v3, l3)[:, np.newaxis],
_calc_beta(v3, l3, v1, l1)[:, np.newaxis]]
vec_omega = (beta[2] - beta[0]) * v1
vec_omega += (beta[0] - beta[1]) * v2
vec_omega += (beta[1] - beta[2]) * v3
area2 = 2.0 * tri_area
n2 = 1.0 / (area2 * area2)
# leave omega = 0 otherwise
# Put it all together...
yys = [v1, v2, v3]
idx = [0, 1, 2, 0, 2]
for k in range(3):
diff = yys[idx[k - 1]] - yys[idx[k + 1]]
zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)
omega[:, k] = -n2 * (area2 * zdots * 2. * solids -
triples * (diff * vec_omega).sum(axis=-1))
# omit the bad points from the solution
omega[bad_mask] = 0.
return omega
def _correct_auto_elements(surf, mat):
"""Improve auto-element approximation..."""
pi2 = 2.0 * np.pi
tris_flat = surf['tris'].ravel()
misses = pi2 - mat.sum(axis=1)
for j, miss in enumerate(misses):
# How much is missing?
n_memb = len(surf['neighbor_tri'][j])
# The node itself receives one half
mat[j, j] = miss / 2.0
# The rest is divided evenly among the member nodes...
miss /= (4.0 * n_memb)
members = np.where(j == tris_flat)[0]
mods = members % 3
offsets = np.array([[1, 2], [-1, 1], [-1, -2]])
tri_1 = members + offsets[mods, 0]
tri_2 = members + offsets[mods, 1]
for t1, t2 in zip(tri_1, tri_2):
mat[j, tris_flat[t1]] += miss
mat[j, tris_flat[t2]] += miss
return
def _fwd_bem_lin_pot_coeff(surfs):
"""Calculate the coefficients for linear collocation approach"""
# taken from fwd_bem_linear_collocation.c
nps = [surf['np'] for surf in surfs]
np_tot = sum(nps)
coeff = np.zeros((np_tot, np_tot))
offsets = np.cumsum(np.concatenate(([0], nps)))
for si_1, surf1 in enumerate(surfs):
rr_ord = np.arange(nps[si_1])
for si_2, surf2 in enumerate(surfs):
logger.info(" %s (%d) -> %s (%d) ..." %
(_bem_explain_surface(surf1['id']), nps[si_1],
_bem_explain_surface(surf2['id']), nps[si_2]))
tri_rr = surf2['rr'][surf2['tris']]
tri_nn = surf2['tri_nn']
tri_area = surf2['tri_area']
submat = coeff[offsets[si_1]:offsets[si_1 + 1],
offsets[si_2]:offsets[si_2 + 1]] # view
for k in range(surf2['ntri']):
tri = surf2['tris'][k]
if si_1 == si_2:
skip_idx = ((rr_ord == tri[0]) |
(rr_ord == tri[1]) |
(rr_ord == tri[2]))
else:
skip_idx = list()
# No contribution from a triangle that
# this vertex belongs to
# if sidx1 == sidx2 and (tri == j).any():
# continue
# Otherwise do the hard job
coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],
tri_area[k])
coeffs[skip_idx] = 0.
submat[:, tri] -= coeffs
if si_1 == si_2:
_correct_auto_elements(surf1, submat)
return coeff
def _fwd_bem_multi_solution(solids, gamma, nps):
"""Do multi surface solution
* Invert I - solids/(2*M_PI)
* Take deflation into account
* The matrix is destroyed after inversion
* This is the general multilayer case
"""
pi2 = 1.0 / (2 * np.pi)
n_tot = np.sum(nps)
assert solids.shape == (n_tot, n_tot)
nsurf = len(nps)
defl = 1.0 / n_tot
# Modify the matrix
offsets = np.cumsum(np.concatenate(([0], nps)))
for si_1 in range(nsurf):
for si_2 in range(nsurf):
mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]
slice_j = slice(offsets[si_1], offsets[si_1 + 1])
slice_k = slice(offsets[si_2], offsets[si_2 + 1])
solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult
solids += np.eye(n_tot)
return linalg.inv(solids, overwrite_a=True)
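# Illustrative sketch (added; not part of the original module). With gamma=None
# and an all-zero "solids" matrix the routine reduces to inverting
# I + defl * ones((n_tot, n_tot)), where defl = 1 / n_tot, so the result is
# easy to verify by hand:
#
#     >>> toy = np.zeros((4, 4))
#     >>> sol = _fwd_bem_multi_solution(toy, gamma=None, nps=[2, 2])
#     >>> np.allclose(sol, np.linalg.inv(np.eye(4) + 0.25 * np.ones((4, 4))))
#     True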
def _fwd_bem_homog_solution(solids, nps):
"""Helper to make a homogeneous solution"""
return _fwd_bem_multi_solution(solids, None, nps)
def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):
"""Modify the solution according to the IP approach"""
n_last = n_tri[-1]
mult = (1.0 + ip_mult) / ip_mult
logger.info(' Combining...')
offsets = np.cumsum(np.concatenate(([0], n_tri)))
for si in range(len(n_tri)):
# Pick the correct submatrix (right column) and multiply
sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]
# Multiply
sub -= 2 * np.dot(sub, ip_solution)
# The lower right corner is a special case
sub[-n_last:, -n_last:] += mult * ip_solution
# Final scaling
logger.info(' Scaling...')
solution *= ip_mult
return
def _fwd_bem_linear_collocation_solution(m):
"""Compute the linear collocation potential solution"""
# first, add surface geometries
from .surface import _complete_surface_info
for surf in m['surfs']:
_complete_surface_info(surf, verbose=False)
logger.info('Computing the linear collocation solution...')
logger.info(' Matrix coefficients...')
coeff = _fwd_bem_lin_pot_coeff(m['surfs'])
m['nsol'] = len(coeff)
logger.info(" Inverting the coefficient matrix...")
nps = [surf['np'] for surf in m['surfs']]
m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)
if len(m['surfs']) == 3:
ip_mult = m['sigma'][1] / m['sigma'][2]
if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:
logger.info('IP approach required...')
logger.info(' Matrix coefficients (homog)...')
coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])
logger.info(' Inverting the coefficient matrix (homog)...')
ip_solution = _fwd_bem_homog_solution(coeff,
[m['surfs'][-1]['np']])
logger.info(' Modify the original solution to incorporate '
'IP approach...')
_fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,
nps)
m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL
logger.info("Solution ready.")
@verbose
def make_bem_solution(surfs, verbose=None):
"""Create a BEM solution using the linear collocation approach
Parameters
----------
surfs : list of dict
The BEM surfaces to use.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
bem : instance of ConductorModel
The BEM solution.
Notes
-----
.. versionadded:: 0.10.0
See Also
--------
make_bem_model
read_bem_surfaces
write_bem_surfaces
read_bem_solution
write_bem_solution
"""
logger.info('Approximation method : Linear collocation\n')
if isinstance(surfs, string_types):
# Load the surfaces
logger.info('Loading surfaces...')
surfs = read_bem_surfaces(surfs)
bem = ConductorModel(is_sphere=False, surfs=surfs)
_add_gamma_multipliers(bem)
if len(bem['surfs']) == 3:
logger.info('Three-layer model surfaces loaded.')
elif len(bem['surfs']) == 1:
logger.info('Homogeneous model surface loaded.')
else:
raise RuntimeError('Only 1- or 3-layer BEM computations supported')
_fwd_bem_linear_collocation_solution(bem)
logger.info('BEM geometry computations complete.')
return bem
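# Usage sketch (added for illustration; not part of the original module). The
# file names below are hypothetical; any surfaces written by
# write_bem_surfaces (or a path to such a file) can be passed in:
#
#     >>> surfs = read_bem_surfaces('sample-5120-5120-5120-bem.fif')
#     >>> bem = make_bem_solution(surfs)
#     >>> write_bem_solution('sample-5120-5120-5120-bem-sol.fif', bem)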
# ############################################################################
# Make BEM model
def _ico_downsample(surf, dest_grade):
"""Downsample the surface if isomorphic to a subdivided icosahedron"""
from .surface import _get_ico_surface
n_tri = surf['ntri']
found = -1
bad_msg = ("A surface with %d triangles cannot be isomorphic with a "
"subdivided icosahedron." % surf['ntri'])
if n_tri % 20 != 0:
raise RuntimeError(bad_msg)
n_tri = n_tri // 20
found = int(round(np.log(n_tri) / np.log(4)))
if n_tri != 4 ** found:
raise RuntimeError(bad_msg)
del n_tri
if dest_grade > found:
raise RuntimeError('For this surface, decimation grade should be %d '
'or less, not %s.' % (found, dest_grade))
source = _get_ico_surface(found)
dest = _get_ico_surface(dest_grade, patch_stats=True)
del dest['tri_cent']
del dest['tri_nn']
del dest['neighbor_tri']
del dest['tri_area']
if not np.array_equal(source['tris'], surf['tris']):
raise RuntimeError('The source surface has a matching number of '
'triangles but ordering is wrong')
logger.info('Going from %dth to %dth subdivision of an icosahedron '
'(n_tri: %d -> %d)' % (found, dest_grade, surf['ntri'],
dest['ntri']))
# Find the mapping
dest['rr'] = surf['rr'][_get_ico_map(source, dest)]
return dest
def _get_ico_map(fro, to):
"""Helper to get a mapping between ico surfaces"""
from .surface import _compute_nearest
nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)
n_bads = (dists > 5e-3).sum()
if n_bads > 0:
raise RuntimeError('No matching vertex for %d destination vertices'
% (n_bads))
return nearest
def _order_surfaces(surfs):
"""Reorder the surfaces"""
if len(surfs) != 3:
return surfs
# we have three surfaces
surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_BRAIN]
ids = np.array([surf['id'] for surf in surfs])
if set(ids) != set(surf_order):
raise RuntimeError('bad surface ids: %s' % ids)
order = [np.where(ids == id_)[0][0] for id_ in surf_order]
surfs = [surfs[idx] for idx in order]
return surfs
def _assert_complete_surface(surf):
"""Check the sum of solid angles as seen from inside"""
# from surface_checks.c
from .source_space import _get_solids
tot_angle = 0.
# Center of mass....
cm = surf['rr'].mean(axis=0)
logger.info('%s CM is %6.2f %6.2f %6.2f mm' %
(_surf_name[surf['id']],
1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]
if np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5:
raise RuntimeError('Surface %s is not complete (sum of solid angles '
'= %g * 4*PI instead).' %
(_surf_name[surf['id']], tot_angle))
_surf_name = {
FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',
FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',
FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',
FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ',
}
def _assert_inside(fro, to):
"""Helper to check one set of points is inside a surface"""
# this is "is_inside" in surface_checks.c
from .source_space import _get_solids
tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])
if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():
raise RuntimeError('Surface %s is not completely inside surface %s'
% (_surf_name[fro['id']], _surf_name[to['id']]))
def _check_surfaces(surfs):
"""Check that the surfaces are complete and non-intersecting"""
for surf in surfs:
_assert_complete_surface(surf)
# Then check the topology
for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
logger.info('Checking that %s surface is inside %s surface...' %
(_surf_name[surf_2['id']], _surf_name[surf_1['id']]))
_assert_inside(surf_2, surf_1)
def _check_surface_size(surf):
"""Check that the coordinate limits are reasonable"""
sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
if (sizes < 0.05).any():
        raise RuntimeError('Dimensions of the surface %s seem too small '
                           '(%9.5f mm). Maybe the unit of measure is '
                           'meters instead of mm' %
                           (_surf_name[surf['id']], 1000 * sizes.min()))
def _check_thicknesses(surfs):
"""How close are we?"""
from .surface import _compute_nearest
for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],
return_dists=True)[0]
min_dist = min_dist.min()
logger.info('Checking distance between %s and %s surfaces...' %
(_surf_name[surf_1['id']], _surf_name[surf_2['id']]))
logger.info('Minimum distance between the %s and %s surfaces is '
'approximately %6.1f mm' %
(_surf_name[surf_1['id']], _surf_name[surf_2['id']],
1000 * min_dist))
def _surfaces_to_bem(fname_surfs, ids, sigmas, ico=None):
"""Convert surfaces to a BEM
"""
from .surface import _read_surface_geom
# equivalent of mne_surf2bem
surfs = list()
assert len(fname_surfs) in (1, 3)
for fname in fname_surfs:
surfs.append(_read_surface_geom(fname, patch_stats=False,
verbose=False))
surfs[-1]['rr'] /= 1000.
# Downsampling if the surface is isomorphic with a subdivided icosahedron
if ico is not None:
for si, surf in enumerate(surfs):
surfs[si] = _ico_downsample(surf, ico)
for surf, id_ in zip(surfs, ids):
surf['id'] = id_
# Shifting surfaces is not implemented here
# Order the surfaces for the benefit of the topology checks
for surf, sigma in zip(surfs, sigmas):
surf['sigma'] = sigma
surfs = _order_surfaces(surfs)
# Check topology as best we can
_check_surfaces(surfs)
for surf in surfs:
_check_surface_size(surf)
_check_thicknesses(surfs)
logger.info('Surfaces passed the basic topology checks.')
return surfs
@verbose
def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),
subjects_dir=None, verbose=None):
"""Create a BEM model for a subject
Parameters
----------
subject : str
The subject.
ico : int | None
The surface ico downsampling to use, e.g. 5=20484, 4=5120, 3=1280.
    conductivity : array of float, shape (3,) or (1,)
The conductivities to use for each shell. Should be a single element
for a one-layer model, or three elements for a three-layer model.
Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a
single-layer model would be ``[0.3]``.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
surfaces : list of dict
The BEM surfaces. Use `make_bem_solution` to turn these into a
`ConductorModel` suitable for forward calculation.
Notes
-----
.. versionadded:: 0.10.0
See Also
--------
make_bem_solution
make_sphere_model
read_bem_surfaces
write_bem_surfaces
"""
conductivity = np.array(conductivity, float)
if conductivity.ndim != 1 or conductivity.size not in (1, 3):
raise ValueError('conductivity must be 1D array-like with 1 or 3 '
'elements')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = op.join(subjects_dir, subject)
bem_dir = op.join(subject_dir, 'bem')
inner_skull = op.join(bem_dir, 'inner_skull.surf')
outer_skull = op.join(bem_dir, 'outer_skull.surf')
outer_skin = op.join(bem_dir, 'outer_skin.surf')
surfaces = [inner_skull, outer_skull, outer_skin]
ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_HEAD]
logger.info('Creating the BEM geometry...')
if len(conductivity) == 1:
surfaces = surfaces[:1]
ids = ids[:1]
surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)
logger.info('Complete.\n')
return surfaces
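# Usage sketch (added for illustration; not part of the original module). The
# subject name and subjects_dir are hypothetical and require the FreeSurfer
# BEM surfaces (inner_skull.surf, outer_skull.surf, outer_skin.surf) to exist:
#
#     >>> surfaces = make_bem_model('sample', ico=4,
#     ...                           conductivity=(0.3, 0.006, 0.3),
#     ...                           subjects_dir='/path/to/subjects')
#     >>> bem = make_bem_solution(surfaces)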
# ############################################################################
# Compute EEG sphere model
def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):
"""Get the model depended weighting factor for n"""
nlayer = len(m['layers'])
if nlayer in (0, 1):
return 1.
# Initialize the arrays
c1 = np.zeros(nlayer - 1)
c2 = np.zeros(nlayer - 1)
cr = np.zeros(nlayer - 1)
cr_mult = np.zeros(nlayer - 1)
for k in range(nlayer - 1):
c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma']
c2[k] = c1[k] - 1.0
cr_mult[k] = m['layers'][k]['rel_rad']
cr[k] = cr_mult[k]
cr_mult[k] *= cr_mult[k]
coeffs = np.zeros(n_terms - 1)
for n in range(1, n_terms):
# Increment the radius coefficients
for k in range(nlayer - 1):
cr[k] *= cr_mult[k]
# Multiply the matrices
M = np.eye(2)
n1 = n + 1.0
for k in range(nlayer - 2, -1, -1):
M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]],
[n * c2[k] * cr[k], n1 + n * c1[k]]], M)
num = n * (2.0 * n + 1.0) ** (nlayer - 1)
coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0])
return coeffs
def _compose_linear_fitting_data(mu, u):
# y is the data to be fitted (nterms-1 x 1)
# M is the model matrix (nterms-1 x nfit-1)
for k in range(u['nterms'] - 1):
k1 = k + 1
mu1n = np.power(mu[0], k1)
u['y'][k] = u['w'][k] * (u['fn'][k1] - mu1n * u['fn'][0])
for p in range(u['nfit'] - 1):
u['M'][k][p] = u['w'][k] * (np.power(mu[p + 1], k1) - mu1n)
def _compute_linear_parameters(mu, u):
"""Compute the best-fitting linear parameters"""
_compose_linear_fitting_data(mu, u)
uu, sing, vv = linalg.svd(u['M'], full_matrices=False)
# Compute the residuals
u['resi'] = u['y'].copy()
vec = np.empty(u['nfit'] - 1)
for p in range(u['nfit'] - 1):
vec[p] = np.dot(uu[:, p], u['y'])
for k in range(u['nterms'] - 1):
u['resi'][k] -= uu[k, p] * vec[p]
vec[p] = vec[p] / sing[p]
lambda_ = np.zeros(u['nfit'])
for p in range(u['nfit'] - 1):
sum_ = 0.
for q in range(u['nfit'] - 1):
sum_ += vv[q, p] * vec[q]
lambda_[p + 1] = sum_
lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])
rv = np.dot(u['resi'], u['resi']) / np.dot(u['y'], u['y'])
return rv, lambda_
def _one_step(mu, u):
"""Evaluate the residual sum of squares fit for one set of mu values"""
if np.abs(mu).max() > 1.0:
return 1.0
# Compose the data for the linear fitting, compute SVD, then residuals
_compose_linear_fitting_data(mu, u)
u['uu'], u['sing'], u['vv'] = linalg.svd(u['M'])
u['resi'][:] = u['y'][:]
for p in range(u['nfit'] - 1):
dot = np.dot(u['uu'][p], u['y'])
for k in range(u['nterms'] - 1):
u['resi'][k] = u['resi'][k] - u['uu'][p, k] * dot
# Return their sum of squares
return np.dot(u['resi'], u['resi'])
def _fwd_eeg_fit_berg_scherg(m, nterms, nfit):
"""Fit the Berg-Scherg equivalent spherical model dipole parameters"""
from scipy.optimize import fmin_cobyla
assert nfit >= 2
u = dict(y=np.zeros(nterms - 1), resi=np.zeros(nterms - 1),
nfit=nfit, nterms=nterms, M=np.zeros((nterms - 1, nfit - 1)))
# (1) Calculate the coefficients of the true expansion
u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1)
# (2) Calculate the weighting
f = (min([layer['rad'] for layer in m['layers']]) /
max([layer['rad'] for layer in m['layers']]))
# correct weighting
k = np.arange(1, nterms + 1)
u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) /
k) * np.power(f, (k - 1.0))
u['w'][-1] = 0
# Do the nonlinear minimization, constraining mu to the interval [-1, +1]
mu_0 = np.random.RandomState(0).rand(nfit) * f
fun = partial(_one_step, u=u)
    # adjust for fmin_cobyla "catol", which not all scipy versions have
    max_ = 1. - 2e-4
cons = [(lambda x: max_ - np.abs(x[ii])) for ii in range(nfit)]
mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=5e-3, disp=0)
# (6) Do the final step: calculation of the linear parameters
rv, lambda_ = _compute_linear_parameters(mu, u)
order = np.argsort(mu)[::-1]
mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first
m['mu'] = mu
# This division takes into account the actual conductivities
m['lambda'] = lambda_ / m['layers'][-1]['sigma']
m['nfit'] = nfit
return rv
@verbose
def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
relative_radii=(0.90, 0.92, 0.97, 1.0),
sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None):
"""Create a spherical model for forward solution calculation
Parameters
----------
r0 : array-like | str
Head center to use (in head coordinates). If 'auto', the head
center will be calculated from the digitization points in info.
head_radius : float | str | None
If float, compute spherical shells for EEG using the given radius.
        If 'auto', estimate an appropriate radius from the dig points in Info.
If None, exclude shells.
info : instance of mne.io.meas_info.Info | None
Measurement info. Only needed if ``r0`` or ``head_radius`` are
``'auto'``.
relative_radii : array-like
Relative radii for the spherical shells.
sigmas : array-like
Sigma values for the spherical shells.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
sphere : instance of ConductorModel
The resulting spherical conductor model.
Notes
-----
.. versionadded:: 0.9.0
See Also
--------
make_bem_model
make_bem_solution
"""
for name in ('r0', 'head_radius'):
param = locals()[name]
if isinstance(param, string_types):
if param != 'auto':
raise ValueError('%s, if str, must be "auto" not "%s"'
% (name, param))
if (isinstance(r0, string_types) and r0 == 'auto') or \
(isinstance(head_radius, string_types) and head_radius == 'auto'):
if info is None:
raise ValueError('Info must not be None for auto mode')
head_radius_fit, r0_fit = fit_sphere_to_headshape(info)[:2]
if isinstance(r0, string_types):
r0 = r0_fit / 1000.
if isinstance(head_radius, string_types):
head_radius = head_radius_fit / 1000.
sphere = ConductorModel(is_sphere=True, r0=np.array(r0),
coord_frame=FIFF.FIFFV_COORD_HEAD)
sphere['layers'] = list()
if head_radius is not None:
# Eventually these could be configurable...
relative_radii = np.array(relative_radii, float)
sigmas = np.array(sigmas, float)
order = np.argsort(relative_radii)
relative_radii = relative_radii[order]
sigmas = sigmas[order]
for rel_rad, sig in zip(relative_radii, sigmas):
# sort layers by (relative) radius, and scale radii
layer = dict(rad=rel_rad, sigma=sig)
layer['rel_rad'] = layer['rad'] = rel_rad
sphere['layers'].append(layer)
# scale the radii
R = sphere['layers'][-1]['rad']
rR = sphere['layers'][-1]['rel_rad']
for layer in sphere['layers']:
layer['rad'] /= R
layer['rel_rad'] /= rR
#
# Setup the EEG sphere model calculations
#
# Scale the relative radii
for k in range(len(relative_radii)):
sphere['layers'][k]['rad'] = (head_radius *
sphere['layers'][k]['rel_rad'])
rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)
logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv))
for k in range(3):
logger.info('mu%d = %g lambda%d = %g'
% (k + 1, sphere['mu'][k], k + 1,
sphere['layers'][-1]['sigma'] *
sphere['lambda'][k]))
logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n'
% (1000 * head_radius,))
return ConductorModel(sphere)
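# Usage sketch (added for illustration; not part of the original module):
#
#     >>> # MEG-only model: no EEG shells are set up
#     >>> sphere_meg = make_sphere_model(r0=(0., 0., 0.04), head_radius=None)
#     >>> # EEG/MEG model with the default relative radii and conductivities
#     >>> sphere = make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09)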
# #############################################################################
# Helpers
@verbose
def fit_sphere_to_headshape(info, dig_kinds=(FIFF.FIFFV_POINT_EXTRA,),
verbose=None):
"""Fit a sphere to the headshape points to determine head center
Parameters
----------
info : instance of mne.io.meas_info.Info
Measurement info.
dig_kinds : tuple of int
Kind of digitization points to use in the fitting. These can be
any kind defined in io.constants.FIFF::
FIFFV_POINT_CARDINAL
FIFFV_POINT_HPI
FIFFV_POINT_EEG
FIFFV_POINT_EXTRA
Defaults to (FIFFV_POINT_EXTRA,).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
radius : float
Sphere radius in mm.
origin_head: ndarray, shape (3,)
Head center in head coordinates (mm).
origin_device: ndarray, shape (3,)
Head center in device coordinates (mm).
"""
    # get head digitization points of the specified kind
hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]
if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in info['dig']):
raise RuntimeError('Digitization points not in head coordinates, '
'contact mne-python developers')
# exclude some frontal points (nose etc.)
hsp = [p for p in hsp if not (p[2] < 0 and p[1] > 0)]
if len(hsp) == 0:
raise ValueError('No head digitization points of the specified '
'kinds (%s) found.' % dig_kinds)
radius, origin_head = _fit_sphere(np.array(hsp), disp=False)
# compute origin in device coordinates
head_to_dev = _ensure_trans(info['dev_head_t'], 'head', 'meg')
origin_device = apply_trans(head_to_dev, origin_head)
radius *= 1e3
origin_head *= 1e3
origin_device *= 1e3
logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm' % radius)
logger.info('Origin head coordinates:'.ljust(30) +
'%0.1f %0.1f %0.1f mm' % tuple(origin_head))
logger.info('Origin device coordinates:'.ljust(30) +
'%0.1f %0.1f %0.1f mm' % tuple(origin_device))
return radius, origin_head, origin_device
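# Usage sketch (added for illustration; not part of the original module).
# `info` is assumed to contain digitization points in head coordinates; note
# that the returned values are in mm, while make_sphere_model expects meters:
#
#     >>> radius, origin_head, origin_device = fit_sphere_to_headshape(info)
#     >>> sphere = make_sphere_model(r0=origin_head / 1000.,
#     ...                            head_radius=radius / 1000.)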
def _fit_sphere(points, disp='auto'):
"""Aux function to fit a sphere to an arbitrary set of points"""
from scipy.optimize import fmin_cobyla
if isinstance(disp, string_types) and disp == 'auto':
disp = True if logger.level <= 20 else False
# initial guess for center and radius
radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2.
radius_init = radii.mean()
center_init = np.median(points, axis=0)
# optimization
x0 = np.concatenate([center_init, [radius_init]])
def cost_fun(center_rad):
d = points - center_rad[:3]
d = (np.sqrt(np.sum(d * d, axis=1)) - center_rad[3])
return np.sum(d * d)
def constraint(center_rad):
return center_rad[3] # radius must be >= 0
x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init,
rhoend=radius_init * 1e-6, disp=disp)
origin = x_opt[:3]
radius = x_opt[3]
return radius, origin
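# Illustrative sketch (added; not part of the original module). Points sampled
# from a known sphere recover its radius and center:
#
#     >>> rng = np.random.RandomState(0)
#     >>> pts = rng.randn(100, 3)
#     >>> pts = 0.09 * pts / np.sqrt(np.sum(pts ** 2, axis=1))[:, np.newaxis]
#     >>> radius, origin = _fit_sphere(pts, disp=False)
#     >>> # radius should be close to 0.09 and origin close to (0, 0, 0)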
# ############################################################################
# Create BEM surfaces
@verbose
def make_watershed_bem(subject, subjects_dir=None, overwrite=False,
volume='T1', atlas=False, gcaatlas=False, preflood=None,
verbose=None):
"""
Create BEM surfaces using the watershed algorithm included with FreeSurfer
Parameters
----------
subject : str
Subject name (required)
subjects_dir : str
Directory containing subjects data. If None use
the Freesurfer SUBJECTS_DIR environment variable.
overwrite : bool
Write over existing files
volume : str
Defaults to T1
atlas : bool
Specify the --atlas option for mri_watershed
gcaatlas : bool
Use the subcortical atlas
preflood : int
Change the preflood height
verbose : bool, str or None
If not None, override default verbose level
.. versionadded:: 0.10
"""
from .surface import read_surface
env = os.environ.copy()
if not os.environ.get('FREESURFER_HOME'):
raise RuntimeError('FREESURFER_HOME environment variable not set')
env['SUBJECT'] = subject
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
env['SUBJECTS_DIR'] = subjects_dir
subject_dir = op.join(subjects_dir, subject)
mri_dir = op.join(subject_dir, 'mri')
T1_dir = op.join(mri_dir, volume)
T1_mgz = op.join(mri_dir, volume + '.mgz')
bem_dir = op.join(subject_dir, 'bem')
ws_dir = op.join(subject_dir, 'bem', 'watershed')
if not op.isdir(subject_dir):
raise RuntimeError('Could not find the MRI data directory "%s"'
% subject_dir)
if not op.isdir(bem_dir):
os.makedirs(bem_dir)
if not op.isdir(T1_dir) and not op.isfile(T1_mgz):
raise RuntimeError('Could not find the MRI data')
if op.isdir(ws_dir):
if not overwrite:
            raise RuntimeError('%s already exists. Use the --overwrite option'
                               ' to recreate it.' % ws_dir)
else:
shutil.rmtree(ws_dir)
# put together the command
cmd = ['mri_watershed']
if preflood:
cmd += ["-h", "%s" % int(preflood)]
if gcaatlas:
cmd += ['-atlas', '-T1', '-brain_atlas', env['FREESURFER_HOME'] +
'/average/RB_all_withskull_2007-08-08.gca',
subject_dir + '/mri/transforms/talairach_with_skull.lta']
elif atlas:
cmd += ['-atlas']
if op.exists(T1_mgz):
cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz,
op.join(ws_dir, 'ws')]
else:
cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir,
op.join(ws_dir, 'ws')]
# report and run
logger.info('\nRunning mri_watershed for BEM segmentation with the '
'following parameters:\n\n'
'SUBJECTS_DIR = %s\n'
'SUBJECT = %s\n'
'Results dir = %s\n' % (subjects_dir, subject, ws_dir))
os.makedirs(op.join(ws_dir, 'ws'))
run_subprocess(cmd, env=env, stdout=sys.stdout)
#
os.chdir(ws_dir)
if op.isfile(T1_mgz):
# XXX : do this with python code
surfaces = [subject + '_brain_surface', subject +
'_inner_skull_surface', subject + '_outer_skull_surface',
subject + '_outer_skin_surface']
for s in surfaces:
cmd = ['mne_convert_surface', '--surf', s, '--mghmri', T1_mgz,
'--surfout', s, "--replacegeom"]
run_subprocess(cmd, env=env, stdout=sys.stdout)
os.chdir(bem_dir)
if op.isfile(subject + '-head.fif'):
os.remove(subject + '-head.fif')
# run the equivalent of mne_surf2bem
points, tris = read_surface(op.join(ws_dir,
subject + '_outer_skin_surface'))
points *= 1e-3
surf = dict(coord_frame=5, id=4, nn=None, np=len(points),
ntri=len(tris), rr=points, sigma=1, tris=tris)
write_bem_surfaces(subject + '-head.fif', surf)
logger.info('Created %s/%s-head.fif\n\nComplete.' % (bem_dir, subject))
# ############################################################################
# Read
@verbose
def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):
"""Read the BEM surfaces from a FIF file
Parameters
----------
fname : string
The name of the file containing the surfaces.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
s_id : int | None
If int, only read and return the surface with the given s_id.
An error will be raised if it doesn't exist. If None, all
surfaces are read and returned.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
surf: list | dict
A list of dictionaries that each contain a surface. If s_id
is not None, only the requested surface will be returned.
See Also
--------
write_bem_surfaces, write_bem_solution, make_bem_model
"""
from .surface import _complete_surface_info
# Default coordinate frame
coord_frame = FIFF.FIFFV_COORD_MRI
# Open the file, create directory
f, tree, _ = fiff_open(fname)
with f as fid:
# Find BEM
bem = dir_tree_find(tree, FIFF.FIFFB_BEM)
if bem is None or len(bem) == 0:
raise ValueError('BEM data not found')
bem = bem[0]
# Locate all surfaces
bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)
if bemsurf is None:
raise ValueError('BEM surface data not found')
logger.info(' %d BEM surfaces found' % len(bemsurf))
# Coordinate frame possibly at the top level
tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)
if tag is not None:
coord_frame = tag.data
# Read all surfaces
if s_id is not None:
surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
for bsurf in bemsurf]
surf = [s for s in surf if s is not None]
if not len(surf) == 1:
raise ValueError('surface with id %d not found' % s_id)
else:
surf = list()
for bsurf in bemsurf:
logger.info(' Reading a surface...')
this = _read_bem_surface(fid, bsurf, coord_frame)
surf.append(this)
logger.info('[done]')
logger.info(' %d BEM surfaces read' % len(surf))
if patch_stats:
for this in surf:
_complete_surface_info(this)
return surf[0] if s_id is not None else surf
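# Usage sketch (added for illustration; not part of the original module). The
# file name is hypothetical:
#
#     >>> surfs = read_bem_surfaces('sample-5120-5120-5120-bem.fif',
#     ...                           patch_stats=True)
#     >>> inner_skull = read_bem_surfaces('sample-5120-5120-5120-bem.fif',
#     ...                                 s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)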
def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
"""Read one bem surface
"""
# fid should be open as a context manager here
res = dict()
# Read all the interesting stuff
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)
if tag is None:
res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
else:
res['id'] = int(tag.data)
if s_id is not None and res['id'] != s_id:
return None
tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)
res['sigma'] = 1.0 if tag is None else float(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE)
if tag is None:
raise ValueError('Number of vertices not found')
res['np'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
if tag is None:
raise ValueError('Number of triangles not found')
res['ntri'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)
if tag is None:
res['coord_frame'] = def_coord_frame
else:
res['coord_frame'] = tag.data
else:
res['coord_frame'] = tag.data
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)
if tag is None:
raise ValueError('Vertex data not found')
res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug
if res['rr'].shape[0] != res['np']:
raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
        tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
if tag is None:
res['nn'] = list()
else:
res['nn'] = tag.data
if res['nn'].shape[0] != res['np']:
raise ValueError('Vertex normal information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
if tag is None:
raise ValueError('Triangulation not found')
res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
raise ValueError('Triangulation information is incorrect')
return res
@verbose
def read_bem_solution(fname, verbose=None):
"""Read the BEM solution from a file
Parameters
----------
fname : string
The file containing the BEM solution.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
bem : instance of ConductorModel
The BEM solution.
See Also
--------
write_bem_solution, read_bem_surfaces, write_bem_surfaces,
make_bem_solution
"""
# mirrors fwd_bem_load_surfaces from fwd_bem_model.c
logger.info('Loading surfaces...')
bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)
if len(bem_surfs) == 3:
logger.info('Three-layer model surfaces loaded.')
needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_BRAIN])
if not all(x['id'] in needed for x in bem_surfs):
raise RuntimeError('Could not find necessary BEM surfaces')
# reorder surfaces as necessary (shouldn't need to?)
reorder = [None] * 3
for x in bem_surfs:
reorder[np.where(x['id'] == needed)[0][0]] = x
bem_surfs = reorder
elif len(bem_surfs) == 1:
if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:
raise RuntimeError('BEM Surfaces not found')
logger.info('Homogeneous model surface loaded.')
# convert from surfaces to solution
bem = ConductorModel(is_sphere=False, surfs=bem_surfs)
logger.info('\nLoading the solution matrix...\n')
f, tree, _ = fiff_open(fname)
with f as fid:
# Find the BEM data
nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)
if len(nodes) == 0:
raise RuntimeError('No BEM data in %s' % fname)
bem_node = nodes[0]
# Approximation method
tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX)
if tag is None:
raise RuntimeError('No BEM solution found in %s' % fname)
method = tag.data[0]
if method not in (FIFF.FIFFV_BEM_APPROX_CONST,
FIFF.FIFFV_BEM_APPROX_LINEAR):
raise RuntimeError('Cannot handle BEM approximation method : %d'
% method)
tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)
dims = tag.data.shape
if len(dims) != 2:
raise RuntimeError('Expected a two-dimensional solution matrix '
'instead of a %d dimensional one' % dims[0])
dim = 0
for surf in bem['surfs']:
if method == FIFF.FIFFV_BEM_APPROX_LINEAR:
dim += surf['np']
else: # method == FIFF.FIFFV_BEM_APPROX_CONST
dim += surf['ntri']
if dims[0] != dim or dims[1] != dim:
raise RuntimeError('Expected a %d x %d solution matrix instead of '
'a %d x %d one' % (dim, dim, dims[1], dims[0]))
sol = tag.data
nsol = dims[0]
bem['solution'] = sol
bem['nsol'] = nsol
bem['bem_method'] = method
# Gamma factors and multipliers
_add_gamma_multipliers(bem)
kind = {
FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation',
        FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear collocation',
}[bem['bem_method']]
logger.info('Loaded %s BEM solution from %s', kind, fname)
return bem
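# Usage sketch (added for illustration; not part of the original module). The
# file name is hypothetical:
#
#     >>> bem = read_bem_solution('sample-5120-5120-5120-bem-sol.fif')
#     >>> inner_skull = _bem_find_surface(bem, 'inner_skull')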
def _add_gamma_multipliers(bem):
"""Helper to add gamma and multipliers in-place"""
bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])
# Dirty trick for the zero conductivity outside
sigma = np.r_[0.0, bem['sigma']]
bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])
bem['field_mult'] = sigma[1:] - sigma[:-1]
# make sure subsequent "zip"s work correctly
assert len(bem['surfs']) == len(bem['field_mult'])
bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /
(sigma[1:] + sigma[:-1])[:, np.newaxis])
_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,
'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,
'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}
def _bem_find_surface(bem, id_):
"""Find surface from already-loaded BEM"""
if isinstance(id_, string_types):
name = id_
id_ = _surf_dict[id_]
else:
name = _bem_explain_surface(id_)
idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]
if len(idx) != 1:
raise RuntimeError('BEM model does not have the %s triangulation'
% name.replace('_', ' '))
return bem['surfs'][idx[0]]
def _bem_explain_surface(id_):
"""Return a string corresponding to the given surface ID"""
_rev_dict = dict((val, key) for key, val in _surf_dict.items())
return _rev_dict[id_]
# ############################################################################
# Write
@deprecated('write_bem_surface is deprecated and will be removed in 0.11, '
'use write_bem_surfaces instead')
def write_bem_surface(fname, surf):
"""Write one bem surface
Parameters
----------
fname : string
File to write
surf : dict
A surface structured as obtained with read_bem_surfaces
See Also
--------
read_bem_surfaces
"""
write_bem_surfaces(fname, surf)
def write_bem_surfaces(fname, surfs):
"""Write BEM surfaces to a fiff file
Parameters
----------
fname : str
Filename to write.
surfs : dict | list of dict
The surfaces, or a single surface.
"""
if isinstance(surfs, dict):
surfs = [surfs]
with start_file(fname) as fid:
start_block(fid, FIFF.FIFFB_BEM)
write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame'])
_write_bem_surfaces_block(fid, surfs)
end_block(fid, FIFF.FIFFB_BEM)
end_file(fid)
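# Usage sketch (added for illustration; not part of the original module). The
# file names are hypothetical:
#
#     >>> surfs = read_bem_surfaces('sample-5120-5120-5120-bem.fif')
#     >>> write_bem_surfaces('copy-bem.fif', surfs)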
def _write_bem_surfaces_block(fid, surfs):
"""Helper to actually write bem surfaces"""
for surf in surfs:
start_block(fid, FIFF.FIFFB_BEM_SURF)
write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])
write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame'])
write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])
write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])
write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])
# index start at 0 in Python
write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES,
surf['tris'] + 1)
if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn'])
end_block(fid, FIFF.FIFFB_BEM_SURF)
def write_bem_solution(fname, bem):
"""Write a BEM model with solution
Parameters
----------
fname : str
The filename to use.
bem : instance of ConductorModel
The BEM model with solution to save.
See Also
--------
read_bem_solution
"""
with start_file(fname) as fid:
start_block(fid, FIFF.FIFFB_BEM)
# Coordinate frame (mainly for backward compatibility)
write_int(fid, FIFF.FIFF_BEM_COORD_FRAME,
bem['surfs'][0]['coord_frame'])
# Surfaces
_write_bem_surfaces_block(fid, bem['surfs'])
# The potential solution
if 'solution' in bem:
if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
raise RuntimeError('Only linear collocation supported')
write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,
bem['solution'])
end_block(fid, FIFF.FIFFB_BEM)
end_file(fid)
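# Usage sketch (added for illustration; not part of the original module). A
# round trip through disk preserves the solution (file names hypothetical):
#
#     >>> bem = make_bem_solution('sample-5120-5120-5120-bem.fif')
#     >>> write_bem_solution('sample-bem-sol.fif', bem)
#     >>> bem2 = read_bem_solution('sample-bem-sol.fif')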
| lorenzo-desantis/mne-python | mne/bem.py | Python | bsd-3-clause | 49,041 | ["Mayavi"] | 6dbb78e6831dae757a4c7f1171610569005fa095f1796db94554b0d0870a3746 |
from django.db import models
from edc_base.model.fields import OtherCharField
from edc_constants.choices import YES_NO_UNKNOWN
from edc_base.model.models import BaseUuidModel
from edc_base.model.validators import date_not_future
from edc_visit_tracking.models import CrfInlineModelMixin
from edc_sync.models import SyncModelMixin
from tshilo_dikotla.choices import REASONS_VACCINES_MISSED
from ..choices import IMMUNIZATIONS, INFANT_AGE_VACCINE_GIVEN
from ..managers import VaccinesMissedManager, VaccinesReceivedManager
from .infant_crf_model import InfantCrfModel
class InfantFuImmunizations(InfantCrfModel):
""" A model completed by the user on the infant's follow up immunizations. """
vaccines_received = models.CharField(
max_length=25,
choices=YES_NO_UNKNOWN,
verbose_name="Did this infant receive any vaccinations since the last visit",
help_text="")
vaccines_missed = models.CharField(
max_length=25,
choices=YES_NO_UNKNOWN,
verbose_name="Is the child missing any vaccinations?",
help_text="")
class Meta:
app_label = 'td_infant'
verbose_name = "Infant FollowUp: Immunizations"
verbose_name_plural = "Infant FollowUp: Immunizations"
class VaccinesReceived(CrfInlineModelMixin, SyncModelMixin, BaseUuidModel):
"""ALL possible vaccines given to infant"""
infant_fu_immunizations = models.ForeignKey(InfantFuImmunizations)
received_vaccine_name = models.CharField(
verbose_name="Received vaccine name",
choices=IMMUNIZATIONS,
max_length=100,
null=True,
blank=True)
date_given = models.DateField(
verbose_name="Date Given",
validators=[
date_not_future, ],
null=True,
blank=True)
infant_age = models.CharField(
verbose_name="Infant age when vaccine given",
choices=INFANT_AGE_VACCINE_GIVEN,
null=True,
blank=True,
max_length=35)
objects = VaccinesReceivedManager()
def natural_key(self):
return (self.received_vaccine_name, ) + self.infant_fu_immunizations.natural_key()
class Meta:
app_label = 'td_infant'
verbose_name = 'Received Vaccines'
verbose_name_plural = 'Received Vaccines'
unique_together = ('received_vaccine_name', 'infant_fu_immunizations')
class VaccinesMissed(CrfInlineModelMixin, SyncModelMixin, BaseUuidModel):
"""ALL vaccines missed by infant"""
parent_model_attr = 'infant_fu_immunizations'
infant_fu_immunizations = models.ForeignKey(InfantFuImmunizations)
missed_vaccine_name = models.CharField(
verbose_name="Missed vaccine name",
choices=IMMUNIZATIONS,
max_length=100,
null=True,
blank=True)
reason_missed = models.CharField(
verbose_name="Reasons infant missed vaccines",
choices=REASONS_VACCINES_MISSED,
max_length=100,
null=True,
blank=True)
reason_missed_other = OtherCharField()
objects = VaccinesMissedManager()
def natural_key(self):
return (self.missed_vaccine_name, ) + self.infant_fu_immunizations.natural_key()
class Meta:
app_label = 'td_infant'
verbose_name = 'Missed Vaccines'
verbose_name_plural = 'Missed Vaccines'
unique_together = ('missed_vaccine_name', 'infant_fu_immunizations')
| TshepangRas/tshilo-dikotla | td_infant/models/infant_fu_immunizations.py | Python | gpl-2.0 | 3,416 | ["VisIt"] | 4c34ff20a03789bd1114f2853958dd27f5be7b6e26cae2adc8496e2ed108e09d |
# coding: utf-8
"""
Acceptance tests for Studio's Setting pages
"""
from __future__ import unicode_literals
from nose.plugins.attrib import attr
from unittest import skip
from base_studio_test import StudioCourseTest
from bok_choy.promise import EmptyPromise
from ...fixtures.course import XBlockFixtureDesc
from ..helpers import create_user_partition_json
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.settings import SettingsPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.lms.courseware import CoursewarePage
from textwrap import dedent
from xmodule.partitions.partitions import Group
class ContentGroupConfigurationTest(StudioCourseTest):
"""
Tests for content groups in the Group Configurations Page.
There are tests for the experiment groups in test_studio_split_test.
"""
def setUp(self):
super(ContentGroupConfigurationTest, self).setUp()
self.group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
"""
        Populates the test course with a chapter, a sequential, and one problem.
The problem is visible only to Group "alpha".
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def create_and_verify_content_group(self, name, existing_groups):
"""
Creates a new content group and verifies that it was properly created.
"""
self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups))
if existing_groups == 0:
self.group_configurations_page.create_first_content_group()
else:
self.group_configurations_page.add_content_group()
config = self.group_configurations_page.content_groups[existing_groups]
config.name = name
# Save the content group
self.assertEqual(config.get_text('.action-primary'), "Create")
self.assertFalse(config.delete_button_is_present)
config.save()
self.assertIn(name, config.name)
return config
def test_no_content_groups_by_default(self):
"""
        Scenario: Ensure that the message telling me to create a new content group is
shown when no content groups exist.
Given I have a course without content groups
When I go to the Group Configuration page in Studio
Then I see "You have not created any content groups yet." message
"""
self.group_configurations_page.visit()
self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present)
self.assertIn(
"You have not created any content groups yet.",
self.group_configurations_page.no_content_groups_message_text
)
def test_can_create_and_edit_content_groups(self):
"""
Scenario: Ensure that the content groups can be created and edited correctly.
Given I have a course without content groups
When I click button 'Add your first Content Group'
        And I set the new name and click the button 'Create'
Then I see the new content is added and has correct data
And I click 'New Content Group' button
And I set the name and click the button 'Create'
Then I see the second content group is added and has correct data
When I edit the second content group
And I change the name and click the button 'Save'
Then I see the second content group is saved successfully and has the new name
"""
self.group_configurations_page.visit()
self.create_and_verify_content_group("New Content Group", 0)
second_config = self.create_and_verify_content_group("Second Content Group", 1)
# Edit the second content group
second_config.edit()
second_config.name = "Updated Second Content Group"
self.assertEqual(second_config.get_text('.action-primary'), "Save")
second_config.save()
self.assertIn("Updated Second Content Group", second_config.name)
def test_cannot_delete_used_content_group(self):
"""
Scenario: Ensure that the user cannot delete used content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I try to delete the Content Group with name "New Content Group"
Then I see the delete button is disabled.
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,',
'Content Group Partition',
[Group("0", 'alpha')],
scheme="cohort"
)
],
},
})
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}),
)
self.group_configurations_page.visit()
config = self.group_configurations_page.content_groups[0]
self.assertTrue(config.delete_button_is_disabled)
@skip("TNL-3258")
def test_can_delete_unused_content_group(self):
"""
Scenario: Ensure that the user can delete unused content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I delete the Content Group with name "New Content Group"
Then I see that there is no Content Group
When I refresh the page
Then I see that the content group has been deleted
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
self.assertTrue(config.delete_button_is_present)
self.assertEqual(len(self.group_configurations_page.content_groups), 1)
# Delete content group
config.delete()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
self.group_configurations_page.visit()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
def test_must_supply_name(self):
"""
Scenario: Ensure that validation of the content group works correctly.
Given I have a course without content groups
And I create new content group without specifying a name click the button 'Create'
Then I see error message "Content Group name is required."
When I set a name and click the button 'Create'
Then I see the content group is saved successfully
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.save()
self.assertEqual(config.mode, 'edit')
self.assertEqual("Group name is required", config.validation_message)
config.name = "Content Group Name"
config.save()
self.assertIn("Content Group Name", config.name)
def test_can_cancel_creation_of_content_group(self):
"""
Scenario: Ensure that creation of a content group can be canceled correctly.
Given I have a course without content groups
When I click button 'Add your first Content Group'
        And I set the new name and click the button 'Cancel'
Then I see that there is no content groups in the course
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.name = "Content Group"
config.cancel()
self.assertEqual(0, len(self.group_configurations_page.content_groups))
def test_content_group_empty_usage(self):
"""
Scenario: When content group is not used, ensure that the link to outline page works correctly.
Given I have a course without content group
And I create new content group
Then I see a link to the outline page
When I click on the outline link
Then I see the outline page
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
config.toggle()
config.click_outline_anchor()
# Waiting for the page load and verify that we've landed on course outline page
EmptyPromise(
lambda: self.outline_page.is_browser_on_page(), "loaded page {!r}".format(self.outline_page),
timeout=30
).fulfill()
class AdvancedSettingsValidationTest(StudioCourseTest):
"""
Tests for validation feature in Studio's advanced settings tab
"""
def setUp(self):
super(AdvancedSettingsValidationTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
'Maximum Attempts', 'Course Announcement Date']
# Before every test, make sure to visit the page first
self.advanced_settings.visit()
self.assertTrue(self.advanced_settings.is_browser_on_page())
def test_modal_shows_one_validation_error(self):
"""
Test that advanced settings don't save if there's a single wrong input,
and that it shows the correct error message in the modal.
"""
# Feed an integer value for String field.
# .set method saves automatically after setting a value
course_display_name = self.advanced_settings.get('Course Display Name')
self.advanced_settings.set('Course Display Name', 1)
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(['Course Display Name'])
self.advanced_settings.refresh_and_wait_for_load()
self.assertEquals(
self.advanced_settings.get('Course Display Name'),
course_display_name,
'Wrong input for Course Display Name must not change its value'
)
def test_modal_shows_multiple_validation_errors(self):
"""
Test that advanced settings don't save with multiple wrong inputs
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(self.type_fields)
self.advanced_settings.refresh_and_wait_for_load()
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
'Wrong input for Advanced Settings Fields must not change its value'
)
def test_undo_changes(self):
"""
Test that undo changes button in the modal resets all settings changes
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
# Let modal popup
self.advanced_settings.wait_for_modal_load()
# Press Undo Changes button
self.advanced_settings.undo_changes_via_modal()
# Check that changes are undone
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
'Undoing Should revert back to original value'
)
def test_manual_change(self):
"""
Test that manual changes button in the modal keeps settings unchanged
"""
inputs = {"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
self.advanced_settings.trigger_manual_changes()
# Check that the validation modal went away.
self.assertFalse(self.advanced_settings.is_validation_modal_present())
# Iterate through the wrong values and make sure they're still displayed
for key, val in inputs.iteritems():
print self.advanced_settings.get(key)
print val
self.assertEquals(
str(self.advanced_settings.get(key)),
str(val),
'manual change should keep: ' + str(val) + ', but is: ' + str(self.advanced_settings.get(key))
)
def check_modal_shows_correct_contents(self, wrong_settings_list):
"""
Helper function that checks if the validation modal contains correct
error messages.
"""
# Check presence of modal
self.assertTrue(self.advanced_settings.is_validation_modal_present())
# List of wrong settings item & what is presented in the modal should be the same
error_item_names = self.advanced_settings.get_error_item_names()
self.assertEqual(set(wrong_settings_list), set(error_item_names))
error_item_messages = self.advanced_settings.get_error_item_messages()
self.assertEqual(len(error_item_names), len(error_item_messages))
def get_settings_fields_of_each_type(self):
"""
Get one of each field type:
- String: Course Display Name
- List: Advanced Module List
- Dict: Discussion Topic Mapping
- Integer: Maximum Attempts
- Date: Course Announcement Date
"""
return {
"Course Display Name": self.advanced_settings.get('Course Display Name'),
"Advanced Module List": self.advanced_settings.get('Advanced Module List'),
"Discussion Topic Mapping": self.advanced_settings.get('Discussion Topic Mapping'),
"Maximum Attempts": self.advanced_settings.get('Maximum Attempts'),
"Course Announcement Date": self.advanced_settings.get('Course Announcement Date'),
}
def set_wrong_inputs_to_fields(self):
"""
Set wrong values for the chosen fields
"""
self.advanced_settings.set_values(
{
"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
)
def test_only_expected_fields_are_displayed(self):
"""
Scenario: The Advanced Settings screen displays settings/fields not specifically hidden from
view by a developer.
Given I have a set of CourseMetadata fields defined for the course
When I view the Advanced Settings screen for the course
The total number of fields displayed matches the number I expect
And the actual fields displayed match the fields I expect to see
"""
expected_fields = self.advanced_settings.expected_settings_names
displayed_fields = self.advanced_settings.displayed_settings_names
self.assertEquals(set(displayed_fields), set(expected_fields))
@attr('shard_1')
class ContentLicenseTest(StudioCourseTest):
"""
Tests for course-level licensing (that is, setting the license,
for an entire course's content, to All Rights Reserved or Creative Commons)
"""
def setUp(self): # pylint: disable=arguments-differ
super(ContentLicenseTest, self).setUp()
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.lms_courseware = CoursewarePage(
self.browser,
self.course_id,
)
self.settings_page.visit()
def test_empty_license(self):
"""
When I visit the Studio settings page,
I see that the course license is "All Rights Reserved" by default.
Then I visit the LMS courseware page,
and I see that the default course license is displayed.
"""
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_arr_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "All Rights Reserved",
and I refresh the page,
I see that the course license is "All Rights Reserved".
Then I visit the LMS courseware page,
and I see that the course license is "All Rights Reserved".
"""
self.settings_page.course_license = "All Rights Reserved"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_cc_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "Creative Commons",
and I refresh the page,
I see that the course license is "Creative Commons".
Then I visit the LMS courseware page,
and I see that the course license is "Some Rights Reserved".
"""
self.settings_page.course_license = "Creative Commons"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "Creative Commons")
self.lms_courseware.visit()
# The course_license text will include a bunch of screen reader text to explain
# the selected options
self.assertIn("Some Rights Reserved", self.lms_courseware.course_license)
|
playm2mboy/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings.py
|
Python
|
agpl-3.0
| 19,808
|
[
"VisIt"
] |
c63a50573d227cc82705269ed90bd14338f47b14abd2679b88dd6bfc0cc12825
|
"""
This code generates the path required for a knight's tour
around a chessboard with user-specified dimensions
Written by Sophie Li, 2016, modified for use with SenseHat by Stuart Weenig
http://blog.justsophie.com/algorithm-for-knights-tour-in-python/
"""
import sys
from time import sleep
if raw_input('Use real sense hat? (Y/N)').lower() == 'y':
from sense_hat import SenseHat
else:
from sense_emu import SenseHat
sense = SenseHat()
sense.clear()
w = (255,255,255)
b = (0,0,0)
k = (0,0,255) #color for the knight
p = (0,255,0) #color for previous path spaces
if raw_input('Draw checkerboard? (Y/N)').lower() == 'y':
blankboard = [[w,b,w,b,w,b,w,b],
[b,w,b,w,b,w,b,w],
[w,b,w,b,w,b,w,b],
[b,w,b,w,b,w,b,w],
[w,b,w,b,w,b,w,b],
[b,w,b,w,b,w,b,w],
[w,b,w,b,w,b,w,b],
[b,w,b,w,b,w,b,w]]
sense.set_pixels(sum(blankboard,[]))
class KnightsTour:
def __init__(self, width, height):
self.w = width #store the width in the object
self.h = height #store the height in the object
self.board = [] #start with an empty board
for i in range(self.h): #for each row...
self.board.append([0]*self.w) #add a row of 0's
def generate_legal_moves(self, cur_pos): #Generates a list of legal moves for the knight to take next
possible_pos = [] #assume no moves are legal
move_offsets = [(1, 2), (1, -2), (-1, 2), (-1, -2),(2, 1), (2, -1), (-2, 1), (-2, -1)] #relative positions of possible moves
for move in move_offsets: #for each possible move
new_x = cur_pos[0] + move[0] #set new position
new_y = cur_pos[1] + move[1] #set new position
if (new_x >= self.h or new_x < 0 or new_y >= self.w or new_y < 0): #if the new position is off the board
continue #don't do anything (this should be turned into a single if statement, instead of an if..else)
else: #if the new position is on the board, append it
possible_pos.append((new_x, new_y)) #append the validated position to the possible_position list
return possible_pos #return the possible position list
def sort_lonely_neighbors(self, to_visit):
"""
It is more efficient to visit the lonely neighbors first,
since these are at the edges of the chessboard and cannot
be reached easily if done later in the traversal
"""
neighbor_list = self.generate_legal_moves(to_visit) #get all the moves from the current position that are on the board
empty_neighbours = [] #make a place to list all the empty neighbors
for neighbor in neighbor_list: #for each legal move
np_value = self.board[neighbor[0]][neighbor[1]] #get the board value at this neighbor
if np_value == 0: #if it's zero, it hasn't been visited, so it's an empty neighbor
empty_neighbours.append(neighbor) #if it hasn't been visited, add it to the empty neighbors list
scores = [] #make a place to store the scores
for empty in empty_neighbours: #for all legal moves that haven't been visited
score = [empty, 0] #give the move a starting score of 0
moves = self.generate_legal_moves(empty) #figure out the moves from each of to_visit's neighbors
for m in moves: #for all legal moves
if self.board[m[0]][m[1]] == 0: #if the move is 0, it's empty, give it a higher score
score[1] += 1 #increment this neighbor's score for each of its own empty onward moves
scores.append(score) #put the score for this move into the list
scores_sort = sorted(scores, key = lambda s: s[1]) #sort by the score
sorted_neighbours = [s[0] for s in scores_sort] #put the moves in order into the sorted neighbors list
return sorted_neighbours #return the list
def tour(self, n, path, to_visit): #n = current depth of search tree, path = current path taken, to_visit = node to visit
self.board[to_visit[0]][to_visit[1]] = n #put the current count in the new position
path.append(to_visit) #append the newest vertex to the current path
if n == self.w * self.h: #if every square is filled
print(path) #output the solution
prev_x,prev_y = path[0][0],path[0][1] #save the starting position as the previous position
for space in path: #mark the path
sense.set_pixel(prev_x,prev_y,p) #mark the previous position with a lighter color
sense.set_pixel(space[0],space[1],k) #mark the current position of the knight
prev_x,prev_y = space[0],space[1] #save the current position as the previous position
sense.set_pixel(startpos[0],startpos[1],(255,0,0)) #mark the starting position
sleep(0.25) #give the user a chance to see where things are (animation speed)
sys.exit(1) #exit the script because it was successful (if we return, the assumption is that it was unsuccessful)
else: #not every space is filled
sorted_neighbours = self.sort_lonely_neighbors(to_visit) #figure out the best order to try legal moves against
for neighbor in sorted_neighbours: #for every legal move
self.tour(n+1, path, neighbor) #recursion point
#If we exit this loop, all neighbours failed so we reset
self.board[to_visit[0]][to_visit[1]] = 0 #mark that new spot with a 0 to indicate it can't be done from this path
try:
path.pop() #take the most recent attempt off the path
except IndexError: #if there's nothing on the path to take off...
print('No available paths.')
sys.exit(1) #exit unsuccessfully
kt = KnightsTour(8, 8) #instantiate the board
startpos = tuple(int(x.strip()) for x in raw_input('Enter the starting position in the format X,Y: ').split(',')) #prompt for starting position
kt.tour(1,[],startpos) #start looking for a path from the prompted location
|
sweenig/piwars
|
knightstour/sophie.py
|
Python
|
gpl-3.0
| 5,585
|
[
"VisIt"
] |
52da4568f9a9745b37a0416e273027115403d3a0e65690b56556e53aa661cbd9
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Common definitions for GAN metrics."""
import os
import time
import hashlib
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
import config
from training import misc
from training import dataset
#----------------------------------------------------------------------------
# Standard metrics.
fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8)
ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16)
ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16)
ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16)
ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16)
ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4)
dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging
#----------------------------------------------------------------------------
# Base class for metrics.
class MetricBase:
def __init__(self, name):
self.name = name
self._network_pkl = None
self._dataset_args = None
self._mirror_augment = None
self._results = []
self._eval_time = None
def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True):
self._network_pkl = network_pkl
self._dataset_args = dataset_args
self._mirror_augment = mirror_augment
self._results = []
if (dataset_args is None or mirror_augment is None) and run_dir is not None:
run_config = misc.parse_config_for_previous_run(run_dir)
self._dataset_args = dict(run_config['dataset'])
self._dataset_args['shuffle_mb'] = 0
self._mirror_augment = run_config['train'].get('mirror_augment', False)
time_begin = time.time()
with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager
_G, _D, Gs = misc.load_pkl(self._network_pkl)
self._evaluate(Gs, num_gpus=num_gpus)
self._eval_time = time.time() - time_begin
if log_results:
result_str = self.get_result_str()
if run_dir is not None:
log = os.path.join(run_dir, 'metric-%s.txt' % self.name)
with dnnlib.util.Logger(log, 'a'):
print(result_str)
else:
print(result_str)
def get_result_str(self):
network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
if len(network_name) > 29:
network_name = '...' + network_name[-26:]
result_str = '%-30s' % network_name
result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time)
for res in self._results:
result_str += ' ' + self.name + res.suffix + ' '
result_str += res.fmt % res.value
return result_str
def update_autosummaries(self):
for res in self._results:
tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value)
def _evaluate(self, Gs, num_gpus):
raise NotImplementedError # to be overridden by subclasses
def _report_result(self, value, suffix='', fmt='%-10.4f'):
self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]
def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
all_args.update(self._dataset_args)
all_args.update(kwargs)
md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1]
return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension))
def _iterate_reals(self, minibatch_size):
dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args)
while True:
images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
if self._mirror_augment:
images = misc.apply_mirror_augment(images)
yield images
def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
while True:
latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
yield images
#----------------------------------------------------------------------------
# Group of multiple metrics.
class MetricGroup:
def __init__(self, metric_kwarg_list):
self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list]
def run(self, *args, **kwargs):
for metric in self.metrics:
metric.run(*args, **kwargs)
def get_result_str(self):
return ' '.join(metric.get_result_str() for metric in self.metrics)
def update_autosummaries(self):
for metric in self.metrics:
metric.update_autosummaries()
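# Usage sketch (illustrative; the pickle and run-dir names are hypothetical): a group
# can be assembled from the EasyDict configs at the top of this file, since each one
# carries the func_name that dnnlib.util.call_func_by_name() resolves:
#
#   group = MetricGroup([fid50k, ppl_zfull])
#   group.run('network-snapshot-010000.pkl', run_dir='results/00000-example', num_gpus=1)
#   print(group.get_result_str())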
#----------------------------------------------------------------------------
# Dummy metric for debugging purposes.
class DummyMetric(MetricBase):
def _evaluate(self, Gs, num_gpus):
_ = Gs, num_gpus
self._report_result(0.0)
#----------------------------------------------------------------------------
|
microsoft/DiscoFaceGAN
|
metrics/metric_base.py
|
Python
|
mit
| 6,479
|
[
"VisIt"
] |
c9013c84af79bc60cfca7025781fef8d124ac26a6fef5ca23ce59b891ba5a93f
|
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
for key, mesh in meshes.iteritems():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')
m2.coors *= 2.0
bbox = m1.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * m1.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * m1.coors[:,1:2] / dd[1])
variables1 = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
variables2 = {
'u' : ('unknown field', 'scalar_si', 0),
'v' : ('test field', 'scalar_si', 'u'),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
field1 = Field('scalar_tp', nm.float64, (1,1), omega1, approx_order=1)
ff1 = {field1.name : field1}
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('scalar_si', nm.float64, (1,1), omega2, approx_order=0)
ff2 = {field2.name : field2}
vv1 = Variables.from_conf(transform_variables(variables1), ff1)
u1 = vv1['u']
u1.set_from_mesh_vertices(data)
vv2 = Variables.from_conf(transform_variables(variables2), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.1)
fname = in_dir(self.options.out_dir)
u1.save_as_mesh(fname('test_mesh_interp_block_scalar.vtk'))
u2.save_as_mesh(fname('test_mesh_interp_cube_scalar.vtk'))
return True
|
olivierverdier/sfepy
|
tests/test_mesh_interp.py
|
Python
|
bsd-3-clause
| 5,360
|
[
"VTK"
] |
aaa41468b18cee229194e9fea7145167d5ad82004187bfc19f250774e034eae7
|
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
import hexablock
doc = hexablock.addDocument ("model")
center = doc.addVertex (0,0,0)
vx = doc.addVector (1,0,0)
vz = doc.addVector (0,0,1)
rhole = 1.0
rext = 4.0
angle = 260.0
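# The three tables below appear to define the sphere discretization passed to
# makeSphere: radial layers (trayons), azimuthal angles in degrees (tangles) and
# polar angles phi in degrees (tphi).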
trayons = [ 1, 2, 3 , 4 ]
tangles = [ 0, 30, 60, 90, 120, 150, 180]
tphi = [ -30, -10, 0, 10, 30, 45, 60, 75, 80 ]
doc.makeSphere (center,vx,vz, trayons, tangles, tphi)
doc.saveVtk ("model1.vtk")
doc.addLaws (0.5, True)
mesh_hexas = hexablock.mesh(doc)
|
FedoraScientific/salome-hexablock
|
src/TEST_PY/test_v6/makeSphere.py
|
Python
|
lgpl-2.1
| 1,306
|
[
"VTK"
] |
188438c49d379dc8c02066408bde606bcfc3db21f592a8c0707c89c26f6115c7
|
import os, sys
import constants as C
def count_keys(hash):
return len([x for x in hash])
def trim_distal_primer(primers_list, seq, trim_type, begin, end):
"""Doc string here.."""
d_primer_found = ""
trimmedPortion = ""
loc = 0
# change this to find the primer within C.distal_from_end or not at all
for p in primers_list:
# find the furthest RIGHT p in seq
if seq.rfind(p) != -1:
pos = seq.rfind(p)
if ( trim_type == "distal") and ( (len(seq) - len(p) - pos) > C.distal_from_end):
print "Length OutSeq:",len(seq), "Length d:", len(p), "Pos:",pos
continue
elif ( trim_type == "internal") and ( ( pos < begin ) or ( pos > end) ):
print "Exact trimming, pos is outside window from begin to end\n"
continue
# found whole exact primer
primer_found = p
loc = seq.find(p)
trimmedPortion = seq[loc:]
seq = seq[:loc]
return primer_found, trimmedPortion, seq
#return trimmedPortion,seq,primer_found
else:
truncLength = len(p) + 1 #add one just in case!
while truncLength >= 5:
dSeq = p[:truncLength] # cut primer to the appropriate length
if dSeq in seq:
primer_found = dSeq
loc = seq.find(p)
trimmedPortion = seq[loc:]
seq = seq[:loc]
return primer_found, trimmedPortion, seq
truncLength = truncLength - 1
return '', '', seq
def trim_fuzzy_distal(anchors_list,seq,trim_type,start,end):
"""Doc string here.."""
max_distance = 3
best_distance = max_distance + 1
found_fuzzy = 0
fuzzy_match = ""
for anchor in anchors_list:
anchor_length = len(anchor)
for pos in range(start,end):
seq_window = seq[pos:anchor_length]
dist = 0
#dist1 = abs( Levenshtein.ratio( anchor, seq_window ) )
# dist2 = abs( Levenshtein.ratio( seq_window, anchor ) )
dist1 = abs( levenshtein( anchor, seq_window ) )
dist2 = abs( levenshtein( seq_window, anchor ) )
if dist1 >= dist2: dist = dist1
else: dist = dist2
if (dist <= max_distance) and (dist < best_distance) and (seq_window[:2] == anchor[:2]):
if seq_window[-3:] != anchor[-3:]:
# check for deletion
if(seq_window[-4:][:3] == anchor[-3:]):
seq_window.strip()
print "Fuzzy with deletion",seq_window
# check for insertion
elif(seq_window[-3:] == anchor[-4:][:3]):
seq_window = seq_window + anchor[-1:]
print "fuzzy with insertion", seq_window
# Found a fuzzy match within tolerances, so store it
found_fuzzy = 1;
best_distance = dist;
best_position = pos;
fuzzy_match = seq_window;
if dist == 0:
found_exact = 1
break
fuzzy_right = ''
if found_fuzzy:
fuzzy_right = seq
if( trim_type == 'internal'):
seq = seq[:best_position + len(fuzzy_match)]
else:
seq = seq[:best_position]
fuzzy_right = fuzzy_right[len(seq):]
return fuzzy_right, best_distance, seq, fuzzy_match
def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
if not s1:
return len(s2)
previous_row = xrange(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
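# Illustrative check (not part of the original file): levenshtein('kitten', 'sitting')
# builds one DP row per character of s1 and returns 3 (two substitutions, one insertion).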
def trim_proximal_primer(primer_list, seq):
"""Doc string here.."""
p_primer_found = "";
offset = 0;
#oppositDir = "";
#primerDir = "F";
#print primer
#print seq
for primer in primer_list:
#print primer
if(primer in seq):
if(seq.find(primer) == 0):
p_primer_found = primer
seq = seq[len(primer):]
break
elif(seq.find(primer) > 0 and seq.find(primer) < 10):
p_primer_found = primer
offset = seq.find(primer)
seq = seq[seq.find(primer) + len(primer):]
break
return p_primer_found, offset, seq
def expand_primers(primer_list):
"""Takes a list of ambiguous primer sequences and expands them to remove ambiguities.
Adds them to a hash (prevents dupes) and returns them.
"""
# each primer has p_direction prepended
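# Illustrative example with a hypothetical input: expand_primers(['F:AYT']) resolves the
# IUPAC ambiguity Y -> C/T and would return {'ACT': 'F', 'ATT': 'F'}.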
expandedPrims={} # fully clean distals - hash
workingPrimers=[] # still cleaning distals
bases = ['A','C','G','T']
for dir_primer in primer_list:
#print 'Primer:\n',dir_primer
# change all '.' to N
primer = dir_primer.replace('.','N')
#print '1',primer
workingPrimers.append(primer.upper())
while (len(workingPrimers) > 0):
# this is the only place items are removed from working primers
t = workingPrimers.pop()
#print 'primer\n',d
# remove prepended direction to expand
# then replace it again when re-appending to workingPrimers
d = t[2:]
dir = t[:1]
# # For each N (blast doesn't like them) expand to 4 distals, one for each base
if ('N' in d ):
for b in bases:
workingPrimers.append(dir+':'+d.replace('N',b,1))
#print '1',d.replace('N',b,1)
# # For R, Y, W, S, M, K expand to the pair of bases
elif ('R' in d):
workingPrimers.append(dir+':'+d.replace('R','A',1))
workingPrimers.append(dir+':'+d.replace('R','G',1))
#print '1',d.replace('R','G',1)
elif ('Y' in d):
workingPrimers.append(dir+':'+d.replace('Y','C',1))
workingPrimers.append(dir+':'+d.replace('Y','T',1))
elif ('W' in d):
workingPrimers.append(dir+':'+d.replace('W','A',1))
workingPrimers.append(dir+':'+d.replace('W','T',1))
elif ('S' in d):
workingPrimers.append(dir+':'+d.replace('S','G',1))
workingPrimers.append(dir+':'+d.replace('S','C',1))
elif ('M' in d):
workingPrimers.append(dir+':'+d.replace('M','A',1))
workingPrimers.append(dir+':'+d.replace('M','C',1))
elif ('K' in d):
workingPrimers.append(dir+':'+d.replace('K','G',1))
workingPrimers.append(dir+':'+d.replace('K','T',1))
# # For each [CT] or [AT] ... expand to each base
elif ('[' in d):
#print d.find('[')
#print d.find(']')
if( d.find('[') + 3 == d.find(']') ):
base1 = d[d.find('[')+1:d.find('[')+2]
#print base1
base2 = d[d.find('[')+2:d.find('[')+3]
#print base2
replace = '['+base1+base2+']'
workingPrimers.append(dir+':'+d.replace(replace,base1,1))
workingPrimers.append(dir+':'+d.replace(replace,base2,1))
#print '1',d.replace(replace,base1,1)
#print '2',d.replace(replace,base2,1)
# # expand \?
elif ('?' in d):
if(d.find('?') == 0):
workingPrimers.append(dir+':'+d[1:])
#print '1',d[1:]
else:
preceder_plus = d[d.find('?')-1:d.find('?')+1]
#print preceder_plus
# the preceding base exists: remove '?' only
workingPrimers.append(dir+':'+d.replace('?','',1))
# preceding base doesn't exist: remove '?' and preceding base
workingPrimers.append(dir+':'+d.replace(preceder_plus,'',1))
#print '2',d.replace('?','',1)
#print '3',d.replace(preceder_plus,'',1)
# # next expand + to 1 or 2
elif ('+' in d):
preceder = d[d.find('+')-1:d.find('+')]
# the preceding base exists once: remove '+' only
workingPrimers.append(dir+':'+d.replace('+','',1))
# the preceding base exists twice: change '+' to preceding base
workingPrimers.append(dir+':'+d.replace('+',preceder,1))
# # expand * to 0,1,2
elif ('*' in d):
if(d.find('*') == 0):
workingPrimers.append(dir+':'+d[1:])
else:
preceder = d[d.find('*')-1:d.find('*')]
preceder_plus = d[d.find('*')-1:d.find('*')+1]
#print preceder,preceder_plus
# the preceding base doesn't exist: remove '*' and preceding base
workingPrimers.append(dir+':'+d.replace(preceder_plus,'',1))
# the preceding base exists once: remove '*' only
workingPrimers.append(dir+':'+d.replace('*','',1))
# the preceding base exists twice: change '*' to preceding base
workingPrimers.append(dir+':'+d.replace('*',preceder,1))
# # For repeat bases, e.g., C{5,8} becomes 4 new primers: Cx5, Cx6, Cx7, Cx8
elif('{' in d):
if(d.find('}') == d.find('{') + 4 ):
repeatBase = d[d.find('{')-1:d.find('{')]
toReplace = d[d.find('{')-1:d.find('}')+1]
#print toReplace
#print 'repeatbase',repeatBase
minCount = d[ d.find('{') + 1:d.find('{') + 2 ]
maxCount = d[ d.find('{') + 3:d.find('{') + 4 ]
#print minCount, maxCount
for i in range (int(minCount),int(maxCount) + 1 ):
homopolymer = i * repeatBase
workingPrimers.append(dir+':'+d.replace(toReplace, homopolymer))
# If it made it through everything else, move it to the final set
# Use hash so can filter out duplicates
else:
expandedPrims[d] = dir
# if ($verbose) {print join("\n", "Orig @$dist", keys %expandedDistals) . "\n";}
return expandedPrims
def expand(seq):
"""Takes a single ambiguous primer sequence and expands it to remove ambiguities.
Adds them to a hash (prevents dupes) and returns the hash.
"""
expandedPrims={} # fully clean distals - hash to prevent dupes
workingPrimers=[] # still cleaning holder
bases = ['A','C','G','T']
seq = seq.replace('.','N')
#print '1',primer
workingPrimers.append(seq.upper())
while (len(workingPrimers) > 0):
# this is the only place items are removed from working primers
d = workingPrimers.pop()
# # For each N (blast doesn't like them) expand to 4 distals, one for each base
if ('N' in d ):
for b in bases:
workingPrimers.append(d.replace('N',b,1))
# # For R, Y, W, S, M, K expand to the pair of bases
elif ('R' in d):
workingPrimers.append(d.replace('R','A',1))
workingPrimers.append(d.replace('R','G',1))
#print '1',d.replace('R','G',1)
elif ('Y' in d):
workingPrimers.append(d.replace('Y','C',1))
workingPrimers.append(d.replace('Y','T',1))
elif ('W' in d):
workingPrimers.append(d.replace('W','A',1))
workingPrimers.append(d.replace('W','T',1))
elif ('S' in d):
workingPrimers.append(d.replace('S','G',1))
workingPrimers.append(d.replace('S','C',1))
elif ('M' in d):
workingPrimers.append(d.replace('M','A',1))
workingPrimers.append(d.replace('M','C',1))
elif ('K' in d):
workingPrimers.append(d.replace('K','G',1))
workingPrimers.append(d.replace('K','T',1))
# # For each [CT] or [AT] ... expand to each base
elif ('[' in d):
if( d.find('[') + 3 == d.find(']') ):
base1 = d[d.find('[')+1:d.find('[')+2]
base2 = d[d.find('[')+2:d.find('[')+3]
replace = '['+base1+base2+']'
workingPrimers.append(d.replace(replace,base1,1))
workingPrimers.append(d.replace(replace,base2,1))
# # expand \?
elif ('?' in d):
if(d.find('?') == 0):
workingPrimers.append(d[1:])
else:
preceder_plus = d[d.find('?')-1:d.find('?')+1]
#print preceder_plus
# the preceding base exists: remove '?' only
workingPrimers.append(d.replace('?','',1))
# preceding base doesn't exist: remove '?' and preceding base
workingPrimers.append(d.replace(preceder_plus,'',1))
# # next expand + to 1 or 2
elif ('+' in d):
preceder = d[d.find('+')-1:d.find('+')]
# the preceding base exists once: remove '+' only
workingPrimers.append(d.replace('+','',1))
# the preceding base exists twice: change '+' to preceding base
workingPrimers.append(d.replace('+',preceder,1))
# # expand * to 0,1,2
elif ('*' in d):
if(d.find('*') == 0):
workingPrimers.append(d[1:])
else:
preceder = d[d.find('*')-1:d.find('*')]
preceder_plus = d[d.find('*')-1:d.find('*')+1]
#print preceder,preceder_plus
# the preceding base doesn't exist: remove '*' and preceding base
workingPrimers.append(d.replace(preceder_plus,'',1))
# the preceding base exists once: remove '*' only
workingPrimers.append(d.replace('*','',1))
# the preceding base exists twice: change '*' to preceding base
workingPrimers.append(d.replace('*',preceder,1))
# # For repeat bases, e.g., C{5,8} becomes 4 new primers: Cx5, Cx6, Cx7, Cx8
elif('{' in d):
if(d.find('}') == d.find('{') + 4 ):
repeatBase = d[d.find('{')-1:d.find('{')]
toReplace = d[d.find('{')-1:d.find('}')+1]
minCount = d[ d.find('{') + 1:d.find('{') + 2 ]
maxCount = d[ d.find('{') + 3:d.find('{') + 4 ]
#print minCount, maxCount
for i in range (int(minCount),int(maxCount) + 1 ):
homopolymer = i * repeatBase
workingPrimers.append(d.replace(toReplace, homopolymer))
# If it made it through everything else, move it to the final set
# Use hash so can filter out duplicates
else:
if(d):
expandedPrims[d] = 1
return expandedPrims.keys()
def get_expanded_primers(lane_key, sample_primers):
orig_primer_seqs = {}
expanded_primers = {}
if(os.path.exists(sample_primers) and os.path.isfile(sample_primers)):
f = open(sample_primers, 'r')
x = f.readlines()
orig_primer_seqs[lane_key] = [p.strip() for p in x]
else: # this has to be a single primer seq or comma separated list
orig_primer_seqs[lane_key] = sample_primers.split(',')
#for dir_raw_primer in proximal_primers[lane_key]:
#p_direction, raw_primer = dir_raw_primer.split(':')
# goal: expanded_primers[lane_key][p_direction] = [list of expanded primers]
expanded_primers[lane_key] = {}
#print lane_key
expanded = expand_primers(orig_primer_seqs[lane_key])
expanded_primers[lane_key]['F'] = []
expanded_primers[lane_key]['R'] = []
for p_list in expanded:
if not p_list: continue
if (expanded[p_list] == 'F'):
expanded_primers[lane_key][expanded[p_list]].append(p_list)
# reverse complement here and add to this list also
base_list = p_list[::-1] # reverses primer
xxx = [complementary_bases[base] for base in list(base_list)]
expanded_primers[lane_key][expanded[p_list]].append(''.join(xxx) )
elif(expanded[p_list] == 'R'):
expanded_primers[lane_key][expanded[p_list]].append(p_list)
# reverse complement here and add to this list also
base_list = p_list[::-1] # reverses primer
xxx = [complementary_bases[base] for base in list(base_list)]
expanded_primers[lane_key][expanded[p_list]].append(''.join(xxx) )
#print 'comp',item,''.join(xx)
else:
pass
#print "ERROR expanding primers"
print "no primer direction found for", p_list
return expanded_primers[lane_key]
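# Illustrative result shape for a hypothetical forward primer 'F:AYT' (and assuming the
# complementary_bases mapping used above is available from the importing module): the
# returned dict maps direction to expanded primers plus their reverse complements, e.g.
# {'F': ['ACT', 'AGT', 'ATT', 'AAT'], 'R': []}.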
|
avoorhis/mbl_sequencing_pipeline
|
pipeline/primer_utils20120211.py
|
Python
|
gpl-2.0
| 17,584
|
[
"BLAST"
] |
4a4a04c6dbc2ba2b5d8201601ace6dc7bcda6b0c840e989ddc00cacd7d04d84c
|
# -*- coding: UTF-8 -*-
"""
Routines to visualize the DA White Dwarf model atmosphere fit
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.stats import norm
from itertools import cycle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.font_manager import FontProperties as FM
from astropy.visualization import hist
from . import io
from . import passband
import corner
from six.moves import range
def plot_minuit_spectrum_fit(spec, objname, outdir, specfile, scale_factor, model, result, save=True):
"""
Plot the MLE fit of the spectrum with the model, assuming uncorrelated
noise.
Parameters
----------
spec : :py:class:`numpy.recarray`
The spectrum. Must have
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8')]``
objname : str
object name - used to title plots
outdir : str
controls where the plot is written out if ``save=True``
specfile : str
Used in the title, and to set the name of the ``outfile`` if ``save=True``
scale_factor : float
factor by which the flux was scaled for y-axis label
model : :py:class:`WDmodel.WDmodel.WDmodel` instance
The DA White Dwarf SED model generator
result : dict
dictionary of parameters with keywords ``value``, ``fixed``, ``scale``,
``bounds`` for each. Same format as returned from
:py:func:`WDmodel.io.read_params`
save : bool
if True, save the file
Returns
-------
fig : :py:class:`matplotlib.figure.Figure` instance
Notes
-----
The MLE fit uses :py:meth:`iminuit.Minuit.migrad` to fit the spectrum
with the model. This fit doesn't try to account for the covariance in
the data, and is not expected to be great - just fast, and capable of
setting a reasonable initial guess. If it is apparent from the plot
that this fit is very far off, refine the initial guess supplied to the fitter.
"""
font_s = FM(size='small')
font_m = FM(size='medium')
font_l = FM(size='large')
fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(2, 1, height_ratios=[4,1])
ax_spec = fig.add_subplot(gs[0])
ax_resid = fig.add_subplot(gs[1])
ax_spec.fill_between(spec.wave, spec.flux+spec.flux_err, spec.flux-spec.flux_err,\
facecolor='grey', alpha=0.5, interpolate=True)
ax_spec.plot(spec.wave, spec.flux, color='black', linestyle='-', marker='None', label=specfile)
print_params = ('teff', 'logg', 'av', 'dl')
outlabel = 'Model\n'
for param in print_params:
val = result[param]['value']
err = result[param]['scale']
fixed = result[param]['fixed']
if val is None:
thislabel = '{} = {} '.format(param, val)
else:
thislabel = '{} = {:.3f} '.format(param, val)
if not fixed:
thislabel += ' +/- {:.3f}'.format(err)
else:
thislabel = '[{} FIXED]'.format(thislabel)
thislabel +='\n'
outlabel += thislabel
fix_labels = list(set(result.keys()) - set(print_params))
for param in fix_labels:
val = result[param]['value']
if val is None:
thislabel = '{} = {} '.format(param, val)
else:
thislabel = '{} = {:.3f} '.format(param, val)
thislabel = '[{} FIXED]'.format(thislabel)
thislabel +='\n'
outlabel += thislabel
teff = result['teff']['value']
logg = result['logg']['value']
av = result['av']['value']
dl = result['dl']['value']
rv = result['rv']['value']
fwhm = result['fwhm']['value']
pixel_scale = 1./np.median(np.gradient(spec.wave))
mod = model._get_obs_model(teff, logg, av, fwhm, spec.wave, rv=rv, pixel_scale=pixel_scale)
smoothedmod = mod* (1./(4.*np.pi*(dl)**2.))
ax_spec.plot(spec.wave, smoothedmod, color='red', linestyle='-',marker='None', label=outlabel)
ax_resid.fill_between(spec.wave, spec.flux-smoothedmod+spec.flux_err, spec.flux-smoothedmod-spec.flux_err,\
facecolor='grey', alpha=0.5, interpolate=True)
ax_resid.plot(spec.wave, spec.flux-smoothedmod, linestyle='-', marker=None, color='black')
ax_resid.set_xlabel('Wavelength~(\AA)',fontproperties=font_m, ha='center')
ax_spec.set_ylabel('Normalized Flux (Scale factor = {})'.format(1./scale_factor), fontproperties=font_m)
ax_resid.set_ylabel('Fit Residual Flux', fontproperties=font_m)
ax_spec.legend(frameon=False, prop=font_s)
fig.suptitle('Quick Fit for Initial Guess: %s (%s)'%(objname, specfile), fontproperties=font_l)
gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
if save:
outfile = io.get_outfile(outdir, specfile, '_minuit.pdf')
fig.savefig(outfile)
return fig
def plot_mcmc_spectrum_fit(spec, objname, specfile, scale_factor, model, covmodel, result, param_names, samples,\
ndraws=21, everyn=1):
"""
Plot the spectrum of the DA White Dwarf and the "best fit" model
The full fit parametrizes the covariance model using a stationary Gaussian
process as defined by :py:class:`WDmodel.covariance.WDmodel_CovModel`. The
posterior function constructed in
:py:class:`WDmodel.likelihood.WDmodel_Posterior` is evaluated by the
sampler in the :py:func:`WDmodel.fit.fit_model` method. The median value is
reported as the best-fit value for each of the fit parameters in
:py:attr:`WDmodel.likelihood.WDmodel_Likelihood.parameter_names`.
Parameters
----------
spec : :py:class:`numpy.recarray`
The spectrum. Must have
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8')]``
objname : str
object name - used to title plots
outdir : str
controls where the plot is written out if ``save=True``
specfile : str
Used in the title, and to set the name of the ``outfile`` if ``save=True``
scale_factor : float
factor by which the flux was scaled for y-axis label
model : :py:class:`WDmodel.WDmodel.WDmodel` instance
The DA White Dwarf SED model generator
covmodel : :py:class:`WDmodel.covariance.WDmodel_CovModel` instance
The parametrized model for the covariance of the spectrum ``spec``
result : dict
dictionary of parameters with keywords ``value``, ``fixed``, ``scale``,
``bounds`` for each. Same format as returned from
:py:func:`WDmodel.io.read_params`
param_names : array-like
Ordered list of free parameter names
samples : array-like
Samples from the flattened Markov Chain with shape ``(N, len(param_names))``
ndraws : int, optional
Number of draws to make from the Markov Chain to overplot. Higher
numbers provide a better sense of the uncertainty in the model at the
cost of speed and a larger, slower to render output plot.
everyn : int, optional
If the posterior function was evaluated using only every nth
observation from the data, this should be specified to visually
indicate the observations used.
Returns
-------
fig : :py:class:`matplotlib.figure.Figure` instance
The output figure
draws : array-like
The actual draws from the Markov Chain used in ``fig``
Notes
-----
It's faster to draw samples from the posterior in one location, and
pass along the same samples to all the methods in :py:mod:`WDmodel.viz`.
Consequently, most require ``draws`` as an input. This makes all the
plots connected, and none will return if an error is thrown here, but
this is the correct behavior as all of them are visualizing one aspect
of the same fit.
Each element of ``draws`` contains
* ``smoothedmod`` - the model spectrum
* ``wres`` - the prediction from the Gaussian process
* ``wres_err`` - the diagonal of the covariance matrix for the prediction from the Gaussian process
* ``full_mod`` - the full model SED, in order to compute the synthetic photometry
* ``out_draw`` - the dictionary of model parameters from this draw. Same format as ``result``.
"""
font_s = FM(size='small')
font_m = FM(size='medium')
font_l = FM(size='large')
fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(2, 1, height_ratios=[4,1])
ax_spec = fig.add_subplot(gs[0])
ax_resid = fig.add_subplot(gs[1])
ax_spec.fill_between(spec.wave, spec.flux+spec.flux_err, spec.flux-spec.flux_err,\
facecolor='grey', alpha=0.5, interpolate=True)
ax_spec.plot(spec.wave, spec.flux, color='black', linestyle='-', marker='None', label=specfile)
this_draw = io.copy_params(result)
draws = samples[np.random.randint(0, len(samples), ndraws),:]
pixel_scale = 1./np.median(np.gradient(spec.wave))
# plot one draw of the sample, bundled into a dict
def plot_one(this_draw, color='red', alpha=1., label=None, i=1):
teff = this_draw['teff']['value']
logg = this_draw['logg']['value']
av = this_draw['av']['value']
rv = this_draw['rv']['value']
dl = this_draw['dl']['value']
fwhm = this_draw['fwhm']['value']
fsig = this_draw['fsig']['value']
tau = this_draw['tau']['value']
fw = this_draw['fw']['value']
mod, full_mod = model._get_full_obs_model(teff, logg, av, fwhm, spec.wave,\
rv=rv, pixel_scale=pixel_scale)
smoothedmod = mod* (1./(4.*np.pi*(dl)**2.))
res = spec.flux - smoothedmod
wres, cov = covmodel.predict(spec.wave, res, spec.flux_err, fsig, tau, fw)
ax_spec.plot(spec.wave, smoothedmod+wres,\
color=color, linestyle='-',marker='None', alpha=alpha, label=label)
out_draw = io.copy_params(this_draw)
return smoothedmod, wres, cov, full_mod, out_draw
# for each draw, update the dict, and plot it
out = []
for i in range(ndraws):
for j, param in enumerate(param_names):
this_draw[param]['value'] = draws[i,j]
smoothedmod, wres, cov, full_mod, out_draw = plot_one(this_draw, color='orange', alpha=0.3, i=i)
wres_err = np.diag(cov)**0.5
out.append((smoothedmod, wres, wres_err, full_mod, out_draw))
outlabel = 'Model\n'
for param in result:
val = result[param]['value']
errp, errm = result[param]['errors_pm']
fixed = result[param]['fixed']
thislabel = '{} = {:.3f} '.format(param, val)
if not fixed:
thislabel += ' +{:.3f}/-{:.3f}'.format(errp, errm)
else:
thislabel = '[{} FIXED]'.format(thislabel)
thislabel +='\n'
outlabel += thislabel
# finally, overplot the best result draw as solid
smoothedmod, wres, cov, full_mod, out_draw = plot_one(result, color='red', alpha=1., label=outlabel)
wres_err = np.diag(cov)**0.5
out.append((smoothedmod, wres, wres_err, full_mod, out_draw))
# plot the residuals
ax_resid.fill_between(spec.wave, spec.flux-smoothedmod-wres+spec.flux_err, spec.flux-smoothedmod-wres-spec.flux_err,\
facecolor='grey', alpha=0.5, interpolate=True)
ax_resid.plot(spec.wave, spec.flux-smoothedmod-wres, linestyle='-', marker=None, color='black')
for draw in out[:-1]:
ax_resid.plot(spec.wave, draw[0]+draw[1]-smoothedmod-wres, linestyle='-',\
marker=None, alpha=0.3, color='orange')
if everyn != 1:
ax_spec.plot(spec.wave[::everyn], spec.flux[::everyn], color='blue', marker='o', ls='None',\
alpha=0.5, label='everyn:{:n}'.format(everyn))
ax_resid.plot(spec.wave[::everyn], (spec.flux-smoothedmod-wres)[::everyn], color='blue', marker='o',\
alpha=0.5, ls='None')
ax_resid.axhline(0., color='red', linestyle='--')
ax_resid.fill_between(spec.wave, +wres_err, -wres_err,\
facecolor='red', alpha=0.3, interpolate=True)
# label the axes
ax_resid.set_xlabel('Wavelength~(\AA)',fontproperties=font_m, ha='center')
ax_spec.set_ylabel('Normalized Flux (Scale factor = {})'.format(1./scale_factor), fontproperties=font_m)
ax_resid.set_ylabel('Fit Residual Flux', fontproperties=font_m)
ax_spec.legend(frameon=False, prop=font_s)
fig.suptitle('MCMC Fit: %s (%s)'%(objname, specfile), fontproperties=font_l)
gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
return fig, out
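# Illustrative unpacking of the draws returned above (names chosen to match the Notes in
# the docstring):
#   smoothedmod, wres, wres_err, full_mod, out_draw = draws[-1]
#   best_model = smoothedmod + wres   # model spectrum plus the Gaussian-process prediction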
def plot_mcmc_photometry_res(objname, phot, phot_dispersion, model, pbs, draws):
"""
Plot the observed DA white dwarf photometry as well as the "best-fit" model
magnitudes
Parameters
----------
objname : str
object name - used to title plots
phot : None or :py:class:`numpy.recarray`
The photometry. Must have
``dtype=[('pb', 'str'), ('mag', '<f8'), ('mag_err', '<f8')]``
phot_dispersion : float, optional
Excess photometric dispersion to add in quadrature with the
photometric uncertainties ``phot.mag_err``. Use if the errors are
grossly underestimated. Default is ``0.``
model : :py:class:`WDmodel.WDmodel.WDmodel` instance
The DA White Dwarf SED model generator
pbs : dict
Passband dictionary containing the passbands corresponding to
``phot.pb`` and generated by :py:func:`WDmodel.passband.get_pbmodel`.
draws : array-like
produced by :py:func:`plot_mcmc_spectrum_fit` - see notes for content.
Returns
-------
fig : :py:class:`matplotlib.figure.Figure` instance
The output figure
mag_draws : array-like
The magnitudes corresponding to the parameters ``draws`` from the Markov
Chain used in ``fig``
Notes
-----
Each element of ``mag_draws`` contains
* ``wres`` - the difference between the observed and synthetic magnitudes
* ``model_mags`` - the model magnitudes corresponding to the current model parameters
* ``mu`` - the flux normalization parameter that must be added to the ``model_mags``
See Also
--------
:py:func:`WDmodel.viz.plot_mcmc_spectrum_fit`
"""
font_s = FM(size='small')
font_m = FM(size='medium')
font_l = FM(size='large')
fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(2, 1, height_ratios=[4,1])
ax_phot = fig.add_subplot(gs[0])
ax_resid = fig.add_subplot(gs[1])
refwave = np.array([x[4] for x in pbs.values()])
npb = len(pbs)
pbind = np.arange(npb)
# plot one draw of the sample
def plot_draw(draw, color='red', alpha=1.0, label=None, linestyle='None'):
_, _, _, model_spec, params = draw
mu = params['mu']['value']
model_mags = passband.get_model_synmags(model_spec, pbs, mu=mu)
ax_phot.plot(refwave, model_mags.mag, color=color, alpha=alpha, marker='o', label=label, linestyle=linestyle)
res = phot.mag - model_mags.mag
return res, model_mags, mu
out = []
mag_draws = []
# plot the draws
for draw in draws[:-1]:
res, model_mags, mu = plot_draw(draw, color='orange', alpha=0.3)
out.append(res)
mag_draws.append((res, model_mags, mu))
# plot the magnitudes
ax_phot.errorbar(refwave, phot.mag, yerr=phot.mag_err, color='k', marker='o',\
linestyle='None', label='Observed Magnitudes')
res, model_mags, mu = plot_draw(draws[-1], color='red', alpha=1.0, label='Model Magnitudes', linestyle='--')
mag_draws.append((res, model_mags, mu))
# the draws are already samples from the posterior distribution - just take the median
out = np.array(out)
errs = np.median(np.abs(out), axis=0)
scaling = norm.ppf(3/4.)
errs/=scaling
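# Dividing the median absolute residual over draws by norm.ppf(0.75) rescales the MAD
# into an equivalent Gaussian sigma, giving a robust scatter estimate for the shaded band.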
# plot the residuals
ax_resid.fill_between(pbind, -errs, errs, interpolate=True, facecolor='orange', alpha=0.3)
ax_resid.errorbar(pbind, res, yerr=phot.mag_err, color='black', marker='o')
ax_resid.axhline(0., color='red', linestyle='--')
# flip the y axis since mags
ax_phot.invert_yaxis()
ax_resid.invert_yaxis()
# label the axes
ax_resid.set_xlim(-0.5,npb-0.5)
ax_resid.set_xticks(pbind)
ax_resid.set_xticklabels(list(pbs.keys()))
ax_resid.set_xlabel('Passband',fontproperties=font_m, ha='center')
ax_phot.set_xlabel('Wavelength',fontproperties=font_m, ha='center')
ax_phot.set_ylabel('Magnitude (Photometric dispersion = {})'.format(phot_dispersion), fontproperties=font_m)
ax_resid.set_ylabel('Residual (mag)', fontproperties=font_m)
ax_phot.legend(frameon=False, prop=font_s)
fig.suptitle('Photometry for {}'.format(objname), fontproperties=font_l)
gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
return fig, mag_draws
def plot_mcmc_spectrum_nogp_fit(spec, objname, specfile, scale_factor,\
cont_model, draws, covtype='Matern32', everyn=1):
"""
Plot the spectrum of the DA White Dwarf and the "best fit" model without
the Gaussian process
Unlike :py:func:`plot_mcmc_spectrum_fit` this version does not apply the
prediction from the Gaussian process to the spectrum model to match the
observed spectrum. This visualization is useful to indicate if the Gaussian
process - i.e. the kernel choice ``covtype`` used to parametrize the
covariance - is appropriate.
Parameters
----------
spec : :py:class:`numpy.recarray`
The spectrum. Must have
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8')]``
objname : str
object name - used to title plots
outdir : str
controls where the plot is written out if ``save=True``
specfile : str
Used in the title, and to set the name of the outfile if ``save=True``
scale_factor : float
factor by which the flux was scaled for y-axis label
cont_model : :py:class:`numpy.recarray`
The continuum model. Must have the same structure as ``spec``
Produced by :py:func:`WDmodel.fit.pre_process_spectrum`
draws : array-like
produced by :py:func:`plot_mcmc_spectrum_fit` - see notes for content.
covtype : ``{'Matern32', 'SHO', 'Exp', 'White'}``
stationary kernel type used to parametrize the covariance in
:py:class:`WDmodel.covariance.WDmodel_CovModel`
everyn : int, optional
If the posterior function was evaluated using only every nth
observation from the data, this should be specified to visually
indicate the observations used.
Returns
-------
fig : :py:class:`matplotlib.figure.Figure` instance
The output figure
See Also
--------
:py:func:`WDmodel.viz.plot_mcmc_spectrum_fit`
"""
font_s = FM(size='small')
font_m = FM(size='medium')
font_l = FM(size='large')
fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(2, 1, height_ratios=[4,1])
ax_spec = fig.add_subplot(gs[0])
ax_resid = fig.add_subplot(gs[1])
# plot the spectrum
ax_spec.fill_between(spec.wave, spec.flux+spec.flux_err, spec.flux-spec.flux_err,\
facecolor='grey', alpha=0.5, interpolate=True)
ax_spec.plot(spec.wave, spec.flux, color='black', linestyle='-', marker='None', label=specfile)
# plot the continuum model
ax_spec.plot(cont_model.wave, cont_model.flux, color='blue', linestyle='--', marker='None', label='Continuum')
# plot the residual without the covariance term
smoothedmod, wres, wres_err, _ , _ = draws[-1]
ax_resid.fill_between(spec.wave, wres+wres_err, wres-wres_err, facecolor='red', alpha=0.3, interpolate=True)
ax_resid.fill_between(spec.wave, spec.flux-smoothedmod+spec.flux_err, spec.flux-smoothedmod-spec.flux_err,\
facecolor='grey', alpha=0.5, interpolate=True)
ax_resid.plot(spec.wave, spec.flux - smoothedmod, color='black', linestyle='-', marker='None')
bestfit, bestres, _, _, _ = draws[-1]
def plot_draw(draw, color='red', alpha=1.0, label=None):
smoothedmod, wres, _, _, _ = draw
ax_resid.plot(spec.wave, wres+smoothedmod - bestfit, linestyle='-', marker=None, color=color, alpha=alpha)
ax_spec.plot(spec.wave, smoothedmod, color=color, linestyle='-', marker='None', alpha=alpha, label=label)
# plot each of the draws - we want to get a sense of the range of the covariance to plot wres
for draw in draws[:-1]:
plot_draw(draw, color='orange', alpha=0.3)
plot_draw(draws[-1], color='red', alpha=1.0, label='Model - no Covariance')
if everyn != 1:
smoothedmod, wres, _, _, _ = draws[-1]
ax_spec.plot(spec.wave[::everyn], spec.flux[::everyn], color='blue', marker='o', ls='None',\
alpha=0.5, label='everyn:{:n}'.format(everyn))
ax_resid.plot(spec.wave[::everyn], wres[::everyn], marker='o', color='blue', ls='None', alpha=0.5)
# label the axes
ax_resid.set_xlabel('Wavelength~(\AA)',fontproperties=font_m, ha='center')
ax_spec.set_ylabel('Normalized Flux (Scale factor = {})'.format(1./scale_factor), fontproperties=font_m)
ax_resid.set_ylabel('Fit Residual Flux', fontproperties=font_m)
ax_spec.legend(frameon=False, prop=font_s)
fig.suptitle('MCMC Fit - No {} Covariance: {} ({})'.format(covtype, objname, specfile), fontproperties=font_l)
gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
return fig
def plot_mcmc_line_fit(spec, linedata, model, cont_model, draws, balmer=None):
"""
Plot a comparison of the normalized hydrogen Balmer lines of the spectrum
and model
Note that we fit the full spectrum, not just the lines. The lines are
extracted using a coarse continuum fit in
:py:func:`WDmodel.fit.pre_process_spectrum`. This fit is purely cosmetic
and in no way contributes to the likelihood. It's particularly useful to
detect small velocity offsets or wavelength calibration errors.
Parameters
----------
spec : :py:class:`numpy.recarray`
The spectrum. Must have
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8')]``
linedata : :py:class:`numpy.recarray`
The observations of the spectrum corresponding to the hydrogen Balmer
lines. Must have
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8'), ('line_mask', 'i4'), ('line_ind', 'i4')]``
model : :py:class:`WDmodel.WDmodel.WDmodel` instance
The DA White Dwarf SED model generator
cont_model : :py:class:`numpy.recarray`
The continuum model. Must have the same structure as ``spec``
Produced by :py:func:`WDmodel.fit.pre_process_spectrum`
draws : array-like
produced by :py:func:`plot_mcmc_spectrum_fit` - see notes for content.
balmer : array-like, optional
list of Balmer lines to plot - elements must be in range ``[1, 6]``
These correspond to the lines defined in
:py:attr:`WDmodel.WDmodel.WDmodel._lines`. Default is ``range(1, 7)``
Returns
-------
fig : :py:class:`matplotlib.figure.Figure` instance
The output figure containing the line profile plot
fig2 : :py:class:`matplotlib.figure.Figure` instance
The output figure containing histograms of the line residuals
See Also
--------
:py:func:`WDmodel.viz.plot_mcmc_spectrum_fit`
"""
font_xs = FM(size='x-small')
font_s = FM(size='small')
font_m = FM(size='medium')
font_l = FM(size='large')
# create a figure for the line profiles
fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(1, 1)
ax_lines = fig.add_subplot(gs[0])
if balmer is None:
balmer = list(model._lines.keys())
# create another figure with separate axes for each of the lines
uselines = set(np.unique(linedata.line_mask)) & set(balmer)
nlines = len(uselines)
Tot = nlines + 1
Cols = 3
Rows = Tot // Cols
Rows += Tot % Cols
fig2 = plt.figure(figsize=(10,8))
gs2 = gridspec.GridSpec(Rows, Cols )
# get the default color cycle
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
colors = cycle(colors)
# plot the distribution of residuals for the entire spectrum
ax_resid = fig2.add_subplot(gs2[0])
smoothedmod, wres, _, _, _ = draws[-1]
res = spec.flux - smoothedmod - wres
hist(res, bins='knuth', density=True, histtype='stepfilled', color='grey', alpha=0.5, label='Residuals',ax=ax_resid)
ax_resid.axvline(0., color='red', linestyle='--')
# label the axes, rotate the tick labels, and get the xlim
ax_resid.set_xlabel('Fit Residual Flux', fontproperties=font_m)
ax_resid.set_ylabel('Norm', fontproperties=font_m)
ax_resid.legend(loc='upper left', frameon=False, prop=font_s)
plt.setp(ax_resid.get_xticklabels(), rotation=30, horizontalalignment='right')
(res_xmin, res_xmax) = ax_resid.get_xlim()
k = 1
for i, line in enumerate(np.unique(linedata.line_mask)):
if not line in balmer:
continue
# select this line
mask = (linedata.line_mask == line)
wave = linedata.wave[mask]
# restore the line properties
linename, W0, D, eps = model._lines[line]
# find the matching indices in the spectrum/continuum model that match the line
ind = np.searchsorted(cont_model.wave, wave)
this_line_cont = cont_model.flux[ind]
# shift the wavelength so the centroids are 0
shifted_wave = wave - W0
shifted_flux = linedata.flux[mask]/this_line_cont
shifted_ferr = linedata.flux_err[mask]/this_line_cont
# plot the lines, adding a small vertical offset between each
voff = 0.2*i
ax_lines.fill_between(shifted_wave, shifted_flux + voff + shifted_ferr, shifted_flux + voff - shifted_ferr,\
facecolor='grey', alpha=0.5, interpolate=True)
ax_lines.plot(shifted_wave, shifted_flux + voff, linestyle='-', marker='None', color='black')
# add a text label for each line
label = '{} ({:.2f})'.format(linename, W0)
ax_lines.text(shifted_wave[-1]+10 , shifted_flux[-1] + voff, label, fontproperties=font_xs,\
color='blue', va='top', ha='center', rotation=90)
# plot one of the draws
def plot_draw(draw, color='red', alpha=1.0):
smoothedmod, wres, _, _, _ = draw
line_model = (smoothedmod + wres)[ind]
line_model /= this_line_cont
line_model += voff
ax_lines.plot(shifted_wave, line_model, linestyle='-', marker='None', color=color, alpha=alpha)
# overplot the model
for draw in draws[:-1]:
plot_draw(draw, color='orange', alpha=0.3)
plot_draw(draws[-1], color='red', alpha=1.0)
# overplot the best model err as the bottom layer
bestmod, bestres, bestres_err, _, _ = draws[-1]
besthi = (bestmod + bestres + bestres_err)[ind]
bestlo = (bestmod + bestres - bestres_err)[ind]
besthi /= this_line_cont
bestlo /= this_line_cont
besthi += voff
bestlo += voff
ax_lines.fill_between(shifted_wave, besthi, bestlo,\
facecolor='red', alpha=0.3, interpolate=True, zorder=-1)
# plot the residuals of this line
ax_resid = fig2.add_subplot(gs2[k])
hist(linedata.flux[mask] - (smoothedmod + wres)[ind] , bins='knuth', density=True, ax=ax_resid,\
histtype='stepfilled', label=label, alpha=0.3, color=next(colors))
ax_resid.axvline(0., color='red', linestyle='--')
# label the axis and match the limits for the overall residuals
ax_resid.set_xlabel('Fit Residual Flux', fontproperties=font_m)
ax_resid.set_ylabel('Norm', fontproperties=font_m)
ax_resid.set_xlim((res_xmin, res_xmax))
ax_resid.legend(frameon=False, prop=font_s)
plt.setp(ax_resid.get_xticklabels(), rotation=30, horizontalalignment='right')
k+=1
# label the axes
ax_lines.set_xlabel(r'Delta Wavelength~(\AA)', fontproperties=font_m, ha='center')
ax_lines.set_ylabel('Normalized Flux', fontproperties=font_m)
fig.suptitle('Line Profiles', fontproperties=font_l)
fig2.suptitle('Residual Distributions', fontproperties=font_l)
gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
gs2.tight_layout(fig2, rect=[0, 0.03, 1, 0.95])
return fig, fig2
def plot_mcmc_model(spec, phot, linedata, scale_factor, phot_dispersion,\
objname, outdir, specfile,\
model, covmodel, cont_model, pbs,\
params, param_names, samples, samples_lnprob,\
covtype='Matern32', balmer=None, ndraws=21, everyn=1, savefig=False):
"""
Make all the plots to visualize the full fit of the DA White Dwarf data
Wraps :py:func:`plot_mcmc_spectrum_fit`,
:py:func:`plot_mcmc_photometry_res`,
:py:func:`plot_mcmc_spectrum_nogp_fit`, :py:func:`plot_mcmc_line_fit` and
:py:func:`corner.corner` and saves all the plots to a combined PDF, and
optionally individual PDFs.
Parameters
----------
spec : :py:class:`numpy.recarray`
The spectrum. Must have
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8')]``
phot : None or :py:class:`numpy.recarray`
The photometry. Must have
``dtype=[('pb', 'str'), ('mag', '<f8'), ('mag_err', '<f8')]``
linedata : :py:class:`numpy.recarray`
The observations of the spectrum corresponding to the hydrogen Balmer
lines. Must have
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8'), ('line_mask', 'i4'), ('line_ind', 'i4')]``
scale_factor : float
factor by which the flux was scaled; used for the y-axis label
phot_dispersion : float, optional
Excess photometric dispersion to add in quadrature with the
photometric uncertainties ``phot.mag_err``. Use if the errors are
grossly underestimated. Default is ``0.``
objname : str
object name - used to title plots
outdir : str
controls where the plot is written out if ``savefig=True``
specfile : str
Used in the title, and to set the name of the ``outfile`` if ``savefig=True``
model : :py:class:`WDmodel.WDmodel.WDmodel` instance
The DA White Dwarf SED model generator
covmodel : :py:class:`WDmodel.covariance.WDmodel_CovModel` instance
The parametrized model for the covariance of the spectrum ``spec``
cont_model : :py:class:`numpy.recarray`
The continuum model. Must have the same structure as ``spec``
Produced by :py:func:`WDmodel.fit.pre_process_spectrum`
pbs : dict
Passband dictionary containing the passbands corresponding to
``phot.pb`` and generated by :py:func:`WDmodel.passband.get_pbmodel`.
params : dict
dictionary of parameters with keywords ``value``, ``fixed``, ``scale``,
``bounds`` for each. Same format as returned from
:py:func:`WDmodel.io.read_params`
param_names : array-like
Ordered list of free parameter names
samples : array-like
Samples from the flattened Markov Chain with shape ``(N, len(param_names))``
samples_lnprob : array-like
Log Posterior corresponding to ``samples`` from the flattened Markov
Chain with shape ``(N,)``
covtype : ``{'Matern32', 'SHO', 'Exp', 'White'}``
stationary kernel type used to parametrize the covariance in
:py:class:`WDmodel.covariance.WDmodel_CovModel`
balmer : array-like, optional
list of Balmer lines to plot - elements must be in range ``[1, 6]``
These correspond to the lines defined in
:py:attr:`WDmodel.WDmodel.WDmodel._lines`. Default is ``range(1, 7)``
ndraws : int, optional
Number of draws to make from the Markov Chain to overplot. Higher
numbers provide a better sense of the uncertainty in the model at the
cost of speed and a larger, slower to render output plot.
everyn : int, optional
If the posterior function was evaluated using only every nth
observation from the data, this should be specified to visually
indicate the observations used.
savefig : bool
if True, save the individual figures
Returns
-------
model_spec : :py:class:`numpy.recarray`
The model spectrum. Has
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8'), ('norm_flux', '<f8')]``
and same shape as input ``spec``. The ``norm_flux`` attribute has the
model flux without the Gaussian process prediction applied.
SED_model : :py:class:`numpy.recarray`
The SED model spectrum. Has
``dtype=[('wave', '<f8'), ('flux', '<f8'), ('flux_err', '<f8')]``
model_mags : None or :py:class:`numpy.recarray`
If there is observed photometry, this contains the model magnitudes.
Has ``dtype=[('pb', 'str'), ('mag', '<f8')]``
"""
draws = None
mag_draws = None
outfilename = io.get_outfile(outdir, specfile, '_mcmc.pdf')
with PdfPages(outfilename) as pdf:
# plot spectrum and model
fig, draws = plot_mcmc_spectrum_fit(spec, objname, specfile, scale_factor,\
model, covmodel, params, param_names, samples,\
ndraws=ndraws, everyn=everyn)
if savefig:
outfile = io.get_outfile(outdir, specfile, '_mcmc_spectrum.pdf')
fig.savefig(outfile)
pdf.savefig(fig)
# plot the photometry and residuals if we actually fit it, else skip
if phot is not None:
fig, mag_draws = plot_mcmc_photometry_res(objname, phot, phot_dispersion, model, pbs, draws)
if savefig:
outfile = io.get_outfile(outdir, specfile, '_mcmc_phot.pdf')
fig.savefig(outfile)
pdf.savefig(fig)
# plot continuum, model and draws without gp
fig = plot_mcmc_spectrum_nogp_fit(spec, objname, specfile, scale_factor,\
cont_model, draws, covtype=covtype, everyn=everyn)
if savefig:
outfile = io.get_outfile(outdir, specfile, '_mcmc_nogp.pdf')
fig.savefig(outfile)
pdf.savefig(fig)
# plot lines
fig, fig2 = plot_mcmc_line_fit(spec, linedata, model, cont_model, draws, balmer=balmer)
if savefig:
outfile = io.get_outfile(outdir, specfile, '_mcmc_lines.pdf')
fig.savefig(outfile)
outfile = io.get_outfile(outdir, specfile, '_mcmc_resids.pdf')
fig2.savefig(outfile)
pdf.savefig(fig)
pdf.savefig(fig2)
# plot corner plot
fig = corner.corner(samples, bins=51, labels=param_names, show_titles=True,quantiles=(0.16,0.84), smooth=1.)
if savefig:
outfile = io.get_outfile(outdir, specfile, '_mcmc_corner.pdf')
fig.savefig(outfile)
pdf.savefig(fig)
message = "Wrote output plot file {}".format(outfilename)
print(message)
#endwith
smoothedmod, wres, wres_err, full_mod, best_params = draws[-1]
res_spec = []
res_mod = []
bestmu = best_params['mu']['value']
full_mod.flux*=(10**(-0.4*bestmu))
for draw in draws[:-1]:
ts, tr, trerr, tm, draw_params = draw
mu = draw_params['mu']['value']
tm.flux*=(10**(-0.4*mu))
res_spec.append((ts+tr-smoothedmod-wres))
res_mod.append((tm.flux - full_mod.flux))
res_spec = np.vstack(res_spec)
res_mod = np.vstack(res_mod)
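# The median absolute deviation (MAD) of the per-draw residuals is converted to
# a robust 1-sigma estimate: for a Gaussian, sigma = MAD / norm.ppf(0.75),
# i.e. MAD is divided by ~0.6745 (equivalently sigma ~ 1.4826 * MAD).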
mad_spec = np.median(np.abs(res_spec), axis=0)
mad_mod = np.median(np.abs(res_mod), axis=0)
scaling = norm.ppf(3/4.)
sigma_spec = mad_spec/scaling
sigma_mod = mad_mod/scaling
names=str('wave,flux,flux_err,norm_flux')
model_spec = np.rec.fromarrays((spec.wave, smoothedmod+wres, sigma_spec, smoothedmod), names=names)
names=str('wave,flux,flux_err')
SED_model = np.rec.fromarrays((full_mod.wave, full_mod.flux, sigma_mod), names=names)
if mag_draws is not None:
_, model_mags, _ = mag_draws[-1]
else:
model_mags = None
return model_spec, SED_model, model_mags
|
gnarayan/WDmodel
|
WDmodel/viz.py
|
Python
|
gpl-3.0
| 36,265
|
[
"Gaussian"
] |
fc4c6bf6b32daeb12380b30897ddd6fce376c19e739a566cf87a2454674c9d28
|
#Robot Framework JMeter Library
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#to generate libdoc documentation run:
# python -m robot.libdoc JMeterLib JMeterLib.html
import JMeterClasses
class JMeterLib(JMeterClasses.JMeterKeywords):
"""
This library provides a simple way to integrate Robot Framework and JMeter. JTL output
files can be analysed and converted to HTML, a Python dictionary or SQLite format.
Version 1.2 released on 29th of December 2017.
What's new:
- adapted to new csv log format
Following software versions were used during development:
- Python-2.7.14
- robotframework-3.0.2
- robotframework-ride-1.5.2.1
- jmeter 2.12
- jmeter 3.3
Author: Marcin Kowalczyk
Website: http://sourceforge.net/projects/rf-jmeter-py/
Installation:
- run command: pip install robotframework-jmeterlibrary
OR
- download, unzip and run command: python setup.py install
Example for running JMeter and parsing results in single keyword:
| run jmeter analyse jtl convert | D:/apache-jmeter-2.12/bin/jmeter.bat | D:/Tests/Test1Thread1Loop.jmx | D:/Tests/output1.jtl |
Example for running JMeter and parsing results in separate keyword:
| ${logPath}= | set variable | D:/Tests/output1.jtl | |
| run jmeter | D:/apache-jmeter-2.12/bin/jmeter.bat | D:/Tests/Test1Thread1Loop.jmx | ${logPath} |
| analyse jtl convert | ${logPath} | | |
Example for reading parsed contents:
| ${result} | analyse jtl convert | ${logPath} | |
| log | ${result} | | |
| : FOR | ${ELEMENT} | IN | @{result} |
| | log dictionary | ${ELEMENT} | |
"""
def __init__(self):
pass
if __name__ == '__main__':
mainMsg = "robotframework-jmeterlib is a Robot Framework library " \
"for starting JMeter and parsing JMeter logs. For " \
"instruction on how to use please visit" \
" https://github.com/kowalpy/Robot-Framework-JMeter-Library"
print(mainMsg)
|
kowalpy/Robot-Framework-JMeter-Library
|
JMeterLib.py
|
Python
|
lgpl-3.0
| 2,485
|
[
"VisIt"
] |
222762a0bd66afbba3f7a2cf8f05656a0cdde038f3392eeb772a30cd5e0898ec
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of electric multipole moments based on data parsed by cclib."""
import sys
from collections.abc import Iterable
import numpy
from cclib.parser.utils import convertor
from cclib.method.calculationmethod import Method
class Moments(Method):
"""A class used to calculate electric multipole moments.
The obtained results are stored in the `results` attribute as a
dictionary whose keys denote the charge population scheme used.
"""
def __init__(self, data):
self.required_attrs = ('atomcoords', 'atomcharges')
self.results = {}
super(Moments, self).__init__(data)
def __str__(self):
"""Returns a string representation of the object."""
return "Multipole moments of %s" % (self.data)
def __repr__(self):
"""Returns a representation of the object."""
return 'Moments("%s")' % (self.data)
def _calculate_dipole(self, charges, coords, origin):
"""Calculate the dipole moment from the given atomic charges
and their coordinates with respect to the origin.
"""
transl_coords_au = convertor(coords - origin, 'Angstrom', 'bohr')
dipole = numpy.dot(charges, transl_coords_au)
return convertor(dipole, 'ebohr', 'Debye')
def _calculate_quadrupole(self, charges, coords, origin):
"""Calculate the traceless quadrupole moment from the given
atomic charges and their coordinates with respect to the origin.
"""
transl_coords_au = convertor(coords - origin, 'Angstrom', 'bohr')
delta = numpy.eye(3)
Q = numpy.zeros([3, 3])
for i in range(3):
for j in range(3):
for q, r in zip(charges, transl_coords_au):
Q[i,j] += 1/2 * q * (3 * r[i] * r[j] - \
numpy.linalg.norm(r)**2 * delta[i,j])
triu_idxs = numpy.triu_indices_from(Q)
raveled_idxs = numpy.ravel_multi_index(triu_idxs, Q.shape)
quadrupole = numpy.take(Q.flatten(), raveled_idxs)
return convertor(quadrupole, 'ebohr2', 'Buckingham')
def calculate(self, origin='nuccharge', population='mulliken',
masses=None):
"""Calculate electric dipole and quadrupole moments using parsed
partial atomic charges.
Inputs:
origin - a choice of the origin of coordinate system. Can be
either a three-element iterable or a string. If
iterable, then it explicitly defines the origin (in
Angstrom). If string, then the value can be any one of
the following and it describes what is used as the
origin:
* 'nuccharge' -- center of positive nuclear charge
* 'mass' -- center of mass
population - a type of population analysis used to extract
corresponding atomic charges from the output file.
masses - if None, then use default atomic masses. Otherwise,
the user-provided masses will be used.
Returns:
A list where the first element is the origin of coordinates,
while other elements are dipole and quadrupole moments
expressed in terms of Debye and Buckingham units
respectively.
Raises:
ValueError when an argument with incorrect value or of
inappropriate type is passed to a method.
Notes:
To calculate the quadrupole moment the Buckingham definition
[1]_ is chosen. Hirschfelder et al. [2]_ define it two times
as much.
References:
.. [1] Buckingham, A. D. (1959). Molecular quadrupole moments.
Quarterly Reviews, Chemical Society, 13(3), 183.
https://doi.org/10.1039/qr9591300183.
.. [2] Hirschfelder J. O., Curtiss C. F. and Bird R. B. (1954).
The Molecular Theory of Gases and Liquids. New York: Wiley.
"""
coords = self.data.atomcoords[-1]
try:
charges = self.data.atomcharges[population]
except KeyError as e:
msg = ("charges coming from requested population analysis"
"scheme are not parsed")
raise ValueError(msg, e)
if isinstance(origin, Iterable) and not isinstance(origin, str):
origin_pos = numpy.asarray(origin)
elif origin == 'nuccharge':
origin_pos = numpy.average(coords, weights=self.data.atomnos, axis=0)
elif origin == 'mass':
if masses:
atommasses = numpy.asarray(masses)
else:
try:
atommasses = self.data.atommasses
except AttributeError as e:
msg = ("atomic masses were not parsed, consider provide "
"'masses' argument instead")
raise ValueError(msg, e)
origin_pos = numpy.average(coords, weights=atommasses, axis=0)
else:
raise ValueError("{} is invalid value for 'origin'".format(origin))
dipole = self._calculate_dipole(charges, coords, origin_pos)
quadrupole = self._calculate_quadrupole(charges, coords, origin_pos)
rv = [origin_pos, dipole, quadrupole]
self.results.update({population: rv})
return rv
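# Illustrative usage sketch (the file name is hypothetical; the requested
# population scheme must actually be present in data.atomcharges):
#   from cclib.io import ccread
#   data = ccread("water.log")
#   origin, dipole, quadrupole = Moments(data).calculate(origin='mass',
#                                                        population='mulliken')
#   # dipole: length-3 vector in Debye; quadrupole: the six unique components
#   # of the traceless tensor in Buckingham units.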
|
langner/cclib
|
cclib/method/moments.py
|
Python
|
bsd-3-clause
| 5,571
|
[
"cclib"
] |
08bcb2997ea9550489cdcfc7cda805779f0209eeb8577cf8550de3c87d13ee5d
|
def update_param(name, param):
if name == 'distribution':
param['values'].remove('ordinal')
return param
return None # param untouched
extensions = dict(
extra_params=[('verbose', 'FALSE')],
validate_params="""
# Required maps for different names params, including deprecated params
.gbm.map <- c("x" = "ignored_columns",
"y" = "response_column")
"""
)
doc = dict(
preamble="""
Build gradient boosted classification or regression trees
Builds gradient boosted classification trees and gradient boosted regression trees on a parsed data set.
The default distribution function will guess the model type based on the response column type.
In order to run properly, the response column must be a numeric for "gaussian" or an
enum for "bernoulli" or "multinomial".
""",
params=dict(
verbose="""
\code{Logical}. Print scoring history to the console (Metrics per tree). Defaults to FALSE.
"""
),
seealso="""
\code{\link{predict.H2OModel}} for prediction
""",
examples="""
library(h2o)
h2o.init()
# Run regression GBM on australia data
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
independent <- c("premax", "salmax", "minairtemp", "maxairtemp", "maxsst",
"maxsoilmoist", "Max_czcs")
dependent <- "runoffnew"
h2o.gbm(y = dependent, x = independent, training_frame = australia,
ntrees = 3, max_depth = 3, min_rows = 2)
"""
)
|
michalkurka/h2o-3
|
h2o-bindings/bin/custom/R/gen_gbm.py
|
Python
|
apache-2.0
| 1,504
|
[
"Gaussian"
] |
67082deef2457ef7f8abfdf16eefd4ba01c89642700df204e79078801a896de0
|
"""An Image file reader object.
"""
# Author: KK Rai (kk.rai [at] iitb.ac.in)
# R. Ambareesha (ambareesha [at] iitb.ac.in)
# Chandrashekhar Kaushik
# Suyog Dutt Jain <suyog.jain [at] aero.iitb.ac.in>
# Prabhu Ramachandran <prabhu [at] aero.iitb.ac.in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
from os.path import basename
# Enthought library imports.
from traits.api import Instance, Str, Dict
from traitsui.api import View, Group, Item, Include
from tvtk.api import tvtk
# Local imports.
from mayavi.core.file_data_source import FileDataSource
from mayavi.core.pipeline_info import PipelineInfo
########################################################################
# `ImageReader` class
########################################################################
class ImageReader(FileDataSource):
"""A Image file reader. The reader supports all the
different types of Image files.
"""
# The version of this class. Used for persistence.
__version__ = 0
# The Image data file reader.
reader = Instance(tvtk.Object, allow_none=False, record=True)
# Information about what this object can produce.
output_info = PipelineInfo(datasets=['image_data'])
# Our view.
view = View(Group(Include('time_step_group'),
Item(name='base_file_name'),
Item(name='reader',
style='custom',
resizable=True),
show_labels=False),
resizable=True)
######################################################################
# Private Traits
_image_reader_dict = Dict(Str, Instance(tvtk.Object))
######################################################################
# `object` interface
######################################################################
def __init__(self, **traits):
d = {'bmp':tvtk.BMPReader(),
'jpg':tvtk.JPEGReader(),
'png':tvtk.PNGReader(),
'pnm':tvtk.PNMReader(),
'dcm':tvtk.DICOMImageReader(),
'tiff':tvtk.TIFFReader(),
'ximg':tvtk.GESignaReader(),
'dem':tvtk.DEMReader(),
'mha':tvtk.MetaImageReader(),
'mhd':tvtk.MetaImageReader(),
}
# Account for pre 5.2 VTK versions, without MINC reader
if hasattr(tvtk, 'MINCImageReader'):
d['mnc'] = tvtk.MINCImageReader()
d['jpeg'] = d['jpg']
self._image_reader_dict = d
# Call parent class' init.
super(ImageReader, self).__init__(**traits)
def __set_pure_state__(self, state):
# The reader has its own file_name which needs to be fixed.
state.reader.file_name = state.file_path.abs_pth
# Now call the parent class to setup everything.
super(ImageReader, self).__set_pure_state__(state)
######################################################################
# `FileDataSource` interface
######################################################################
def update(self):
self.reader.update()
if len(self.file_path.get()) == 0:
return
self.render()
def has_output_port(self):
""" Return True as the reader has output port."""
return True
def get_output_object(self):
""" Return the reader output port."""
return self.reader.output_port
######################################################################
# Non-public interface
######################################################################
def _file_path_changed(self, fpath):
value = fpath.get()
if len(value) == 0:
return
# Extract the file extension
splitname = value.strip().split('.')
extension = splitname[-1].lower()
# Select image reader based on file type
old_reader = self.reader
if extension in self._image_reader_dict:
self.reader = self._image_reader_dict[extension]
else:
self.reader = tvtk.ImageReader()
self.reader.file_name = value.strip()
self.reader.update()
self.reader.update_information()
if old_reader is not None:
old_reader.on_trait_change(self.render, remove=True)
self.reader.on_trait_change(self.render)
self.outputs = [self.reader.output]
# Change our name on the tree view
self.name = self._get_name()
def _get_name(self):
""" Returns the name to display on the tree view. Note that
this is not a property getter.
"""
fname = basename(self.file_path.get())
ret = "%s"%fname
if len(self.file_list) > 1:
ret += " (timeseries)"
if '[Hidden]' in self.name:
ret += ' [Hidden]'
return ret
|
liulion/mayavi
|
mayavi/sources/image_reader.py
|
Python
|
bsd-3-clause
| 4,931
|
[
"Mayavi",
"VTK"
] |
b2c59313c4eea0026c1aeee39b5d44f0af7315e56391a6392cef8342a969be7a
|
#!/usr/bin/env python
import os.path
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--lammps', type=str, nargs='+',
help='directories containing LAMMPS simulation files', default=[])
parser.add_argument('--espp', type=str, nargs='+',
help='directories containing ESPResSo++ simulation files', default=[])
parser.add_argument('--rate', type=float, default=0.1)
parser.add_argument('--NC', type=int, default=2500)
parser.add_argument('--NX', type=int, default=1000)
parser.add_argument('--dt', type=float, default=0.0025)
parser.add_argument('--dump-interval', type=int, default=200)
args = parser.parse_args()
from io import StringIO
import gzip
import numpy as np
import matplotlib.pyplot as plt
import h5py
plt.rcParams['figure.figsize']= (10,6)
plt.rcParams['font.size'] = 18
plt.rcParams['lines.linewidth'] = 2
def get_file(f):
"""Iterator over the timeframe in a lammps dump"""
sio = StringIO()
first = True
finish = False
while True:
l = f.readline()
if len(l)==0:
finish=True
if l.strip()=='ITEM: TIMESTEP' or finish:
if first:
first = False
else:
sio.seek(0)
yield sio
if finish: return
sio = StringIO()
sio.write(l.decode() if isinstance(l, bytes) else l)  # handle bytes from gzip under Python 3
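# The loop below reduces every LAMMPS dump frame to a per-state histogram
# (np.bincount of the integer states, minlength=6), normalises it by NX and
# plots it against k*t = step * dt * dump_interval * rate.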
for d in args.lammps:
zf = gzip.open(os.path.join(d,'nb.txt.gz'), 'r')
nb = []
for onef in get_file(zf):
nb.append(np.bincount(np.loadtxt(onef, skiprows=9, unpack=True, dtype=int), minlength=6))
nb = np.array(nb)/float(args.NX)
k_time = np.arange(nb.shape[0])*(args.dt*args.dump_interval*args.rate)
plt.plot(k_time,nb)
zf.close()
for d in args.espp:
a = h5py.File(os.path.join(d, 'dump.h5'), 'r')
sc_time = a['/observables/statecount/time'][:]
sc = a['/observables/statecount/value'][:]/float(args.NX)
plt.plot(sc_time*args.rate, sc)
a.close()
plt.xlabel(r'$k t$')
plt.show()
|
pdebuyl/cg_md_polymerization
|
code/analyse_epoxy.py
|
Python
|
bsd-3-clause
| 2,007
|
[
"ESPResSo",
"LAMMPS"
] |
07a7b7d8f538ce76765a798067d8a9f83c1046464d1a11ae649bf5c3e53d432d
|
from __future__ import print_function
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print('Wrote json to %s.' % filename)
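# The resulting dump.json maps each fully qualified target to its direct
# dependencies, e.g. (illustrative):
#   {"some/dir/foo.gyp:foo#target": ["some/dir/bar.gyp:bar#target"], ...}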
|
giovannic/giovannic.github.com
|
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
|
Python
|
mit
| 3,465
|
[
"VisIt"
] |
5ff428825ee76a0ba3001a12c737ae25213f2d80d5482ed8d6ef227af6e1a2fb
|
"""Fast Attention Models utilities."""
import abc
import enum
import functools
from typing import Any, Callable, Dict, Optional, Tuple, Iterable
from absl import logging
from flax import linen as nn
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as np
from scenic.model_lib.layers import attention_layers
Initializer = Callable[[jnp.ndarray, Iterable[int], jnp.dtype], jnp.ndarray]
class LinformerEncoderAttention(nn.Module):
"""Linformer Encoder only multi-head dot-product self-attention.
Attributes:
num_heads: Number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
qkv_features: Dimension of the key, query, and value.
out_features: Dimension of the last projection.
broadcast_dropout: Use a broadcasted dropout along batch dims.
dropout_rate: Dropout rate.
kernel_init: Initializer for the kernel of the Dense layers.
bias_init: Initializer for the bias of the Dense layers.
bias: Whether pointwise QKVO dense transforms use bias.
dtype: The dtype of the computation.
precision: Numerical precision of the computation see `jax.lax.Precision`
for details.
low_rank_features: Low rank features.
proj_mode: Supports "linear", "mlp", or "cnn" projections.
downsample: Supports downsampling query too.
proj_configs: Configurations used in the low-rank projection.
qk_attention_fn: A function that given multi-headed key, query, and value
computes the attention and generates the new values.
"""
num_heads: int
qkv_features: Optional[int] = None
out_features: Optional[int] = None
broadcast_dropout: bool = True
dropout_rate: float = 0.1
kernel_init: Initializer = nn.linear.default_kernel_init
bias_init: Initializer = nn.initializers.zeros
use_bias: bool = True
dtype: jnp.dtype = jnp.float32
precision: Optional[jax.lax.Precision] = None
low_rank_features: int = 8
proj_mode: str = 'linear'
downsample: bool = False
proj_configs: Optional[Dict[Any, Any]] = None
qk_attention_fn: Callable[
..., jnp.ndarray] = attention_layers.dot_product_attention
@nn.compact
def __call__(self,
inputs_q: jnp.ndarray,
inputs_kv: Optional[jnp.ndarray] = None,
*,
deterministic: bool) -> jnp.ndarray:
"""Applies Linformer multi-head dot product self-attention.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
Args:
inputs_q: Input query of shape `[batch_sizes..., length, features]`.
inputs_kv: Input key-value, which is ignored in Linformer.
deterministic: Whether the model is run in deterministic mode (if so, do
not apply dropout).
Returns:
Output of shape `[batch_sizes..., length, features]`.
"""
if inputs_kv is not None:
logging.warning(
'Ignoring inputs_kv as Linformer only supports self-attention.')
x = inputs_q
features = self.out_features or x.shape[-1]
qkv_features = self.qkv_features or x.shape[-1]
assert qkv_features % self.num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // self.num_heads
# Project inputs_q to multi-headed q/k/v.
# Dimensions are then [bs, dims..., n_heads, n_features_per_head].
dense = functools.partial(
nn.DenseGeneral,
features=(self.num_heads, head_dim),
axis=-1,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
precision=self.precision)
query, key, value = (dense(dtype=self.dtype, name='query')(x),
dense(dtype=self.dtype, name='key')(x),
dense(dtype=self.dtype, name='value')(x))
def _linear_low_rank_projection(key,
value,
features,
activation=None,
transpose=True,
query=None):
# By default, shared projections.
if transpose:
# Transpose if input is already transposed.
key = key.transpose((0, 3, 2, 1))
value = value.transpose((0, 3, 2, 1))
if query is not None:
query = query.transpose((0, 3, 2, 1))
dense_proj = functools.partial(
nn.DenseGeneral,
features=features,
axis=-1,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=None,
precision=self.precision,
dtype=self.dtype)
shared_dense_proj = dense_proj()
key = shared_dense_proj(key)
value = shared_dense_proj(value)
if query is not None:
query = shared_dense_proj(query)
if activation is not None:
key = activation(key)
value = activation(value)
if query is not None:
query = activation(query)
if transpose:
key = key.transpose((0, 3, 2, 1))
value = value.transpose((0, 3, 2, 1))
if query is not None:
query = query.transpose((0, 3, 2, 1))
return key, value, query
def _mlp_low_rank_projection(key, value, features):
"""MLP-based low rank projection function."""
# Handle transpose outside (before and after linear low rank projections).
key = key.transpose((0, 3, 2, 1))
value = value.transpose((0, 3, 2, 1))
for f in features[:-1]:
key, value, _ = _linear_low_rank_projection(
key, value, features=f, activation=nn.relu, transpose=False)
# Don't apply activation on the last layer.
key, value, _ = _linear_low_rank_projection(
key, value, features=features[-1], activation=None, transpose=False)
key = key.transpose((0, 3, 2, 1))
value = value.transpose((0, 3, 2, 1))
return key, value
if self.proj_mode == 'linear':
logging.info('Using linear low-rank projectors')
if self.downsample:
key, value, query = _linear_low_rank_projection(
key,
value,
features=self.low_rank_features,
transpose=True,
query=query)
else:
key, value, _ = _linear_low_rank_projection(
key, value, features=self.low_rank_features, transpose=True)
elif self.proj_mode == 'mlp':
# Note: the MLP projection mode does not support downsampling.
logging.info('Using MLP low-rank projectors')
key, value = _mlp_low_rank_projection(
key, value, features=[self.low_rank_features, self.low_rank_features])
else:
raise NotImplementedError('This low-rank projection is not supported.')
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout_rate > 0.:
dropout_rng = self.make_rng('dropout')
# Apply attention.
x = self.qk_attention_fn(
query,
key,
value,
bias=attention_bias,
broadcast_dropout=self.broadcast_dropout,
dropout_rng=dropout_rng,
dropout_rate=self.dropout_rate,
deterministic=deterministic,
dtype=self.dtype,
precision=self.precision)
# Project back to the original inputs dimensions.
out = nn.DenseGeneral(
features=features,
axis=(-2, -1),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
name='out')(
x)
return out
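# Illustrative usage sketch (assumed shapes; would run inside a parent flax
# module or via .init()/.apply()):
#   attn = LinformerEncoderAttention(num_heads=8, qkv_features=256,
#                                    low_rank_features=64, proj_mode='linear')
#   y = attn(inputs_q=x, deterministic=True)  # x: [batch, length, features]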
class PerformerEncoderAttention(nn.Module):
"""Encoder only multi-head dot-product self-attention based on Performer.
based on: https://arxiv.org/abs/2009.14794
Attributes:
num_heads: Number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
qkv_features: Dimension of the key, query, and value.
out_features: Dimension of the last projection.
broadcast_dropout: Use a broadcasted dropout along batch dims.
dropout_rate: Dropout rate.
kernel_init: Initializer for the kernel of the Dense layers.
bias_init: Initializer for the bias of the Dense layers.
use_bias: Whether pointwise QKVO dense transforms use bias.
dtype: The dtype of the computation.
precision: Numerical precision of the computation see `jax.lax.Precision`
for details.
attention_fn_cls: Name of the attention function that is used by performer,
which can be 'softmax' or 'generalized'.
attention_fn_configs: Configurations that are passed to the Performer
attention function.
"""
num_heads: int
qkv_features: Optional[int] = None
out_features: Optional[int] = None
broadcast_dropout: bool = True
dropout_rate: float = 0.1
kernel_init: Initializer = nn.linear.default_kernel_init
bias_init: Initializer = nn.initializers.zeros
use_bias: bool = True
dtype: jnp.dtype = jnp.float32
precision: Optional[jax.lax.Precision] = None
attention_fn_cls: str = 'generalized'
attention_fn_configs: Optional[Dict[Any, Any]] = None
@nn.compact
def __call__(self, inputs_q: jnp.ndarray, inputs_kv: Optional[jnp.ndarray], *,
deterministic: bool) -> jnp.ndarray:
"""Applies multi-head dot product self-attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
Args:
inputs_q: Input of shape `[batch_sizes..., length, features]`.
inputs_kv: Memory input of shape `[batch_sizes..., kv length, features]`.
deterministic: Whether the model is running in deterministic mode (if so,
do not apply dropout).
Returns:
Output of shape `[batch_sizes..., length, features]`.
"""
qkv_features = self.qkv_features or inputs_q.shape[-1]
assert qkv_features % self.num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
if self.attention_fn_cls == 'softmax':
qk_attention_fn = make_fast_softmax_attention
elif self.attention_fn_cls == 'generalized':
qk_attention_fn = make_fast_generalized_attention
else:
raise ValueError(f'Unknown attention_fn_cls: {self.attention_fn_cls}.')
qk_attention_fn = (
qk_attention_fn if self.attention_fn_configs is None else
functools.partial(qk_attention_fn, **self.attention_fn_configs)) # pylint: disable=not-a-mapping
return attention_layers.MultiHeadAttention(
num_heads=self.num_heads,
qkv_features=qkv_features,
out_features=self.out_features,
broadcast_dropout=self.broadcast_dropout,
dropout_rate=self.dropout_rate,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
attention_fn=qk_attention_fn(
qkv_features // self.num_heads, unidirectional=False),
)(inputs_q=inputs_q, inputs_kv=inputs_kv, deterministic=deterministic)
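# Illustrative usage sketch (assumed shapes; would run inside a parent flax
# module or via .init()/.apply()):
#   attn = PerformerEncoderAttention(num_heads=8, attention_fn_cls='softmax',
#                                    attention_fn_configs={'nb_features': 256})
#   y = attn(inputs_q=x, inputs_kv=None, deterministic=True)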
class AttentionFunctionName(enum.Enum):
"""Defines name assigned to self attention modules."""
STANDARD = 'standard'
LINFORMER = 'linformer'
PERFORMER = 'performer'
def _get_attention_module(name: str, is_self_attention=True) -> Any:
"""Returns an attention module."""
function_name = AttentionFunctionName(name)
if function_name == AttentionFunctionName.STANDARD:
return attention_layers.MultiHeadAttention
elif function_name == AttentionFunctionName.LINFORMER:
if not is_self_attention:
raise NotImplementedError
else:
return LinformerEncoderAttention
elif function_name == AttentionFunctionName.PERFORMER:
return PerformerEncoderAttention
def _get_variant_args(name: str) -> Any:
"""Return self-attention variant specific list of attn args."""
standard_args = [
'num_heads', 'x', 'qkv_features', 'out_features', 'broadcast_dropout',
'dropout_rate', 'deterministic', 'kernel_init', 'bias_init', 'bias',
'dtype', 'precision', 'qkv_attention_fn'
]
if name == 'performer':
return ['attention_fn_cls'] + standard_args
elif name == 'linformer':
return ['low_rank_features', 'downsample', 'proj_mode', 'proj_configs'
] + standard_args
elif name == 'standard':
return standard_args
def get_axial_1d_input(x: jnp.ndarray, axis: int):
"""Converts 2d inputs to 1d for axial attention."""
assert x.ndim == 4, ('The input dimension should be '
'[batch_size, height, width, channel]')
batch_size, height, width, channel = x.shape
if axis == 1:
return x.transpose((0, 2, 1, 3)).reshape(batch_size * width, height,
channel)
elif axis == 2:
return x.reshape(batch_size * height, width, channel)
def get_axial_2d_input(x: jnp.ndarray, axis: int, two_d_shape: Tuple[int, int,
int, int]):
"""Converts 1d inputs back to 2d after axial attention."""
assert x.ndim == 3, ('The input dimension should be '
'[batch_size, height*width, channel]')
batch_size, height, width, channel = two_d_shape
if axis == 1:
assert x.shape[0] == batch_size * width
return x.reshape((batch_size, width, height, channel)).transpose(
(0, 2, 1, 3))
elif axis == 2:
assert x.shape[0] == batch_size * height
return x.reshape(two_d_shape)
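# Shape round trip for the axial attention helpers (x of shape [B, H, W, C]):
#   get_axial_1d_input(x, axis=1) -> [B*W, H, C]  (attend along the height axis)
#   get_axial_1d_input(x, axis=2) -> [B*H, W, C]  (attend along the width axis)
# get_axial_2d_input with the same axis and two_d_shape=(B, H, W, C) inverts it.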
class Encoder1DBlock(nn.Module):
"""1-Dimensional Transformer encoder block.
Attributes:
mlp_dim: dimension of the MLP on top of attention block.
attention_configs: Configs passed to the self-attention function.
attention_fn: Type of the self-attention function.
dropout_rate: Dropout used in the MLP block.
attention_dropout_rate: Dropout for attention heads.
post_sa_fn: Function to be applied on the output of self-attention block.
dtype: The dtype of the computation.
"""
mlp_dim: int
attention_configs: Dict[Any, Any]
attention_fn: str
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
post_sa_fn: Optional[Callable[[jnp.ndarray], jnp.ndarray]] = None
droplayer_p: float = 0.0
dtype: jnp.ndarray = jnp.float32
def get_drop_pattern(self, x, deterministic):
if not deterministic and self.droplayer_p:
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
return jax.random.bernoulli(
self.make_rng('dropout'), self.droplayer_p, shape).astype('float32')
else:
return 0.0
@nn.compact
def __call__(self, inputs_q: jnp.ndarray, inputs_kv: jnp.ndarray, *,
deterministic: bool) -> jnp.ndarray:
"""Applies Encoder1DBlock module.
Args:
inputs_q: Input data in shape of `[bs, len, c]`.
inputs_kv: Memory data in shape of `[bs, memory len, c]`.
deterministic: Whether the model is in deterministic mode (if so, do not
apply dropout).
Returns:
Output after 1-d transformer encoder block.
"""
assert inputs_q.ndim == 3
if self.attention_fn:
is_self_attention = inputs_kv is None
# Attention block.
valid_args = _get_variant_args(self.attention_fn)
# Remove args that are potentially not needed for variant.
attention_configs = {
x: self.attention_configs[x]
for x in valid_args
if x in self.attention_configs
}
x = nn.LayerNorm(dtype=self.dtype)(inputs_q)
if not is_self_attention:
assert inputs_kv.ndim == 3
inputs_kv = nn.LayerNorm(dtype=self.dtype)(inputs_kv)
# Prepare the input for the attention module.
# We shouldn't pass memory if it is self-attention.
init_arg_to_attention_module = {
'kernel_init': nn.initializers.xavier_uniform(),
'broadcast_dropout': False,
'dtype': self.dtype,
'dropout_rate': self.attention_dropout_rate,
}
inputs_to_attention_module = {
'inputs_q': x,
'deterministic': deterministic,
}
if is_self_attention:
inputs_to_attention_module['inputs_kv'] = x
else:
inputs_to_attention_module['inputs_kv'] = inputs_kv
x = _get_attention_module(
self.attention_fn,
is_self_attention)(**init_arg_to_attention_module,
**attention_configs)(**inputs_to_attention_module)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
if x.shape[-2] != inputs_q.shape[-2]:
# TODO(yitay): Support case where we downsample. How do we handle res?
# Currently bypassing this causes training problems...
raise ValueError('Shape not identical. Cannot add residual connection.')
drop_pattern = self.get_drop_pattern(x, deterministic)
x = x * (1.0 - drop_pattern) + inputs_q
if self.post_sa_fn is not None:
x = self.post_sa_fn(x) # pylint: disable=not-callable
else:
x = inputs_q
if self.mlp_dim is None:
# Skip the MLP block.
return x
# MLP block.
y = nn.LayerNorm(dtype=self.dtype)(x)
mlp_dim = self.mlp_dim
if isinstance(self.mlp_dim, int):
mlp_dim = (mlp_dim,)
for mlp_d in mlp_dim:
y = attention_layers.MlpBlock(
mlp_dim=mlp_d,
dtype=self.dtype,
dropout_rate=self.dropout_rate,
activation_fn=nn.gelu,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6))(
y, deterministic=deterministic)
drop_pattern = self.get_drop_pattern(x, deterministic)
return x + y * (1.0 - drop_pattern)
class EncoderAxialBlock(nn.Module):
"""2-Dimensional Transformer encoder block with Axial attention.
This block is similar to Encoder1DBlock, where instead of `self-attention+MLP`
we have `row-self-attention + col-self-attention + MLP`.
Attributes:
mlp_dim: dimension of the mlp on top of attention block.
attention_configs: Configs passed to the self-attention function.
attention_fn: Type of the self-attention function.
dropout_rate: Dropout used in the mlp block.
attention_dropout_rate: Dropout for attention heads.
factorization_axis: Axis over which we run attention.
post_sa_fn: Function to be applied on the output of self-attention block.
dtype: The dtype of the computation.
"""
mlp_dim: int
attention_configs: Dict[Any, Any]
attention_fn: str
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
factorization_axis: Tuple[int, ...] = (1, 2)
post_sa_fn: Optional[Callable[[jnp.ndarray], jnp.ndarray]] = None
droplayer_p: float = 0.0
dtype: jnp.ndarray = jnp.float32
def get_drop_pattern(self, x, deterministic):
if not deterministic and self.droplayer_p:
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
return jax.random.bernoulli(
self.make_rng('dropout'), self.droplayer_p, shape).astype('float32')
else:
return 0.0
@nn.compact
def __call__(self, inputs: jnp.ndarray, *,
deterministic: bool) -> jnp.ndarray:
"""Applies Encoder1DBlock module.
Args:
inputs: Input data in shape of `[bs, len, c]`.
deterministic: Whether the model is in deterministic mode (if so, do not
apply dropout).
Returns:
Output after axial attention encoder block.
"""
def _run_attention_on_axis(inputs, axis, two_d_shape):
inputs = get_axial_1d_input(inputs, axis=axis)
x = nn.LayerNorm(dtype=self.dtype)(inputs)
init_arg_to_attention_module = {
'kernel_init': nn.initializers.xavier_uniform(),
'broadcast_dropout': False,
'dtype': self.dtype,
'dropout_rate': self.attention_dropout_rate,
}
# Attention block.
valid_args = _get_variant_args(self.attention_fn)
# Remove args that are potentially not needed for variant.
attention_configs = {
x: self.attention_configs[x]
for x in valid_args
if x in self.attention_configs
}
x = _get_attention_module(
self.attention_fn,
is_self_attention=True)(**init_arg_to_attention_module,
**attention_configs)(
x, deterministic=deterministic)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
drop_pattern = self.get_drop_pattern(x, deterministic)
x = x * (1.0 - drop_pattern) + inputs
return get_axial_2d_input(x, axis=axis, two_d_shape=two_d_shape)
x = inputs
if self.attention_fn:
# Row attention block.
two_d_shape = inputs.shape
for ax in self.factorization_axis:
x = _run_attention_on_axis(x, ax, two_d_shape)
if self.post_sa_fn is not None:
x = self.post_sa_fn(x) # pylint: disable=not-callable
if self.mlp_dim is None:
# Skip the MLP block.
return x
# MLP block.
y = nn.LayerNorm(dtype=self.dtype)(x)
mlp_dim = self.mlp_dim
if isinstance(self.mlp_dim, int):
mlp_dim = (mlp_dim,)
for mlp_d in mlp_dim:
y = attention_layers.MlpBlock(
mlp_dim=mlp_d,
dtype=self.dtype,
dropout_rate=self.dropout_rate,
activation_fn=nn.gelu,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6))(
y, deterministic=deterministic)
drop_pattern = self.get_drop_pattern(x, deterministic)
return x + y * (1.0 - drop_pattern)
def sample_categorical(rng: jnp.ndarray,
logits: jnp.ndarray,
num_samples: int,
*,
replacement: bool = True):
"""Sample catogorical with or without replacement for the top-k selector.
Args:
rng: JAX PRNG key.
logits: Categorical distribution logits of shape [batch_dims, num_classes].
num_samples: Number of samples to produce.
replacement: If True, sampling is done with replacement.
Returns:
Categorical samples of shape [batch_dims, num_samples].
"""
rng = jax.random.split(rng, num_samples)
if replacement:
samples = jax.vmap(jax.random.categorical, in_axes=(0, None))(rng, logits)
else:
num_categories = logits.shape[-1]
if num_categories < num_samples:
raise ValueError(f'Number of samples ({num_samples}) must be <= number of'
f' categories ({num_categories}) when sampling without'
f' replacement.')
def sample_one(logits, scan_rng):
samples = jax.random.categorical(scan_rng, logits, axis=-1)
mask = jax.nn.one_hot(samples, num_categories, dtype=jnp.bool_)
logits = jnp.where(mask, -1e10, logits)
return logits, samples
_, samples = jax.lax.scan(sample_one, logits, rng)
# Restore original shape.
ndim = samples.ndim
if ndim > 1:
samples = jnp.transpose(samples, axes=tuple(range(1, ndim)) + (0,))
return samples
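# Illustrative usage (assumed shapes): draw 3 distinct class indices per row
# from logits of shape [batch, num_classes]:
#   idx = sample_categorical(jax.random.PRNGKey(0), logits, 3, replacement=False)
#   # idx has shape [batch, 3]; with replacement=True repeated indices may occur.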
class TopKTokenSelector(nn.Module):
"""A layer that selects top-k tokens.
Note that if `pool_unselected_tokens` is set to True, it pools all the
unselected tokens and appends it as an extra tokens and returns k+1 tokens.
Attributes:
top_k: Number of tokens we select.
sample_tokens: Whether to sample the top-k tokens given their scores or just
take the top-k.
pool_unselected_tokens: Whether we pool the unselected tokens and attach the
pooled version as an extra token to the selected tokens.
exclude_cls: If set to True, it assumes the token at position 0 is CLS token
and should be excluded from the selection process and be attached back at
the end.
score_net_kernel_init: Kernel initialization for the score net.
dtype: Jax dtype.
"""
top_k: int
sample_tokens: bool
pool_unselected_tokens: bool
exclude_cls: bool = False
score_net_kernel_init: Initializer = nn.linear.default_kernel_init
dtype: jnp.ndarray = jnp.float32
@nn.compact
def __call__(self, inputs: jnp.ndarray, *, train: bool) -> jnp.ndarray:
if self.exclude_cls:
cls, inputs = jnp.split(inputs, (1,), axis=1)
input_len = inputs.shape[1]
if self.top_k > input_len:
raise ValueError(f'The value of top_k ({self.top_k}) must not exceed '
f'the input length ({input_len}).')
logging.info('Selecting %d tokens out of %d tokens.',
self.top_k, input_len)
# TODO(dehghani): Explore if adding a non-linearity to the score_net helps.
score_logits = jnp.squeeze(
nn.Dense(
features=1,
dtype=self.dtype,
kernel_init=self.score_net_kernel_init,
# No bias is needed since it gets removed during normalization.
use_bias=False,
name='score_net')(inputs),
axis=-1)
if train and self.sample_tokens and self.top_k < input_len:
# We use dropout rng for sampling, which is always provided.
rng = self.make_rng('dropout')
selected_index = sample_categorical(
rng, score_logits, self.top_k, replacement=False)
selected_logits = jax.vmap(jnp.take, (0, 0, None))(
score_logits, selected_index, 0)
else:
selected_logits, selected_index = jax.lax.top_k(score_logits, self.top_k)
# Take selected tokens:
selected_tokens = jax.vmap(jnp.take, (0, 0, None))(
inputs, selected_index, 0)
# Normalize "selected logits" and used as weights for selected tokens:
selected_tokens = selected_tokens * jax.nn.softmax(selected_logits)[
..., jnp.newaxis]
if self.pool_unselected_tokens and self.top_k < input_len:
# Extract index of unselected tokens:
selected_index_one_hot = jax.nn.one_hot(
selected_index, num_classes=input_len, dtype=jnp.bool_)
unselected_index_one_hot = jnp.any(
jnp.logical_not(selected_index_one_hot), axis=1)
_, unselected_index = jax.lax.top_k(unselected_index_one_hot,
input_len - self.top_k)
# Take unselected tokens:
unselected_tokens = jax.vmap(jnp.take, (0, 0, None))(inputs,
unselected_index, 0)
unselected_logits = jax.vmap(jnp.take, (0, 0, None))(score_logits,
unselected_index, 0)
# Normalize "unselected logits" and used as weights for unselected tokens:
weighted_unselected_tokens = (
unselected_tokens *
jax.nn.softmax(unselected_logits)[..., jnp.newaxis])
unselected_tokens_rep = jnp.sum(
weighted_unselected_tokens, axis=1, keepdims=True)
selected_tokens = jnp.concatenate(
[selected_tokens, unselected_tokens_rep], axis=1)
if self.exclude_cls:
selected_tokens = jnp.concatenate([cls, selected_tokens], axis=1)
return selected_tokens
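# Illustrative usage sketch (assumed shapes; would run inside a parent flax
# module or via .init()/.apply()):
#   selector = TopKTokenSelector(top_k=16, sample_tokens=False,
#                                pool_unselected_tokens=True)
#   y = selector(x, train=False)  # x: [batch, len, dim] -> y: [batch, 17, dim]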
###### PERFORMER FUNCTIONS:
def nonnegative_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True,
eps=0.0001):
"""Constructs nonnegative kernel features for fast softmax attention.
Args:
data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
is_query: predicate indicating whether input data corresponds to queries or
keys
normalize_data: predicate indicating whether data should be normalized.
eps: numerical stabilizer.
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
# We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where
# w_norm = w * data_normalizer for w in {q,k}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
last_dims_t = (len(data_dash.shape) - 1,)
if is_query:
data_dash = ratio * (
jnp.exp(data_dash - diag_data -
jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps)
else:
data_dash = ratio * (
jnp.exp(data_dash - diag_data - jnp.max(
data_dash, axis=last_dims_t + attention_dims_t, keepdims=True)) +
eps)
return data_dash
def sincos_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
normalize_data=True):
"""Constructs kernel sin-cos features for fast softmax attention.
Args:
data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
# We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) *
# exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_dash_cos = ratio * jnp.cos(data_dash)
data_dash_sin = ratio * jnp.sin(data_dash)
data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1)
# Constructing D_data and data^{'}
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
# Additional renormalization for numerical stability
data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True)
diag_data -= data_renormalizer
diag_data = jnp.exp(diag_data)
data_prime = data_dash * diag_data
return data_prime
def generalized_kernel_feature_creator(data, projection_matrix, batch_dims_t,
precision, kernel_fn, kernel_epsilon,
normalize_data):
"""Constructs kernel features for fast generalized attention.
Args:
data: input for which features are computed
projection_matrix: matrix used to compute features
batch_dims_t: tuple of batch dimensions
precision: precision parameter
kernel_fn: kernel function used
kernel_epsilon: additive positive term added to every feature for numerical
stability
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast generalized attention.
"""
if normalize_data:
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
else:
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime
def make_fast_softmax_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.000001,
nb_features=256,
ortho_features=True,
ortho_scaling=0.0,
redraw_features=True,
unidirectional=False,
nonnegative_features=True,
lax_scan_unroll=1):
"""Construct a fast softmax attention method."""
logging.info(
'Fast softmax attention: %s features and orthogonal=%s, renormalize=%s',
nb_features, ortho_features, renormalize_attention)
if ortho_features:
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix,
nb_features,
qkv_dim,
scaling=ortho_scaling)
else:
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
if nonnegative_features:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
return nonnegative_softmax_kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision,
is_query, normalize_data, numerical_stabilizer)
else:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
del is_query
return sincos_softmax_kernel_feature_creator(data, projection_matrix,
attention_dims_t,
batch_dims_t, precision,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
def make_fast_generalized_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.0,
nb_features=256,
features_type='deterministic',
kernel_fn=jax.nn.relu,
kernel_epsilon=0.001,
redraw_features=False,
unidirectional=False,
lax_scan_unroll=1):
"""Construct a fast generalized attention menthod."""
logging.info('Fast generalized attention: %s features and renormalize=%s',
nb_features, renormalize_attention)
if features_type == 'ortho':
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False)
elif features_type == 'iid':
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
elif features_type == 'deterministic':
matrix_creator = None
else:
raise ValueError('Unknown feature value type')
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=False):
del attention_dims_t
del is_query
return generalized_kernel_feature_creator(data, projection_matrix,
batch_dims_t, precision,
kernel_fn, kernel_epsilon,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
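# Illustrative sketch (not part of the original module): generalized attention
# with deterministic ReLU features needs no projection matrix, so the kernel
# feature map reduces to relu(x) + kernel_epsilon. Shapes are assumptions for
# the demo.
def _example_fast_generalized_attention():
  attention_fn = make_fast_generalized_attention(
      qkv_dim=64, features_type='deterministic', kernel_fn=jax.nn.relu)
  rng = random.PRNGKey(1)
  q = random.normal(rng, (2, 128, 4, 64))
  k = random.normal(rng, (2, 128, 4, 64))
  v = random.normal(rng, (2, 128, 4, 64))
  return attention_fn(q, k, v)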
class RandomMatrix(metaclass=abc.ABCMeta):
"""Abstract class providing a method for constructing 2D random arrays.
Class is responsible for constructing 2D random arrays.
"""
@abc.abstractmethod
def get_2d_array(self):
raise NotImplementedError('Abstract method')
class GaussianUnstructuredRandomMatrix(RandomMatrix):
def __init__(self, nb_rows, nb_columns, key):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
def get_2d_array(self):
return random.normal(self.key, (self.nb_rows, self.nb_columns))
class GaussianOrthogonalRandomMatrix(RandomMatrix):
r"""Class providing a method to create Gaussian orthogonal matrix.
Class is responsible for constructing 2D Gaussian orthogonal arrays.
"""
def __init__(self, nb_rows, nb_columns, key, scaling=0):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
self.scaling = scaling
def get_2d_array(self):
nb_full_blocks = int(self.nb_rows / self.nb_columns)
block_list = []
rng = self.key
for _ in range(nb_full_blocks):
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q)
remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns
if remaining_rows > 0:
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q[0:remaining_rows])
final_matrix = jnp.vstack(block_list)
if self.scaling == 0:
multiplier = jnp.linalg.norm(
random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1)
elif self.scaling == 1:
multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows))
else:
raise ValueError('Scaling must be one of {0, 1}. Was %s' % self.scaling)
return jnp.matmul(jnp.diag(multiplier), final_matrix)
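# Illustrative sketch (not part of the original module): drawing one
# block-orthogonal Gaussian matrix; the 256 x 64 shape is an assumption for
# the demo.
def _example_orthogonal_matrix():
  sampler = GaussianOrthogonalRandomMatrix(256, 64, key=random.PRNGKey(42))
  return sampler.get_2d_array()  # shape (256, 64), built from orthogonal blocks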
class FastAttention(metaclass=abc.ABCMeta):
"""Abstract class providing a method for fast attention.
Class is responsible for providing a method <dot_product_attention> for fast
approximate attention.
"""
@abc.abstractmethod
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying fast approximate dot-product
attention. It calculates the attention weights given query and key and
combines the values using the attention weights. This function supports
multi-dimensional inputs.
Args:
query: queries for calculating attention with shape of [batch_size, dim1,
dim2, ..., dimN, num_heads, mem_channels].
key: keys for calculating attention with shape of [batch_size, dim1, dim2,
..., dimN, num_heads, mem_channels].
value: values to be used in attention with shape of [batch_size, dim1,
dim2,..., dimN, num_heads, value_channels].
dtype: the dtype of the computation (default: float32)
bias: bias for the attention weights. This can be used for incorporating
autoregressive mask, padding mask, proximity bias.
      axis: axes over which the attention is applied.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout.
dropout_rate: dropout rate.
deterministic: bool, deterministic or not (to apply dropout).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
      Output of shape [bs, dim1, dim2, ..., dimN, num_heads, value_channels].
"""
raise NotImplementedError('Abstract method')
def _numerator(z_slice_shape, precision, unroll=1):
"""Computes the numartor."""
def fwd(qs, ks, vs):
def body(p, qkv):
(q, k, v) = qkv
p += jnp.einsum('...m,...d->...md', k, v, precision=precision)
x_slice = jnp.einsum('...m,...md->...d', q, p, precision=precision)
return p, x_slice
init_value = jnp.zeros(z_slice_shape)
p, w = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll)
return w, (p, qs, ks, vs)
def bwd(pqkv, w_ct):
def body(carry, qkv_xct):
p, p_ct = carry
q, k, v, x_ct = qkv_xct
q_ct = jnp.einsum('...d,...md->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...d,...m->...md', x_ct, q, precision=precision)
k_ct = jnp.einsum('...md,...d->...m', p_ct, v, precision=precision)
v_ct = jnp.einsum('...md,...m->...d', p_ct, k, precision=precision)
p -= jnp.einsum('...m,...d->...md', k, v, precision=precision)
return (p, p_ct), (q_ct, k_ct, v_ct)
p, qs, ks, vs = pqkv
_, (qs_ct, ks_ct, vs_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, vs, w_ct),
reverse=True,
unroll=unroll)
return qs_ct, ks_ct, vs_ct
@jax.custom_vjp
def _numerator_impl(qs, ks, vs):
w, _ = fwd(qs, ks, vs)
return w
_numerator_impl.defvjp(fwd, bwd)
return _numerator_impl
def _denominator(t_slice_shape, precision, unroll=1):
"""Computes the denominator."""
def fwd(qs, ks):
def body(p, qk):
q, k = qk
p += k
x = jnp.einsum('...m,...m->...', q, p, precision=precision)
return p, x
p = jnp.zeros(t_slice_shape)
p, r = lax.scan(body, p, (qs, ks), unroll=unroll)
return r, (qs, ks, p)
def bwd(qkp, r_ct):
def body(carry, qkx):
p, p_ct = carry
q, k, x_ct = qkx
q_ct = jnp.einsum('...,...m->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...,...m->...m', x_ct, q, precision=precision)
k_ct = p_ct
p -= k
return (p, p_ct), (q_ct, k_ct)
qs, ks, p = qkp
_, (qs_ct, ks_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, r_ct),
reverse=True,
unroll=unroll)
return (qs_ct, ks_ct)
@jax.custom_vjp
def _denominator_impl(qs, ks):
r, _ = fwd(qs, ks)
return r
_denominator_impl.defvjp(fwd, bwd)
return _denominator_impl
class FastAttentionviaLowRankDecomposition(FastAttention):
"""Class providing a method for fast attention via low rank decomposition.
Class is responsible for providing a method <dot_product_attention> for fast
dot-product attention with the use of low rank decomposition (e.g. with
random feature maps).
"""
def __init__(self,
matrix_creator,
kernel_feature_creator,
renormalize_attention,
numerical_stabilizer,
redraw_features,
unidirectional,
lax_scan_unroll=1): # For optimal GPU performance, set to 16.
rng = random.PRNGKey(0)
self.matrix_creator = matrix_creator
self.projection_matrix = self.draw_weights(rng)
self.kernel_feature_creator = kernel_feature_creator
self.renormalize_attention = renormalize_attention
self.numerical_stabilizer = numerical_stabilizer
self.redraw_features = redraw_features
self.unidirectional = unidirectional
self.lax_scan_unroll = lax_scan_unroll
def draw_weights(self, key):
if self.matrix_creator is None:
return None
matrixrng, _ = random.split(key)
projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array()
return projection_matrix
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError('Attention axis must be between the batch '
'axis and the last-two axes.')
n = key.ndim
# Constructing projection tensor.
if self.redraw_features:
query_seed = lax.convert_element_type(
jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32)
rng = random.PRNGKey(query_seed)
self.projection_matrix = self.draw_weights(rng)
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(np.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
k_extra_perm = axis + batch_dims + (n - 1,)
key_extra = key.transpose(k_extra_perm)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
v_perm = batch_dims + axis + (n - 1,)
value = value.transpose(v_perm)
batch_dims_t = tuple(range(len(batch_dims)))
attention_dims_t = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
# Constructing tensors Q^{'} and K^{'}.
query_prime = self.kernel_feature_creator(query, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, True)
key_prime = self.kernel_feature_creator(key, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, False)
if self.unidirectional:
index = attention_dims_t[0]
z_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],) + (value.shape[-1],)
numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll)
w = numerator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0))
# Constructing w = (Q^{'}(K^{'})^{T})_{masked}V
w = jnp.moveaxis(w, 0, index)
if not self.renormalize_attention:
# Unidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = w.transpose(perm_inv)
return result
else:
# Unidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
index = attention_dims_t[0]
t_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],)
denominator_fn = _denominator(t_slice_shape, precision,
self.lax_scan_unroll)
r = denominator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0))
r = jnp.moveaxis(r, 0, index)
else:
contract_query = tuple(
range(len(batch_dims) + len(axis),
len(batch_dims) + len(axis) + 1))
contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1))
# Constructing z = (K^{'})^{T}V
# z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
z = lax.dot_general(
key_prime,
value,
((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)),
precision=precision)
# Constructing w = Q^{'} z = Q^{'}(K^{'})^{T}V
# q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m)
# z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
# w (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
w = lax.dot_general(
query_prime,
z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)),
precision=precision)
if not self.renormalize_attention:
# Bidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = w.transpose(perm_inv)
return result
else:
# Bidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
contract_key = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
contract_thick_all_ones = tuple(
range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim))
# Construct t = (K^{'})^{T} 1_L
# k (bs, <non-attention dims>, num_heads, <attention dims>, channels)
t = lax.dot_general(
key_prime,
thick_all_ones, ((contract_key, contract_thick_all_ones),
(batch_dims_t, batch_dims_t)),
precision=precision)
# Construct partition function: r = Q^{'} t = Q^{'}(K^{'})^{T} 1_L
# q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m)
# t (bs, <non-attention dims>, num_heads, channels_m)
r = lax.dot_general(
query_prime,
t, (((query_prime.ndim - 1,), (t.ndim - 1,)),
(batch_dims_t, range(0,
len(t.shape) - 1))),
precision=precision)
r = r + 2 * self.numerical_stabilizer * (
jnp.abs(r) <= self.numerical_stabilizer)
r = jnp.reciprocal(r)
r = jnp.expand_dims(r, len(r.shape))
# w (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
# r (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel)
result = w * r
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
result = result.transpose(perm_inv)
return result
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
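# Illustrative sketch (not part of the original module): _invert_perm returns
# the permutation that undoes its argument.
def _example_invert_perm():
  assert _invert_perm((2, 0, 1)) == (1, 2, 0)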
|
google-research/scenic
|
scenic/projects/fast_vit/model_utils.py
|
Python
|
apache-2.0
| 52,572
|
[
"Gaussian"
] |
b8ac9542e8fdc6d1e11fc5ead8a82d76429958ca1a0593825c26e5d892f274be
|
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
This file holds the standards for every algorithm.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from spotpy import database
from spotpy import parameter
import numpy as np
import time
import threading
try:
from queue import Queue
except ImportError:
    # On Python 2 only the capitalized Queue module is available as a
    # multiprocessing class. There the main process has to sleep for one
    # microsecond, otherwise the subprocess is not finished and the main
    # process cannot access it and hand it over to the garbage collector.
    # This slows down the whole simulation and is an annoying bug that the
    # Python 3 Queue does not need to work around.
from Queue import Queue
class _RunStatistic(object):
"""
    This class checks for each run whether the objectivefunction got better and
    holds the best parameter set.
Every _algorithm has an object of this class as status.
Usage:
status = _RunStatistic()
status(rep,like,params)
"""
def __init__(self, repetitions, algorithm_name, optimization_direction, parnames):
        self.optimization_direction = optimization_direction # grid, maximize, minimize
print('Initializing the ',algorithm_name,' with ',repetitions,' repetitions')
if optimization_direction == 'minimize':
self.compare = self.minimizer
print('The objective function will be minimized')
if optimization_direction == 'maximize':
self.compare = self.maximizer
            print('The objective function will be maximized')
if optimization_direction == 'grid':
self.compare = self.grid
self.rep = 0
self.parnames = parnames
self.parameters= len(parnames)
self.params_min = [np.nan]*self.parameters
self.params_max = [np.nan]*self.parameters
self.objectivefunction_min = 1e308
self.objectivefunction_max = -1e308
self.starttime = time.time()
self.last_print = time.time()
self.repetitions = repetitions
self.stop = False
def minimizer(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
def maximizer(self, objval, params):
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def grid(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def __call__(self, objectivefunction, params, block_print=False):
self.rep+=1
if type(objectivefunction) == type([]): #TODO: change to iterable
self.compare(objectivefunction[0], params)
elif type(objectivefunction) == type(np.array([])):
pass
else:
self.compare(objectivefunction, params)
if self.rep == self.repetitions:
self.stop = True
if not block_print:
self.print_status()
def print_status(self):
        # get str showing approximate time left to end of simulation in H, M, S
        acttime = time.time()
        # Refresh the progress bar every two seconds
if acttime - self.last_print >= 2:
avg_time_per_run = (acttime - self.starttime) / (self.rep + 1)
timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1)))))
if self.optimization_direction == 'minimize':
text = '%i of %i, minimal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, timestr)
if self.optimization_direction == 'maximize':
text = '%i of %i, maximal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_max, timestr)
if self.optimization_direction == 'grid':
text = '%i of %i, min objf=%g, max objf=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, self.objectivefunction_max, timestr)
print(text)
self.last_print = time.time()
def print_status_final(self):
print('\n*** Final SPOTPY summary ***')
print('Total Duration: ' + str(round((time.time() - self.starttime), 2)) + ' seconds')
print('Total Repetitions:', self.rep)
if self.optimization_direction == 'minimize':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
if self.optimization_direction == 'maximize':
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
if self.optimization_direction == 'grid':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
print('******************************\n')
def __repr__(self):
return 'Min objectivefunction: %g \n Max objectivefunction: %g' % (
self.objectivefunction_min, self.objectivefunction_max)
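# Illustrative sketch (not part of the original module): driving _RunStatistic
# as described in its docstring; the repetition count and parameter names are
# assumptions for the demo.
def _example_run_statistic():
    status = _RunStatistic(repetitions=5, algorithm_name='demo',
                           optimization_direction='minimize', parnames=['x', 'y'])
    for rep in range(5):
        objval = float(5 - rep)               # pretend the runs keep improving
        status(objval, [rep, -rep], block_print=True)
    return status.objectivefunction_min       # 1.0 after the loop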
class _algorithm(object):
"""
Implements an algorithm.
Input
----------
spot_setup: class
model: function
            Should be callable with a parameter combination of the parameter-function
            and return a list of simulation results (of the same length as the evaluation list)
        parameter: function
            When called, it should return a random parameter combination, which can
            be e.g. uniform or Gaussian
        objectivefunction: function
            Should return the objectivefunction value for a given list of a model simulation and
            observation.
        evaluation: function
            Should return the true values as returned by the model.
dbname: str
Name of the database where parameter, objectivefunction value and simulation
results will be saved.
dbformat: str
        ram: fast, suited for short sampling times. No file will be created and results are saved in an array.
        csv: A csv file will be created, which you can import afterwards.
    parallel: str
        seq: Sequential sampling (default): normal iterations on one core of your CPU.
        mpc: Multiprocessing: iterations on all available cores of your (single) PC.
        mpi: Message Passing Interface: parallel computing on high performance computing clusters, mpi4py needs to be installed.
save_threshold: float or list
Compares the given value/list of values with return value/list of values from spot_setup.objectivefunction.
If the objectivefunction value is higher, the results are saved in the database. If not they are ignored (saves storage).
db_precision:np.float type
set np.float16, np.float32 or np.float64 for rounding of floats in the output database
Default is np.float16
sim_timeout: float, int or None, default: None
the defined model given in the spot_setup class can be controlled to break after 'sim_timeout' seconds if
sim_timeout is not None.
        If the model run has been broken, simply '[nan]' will be returned.
random_state: int or None, default: None
        the algorithm uses the number in random_state as a seed for numpy. This way stochastic processes can be reproduced.
"""
_unaccepted_parameter_types = (parameter.List, )
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
dbappend=False, parallel='seq', save_sim=True, breakpoint=None,
backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16,
sim_timeout=None, random_state=None, optimization_direction='grid', algorithm_name=''):
# Initialize the user defined setup class
self.setup = spot_setup
param_info = parameter.get_parameters_array(self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
self.all_params = param_info['random']
self.constant_positions = parameter.get_constant_indices(spot_setup)
if self.constant_positions:
self.non_constant_positions = []
for i, val in enumerate(self.all_params):
if self.all_params[i] not in self.constant_positions:
self.non_constant_positions.append(i)
else:
self.non_constant_positions = np.arange(0,len(self.all_params))
self.parameter = self.get_parameters
self.parnames = param_info['name']
self.algorithm_name = algorithm_name
# Create a type to hold the parameter values using a namedtuple
self.partype = parameter.ParameterSet(param_info)
self.evaluation = self.setup.evaluation()
self.save_sim = save_sim
self.optimization_direction = optimization_direction
self.dbname = dbname or 'customDb'
self.dbformat = dbformat or 'ram'
self.db_precision = db_precision
self.breakpoint = breakpoint
self.backup_every_rep = backup_every_rep
        # Two parameters to control the database handling
        # 'dbinit' triggers the initial creation of the database file
        # 'dbappend' is used to append to the existing database after a restart
self.dbinit = dbinit
self.dbappend = dbappend
# Set the random state
        if random_state is None: # ToDo: Have to discuss if these 3 lines are necessary.
random_state = np.random.randint(low=0, high=2**30)
np.random.seed(random_state)
        # If the value is not None, a timeout will be set so that the simulation breaks after sim_timeout seconds without returning a value
self.sim_timeout = sim_timeout
self.save_threshold = save_threshold
if breakpoint == 'read' or breakpoint == 'readandwrite':
print('Reading backupfile')
try:
open(self.dbname+'.break')
except FileNotFoundError:
print('Backupfile not found')
self.dbappend = True
# Now a repeater (ForEach-object) is loaded
        # A repeater is a convenient wrapper to repeat tasks
# We have the same interface for sequential and for parallel tasks
if parallel == 'seq':
from spotpy.parallel.sequential import ForEach
elif parallel == 'mpi':
from spotpy.parallel.mpi import ForEach
        # MPC is based on pathos multiprocessing and uses an ordered map, so results are given back in the same
        # order as the parameters
elif parallel == 'mpc':
from spotpy.parallel.mproc import ForEach
        # UMPC is based on pathos multiprocessing and uses an unordered map, so results are given back in the order
        # in which the subprocesses finish. This may speed up the whole simulation process, but it is not recommended if
        # objective functions do their calculation based on the order of the data, because the order of the results
        # is then effectively random
elif parallel == 'umpc':
from spotpy.parallel.umproc import ForEach
else:
raise ValueError(
"'%s' is not a valid keyword for parallel processing" % parallel)
# This is the repeater for the model runs. The simulate method does the work
# If you need different tasks, the repeater can be pushed into a "phase" using the
# setphase function. The simulate method can check the current phase and dispatch work
# to other functions. This is introduced for sceua to differentiate between burn in and
# the normal work on the chains
self.repeat = ForEach(self.simulate)
# method "save" needs to know whether objective function result is list or float, default is float
self.like_struct_typ = type(1.1)
def __str__(self):
return '{type}({mtype}())->{dbname}'.format(
type=type(self).__name__,
mtype=type(self.setup).__name__,
dbname=self.dbname)
def __repr__(self):
return '{type}()'.format(type=type(self).__name__)
def get_parameters(self):
"""
Returns the parameter array from the setup
"""
pars = parameter.get_parameters_array(self.setup)
return pars[self.non_constant_positions]
def set_repetiton(self, repetitions):
self.status = _RunStatistic(repetitions, self.algorithm_name,
self.optimization_direction, self.parnames)
# In MPI, this command will do nothing on the master process
# but the worker processes are going to wait for jobs.
# Hence the workers will only receive parameters for the
# simulate function, new calculation phases and the termination
self.repeat.start()
def final_call(self):
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: # Happens if no database was assigned
pass
self.status.print_status_final()
def _init_database(self, like, randompar, simulations):
if self.dbinit:
print('Initialize database...')
self.datawriter = database.get_datawriter(self.dbformat,
self.dbname, self.parnames, like, randompar, simulations,
save_sim=self.save_sim, dbappend=self.dbappend,
dbinit=self.dbinit, db_precision=self.db_precision,
setup=self.setup)
self.dbinit = False
def __is_list_type(self, data):
if type(data) == type:
return data == list or data == type(np.array([]))
else:
return type(data) == list or type(data) == type(np.array([]))
def save(self, like, randompar, simulations, chains=1):
# Initialize the database if no run was performed so far
self._init_database(like, randompar, simulations)
# Test if like and the save threshold are float/list and compare accordingly
if self.__is_list_type(like) and self.__is_list_type(self.save_threshold):
if all(i > j for i, j in zip(like, self.save_threshold)): #Compares list/list
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and (not self.__is_list_type(self.save_threshold)):
if like>self.save_threshold: #Compares float/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if self.__is_list_type(like) and (not self.__is_list_type(self.save_threshold)):
if like[0]>self.save_threshold: #Compares list/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and self.__is_list_type(self.save_threshold): #Compares float/list
            if (like > self.save_threshold).all():
self.datawriter.save(like, randompar, simulations, chains=chains)
def read_breakdata(self, dbname):
''' Read data from a pickle file if a breakpoint is set.
Reason: In case of incomplete optimizations, old data can be restored. '''
import pickle
with open(dbname+'.break', 'rb') as breakfile:
work,backuptime,repos,obmin,obmax=pickle.load(breakfile)
self.status.starttime=self.status.starttime-backuptime
self.status.rep=repos
self.status.objectivefunction_min=obmin
self.status.objectivefunction_max=obmax
return work
def write_breakdata(self, dbname, work):
''' Write data to a pickle file if a breakpoint has been set.'''
import pickle
work=(work,self.status.last_print-self.status.starttime,self.status.rep,self.status.objectivefunction_min,self.status.objectivefunction_max)
with open(str(dbname)+'.break', 'wb') as breakfile:
pickle.dump(work, breakfile)
def getdata(self):
return self.datawriter.getdata()
def update_params(self, params):
#Add potential Constant parameters
self.all_params[self.non_constant_positions] = params
return self.all_params
    def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negativlike=False, block_print=False): # TODO: rep not necessary
params = self.update_params(params)
if negativlike is True:
like = -self.getfitness(simulation=simulation, params=params)
else:
like = self.getfitness(simulation=simulation, params=params)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
self.status(like,params,block_print=block_print)
if save_run is True and simulation is not None:
self.save(like, params, simulations=simulation, chains=chains)
if type(like)==type([]):
return like[0]
else:
return like
def getfitness(self, simulation, params):
"""
Calls the user defined spot_setup objectivefunction
"""
try:
#print('Using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames))
except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction
#print('Not using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation)
def simulate(self, id_params_tuple):
"""This is a simple wrapper of the model, returning the result together with
        the run id and the parameters. This is needed because some parallel back-ends
        can mix up the ordering of runs
"""
id, params = id_params_tuple
self.all_params[self.non_constant_positions] = params #TODO: List parameters are not updated if not accepted for the algorithm, we may have to warn/error if list is given
all_params = self.all_params
# we need a layer to fetch returned data from a threaded process into a queue.
def model_layer(q,all_params):
# Call self.model with a namedtuple instead of another sequence
q.put(self.setup.simulation(self.partype(*all_params)))
        # starting a queue, where in python2.7 this is a multiprocessing class and can cause errors because of
        # incompatibility with the main thread. Therefore a workaround follows only for older Python versions
que = Queue()
sim_thread = threading.Thread(target=model_layer, args=(que, all_params))
sim_thread.daemon = True
sim_thread.start()
        # If self.sim_timeout is not None the self.model will break after self.sim_timeout seconds, otherwise it runs
        # as long as it needs to run
sim_thread.join(self.sim_timeout)
        # If no result from the thread is given, i.e. the thread was killed by the watcher, the default result is
        # '[nan]' and will not be saved. Otherwise get the result from the thread
model_result = None
if not que.empty():
model_result = que.get()
return id, params, model_result
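# Illustrative sketch (not part of the original module): the typical sampling
# loop a derived algorithm implements on top of these helpers. The class name
# and the plain Monte-Carlo style loop are assumptions for the demo.
class _ExampleSampler(_algorithm):
    def sample(self, repetitions):
        self.set_repetiton(repetitions)
        param_generator = ((rep, self.parameter()['random'])
                           for rep in range(int(repetitions)))
        for rep, randompar, simulations in self.repeat(param_generator):
            # evaluates the objective function and stores the run in the database
            self.postprocessing(rep, randompar, simulations)
        self.final_call()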
|
bees4ever/spotpy
|
spotpy/algorithms/_algorithm.py
|
Python
|
mit
| 20,835
|
[
"Gaussian"
] |
b2944632eca9b9ccb3842ee3d0b103b96d2c1a46399682233a5557bdde899a09
|
# .. py:currentmodule:: dolfin_adjoint
#
# Topology optimisation of fluids in Stokes flow
# ==============================================
#
# .. sectionauthor:: Patrick E. Farrell <patrick.farrell@maths.ox.ac.uk>
#
# This demo solves example 4 of :cite:`borrvall2003`.
#
# Problem definition
# ******************
#
# This problem is to minimise the dissipated power in the fluid
#
# .. math::
# \frac{1}{2} \int_{\Omega} \alpha(\rho) u \cdot u + \frac{\mu}{2} \int_{\Omega} \nabla u : \nabla u - \int_{\Omega} f u
#
# subject to the Stokes equations with velocity Dirichlet conditions
#
# .. math::
# \alpha(\rho) u - \mu \nabla^2 u + \nabla p &= f \qquad \mathrm{in} \ \Omega \\
# \mathrm{div}(u) &= 0 \qquad \mathrm{in} \ \Omega \\
# u &= b \qquad \mathrm{on} \ \partial \Omega \\
#
# and to the control constraints on available fluid volume
#
# .. math::
# 0 \le \rho(x) &\le 1 \qquad \forall x \in \Omega \\
# \int_{\Omega} \rho &\le V
#
# where :math:`u` is the velocity, :math:`p` is the pressure,
# :math:`\rho` is the control (:math:`\rho(x) = 1` means fluid present,
# :math:`\rho(x) = 0` means no fluid present), :math:`f` is a prescribed
# source term (here 0), :math:`V` is the volume bound on the control,
# :math:`\alpha(\rho)` models the inverse permeability as a function of
# the control
#
# .. math::
# \alpha(\rho) = \bar{\alpha} + (\underline{\alpha} - \bar{\alpha}) \rho \frac{1 + q}{\rho + q}
#
# with :math:`\bar{\alpha}`, :math:`\underline{\alpha}` and :math:`q`
# prescribed constants. The parameter :math:`q` penalises deviations
# from the values 0 or 1; the higher q, the closer the solution will be
# to having the two discrete values 0 or 1.
#
# The problem domain :math:`\Omega` is parameterised by the aspect ratio
# :math:`\delta` (the domain is 1 unit high and :math:`\delta` units
# wide); in this example, we will solve the harder problem of
# :math:`\delta = 1.5`. The boundary conditions are specified in figure
# 10 of Borrvall and Petersson, reproduced here.
#
# .. image:: stokes-topology-bcs.png
# :scale: 80
# :align: center
#
# Physically, this problem corresponds to finding the fluid-solid
# distribution :math:`\rho(x)` that minimises the dissipated power in
# the fluid.
#
# As Borrvall and Petersson comment, it is necessary to solve this
# problem with :math:`q=0.1` to ensure that the result approaches a
# discrete-valued solution, but solving this problem directly with this
# value of :math:`q` leads to a local minimum configuration of two
# straight pipes across the domain (like the top half of figure 11).
# Therefore, we follow their suggestion to first solve the optimisation
# problem with a smaller penalty parameter of :math:`q=0.01`; this
# optimisation problem does not yield bang-bang solutions but is easier
# to solve, and gives an initial guess from which the :math:`q=0.1` case
# converges to the better minimum.
#
# Implementation
# **************
#
# First, the :py:mod:`dolfin` and :py:mod:`dolfin_adjoint` modules are
# imported:
from __future__ import print_function
from dolfin import *
from dolfin_adjoint import *
# Next we import the Python interface to IPOPT. If IPOPT is
# unavailable on your system, we strongly :doc:`suggest you install it
# <../../download/index>`; IPOPT is a well-established open-source
# optimisation algorithm.
try:
import pyipopt
except ImportError:
info_red("""This example depends on IPOPT and pyipopt. \
When compiling IPOPT, make sure to link against HSL, as it \
is a necessity for practical problems.""")
raise
# turn off redundant output in parallel
parameters["std_out_all_processes"] = False
# Next we define some constants, and define the inverse permeability as
# a function of :math:`\rho`.
mu = Constant(1.0) # viscosity
alphaunderbar = 2.5 * mu / (100**2) # parameter for \alpha
alphabar = 2.5 * mu / (0.01**2) # parameter for \alpha
q = Constant(0.01) # q value that controls difficulty/discrete-valuedness of solution
def alpha(rho):
"""Inverse permeability as a function of rho, equation (40)"""
return alphabar + (alphaunderbar - alphabar) * rho * (1 + q) / (rho + q)
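# Sanity-check sketch (not part of the original demo): with this convex
# interpolation, alpha(1) reduces to alphaunderbar (fluid, low resistance) and
# alpha(0) reduces to alphabar (solid, high resistance). Plain floats mirroring
# the constants above are used so the check does not depend on dolfin Constants.
def _check_alpha_limits(q_val=0.01, low=2.5e-4, high=2.5e4):
    def alpha_float(rho):
        return high + (low - high) * rho * (1 + q_val) / (rho + q_val)
    assert abs(alpha_float(1.0) - low) < 1e-12
    assert abs(alpha_float(0.0) - high) < 1e-12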
# Next we define the mesh (a rectangle 1 high and :math:`\delta` wide)
# and the function spaces to be used for the control :math:`\rho`, the
# velocity :math:`u` and the pressure :math:`p`. Here we will use the
# Taylor-Hood finite element to discretise the Stokes equations
# :cite:`taylor1973`.
N = 200
delta = 1.5 # The aspect ratio of the domain, 1 high and \delta wide
V = Constant(1.0/3) * delta # want the fluid to occupy 1/3 of the domain
mesh = RectangleMesh(mpi_comm_world(), Point(0.0, 0.0), Point(delta, 1.0), N, N)
A = FunctionSpace(mesh, "CG", 1) # control function space
U_h = VectorElement("CG", mesh.ufl_cell(), 2)
P_h = FiniteElement("CG", mesh.ufl_cell(), 1)
W = FunctionSpace(mesh, U_h*P_h) # mixed Taylor-Hood function space
# Define the boundary condition on velocity
class InflowOutflow(Expression):
def eval(self, values, x):
values[1] = 0.0
values[0] = 0.0
l = 1.0/6.0
gbar = 1.0
if x[0] == 0.0 or x[0] == delta:
if (1.0/4 - l/2) < x[1] < (1.0/4 + l/2):
t = x[1] - 1.0/4
values[0] = gbar*(1 - (2*t/l)**2)
if (3.0/4 - l/2) < x[1] < (3.0/4 + l/2):
t = x[1] - 3.0/4
values[0] = gbar*(1 - (2*t/l)**2)
def value_shape(self):
return (2,)
# Next we define a function that given a control :math:`\rho` solves the
# forward PDE for velocity and pressure :math:`(u, p)`. (The advantage
# of formulating it in this manner is that it makes it easy to conduct
# :doc:`Taylor remainder convergence tests
# <../../documentation/verification>`.)
def forward(rho):
"""Solve the forward problem for a given fluid distribution rho(x)."""
w = Function(W)
(u, p) = split(w)
(v, q) = TestFunctions(W)
F = (alpha(rho) * inner(u, v) * dx + inner(grad(u), grad(v)) * dx +
inner(grad(p), v) * dx + inner(div(u), q) * dx)
bc = DirichletBC(W.sub(0), InflowOutflow(), "on_boundary")
solve(F == 0, w, bcs=bc)
return w
# Now we define the ``__main__`` section. We define the initial guess
# for the control and use it to solve the forward PDE. In order to
# ensure feasibility of the initial control guess, we interpolate the
# volume bound; this ensures that the integral constraint and the bound
# constraint are satisfied.
if __name__ == "__main__":
rho = interpolate(Constant(float(V)/delta), A, name="Control")
w = forward(rho)
(u, p) = split(w)
# With the forward problem solved once, :py:mod:`dolfin_adjoint` has
# built a *tape* of the forward model; it will use this tape to drive
# the optimisation, by repeatedly solving the forward model and the
# adjoint model for varying control inputs.
#
# As in the :doc:`Poisson topology example
# <../poisson-topology/poisson-topology>`, we will use an evaluation
# callback to dump the control iterates to disk for visualisation. As
# this optimisation problem (:math:`q=0.01`) is solved only to generate
# an initial guess for the main task (:math:`q=0.1`), we shall save
# these iterates in ``output/control_iterations_guess.pvd``.
controls = File("output/control_iterations_guess.pvd")
allctrls = File("output/allcontrols.pvd")
rho_viz = Function(A, name="ControlVisualisation")
def eval_cb(j, rho):
rho_viz.assign(rho)
controls << rho_viz
allctrls << rho_viz
# Now we define the functional and :doc:`reduced functional
# <../maths/2-problem>`:
J = Functional(0.5 * inner(alpha(rho) * u, u) * dx + mu * inner(grad(u), grad(u)) * dx)
m = Control(rho)
Jhat = ReducedFunctional(J, m, eval_cb_post=eval_cb)
# The control constraints are the same as the :doc:`Poisson topology
# example <../poisson-topology/poisson-topology>`, and so won't be
# discussed again here.
# Bound constraints
lb = 0.0
ub = 1.0
# Volume constraints
class VolumeConstraint(InequalityConstraint):
"""A class that enforces the volume constraint g(a) = V - a*dx >= 0."""
def __init__(self, V):
self.V = float(V)
# The derivative of the constraint g(x) is constant
# (it is the negative of the diagonal of the lumped mass matrix for the
# control function space), so let's assemble it here once.
# This is also useful in rapidly calculating the integral each time
# without re-assembling.
self.smass = assemble(TestFunction(A) * Constant(1) * dx)
self.tmpvec = Function(A)
def function(self, m):
print("Evaluting constraint residual")
self.tmpvec.vector()[:] = m
# Compute the integral of the control over the domain
integral = self.smass.inner(self.tmpvec.vector())
print("Current control integral: ", integral)
return [self.V - integral]
def jacobian(self, m):
print("Computing constraint Jacobian")
return [-self.smass]
def output_workspace(self):
return [0.0]
# Now that all the ingredients are in place, we can perform the initial
# optimisation. We set the maximum number of iterations for this initial
# optimisation problem to 20; there's no need to solve this to
# completion, as its only purpose is to generate an initial guess.
# Solve the optimisation problem with q = 0.01
problem = MinimizationProblem(Jhat, bounds=(lb, ub), constraints=VolumeConstraint(V))
parameters = {'maximum_iterations': 20}
solver = IPOPTSolver(problem, parameters=parameters)
rho_opt = solver.solve()
rho_opt_xdmf = XDMFFile(mpi_comm_world(), "output/control_solution_guess.xdmf")
rho_opt_xdmf.write(rho_opt)
# With the optimised value for :math:`q=0.01` in hand, we *reset* the
# dolfin-adjoint state, clearing its tape, and configure the new problem
# we want to solve. We need to update the values of :math:`q` and
# :math:`\rho`:
q.assign(0.1)
rho.assign(rho_opt)
adj_reset()
# Since we have cleared the tape, we need to execute the forward model
# once again to redefine the problem. (It is also possible to modify the
# tape, but this way is easier to understand.) We will also redefine the
# functionals and parameters; this time, the evaluation callback will
# save the optimisation iterations to
# ``output/control_iterations_final.pvd``.
rho_intrm = XDMFFile(mpi_comm_world(), "intermediate-guess-%s.xdmf" % N)
rho_intrm.write(rho)
w = forward(rho)
(u, p) = split(w)
# Define the reduced functionals
controls = File("output/control_iterations_final.pvd")
rho_viz = Function(A, name="ControlVisualisation")
def eval_cb(j, rho):
rho_viz.assign(rho)
controls << rho_viz
allctrls << rho_viz
J = Functional(0.5 * inner(alpha(rho) * u, u) * dx + mu * inner(grad(u), grad(u)) * dx)
m = Control(rho)
Jhat = ReducedFunctional(J, m, eval_cb_post=eval_cb)
# We can now solve the optimisation problem with :math:`q=0.1`, starting
# from the solution of :math:`q=0.01`:
problem = MinimizationProblem(Jhat, bounds=(lb, ub), constraints=VolumeConstraint(V))
parameters = {'maximum_iterations': 100}
solver = IPOPTSolver(problem, parameters=parameters)
rho_opt = solver.solve()
rho_opt_final = XDMFFile(mpi_comm_world(), "output/control_solution_final.xdmf")
rho_opt_final.write(rho_opt)
# The example code can be found in ``examples/stokes-topology/`` in the
# ``dolfin-adjoint`` source tree, and executed as follows:
#
# .. code-block:: bash
#
# $ mpiexec -n 4 python stokes-topology.py
# ...
# Number of Iterations....: 100
#
# (scaled) (unscaled)
# Objective...............: 4.5944633030224409e+01 4.5944633030224409e+01
# Dual infeasibility......: 1.8048641504211900e-03 1.8048641504211900e-03
# Constraint violation....: 0.0000000000000000e+00 0.0000000000000000e+00
# Complementarity.........: 9.6698653740681504e-05 9.6698653740681504e-05
# Overall NLP error.......: 1.8048641504211900e-03 1.8048641504211900e-03
#
#
# Number of objective function evaluations = 105
# Number of objective gradient evaluations = 101
# Number of equality constraint evaluations = 0
# Number of inequality constraint evaluations = 105
# Number of equality constraint Jacobian evaluations = 0
# Number of inequality constraint Jacobian evaluations = 101
# Number of Lagrangian Hessian evaluations = 0
# Total CPU secs in IPOPT (w/o function evaluations) = 11.585
# Total CPU secs in NLP function evaluations = 556.795
#
# EXIT: Maximum Number of Iterations Exceeded.
#
# The optimisation iterations can be visualised by opening
# ``output/control_iterations_final.pvd`` in paraview. The resulting
# solution appears very similar to the solution proposed in
# :cite:`borrvall2003`.
#
# .. image:: stokes-topology.png
# :scale: 25
# :align: center
#
# .. rubric:: References
#
# .. bibliography:: /documentation/stokes-topology/stokes-topology.bib
# :cited:
# :labelprefix: 4E-
|
live-clones/dolfin-adjoint
|
examples/stokes-topology/stokes-topology.py
|
Python
|
lgpl-3.0
| 13,406
|
[
"ParaView"
] |
93fe9ef2396494845c61e7197a8ac8e7932a5b78c166e42559fba642dfb23c24
|
#
# _____ _____ _______ __ _ _______ ______ _______ _____
# | | |_____] |______ | \ | | |_____/ |_____| | |
# |_____| | |______ | \_| | | \_ | | __|__ |_____
#
# _______ _____ __ _ _ _ _______ ______ _______ _____ _____ __ _
# | | | | \ | \ / |______ |_____/ |______ | | | | \ |
# |_____ |_____| | \_| \/ |______ | \_ ______| __|__ |_____| | \_|
#
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyshp
import shapefile
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyproj
import pyproj
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#requests
import requests
import hashlib, collections, csv, os, sys, zipfile
import json
import csv
# http://www.codeforamerica.org/specifications/trails/spec.html
TRAILS_URL = 'http://library.oregonmetro.gov/rlisdiscovery/trails.zip'
PARKS_URL = 'http://library.oregonmetro.gov/rlisdiscovery/orca_sites.zip'
WGS84 = pyproj.Proj("+init=EPSG:4326") # LatLon with WGS84 datum used for geojson
ORSP = pyproj.Proj("+init=EPSG:2913", preserve_units=True) # datum used by Oregon Metro
STEWARDS = []
ORCA_SITES = {}
if not os.path.exists(os.getcwd()+'/output'):
"""
Create a directory to hold the output
"""
os.makedirs(os.getcwd()+'/output')
def get_duplicates(arr):
"""
helper function to check for duplicate ids
"""
dup_arr = arr[:]
for i in set(arr):
dup_arr.remove(i)
return list(set(dup_arr))
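# Illustrative sketch (not part of the original script): get_duplicates keeps
# only the values that occur more than once in the input list.
def _example_get_duplicates():
    assert sorted(get_duplicates([1, 2, 2, 3, 3, 3])) == [2, 3]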
def download(path, file):
if not os.path.exists(os.getcwd()+'/src'):
os.makedirs(os.getcwd()+'/src')
with open(os.getcwd()+'/src/'+file+'.zip', 'wb') as handle:
response = requests.get(path, stream=True)
if not response.ok:
# Something went wrong
print "Failed to download "+file
sys.exit()
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
print 'Downloaded '+file
unzip(file)
print 'Unzipped '+file
def unzip(file):
zfile = zipfile.ZipFile(os.getcwd()+'/src/'+file+'.zip')
for name in zfile.namelist():
(dirname, filename) = os.path.split(name)
zfile.extract(name, os.getcwd()+'/src/')
zfile.close()
def get_steward_id(steward):
try:
id = [x['steward_id'] for x in STEWARDS if x["name"] == steward][0]
return id
except IndexError as e:
#Crap stewards
if steward=='Home Owner Association': return 9999 #private
if steward=='North Clackamas Parks and Recreation Department': return 58672 #should be district
if steward=='United States Fish & Wildlife' : return 43262
if steward=='Wood Village Parks & Recreation' : return 8348
if steward is None: return 9999 #private
return 9999
def compare_segment_arrays(a, b):
if len(a) != len(b): return False
for n in a:
if n in b:
continue
else:
return False
return True
def is_subset(a,b):
foo=True
for val in a:
if val in b:
continue
else:
#print val
foo= False
return foo
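# Illustrative sketch (not part of the original script): compare_segment_arrays
# is an order-insensitive equality test on segment id lists, while is_subset
# checks whether every id in a also appears in b.
def _example_segment_helpers():
    assert compare_segment_arrays([1, 2, 3], [3, 2, 1])
    assert not compare_segment_arrays([1, 2], [1, 2, 3])
    assert is_subset([1, 2], [1, 2, 3])
    assert not is_subset([1, 4], [1, 2, 3])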
def process_trail_segments():
trail_segments = []
named_trails = []
# read the trails shapefile
reader = shapefile.Reader(os.getcwd()+'/src/trails.shp')
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
#iterate trails
for sr in reader.shapeRecords():
atr = dict(zip(field_names, sr.record))
# we're only allowing open existing trails to pass
if (atr['STATUS'].upper() == 'OPEN' or atr['STATUS'].upper() == 'OPEN_FEE') and atr['SYSTEMTYPE'].upper() != 'OTHER':
props = collections.OrderedDict()
#effectively join to the stewards table
id = props['id'] = str(int(atr['TRAILID']))
props['steward_id'] = get_steward_id(atr['AGENCYNAME'])
props['motor_vehicles'] = 'no'
            props['foot'] = 'yes' if atr['HIKE'] == 'Yes' else 'no'
props['bicycle'] = 'yes' if atr['ROADBIKE'] == 'Yes'\
or atr['MTNBIKE'] == 'Yes' else 'no'
props['horse'] = 'yes' if atr['EQUESTRIAN'] == 'Yes' else 'no'
props['ski'] = 'no'
# spec: "yes", "no", "permissive", "designated"
props['wheelchair'] = 'yes' if atr['ACCESSIBLE'] == 'Accessible' else 'no'
props['osm_tags'] = 'surface='+atr['TRLSURFACE']+';width='+atr['WIDTH']
# Assumes single part geometry == our (RLIS) trails.shp
n_geom = []
geom = sr.shape.__geo_interface__
if geom['type'] !='LineString':
print 'Encountered multipart...skipping'
continue
for point in geom['coordinates']:
n_geom.append(pyproj.transform(ORSP, WGS84, point[0], point[1]))
segment= collections.OrderedDict()
segment['type']='Feature'
segment['properties'] = props
segment['geometry'] = {"type":"LineString", "coordinates":n_geom}
trail_segments.append(segment)
if atr['TRAILNAME'] != None and ' ' not in atr['TRAILNAME']:
if len([x for x in named_trails if x["atomic_name"]==atr['TRAILNAME']+'|'+atr['COUNTY']])==0:
named_trails.append({'atomic_name': atr['TRAILNAME']+'|'+atr['COUNTY'], 'name':atr['TRAILNAME'],'segment_ids':[atr['TRAILID']]})
else:
[x for x in named_trails if x["atomic_name"]==atr['TRAILNAME']+'|'+atr['COUNTY']][0]['segment_ids'].append(atr['TRAILID'])
if atr['SYSTEMNAME'] != None and ' ' not in atr['SYSTEMNAME']:
if len([x for x in named_trails if x['atomic_name']==atr['SYSTEMNAME']])==0:
named_trails.append({'atomic_name': atr['SYSTEMNAME'], 'name':atr['SYSTEMNAME'],'segment_ids':[atr['TRAILID']]})
else:
[x for x in named_trails if x["atomic_name"]==atr['SYSTEMNAME']][0]['segment_ids'].append(atr['TRAILID'])
if atr['SHAREDNAME'] != None and ' ' not in atr['SHAREDNAME']:
if len([x for x in named_trails if x['atomic_name']==atr['SHAREDNAME']])==0:
named_trails.append({'atomic_name': atr['SHAREDNAME'], 'name':atr['SHAREDNAME'],'segment_ids':[atr['TRAILID']]})
else:
[x for x in named_trails if x["atomic_name"]==atr['SHAREDNAME']][0]['segment_ids'].append(atr['TRAILID'])
#Release the trails shapefile
reader = None
#step 1
#remove duplicate geometries in named_trails
all_arrays = []
for trail in named_trails: all_arrays.append(trail['segment_ids'])
#identify duplicate geometries
duplicates = [x for x in named_trails if len([y for y in all_arrays if compare_segment_arrays(x['segment_ids'],y)])>1]
glob_segs = None
counter = 0
for dup in duplicates:
if glob_segs is None or not compare_segment_arrays(dup['segment_ids'],glob_segs):
#find ur buddy
d = [x for x in duplicates if compare_segment_arrays(x['segment_ids'],dup['segment_ids'])]
glob_segs = dup['segment_ids']
to_remove = [x for x in d if '|' in x['atomic_name']]
if len(to_remove) == 1:
named_trails.remove(to_remove[0])
else:
print 'no piped atomic name... I dunno'
#step 2 - remove atomically stored trails (with county) that are pure
# subsets of a regional trail superset
glob_name = None
for trail in named_trails:
if glob_name is None or trail['name'] != glob_name:
dups = [x for x in named_trails if x['name']==trail['name']]
glob_name = trail['name']
#determine the dup with the most segs *heinous*
superset = max(enumerate(dups), key = lambda tup: len(tup[1]['segment_ids']))
superitem = [x for x in dups if x==superset[1]][0]
for dup in dups:
if len(dup['segment_ids']) != len(superitem['segment_ids']):
foo =is_subset(dup['segment_ids'], superitem['segment_ids'])
if foo and '|' in dup['atomic_name']:
#print 'Removed '+dup['atomic_name'] + ' from named_trails'
named_trails.remove(dup)
glob_name = trail['name']
#step 3 - remove atomically stored trails (with county) that are
# *impure* subsets of a regional trail superset
#So let's look for where the name matches the atomic name of an existing
#named trail - the assumption being that the atomic name of a regional
#trail will not include the pipe '|' and county
to_delete=[]
for trail in named_trails:
if '|' in trail['atomic_name']:
for test_trail in named_trails:
if trail['name'] == test_trail['atomic_name']:
#print trail['name'] + ' combined with regional trail'
#Insert whatever segments in trail that aren't in
#test_trail
for segment in trail['segment_ids']:
if segment not in test_trail['segment_ids']:
test_trail['segment_ids'].append(segment)
#append to to_delete
to_delete.append(trail)
#delete
for trail in to_delete:
named_trails.remove(trail)
#step 4 - assign named trail id from reference table
for trail in named_trails:
if '|' in trail['atomic_name']:
county = trail['atomic_name'].split('|')[1].strip()
name = trail['atomic_name'].split('|')[0].strip()
else: #don't need the county == blank
name = trail['atomic_name']
county = ''
id= [x for x in NAMED_TRAIL_IDS if x[1]==county and x[2]==name]
if len(id)==0:
print '*' +name+' || '+ county # no id in named_trails
else:
[x for x in named_trails if x['atomic_name']==trail['atomic_name']][0]['named_trail_id'] = id[0]
#step 5 - remove atomic name
for n in named_trails:
n.pop('atomic_name')
print ("Completed trails")
return trail_segments, named_trails
def process_areas():
# read the parks shapefile
reader = shapefile.Reader(os.getcwd()+'/src/orca_sites.shp') #this is actually ORCA_sites_beta
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
areas = []
counter = 0
for sr in reader.shapeRecords():
# if counter == 1000: break #Take the 1st 10,000 features, ORCA is a supermassive YKW
atr = dict(zip(field_names, sr.record))
# if atr['STATUS'] == 'Closed': #We don't want any closed sites to show up.
# continue
"""
SELECT *
FROM orca
WHERE county IN ( 'Clackamas', 'Multnomah', 'Washington' )
AND ( ( ownlev1 IN ( 'Private', 'Non-Profits' )
AND ( unittype IN ( 'Natural Area', 'Other' )
AND recreation = 'Yes' )
OR conservation = 'High' )
OR ( ownlev1 NOT IN ( 'Private', 'Non-Profits' )
AND ( unittype = 'Other'
AND ( recreation = 'Yes'
OR conservation IN ( 'High', 'Medium' ) )
OR unittype = 'Natural Area' ) )
OR ( ownlev2 = 'Non-profit Conservation' )
OR ( unittype = 'Park' ) )
"""
# if atr['COUNTY'] in ['Clackamas', 'Multnomah', 'Washington'] and ((atr['OWNLEV1'] in ['Private', 'Non-Profits'] and (atr['UNITTYPE'] in ['Natural Area', 'Other'] and atr['RECREATION']=='Yes') or atr['CONSERVATI']=='High') or (atr['OWNLEV1'] not in ['Private', 'Non-Profits'] and (atr['UNITTYPE']== 'Other' and (atr['RECREATION']=='Yes' or atr['CONSERVATI'] in ['High', 'Medium']) or atr['UNITTYPE'] == 'Natural Area') ) or atr['OWNLEV2'] == 'Non-profit Conservation' or atr['UNITTYPE']== 'Park'):
if atr['TYPE'] == 'Park and/or Natural Area':
props = collections.OrderedDict()
# if atr['MANAGER'] not in stewards.iterkeys():
# m = hashlib.sha224(atr['MANAGER']).hexdigest()
# agency_id = str(int(m[-6:], 16))
# stewards[atr['MANAGER']] = agency_id
geom = sr.shape.__geo_interface__
if geom['type'] == 'MultiPolygon':
polys=[]
for poly in geom['coordinates']:
rings = []
for ring in poly:
n_geom = []
for point in ring:
n_geom.append(pyproj.transform(ORSP, WGS84, point[0], point[1]))
rings.append(n_geom)
polys.append(rings)
new_geom = {"type":"MultiPolygon", "coordinates":polys}
else:
rings = []
for ring in geom['coordinates']:
n_geom = []
for point in ring:
n_geom.append(pyproj.transform(ORSP, WGS84, point[0], point[1]))
rings.append(n_geom)
new_geom = {"type":"Polygon", "coordinates":rings}
props['name'] = atr['SITENAME']
props['id'] = int(atr['DISSOLVEID'])
if props['id'] in ORCA_SITES:
props['steward_id'] = ORCA_SITES[props['id']]
else:
props['steward_id'] = 5127
props['url'] = ''
props['osm_tags'] = ''
_area= collections.OrderedDict()
_area['type']='Feature'
_area['properties'] = props
_area['geometry'] = new_geom
areas.append(_area)
counter +=1
# free up the shp file.
reader = None
return areas
if __name__ == "__main__":
#####################################################
# Download data from RLIS
#
# download(TRAILS_URL, 'trails')
#download(ORCA_URL, 'orca')
#
#####################################################
#####################################################
# Load Stewards into Python object
#
with open(os.getcwd() + "/output/stewards.csv", mode='r') as infile:
reader = csv.DictReader(infile, ['steward_id', 'name', 'url', 'phone', 'address','publisher', 'license']) #stewards.csv header
reader.next()
for row in reader:
STEWARDS.append(row)
for row in STEWARDS:
row['steward_id'] = int(row['steward_id'])
print "Parsed Stewards"
#
#
#####################################################
#####################################################
# Load Named Trails into Python object
#
with open(os.getcwd() + "/ref/named_trails_lookup.csv", mode='r') as infile:
reader = csv.reader(infile)
reader.next() #skip header line
NAMED_TRAIL_IDS = list(reader)
for row in NAMED_TRAIL_IDS:
row[0] = int(row[0])
print "Parsed Named Trail IDs"
#####################################################
# Load Named Trails into Python object
#
with open(os.getcwd() + "/ref/orca_sites_to_steward.csv", mode='r') as infile:
reader = csv.reader(infile)
reader.next() #skip header line
for row in reader:
# print row
ORCA_SITES[int(row[0])] = int(row[1])
print "Parsed ORCA Sites"
#
#
#####################################################
#####################################################
# Load objects and arrays with calls to core functions
#
trail_segments, named_trails = process_trail_segments()
######################################################
# write named_trails.csv
#
named_trails_out = open(os.getcwd() + "/output/named_trails.csv", "w")
named_trails_out.write('"name","segment_ids","id","description","part_of"\n')
for named_trail in named_trails:
try: #horrible hack for trails that are in the current (2014 Q4) Trails download in RLIS
#discovery that are not in named_trails.csv because they were removed or whatever...
named_trails_out.write(named_trail['name']+","+ ";".join(str(int(x)) for x in named_trail['segment_ids'])+","+ str(named_trail['named_trail_id'][0]) + ",,\n")
except:
pass
named_trails_out.close()
print 'Created named_trails.csv'
#
########################################################
########################################################
# write trail_segments.geojson
#
trail_segments_out = open(os.getcwd() + "/output/trail_segments.geojson", "w")
trail_segments_out.write(json.dumps({"type": "FeatureCollection",\
"features": trail_segments}, indent=2) + "\n")
trail_segments_out.close()
print 'Created trail_segments.geojson'
#
########################################################
# sys.exit(1)
areas= process_areas()
########################################################
# write areas.geojson
#
areas_out = open(os.getcwd()+"/output/areas.geojson", "w")
areas_out.write(json.dumps({"type": "FeatureCollection",\
"features": areas}, indent=2, encoding="Latin1") + "\n")
areas_out.close()
print 'Created areas.geojson'
#
########################################################
print 'Process complete'
|
trailheadlabs/RLIS_Trails_to_OT
|
RLISTrails2OT.py
|
Python
|
mit
| 17,351
|
[
"ORCA"
] |
874f914518ab6e5a09627a033c8477b5d6dd1779f2398c151b6ab66abd3b2a80
|
#!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authenticating clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import datetime
import json
import os.path
from distutils import log
from distutils.core import Command
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
from setuptools import setup, find_packages
from subprocess import check_output
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
try:
__import__(m)
except ImportError:
pass
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
IS_LIGHT_BUILD = os.environ.get('SENTRY_LIGHT_BUILD') == '1'
dev_requires = [
'flake8>=2.0,<2.1',
'Click',
]
tests_require = [
'blist', # used by cassandra
'casscache',
'cqlsh',
'datadog',
'elasticsearch',
'httpretty',
'pytest-cov>=1.4',
'pytest-timeout',
'python-coveralls',
'responses',
]
install_requires = [
'BeautifulSoup>=3.2.1,<3.3.0',
'celery>=3.1.8,<3.2.0',
'cssutils>=0.9.9,<0.10.0',
'Django>=1.6.0,<1.7',
'django-bitfield>=1.7.0,<1.8.0',
'django-crispy-forms>=1.4.0,<1.5.0',
'django-debug-toolbar>=1.3.2,<1.4.0',
'django-paging>=0.2.5,<0.3.0',
'django-jsonfield>=0.9.13,<0.9.14',
'django-picklefield>=0.3.0,<0.4.0',
'django-recaptcha>=1.0.4,<1.1.0',
'django-social-auth>=0.7.28,<0.8.0',
'django-sudo>=1.1.3,<1.2.0',
'django-templatetag-sugar>=0.1.0',
'djangorestframework>=2.3.8,<2.4.0',
'email-reply-parser>=0.2.0,<0.3.0',
'enum34>=0.9.18,<0.10.0',
'exam>=0.5.1',
'gunicorn>=19.2.1,<20.0.0',
'ipaddr>=2.1.11,<2.2.0',
'logan>=0.7.1,<0.8.0',
'lxml>=3.4.1',
'mock>=0.8.0,<1.1',
'markdown>=2.4.1,<2.5.0',
'petname>=1.7,<1.8',
'progressbar>=2.2,<2.4',
'pytest>=2.6.4,<2.7.0',
'pytest-django>=2.6.0,<2.7.0',
'python-dateutil>=2.0.0,<3.0.0',
'python-memcached>=1.53,<2.0.0',
'PyYAML>=3.11,<4.0',
'raven>=5.3.0',
'redis>=2.10.3,<2.11.0',
'requests%s>=2.7.0,<2.8.0' % (not IS_LIGHT_BUILD and '[security]' or ''),
'simplejson>=3.2.0,<3.9.0',
'six>=1.6.0,<2.0.0',
'setproctitle>=1.1.7,<1.2.0',
'statsd>=3.1.0,<3.2.0',
'South==1.0.1',
'toronado>=0.0.4,<0.1.0',
'urllib3>=1.11,<1.12',
'rb>=1.1.0,<2.0.0',
]
postgres_requires = [
'psycopg2>=2.5.0,<2.6.0',
]
postgres_pypy_requires = [
'psycopg2cffi',
]
mysql_requires = [
'MySQL-python>=1.2.0,<1.3.0',
]
class DevelopWithBuildStatic(develop):
def install_for_development(self):
if not IS_LIGHT_BUILD:
self.run_command('build_static')
return develop.install_for_development(self)
class SdistWithBuildStatic(sdist):
def make_release_tree(self, *a, **kw):
dist_path = self.distribution.get_fullname()
sdist.make_release_tree(self, *a, **kw)
self.reinitialize_command('build_static', work_path=dist_path)
self.run_command('build_static')
with open(os.path.join(dist_path, 'sentry-package.json'), 'w') as fp:
json.dump({
'createdAt': datetime.datetime.utcnow().isoformat() + 'Z',
}, fp)
class BuildStatic(Command):
user_options = [
('work-path=', 'w',
"The working directory for source files. Defaults to ."),
]
def initialize_options(self):
self.work_path = None
def finalize_options(self):
if self.work_path is None:
self.work_path = ROOT
def run(self):
work_path = self.work_path
log.info("initializing git submodules")
check_output(['git', 'submodule', 'init'], cwd=work_path)
check_output(['git', 'submodule', 'update'], cwd=work_path)
log.info("running [npm install --quiet]")
check_output(['npm', 'install', '--quiet'], cwd=work_path)
log.info("running [gulp dist]")
check_output([os.path.join('node_modules', '.bin', 'gulp'), 'dist:css'],
cwd=work_path)
# Enable React production optimization
os.environ['NODE_ENV'] = 'production'
log.info("running [webpack]")
check_output([os.path.join('node_modules', '.bin', 'webpack'), '-p'],
cwd=work_path)
class SmartInstall(install):
"""
Installs Sentry into the Python environment.
If the package indicator is missing, this will also force a run of
`build_static` which is required for JavaScript assets and other things.
"""
def _needs_static(self):
return not os.path.exists(os.path.join(ROOT, 'sentry-package.json'))
def run(self):
if self._needs_static():
self.run_command('build_static')
install.run(self)
setup(
name='sentry',
version='8.0.0.dev0',
author='David Cramer',
author_email='dcramer@gmail.com',
url='https://www.getsentry.com',
description='A realtime logging and aggregation server.',
long_description=open('README.rst').read(),
package_dir={'': 'src'},
packages=find_packages('src'),
zip_safe=False,
install_requires=install_requires,
extras_require={
'tests': tests_require,
'dev': dev_requires,
'postgres': install_requires + postgres_requires,
'postgres_pypy': install_requires + postgres_pypy_requires,
'mysql': install_requires + mysql_requires,
},
cmdclass={
'build_static': BuildStatic,
'develop': DevelopWithBuildStatic,
'sdist': SdistWithBuildStatic,
'install': SmartInstall,
},
license='BSD',
include_package_data=True,
entry_points={
'console_scripts': [
'sentry = sentry.utils.runner:main',
],
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: POSIX :: Linux',
'Topic :: Software Development'
],
)
|
ngonzalvez/sentry
|
setup.py
|
Python
|
bsd-3-clause
| 6,988
|
[
"GULP"
] |
534e1a28d9742220910e85f014234fe0902582361018332aea453d1d15a14c55
|
"""Validate dependencies."""
import ast
from pathlib import Path
from typing import Dict, Set
from homeassistant.requirements import DISCOVERY_INTEGRATIONS
from .model import Integration
class ImportCollector(ast.NodeVisitor):
"""Collect all integrations referenced."""
def __init__(self, integration: Integration):
"""Initialize the import collector."""
self.integration = integration
self.referenced: Dict[Path, Set[str]] = {}
# Current file or dir we're inspecting
self._cur_fil_dir = None
def collect(self) -> None:
"""Collect imports from a source file."""
for fil in self.integration.path.glob("**/*.py"):
if not fil.is_file():
continue
self._cur_fil_dir = fil.relative_to(self.integration.path)
self.referenced[self._cur_fil_dir] = set()
self.visit(ast.parse(fil.read_text()))
self._cur_fil_dir = None
def _add_reference(self, reference_domain: str):
"""Add a reference."""
self.referenced[self._cur_fil_dir].add(reference_domain)
def visit_ImportFrom(self, node):
"""Visit ImportFrom node."""
if node.module is None:
return
if node.module.startswith("homeassistant.components."):
# from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
# from homeassistant.components.logbook import bla
self._add_reference(node.module.split(".")[2])
elif node.module == "homeassistant.components":
# from homeassistant.components import sun
for name_node in node.names:
self._add_reference(name_node.name)
def visit_Import(self, node):
"""Visit Import node."""
# import homeassistant.components.hue as hue
for name_node in node.names:
if name_node.name.startswith("homeassistant.components."):
self._add_reference(name_node.name.split(".")[2])
def visit_Attribute(self, node):
"""Visit Attribute node."""
# hass.components.hue.async_create()
# Name(id=hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
# self.hass.components.hue.async_create()
# Name(id=self)
# .Attribute(attr=hass) or .Attribute(attr=_hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
if (
isinstance(node.value, ast.Attribute)
and node.value.attr == "components"
and (
(
isinstance(node.value.value, ast.Name)
and node.value.value.id == "hass"
)
or (
isinstance(node.value.value, ast.Attribute)
and node.value.value.attr in ("hass", "_hass")
)
)
):
self._add_reference(node.attr)
else:
# Have it visit other kids
self.generic_visit(node)
ALLOWED_USED_COMPONENTS = {
# Internal integrations
"alert",
"automation",
"conversation",
"device_automation",
"frontend",
"group",
"hassio",
"homeassistant",
"input_boolean",
"input_datetime",
"input_number",
"input_select",
"input_text",
"onboarding",
"persistent_notification",
"person",
"script",
"shopping_list",
"sun",
"system_health",
"system_log",
"timer",
"webhook",
"websocket_api",
"zone",
# Entity integrations with platforms
"alarm_control_panel",
"binary_sensor",
"climate",
"cover",
"device_tracker",
"fan",
"humidifier",
"image_processing",
"light",
"lock",
"media_player",
"scene",
"sensor",
"switch",
"vacuum",
"water_heater",
# Other
"mjpeg", # base class, has no reqs or component to load.
"stream", # Stream cannot install on all systems, can be imported without reqs.
}
IGNORE_VIOLATIONS = {
# Has same requirement, gets defaults.
("sql", "recorder"),
# Sharing a base class
("openalpr_cloud", "openalpr_local"),
("lutron_caseta", "lutron"),
("ffmpeg_noise", "ffmpeg_motion"),
# Demo
("demo", "manual"),
("demo", "openalpr_local"),
# This should become a helper method that integrations can submit data to
("websocket_api", "lovelace"),
("websocket_api", "shopping_list"),
"logbook",
}
def calc_allowed_references(integration: Integration) -> Set[str]:
"""Return a set of allowed references."""
allowed_references = (
ALLOWED_USED_COMPONENTS
| set(integration.manifest.get("dependencies", []))
| set(integration.manifest.get("after_dependencies", []))
)
# Discovery requirements are ok if referenced in manifest
for check_domain, to_check in DISCOVERY_INTEGRATIONS.items():
if any(check in integration.manifest for check in to_check):
allowed_references.add(check_domain)
return allowed_references
def find_non_referenced_integrations(
integrations: Dict[str, Integration],
integration: Integration,
references: Dict[Path, Set[str]],
):
"""Find intergrations that are not allowed to be referenced."""
allowed_references = calc_allowed_references(integration)
referenced = set()
for path, refs in references.items():
if len(path.parts) == 1:
# climate.py is stored as climate
cur_fil_dir = path.stem
else:
# climate/__init__.py is stored as climate
cur_fil_dir = path.parts[0]
is_platform_other_integration = cur_fil_dir in integrations
for ref in refs:
# We are always allowed to import from ourselves
if ref == integration.domain:
continue
# These references are approved based on the manifest
if ref in allowed_references:
continue
# Some violations are whitelisted
if (integration.domain, ref) in IGNORE_VIOLATIONS:
continue
# If it's a platform for another integration, the other integration is ok
if is_platform_other_integration and cur_fil_dir == ref:
continue
# These have a platform specified in this integration
if not is_platform_other_integration and (
(integration.path / f"{ref}.py").is_file()
# Platform dir
or (integration.path / ref).is_dir()
):
continue
referenced.add(ref)
return referenced
def validate_dependencies(
integrations: Dict[str, Integration], integration: Integration
):
"""Validate all dependencies."""
# Some integrations are allowed to have violations.
if integration.domain in IGNORE_VIOLATIONS:
return
# Find usage of hass.components
collector = ImportCollector(integration)
collector.collect()
for domain in sorted(
find_non_referenced_integrations(
integrations, integration, collector.referenced
)
):
integration.add_error(
"dependencies",
f"Using component {domain} but it's not in 'dependencies' "
"or 'after_dependencies'",
)
def validate(integrations: Dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check for non-existing dependencies
for integration in integrations.values():
if not integration.manifest:
continue
validate_dependencies(integrations, integration)
if config.specific_integrations:
continue
# check that all referenced dependencies exist
after_deps = integration.manifest.get("after_dependencies", [])
for dep in integration.manifest.get("dependencies", []):
if dep in after_deps:
integration.add_error(
"dependencies",
f"Dependency {dep} is both in dependencies and after_dependencies",
)
if dep not in integrations:
integration.add_error(
"dependencies", f"Dependency {dep} does not exist"
)
|
nkgilley/home-assistant
|
script/hassfest/dependencies.py
|
Python
|
apache-2.0
| 8,331
|
[
"VisIt"
] |
566f1ffbf6435c89d8fba22b19a0b16e95c91100b8ac9d8bb8f7deb9abff71cf
|
import numpy as np
import scipy.sparse as ss
from tomviz import utils
import tomviz.operators
import time
class ReconARTOperator(tomviz.operators.CancelableOperator):
def transform_scalars(self, dataset, Niter=1):
"""
3D Reconstruction using Algebraic Reconstruction Technique (ART)
"""
self.progress.maximum = 1
# Get Tilt angles
tiltAngles = utils.get_tilt_angles(dataset)
# Get Tilt Series
tiltSeries = utils.get_array(dataset)
        if tiltSeries is None:
            raise RuntimeError("No scalars found!")
        (Nslice, Nray, Nproj) = tiltSeries.shape
# Generate measurement matrix
self.progress.message = 'Generating measurement matrix'
A = parallelRay(Nray, 1.0, tiltAngles, Nray, 1.0) #A is a sparse matrix
recon = np.empty([Nslice, Nray, Nray], dtype=float, order='F')
A = A.todense()
(Nslice, Nray, Nproj) = tiltSeries.shape
(Nrow, Ncol) = A.shape
rowInnerProduct = np.zeros(Nrow)
row = np.zeros(Ncol)
f = np.zeros(Ncol) # Placeholder for 2d image
beta = 1.0
# Calculate row inner product
for j in range(Nrow):
row[:] = A[j, ].copy()
rowInnerProduct[j] = np.dot(row, row)
self.progress.maximum = Nslice
step = 0
t0 = time.time()
etcMessage = 'Estimated time to complete: n/a'
counter = 1
for s in range(Nslice):
if self.canceled:
return
f[:] = 0
b = tiltSeries[s, :, :].transpose().flatten()
for i in range(Niter):
self.progress.message = 'Slice No.%d/%d, iteration No.%d/%d. ' \
% (s + 1, Nslice, i + 1, Niter) + etcMessage
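                # ART (Kaczmarz) update for this slice: for each ray j, project the
                # current estimate f onto the hyperplane defined by that ray's
                # equation, scaled by the relaxation factor beta:
                #   f <- f + beta * (b[j] - <A[j], f>) / ||A[j]||^2 * A[j]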
for j in range(Nrow):
row[:] = A[j, ].copy()
row_f_product = np.dot(row, f)
a = (b[j] - row_f_product) / rowInnerProduct[j]
f = f + row * a * beta
timeLeft = (time.time() - t0) / counter * \
(Nslice * Niter - counter)
counter += 1
timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
timeLeftHour, timeLeftMin, timeLeftSec)
recon[s, :, :] = f.reshape((Nray, Nray))
step += 1
self.progress.value = step
from vtk import vtkImageData
# Set up the output dataset
recon_dataset = vtkImageData()
recon_dataset.CopyStructure(dataset)
utils.set_array(recon_dataset, recon)
utils.mark_as_volume(recon_dataset)
returnValues = {}
returnValues["reconstruction"] = recon_dataset
return returnValues
def parallelRay(Nside, pixelWidth, angles, Nray, rayWidth):
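    # Build the sparse measurement matrix A for a parallel-beam geometry: each row
    # corresponds to one ray of one projection angle, each column to one pixel of
    # the Nside x Nside grid, and each entry is the length of that ray's
    # intersection with that pixel (computed by tracing the ray against grid lines).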
    # Suppress the warnings that pop up when dividing by zero
np.seterr(all='ignore')
Nproj = angles.size # Number of projections
# Ray coordinates at 0 degrees.
offsets = np.linspace(-(Nray * 1.0 - 1) / 2,
(Nray * 1.0 - 1) / 2, Nray) * rayWidth
# Intersection lines/grid Coordinates
xgrid = np.linspace(-Nside * 0.5, Nside * 0.5, Nside + 1) * pixelWidth
ygrid = np.linspace(-Nside * 0.5, Nside * 0.5, Nside + 1) * pixelWidth
# Initialize vectors that contain matrix elements and corresponding
# row/column numbers
rows = np.zeros(2 * Nside * Nproj * Nray)
cols = np.zeros(2 * Nside * Nproj * Nray)
vals = np.zeros(2 * Nside * Nproj * Nray)
idxend = 0
for i in range(0, Nproj): # Loop over projection angles
ang = angles[i] * np.pi / 180.
# Points passed by rays at current angles
xrayRotated = np.cos(ang) * offsets
yrayRotated = np.sin(ang) * offsets
xrayRotated[np.abs(xrayRotated) < 1e-8] = 0
yrayRotated[np.abs(yrayRotated) < 1e-8] = 0
a = -np.sin(ang)
a = rmepsilon(a)
b = np.cos(ang)
b = rmepsilon(b)
for j in range(0, Nray): # Loop rays in current projection
            # Parametric ray through (xrayRotated[j], yrayRotated[j]) with direction
            # (a, b) = (-sin(ang), cos(ang)); find the parameter values t at which
            # the ray crosses each vertical (xgrid) and horizontal (ygrid) grid line.
t_xgrid = (xgrid - xrayRotated[j]) / a
y_xgrid = b * t_xgrid + yrayRotated[j]
t_ygrid = (ygrid - yrayRotated[j]) / b
x_ygrid = a * t_ygrid + xrayRotated[j]
# Collect all points
t_grid = np.append(t_xgrid, t_ygrid)
xx = np.append(xgrid, x_ygrid)
yy = np.append(y_xgrid, ygrid)
# Sort the coordinates according to intersection time
I = np.argsort(t_grid)
xx = xx[I]
yy = yy[I]
# Get rid of points that are outside the image grid
Ix = np.logical_and(xx >= -Nside / 2.0 * pixelWidth,
xx <= Nside / 2.0 * pixelWidth)
Iy = np.logical_and(yy >= -Nside / 2.0 * pixelWidth,
yy <= Nside / 2.0 * pixelWidth)
I = np.logical_and(Ix, Iy)
xx = xx[I]
yy = yy[I]
            # If the ray passes through the image grid
if (xx.size != 0 and yy.size != 0):
# Get rid of double counted points
I = np.logical_and(np.abs(np.diff(xx)) <=
1e-8, np.abs(np.diff(yy)) <= 1e-8)
I2 = np.zeros(I.size + 1)
I2[0:-1] = I
xx = xx[np.logical_not(I2)]
yy = yy[np.logical_not(I2)]
# Calculate the length within the cell
length = np.sqrt(np.diff(xx)**2 + np.diff(yy)**2)
#Count number of cells the ray passes through
numvals = length.size
# Remove the rays that are on the boundary of the box in the
# top or to the right of the image grid
check1 = np.logical_and(b == 0, np.absolute(
yrayRotated[j] - Nside / 2 * pixelWidth) < 1e-15)
check2 = np.logical_and(a == 0, np.absolute(
xrayRotated[j] - Nside / 2 * pixelWidth) < 1e-15)
check = np.logical_not(np.logical_or(check1, check2))
if np.logical_and(numvals > 0, check):
# Calculate corresponding indices in measurement matrix
# First, calculate the mid points coord. between two
# adjacent grid points
midpoints_x = rmepsilon(0.5 * (xx[0:-1] + xx[1:]))
midpoints_y = rmepsilon(0.5 * (yy[0:-1] + yy[1:]))
#Calculate the pixel index for mid points
pixelIndicex = \
(np.floor(Nside / 2.0 - midpoints_y / pixelWidth)) * \
Nside + (np.floor(midpoints_x /
pixelWidth + Nside / 2.0))
# Create the indices to store the values to the measurement
# matrix
idxstart = idxend
idxend = idxstart + numvals
idx = np.arange(idxstart, idxend)
# Store row numbers, column numbers and values
rows[idx] = i * Nray + j
cols[idx] = pixelIndicex
vals[idx] = length
else:
print("Ray No. %d at %f degree is out of image grid!" %
(j + 1, angles[i]))
# Truncate excess zeros.
rows = rows[:idxend]
cols = cols[:idxend]
vals = vals[:idxend]
A = ss.coo_matrix((vals, (rows, cols)), shape=(Nray * Nproj, Nside**2))
return A
def rmepsilon(input):
if (input.size > 1):
input[np.abs(input) < 1e-10] = 0
else:
if np.abs(input) < 1e-10:
input = 0
return input
|
cryos/tomviz
|
tomviz/python/Recon_ART.py
|
Python
|
bsd-3-clause
| 7,960
|
[
"VTK"
] |
1ab2b4d663051c72b1f168a9de05667def5352a1cbfa772891a0ec4ee5a7de5a
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from os.path import abspath, dirname, join, exists
src_dir = abspath(dirname(__file__))
data_dir = join(src_dir, 'data')
licenses_data_dir = join(data_dir, 'licenses')
rules_data_dir = join(data_dir, 'rules')
|
pierrelapointe/scancode-toolkit
|
src/licensedcode/__init__.py
|
Python
|
apache-2.0
| 1,570
|
[
"VisIt"
] |
44b5bd252594c5857d42c3710ab50f9c507b9cdd59e91e15eff7026b26b347fe
|
from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from django.shortcuts import render
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, require_server_admin, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Callable, Dict, List, Optional, Set, Text, \
Tuple, Type, Union
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render(request,
'analytics/stats.html',
context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
if chart_name == 'number_of_humans':
stat = COUNT_STATS['realm_active_humans::day']
tables = [RealmCount]
subgroup_to_label = {None: 'human'} # type: Dict[Optional[str], str]
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == 'messages_sent_over_time':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount, UserCount]
subgroup_to_label = {'false': 'human', 'true': 'bot'}
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroup_to_label = {'public_stream': 'Public streams',
'private_stream': 'Private streams',
'private_message': 'Private messages',
'huddle_message': 'Group private messages'}
labels_sort_function = lambda data: sort_by_totals(data['realm'])
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
# Note that the labels are further re-written by client_label_map
subgroup_to_label = {str(id): name for id, name in Client.objects.values_list('id', 'name')}
labels_sort_function = sort_client_labels
include_empty_subgroups = False
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None and end is not None and start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
realm = user_profile.realm
if start is None:
start = realm.date_created
if end is None:
end = last_successful_fill(stat.property)
if end is None or start > end:
logging.warning("User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation time of realm) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?" % (realm.string_id, start, end))
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency}
for table in tables:
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroup_to_label, include_empty_subgroups)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroup_to_label, include_empty_subgroups)
if labels_sort_function is not None:
data['display_order'] = labels_sort_function(data)
else:
data['display_order'] = None
return json_success(data=data)
def sort_by_totals(value_arrays):
# type: (Dict[str, List[int]]) -> List[str]
totals = [(sum(values), label) for label, values in value_arrays.items()]
totals.sort(reverse=True)
return [label for total, label in totals]
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data):
# type: (Dict[str, Dict[str, List[int]]]) -> List[str]
realm_order = sort_by_totals(data['realm'])
user_order = sort_by_totals(data['user'])
label_sort_values = {} # type: Dict[str, float]
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i-.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(),
key=lambda x: x[1])]
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise AssertionError("Unknown table: %s" % (table,))
def client_label_map(name):
# type: (str) -> str
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipElectron":
return "Desktop app"
if name == "ZulipAndroid":
return "Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "Mobile app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip"):-len("Webhook")] + " webhook"
return name
def rewrite_client_arrays(value_arrays):
# type: (Dict[str, List[int]]) -> Dict[str, List[int]]
mapped_arrays = {} # type: Dict[str, List[int]]
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroup_to_label, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], int, List[datetime], Dict[Optional[str], str], bool) -> Dict[str, List[int]]
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in subgroup_to_label.items():
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS['messages_sent:client:day']:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
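    # Per-realm message counts for each of the last eight days (human senders only,
    # excluding mirror/monitoring clients), rendered as a row of HTML table cells
    # with the daily minimum and maximum highlighted.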
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
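    # One summary row per realm with any activity in the last two weeks: active
    # users in the last day, at-risk users (last seen between one and seven days
    # ago), and total human and bot profile counts, ordered by active user count.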
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@require_server_admin
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title, is_home=True),
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    #: `Any` used above should be `Union[int, datetime]`.
    #: However, the current version of `Union` does not work inside nested functions.
    #: We could use something like
    #: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`,
    #: but that would require carrying this long `Union` throughout the inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = timezone_now() - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Any]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@require_server_admin
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
title = realm_str
return render(
request,
'analytics/activity.html',
context=dict(data=data, realm_link=None, title=title),
)
@require_server_admin
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title),
)
|
vaidap/zulip
|
analytics/views.py
|
Python
|
apache-2.0
| 36,396
|
[
"VisIt"
] |
96ab6d693523945d48d48a5e43a0e11a1695f69ee341175ccdf3b7c44a60a9db
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis as mda
import pytest
from MDAnalysis.analysis import nuclinfo
from MDAnalysisTests.datafiles import RNA_PSF, RNA_PDB
from numpy.testing import (
assert_almost_equal,
assert_allclose,
)
@pytest.fixture(scope='module')
def u():
return mda.Universe(RNA_PSF, RNA_PDB)
@pytest.mark.parametrize('i, bp, seg1, seg2, expected_value', (
( 1, 2, 'RNAA', 'RNAA', 4.3874702),
(22, 23, 'RNAA', 'RNAA', 4.1716404),
))
def test_wc_pair(u, i, bp, seg1, seg2, expected_value):
val = nuclinfo.wc_pair(u, i, bp, seg1=seg1, seg2=seg2)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('i, bp, seg1, seg2, expected_value', (
( 3, 17, 'RNAA', 'RNAA', 15.06506),
(20, 5, 'RNAA', 'RNAA', 3.219116),
))
def test_minor_pair(u, i, bp, seg1, seg2, expected_value):
val = nuclinfo.minor_pair(u, i, bp, seg1=seg1, seg2=seg2)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('i, bp, seg1, seg2, expected_value', (
(2, 12, 'RNAA', 'RNAA', 26.884272),
(5, 9, 'RNAA', 'RNAA', 13.578535),
))
def test_major_pair(u, i, bp, seg1, seg2, expected_value):
val = nuclinfo.major_pair(u, i, bp, seg1=seg1, seg2=seg2)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 9, 3.16497),
('RNAA', 21, 22.07721),
))
def test_phase_cp(u, seg, i, expected_value):
val = nuclinfo.phase_cp(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 1, 359.57580),
('RNAA', 11, 171.71645),
))
def test_phase_as(u, seg, i, expected_value):
val = nuclinfo.phase_as(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 5, [302.203802, 179.043077, 35.271411, 79.499729, 201.000393,
282.14321 , 210.709327]),
('RNAA', 21, [280.388619, 185.12919 , 56.616215, 64.87354 , 187.153367,
279.340915, 215.332144]),
))
def test_tors(u, seg, i, expected_value):
val = nuclinfo.tors(u, seg=seg, i=i)
assert_allclose(val, expected_value, rtol=1e-03)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 6, 279.15103),
('RNAA', 18, 298.09936),
))
def test_tors_alpha(u, seg, i, expected_value):
val = nuclinfo.tors_alpha(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 7, 184.20501),
('RNAA', 15, 169.70042),
))
def test_tors_beta(u, seg, i, expected_value):
val = nuclinfo.tors_beta(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 7, 52.72022),
('RNAA', 15, 54.59684),
))
def test_tors_gamma(u, seg, i, expected_value):
val = nuclinfo.tors_gamma(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 7, 84.80554),
('RNAA', 15, 82.00043),
))
def test_tors_delta(u, seg, i, expected_value):
val = nuclinfo.tors_delta(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 7, 200.40990),
('RNAA', 15, 210.96953),
))
def test_tors_eps(u, seg, i, expected_value):
val = nuclinfo.tors_eps(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 7, 297.84736),
('RNAA', 15, 330.24898)
))
def test_tors_zeta(u, seg, i, expected_value):
val = nuclinfo.tors_zeta(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 1, 178.37435),
('RNAA', 2, 202.03418),
('RNAA', 7, 200.91674),
('RNAA', 15, 209.32109),
))
def test_tors_chi(u, seg, i, expected_value):
val = nuclinfo.tors_chi(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('seg, i, expected_value', (
('RNAA', 20, 103.07024),
('RNAA', 5, 156.62223),
('RNAA', 7 , 77.94538),
('RNAA', 15, 130.18539),
))
def test_hydroxyl(u, seg, i, expected_value):
val = nuclinfo.hydroxyl(u, seg=seg, i=i)
assert_almost_equal(val, expected_value, decimal=3)
@pytest.mark.parametrize('bp1, bp2, i, seg1, seg2, seg3, expected_value', (
(16, 2, 3, 'RNAA', 'RNAA', 'RNAA', 314.69804),
(8, 9, 10, 'RNAA', 'RNAA', 'RNAA', 34.50106),
))
def test_pseudo_dihe_baseflip(u, bp1, bp2, i, seg1, seg2, seg3, expected_value):
val = nuclinfo.pseudo_dihe_baseflip(u, bp1, bp2, i, seg1, seg2, seg3)
assert_almost_equal(val, expected_value, decimal=3)
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_nuclinfo.py
|
Python
|
gpl-2.0
| 5,976
|
[
"MDAnalysis"
] |
dd3720c3941242df81e5990c077e131f4601c044e0f90aec32ae7436431a6cc8
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__ = '$Id$'
from tools.docco.rl_doc_utils import *
from reportlab.platypus.tableofcontents import TableOfContents
from datetime import datetime
import reportlab
from reportlab.rl_config import invariant
title("ReportLab PDF Library")
title("User Guide")
centred('ReportLab Version ' + reportlab.Version)
centred((datetime(2000,1,1,0,0,0) if invariant else datetime.now()).strftime('Document generated on %Y/%m/%d %H:%M:%S %Z'))
nextTemplate("TOC")
headingTOC()
toc = TableOfContents()
PS = ParagraphStyle
toc.levelStyles = [
PS(fontName='Times-Bold', fontSize=14, name='TOCHeading1', leftIndent=20, firstLineIndent=-20, spaceBefore=5, leading=16),
PS(fontSize=12, name='TOCHeading2', leftIndent=40, firstLineIndent=-20, spaceBefore=0, leading=12),
PS(fontSize=10, name='TOCHeading3', leftIndent=60, firstLineIndent=-20, spaceBefore=0, leading=12),
PS(fontSize=10, name='TOCHeading4', leftIndent=100, firstLineIndent=-20, spaceBefore=0, leading=12),
]
getStory().append(toc)
nextTemplate("Normal")
########################################################################
#
# Chapter 1
#
########################################################################
heading1("Introduction")
heading2("About this document")
disc("""This document is an introduction to the ReportLab PDF library.
Some previous programming experience
is presumed and familiarity with the Python Programming language is
recommended. If you are new to Python, we tell you in the next section
where to go for orientation.
""")
disc("""
This manual does not cover 100% of the features, but should explain all
the main concepts and help you get started, and point you at other
learning resources.
After working your way through this, you should be ready to begin
writing programs to produce sophisticated reports.
""")
disc("""In this chapter, we will cover the groundwork:""")
bullet("What is ReportLab all about, and why should I use it?")
bullet("What is Python?")
bullet("How do I get everything set up and running?")
todo("""
We need your help to make sure this manual is complete and helpful.
Please send any feedback to our user mailing list,
which is signposted from <a href="http://www.reportlab.com/">www.reportlab.com</a>.
""")
heading2("What is the ReportLab PDF Library?")
disc("""This is a software library that lets you directly
create documents in Adobe's Portable Document Format (PDF) using
the Python programming language. It also creates charts and data graphics
in various bitmap and vector formats as well as PDF.""")
disc("""PDF is the global standard for electronic documents. It
supports high-quality printing yet is totally portable across
platforms, thanks to the freely available Acrobat Reader. Any
application which previously generated hard copy reports or drove a printer
can benefit from making PDF documents instead; these can be archived,
emailed, placed on the web, or printed out the old-fashioned way.
However, the PDF file format is a complex
indexed binary format which is impossible to type directly.
The PDF format specification is more than 600 pages long and
PDF files must provide precise byte offsets -- a single extra
character placed anywhere in a valid PDF document can render it
invalid. This makes it harder to generate than HTML.""")
disc("""Most of the world's PDF documents have been produced
by Adobe's Acrobat tools, or rivals such as JAWS PDF Creator, which act
as 'print drivers'. Anyone wanting to automate PDF production would
typically use a product like Quark, Word or Framemaker running in a loop
with macros or plugins, connected to Acrobat. Pipelines of several
languages and products can be slow and somewhat unwieldy.
""")
disc("""The ReportLab library directly creates PDF based on
your graphics commands. There are no intervening steps. Your applications
can generate reports extremely fast - sometimes orders
of magnitude faster than traditional report-writing
tools. This approach is shared by several other libraries - PDFlib for C,
iText for Java, iTextSharp for .NET and others. However, the ReportLab library
differs in that it can work at much higher levels, with a full featured engine
for laying out documents complete with tables and charts. """)
disc("""In addition, because you are writing a program
in a powerful general purpose language, there are no
restrictions at all on where you get your data from,
how you transform it, and the kind of output
you can create. And you can reuse code across
whole families of reports.""")
disc("""The ReportLab library is expected to be useful
in at least the following contexts:""")
bullet("Dynamic PDF generation on the web")
bullet("High-volume corporate reporting and database publishing")
bullet("""An embeddable print engine for other applications, including
a 'report language' so that users can customize their own reports. <i>
This is particularly relevant to cross-platform apps which cannot
rely on a consistent printing or previewing API on each operating
system</i>.""")
bullet("""A 'build system' for complex documents with charts, tables
and text such as management accounts, statistical reports and
scientific papers """)
bullet("""Going from XML to PDF in one step""")
heading2("ReportLab's commercial software")
disc("""
The ReportLab library forms the foundation of our commercial solution for
PDF generation, Report Markup Language (RML). This is available for evaluation
on our web site with full documentation. We believe that RML is the fastest
and easiest way to develop rich PDF workflows. You work in a markup language
at a similar level to HTML, using your favorite templating system to populate
an RML document; then call our rml2pdf API function to generate a PDF. It's
what ReportLab staff use to build all of the solutions you can see on reportlab.com.
Key differences:
""")
bullet("""Fully documented with two manuals, a formal specification (the DTD) and extensive self-documenting tests. (By contrast, we try to make sure the open source documentation isn't wrong, but we don't always keep up with the code)""")
bullet("""Work in high-level markup rather than constructing graphs of Python objects """)
bullet("""Requires no Python expertise - your colleagues may thank you after you've left!'""")
bullet("""Support for vector graphics and inclusion of other PDF documents""")
bullet("""Many more useful features expressed with a single tag, which would need a lot
of coding in the open source package""")
bullet("""Commercial support is included""")
disc("""
We ask open source developers to consider trying out RML where it is appropriate.
You can register on our site and try out a copy before buying.
The costs are reasonable and linked to the volume of the project, and the revenue
helps us spend more time developing this software.""")
heading2("What is Python?")
disc("""
Python is an <i>interpreted, interactive, object-oriented</i> programming language. It is often compared to Tcl, Perl,
Scheme or Java.
""")
disc("""
Python combines remarkable power with very clear syntax. It has modules, classes, exceptions, very high level
dynamic data types, and dynamic typing. There are interfaces to many system calls and libraries, as well as to
various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules are easily written in C or C++.
Python is also usable as an extension language for applications that need a programmable interface.
""")
disc("""
Python is as old as Java and has been growing steadily in popularity for years; since our
library first came out it has entered the mainstream. Many ReportLab library users are
already Python devotees, but if you are not, we feel that the language is an excellent
choice for document-generation apps because of its expressiveness and ability to get
data from anywhere.
""")
disc("""
Python is copyrighted but <b>freely usable and distributable, even for commercial use</b>.
""")
heading2("Acknowledgements")
disc("""Many people have contributed to ReportLab. We would like to thank in particular
(in alphabetical order):
Albertas Agejevas,
Alex Buck,
Andre Reitz,
Andrew Cutler,
Andrew Mercer,
Ben Echols,
Benjamin Dumke,
Benn B,
Chad Miller,
Chris Lee,
Christian Jacobs,
Dinu Gherman,
Eric Johnson,
Felix Labrecque,
Gary Poster,
Germán M. Bravo,
Guillaume Francois,
Hans Brand,
Henning Vonbargen,
Hosam Aly,
Ian Stevens,
James Martin-Collar,
Jeff Bauer,
Jerome Alet,
Jerry Casiano,
Jorge Godoy,
Keven D Smith,
Magnus Lie Hetland,
Marcel Tromp,
Marius Gedminas,
Matthew Duggan,
Matthias Kirst,
Matthias Klose,
Max M,
Michael Egorov,
Mike Folwell,
Mirko Dziadzka,
Moshe Wagner,
Nate Silva,
Paul McNett,
Peter Johnson,
PJACock,
Publio da Costa Melo,
Randolph Bentson,
Robert Alsina,
Robert Hölzl,
Robert Kern,
Ron Peleg,
Simon King,
Stephan Richter,
Steve Halasz,
T Blatter,
Tim Roberts,
Tomasz Swiderski,
Ty Sarna,
Volker Haas,
Yoann Roman,
and many more.""")
disc("""Special thanks go to Just van Rossum for his valuable assistance with
font technicalities.""")
disc("""Moshe Wagner and Hosam Aly deserve a huge thanks for contributing to the RTL patch, which is not yet on the trunk.""")
disc("""Marius Gedminas deserves a big hand for contributing the work on TrueType fonts and we
are glad to include these in the toolkit. Finally we thank Michal Kosmulski for the DarkGarden font
and Bitstream Inc. for the Vera fonts.""")
heading2("Installation and Setup")
disc("""To avoid duplication, the installation instructions are kept in the README file
in our distribution, which can be viewed online at ^http://bitbucket.org/rptlab/reportlab/^""")
disc("""This release (3.0) of ReportLab requires Python versions 2.7, 3.3 or higher.
If you need to use Python 2.5 or 2.6, please use the latest ReportLab 2.x package.
""")
heading2("Getting Involved")
disc("""ReportLab is an Open Source project. Although we are
a commercial company we provide the core PDF generation
sources freely, even for commercial purposes, and we make no income directly
from these modules. We also welcome help from the community
as much as any other Open Source project. There are many
ways in which you can help:""")
bullet("""General feedback on the core API. Does it work for you?
Are there any rough edges? Does anything feel clunky and awkward?""")
bullet("""New objects to put in reports, or useful utilities for the library.
We have an open standard for report objects, so if you have written a nice
chart or table class, why not contribute it?""")
bullet("""Snippets and Case Studies: If you have produced some nice
output, register online on ^http://www.reportlab.com^ and submit a snippet
of your output (with or without scripts). If ReportLab solved a
problem for you at work, write a little 'case study' and submit it.
And if your web site uses our tools to make reports, let us link to it.
We will be happy to display your work (and credit it with your name
and company) on our site!""")
bullet("""Working on the core code: we have a long list of things
to refine or to implement. If you are missing some features or
just want to help out, let us know!""")
disc("""The first step for anyone wanting to learn more or
get involved is to join the mailing list. To Subscribe visit
$http://two.pairlist.net/mailman/listinfo/reportlab-users$.
From there you can also browse through the group's archives
and contributions. The mailing list is
the place to report bugs and get support. """)
disc("""The code now lives on BitBucket ($http://bitbucket.org/rptlab/reportlab/$)
in a Mercurial repository, along with an issue tracker and wiki. Everyone should
feel free to contribute, but if you are working actively on some improvements
or want to draw attention to an issue, please use the mailing list to let us know.""")
heading2("Site Configuration")
disc("""There are a number of options which most likely need to be configured globally for a site.
The python script module $reportlab/rl_config.py$ may be edited to change the values of several
important sitewide properties.""")
bullet("""verbose: set to integer values to control diagnostic output.""")
bullet("""shapeChecking: set this to zero to turn off a lot of error checking in the graphics modules""")
bullet("""defaultEncoding: set this to WinAnsiEncoding or MacRomanEncoding.""")
bullet("""defaultPageSize: set this to one of the values defined in reportlab/lib/pagesizes.py; as delivered
it is set to pagesizes.A4; other values are pagesizes.letter etc.""")
bullet("""defaultImageCaching: set to zero to inhibit the creation of .a85 files on your
hard-drive. The default is to create these preprocessed PDF compatible image files for faster loading""")
bullet("""T1SearchPath: this is a python list of strings representing directories that
may be queried for information on Type 1 fonts""")
bullet("""TTFSearchPath: this is a python list of strings representing directories that
may be queried for information on TrueType fonts""")
bullet("""CMapSearchPath: this is a python list of strings representing directories that
may be queried for information on font code maps.""")
bullet("""showBoundary: set to non-zero to get boundary lines drawn.""")
bullet("""ZLIB_WARNINGS: set to non-zero to get warnings if the Python compression extension is not found.""")
bullet("""pageComression: set to non-zero to try and get compressed PDF.""")
bullet("""allowtableBoundsErrors: set to 0 to force an error on very large Platypus table elements""")
bullet("""emptyTableAction: Controls behaviour for empty tables, can be 'error' (default), 'indicate' or 'ignore'.""")
heading2("Learning More About Python")
disc("""
If you are a total beginner to Python, you should check out one or more from the
growing number of resources on Python programming. The following are freely
available on the web:
""")
bullet("""<b>Python Documentation. </b>
A list of documentation on the Python.org web site.
$http://www.python.org/doc/$
""")
bullet("""<b>Python Tutorial. </b>
The official Python Tutorial , originally written by Guido van Rossum himself.
$http://docs.python.org/tutorial/$
""")
bullet("""<b>Learning to Program. </b>
A tutorial on programming by Alan Gauld. Has a heavy emphasis on
Python, but also uses other languages.
$http://www.freenetpages.co.uk/hp/alan.gauld/$
""")
bullet("""<b>Instant Python</b>.
A 6-page minimal crash course by Magnus Lie Hetland.
$http://www.hetland.org/python/instant-python.php$
""")
bullet("""<b>Dive Into Python</b>.
A free Python tutorial for experienced programmers.
$http://www.diveintopython.net/$
""")
from reportlab.lib.codecharts import SingleByteEncodingChart
from tools.docco.stylesheet import getStyleSheet
styles = getStyleSheet()
indent0_style = styles['Indent0']
indent1_style = styles['Indent1']
heading2("Goals for the 3.x release series")
disc("""ReportLab 3.0 has been produced to help in the migration to Python 3.x. Python 3.x will
be standard in future Ubuntu releases and is gaining popularity, and a good proportion
of major Python packages now run on Python 3. """)
bullet("""Python 3.x compatibility. A single line of code should run on 2.7 and 3.3""")
bullet(""" __init__.py restricts to 2.7 or >=3.3""")
bullet("""__init__.py allow the import of on optional reportlab.local_rl_mods to allow monkey patching etc.""")
bullet("""rl_config now imports rl_settings & optionally local_rl_settings""")
bullet("""ReportLab C extensions now live inside reportlab; _rl_accel is no longer required. All _rl_accel imports now pass through reportlab.lib.rl_accel""")
bullet("""xmllib is gone, alongside the paraparser stuff that caused issues in favour of HTMLParser.""")
bullet("""some obsolete C extensions (sgmlop and pyHnj) are gone""")
bullet("""Improved support for multi-threaded systems to the _rl_accel C extension module.""")
bullet("""Removed reportlab/lib/ para.py & pycanvas.py. These would better belong in third party packages, which can make use of the monkeypatching feature above.""")
bullet("""Add ability to output greyscale and 1-bit PIL images without conversion to RGB. (contributed by Matthew Duggan)""")
bullet("""highlight annotation (contributed by Ben Echols)""")
bullet("""full compliance with pip, easy_install, wheels etc""")
disc("""Detailed release notes are available at
$http://www.reportlab.com/software/documentation/relnotes/30/$""")
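# Editor's sketch tied to the _rl_accel bullet above: under the 3.x layout the accelerated
# helpers are imported from reportlab.lib.rl_accel rather than a top-level _rl_accel module.
# fp_str is one such helper; the function is defined here but never called.
def _example_rl_accel_import():
    from reportlab.lib.rl_accel import fp_str
    return fp_str(3.14159, 2.71828)   # compact float formatting as used when writing PDF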
| Distrotech/reportlab | docs/userguide/ch1_intro.py | Python | bsd-3-clause | 16,512 | ["VisIt"] | d23e51ceadce8cd513f3c5c067a740fdc4d3d83cc096a7d6000b72e279ea11ae |
import datetime as dt
import re
import pytest
from marshmallow import fields, validate
from .schemas import CategorySchema, CustomList, CustomStringField, CustomIntegerField
from .utils import build_ref, get_schemas
def test_field2choices_preserving_order(openapi):
choices = ["a", "b", "c", "aa", "0", "cc"]
field = fields.String(validate=validate.OneOf(choices))
assert openapi.field2choices(field) == {"enum": choices}
@pytest.mark.parametrize(
("FieldClass", "jsontype"),
[
(fields.Integer, "integer"),
(fields.Number, "number"),
(fields.Float, "number"),
(fields.String, "string"),
(fields.Str, "string"),
(fields.Boolean, "boolean"),
(fields.Bool, "boolean"),
(fields.UUID, "string"),
(fields.DateTime, "string"),
(fields.Date, "string"),
(fields.Time, "string"),
(fields.TimeDelta, "integer"),
(fields.Email, "string"),
(fields.URL, "string"),
# Custom fields inherit types from their parents
(CustomStringField, "string"),
(CustomIntegerField, "integer"),
],
)
def test_field2property_type(FieldClass, jsontype, spec_fixture):
field = FieldClass()
res = spec_fixture.openapi.field2property(field)
assert res["type"] == jsontype
@pytest.mark.parametrize("FieldClass", [fields.Field, fields.Raw])
def test_field2property_no_type_(FieldClass, spec_fixture):
field = FieldClass()
res = spec_fixture.openapi.field2property(field)
assert "type" not in res
@pytest.mark.parametrize("ListClass", [fields.List, CustomList])
def test_formatted_field_translates_to_array(ListClass, spec_fixture):
field = ListClass(fields.String)
res = spec_fixture.openapi.field2property(field)
assert res["type"] == "array"
assert res["items"] == spec_fixture.openapi.field2property(fields.String())
@pytest.mark.parametrize(
("FieldClass", "expected_format"),
[
(fields.UUID, "uuid"),
(fields.DateTime, "date-time"),
(fields.Date, "date"),
(fields.Email, "email"),
(fields.URL, "url"),
],
)
def test_field2property_formats(FieldClass, expected_format, spec_fixture):
field = FieldClass()
res = spec_fixture.openapi.field2property(field)
assert res["format"] == expected_format
def test_field_with_description(spec_fixture):
field = fields.Str(metadata={"description": "a username"})
res = spec_fixture.openapi.field2property(field)
assert res["description"] == "a username"
def test_field_with_load_default(spec_fixture):
field = fields.Str(dump_default="foo", load_default="bar")
res = spec_fixture.openapi.field2property(field)
assert res["default"] == "bar"
def test_boolean_field_with_false_load_default(spec_fixture):
field = fields.Boolean(dump_default=None, load_default=False)
res = spec_fixture.openapi.field2property(field)
assert res["default"] is False
def test_datetime_field_with_load_default(spec_fixture):
field = fields.Date(load_default=dt.date(2014, 7, 18))
res = spec_fixture.openapi.field2property(field)
assert res["default"] == dt.date(2014, 7, 18).isoformat()
def test_field_with_load_default_callable(spec_fixture):
field = fields.Str(load_default=lambda: "dummy")
res = spec_fixture.openapi.field2property(field)
assert "default" not in res
def test_field_with_default(spec_fixture):
field = fields.Str(metadata={"default": "Manual default"})
res = spec_fixture.openapi.field2property(field)
assert res["default"] == "Manual default"
def test_field_with_default_and_load_default(spec_fixture):
field = fields.Int(load_default=12, metadata={"default": 42})
res = spec_fixture.openapi.field2property(field)
assert res["default"] == 42
def test_field_with_choices(spec_fixture):
field = fields.Str(validate=validate.OneOf(["freddie", "brian", "john"]))
res = spec_fixture.openapi.field2property(field)
assert set(res["enum"]) == {"freddie", "brian", "john"}
def test_field_with_equal(spec_fixture):
field = fields.Str(validate=validate.Equal("only choice"))
res = spec_fixture.openapi.field2property(field)
assert res["enum"] == ["only choice"]
def test_only_allows_valid_properties_in_metadata(spec_fixture):
field = fields.Str(
load_default="foo",
metadata={
"description": "foo",
"not_valid": "lol",
"allOf": ["bar"],
"enum": ["red", "blue"],
},
)
res = spec_fixture.openapi.field2property(field)
assert res["default"] == field.load_default
assert "description" in res
assert "enum" in res
assert "allOf" in res
assert "not_valid" not in res
def test_field_with_choices_multiple(spec_fixture):
field = fields.Str(
validate=[
validate.OneOf(["freddie", "brian", "john"]),
validate.OneOf(["brian", "john", "roger"]),
]
)
res = spec_fixture.openapi.field2property(field)
assert set(res["enum"]) == {"brian", "john"}
def test_field_with_additional_metadata(spec_fixture):
field = fields.Str(metadata={"minLength": 6, "maxLength": 100})
res = spec_fixture.openapi.field2property(field)
assert res["maxLength"] == 100
assert res["minLength"] == 6
@pytest.mark.parametrize("spec_fixture", ("2.0", "3.0.0", "3.1.0"), indirect=True)
def test_field_with_allow_none(spec_fixture):
field = fields.Str(allow_none=True)
res = spec_fixture.openapi.field2property(field)
if spec_fixture.openapi.openapi_version.major < 3:
assert res["x-nullable"] is True
elif spec_fixture.openapi.openapi_version.minor < 1:
assert res["nullable"] is True
else:
assert "nullable" not in res
assert res["type"] == ["string", "null"]
def test_field_with_dump_only(spec_fixture):
field = fields.Str(dump_only=True)
res = spec_fixture.openapi.field2property(field)
assert res["readOnly"] is True
@pytest.mark.parametrize("spec_fixture", ("2.0", "3.0.0", "3.1.0"), indirect=True)
def test_field_with_load_only(spec_fixture):
field = fields.Str(load_only=True)
res = spec_fixture.openapi.field2property(field)
if spec_fixture.openapi.openapi_version.major < 3:
assert "writeOnly" not in res
else:
assert res["writeOnly"] is True
def test_field_with_range_no_type(spec_fixture):
field = fields.Field(validate=validate.Range(min=1, max=10))
res = spec_fixture.openapi.field2property(field)
assert res["x-minimum"] == 1
assert res["x-maximum"] == 10
assert "type" not in res
@pytest.mark.parametrize("field", (fields.Number, fields.Integer))
def test_field_with_range_string_type(spec_fixture, field):
field = field(validate=validate.Range(min=1, max=10))
res = spec_fixture.openapi.field2property(field)
assert res["minimum"] == 1
assert res["maximum"] == 10
assert isinstance(res["type"], str)
@pytest.mark.parametrize("spec_fixture", ("3.1.0",), indirect=True)
def test_field_with_range_type_list_with_number(spec_fixture):
@spec_fixture.openapi.map_to_openapi_type(["integer", "null"], None)
class NullableInteger(fields.Field):
"""Nullable integer"""
field = NullableInteger(validate=validate.Range(min=1, max=10))
res = spec_fixture.openapi.field2property(field)
assert res["minimum"] == 1
assert res["maximum"] == 10
assert res["type"] == ["integer", "null"]
@pytest.mark.parametrize("spec_fixture", ("3.1.0",), indirect=True)
def test_field_with_range_type_list_without_number(spec_fixture):
@spec_fixture.openapi.map_to_openapi_type(["string", "null"], None)
class NullableInteger(fields.Field):
"""Nullable integer"""
field = NullableInteger(validate=validate.Range(min=1, max=10))
res = spec_fixture.openapi.field2property(field)
assert res["x-minimum"] == 1
assert res["x-maximum"] == 10
assert res["type"] == ["string", "null"]
def test_field_with_str_regex(spec_fixture):
regex_str = "^[a-zA-Z0-9]$"
field = fields.Str(validate=validate.Regexp(regex_str))
ret = spec_fixture.openapi.field2property(field)
assert ret["pattern"] == regex_str
def test_field_with_pattern_obj_regex(spec_fixture):
regex_str = "^[a-zA-Z0-9]$"
field = fields.Str(validate=validate.Regexp(re.compile(regex_str)))
ret = spec_fixture.openapi.field2property(field)
assert ret["pattern"] == regex_str
def test_field_with_no_pattern(spec_fixture):
field = fields.Str()
ret = spec_fixture.openapi.field2property(field)
assert "pattern" not in ret
def test_field_with_multiple_patterns(recwarn, spec_fixture):
regex_validators = [validate.Regexp("winner"), validate.Regexp("loser")]
field = fields.Str(validate=regex_validators)
with pytest.warns(UserWarning, match="More than one regex validator"):
ret = spec_fixture.openapi.field2property(field)
assert ret["pattern"] == "winner"
def test_field2property_nested_spec_metadatas(spec_fixture):
spec_fixture.spec.components.schema("Category", schema=CategorySchema)
category = fields.Nested(
CategorySchema,
metadata={
"description": "A category",
"invalid_property": "not in the result",
"x_extension": "A great extension",
},
)
result = spec_fixture.openapi.field2property(category)
assert result == {
"allOf": [build_ref(spec_fixture.spec, "schema", "Category")],
"description": "A category",
"x-extension": "A great extension",
}
def test_field2property_nested_spec(spec_fixture):
spec_fixture.spec.components.schema("Category", schema=CategorySchema)
category = fields.Nested(CategorySchema)
assert spec_fixture.openapi.field2property(category) == build_ref(
spec_fixture.spec, "schema", "Category"
)
def test_field2property_nested_many_spec(spec_fixture):
spec_fixture.spec.components.schema("Category", schema=CategorySchema)
category = fields.Nested(CategorySchema, many=True)
ret = spec_fixture.openapi.field2property(category)
assert ret["type"] == "array"
assert ret["items"] == build_ref(spec_fixture.spec, "schema", "Category")
def test_field2property_nested_ref(spec_fixture):
category = fields.Nested(CategorySchema)
ref = spec_fixture.openapi.field2property(category)
assert ref == build_ref(spec_fixture.spec, "schema", "Category")
def test_field2property_nested_many(spec_fixture):
categories = fields.Nested(CategorySchema, many=True)
res = spec_fixture.openapi.field2property(categories)
assert res["type"] == "array"
assert res["items"] == build_ref(spec_fixture.spec, "schema", "Category")
def test_nested_field_with_property(spec_fixture):
category_1 = fields.Nested(CategorySchema)
category_2 = fields.Nested(CategorySchema, dump_only=True)
category_3 = fields.Nested(CategorySchema, many=True)
category_4 = fields.Nested(CategorySchema, many=True, dump_only=True)
spec_fixture.spec.components.schema("Category", schema=CategorySchema)
assert spec_fixture.openapi.field2property(category_1) == build_ref(
spec_fixture.spec, "schema", "Category"
)
assert spec_fixture.openapi.field2property(category_2) == {
"allOf": [build_ref(spec_fixture.spec, "schema", "Category")],
"readOnly": True,
}
assert spec_fixture.openapi.field2property(category_3) == {
"items": build_ref(spec_fixture.spec, "schema", "Category"),
"type": "array",
}
assert spec_fixture.openapi.field2property(category_4) == {
"items": build_ref(spec_fixture.spec, "schema", "Category"),
"readOnly": True,
"type": "array",
}
class TestField2PropertyPluck:
@pytest.fixture(autouse=True)
def _setup(self, spec_fixture):
self.field2property = spec_fixture.openapi.field2property
self.spec = spec_fixture.spec
self.spec.components.schema("Category", schema=CategorySchema)
self.unplucked = get_schemas(self.spec)["Category"]["properties"]["breed"]
def test_spec(self, spec_fixture):
breed = fields.Pluck(CategorySchema, "breed")
assert self.field2property(breed) == self.unplucked
def test_with_property(self):
breed = fields.Pluck(CategorySchema, "breed", dump_only=True)
assert self.field2property(breed) == {**self.unplucked, "readOnly": True}
def test_metadata(self):
breed = fields.Pluck(
CategorySchema,
"breed",
metadata={
"description": "Category breed",
"invalid_property": "not in the result",
"x_extension": "A great extension",
},
)
assert self.field2property(breed) == {
**self.unplucked,
"description": "Category breed",
"x-extension": "A great extension",
}
def test_many(self):
breed = fields.Pluck(CategorySchema, "breed", many=True)
assert self.field2property(breed) == {"type": "array", "items": self.unplucked}
def test_many_with_property(self):
breed = fields.Pluck(CategorySchema, "breed", many=True, dump_only=True)
assert self.field2property(breed) == {
"items": self.unplucked,
"type": "array",
"readOnly": True,
}
def test_custom_properties_for_custom_fields(spec_fixture):
def custom_string2properties(self, field, **kwargs):
ret = {}
if isinstance(field, CustomStringField):
if self.openapi_version.major == 2:
ret["x-customString"] = True
else:
ret["x-customString"] = False
return ret
spec_fixture.marshmallow_plugin.converter.add_attribute_function(
custom_string2properties
)
properties = spec_fixture.marshmallow_plugin.converter.field2property(
CustomStringField()
)
assert properties["x-customString"] == (
spec_fixture.openapi.openapi_version == "2.0"
)
def test_field2property_with_non_string_metadata_keys(spec_fixture):
class _DesertSentinel:
pass
field = fields.Boolean(metadata={"description": "A description"})
field.metadata[_DesertSentinel()] = "to be ignored"
result = spec_fixture.openapi.field2property(field)
assert result == {"description": "A description", "type": "boolean"}
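# Editor's sketch: outside the test fixtures, the same attribute hook is registered on a
# real APISpec through its MarshmallowPlugin instance. The title/version values are
# illustrative, and `attribute_function` stands in for a converter such as
# custom_string2properties defined in the test above.
def _example_register_attribute_function(attribute_function):
    from apispec import APISpec
    from apispec.ext.marshmallow import MarshmallowPlugin

    plugin = MarshmallowPlugin()
    spec = APISpec(
        title="Demo API", version="1.0.0", openapi_version="3.0.2", plugins=[plugin]
    )
    plugin.converter.add_attribute_function(attribute_function)
    return spec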
| marshmallow-code/apispec | tests/test_ext_marshmallow_field.py | Python | mit | 14,507 | ["Brian"] | 322638d21641fb6092f0ac640cd0dc172c6fa65b2d2a5aa16890414906b50579 |
import os, sys, re, inspect, types, errno, pprint, subprocess, io, shutil
import ParseGetPot
import copy
from socket import gethostname
#from options import *
from util import *
from time import sleep
from RunParallel import RunParallel
from CSVDiffer import CSVDiffer
from XMLDiffer import XMLDiffer
from Tester import Tester
from PetscJacobianTester import PetscJacobianTester
from InputParameters import InputParameters
from Factory import Factory
import argparse
from optparse import OptionParser, OptionGroup, Values
from timeit import default_timer as clock
class TestHarness:
def __init__(self, argv, app_name, moose_dir):
self.factory = Factory()
self.test_table = []
self.num_passed = 0
self.num_failed = 0
self.num_skipped = 0
self.num_pending = 0
self.host_name = gethostname()
self.moose_dir = os.path.abspath(moose_dir) + '/'
self.run_tests_dir = os.path.abspath('.')
self.code = '2d2d6769726c2d6d6f6465'
# Assume libmesh is a peer directory to MOOSE if not defined
if os.environ.has_key("LIBMESH_DIR"):
self.libmesh_dir = os.environ['LIBMESH_DIR']
else:
self.libmesh_dir = self.moose_dir + '../libmesh/installed'
self.file = None
# Parse arguments
self.parseCLArgs(argv)
self.checks = {}
self.checks['platform'] = getPlatforms()
self.checks['compiler'] = getCompilers(self.libmesh_dir)
self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
self.checks['dtk'] = getLibMeshConfigOption(self.libmesh_dir, 'dtk')
self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
self.checks['vtk'] = getLibMeshConfigOption(self.libmesh_dir, 'vtk')
# Override the MESH_MODE option if using '--parallel-mesh' option
if self.options.parallel_mesh == True or \
(self.options.cli_args != None and \
self.options.cli_args.find('--parallel-mesh') != -1):
option_set = set()
option_set.add('ALL')
option_set.add('PARALLEL')
self.checks['mesh_mode'] = option_set
method = set()
method.add('ALL')
method.add(self.options.method.upper())
self.checks['method'] = method
self.initialize(argv, app_name)
def findAndRunTests(self):
self.preRun()
self.start_time = clock()
# PBS STUFF
if self.options.pbs and os.path.exists(self.options.pbs):
self.options.processingPBS = True
self.processPBSResults()
else:
self.options.processingPBS = False
for dirpath, dirnames, filenames in os.walk(os.getcwd(), followlinks=True):
if (self.test_match.search(dirpath) and "contrib" not in dirpath):
for file in filenames:
# set cluster_handle to be None initially (happens for each test)
self.options.cluster_handle = None
# See if there were other arguments (test names) passed on the command line
if file == self.options.input_file_name: #and self.test_match.search(file):
saved_cwd = os.getcwd()
sys.path.append(os.path.abspath(dirpath))
os.chdir(dirpath)
tests = self.parseGetPotTestFormat(file)
if self.options.enable_recover:
tests = self.appendRecoverableTests(tests)
# Go through the list of test specs and run them
for test in tests:
            # Strip beginning and ending spaces from the input file name
test['input'] = test['input'].strip()
            # Double the allotted time for tests when running with the valgrind option
if self.options.valgrind_mode == 'NORMAL':
test['max_time'] = test['max_time'] * 2
elif self.options.valgrind_mode == 'HEAVY':
test['max_time'] = test['max_time'] * 4
# Build the requested Tester object and run
tester = self.factory.create(test['type'], test)
# When running in valgrind mode, we end up with a ton of output for each failed
# test. Therefore, we limit the number of fails...
if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
else:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
# Create the cluster launcher input file
if self.options.pbs and self.options.cluster_handle == None:
self.options.cluster_handle = open(dirpath + '/tests.cluster', 'a')
self.options.cluster_handle.write('[Jobs]\n')
command = tester.getCommand(self.options)
# This method spawns another process and allows this loop to continue looking for tests
# RunParallel will call self.testOutputAndFinish when the test has completed running
# This method will block when the maximum allowed parallel processes are running
self.runner.run(tester, command)
else: # This job is skipped - notify the runner
if (reason != ''):
self.handleTestResult(test, '', reason)
self.runner.jobSkipped(test['test_name'])
if self.options.cluster_handle != None:
self.options.cluster_handle.write('[]\n')
self.options.cluster_handle.close()
self.options.cluster_handle = None
os.chdir(saved_cwd)
sys.path.pop()
self.runner.join()
# Wait for all tests to finish
if self.options.pbs and self.options.processingPBS == False:
print '\n< checking batch status >\n'
self.options.processingPBS = True
self.processPBSResults()
self.cleanupAndExit()
else:
self.cleanupAndExit()
def parseGetPotTestFormat(self, filename):
tests = []
test_dir = os.path.abspath(os.path.dirname(filename))
relative_path = test_dir.replace(self.run_tests_dir, '')
# Filter tests that we want to run
# Under the new format, we will filter based on directory not filename since it is fixed
will_run = False
if len(self.tests) == 0:
will_run = True
else:
for item in self.tests:
if test_dir.find(item) > -1:
will_run = True
if not will_run:
return tests
try:
data = ParseGetPot.readInputFile(filename)
except: # ParseGetPot class
print "Parse Error: " + test_dir + "/" + filename
return tests
# We expect our root node to be called "Tests"
if 'Tests' in data.children:
tests_node = data.children['Tests']
for testname, test_node in tests_node.children.iteritems():
# First retrieve the type so we can get the valid params
if 'type' not in test_node.params:
print "Type missing in " + test_dir + filename
sys.exit(1)
params = self.factory.getValidParams(test_node.params['type'])
# Now update all the base level keys
params_parsed = set()
params_ignored = set()
for key, value in test_node.params.iteritems():
params_parsed.add(key)
if key in params:
if params.type(key) == list:
params[key] = value.split(' ')
else:
if re.match('".*"', value): # Strip quotes
params[key] = value[1:-1]
else:
# Prevent bool types from being stored as strings. This can lead to the
# strange situation where string('False') evaluates to true...
if params.isValid(key) and (type(params[key]) == type(bool())):
# We support using the case-insensitive strings {true, false} and the string '0', '1'.
if (value.lower()=='true') or (value=='1'):
params[key] = True
elif (value.lower()=='false') or (value=='0'):
params[key] = False
else:
print "Unrecognized (key,value) pair: (", key, ',', value, ")"
sys.exit(1)
# Otherwise, just do normal assignment
else:
params[key] = value
else:
params_ignored.add(key)
# Make sure that all required parameters are supplied
required_params_missing = params.required_keys() - params_parsed
if len(required_params_missing):
print 'Required Missing Parameter(s): ', required_params_missing
if len(params_ignored):
print 'Ignored Parameter(s): ', params_ignored
# We are going to do some formatting of the path that is printed
# Case 1. If the test directory (normally matches the input_file_name) comes first,
# we will simply remove it from the path
# Case 2. If the test directory is somewhere in the middle then we should preserve
# the leading part of the path
relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path) # Trim slashes and colons
formatted_name = relative_path + '.' + testname
params['test_name'] = formatted_name
params['test_dir'] = test_dir
params['relative_path'] = relative_path
params['executable'] = self.executable
params['hostname'] = self.host_name
params['moose_dir'] = self.moose_dir
if params.isValid('prereq'):
if type(params['prereq']) != list:
print "Option 'prereq' needs to be of type list in " + params['test_name']
sys.exit(1)
params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]
# Build a list of test specs (dicts) to return
tests.append(params)
return tests
def augmentTestSpecs(self, test):
test['executable'] = self.executable
test['hostname'] = self.host_name
# This method splits a lists of tests into two pieces each, the first piece will run the test for
# approx. half the number of timesteps and will write out a restart file. The second test will
# then complete the run using the MOOSE recover option.
def appendRecoverableTests(self, tests):
new_tests = []
for part1 in tests:
if part1['recover'] == True:
# Clone the test specs
part2 = copy.deepcopy(part1)
# Part 1:
part1['test_name'] += '_part1'
part1['cli_args'].append('--half-transient')
part1['cli_args'].append('Outputs/auto_recovery_part1=true')
part1['skip_checks'] = True
# Part 2:
part2['prereq'].append(part1['test_name'])
part2['delete_output_before_running'] = False
part2['cli_args'].append('Outputs/auto_recovery_part2=true')
part2['cli_args'].append('--recover')
part2.addParam('caveats', ['recover'], "")
new_tests.append(part2)
tests.extend(new_tests)
return tests
## Finish the test by inspecting the raw output
def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
caveats = []
test = tester.specs # Need to refactor
if test.isValid('caveats'):
caveats = test['caveats']
if self.options.pbs and self.options.processingPBS == False:
(reason, output) = self.buildPBSBatch(output, tester)
else:
(reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)
if self.options.scaling and test['scale_refine']:
caveats.append('scaled')
did_pass = True
if reason == '':
      # It ran OK but is this test set to be skipped on any platform, compiler, or other reason?
if self.options.extra_info:
checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
for check in checks:
if not 'ALL' in test[check]:
caveats.append(', '.join(test[check]))
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] OK'
elif self.options.pbs and self.options.processingPBS == False:
result = 'LAUNCHED'
else:
result = 'OK'
else:
result = 'FAILED (%s)' % reason
did_pass = False
self.handleTestResult(tester.specs, output, result, start, end)
return did_pass
def getTiming(self, output):
time = ''
m = re.search(r"Active time=(\S+)", output)
if m != None:
return m.group(1)
def getSolveTime(self, output):
time = ''
m = re.search(r"solve().*", output)
if m != None:
return m.group().split()[5]
def checkExpectError(self, output, expect_error):
if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
#print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
return False
else:
return True
# PBS Defs
def processPBSResults(self):
# If batch file exists, check the contents for pending tests.
if os.path.exists(self.options.pbs):
# Build a list of launched jobs
batch_file = open(self.options.pbs)
batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
batch_file.close()
del batch_list[-1:]
      # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
for job in batch_list:
file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
tests = self.parseGetPotTestFormat(file)
for test in tests:
# Build the requested Tester object
if job[1] == test['test_name']:
# Create Test Type
tester = self.factory.create(test['type'], test)
# Get job status via qstat
qstat = ['qstat', '-f', '-x', str(job[0])]
qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
if qstat_stdout != None:
output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
else:
return ('QSTAT NOT FOUND', '')
# Report the current status of JOB_ID
if output_value == 'F':
# F = Finished. Get the exit code reported by qstat
exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))
# Read the stdout file
if os.path.exists(job[2]):
output_file = open(job[2], 'r')
# Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
test['test_dir'] = '/'.join(job[2].split('/')[:-1])
outfile = output_file.read()
output_file.close()
else:
# I ran into this scenario when the cluster went down, but launched/completed my job :)
self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)
self.testOutputAndFinish(tester, exit_code, outfile)
elif output_value == 'R':
# Job is currently running
self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
elif output_value == 'E':
# Job is exiting
self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
elif output_value == 'Q':
# Job is currently queued
self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
else:
return ('BATCH FILE NOT FOUND', '')
def buildPBSBatch(self, output, tester):
# Create/Update the batch file
if 'command not found' in output:
return('QSUB NOT FOUND', '')
else:
# Get the PBS Job ID using qstat
# TODO: Build an error handler. If there was any issue launching the cluster launcher due to <any thing>, why die here.
job_id = re.findall(r'.*JOB_ID: (\d+)', output)[0]
qstat = ['qstat', '-f', '-x', str(job_id)]
qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
# Get the Output_Path from qstat stdout
if qstat_stdout != None:
output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '')
else:
return ('QSTAT NOT FOUND', '')
      # Write job_id, test['test_name'], and Output_Path to the batch file
file_name = self.options.pbs
job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
job_list.write(str(job_id) + ':' + tester.specs['test_name'] + ':' + output_value + ':' + self.options.input_file_name + '\n')
job_list.close()
# Return to TestHarness and inform we have launched the job
return ('', 'LAUNCHED')
def cleanPBSBatch(self):
# Open the PBS batch file and assign it to a list
if os.path.exists(self.options.pbs_cleanup):
batch_file = open(self.options.pbs_cleanup, 'r')
batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
batch_file.close()
del batch_list[-1:]
else:
print 'PBS batch file not found:', self.options.pbs_cleanup
sys.exit(1)
    # Loop through launched jobs and delete what's found.
for job in batch_list:
if os.path.exists(job[2]):
batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
if os.path.exists('/'.join(batch_dir)):
shutil.rmtree('/'.join(batch_dir))
if os.path.exists('/'.join(batch_dir[:-1]) + '/' + job[3] + '.cluster'):
os.remove('/'.join(batch_dir[:-1]) + '/' + job[3] + '.cluster')
os.remove(self.options.pbs_cleanup)
# END PBS Defs
## Update global variables and print output based on the test result
# Containing OK means it passed, skipped means skipped, anything else means it failed
def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
timing = ''
if self.options.timing:
timing = self.getTiming(output)
elif self.options.store_time:
timing = self.getSolveTime(output)
# Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
# in the 'Final Test Results' area.
if add_to_table:
self.test_table.append( (specs, output, result, timing, start, end) )
if result.find('OK') != -1:
self.num_passed += 1
elif result.find('skipped') != -1:
self.num_skipped += 1
elif result.find('deleted') != -1:
self.num_skipped += 1
elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
self.num_pending += 1
else:
self.num_failed += 1
self.postRun(specs, timing)
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(specs['test_name'], result, timing, start, end, self.options)
if self.options.verbose or ('FAILED' in result and not self.options.quiet):
output = output.replace('\r', '\n') # replace the carriage returns with newlines
      lines = output.split('\n')
color = ''
if 'EXODIFF' in result or 'CSVDIFF' in result:
color = 'YELLOW'
elif 'FAILED' in result:
color = 'RED'
else:
color = 'GREEN'
test_name = colorText(specs['test_name'] + ": ", self.options, color)
output = ("\n" + test_name).join(lines)
print output
# Print result line again at the bottom of the output for failed tests
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
else:
print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"
if not 'skipped' in result:
if self.options.file:
if self.options.show_directory:
self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
else:
self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
f = open(fname, 'w')
f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
f.write(output)
f.close()
# Write the app_name to a file, if the tests passed
def writeState(self, app_name):
# If we encounter bitten_status_moose environment, build a line itemized list of applications which passed their tests
if os.environ.has_key("BITTEN_STATUS_MOOSE"):
result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
result_file.write(str(os.path.split(app_name)[1][:-4]) + '\n')
result_file.close()
# Print final results, close open files, and exit with the correct error code
def cleanupAndExit(self):
# Print the results table again if a bunch of output was spewed to the screen between
# tests as they were running
if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
for (test, output, result, timing, start, end) in self.test_table:
if self.options.show_directory:
          print printResult(test['relative_path'] + '/' + test['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(test['test_name'], result, timing, start, end, self.options)
time = clock() - self.start_time
print '-' * (TERM_COLS-1)
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)
if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>, '
else:
summary += ', <b>%d pending</b>, '
if self.num_failed:
summary += '<r>%d FAILED</r>'
else:
summary += '<b>%d failed</b>'
print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), self.options, "", html=True )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs
if self.file:
self.file.close()
if self.num_failed == 0:
self.writeState(self.executable)
sys.exit(0)
else:
sys.exit(1)
def initialize(self, argv, app_name):
# Initialize the parallel runner with how many tests to run in parallel
self.runner = RunParallel(self, self.options.jobs, self.options.load)
## Save executable-under-test name to self.executable
self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method
# Check for built application
if not os.path.exists(self.executable):
print 'Application not found: ' + str(self.executable)
sys.exit(1)
# Emulate the standard Nose RegEx for consistency
self.test_match = re.compile(r"(?:^|\b|[_-])[Tt]est")
# Save the output dir since the current working directory changes during tests
self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)
# Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
if self.options.output_dir:
try:
os.makedirs(self.output_dir)
except OSError, ex:
if ex.errno == errno.EEXIST: pass
else: raise
# Open the file to redirect output to and set the quiet option for file output
if self.options.file:
self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
if self.options.file or self.options.fail_files or self.options.sep_files:
self.options.quiet = True
## Parse command line options and assign them to self.options
def parseCLArgs(self, argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description='A tool used to test MOOSE based applications')
parser.add_argument('test_name', nargs=argparse.REMAINDER)
parser.add_argument('--opt', action='store_const', dest='method', const='opt', help='test the app_name-opt binary')
parser.add_argument('--dbg', action='store_const', dest='method', const='dbg', help='test the app_name-dbg binary')
parser.add_argument('--devel', action='store_const', dest='method', const='dev', help='test the app_name-devel binary')
parser.add_argument('--oprof', action='store_const', dest='method', const='oprof', help='test the app_name-oprof binary')
parser.add_argument('--pro', action='store_const', dest='method', const='pro', help='test the app_name-pro binary')
parser.add_argument('-j', '--jobs', nargs=1, metavar='int', action='store', type=int, dest='jobs', default=1, help='run test binaries in parallel')
parser.add_argument('-e', action='store_true', dest='extra_info', help='Display "extra" information including all caveats and deleted tests')
parser.add_argument('-c', '--no-color', action='store_false', dest='colored', help='Do not show colored output')
parser.add_argument('--heavy', action='store_true', dest='heavy_tests', help='Run tests marked with HEAVY : True')
parser.add_argument('--all-tests', action='store_true', dest='all_tests', help='Run normal tests and tests marked with HEAVY : True')
parser.add_argument('-g', '--group', action='store', type=str, dest='group', default='ALL', help='Run only tests in the named group')
parser.add_argument('--not_group', action='store', type=str, dest='not_group', help='Run only tests NOT in the named group')
# parser.add_argument('--dofs', action='store', dest='dofs', help='This option is for automatic scaling which is not currently implemented in MOOSE 2.0')
parser.add_argument('--dbfile', nargs='?', action='store', dest='dbFile', help='Location to timings data base file. If not set, assumes $HOME/timingDB/timing.sqlite')
parser.add_argument('-l', '--load-average', action='store', type=float, dest='load', default=64.0, help='Do not run additional tests if the load average is at least LOAD')
parser.add_argument('-t', '--timing', action='store_true', dest='timing', help='Report Timing information for passing tests')
parser.add_argument('-s', '--scale', action='store_true', dest='scaling', help='Scale problems that have SCALE_REFINE set')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', default='tests', help='The default test specification file to look for (default="tests").')
parser.add_argument('--libmesh_dir', nargs=1, action='store', type=str, dest='libmesh_dir', help='Currently only needed for bitten code coverage')
parser.add_argument('--parallel', '-p', nargs='?', action='store', type=int, dest='parallel', const=1, help='Number of processors to use when running mpiexec')
parser.add_argument('--n-threads', nargs=1, action='store', type=int, dest='nthreads', default=1, help='Number of threads to use when running mpiexec')
parser.add_argument('-d', action='store_true', dest='debug_harness', help='Turn on Test Harness debugging')
parser.add_argument('--recover', action='store_true', dest='enable_recover', help='Run a test in recover mode')
parser.add_argument('--valgrind', action='store_const', dest='valgrind_mode', const='NORMAL', help='Run normal valgrind tests')
parser.add_argument('--valgrind-heavy', action='store_const', dest='valgrind_mode', const='HEAVY', help='Run heavy valgrind tests')
parser.add_argument('--valgrind-max-fails', nargs=1, type=int, dest='valgrind_max_fails', default=5, help='The number of valgrind tests allowed to fail before any additional valgrind tests will run')
parser.add_argument('--pbs', nargs='?', metavar='batch_file', dest='pbs', const='generate', help='Enable launching tests via PBS. If no batch file is specified one will be created for you')
parser.add_argument('--pbs-cleanup', nargs=1, metavar='batch_file', help='Clean up the directories/files created by PBS. You must supply the same batch_file used to launch PBS.')
parser.add_argument('--re', action='store', type=str, dest='reg_exp', help='Run tests that match --re=regular_expression')
parser.add_argument('--parallel-mesh', action='store_true', dest='parallel_mesh', help="Pass --parallel-mesh to executable")
parser.add_argument('--cli-args', nargs='?', type=str, dest='cli_args', help='Append the following list of arguments to the command line (Encapsulate the command in quotes)')
outputgroup = parser.add_argument_group('Output Options', 'These options control the output of the test harness. The sep-files options write output to files named test_name.TEST_RESULT.txt. All file output will overwrite old files')
outputgroup.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='show the output of every test that fails')
outputgroup.add_argument('-q', '--quiet', action='store_true', dest='quiet', help='only show the result of every test, don\'t show test output even if it fails')
outputgroup.add_argument('--show-directory', action='store_true', dest='show_directory', help='Print test directory path in out messages')
outputgroup.add_argument('-o', '--output-dir', nargs=1, metavar='directory', dest='output_dir', default='', help='Save all output files in the directory, and create it if necessary')
outputgroup.add_argument('-f', '--file', nargs=1, action='store', dest='file', help='Write verbose output of each test to FILE and quiet output to terminal')
outputgroup.add_argument('-x', '--sep-files', action='store_true', dest='sep_files', help='Write the output of each test to a separate file. Only quiet output to terminal. This is equivalant to \'--sep-files-fail --sep-files-ok\'')
outputgroup.add_argument('--sep-files-ok', action='store_true', dest='ok_files', help='Write the output of each passed test to a separate file')
outputgroup.add_argument('-a', '--sep-files-fail', action='store_true', dest='fail_files', help='Write the output of each FAILED test to a separate file. Only quiet output to terminal.')
outputgroup.add_argument("--store-timing", action="store_true", dest="store_time", help="Store timing in the SQL database: $HOME/timingDB/timing.sqlite A parent directory (timingDB) must exist.")
outputgroup.add_argument("--revision", nargs=1, action="store", type=str, dest="revision", help="The current revision being tested. Required when using --store-timing.")
outputgroup.add_argument("--yaml", action="store_true", dest="yaml", help="Dump the parameters for the testers in Yaml Format")
outputgroup.add_argument("--dump", action="store_true", dest="dump", help="Dump the parameters for the testers in GetPot Format")
code = True
if self.code.decode('hex') in argv:
del argv[argv.index(self.code.decode('hex'))]
code = False
self.options = parser.parse_args()
self.tests = self.options.test_name
self.options.code = code
# Convert all list based options of length one to scalars
for key, value in vars(self.options).items():
if type(value) == list and len(value) == 1:
tmp_str = getattr(self.options, key)
setattr(self.options, key, value[0])
self.checkAndUpdateCLArgs()
## Called after options are parsed from the command line
# Exit if options don't make any sense, print warnings if they are merely weird
def checkAndUpdateCLArgs(self):
opts = self.options
if opts.output_dir and not (opts.file or opts.sep_files or opts.fail_files or opts.ok_files):
print 'WARNING: --output-dir is specified but no output files will be saved, use -f or a --sep-files option'
if opts.group == opts.not_group:
print 'ERROR: The group and not_group options cannot specify the same group'
sys.exit(1)
if opts.store_time and not (opts.revision):
print 'ERROR: --store-timing is specified but no revision'
sys.exit(1)
if opts.store_time:
# timing returns Active Time, while store_timing returns Solve Time.
# Thus we need to turn off timing.
opts.timing = False
opts.scaling = True
if opts.valgrind_mode and (opts.parallel > 1 or opts.nthreads > 1):
print 'ERROR: --parallel and/or --threads can not be used with --valgrind'
sys.exit(1)
# Update any keys from the environment as necessary
if not self.options.method:
if os.environ.has_key('METHOD'):
self.options.method = os.environ['METHOD']
else:
self.options.method = 'opt'
if not self.options.valgrind_mode:
self.options.valgrind_mode = ''
# Update libmesh_dir to reflect arguments
if opts.libmesh_dir:
self.libmesh_dir = opts.libmesh_dir
# Generate a batch file if PBS argument supplied with out a file
if opts.pbs == 'generate':
largest_serial_num = 0
for name in os.listdir('.'):
m = re.search('pbs_(\d{3})', name)
if m != None and int(m.group(1)) > largest_serial_num:
largest_serial_num = int(m.group(1))
opts.pbs = "pbs_" + str(largest_serial_num+1).zfill(3)
def postRun(self, specs, timing):
return
def preRun(self):
if self.options.yaml:
self.factory.printYaml("Tests")
sys.exit(0)
elif self.options.dump:
self.factory.printDump("Tests")
sys.exit(0)
if self.options.pbs_cleanup:
self.cleanPBSBatch()
sys.exit(0)
def populateParams(self, params, test):
# TODO: Print errors or warnings about unused parameters
# Set difference
# viewkeys does not work with older Python...
# unused_params = test.viewkeys() - params.desc
params.valid = test
return params
def getFactory(self):
return self.factory
# Notes:
# SHOULD_CRASH returns > 0, because < 0 means the process was interrupted
|
amburan/moose
|
framework/scripts/TestHarness/TestHarness.py
|
Python
|
lgpl-2.1
| 34,926
|
[
"MOOSE",
"VTK"
] |
49229dab8125b7354aa271575553947b5fd456a052a84eece748b9f4a86b2010
|
"""
:mod: Utils
Module that collects utility functions.
"""
import fnmatch
from DIRAC import gConfig, S_OK
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
def voimport(base_mod):
"""
Import a module from the configured extensions; if it is not found there, fall back to the DIRAC base.
"""
for ext in gConfig.getValue("DIRAC/Extensions", []):
try:
return __import__(ext + base_mod, globals(), locals(), ["*"])
except ImportError:
continue
# If not found in extensions, import it in DIRAC base.
return __import__(base_mod, globals(), locals(), ["*"])
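# Illustrative usage (a sketch, not part of the original module): with
# DIRAC/Extensions set to ["LHCb"], voimport("DIRAC.Core.Base.Script") would
# first try to import "LHCbDIRAC.Core.Base.Script" and fall back to the plain
# DIRAC module if the extension does not provide it.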
def getCSTree(csPath=""):
"""
Return the configuration rooted at csPath as a Python dictionary
that reflects the structure of the configuration file.
"""
opHelper = Operations()
def getCSTreeAsDict(treePath):
"""
Function to recursively iterate over a CS tree
"""
csTreeDict = {}
opts = opHelper.getOptionsDict(treePath)
if opts["OK"]:
opts = opts["Value"]
for optKey, optValue in opts.items():
if optValue.find(",") > -1:
optValue = List.fromChar(optValue)
else:
optValue = [optValue]
csTreeDict[optKey] = optValue
secs = opHelper.getSections(treePath)
if secs["OK"]:
secs = secs["Value"]
for sec in secs:
secTree = getCSTreeAsDict("%s/%s" % (treePath, sec))
if not secTree["OK"]:
return secTree
csTreeDict[sec] = secTree["Value"]
return S_OK(csTreeDict)
return getCSTreeAsDict(csPath)
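# Illustrative result (hypothetical CS content, not part of the original
# module): a section "Foo" with options A = 1 and B = x,y would be returned as
#   S_OK({"Foo": {"A": ["1"], "B": ["x", "y"]}})
# i.e. every option value is wrapped in a list and comma-separated values are split.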
def configMatch(candidateParams, configParams):
"""
For a given configuration, the candidate is rejected if one of its
parameters does not match the corresponding config parameter.
Candidate parameters that are absent from the config are ignored,
and a candidate parameter set to None is treated as a wildcard.
"""
for key in candidateParams:
if key not in configParams:
# This candidate parameter is not constrained by the config, so skip it
# return False
continue
if candidateParams[key] is None:
# None is assumed to be a wildcard (*)
continue
cParameter = candidateParams[key]
if not isinstance(cParameter, list):
cParameter = [cParameter]
# We allow UNIX shell-style wildcards (fnmatch patterns) in the CS values
_matches = False
for configItem in configParams[key]:
if fnmatch.filter(set(cParameter), configItem):
_matches = True
break
if not _matches:
return False
return True
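# Illustrative behaviour (hypothetical values, not part of the original module):
#   configMatch({"Site": "LCG.CERN.ch"}, {"Site": ["LCG.*"]})      -> True  (fnmatch wildcard in the config)
#   configMatch({"Site": None}, {"Site": ["LCG.CERN.ch"]})         -> True  (None in the candidate acts as a wildcard)
#   configMatch({"Site": "LCG.PIC.es"}, {"Site": ["LCG.CERN.ch"]}) -> False (no config value matches)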
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/Utilities/Utils.py
|
Python
|
gpl-3.0
| 2,883
|
[
"DIRAC"
] |
4ec8cf94f4860802f803c108bfee1a0402a6dbfb75aacfa8ddb6c3c5b18fcfaa
|
# Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
import base64
from datetime import datetime, timedelta
import json
import os
import tempfile
from unittest import TestCase
import nacl.encoding
import requests.cookies
import six
from six.moves.urllib.parse import parse_qs
from six.moves.http_cookies import SimpleCookie
from httmock import (
HTTMock,
urlmatch,
response
)
import macaroonbakery as bakery
import macaroonbakery.httpbakery as httpbakery
import macaroonbakery.checkers as checkers
import macaroonbakery.httpbakery.agent as agent
class TestAgents(TestCase):
def setUp(self):
fd, filename = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(agent_file)
self.agent_filename = filename
fd, filename = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(bad_key_agent_file)
self.bad_key_agent_filename = filename
fd, filename = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(no_username_agent_file)
self.no_username_agent_filename = filename
def tearDown(self):
os.remove(self.agent_filename)
os.remove(self.bad_key_agent_filename)
os.remove(self.no_username_agent_filename)
def test_load_agents(self):
cookies, key = agent.load_agent_file(self.agent_filename)
self.assertEqual(key.encode(nacl.encoding.Base64Encoder),
b'CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJEU=')
self.assertEqual(
key.public_key.encode(nacl.encoding.Base64Encoder),
b'YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=')
value = cookies.get('agent-login', domain='1.example.com')
jv = base64.b64decode(value)
if six.PY3:
jv = jv.decode('utf-8')
data = json.loads(jv)
self.assertEqual(data['username'], 'user-1')
self.assertEqual(data['public_key'],
'YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=')
value = cookies.get('agent-login', domain='2.example.com',
path='/discharger')
jv = base64.b64decode(value)
if six.PY3:
jv = jv.decode('utf-8')
data = json.loads(jv)
self.assertEqual(data['username'], 'user-2')
self.assertEqual(data['public_key'],
'YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=')
def test_load_agents_into_cookies(self):
cookies = requests.cookies.RequestsCookieJar()
c1, key = agent.load_agent_file(
self.agent_filename,
cookies=cookies,
)
self.assertEqual(c1, cookies)
self.assertEqual(
key.encode(nacl.encoding.Base64Encoder),
b'CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJEU=',
)
self.assertEqual(
key.public_key.encode(nacl.encoding.Base64Encoder),
b'YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=',
)
value = cookies.get('agent-login', domain='1.example.com')
jv = base64.b64decode(value)
if six.PY3:
jv = jv.decode('utf-8')
data = json.loads(jv)
self.assertEqual(data['username'], 'user-1')
self.assertEqual(data['public_key'],
'YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=')
value = cookies.get('agent-login', domain='2.example.com',
path='/discharger')
jv = base64.b64decode(value)
if six.PY3:
jv = jv.decode('utf-8')
data = json.loads(jv)
self.assertEqual(data['username'], 'user-2')
self.assertEqual(data['public_key'],
'YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=')
def test_load_agents_with_bad_key(self):
with self.assertRaises(agent.AgentFileFormatError):
agent.load_agent_file(self.bad_key_agent_filename)
def test_load_agents_with_no_username(self):
with self.assertRaises(agent.AgentFileFormatError):
agent.load_agent_file(self.no_username_agent_filename)
def test_agent_login(self):
discharge_key = bakery.generate_key()
class _DischargerLocator(bakery.ThirdPartyLocator):
def third_party_info(self, loc):
if loc == 'http://0.3.2.1':
return bakery.ThirdPartyInfo(
public_key=discharge_key.public_key,
version=bakery.LATEST_VERSION,
)
d = _DischargerLocator()
server_key = bakery.generate_key()
server_bakery = bakery.Bakery(key=server_key, locator=d)
@urlmatch(path='.*/here')
def server_get(url, request):
ctx = checkers.AuthContext()
test_ops = [bakery.Op(entity='test-op', action='read')]
auth_checker = server_bakery.checker.auth(
httpbakery.extract_macaroons(request.headers))
try:
auth_checker.allow(ctx, test_ops)
resp = response(status_code=200,
content='done')
except bakery.PermissionDenied:
caveats = [
checkers.Caveat(location='http://0.3.2.1',
condition='is-ok')
]
m = server_bakery.oven.macaroon(
version=bakery.LATEST_VERSION,
expiry=datetime.utcnow() + timedelta(days=1),
caveats=caveats, ops=test_ops)
content, headers = httpbakery.discharge_required_response(
m, '/',
'test',
'message')
resp = response(status_code=401,
content=content,
headers=headers)
return request.hooks['response'][0](resp)
@urlmatch(path='.*/discharge')
def discharge(url, request):
qs = parse_qs(request.body)
if qs.get('token64') is None:
return response(
status_code=401,
content={
'Code': httpbakery.ERR_INTERACTION_REQUIRED,
'Message': 'interaction required',
'Info': {
'InteractionMethods': {
'agent': {'login-url': '/login'},
},
},
},
headers={'Content-Type': 'application/json'})
else:
qs = parse_qs(request.body)
content = {q: qs[q][0] for q in qs}
m = httpbakery.discharge(checkers.AuthContext(), content,
discharge_key, None, alwaysOK3rd)
return {
'status_code': 200,
'content': {
'Macaroon': m.to_dict()
}
}
key = bakery.generate_key()
@urlmatch(path='.*/login')
def login(url, request):
b = bakery.Bakery(key=discharge_key)
m = b.oven.macaroon(
version=bakery.LATEST_VERSION,
expiry=datetime.utcnow() + timedelta(days=1),
caveats=[bakery.local_third_party_caveat(
key.public_key,
version=httpbakery.request_version(request.headers))],
ops=[bakery.Op(entity='agent', action='login')])
return {
'status_code': 200,
'content': {
'macaroon': m.to_dict()
}
}
with HTTMock(server_get), \
HTTMock(discharge), \
HTTMock(login):
client = httpbakery.Client(interaction_methods=[
agent.AgentInteractor(
agent.AuthInfo(
key=key,
agents=[
agent.Agent(
username='test-user',
url=u'http://0.3.2.1'
)
],
),
),
])
resp = requests.get(
'http://0.1.2.3/here',
cookies=client.cookies,
auth=client.auth())
self.assertEqual(resp.content, b'done')
def test_agent_legacy(self):
discharge_key = bakery.generate_key()
class _DischargerLocator(bakery.ThirdPartyLocator):
def third_party_info(self, loc):
if loc == 'http://0.3.2.1':
return bakery.ThirdPartyInfo(
public_key=discharge_key.public_key,
version=bakery.LATEST_VERSION,
)
d = _DischargerLocator()
server_key = bakery.generate_key()
server_bakery = bakery.Bakery(key=server_key, locator=d)
@urlmatch(path='.*/here')
def server_get(url, request):
ctx = checkers.AuthContext()
test_ops = [bakery.Op(entity='test-op', action='read')]
auth_checker = server_bakery.checker.auth(
httpbakery.extract_macaroons(request.headers))
try:
auth_checker.allow(ctx, test_ops)
resp = response(status_code=200,
content='done')
except bakery.PermissionDenied:
caveats = [
checkers.Caveat(location='http://0.3.2.1',
condition='is-ok')
]
m = server_bakery.oven.macaroon(
version=bakery.LATEST_VERSION,
expiry=datetime.utcnow() + timedelta(days=1),
caveats=caveats, ops=test_ops)
content, headers = httpbakery.discharge_required_response(
m, '/',
'test',
'message')
resp = response(
status_code=401,
content=content,
headers=headers,
)
return request.hooks['response'][0](resp)
class InfoStorage:
info = None
@urlmatch(path='.*/discharge')
def discharge(url, request):
qs = parse_qs(request.body)
if qs.get('caveat64') is not None:
content = {q: qs[q][0] for q in qs}
class InteractionRequiredError(Exception):
def __init__(self, error):
self.error = error
class CheckerInError(bakery.ThirdPartyCaveatChecker):
def check_third_party_caveat(self, ctx, info):
InfoStorage.info = info
raise InteractionRequiredError(
httpbakery.Error(
code=httpbakery.ERR_INTERACTION_REQUIRED,
version=httpbakery.request_version(
request.headers),
message='interaction required',
info=httpbakery.ErrorInfo(
wait_url='http://0.3.2.1/wait?'
'dischargeid=1',
visit_url='http://0.3.2.1/visit?'
'dischargeid=1'
),
),
)
try:
httpbakery.discharge(
checkers.AuthContext(), content,
discharge_key, None, CheckerInError())
except InteractionRequiredError as exc:
return response(
status_code=401,
content={
'Code': exc.error.code,
'Message': exc.error.message,
'Info': {
'WaitURL': exc.error.info.wait_url,
'VisitURL': exc.error.info.visit_url,
},
},
headers={'Content-Type': 'application/json'})
key = bakery.generate_key()
@urlmatch(path='.*/visit?$')
def visit(url, request):
if request.headers.get('Accept') == 'application/json':
return {
'status_code': 200,
'content': {
'agent': request.url
}
}
cs = SimpleCookie()
cookies = request.headers.get('Cookie')
if cookies is not None:
cs.load(str(cookies))
public_key = None
for c in cs:
if c == 'agent-login':
json_cookie = json.loads(
base64.b64decode(cs[c].value).decode('utf-8'))
public_key = bakery.PublicKey.deserialize(
json_cookie.get('public_key'))
ms = httpbakery.extract_macaroons(request.headers)
if len(ms) == 0:
b = bakery.Bakery(key=discharge_key)
m = b.oven.macaroon(
version=bakery.LATEST_VERSION,
expiry=datetime.utcnow() + timedelta(days=1),
caveats=[bakery.local_third_party_caveat(
public_key,
version=httpbakery.request_version(request.headers))],
ops=[bakery.Op(entity='agent', action='login')])
content, headers = httpbakery.discharge_required_response(
m, '/',
'test',
'message')
resp = response(status_code=401,
content=content,
headers=headers)
return request.hooks['response'][0](resp)
return {
'status_code': 200,
'content': {
'agent-login': True
}
}
@urlmatch(path='.*/wait?$')
def wait(url, request):
class EmptyChecker(bakery.ThirdPartyCaveatChecker):
def check_third_party_caveat(self, ctx, info):
return []
if InfoStorage.info is None:
self.fail('visit url has not been visited')
m = bakery.discharge(
checkers.AuthContext(),
InfoStorage.info.id,
InfoStorage.info.caveat,
discharge_key,
EmptyChecker(),
_DischargerLocator(),
)
return {
'status_code': 200,
'content': {
'Macaroon': m.to_dict()
}
}
with HTTMock(server_get), \
HTTMock(discharge), \
HTTMock(visit), \
HTTMock(wait):
client = httpbakery.Client(interaction_methods=[
agent.AgentInteractor(
agent.AuthInfo(
key=key,
agents=[agent.Agent(username='test-user',
url=u'http://0.3.2.1')],
),
),
])
resp = requests.get(
'http://0.1.2.3/here',
cookies=client.cookies,
auth=client.auth(),
)
self.assertEqual(resp.content, b'done')
agent_file = '''
{
"key": {
"public": "YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=",
"private": "CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJEU="
},
"agents": [{
"url": "https://1.example.com/",
"username": "user-1"
}, {
"url": "https://2.example.com/discharger",
"username": "user-2"
}]
}
'''
bad_key_agent_file = '''
{
"key": {
"public": "YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=",
"private": "CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJE=="
},
"agents": [{
"url": "https://1.example.com/",
"username": "user-1"
}, {
"url": "https://2.example.com/discharger",
"username": "user-2"
}]
}
'''
no_username_agent_file = '''
{
"key": {
"public": "YAhRSsth3a36mRYqQGQaLiS4QJax0p356nd+B8x7UQE=",
"private": "CqoSgj06Zcgb4/S6RT4DpTjLAfKoznEY3JsShSjKJEU="
},
"agents": [{
"url": "https://1.example.com/"
}, {
"url": "https://2.example.com/discharger",
"username": "user-2"
}]
}
'''
class ThirdPartyCaveatCheckerF(bakery.ThirdPartyCaveatChecker):
def __init__(self, check):
self._check = check
def check_third_party_caveat(self, ctx, info):
cond, arg = checkers.parse_caveat(info.condition)
return self._check(cond, arg)
alwaysOK3rd = ThirdPartyCaveatCheckerF(lambda cond, arg: [])
|
fabricematrat/py-macaroon-bakery
|
macaroonbakery/tests/test_agent.py
|
Python
|
lgpl-3.0
| 17,234
|
[
"VisIt"
] |
9e7286780a9c8579bedc44e3a764a45a41fc92875b412ae7ef8795d95d4e4485
|
import MDAnalysis
import matplotlib.pyplot as plt
import numpy as np
from MDAnalysis.analysis.align import *
from MDAnalysis.analysis.rms import rmsd
def proRMSD(u,ref):
"""
This function produces RMSD data and plots for a protein.
:input
1) Universe of the trajectory
2) reference universe
:return
1) matplotlib axes object
2) array of backbone (Calpha) RMSD data
3) array of all-atom (no hydrogen) RMSD data
"""
RMSD = []
RMSDAllAtom = []
backbone = u.select_atoms("protein and (name C or name N or name CA)")
reference = ref.select_atoms("protein and (name C or name N or name CA)")
Allcurrent = u.select_atoms("protein and not name H*")
Allreference = ref.select_atoms("protein and not name H*")
for ts in u.trajectory:
A = backbone.coordinates()
B = reference.coordinates()
E = Allcurrent.coordinates()
F = Allreference.coordinates()
C = rmsd(A,B)
G = rmsd(E,F)
RMSD.append((u.trajectory.frame, C))
RMSDAllAtom.append((u.trajectory.frame, G))
RMSD = np.array(RMSD)
RMSDAllAtom = np.array(RMSDAllAtom)
#print RMSDAllAtom
#print RMSD
ax = plt.subplot(111)
ax.plot(RMSD[:,0], RMSD[:,1], 'r', lw=2, label="Calpha RMSD")
ax.plot(RMSDAllAtom[:,0], RMSDAllAtom[:,1], 'g', lw=2, label="All Atom RMSD (noH)")
ax.set_xlabel("Frame")
ax.set_ylabel(r"RMSD of Backbone ($\AA$)")
#ax.figure.savefig("RMSD.pdf")
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc = 'lower left')
#plt.draw()
return ax, RMSD, RMSDAllAtom
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description='This script will compute and plot protein RMSD for a given universe (trajectory).')
    parser.add_argument('-j', '--jobname', help='Enter your job name and it will appear as the first column in the result file', default='Test')
    parser.add_argument('-trj', '--trajectory', help='Filename of trajectory file.', required=True)
parser.add_argument('-top', '--topology', help='Filename of psf/topology file', required=True)
args = parser.parse_args()
u = MDAnalysis.Universe(args.topology, args.trajectory)
ref = MDAnalysis.Universe(args.topology, args.trajectory)
caRMSD =[]
allRMSD = []
fig,caRMSD,allRMSD = proRMSD(u,ref)
#print caRMSD
np.savetxt(args.jobname+"-caRMSD-pro.data", caRMSD)
np.savetxt(args.jobname+"-allRMSD-pro.data", allRMSD)
fig.figure.savefig(args.jobname+"-proRMSD.pdf")
|
mktumbi/SimAnaRep
|
SimAnaRepproRMSD.py
|
Python
|
gpl-2.0
| 2,516
|
[
"MDAnalysis"
] |
b3bde7bda0c9c3cb34cdf6dc9b87fff7806e44d7e6f05e330df4ffedf54c47f5
|
#!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import argparse
import glob
import logging
import os
import re
import sys
# Append mugqic_pipelines directory to Python library path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))))
# MUGQIC Modules
from core.config import *
from core.job import *
from core.pipeline import *
from bfx.readset import *
from bfx import differential_expression
from bfx import gq_seq_utils
from pipelines import common
from bfx import rmarkdown
from bfx import samtools
from bfx import tools
from bfx import trinity
from bfx import trinotate
from bfx import blast
from bfx import exonerate
log = logging.getLogger(__name__)
class RnaSeqDeNovoAssembly(common.Illumina):
"""
RNA-Seq De Novo Assembly Pipeline
=================================
The standard MUGQIC RNA-Seq De Novo Assembly pipeline uses the [Trinity](http://trinityrnaseq.sourceforge.net/)
software suite to reconstruct transcriptomes from RNA-Seq data without using any reference genome or transcriptome.
First, reads are trimmed with [Trimmomatic](http://www.usadellab.org/cms/index.php?page=trimmomatic)
and normalized in order to reduce memory requirement and decrease assembly runtime, using the Trinity
normalization utility inspired by the [Diginorm](http://arxiv.org/abs/1203.4802) algorithm.
Then, the transcriptome is assembled on normalized reads using the Trinity assembler. Trinity creates
a Trinity.fasta file with a list of contigs representing the transcriptome isoforms. Those transcripts
are grouped in components mostly representing genes.
Components and transcripts are functionally annotated using the [Trinotate](http://trinotate.sourceforge.net/) suite.
Gene abundance estimation for each sample is performed using [RSEM](http://deweylab.biostat.wisc.edu/rsem/)
(RNA-Seq by Expectation-Maximization). Differential gene expression analysis is performed using
[DESeq](http://genomebiology.com/2010/11/10/R106) and [edgeR](http://bioinformatics.oxfordjournals.org/content/26/1/139/) R Bioconductor packages.
The DESeq and edgeR methods model **count data** by a negative binomial distribution. The parameters of
the distribution (mean and dispersion) are estimated from the data, i.e. from the read counts in the input files.
Both methods compute a measure of read abundance, i.e. expression level (called *base mean* or
*mean of normalized counts* in DESeq, and *concentration* in edgeR) for each gene and apply a hypothesis test
to each gene to evaluate differential expression. In particular, both methods determine a p-value and
a log2 fold change (in expression level) for each gene. The Log2 FC of EdgeR is reported in the differential gene
results file, one file per design.
The log2 fold change is the logarithm (to base 2) of the fold change from condition A to condition B
(mutation or treatment are the most common conditions). A "fold change" between conditions A and B at a gene
or transcript is normally computed, at that gene or transcript, as the ratio of the base mean of scaled counts
for condition B to the base mean of scaled counts for condition A. Counts are scaled by a size factor in
a step called normalization (if the counts of non-differentially expressed genes in one sample are, on average,
twice as high as in another, the size factor for the first sample should be twice that of the other sample).
Each column of the count table is then divided by the size factor for this column and the count values
are brought to a common scale, making them comparable. See the [EdgeR vignette](http://www.bioconductor.org/packages/2.12/bioc/vignettes/edgeR/inst/doc/edgeR.pdf) for additional information on normalization approaches used in the pipeline.
The differential gene analysis is followed by a Gene Ontology (GO) enrichment analysis.
This analysis uses the [goseq approach](http://bioconductor.org/packages/release/bioc/html/goseq.html).
goseq is based on the use of non-native GO terms resulting from Trinotate annotations (see details in section 5 of
[the corresponding vignette](http://bioconductor.org/packages/release/bioc/vignettes/goseq/inst/doc/goseq.pdf)).
Thus a high-quality contig assembly is created by extracting all transcripts having a functional annotation as defined by Trinotate;
the Top BLASTX hit and TmHMM annotations are used by default.
Finally, different exploratory data analysis (EDA) techniques are applied to filtered isoform expression levels.
The main goals of expression level EDA are to detect outliers and potential mislabeling, to explore the homogeneity
of biological replicates, and to assess the global effects of the different experimental variables.
An HTML summary report is automatically generated by the pipeline. This report contains description of
the sequencing experiment as well as a detailed presentation of the pipeline steps and results. Various
Quality Control (QC) summary statistics are included in the report and additional QC analysis is accessible
for download directly through the report. The report also includes the main references of the software and
methods used during the analysis, together with the full list of parameters that have been passed
to the pipeline main script.
"""
def __init__(self):
# Add pipeline specific arguments
self.argparser.add_argument("-d", "--design", help="design file", type=file)
super(RnaSeqDeNovoAssembly, self).__init__()
def insilico_read_normalization_readsets(self):
"""
Normalize each readset, using the Trinity normalization utility.
"""
jobs = []
for readset in self.readsets:
trim_file_prefix = os.path.join("trim", readset.sample.name, readset.name + ".trim.")
normalization_directory = os.path.join("insilico_read_normalization", readset.name)
if readset.run_type == "PAIRED_END":
left_or_single_reads = [trim_file_prefix + "pair1.fastq.gz"]
right_reads = [trim_file_prefix + "pair2.fastq.gz"]
elif readset.run_type == "SINGLE_END":
left_or_single_reads = [trim_file_prefix + "single.fastq.gz"]
right_reads = []
else:
raise Exception("Error: run type \"" + readset.run_type +
"\" is invalid for readset \"" + readset.name + "\" (should be PAIRED_END or SINGLE_END)!")
job = trinity.insilico_read_normalization(
left_or_single_reads,
right_reads,
"fq",
config.param('insilico_read_normalization_readsets', 'jellyfish_memory'),
normalization_directory,
config.param('insilico_read_normalization_readsets', 'cpu', required=False, type='int')
)
job.name = "insilico_read_normalization_readsets." + readset.name
jobs.append(job)
return jobs
def insilico_read_normalization_all(self):
"""
Merge all normalized readsets together and normalize the result, using the Trinity normalization utility.
"""
jobs = []
normalization_directory = "insilico_read_normalization"
normalization_directory_all = os.path.join(normalization_directory, "all")
left_or_single_reads = []
right_reads = []
for readset in self.readsets:
if readset.run_type == "PAIRED_END":
left_or_single_reads.append(os.path.join(normalization_directory, readset.name, "left.norm.fq"))
right_reads.append(os.path.join(normalization_directory, readset.name, "right.norm.fq"))
elif readset.run_type == "SINGLE_END":
left_or_single_reads.append(os.path.join(normalization_directory, readset.name, "single.norm.fq"))
else:
raise Exception("Error: run type \"" + readset.run_type +
"\" is invalid for readset \"" + readset.name + "\" (should be PAIRED_END or SINGLE_END)!")
job = trinity.insilico_read_normalization(
left_or_single_reads,
right_reads,
"fq",
config.param('insilico_read_normalization_all', 'jellyfish_memory'),
normalization_directory_all,
config.param('insilico_read_normalization_all', 'cpu', required=False, type='int')
)
job.name = "insilico_read_normalization_all"
jobs.append(job)
report_file = os.path.join("report", "RnaSeqDeNovoAssembly.insilico_read_normalization_all.md")
normalization_stats_file = os.path.join("insilico_read_normalization", "all", "normalization.stats.tsv")
jobs.append(
Job(
[normalization_stats_file],
[report_file],
[['insilico_read_normalization_all', 'module_pandoc']],
command="""\
mkdir -p report && \\
sum_norm=`cut -f2 {normalization_stats_file}` && \\
normalization_table=`sed '1d' report/trimReadsetTable.tsv | LC_NUMERIC=en_CA awk -v sum_norm=$sum_norm '{{sum_trim+=$4}}END{{print sprintf("%\\47d", sum_trim)"|"sprintf("%\\47d", sum_norm)"|"sprintf("%.2f", sum_norm / sum_trim * 100)}}'` && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable read_type="{read_type}" \\
--variable normalization_table="$normalization_table" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
read_type="Paired" if self.run_type == 'PAIRED_END' else "Single",
normalization_stats_file=normalization_stats_file,
report_file=report_file
),
report_files=[report_file],
name="insilico_read_normalization_all_report")
)
return jobs
def trinity(self):
"""
Create a de novo assembly from normalized readsets using [Trinity](http://trinityrnaseq.sourceforge.net/).
"""
jobs = []
normalization_directory = os.path.join("insilico_read_normalization", "all")
output_directory = "trinity_out_dir"
trinity_fasta = os.path.join(output_directory, "Trinity.fasta")
trinity_stats_prefix = os.path.join(output_directory, "Trinity.stats")
if self.run_type == "PAIRED_END":
left_reads = os.path.join(normalization_directory, "left.norm.fq")
right_reads = os.path.join(normalization_directory, "right.norm.fq")
input_files = [left_reads, right_reads]
reads_option = "--left " + left_reads + " \\\n --right " + right_reads
elif self.run_type == "SINGLE_END":
single_reads = os.path.join(normalization_directory, "single.norm.fq")
input_files = [single_reads]
reads_option = "--single " + single_reads
# Trinity job
jobs.append(concat_jobs([
trinity.trinity(input_files, trinity_fasta, output_directory, reads_option),
Job(
[trinity_fasta],
[trinity_fasta + ".zip"],
command="zip -j " + trinity_fasta + ".zip " + trinity_fasta),
Job(
[trinity_fasta],
[trinity_stats_prefix + ".csv", trinity_stats_prefix + ".jpg", trinity_stats_prefix + ".pdf"],
[['trinity', 'module_R'], ['trinity', 'module_mugqic_R_packages']],
command="Rscript -e 'library(gqSeqUtils); dnaFastaStats(filename = \"" + trinity_fasta + "\", type = \"trinity\", output.prefix = \"" + trinity_stats_prefix + "\")'")
], name="trinity"))
report_file = os.path.join("report", "RnaSeqDeNovoAssembly.trinity.md")
jobs.append(
Job(
[trinity_fasta + ".zip", trinity_stats_prefix + ".csv", trinity_stats_prefix + ".jpg", trinity_stats_prefix + ".pdf"],
[report_file],
[['trinity', 'module_pandoc']],
command="""\
mkdir -p report && \\
cp {trinity_fasta}.zip {trinity_stats_prefix}.csv {trinity_stats_prefix}.jpg {trinity_stats_prefix}.pdf report/ && \\
assembly_table=`sed '1d' {trinity_stats_prefix}.csv | perl -pe 's/^"([^"]*)",/\\1\t/g' | grep -P "^(Nb. Transcripts|Nb. Components|Total Transcripts Length|Min. Transcript Length|Median Transcript Length|Mean Transcript Length|Max. Transcript Length|N50)" | LC_NUMERIC=en_CA awk -F"\t" '{{print $1"|"sprintf("%\\47d", $2)}}'` && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable assembly_table="$assembly_table" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
trinity_fasta=trinity_fasta,
trinity_stats_prefix=trinity_stats_prefix,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="trinity_report")
)
return jobs
def exonerate_fastasplit(self):
"""
Split the Trinity assembly FASTA into chunks for further parallel BLAST annotations.
"""
trinity_directory = "trinity_out_dir"
trinity_fasta = os.path.join(trinity_directory, "Trinity.fasta")
trinity_fasta_for_blast = os.path.join(trinity_directory, "Trinity.fa")
trinity_chunks_directory = os.path.join(trinity_directory, "Trinity.fasta_chunks")
num_fasta_chunks = config.param('exonerate_fastasplit', 'num_fasta_chunks', type='posint')
return [concat_jobs([
Job(command="rm -rf " + trinity_chunks_directory),
Job(command="mkdir -p " + trinity_chunks_directory),
trinity.prepare_for_blast(trinity_fasta, trinity_fasta_for_blast),
exonerate.fastasplit(trinity_fasta_for_blast, trinity_chunks_directory, "Trinity.fa_chunk", num_fasta_chunks)
], name="exonerate_fastasplit.Trinity.fasta")]
def blastx_trinity_uniprot(self):
"""
Annotate Trinity FASTA chunks with Swiss-Prot and UniRef databases using [blastx](http://blast.ncbi.nlm.nih.gov/).
"""
jobs = []
trinity_chunks_directory = os.path.join("trinity_out_dir", "Trinity.fasta_chunks")
blast_directory = "blast"
num_fasta_chunks = config.param('exonerate_fastasplit', 'num_fasta_chunks', type='posint')
program = "blastx"
swissprot_db = config.param("blastx_trinity_uniprot", "swissprot_db", type='prefixpath')
uniref_db = config.param("blastx_trinity_uniprot", "uniref_db", type='prefixpath')
cpu = config.param('blastx_trinity_uniprot', 'cpu')
# (Removed blast on uniref_db since it's too long)
for db in [swissprot_db]:
if not glob.glob(db + ".*phr"):
raise Exception("Error: " + db + " BLAST db files do not exist!")
for i in range(num_fasta_chunks):
trinity_chunk = os.path.join(trinity_chunks_directory, "Trinity.fa_chunk_{:07d}".format(i))
query_chunk = os.path.join(blast_directory, "query_Trinity_" + os.path.basename(db) + "_chunk_{:07d}.tsv".format(i))
blast_chunk = os.path.join(blast_directory, program + "_Trinity_" + os.path.basename(db) + "_chunk_{:07d}.tsv".format(i))
jobs.append(concat_jobs([
Job(command="mkdir -p " + blast_directory, removable_files=[blast_directory]),
Job(command="ln -s -f " + os.path.relpath(trinity_chunk, os.path.dirname(query_chunk)) + " " + query_chunk, removable_files=[blast_directory]),
blast.parallel_blast(trinity_chunk, query_chunk, blast_chunk, program, db, cpu),
], name="blastx_trinity_uniprot." + os.path.basename(db) + ".chunk_{:07d}".format(i)))
return jobs
def blastx_trinity_uniprot_merge(self):
"""
Merge blastx Swiss-Prot and UniRef chunks results.
"""
jobs = []
blast_directory = "blast"
num_fasta_chunks = config.param('exonerate_fastasplit', 'num_fasta_chunks', type='posint')
program = "blastx"
blast_prefix = os.path.join(blast_directory, program + "_Trinity_")
swissprot_db = config.param("blastx_trinity_uniprot", "swissprot_db", type='prefixpath')
uniref_db = config.param("blastx_trinity_uniprot", "uniref_db", type='prefixpath')
# (Removed blast on uniref_db since it's too long)
for db in [swissprot_db]:
blast_chunks = [os.path.join(blast_prefix + os.path.basename(db) + "_chunk_{:07d}.tsv".format(i)) for i in range(num_fasta_chunks)]
blast_result = os.path.join(blast_prefix + os.path.basename(db) + ".tsv")
jobs.append(concat_jobs([
Job(
blast_chunks,
[blast_result],
command="cat \\\n " + " \\\n ".join(blast_chunks) + " \\\n > " + blast_result
),
Job([blast_result], [blast_result + ".zip"], command="zip -j {blast_result}.zip {blast_result}".format(blast_result=blast_result))
], name="blastx_trinity_" + os.path.basename(db) + "_merge"))
report_file = os.path.join("report", "RnaSeqDeNovoAssembly.blastx_trinity_uniprot_merge.md")
jobs.append(
Job(
[blast_prefix + os.path.basename(swissprot_db) + ".tsv.zip"],
[report_file],
[['blastx_trinity_uniprot_merge', 'module_pandoc']],
command="""\
mkdir -p report && \\
cp {blast_prefix}{blast_db}.tsv.zip report/ && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable blast_db="{blast_db}" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
blast_prefix=blast_prefix,
blast_db=os.path.basename(swissprot_db),
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[report_file],
name="blastx_trinity_uniprot_merge_report")
)
return jobs
def transdecoder(self):
"""
Identifies candidate coding regions within transcript sequences using [Transdecoder](http://transdecoder.github.io/).
"""
trinity_fasta = os.path.join("trinity_out_dir", "Trinity.fasta")
transdecoder_directory = os.path.join("trinotate", "transdecoder")
transdecoder_subdirectory = os.path.join(os.path.basename(trinity_fasta) + ".transdecoder_dir")
return trinotate.transdecoder(trinity_fasta, transdecoder_directory, transdecoder_subdirectory)
def hmmer(self):
"""
Identifies protein domains using [HMMER](http://hmmer.janelia.org/).
"""
transdecoder_directory = os.path.join("trinotate", "transdecoder")
transdecoder_fasta = os.path.join(transdecoder_directory, "Trinity.fasta.transdecoder.pep")
transdecoder_pfam = os.path.join(transdecoder_directory, "Trinity.fasta.transdecoder.pfam")
return trinotate.hmmer(transdecoder_directory, transdecoder_fasta, transdecoder_pfam)
def rnammer_transcriptome(self):
"""
Identify potential rRNA transcripts using [RNAmmer](http://www.cbs.dtu.dk/cgi-bin/sw_request?rnammer).
"""
trinity_fasta = os.path.join("trinity_out_dir", "Trinity.fasta")
rnammer_directory = os.path.join("trinotate", "rnammer")
return trinotate.rnammer_transcriptome(trinity_fasta, rnammer_directory)
def blastp_transdecoder_uniprot(self):
"""
Search Transdecoder-predicted coding regions for sequence homologies on UniProt using [blastp](http://blast.ncbi.nlm.nih.gov/).
"""
blast_directory = os.path.join("trinotate", "blastp")
transdecoder_fasta = os.path.join("trinotate", "transdecoder", "Trinity.fasta.transdecoder.pep")
db = config.param("blastp_transdecoder_uniprot", "swissprot_db", type='prefixpath')
return trinotate.blastp_transdecoder_uniprot(blast_directory, transdecoder_fasta, db)
def signalp(self):
"""
Predict signal peptides using [SignalP](http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?signalp).
"""
transdecoder_fasta = os.path.join("trinotate", "transdecoder", "Trinity.fasta.transdecoder.pep")
signalp_gff = os.path.join("trinotate", "signalp", "signalp.gff")
return trinotate.signalp(transdecoder_fasta, signalp_gff)
def tmhmm(self):
"""
Predict transmembrane regions using [TMHMM](http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?tmhmm).
"""
transdecoder_fasta = os.path.join("trinotate", "transdecoder", "Trinity.fasta.transdecoder.pep")
tmhmm_output = os.path.join("trinotate", "tmhmm", "tmhmm.out")
return trinotate.tmhmm(transdecoder_fasta, tmhmm_output)
def trinotate(self):
"""
Perform transcriptome functional annotation and analysis using [Trinotate](http://trinotate.sourceforge.net/).
All functional annotation data is integrated into a SQLite database and a whole annotation report is created.
"""
jobs = []
swissprot_db = os.path.basename(config.param("blastx_trinity_uniprot", "swissprot_db", type='prefixpath'))
transdecoder_pep = os.path.join("trinotate", "transdecoder", "Trinity.fasta.transdecoder.pep")
jobs.append( trinotate.trinotate(
swissprot_db = swissprot_db ,
trinity_fasta = os.path.join("trinity_out_dir", "Trinity.fasta"),
swissprot_blastx = os.path.join("blast", "blastx_Trinity_" + swissprot_db + ".tsv"),
transdecoder_pep = transdecoder_pep,
transdecoder_pfam = os.path.join("trinotate", "transdecoder", "Trinity.fasta.transdecoder.pfam"),
swissprot_blastp = os.path.join("trinotate", "blastp", "blastp_" + os.path.basename(transdecoder_pep) + "_" + swissprot_db + ".tsv"),
rnammer = os.path.join("trinotate", "rnammer", "Trinity.fasta.rnammer.gff"),
signalp = os.path.join("trinotate", "signalp", "signalp.gff"),
tmhmm = os.path.join("trinotate", "tmhmm", "tmhmm.out"),
trinotate_sqlite = os.path.join("trinotate", "Trinotate.sqlite"),
trinotate_report = os.path.join("trinotate", "trinotate_annotation_report.tsv")
)
)
# Render Rmarkdown Report
jobs.append(
rmarkdown.render(
job_input = os.path.join("trinotate", "trinotate_annotation_report.tsv"),
job_name = "trinotate_report",
input_rmarkdown_file = os.path.join(self.report_template_dir, "RnaSeqDeNovoAssembly.trinotate.Rmd") ,
render_output_dir = 'report',
module_section = 'report',
prerun_r = 'report_dir="report"; source_dir="trinotate";'
)
)
return jobs
def align_and_estimate_abundance_prep_reference(self):
"""
Index Trinity FASTA file for further abundance estimation using [Trinity align_and_estimate_abundance.pl utility](http://trinityrnaseq.sourceforge.net/analysis/abundance_estimation.html).
"""
trinity_fasta = os.path.join("trinity_out_dir", "Trinity.fasta")
return [trinity.align_and_estimate_abundance(trinity_fasta, prep_reference=True)]
def align_and_estimate_abundance(self):
"""
Estimate transcript abundance using [RSEM](http://deweylab.biostat.wisc.edu/rsem/) via
[Trinity align_and_estimate_abundance.pl utility](http://trinityrnaseq.sourceforge.net/analysis/abundance_estimation.html).
"""
jobs = []
trinity_fasta = os.path.join("trinity_out_dir", "Trinity.fasta")
for sample in self.samples:
trim_directory = os.path.join("trim", sample.name)
output_directory = os.path.join("align_and_estimate_abundance", sample.name)
left_or_single_reads = []
right_reads = []
for readset in sample.readsets:
if readset.run_type == "PAIRED_END":
left_or_single_reads.append(os.path.join(trim_directory, readset.name + ".trim.pair1.fastq.gz"))
right_reads.append(os.path.join(trim_directory, readset.name + ".trim.pair2.fastq.gz"))
elif readset.run_type == "SINGLE_END":
left_or_single_reads.append(os.path.join(trim_directory, readset.name + ".trim.single.fastq.gz"))
else:
raise Exception("Error: run type \"" + readset.run_type +
"\" is invalid for readset \"" + readset.name + "\" (should be PAIRED_END or SINGLE_END)!")
jobs.append(trinity.align_and_estimate_abundance(trinity_fasta=trinity_fasta, output_directory=output_directory, prep_reference=False, left_or_single_reads=left_or_single_reads, right_reads=right_reads, sample_name=sample.name ) )
# Generate read files and matrix of estimated abundances, send to the differential_expression directory (God bless Joel)
output_directory = "differential_expression"
for item in "genes","isoforms":
matrix = os.path.join(output_directory, item + ".counts.matrix")
count_files = os.path.join(output_directory, item + ".counts.files")
align_and_estimate_abundance_results = [os.path.join("align_and_estimate_abundance", sample.name, sample.name + "." + item + ".results") for sample in self.samples]
out_prefix=os.path.join(output_directory, item)
jobs.append(concat_jobs([
Job(command="mkdir -p " + os.path.join(output_directory, item)),
Job(
align_and_estimate_abundance_results,
[count_files],
command="echo -e \"" + "\\n".join(align_and_estimate_abundance_results) + "\" > " + count_files
),
# Create isoforms and genes matrices with counts of RNA-seq fragments per feature using Trinity RSEM utility
trinity.abundance_estimates_to_matrix(count_files, matrix, out_prefix),
trinity.prepare_abundance_matrix_for_dge(matrix, item),
trinity.extract_lengths_from_RSEM_output(align_and_estimate_abundance_results[0], os.path.join(output_directory, item + ".lengths.tsv"))
], name="align_and_estimate_abundance." + item))
# Parse Trinotate results to obtain blast, go annotation and a filtered set of contigs
isoforms_lengths = os.path.join(output_directory, "isoforms.lengths.tsv")
trinotate_annotation_report = os.path.join("trinotate", "trinotate_annotation_report.tsv")
gene_id_column = "#gene_id" if not config.param('trinotate', 'gene_column', required=False) else config.param('trinotate', 'gene_column', required=False)
transcript_id_column = "transcript_id" if not config.param('trinotate', 'transcript_column', required=False) else config.param('trinotate', 'gene_column', required=False)
trinotate_filters = None if not config.param('filter_annotated_components', 'filters_trinotate', required=False) else config.param('filter_annotated_components', 'filters_trinotate', required=False).split("\n")
jobs.append(
tools.py_parseTrinotateOutput(trinotate_annotation_report, trinotate_annotation_report + ".genes" , trinotate_annotation_report + ".isoforms",
gene_id_column,
transcript_id_column,
isoforms_lengths,
"align_and_estimate_abundance.parse_trinotate",
trinotate_filters)
)
return jobs
def gq_seq_utils_exploratory_analysis_rnaseq_denovo(self):
"""
Exploratory analysis using the gqSeqUtils R package.
"""
jobs = []
# gqSeqUtils function call
jobs.append(concat_jobs([
Job(command="mkdir -p exploratory"),
gq_seq_utils.exploratory_analysis_rnaseq_denovo(
os.path.join("differential_expression", "genes.counts.matrix"),
os.path.join("differential_expression", "genes.lengths.tsv"),
"exploratory"
)
], name="gq_seq_utils_exploratory_analysis_rnaseq_denovo"))
# Render Rmarkdown Report
jobs.append(
rmarkdown.render(
job_input = os.path.join("exploratory", "index.tsv"),
job_name = "gq_seq_utils_exploratory_analysis_rnaseq_denovo_report",
input_rmarkdown_file = os.path.join(self.report_template_dir, "RnaSeqDeNovoAssembly.gq_seq_utils_exploratory_analysis_rnaseq.Rmd") ,
render_output_dir = 'report',
module_section = 'report', # TODO: this or exploratory?
prerun_r = 'report_dir="report";' # TODO: really necessary or should be hard-coded in exploratory.Rmd?
)
)
return jobs
def filter_annotated_components(self):
"""
Filter high-quality contigs based on values in the Trinotate annotations. Recreate a high-quality contig FASTA file and compute assembly statistics using the gqSeqUtils R package.
"""
jobs = []
output_directory = "filtered_assembly"
trinity_fasta = os.path.join("trinity_out_dir", "Trinity.fasta")
trinity_filtered = os.path.join(output_directory, "Trinity.fasta")
trinity_filtered_prefix = os.path.join(output_directory, "Trinity")
trinity_stats_prefix = os.path.join(output_directory, "trinity_filtered.stats")
trinotate_annotation_report_filtered = os.path.join("trinotate", "trinotate_annotation_report.tsv" + ".isoforms_filtered.tsv")
# Use python to extract selected headers
jobs.append(concat_jobs([
Job(command="mkdir -p " + output_directory
),
tools.py_filterAssemblyToFastaToTsv(trinity_fasta , trinotate_annotation_report_filtered, 0, trinity_filtered_prefix),
Job(
[trinity_filtered],
[trinity_stats_prefix + ".csv", trinity_stats_prefix + ".jpg", trinity_stats_prefix + ".pdf"],
[['filter_annotated_components', 'module_R'], ['filter_annotated_components', 'module_mugqic_R_packages']],
command="""\
Rscript -e 'library(gqSeqUtils); dnaFastaStats(filename = \"{trinity_filtered}\", type = \"trinity\", output.prefix = \"{trinity_stats_prefix}\")' """.format(
trinity_filtered=trinity_filtered,
trinity_stats_prefix=trinity_stats_prefix)
),
Job(
[trinity_filtered],
[trinity_filtered + ".zip"],
command="zip -j " + trinity_filtered + ".zip " + trinity_filtered + " " + trinity_filtered_prefix + ".tsv"
)
], name="filter_annotated_components"))
report_file = os.path.join("report", "RnaSeqDeNovoAssembly.filtered.trinity.md")
jobs.append(
Job(
[trinity_filtered + ".zip", trinity_stats_prefix + ".csv", trinity_stats_prefix + ".jpg", trinity_stats_prefix + ".pdf"],
[report_file],
[['trinity', 'module_pandoc']],
command="""\
mkdir -p report && \\
cp {trinity_filtered}.zip report/{output_directory}.zip && \\
cp {trinity_stats_prefix}.csv {trinity_stats_prefix}.jpg {trinity_stats_prefix}.pdf report/ && \\
assembly_table=`sed '1d' {trinity_stats_prefix}.csv | perl -pe 's/^"([^"]*)",/\\1\t/g' | grep -P "^(Nb. Transcripts|Nb. Components|Total Transcripts Length|Min. Transcript Length|Median Transcript Length|Mean Transcript Length|Max. Transcript Length|N50)" | LC_NUMERIC=en_CA awk -F"\t" '{{print $1"|"sprintf("%\\47d", $2)}}'` && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable assembly_table="$assembly_table" \\
--variable filter_string="{filter_string}" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
trinity_filtered=trinity_filtered,
output_directory=output_directory,
trinity_stats_prefix=trinity_stats_prefix,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file,
filter_string="" if not config.param('filter_annotated_components', 'filters_trinotate', required=False) else config.param('filter_annotated_components', 'filters_trinotate', required=False)
),
name="filter_annotated_components_report",
report_files=[report_file]
)
)
return jobs
def gq_seq_utils_exploratory_analysis_rnaseq_denovo_filtered(self):
"""
Exploratory analysis using the gqSeqUtils R package using a subset of filtered transcripts
"""
# Run exploratory analysis on filtered components
# Extract filtered components from counts file
jobs=[]
exploratory_output_dir = os.path.join("filtered_assembly","exploratory")
counts_file = os.path.join("filtered_assembly", "isoforms.counts.matrix")
trinotate_annotation_report_filtered = os.path.join("trinotate", "trinotate_annotation_report.tsv" + ".isoforms_filtered.tsv")
trinotate_annotation_report_filtered_header="trinotate/trinotate_annotation_report.tsv.isoforms_filtered_header.tsv"
lengths_file=os.path.join("differential_expression", "isoforms.lengths.tsv")
lengths_filtered_file = os.path.join("filtered_assembly", "isoforms.lengths.tsv")
jobs.append(concat_jobs([
Job(command="mkdir -p " + exploratory_output_dir),
Job([trinotate_annotation_report_filtered],
[trinotate_annotation_report_filtered_header],
command="sed '1s/^/ \\n/' " + trinotate_annotation_report_filtered + " > " + trinotate_annotation_report_filtered_header),
tools.py_parseMergeCsv([ trinotate_annotation_report_filtered_header, os.path.join("differential_expression", "isoforms.counts.matrix") ],
"\\\\t",
counts_file,
"\'\'",
left_join=True,
exclude="\'\'"
),
tools.py_parseMergeCsv([ trinotate_annotation_report_filtered_header, lengths_file ],
"\\\\t",
lengths_filtered_file,
"\'\' transcript_id",
left_join=True,
exclude="\' \'"
)
], name="filter_annotated_components_exploratory"
)
)
# gqSeqUtils function call
jobs.append(concat_jobs([
Job(command="mkdir -p " + exploratory_output_dir),
gq_seq_utils.exploratory_analysis_rnaseq_denovo(
counts_file,
lengths_filtered_file,
exploratory_output_dir
)
], name="gq_seq_utils_exploratory_analysis_rnaseq_denovo"))
# Render Rmarkdown Report
jobs.append(
rmarkdown.render(
job_input = os.path.join(exploratory_output_dir, "index.tsv"),
job_name = "gq_seq_utils_exploratory_analysis_rnaseq_denovo_filtered_report",
input_rmarkdown_file = os.path.join(self.report_template_dir, "RnaSeqDeNovoAssembly.gq_seq_utils_exploratory_analysis_rnaseq_filtered.Rmd") ,
render_output_dir = 'report',
module_section = 'report',
prerun_r = 'report_dir="report/filtered_assembly"; exploratory_dir="' + exploratory_output_dir + '";'
)
)
return jobs
def differential_expression_and_goseq_rsem(self, output_directory, item, trinotate_annotation_report):
"""
This function returns jobs related to differential gene expression analysis using [DESeq](http://bioconductor.org/packages/release/bioc/html/DESeq.html) and [edgeR](http://www.bioconductor.org/packages/release/bioc/html/edgeR.html).
The results of the analysis are merged into a single CSV file. Gene Ontology analysis for the RNA-Seq de novo assembly is also performed, using Bioconductor's R package [goseq](http://www.bioconductor.org/packages/release/bioc/html/goseq.html).
GO annotations for the differential gene and isoform expression analyses are generated from the GO terms produced by Trinotate.
"""
jobs = []
# Parameters from ini file
gene_id_column = "#gene_id" if not config.param('trinotate', 'gene_column', required=False) else config.param('trinotate', 'gene_column', required=False)
transcript_id_column = "transcript_id" if not config.param('trinotate', 'transcript_column', required=False) else config.param('trinotate', 'gene_column', required=False)
trinotate_filters = None if not config.param('filter_annotated_components', 'filters_trinotate', required=False) else config.param('filter_annotated_components', 'filters_trinotate', required=False).split("\n")
trinotate_columns_to_exclude = None if not config.param('differential_expression', 'trinotate_columns_to_exclude', required=False) else config.param('differential_expression', 'trinotate_columns_to_exclude', required=False)
# mkdir
jobs.append(Job([trinotate_annotation_report],
[],
command="mkdir -p " + os.path.join(output_directory, item)))
# Run DGE and merge dge results with annotations
matrix = os.path.join(output_directory, item + ".counts.matrix")
# Perform edgeR
edger_job = differential_expression.edger(os.path.relpath(self.args.design.name, self.output_dir), matrix + ".symbol", os.path.join(output_directory, item))
edger_job.output_files = [os.path.join(output_directory, item ,contrast.name, "edger_results.csv") for contrast in self.contrasts]
# Perform DESeq
deseq_job = differential_expression.deseq(os.path.relpath(self.args.design.name, self.output_dir), matrix + ".symbol", os.path.join(output_directory, item))
deseq_job.output_files = [os.path.join(output_directory, item ,contrast.name, "dge_results.csv") for contrast in self.contrasts]
jobs.append(concat_jobs([
edger_job,
deseq_job,
], name="differential_expression.run." + item))
for contrast in self.contrasts:
# Merge with annotations
jobs.append(concat_jobs([
tools.py_parseMergeCsv([os.path.join(output_directory, item, contrast.name, "dge_results.csv"), trinotate_annotation_report + "." + item + "_blast.tsv"],
"\\\\t",
os.path.join(output_directory, item, contrast.name, "dge_trinotate_results.csv"),
"id " + "\"" + gene_id_column + "\"" if item == "genes" else "id " + transcript_id_column,
None,
trinotate_columns_to_exclude,
True,
"edger.p.value",
True
),
# Run GOseq
differential_expression.goseq(
os.path.join(output_directory, item , contrast.name, "dge_trinotate_results.csv"),
config.param("differential_expression", "dge_input_columns"),
os.path.join(output_directory, item, contrast.name ,"gene_ontology_results.csv"),
os.path.join(output_directory, item +".lengths.tsv.noheader.tsv"),
trinotate_annotation_report + "." + item + "_go.tsv"
)
], name="differential_expression.merge.annotations.goseq." + item + "." + contrast.name ))
return jobs
def differential_expression(self):
"""
Performs differential gene expression analysis using [DESeq](http://bioconductor.org/packages/release/bioc/html/DESeq.html) and [edgeR](http://www.bioconductor.org/packages/release/bioc/html/edgeR.html).
The results of the analysis are merged into a single CSV file. Gene Ontology analysis for the RNA-Seq de novo assembly is also performed, using Bioconductor's R package [goseq](http://www.bioconductor.org/packages/release/bioc/html/goseq.html).
GO annotations for the differential gene and isoform expression analyses are generated from the GO terms produced by Trinotate.
"""
output_directory = "differential_expression"
jobs = []
trinotate_annotation_report = os.path.join("trinotate", "trinotate_annotation_report.tsv")
report_dir= 'report'
input_rmarkdown_file=os.path.join(self.report_template_dir, "RnaSeqDeNovoAssembly.differential_expression_goseq.Rmd")
# Run DGE and merge dge results with annotations
for item in "genes","isoforms":
jobs.append(concat_jobs( self.differential_expression_and_goseq_rsem(output_directory, item, trinotate_annotation_report)
, name= "differential_expression_" + item)
)
# DGE Report
# Render Rmarkdown Report
output_files = []
for job_item in jobs:
output_files.extend([output_file for output_file in job_item.output_files if output_file not in output_files])
jobs.append(
rmarkdown.render(
job_input = output_files,
job_name = "differential_expression_goseq_rnaseq_denovo_report",
input_rmarkdown_file = input_rmarkdown_file,
render_output_dir = 'report',
module_section = 'report',
prerun_r = 'design_file="' + os.path.relpath(self.args.design.name, self.output_dir) +
'"; report_dir="' + report_dir + '"; source_dir="' + output_directory + '"; ' + 'top_n_results=10; contrasts=c("' + '","'.join(contrast.name for contrast in self.contrasts) + '");'
)
)
return jobs
def differential_expression_filtered(self):
"""
Differential Expression and GOSEQ analysis based on filtered transcripts and genes
"""
output_directory = os.path.join("filtered_assembly","differential_expression")
jobs = []
trinotate_annotation_report = os.path.join("trinotate", "trinotate_annotation_report.tsv")
report_dir= os.path.join("report","filtered_assembly")
input_rmarkdown_file=os.path.join(self.report_template_dir, "RnaSeqDeNovoAssembly.differential_expression_goseq_filtered.Rmd")
# Filter input files
trinotate_annotation_report_filtered = trinotate_annotation_report + ".isoforms_filtered.tsv"
trinotate_annotation_report_filtered_header={}
trinotate_annotation_report_filtered_header["isoforms"] = trinotate_annotation_report + ".isoforms_filtered_header.tsv"
trinotate_annotation_report_filtered_header["genes"]= trinotate_annotation_report + ".genes_filtered_header.tsv"
counts_ids = { 'genes':"Genes", 'isoforms':"Isoforms" }
trinotate_filters = None if not config.param('filter_annotated_components', 'filters_trinotate', required=False) else config.param('filter_annotated_components', 'filters_trinotate', required=False).split("\n")
source_directory = "differential_expression"
# Create the files containing filtered isoforms and genes with headers
jobs.append(concat_jobs([
Job(command="mkdir -p " + output_directory ),
Job([trinotate_annotation_report_filtered],
[trinotate_annotation_report_filtered_header["genes"]],
command="cat " + trinotate_annotation_report_filtered + " | awk 'BEGIN{OFS=\"_\";FS=\"_\"}{print $1,$2}' | uniq | sed '1s/^/ \\n/' " + " > " + trinotate_annotation_report_filtered_header["genes"]
),
Job([trinotate_annotation_report_filtered],
[trinotate_annotation_report_filtered_header["isoforms"]],
command="sed '1s/^/ \\n/' " + trinotate_annotation_report_filtered + " > " + trinotate_annotation_report_filtered_header["isoforms"])
],name="differential_expression_filtered_get_trinotate")
)
# Run DGE and merge dge results with annotations
for item in "genes","isoforms":
matrix = os.path.join(output_directory, item + ".counts.matrix.symbol")
job=tools.py_parseMergeCsv([ trinotate_annotation_report_filtered_header[item], os.path.join(source_directory, item + ".counts.matrix.symbol") ],
"\\\\t",
matrix,
"\'\' " + counts_ids[item],
left_join=True,
exclude="\' \'")
jobs.append(concat_jobs([
job,
Job([os.path.join(source_directory, item +".lengths.tsv.noheader.tsv")],
[os.path.join(output_directory, item +".lengths.tsv.noheader.tsv")],
command="cp " + os.path.join(source_directory, item +".lengths.tsv.noheader.tsv") + " " + os.path.join(output_directory, item +".lengths.tsv.noheader.tsv")),
concat_jobs(self.differential_expression_and_goseq_rsem(output_directory, item, trinotate_annotation_report), name="differential_expression_filtered_" + item)
], name="differential_expression_filtered_" + item)
)
# Dependencies for report
output_files = []
for job_item in jobs:
output_files.extend([output_file for output_file in job_item.output_files if output_file not in output_files])
# DGE Report
# Render Rmarkdown Report
jobs.append(
rmarkdown.render(
job_input = output_files,
job_name = "differential_expression_goseq_rnaseq_denovo_filtered_report",
input_rmarkdown_file = input_rmarkdown_file,
render_output_dir = 'report',
module_section = 'report',
prerun_r = 'report_dir="' + report_dir + '"; source_dir="' + output_directory + '"; ' + 'top_n_results=10; contrasts=c("' + '","'.join(contrast.name for contrast in self.contrasts) + '");'
)
)
return jobs
@property
def steps(self):
return [
self.picard_sam_to_fastq,
self.trimmomatic,
self.merge_trimmomatic_stats,
self.insilico_read_normalization_readsets,
self.insilico_read_normalization_all,
self.trinity,
self.exonerate_fastasplit,
self.blastx_trinity_uniprot,
self.blastx_trinity_uniprot_merge,
self.transdecoder,
self.hmmer,
self.rnammer_transcriptome,
self.blastp_transdecoder_uniprot,
self.signalp,
self.tmhmm,
self.trinotate,
self.align_and_estimate_abundance_prep_reference,
self.align_and_estimate_abundance,
self.gq_seq_utils_exploratory_analysis_rnaseq_denovo,
self.differential_expression,
self.filter_annotated_components,
self.gq_seq_utils_exploratory_analysis_rnaseq_denovo_filtered,
self.differential_expression_filtered,
]
if __name__ == '__main__':
RnaSeqDeNovoAssembly()
|
ccmbioinfo/mugqic_pipelines
|
pipelines/rnaseq_denovo_assembly/rnaseq_denovo_assembly.py
|
Python
|
lgpl-3.0
| 50,453
|
[
"BLAST",
"Bioconductor"
] |
66e5df6cd3431fa82268e87d7fcea033e3b5e254804297ff8aa04a2c8f32197f
|
from .tabular_utils import parse_tab_blast, get_queries, get_targets
from ...configuration import DaijinConfiguration, MikadoConfiguration
import functools
def _serialise_tabular(self):
if isinstance(self.xml, str):
self.xml = [self.xml]
else:
assert isinstance(self.xml, (list, set))
assert isinstance(self.configuration, (DaijinConfiguration, MikadoConfiguration))
matrix_name = self.configuration.serialise.substitution_matrix
program = self.configuration.serialise.blast_flavour
qmult, tmult = self.get_multipliers(None, program)
if self._blast_loading_debug is False and (self.single_thread is True or self.procs == 1):
queries = get_queries(self.engine)
targets = get_targets(self.engine)
parser = functools.partial(parse_tab_blast,
self=self,
queries=queries,
targets=targets,
procs=1,
matrix_name=matrix_name,
qmult=qmult, tmult=tmult)
for fname in self.xml:
parser(bname=fname)
self.logger.debug("Finished %s", fname)
else:
self.logger.info("Creating a pool with %d workers for analysing BLAST results",
self.procs)
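# parse_tab_blast is presumably responsible for spawning its own worker pool
# (procs=self.procs); the loop over input files below therefore stays sequential.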
queries = get_queries(self.engine)
targets = get_targets(self.engine)
parser = functools.partial(parse_tab_blast,
self=self,
queries=queries,
targets=targets,
procs=self.procs,
matrix_name=matrix_name,
qmult=qmult, tmult=tmult)
for fname in self.xml:
parser(bname=fname)
self.logger.info("Finished loading blast hits")
|
lucventurini/mikado
|
Mikado/serializers/blast_serializer/tab_serialiser.py
|
Python
|
lgpl-3.0
| 1,954
|
[
"BLAST"
] |
19620dafdf269b1c3f565e511eef786d42f5aadc16b760cb9332cd67e81a7cfe
|
"""PHENICX-Anechoic Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
This dataset includes audio and annotations useful for tasks such as score-informed source separation, score following, multi-pitch estimation, transcription or instrument detection, in the context of symphonic music:
M. Miron, J. Carabias-Orti, J. J. Bosch, E. Gómez and J. Janer, "Score-informed source separation for multi-channel orchestral recordings", Journal of Electrical and Computer Engineering (2016).
We do not provide the original audio files, which can be found at the web page hosted by Aalto University. However, with their permission we distribute the denoised versions for some of the anechoic orchestral recordings. The original dataset was introduced in:
Pätynen, J., Pulkki, V., and Lokki, T., "Anechoic recording system for symphony orchestra," Acta Acustica united with Acustica, vol. 94, nr. 6, pp. 856-865, November/December 2008.
Additionally, we provide the associated musical note onset and offset annotations, and the Roomsim configuration files used to generate the multi-microphone recordings.
The original anechoic dataset in Pätynen et al. consists of four passages of symphonic music from the Classical and Romantic periods. This work presented a set of anechoic recordings for each of the instruments, which were then synchronized so that they could later be combined into a mix of the orchestra. In order to keep the evaluation setup consistent between the four pieces, we selected the following instruments: violin, viola, cello, double bass, oboe, flute, clarinet, horn, trumpet and bassoon. A list of the characteristics of the four pieces can be found below:
Mozart
- duration: 3min 47s
- period: classical
- no. sources: 8
- total no. instruments: 10
- max. instruments/source: 2
Beethoven
- duration: 3min 11s
- period: classical
- no. sources: 10
- total no. instruments: 20
- max. instruments/source: 4
Mahler
- duration: 2min 12s
- period: romantic
- no. sources: 10
- total no. instruments: 30
- max. instruments/source: 4
Bruckner
- duration: 1min 27s
- period: romantic
- no. sources: 10
- total no. instruments: 39
- max. instruments/source: 12
For more details, please visit: https://www.upf.edu/web/mtg/phenicx-anechoic
"""
from typing import BinaryIO, Optional, TextIO, Tuple, cast
from deprecated.sphinx import deprecated
import librosa
import numpy as np
from mirdata import annotations, core, download_utils, io, jams_utils
BIBTEX = """
@article{miron2016score,
title={Score-informed source separation for multichannel orchestral recordings},
author={Miron, Marius and Carabias-Orti, Julio J and Bosch, Juan J and G{\'o}mez, Emilia and Janer, Jordi},
journal={Journal of Electrical and Computer Engineering},
volume={2016},
year={2016},
publisher={Hindawi}
}
@article{patynen2008anechoic,
title={Anechoic recording system for symphony orchestra},
author={P{\"a}tynen, Jukka and Pulkki, Ville and Lokki, Tapio},
journal={Acta Acustica united with Acustica},
volume={94},
number={6},
pages={856--865},
year={2008},
publisher={S. Hirzel Verlag}
}
"""
INDEXES = {
"default": "1",
"test": "1",
"1": core.Index(filename="phenicx_anechoic_index_1.json"),
}
REMOTES = {
"all": download_utils.RemoteFileMetadata(
filename="PHENICX-Anechoic.zip",
url="https://zenodo.org/record/840025/files/PHENICX-Anechoic.zip?download=1",
checksum="7fec47568263476ecac0103aef608629",
unpack_directories=["PHENICX-Anechoic"],
)
}
LICENSE_INFO = """
Creative Commons Attribution Non Commercial Share Alike 4.0 International
"""
DATASET_SECTIONS = {
"doublebass": "strings",
"cello": "strings",
"clarinet": "woodwinds",
"viola": "strings",
"violin": "strings",
"oboe": "woodwinds",
"flute": "woodwinds",
"trumpet": "brass",
"bassoon": "woodwinds",
"horn": "brass",
}
class Track(core.Track):
"""Phenicx-Anechoic Track class
Args:
track_id (str): track id of the track
Attributes:
audio_path (list): path to the audio files
notes_path (list): path to the score files
notes_original_path (list): path to the original score files
instrument (str): the name of the instrument
piece (str): the name of the piece
n_voices (int): the number of voices in this instrument
track_id (str): track id
Cached Properties:
notes (NoteData): notes annotations that have been time-aligned to the audio
notes_original (NoteData): original score representation, not time-aligned
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.instrument = self.track_id.split("-")[1]
self.piece = self.track_id.split("-")[0]
self.audio_paths = [
self.get_path(key) for key in self._track_paths if "audio_" in key
]
self.n_voices = len(self.audio_paths)
self.notes_path = self.get_path("notes")
self.notes_original_path = self.get_path("notes_original")
@property
def audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""the track's audio
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
audio_mix, sr = cast(Tuple[np.ndarray, float], load_audio(self.audio_paths[0]))
for i in range(1, self.n_voices):
audio, _ = cast(Tuple[np.ndarray, float], load_audio(self.audio_paths[i]))
audio_mix += audio
audio_mix /= self.n_voices
return audio_mix, sr
@core.cached_property
def notes(self) -> Optional[annotations.NoteData]:
"""the track's notes corresponding to the score aligned to the audio
Returns:
NoteData: Note data for the track
"""
return load_score(self.notes_path)
@core.cached_property
def notes_original(self) -> Optional[annotations.NoteData]:
"""the track's notes corresponding to the original score
Returns:
NoteData: Note data for the track
"""
return load_score(self.notes_original_path)
def get_audio_voice(self, id_voice: int) -> Optional[Tuple[np.ndarray, float]]:
"""the track's audio
Args:
id_voice (int): The integer identifier for the voice
e.g. 2 for bassoon-2
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
if id_voice >= self.n_voices:
raise ValueError("id_voice={} is out of range".format(id_voice))
return load_audio(self.audio_paths[id_voice])
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_paths[0], note_data=[(self.notes, "aligned notes")]
)
class MultiTrack(core.MultiTrack):
"""Phenicx-Anechoic MultiTrack class
Args:
mtrack_id (str): track id of the track
data_home (str): Local path where the dataset is stored.
If `None`, looks for the data in the default directory, `~/mir_datasets/Phenicx-Anechoic`
Attributes:
track_audio_property (str): the attribute of track which is used for mixing
mtrack_id (str): multitrack id
piece (str): the classical music piece associated with this multitrack
tracks (dict): dict of track ids and the corresponding Tracks
instruments (dict): dict of instruments and the corresponding track
sections (dict): dict of sections and the corresponding list of tracks for each section
"""
def __init__(
self,
mtrack_id,
data_home,
dataset_name,
index,
track_class,
metadata,
):
super().__init__(
mtrack_id,
data_home,
dataset_name,
index,
Track,
metadata,
)
#### parse the keys for the dictionaries of instruments and sections
self.instruments = {
source.replace(self.mtrack_id + "-", ""): source
for source in self.track_ids
}
self.sections = {"brass": [], "strings": [], "woodwinds": []}
for instrument, track_id in self.instruments.items():
self.sections[DATASET_SECTIONS[instrument]].append(track_id)
self.piece = self.mtrack_id
@property
def track_audio_property(self):
#### the attribute of Track which returns the relevant audio file for mixing
return "audio"
def get_audio_for_instrument(self, instrument):
"""Get the audio for a particular instrument
Args:
instrument (str): the instrument to get audio for
Returns:
np.ndarray: instrument audio with shape (n_samples, n_channels)
"""
if instrument not in self.instruments.keys():
raise ValueError(
"instrument={} is not in this multitrack. Must be one of {}".format(
instrument, self.instruments.keys()
)
)
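# track_audio_property ("audio") returns an (audio, sample_rate) tuple; [0] keeps only the signal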
return getattr(
self.tracks[self.instruments[instrument]], self.track_audio_property
)[0]
def get_audio_for_section(self, section):
"""Get the audio for a particular section
Args:
section (str): the section to get audio for
Returns:
np.ndarray: section audio with shape (n_samples, n_channels)
"""
if section not in self.sections.keys():
raise ValueError(
"section={} is not valid for this multitrack, must be one of {}".format(
section, self.sections.keys()
)
)
return self.get_target(self.sections[section])
def get_notes_target(self, track_keys, notes_property="notes"):
"""Get the notes for all the tracks
Args:
track_keys (list): list of track keys to get the NoteData for
notes_property (str): the attribute associated with NoteData, notes or notes_original
Returns:
NoteData: Note data for the tracks
"""
notes_target = None
for k in track_keys:
score = getattr(self.tracks[k], notes_property)
if notes_target is None:
notes_target = score
else:
notes_target += score
return notes_target
def get_notes_for_instrument(self, instrument, notes_property="notes"):
"""Get the notes for a particular instrument
Args:
instrument (str): the instrument to get the notes for
notes_property (str): the attribute associated with NoteData, notes or notes_original
Returns:
NoteData: Note data for the instrument
"""
return getattr(self.tracks[self.instruments[instrument]], notes_property)
def get_notes_for_section(self, section, notes_property="notes"):
"""Get the notes for a particular section
Args:
section (str): the section to get the notes for
notes_property (str): the attribute associated with NoteData, notes or notes_original
Returns:
NoteData: Note data for the section
"""
return self.get_notes_target(
self.sections[section], notes_property=notes_property
)
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load a Phenicx-Anechoic audio file.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - the audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=None, mono=True)
@io.coerce_to_string_io
def load_score(fhandle: TextIO) -> annotations.NoteData:
"""Load a Phenicx-Anechoic score file.
Args:
fhandle (str or file-like): File-like object or path to score file
Returns:
NoteData: Note data for the given track
"""
#### read start, end times
intervals = np.loadtxt(fhandle, delimiter=",", usecols=[0, 1], dtype=np.float_)
#### read notes as string
fhandle.seek(0)
content = fhandle.readlines()
values = np.array(
[librosa.note_to_hz(line.split(",")[2].strip("\n")) for line in content]
)
return annotations.NoteData(intervals, "s", values, "hz")
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The Phenicx-Anechoic dataset
"""
def __init__(self, data_home=None, version="default"):
super().__init__(
data_home,
version,
name="phenicx_anechoic",
track_class=Track,
multitrack_class=MultiTrack,
bibtex=BIBTEX,
indexes=INDEXES,
remotes=REMOTES,
license_info=LICENSE_INFO,
)
@deprecated(
reason="Use mirdata.datasets.phenicx_anechoic.load_audio",
version="0.3.4",
)
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.phenicx_anechoic.load_score",
version="0.3.4",
)
def load_score(self, *args, **kwargs):
return load_score(*args, **kwargs)
|
mir-dataset-loaders/mirdata
|
mirdata/datasets/phenicx_anechoic.py
|
Python
|
bsd-3-clause
| 13,876
|
[
"VisIt"
] |
c36ddc46f34334eb63cbf818d9198f63e46e02361e6334acc71ea9863de758b3
|
""" Compares the run-time of the Python and the libmaxdiv implementation of the MDI algorithm on time-series of varying length. """
import sys
sys.path.append('..')
import numpy as np
from time import time
import csv
from maxdiv import maxdiv, maxdiv_util, libmaxdiv_wrapper
# ensure reproducible results
np.random.seed(0)
def sample_gp(length, dim = 1, sigma = 0.02, noise = 0.001):
""" sample a function from a Gaussian process with Gaussian kernel """
X = np.arange(0, length / 250.0, 0.004)
X = np.reshape(X, [1, len(X)])
meany = np.zeros(X.shape[1])
K = maxdiv_util.calc_gaussian_kernel(X, sigma) + noise * np.eye(X.shape[1])
return np.random.multivariate_normal(meany, K, dim)
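# e.g. sample_gp(500) returns one GP sample as an array of shape (1, 500)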
if (len(sys.argv) < 2) or (sys.argv[1].lower() == 'noplot'):
# Parameters
min_len = 10
max_len = 100
N = np.arange(200, 2501, 100)
times = np.ndarray((len(N), 4), dtype = np.float64)
# Prepare libmaxdiv pipelines
params = libmaxdiv_wrapper.maxdiv_params_t()
libmaxdiv_wrapper.libmaxdiv.maxdiv_init_params(params)
params.min_size[0] = min_len
params.max_size[0] = max_len
params.preproc.embedding.kt = 3
params.preproc.embedding.temporal_borders = libmaxdiv_wrapper.enums['MAXDIV_BORDER_POLICY_CONSTANT']
params.estimator = libmaxdiv_wrapper.enums['MAXDIV_GAUSSIAN']
pipeline_gaussian = libmaxdiv_wrapper.libmaxdiv.maxdiv_compile_pipeline(params)
params.estimator = libmaxdiv_wrapper.enums['MAXDIV_KDE']
pipeline_parzen = libmaxdiv_wrapper.libmaxdiv.maxdiv_compile_pipeline(params)
# Measure runtimes and write them to timing.csv
with open('timing.csv', 'w') as outFile:
outFile.write('Length,Gaussian (Python),KDE (Python),Gaussian (libmaxdiv),KDE (libmaxdiv)\n')
for i, n in enumerate(N):
gps = sample_gp(n)
start = time()
maxdiv.maxdiv(gps, 'gaussian_cov', None, 'dense', useLibMaxDiv = False, mode = 'I_OMEGA', preproc = 'td', extint_min_len = min_len, extint_max_len = max_len)
stop = time()
times[i, 0] = stop - start
start = time()
maxdiv.maxdiv(gps, 'parzen', None, 'dense', useLibMaxDiv = False, mode = 'I_OMEGA', preproc = 'td', extint_min_len = min_len, extint_max_len = max_len)
stop = time()
times[i, 1] = stop - start
start = time()
libmaxdiv_wrapper.maxdiv_exec(gps, pipeline_gaussian, None)
stop = time()
times[i, 2] = stop - start
start = time()
libmaxdiv_wrapper.maxdiv_exec(gps, pipeline_parzen, None)
stop = time()
times[i, 3] = stop - start
outFile.write('{},{:.3f},{:.3f},{:.3f},{:.3f}\n'.format(n, *times[i,:]))
outFile.flush()
else:
N = []
times = []
with open(sys.argv[1]) as inFile:
for i, d in enumerate(csv.reader(inFile)):
if i > 0:
N.append(int(d[0]))
times.append([float(x) for x in d[1:5]])
times = np.array(times)
# Plot results
if (len(sys.argv) < 2) or (sys.argv[1].lower() != 'noplot'):
import matplotlib.pylab as plt
plt.plot(N, times[:, 0] * 1000, 'b--', label = 'Gaussian (Python)')
plt.plot(N, times[:, 1] * 1000, 'r--', label = 'KDE (Python)')
plt.plot(N, times[:, 2] * 1000, 'b-', label = 'Gaussian (libmaxdiv)')
plt.plot(N, times[:, 3] * 1000, 'r-', label = 'KDE (libmaxdiv)')
plt.xlabel('Length of Time Series')
plt.ylabel('Algorithm Run-Time')
plt.yscale('log')
ticks = [10 ** e for e in range(1, 6, 1)]
plt.yticks(ticks, ['{:.0f} s'.format(l / 1000) if l >= 1000 else '{:.0f} ms'.format(l) for l in ticks])
plt.grid(True)
plt.legend(loc = 'lower right')
plt.show()
|
cvjena/libmaxdiv
|
tools/timing.py
|
Python
|
lgpl-3.0
| 3,848
|
[
"Gaussian"
] |
7d35e5063f652e41bc9ad877349d40e32aa57db51fba66e23fdabc30ca9174ff
|
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
from . import sigtools
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import linalg
from scipy.fftpack import (fft, ifft, ifftshift, fft2, ifft2, fftn,
ifftn, fftfreq)
from numpy.fft import rfftn, irfftn
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'detrend', 'lfilter_zi', 'sosfilt_zi',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward
swapped_inputs = (mode == 'full') and (in2.size > in1.size)
if swapped_inputs:
in1, in2 = in2, in1
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
# Reverse and conjugate to undo the effect of swapping inputs
if swapped_inputs:
slice_obj = [slice(None, None, -1)] * len(z.shape)
z = z[slice_obj].conj()
return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = irfftn(rfftn(in1, fshape) *
rfftn(in2, fshape), fshape)[fslice].copy()
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `in`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `in`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(face, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
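Examples
--------
A minimal illustrative example: a two-point moving-average FIR filter.
>>> from scipy import signal
>>> x = np.array([1., 2., 3., 4., 5.])
>>> signal.lfilter([0.5, 0.5], [1.0], x)
array([ 0.5,  1.5,  2.5,  3.5,  4.5])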
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape, strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
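# Polynomial long division via filtering: running a unit impulse through the
# filter num/den yields the quotient coefficients.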
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
--------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
The amplitude envelope is given by magnitude of the analytic signal. The
instantaneous frequency can be obtained by differentiating the instantaneous
phase with respect to time. The instantaneous phase corresponds to the phase
angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = np.diff(instantaneous_phase) / (2.0*np.pi) * fs
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal Processing,
Third Edition, 2009. Chapter 12. ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
# Build the one-dimensional spectral multipliers for each axis without eval/exec.
hs = []
for N1 in N:
h = zeros(N1, 'd')
if N1 % 2 == 0:
h[0] = h[N1 // 2] = 1
h[1:N1 // 2] = 2
else:
h[0] = 1
h[1:(N1 + 1) // 2] = 2
hs.append(h)
h1, h2 = hs
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
/ factorial(sig - m))
indx += sig
return r / rscale, p, k
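# A minimal round-trip sketch (illustrative coefficients): residue and invres
# should invert each other, here for H(s) = (s + 2) / (s**2 + 3*s + 2).
# >>> import numpy as np
# >>> from scipy.signal import residue, invres
# >>> b_in, a_in = [1.0, 2.0], [1.0, 3.0, 2.0]
# >>> r, p, k = residue(b_in, a_in)
# >>> b_out, a_out = invres(r, p, k)
# >>> np.allclose(b_out, b_in) and np.allclose(a_out, a_in)
# True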
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1)...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots, invres
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
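# A minimal z-domain round-trip sketch (illustrative coefficients): residuez
# and invresz invert each other for polynomials in z**-1; invresz may carry a
# trailing zero coming from the zero direct term, so it is trimmed before
# comparing.
# >>> import numpy as np
# >>> from scipy.signal import residuez, invresz
# >>> b_in, a_in = [1.0, -1.0], [1.0, -1.5, 0.5]
# >>> r, p, k = residuez(b_in, a_in)
# >>> b_out, a_out = invresz(r, p, k)
# >>> np.allclose(np.trim_zeros(b_out, 'b'), b_in) and np.allclose(a_out, a_in)
# True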
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[tuple(sl)] = X[tuple(sl)]
sl[axis] = slice(-(N - 1) // 2, None)
Y[tuple(sl)] = X[tuple(sl)]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
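# A minimal sketch of vectorstrength (illustrative event times): perfectly
# phase-locked events give a strength of 1, while events spread evenly over
# the period give a strength near 0.
# >>> from scipy.signal import vectorstrength
# >>> strength, phase = vectorstrength([0.0, 1.0, 2.0, 3.0], period=1.0)
# >>> round(strength, 6)
# 1.0
# >>> strength, phase = vectorstrength([0.0, 0.25, 0.5, 0.75], period=1.0)
# >>> round(strength, 6)
# 0.0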
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = arange(1, Npts + 1) * 1.0 / Npts
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
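# A minimal sketch of detrend with breakpoints (illustrative values): `bp`
# fits and removes a separate linear trend on each segment, so an exactly
# piecewise-linear signal detrends to (numerically) zero.
# >>> import numpy as np
# >>> from scipy.signal import detrend
# >>> seg1 = 0.5 * np.arange(50)
# >>> seg2 = seg1[-1] + 2.0 * np.arange(50)
# >>> x = np.concatenate([seg1, seg2])
# >>> np.abs(detrend(x, type='linear', bp=[50])).max() < 1e-8
# True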
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Notes
-----
The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# `method` is "pad"...
ntaps = max(len(a), len(b))
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be greater "
"than padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos = atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[tuple(sl)]
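# A minimal sketch of decimate (illustrative signal): decimating by q keeps
# every q-th sample of the low-pass-filtered signal, so the output has about
# len(x) / q samples.
# >>> import numpy as np
# >>> from scipy.signal import decimate
# >>> t = np.linspace(0, 1, 400, endpoint=False)
# >>> x = np.sin(2 * np.pi * 5 * t)
# >>> decimate(x, 4).shape
# (100,)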
|
felipebetancur/scipy
|
scipy/signal/signaltools.py
|
Python
|
bsd-3-clause
| 87,587
|
[
"Gaussian"
] |
0c9b57c8e83ab0593b452516ea3de6566363dd9abf659eaf2026c4d524e68195
|
#!/usr/bin/env python
#
# $File: PyMutator.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
import random
def incAllele(allele):
return allele + random.randint(1, 5)
pop = sim.Population(size=1000, loci=[20])
pop.evolve(
initOps=sim.InitSex(),
matingScheme=sim.RandomMating(),
postOps=sim.PyMutator(func=incAllele, rates=[1e-4, 1e-3],
loci=[2, 10]),
gen = 1000
)
# count the average number of tandem repeats at both loci
def avgAllele(pop, loc):
ret = 0
for ind in pop.individuals():
ret += ind.allele(loc, 0) + ind.allele(loc, 1)
return ret / (pop.popSize() * 2.)
print('Average numbers of repeats at the two loci are %.2f and %.2f.' % \
(avgAllele(pop, 2), avgAllele(pop, 10)))
|
BoPeng/simuPOP
|
docs/PyMutator.py
|
Python
|
gpl-2.0
| 1,744
|
[
"VisIt"
] |
b28da68cb3f5b74acac5bb0220f924579191b03c972ce324a884d0f23820c8b0
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Water dynamics analysis --- :mod:`MDAnalysis.analysis.waterdynamics`
=======================================================================
:Author: Alejandro Bernardin
:Year: 2014-2015
:Copyright: GNU Public License v3
.. versionadded:: 0.11.0
This module provides functions to analyze water dynamics trajectories and water
interactions with other molecules. The functions in this module are: water
orientational relaxation (WOR) [Yeh1999]_, hydrogen bond lifetimes (HBL)
[Rapaport1983]_, angular distribution (AD) [Grigera1995]_, mean square
displacement (MSD) [Brodka1994]_ and survival probability (SP) [Liu2004]_.
For more information about this type of analysis please refer to
[Araya-Secchi2014]_ (water in a protein cavity) and [Milischuk2011]_ (water in
a nanopore).
.. rubric:: References
.. [Rapaport1983] D.C. Rapaport (1983): Hydrogen bonds in water, Molecular
Physics: An International Journal at the Interface Between
Chemistry and Physics, 50:5, 1151-1162.
.. [Yeh1999] Yu-ling Yeh and Chung-Yuan Mou (1999). Orientational Relaxation
Dynamics of Liquid Water Studied by Molecular Dynamics Simulation,
J. Phys. Chem. B 1999, 103, 3699-3705.
.. [Grigera1995] Raul Grigera, Susana G. Kalko and Jorge Fischbarg
(1995). Wall-Water Interface. A Molecular Dynamics Study,
Langmuir 1996,12,154-158
.. [Liu2004] Pu Liu, Edward Harder, and B. J. Berne (2004). On the Calculation
of Diffusion Coefficients in Confined Fluids and Interfaces with
an Application to the Liquid-Vapor Interface of Water,
J. Phys. Chem. B 2004, 108, 6595-6602.
.. [Brodka1994] Aleksander Brodka (1994). Diffusion in restricted volume,
Molecular Physics, 1994, Vol. 82, No. 5, 1075-1078.
.. [Araya-Secchi2014] Araya-Secchi, R., Tomas Perez-Acle, Seung-gu Kang, Tien
Huynh, Alejandro Bernardin, Yerko Escalona, Jose-Antonio
Garate, Agustin D. Martinez, Isaac E. Garcia, Juan
C. Saez, Ruhong Zhou (2014). Characterization of a novel
water pocket inside the human Cx26 hemichannel
structure. Biophysical journal, 107(3), 599-612.
.. [Milischuk2011] Anatoli A. Milischuk and Branka M. Ladanyi. Structure and
dynamics of water confined in silica
nanopores. J. Chem. Phys. 135, 174709 (2011); doi:
10.1063/1.3657408
Example use of the analysis classes
-----------------------------------
HydrogenBondLifetimes
~~~~~~~~~~~~~~~~~~~~~
To analyse hydrogen bond lifetime, use
:meth:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis.HydrogenBondAnalysis.lifetime`.
See Also
--------
:mod:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis`
WaterOrientationalRelaxation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Analyzing water orientational relaxation (WOR)
:class:`WaterOrientationalRelaxation`. Here we analyze how fast water
molecules rotate or change direction. If the WOR decays slowly, the water
molecules are reorienting slowly; if it decays quickly, they are
reorienting quickly::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import WaterOrientationalRelaxation as WOR
u = MDAnalysis.Universe(pdb, trajectory)
select = "byres name OH2 and sphzone 6.0 protein and resid 42"
WOR_analysis = WOR(u, select, 0, 1000, 20)
WOR_analysis.run()
time = 0
#now we print the data ready to plot. The first two columns are WOR_OH vs t plot,
#the second two columns are WOR_HH vs t graph and the third two columns are WOR_dip vs t graph
for WOR_OH, WOR_HH, WOR_dip in WOR_analysis.timeseries:
print("{time} {WOR_OH} {time} {WOR_HH} {time} {WOR_dip}".format(time=time, WOR_OH=WOR_OH, WOR_HH=WOR_HH,WOR_dip=WOR_dip))
time += 1
#now, if we want, we can plot our data
plt.figure(1,figsize=(18, 6))
#WOR OH
plt.subplot(131)
plt.xlabel('time')
plt.ylabel('WOR')
plt.title('WOR OH')
plt.plot(range(0,time),[column[0] for column in WOR_analysis.timeseries])
#WOR HH
plt.subplot(132)
plt.xlabel('time')
plt.ylabel('WOR')
plt.title('WOR HH')
plt.plot(range(0,time),[column[1] for column in WOR_analysis.timeseries])
#WOR dip
plt.subplot(133)
plt.xlabel('time')
plt.ylabel('WOR')
plt.title('WOR dip')
plt.plot(range(0,time),[column[2] for column in WOR_analysis.timeseries])
plt.show()
where t0 = 0, tf = 1000 and dtmax = 20. This creates 20 window timesteps
(20 values on the x axis): the first window averages over 1000 timesteps
(1000/1), the second over 500 timesteps (1000/2), the third over 333
timesteps (1000/3), and so on.
AngularDistribution
~~~~~~~~~~~~~~~~~~~
Analyzing angular distribution (AD) :class:`AngularDistribution` for OH vector,
HH vector and dipole vector. It returns a line histogram with vector
orientation preference. A straight line in the output plot means no
preferential orientation in water molecules. In this case we are analyzing if
water molecules have some orientational preference, in this way we can see if
water molecules are under an electric field or if they are interacting with
something (residue, protein, etc)::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import AngularDistribution as AD
u = MDAnalysis.Universe(pdb, trajectory)
selection = "byres name OH2 and sphzone 6.0 (protein and (resid 42 or resid 26) )"
bins = 30
AD_analysis = AD(u, selection, bins)
AD_analysis.run()
#now we print data ready to graph. The first two columns are P(cos(theta)) vs cos(theta) for the OH vector,
#the second two columns are P(cos(theta)) vs cos(theta) for the HH vector and the third two columns
#are P(cos(theta)) vs cos(theta) for dipole vector
for bin in range(bins):
print("{AD_analysisOH} {AD_analysisHH} {AD_analysisDip}".format(AD_analysisOH=AD_analysis.graph[0][bin], AD_analysisHH=AD_analysis.graph[1][bin], AD_analysisDip=AD_analysis.graph[2][bin]))
#and if we want to graph our results
plt.figure(1,figsize=(18, 6))
#AD OH
plt.subplot(131)
plt.xlabel('cos theta')
plt.ylabel('P(cos theta)')
plt.title('PDF cos theta for OH')
plt.plot([float(column.split()[0]) for column in AD_analysis.graph[0][:-1]],[float(column.split()[1]) for column in AD_analysis.graph[0][:-1]])
#AD HH
plt.subplot(132)
plt.xlabel('cos theta')
plt.ylabel('P(cos theta)')
plt.title('PDF cos theta for HH')
plt.plot([float(column.split()[0]) for column in AD_analysis.graph[1][:-1]],[float(column.split()[1]) for column in AD_analysis.graph[1][:-1]])
#AD dip
plt.subplot(133)
plt.xlabel('cos theta')
plt.ylabel('P(cos theta)')
plt.title('PDF cos theta for dipole')
plt.plot([float(column.split()[0]) for column in AD_analysis.graph[2][:-1]],[float(column.split()[1]) for column in AD_analysis.graph[2][:-1]])
plt.show()
where `P(cos(theta))` is the angular distribution or angular probabilities.
MeanSquareDisplacement
~~~~~~~~~~~~~~~~~~~~~~
Analyzing mean square displacement (MSD) :class:`MeanSquareDisplacement` for
water molecules. Here we analyze the average distance that water molecules
travel inside the protein in the XYZ directions (a cylindrical zone of radius
11[nm], Zmax 4.0[nm] and Zmin -8.0[nm]). A steep rise means fast movement of
the water molecules; a shallow rise means slow movement::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import MeanSquareDisplacement as MSD
u = MDAnalysis.Universe(pdb, trajectory)
select = "byres name OH2 and cyzone 11.0 4.0 -8.0 protein"
MSD_analysis = MSD(u, select, 0, 1000, 20)
MSD_analysis.run()
#now we print data ready to graph. The graph
#represents MSD vs t
time = 0
for msd in MSD_analysis.timeseries:
print("{time} {msd}".format(time=time, msd=msd))
time += 1
#Plot
plt.xlabel('time')
plt.ylabel('MSD')
plt.title('MSD')
plt.plot(range(0,time),MSD_analysis.timeseries)
plt.show()
.. _SP-examples:
SurvivalProbability
~~~~~~~~~~~~~~~~~~~
Analyzing survival probability (SP) :class:`SurvivalProbability` of molecules.
Here we analyze how long water molecules remain within a sphere of radius
12.3 centered on the geometric center of resid 42 and 26.
A slow decay of the SP means water molecules stay in the zone for a long
time; a fast decay means a short residence time::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import SurvivalProbability as SP
import matplotlib.pyplot as plt
universe = MDAnalysis.Universe(pdb, trajectory)
select = "byres name OH2 and sphzone 12.3 (resid 42 or resid 26) "
sp = SP(universe, select, verbose=True)
sp.run(start=0, stop=101, tau_max=20)
tau_timeseries = sp.tau_timeseries
sp_timeseries = sp.sp_timeseries
# print in console
for tau, sp in zip(tau_timeseries, sp_timeseries):
print("{time} {sp}".format(time=tau, sp=sp))
# plot
plt.xlabel('Time')
plt.ylabel('SP')
plt.title('Survival Probability')
plt.plot(tau_timeseries, sp_timeseries)
plt.show()
One should note that the `stop` keyword as used in the above example has an
`exclusive` behaviour, i.e. here the final frame used will be 100 not 101.
This behaviour is aligned with :class:`AnalysisBase` but currently differs from
other :mod:`MDAnalysis.analysis.waterdynamics` classes, which all exhibit
`inclusive` behaviour for their final frame selections.
Another example applies to the situation where you work with many different "residues".
Here we calculate the SP of a potassium ion around each lipid in a membrane and
average the results. In this example, if the SP analysis were run without treating each lipid
separately, potassium ions may hop from one lipid to another and still be counted as remaining
in the specified region. That is, the survival probability of the potassium ion around the
entire membrane will be calculated.
Note, for this example, it is advisable to use `Universe(in_memory=True)` to ensure that the
simulation is not being reloaded into memory for each lipid::
import MDAnalysis as mda
from MDAnalysis.analysis.waterdynamics import SurvivalProbability as SP
import numpy as np
u = mda.Universe("md.gro", "md100ns.xtc", in_memory=True)
lipids = u.select_atoms('resname LIPIDS')
joined_sp_timeseries = [[] for _ in range(20)]
for lipid in lipids.residues:
print("Lipid ID: %d" % lipid.resid)
select = "resname POTASSIUM and around 3.5 (resid %d and name O13 O14) " % lipid.resid
sp = SP(u, select, verbose=True)
sp.run(tau_max=20)
# Raw SP points for each tau:
for sps, new_sps in zip(joined_sp_timeseries, sp.sp_timeseries_data):
sps.extend(new_sps)
# average all SP datapoints
sp_data = [np.mean(sp) for sp in joined_sp_timeseries]
for tau, sp in zip(range(1, len(sp_data) + 1), sp_data):
print("{time} {sp}".format(time=tau, sp=sp))
.. _Output:
Output
------
WaterOrientationalRelaxation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Water orientational relaxation (WOR) data is returned per window timestep,
which is stored in :attr:`WaterOrientationalRelaxation.timeseries`::
results = [
[ # time t0
<WOR_OH>, <WOR_HH>, <WOR_dip>
],
[ # time t1
<WOR_OH>, <WOR_HH>, <WOR_dip>
],
...
]
AngularDistribution
~~~~~~~~~~~~~~~~~~~
Angular distribution (AD) data is returned per vector, which is stored in
:attr:`AngularDistribution.graph`. In fact, AngularDistribution returns a
histogram::
results = [
[ # OH vector values
# the values are order in this way: <x_axis y_axis>
<cos_theta0 ang_distr0>, <cos_theta1 ang_distr1>, ...
],
[ # HH vector values
<cos_theta0 ang_distr0>, <cos_theta1 ang_distr1>, ...
],
[ # dip vector values
<cos_theta0 ang_distr0>, <cos_theta1 ang_distr1>, ...
],
]
MeanSquareDisplacement
~~~~~~~~~~~~~~~~~~~~~~
Mean Square Displacement (MSD) data is returned in a list, in which each
element represents an MSD value for its respective window timestep. Data is stored in
:attr:`MeanSquareDisplacement.timeseries`::
results = [
#MSD values orders by window timestep
<MSD_t0>, <MSD_t1>, ...
]
SurvivalProbability
~~~~~~~~~~~~~~~~~~~
Survival Probability (SP) computes two lists: a list of taus (:attr:`SurvivalProbability.tau_timeseries`) and a list of
the corresponding survival probabilities (:attr:`SurvivalProbability.sp_timeseries`).
results = [ tau1, tau2, ..., tau_n ], [ sp_tau1, sp_tau2, ..., sp_tau_n]
Additionally, a list :attr:`SurvivalProbability.sp_timeseries_data`, is provided which contains
a list of all SPs calculated for each tau. This can be used to compute the distribution or time dependence of SP, etc.
Classes
--------
.. autoclass:: WaterOrientationalRelaxation
:members:
:inherited-members:
.. autoclass:: AngularDistribution
:members:
:inherited-members:
.. autoclass:: MeanSquareDisplacement
:members:
:inherited-members:
.. autoclass:: SurvivalProbability
:members:
:inherited-members:
"""
from MDAnalysis.lib.correlations import autocorrelation, correct_intermittency
import MDAnalysis.analysis.hbonds
from itertools import zip_longest
import logging
import warnings
import numpy as np
logger = logging.getLogger('MDAnalysis.analysis.waterdynamics')
from MDAnalysis.lib.log import ProgressBar
class WaterOrientationalRelaxation(object):
r"""Water orientation relaxation analysis
Function to evaluate the Water Orientational Relaxation proposed by Yu-ling
Yeh and Chung-Yuan Mou [Yeh1999]_. WaterOrientationalRelaxation indicates
"how fast" water molecules are rotating or changing direction. This is a
time correlation function given by:
.. math::
C_{\hat u}(\tau)=\langle \mathit{P}_2[\mathbf{\hat{u}}(t_0)\cdot\mathbf{\hat{u}}(t_0+\tau)]\rangle
where :math:`P_2=(3x^2-1)/2` is the second-order Legendre polynomial and :math:`\hat{u}` is
a unit vector along HH, OH or dipole vector.
Parameters
----------
universe : Universe
Universe object
select : str
Selection string for water [‘byres name OH2’].
t0 : int
frame where analysis begins
tf : int
frame where analysis ends
dtmax : int
Maximum dt size, `dtmax` < `tf` or it will crash.
.. versionadded:: 0.11.0
.. versionchanged:: 1.0.0
Changed `selection` keyword to `select`
"""
def __init__(self, universe, select, t0, tf, dtmax, nproc=1):
self.universe = universe
self.selection = select
self.t0 = t0
self.tf = tf
self.dtmax = dtmax
self.nproc = nproc
self.timeseries = None
def _repeatedIndex(self, selection, dt, totalFrames):
"""
Indicates the comparison between all pairs of frames t and t+dt.
The result is a list of lists with all the repeated indices per frame
(or time).
Ex: dt=1, so compare frames (1,2),(2,3),(3,4)...
Ex: dt=2, so compare frames (1,3),(3,5),(5,7)...
Ex: dt=3, so compare frames (1,4),(4,7),(7,10)...
"""
rep = []
for i in range(int(round((totalFrames - 1) / float(dt)))):
if (dt * i + dt < totalFrames):
rep.append(self._sameMolecTandDT(
selection, dt * i, (dt * i) + dt))
return rep
def _getOneDeltaPoint(self, universe, repInd, i, t0, dt):
"""
Gives one point to calculate the mean and gets one point of the plot
C_vect vs t.
Ex: t0=1 and tau=1, so calculate the t0-tau = 1-2 interval.
Ex: t0=5 and tau=3, so calculate the t0-tau = 5-8 interval.
i : int, comes from _getMeanOnePoint (named j there)
"""
valOH = 0
valHH = 0
valdip = 0
n = 0
for j in range(len(repInd[i]) // 3):
begj = 3 * j
universe.trajectory[t0]
Ot0 = repInd[i][begj]
H1t0 = repInd[i][begj + 1]
H2t0 = repInd[i][begj + 2]
OHVector0 = H1t0.position - Ot0.position
HHVector0 = H1t0.position - H2t0.position
dipVector0 = ((H1t0.position + H2t0.position) * 0.5) - Ot0.position
universe.trajectory[t0 + dt]
Otp = repInd[i][begj]
H1tp = repInd[i][begj + 1]
H2tp = repInd[i][begj + 2]
OHVectorp = H1tp.position - Otp.position
HHVectorp = H1tp.position - H2tp.position
dipVectorp = ((H1tp.position + H2tp.position) * 0.5) - Otp.position
normOHVector0 = np.linalg.norm(OHVector0)
normOHVectorp = np.linalg.norm(OHVectorp)
normHHVector0 = np.linalg.norm(HHVector0)
normHHVectorp = np.linalg.norm(HHVectorp)
normdipVector0 = np.linalg.norm(dipVector0)
normdipVectorp = np.linalg.norm(dipVectorp)
unitOHVector0 = [OHVector0[0] / normOHVector0,
OHVector0[1] / normOHVector0,
OHVector0[2] / normOHVector0]
unitOHVectorp = [OHVectorp[0] / normOHVectorp,
OHVectorp[1] / normOHVectorp,
OHVectorp[2] / normOHVectorp]
unitHHVector0 = [HHVector0[0] / normHHVector0,
HHVector0[1] / normHHVector0,
HHVector0[2] / normHHVector0]
unitHHVectorp = [HHVectorp[0] / normHHVectorp,
HHVectorp[1] / normHHVectorp,
HHVectorp[2] / normHHVectorp]
unitdipVector0 = [dipVector0[0] / normdipVector0,
dipVector0[1] / normdipVector0,
dipVector0[2] / normdipVector0]
unitdipVectorp = [dipVectorp[0] / normdipVectorp,
dipVectorp[1] / normdipVectorp,
dipVectorp[2] / normdipVectorp]
valOH += self.lg2(np.dot(unitOHVector0, unitOHVectorp))
valHH += self.lg2(np.dot(unitHHVector0, unitHHVectorp))
valdip += self.lg2(np.dot(unitdipVector0, unitdipVectorp))
n += 1
return (valOH/n, valHH/n, valdip/n) if n > 0 else (0, 0, 0)
def _getMeanOnePoint(self, universe, selection1, selection_str, dt,
totalFrames):
"""
This function gets one point of the plot C_vec vs t. It uses the
_getOneDeltaPoint() function to calculate the average.
"""
repInd = self._repeatedIndex(selection1, dt, totalFrames)
sumsdt = 0
n = 0.0
sumDeltaOH = 0.0
sumDeltaHH = 0.0
sumDeltadip = 0.0
for j in range(totalFrames // dt - 1):
a = self._getOneDeltaPoint(universe, repInd, j, sumsdt, dt)
sumDeltaOH += a[0]
sumDeltaHH += a[1]
sumDeltadip += a[2]
sumsdt += dt
n += 1
# if no water molecules remain in selection, there is nothing to get
# the mean, so n = 0.
return (sumDeltaOH / n, sumDeltaHH / n, sumDeltadip / n) if n > 0 else (0, 0, 0)
def _sameMolecTandDT(self, selection, t0d, tf):
"""
Compare the molecules in the t0d selection and the t0d+dt selection and
select only the particles that are present in both frames. This is to
consider only the molecules that remain in the selection after the dt
time has elapsed.
The result is a sorted list with the indices of the atoms.
"""
a = set(selection[t0d])
b = set(selection[tf])
sort = sorted(list(a.intersection(b)))
return sort
def _selection_serial(self, universe, selection_str):
selection = []
for ts in ProgressBar(universe.trajectory, verbose=True,
total=universe.trajectory.n_frames):
selection.append(universe.select_atoms(selection_str))
return selection
@staticmethod
def lg2(x):
"""Second Legendre polynomial"""
return (3*x*x - 1)/2
def run(self, **kwargs):
"""Analyze trajectory and produce timeseries"""
# All the selection to an array, this way is faster than selecting
# later.
if self.nproc == 1:
selection_out = self._selection_serial(
self.universe, self.selection)
else:
# selection_out = self._selection_parallel(self.universe,
# self.selection, self.nproc)
# parallel selection to be implemented
selection_out = self._selection_serial(
self.universe, self.selection)
self.timeseries = []
for dt in list(range(1, self.dtmax + 1)):
output = self._getMeanOnePoint(
self.universe, selection_out, self.selection, dt, self.tf)
self.timeseries.append(output)
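# A small numeric check of the second Legendre polynomial used above
# (illustrative): P2 equals 1 for parallel or antiparallel unit vectors and
# -0.5 for orthogonal ones, so fully decorrelated orientations drive the WOR
# correlation toward 0.
# >>> WaterOrientationalRelaxation.lg2(1.0), WaterOrientationalRelaxation.lg2(0.0)
# (1.0, -0.5)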
class AngularDistribution(object):
r"""Angular distribution function analysis
The angular distribution function (AD) is defined as the distribution
probability of the cosine of the :math:`\theta` angle formed by the OH
vector, HH vector or dipolar vector of water molecules and a vector
:math:`\hat n` parallel to a chosen axis (z is the default). The cosine
is defined as :math:`\cos \theta = \hat u \cdot \hat n`, where :math:`\hat
u` is the OH, HH or dipole vector. It creates a histogram and returns a list
of lists, see Output_. The AD is also known as the Angular Probability (AP).
Parameters
----------
universe : Universe
Universe object
select : str
Selection string to evaluate its angular distribution ['byres name OH2']
bins : int (optional)
Number of bins to create the histogram by means of :func:`numpy.histogram`
axis : {'x', 'y', 'z'} (optional)
Axis to create angle with the vector (HH, OH or dipole) and calculate
cosine theta ['z'].
.. versionadded:: 0.11.0
.. versionchanged:: 1.0.0
Changed `selection` keyword to `select`
"""
def __init__(self, universe, select, bins=40, nproc=1, axis="z"):
self.universe = universe
self.selection_str = select
self.bins = bins
self.nproc = nproc
self.axis = axis
self.graph = None
def _getCosTheta(self, universe, selection, axis):
valOH = []
valHH = []
valdip = []
i = 0
while i <= (len(selection) - 1):
universe.trajectory[i]
line = selection[i].positions
Ot0 = line[::3]
H1t0 = line[1::3]
H2t0 = line[2::3]
OHVector0 = H1t0 - Ot0
HHVector0 = H1t0 - H2t0
dipVector0 = (H1t0 + H2t0) * 0.5 - Ot0
unitOHVector0 = OHVector0 / \
np.linalg.norm(OHVector0, axis=1)[:, None]
unitHHVector0 = HHVector0 / \
np.linalg.norm(HHVector0, axis=1)[:, None]
unitdipVector0 = dipVector0 / \
np.linalg.norm(dipVector0, axis=1)[:, None]
j = 0
while j < len(line) / 3:
if axis == "z":
valOH.append(unitOHVector0[j][2])
valHH.append(unitHHVector0[j][2])
valdip.append(unitdipVector0[j][2])
elif axis == "x":
valOH.append(unitOHVector0[j][0])
valHH.append(unitHHVector0[j][0])
valdip.append(unitdipVector0[j][0])
elif axis == "y":
valOH.append(unitOHVector0[j][1])
valHH.append(unitHHVector0[j][1])
valdip.append(unitdipVector0[j][1])
j += 1
i += 1
return (valOH, valHH, valdip)
def _getHistogram(self, universe, selection, bins, axis):
"""
        This function gets a normalized histogram of the cos(theta) values. It
        returns a list of lists.
"""
a = self._getCosTheta(universe, selection, axis)
cosThetaOH = a[0]
cosThetaHH = a[1]
cosThetadip = a[2]
lencosThetaOH = len(cosThetaOH)
lencosThetaHH = len(cosThetaHH)
lencosThetadip = len(cosThetadip)
histInterval = bins
histcosThetaOH = np.histogram(cosThetaOH, histInterval, density=True)
histcosThetaHH = np.histogram(cosThetaHH, histInterval, density=True)
histcosThetadip = np.histogram(cosThetadip, histInterval, density=True)
return (histcosThetaOH, histcosThetaHH, histcosThetadip)
def _hist2column(self, aList):
"""
        This function transforms the histogram format
        into a column format.
"""
a = []
for x in zip_longest(*aList, fillvalue="."):
a.append(" ".join(str(i) for i in x))
return a
def run(self, **kwargs):
"""Function to evaluate the angular distribution of cos(theta)"""
if self.nproc == 1:
selection = self._selection_serial(
self.universe, self.selection_str)
else:
# not implemented yet
# selection = self._selection_parallel(self.universe,
# self.selection_str,self.nproc)
selection = self._selection_serial(
self.universe, self.selection_str)
self.graph = []
output = self._getHistogram(
self.universe, selection, self.bins, self.axis)
# this is to format the exit of the file
# maybe this output could be improved
listOH = [list(output[0][1]), list(output[0][0])]
listHH = [list(output[1][1]), list(output[1][0])]
listdip = [list(output[2][1]), list(output[2][0])]
self.graph.append(self._hist2column(listOH))
self.graph.append(self._hist2column(listHH))
self.graph.append(self._hist2column(listdip))
def _selection_serial(self, universe, selection_str):
selection = []
for ts in ProgressBar(universe.trajectory, verbose=True,
total=universe.trajectory.n_frames):
selection.append(universe.select_atoms(selection_str))
return selection
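# A minimal usage sketch for AngularDistribution (file names and the selection
# string are placeholders; the constructor signature is the one defined above):
#
#   import MDAnalysis as mda
#   from MDAnalysis.analysis.waterdynamics import AngularDistribution
#
#   u = mda.Universe("topology.psf", "trajectory.dcd")   # placeholder files
#   ad = AngularDistribution(u, "byres name OH2", bins=40, axis="z")
#   ad.run()
#   # ad.graph holds three column-formatted histograms: OH, HH and dipole
#   for line in ad.graph[0][:5]:
#       print(line)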
class MeanSquareDisplacement(object):
r"""Mean square displacement analysis
    Function to evaluate the Mean Square Displacement (MSD_). The MSD gives the
    average distance that particles travel. The MSD is given by:
.. math::
\langle\Delta r(t)^2\rangle = 2nDt
where :math:`r(t)` is the position of particle in time :math:`t`,
:math:`\Delta r(t)` is the displacement after time lag :math:`t`,
:math:`n` is the dimensionality, in this case :math:`n=3`,
:math:`D` is the diffusion coefficient and :math:`t` is the time.
.. _MSD: http://en.wikipedia.org/wiki/Mean_squared_displacement
Parameters
----------
universe : Universe
Universe object
select : str
        Selection string for water ['byres name OH2'].
t0 : int
frame where analysis begins
tf : int
frame where analysis ends
dtmax : int
Maximum dt size, `dtmax` < `tf` or it will crash.
.. versionadded:: 0.11.0
.. versionchanged:: 1.0.0
Changed `selection` keyword to `select`
"""
def __init__(self, universe, select, t0, tf, dtmax, nproc=1):
self.universe = universe
self.selection = select
self.t0 = t0
self.tf = tf
self.dtmax = dtmax
self.nproc = nproc
self.timeseries = None
def _repeatedIndex(self, selection, dt, totalFrames):
"""
        Indicate the comparison between all the t and t+dt frames.
        The result is a list of lists with all the repeated indices per frame
        (or time).
- Ex: dt=1, so compare frames (1,2),(2,3),(3,4)...
- Ex: dt=2, so compare frames (1,3),(3,5),(5,7)...
- Ex: dt=3, so compare frames (1,4),(4,7),(7,10)...
"""
rep = []
for i in range(int(round((totalFrames - 1) / float(dt)))):
if (dt * i + dt < totalFrames):
rep.append(self._sameMolecTandDT(
selection, dt * i, (dt * i) + dt))
return rep
def _getOneDeltaPoint(self, universe, repInd, i, t0, dt):
"""
Gives one point to calculate the mean and gets one point of the plot
C_vect vs t.
- Ex: t0=1 and dt=1 so calculate the t0-dt=1-2 interval.
        - Ex: t0=5 and dt=3 so calculate the t0-dt=5-8 interval.
        i = index coming from _getMeanOnePoint (named j there) (int)
"""
valO = 0
n = 0
for j in range(len(repInd[i]) // 3):
begj = 3 * j
universe.trajectory[t0]
            # Plus zero is to avoid Ot0 being the same object as Otp
Ot0 = repInd[i][begj].position + 0
universe.trajectory[t0 + dt]
            # Plus zero is to avoid Ot0 being the same object as Otp
Otp = repInd[i][begj].position + 0
# position oxygen
OVector = Ot0 - Otp
# here it is the difference with
# waterdynamics.WaterOrientationalRelaxation
valO += np.dot(OVector, OVector)
n += 1
# if no water molecules remain in selection, there is nothing to get
# the mean, so n = 0.
return valO/n if n > 0 else 0
def _getMeanOnePoint(self, universe, selection1, selection_str, dt,
totalFrames):
"""
        This function gets one point of the plot C_vec vs t. It uses the
_getOneDeltaPoint() function to calculate the average.
"""
repInd = self._repeatedIndex(selection1, dt, totalFrames)
sumsdt = 0
n = 0.0
sumDeltaO = 0.0
valOList = []
for j in range(totalFrames // dt - 1):
a = self._getOneDeltaPoint(universe, repInd, j, sumsdt, dt)
sumDeltaO += a
valOList.append(a)
sumsdt += dt
n += 1
# if no water molecules remain in selection, there is nothing to get
# the mean, so n = 0.
return sumDeltaO/n if n > 0 else 0
def _sameMolecTandDT(self, selection, t0d, tf):
"""
Compare the molecules in the t0d selection and the t0d+dt selection and
        select only the particles that are present in both frames. This is done
        to consider only the molecules that remain in the selection after the
        dt time has elapsed. The result is a list with the indices of the atoms.
"""
a = set(selection[t0d])
b = set(selection[tf])
sort = sorted(list(a.intersection(b)))
return sort
def _selection_serial(self, universe, selection_str):
selection = []
for ts in ProgressBar(universe.trajectory, verbose=True,
total=universe.trajectory.n_frames):
selection.append(universe.select_atoms(selection_str))
return selection
def run(self, **kwargs):
"""Analyze trajectory and produce timeseries"""
# All the selection to an array, this way is faster than selecting
# later.
if self.nproc == 1:
selection_out = self._selection_serial(
self.universe, self.selection)
else:
# parallel not yet implemented
# selection = selection_parallel(universe, selection_str, nproc)
selection_out = self._selection_serial(
self.universe, self.selection)
self.timeseries = []
for dt in list(range(1, self.dtmax + 1)):
output = self._getMeanOnePoint(
self.universe, selection_out, self.selection, dt, self.tf)
self.timeseries.append(output)
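# A minimal usage sketch for MeanSquareDisplacement (file names and the
# selection string are placeholders). timeseries holds <dr(t)^2> for each
# dt = 1..dtmax (in frames), so a rough diffusion coefficient can be read off
# via D = MSD / (6 t), following the 2nDt relation quoted in the class docstring:
#
#   import MDAnalysis as mda
#   from MDAnalysis.analysis.waterdynamics import MeanSquareDisplacement
#
#   u = mda.Universe("topology.psf", "trajectory.dcd")   # placeholder files
#   msd = MeanSquareDisplacement(u, "byres name OH2", t0=0, tf=100, dtmax=20)
#   msd.run()
#   for dt, value in enumerate(msd.timeseries, start=1):
#       print(dt, value, value / (6.0 * dt))   # MSD and a crude D estimate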
class SurvivalProbability(object):
r"""
Survival Probability (SP) gives the probability for a group of particles to remain in a certain region.
The SP is given by:
.. math::
P(\tau) = \frac1T \sum_{t=1}^T \frac{N(t,t+\tau)}{N(t)}
where :math:`T` is the maximum time of simulation, :math:`\tau` is the
timestep, :math:`N(t)` the number of particles at time :math:`t`, and
    :math:`N(t, t+\tau)` is the number of particles present at every frame from :math:`t` to :math:`t+\tau`.
Parameters
----------
universe : Universe
Universe object
select : str
Selection string; any selection is allowed. With this selection you
define the region/zone where to analyze, e.g.: "resname SOL and around 5 (resid 10)". See `SP-examples`_.
verbose : Boolean, optional
When True, prints progress and comments to the console.
Notes
-----
    Currently :class:`SurvivalProbability` is the only class in
    :mod:`MDAnalysis.analysis.waterdynamics` to support an `exclusive`
    behaviour (i.e. similar to the current behaviour of :class:`AnalysisBase`)
    with respect to the `stop` keyword passed to :meth:`SurvivalProbability.run`,
    unlike the other :mod:`MDAnalysis.analysis.waterdynamics` classes, whose
    final frame definitions are `inclusive`.
.. versionadded:: 0.11.0
.. versionchanged:: 1.0.0
Using the MDAnalysis.lib.correlations.py to carry out the intermittency
and autocorrelation calculations.
Changed `selection` keyword to `select`.
Removed support for the deprecated `t0`, `tf`, and `dtmax` keywords.
These should instead be passed to :meth:`SurvivalProbability.run` as
the `start`, `stop`, and `tau_max` keywords respectively.
The `stop` keyword as passed to :meth:`SurvivalProbability.run` has now
        changed behaviour and will act in an `exclusive` manner (instead of its
        previous `inclusive` behaviour).
"""
def __init__(self, universe, select, verbose=False):
self.universe = universe
self.selection = select
self.verbose = verbose
def run(self, tau_max=20, start=None, stop=None, step=None, residues=False,
intermittency=0, verbose=False):
"""
Computes and returns the Survival Probability (SP) timeseries
Parameters
----------
start : int, optional
Zero-based index of the first frame to be analysed, Default: None
(first frame).
stop : int, optional
Zero-based index of the last frame to be analysed (exclusive),
Default: None (last frame).
step : int, optional
            Jump every `step`-th frame. This is compatible with, but independent of,
the taus used, and it is good to consider using the `step` equal
to `tau_max` to remove the overlap. Note that `step` and `tau_max`
work consistently with intermittency. Default: None
(use every frame).
tau_max : int, optional
Survival probability is calculated for the range
1 <= `tau` <= `tau_max`.
residues : Boolean, optional
If true, the analysis will be carried out on the residues
(.resids) rather than on atom (.ids). A single atom is sufficient
to classify the residue as within the distance.
intermittency : int, optional
The maximum number of consecutive frames for which an atom can
leave but be counted as present if it returns at the next frame.
An intermittency of `0` is equivalent to a continuous survival
probability, which does not allow for the leaving and returning of
atoms. For example, for `intermittency=2`, any given atom may leave
a region of interest for up to two consecutive frames yet be
treated as being present at all frames. The default is continuous
(0).
verbose : Boolean, optional
Print the progress to the console.
Returns
-------
tau_timeseries : list
tau from 1 to `tau_max`. Saved in the field tau_timeseries.
sp_timeseries : list
survival probability for each value of `tau`. Saved in the field
sp_timeseries.
sp_timeseries_data: list
raw datapoints from which the average is taken (sp_timeseries).
            Time dependency and distribution can be extracted.
.. versionchanged:: 1.0.0
            To match other analysis methods, the `stop` keyword is now exclusive
rather than inclusive.
"""
start, stop, step = self.universe.trajectory.check_slice_indices(
start,
stop,
step
)
if tau_max > (stop - start):
raise ValueError("Too few frames selected for given tau_max.")
# preload the frames (atom IDs) to a list of sets
self._selected_ids = []
        # fixme - to parallelise: the section should be rewritten so that this loop only creates a list of indices,
# on which the parallel _single_frame can be applied.
# skip frames that will not be used in order to improve performance
# because AtomGroup.select_atoms is the most expensive part of this calculation
# Example: step 5 and tau 2: LLLSS LLLSS, ... where L = Load, and S = Skip
# Intermittency means that we have to load the extra frames to know if the atom is actually missing.
# Say step=5 and tau=1, intermittency=0: LLSSS LLSSS
# Say step=5 and tau=1, intermittency=1: LLLSL LLLSL
frame_loaded_counter = 0
# only for the first window (frames before t are not used)
frames_per_window = tau_max + 1 + intermittency
        # This number will apply after the first window was loaded
frames_per_window_subsequent = (tau_max + 1) + (2 * intermittency)
num_frames_to_skip = max(step - frames_per_window_subsequent, 0)
frame_no = start
while frame_no < stop: # we have already added 1 to stop, therefore <
if num_frames_to_skip != 0 and frame_loaded_counter == frames_per_window:
logger.info("Skipping the next %d frames:", num_frames_to_skip)
frame_no += num_frames_to_skip
frame_loaded_counter = 0
# Correct the number of frames to be loaded after the first window (which starts at t=0, and
# intermittency does not apply to the frames before)
frames_per_window = frames_per_window_subsequent
continue
# update the frame number
self.universe.trajectory[frame_no]
logger.info("Loading frame: %d", self.universe.trajectory.frame)
atoms = self.universe.select_atoms(self.selection)
# SP of residues or of atoms
ids = atoms.residues.resids if residues else atoms.ids
self._selected_ids.append(set(ids))
frame_no += 1
frame_loaded_counter += 1
# adjust for the frames that were not loaded (step>tau_max + 1),
# and for extra frames that were loaded (intermittency)
window_jump = step - num_frames_to_skip
self._intermittent_selected_ids = correct_intermittency(self._selected_ids, intermittency=intermittency)
tau_timeseries, sp_timeseries, sp_timeseries_data = autocorrelation(self._intermittent_selected_ids,
tau_max, window_jump)
# warn the user if the NaN are found
if all(np.isnan(sp_timeseries[1:])):
logger.warning('NaN Error: Most likely data was not found. Check your atom selections. ')
# user can investigate the distribution and sample size
self.sp_timeseries_data = sp_timeseries_data
self.tau_timeseries = tau_timeseries
self.sp_timeseries = sp_timeseries
return self
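# A minimal usage sketch for SurvivalProbability (file names and the selection
# string are placeholders; the keyword names are those documented in run()):
#
#   import MDAnalysis as mda
#   from MDAnalysis.analysis.waterdynamics import SurvivalProbability
#
#   u = mda.Universe("topology.psf", "trajectory.dcd")   # placeholder files
#   sp = SurvivalProbability(u, "name OH2 and around 5 (resid 10)", verbose=True)
#   sp.run(start=0, stop=100, tau_max=20, intermittency=2)
#   for tau, p in zip(sp.tau_timeseries, sp.sp_timeseries):
#       print(tau, p)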
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/analysis/waterdynamics.py
|
Python
|
gpl-2.0
| 41,156
|
[
"MDAnalysis"
] |
f2f0fb9427e50748ac578103b9301159f60a2772c31c267118d1f9cfd30db75a
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Provides an interface to manage URI scheme support in iris.
"""
import collections
from collections import OrderedDict
import glob
import os.path
import pathlib
import re
import iris.exceptions
# Saving routines, indexed by file extension.
class _SaversDict(dict):
"""A dictionary that can only have string keys with no overlap."""
def __setitem__(self, key, value):
if not isinstance(key, str):
raise ValueError("key is not a string")
if key in self:
raise ValueError("A saver already exists for", key)
for k in self.keys():
if k.endswith(key) or key.endswith(k):
raise ValueError(
"key %s conflicts with existing key %s" % (key, k)
)
dict.__setitem__(self, key, value)
_savers = _SaversDict()
def run_callback(callback, cube, field, filename):
"""
Runs the callback mechanism given the appropriate arguments.
Args:
* callback:
A function to add metadata from the originating field and/or URI which
obeys the following rules:
1. Function signature must be: ``(cube, field, filename)``.
2. Modifies the given cube inplace, unless a new cube is
returned by the function.
3. If the cube is to be rejected the callback must raise
an :class:`iris.exceptions.IgnoreCubeException`.
.. note::
        It is possible that this function returns None for certain callbacks;
        the caller of this function should handle this case.
"""
from iris.cube import Cube
if callback is None:
return cube
# Call the callback function on the cube, generally the function will
# operate on the cube in place, but it is also possible that the function
# will return a completely new cube instance.
try:
result = callback(cube, field, filename)
except iris.exceptions.IgnoreCubeException:
result = None
else:
if result is None:
result = cube
elif not isinstance(result, Cube):
raise TypeError(
"Callback function returned an " "unhandled data type."
)
return result
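# A hedged example of a callback that follows the rules documented above (the
# file-name test and the "source_file" attribute key are purely illustrative):
#
#   import iris.exceptions
#
#   def my_callback(cube, field, filename):
#       # Reject cubes coming from a particular file...
#       if filename.endswith("bad_data.pp"):
#           raise iris.exceptions.IgnoreCubeException()
#       # ...otherwise annotate the cube in place (returning None is fine).
#       cube.attributes["source_file"] = filename
#
#   # cubes = iris.load("my_files_*.pp", callback=my_callback)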
def decode_uri(uri, default="file"):
r"""
Decodes a single URI into scheme and scheme-specific parts.
In addition to well-formed URIs, it also supports bare file paths as strings
or :class:`pathlib.PurePath`. Both Windows and UNIX style paths are
accepted.
.. testsetup::
from iris.io import *
Examples:
>>> from iris.io import decode_uri
>>> print(decode_uri('http://www.thing.com:8080/resource?id=a:b'))
('http', '//www.thing.com:8080/resource?id=a:b')
>>> print(decode_uri('file:///data/local/dataZoo/...'))
('file', '///data/local/dataZoo/...')
>>> print(decode_uri('/data/local/dataZoo/...'))
('file', '/data/local/dataZoo/...')
>>> print(decode_uri('file:///C:\data\local\dataZoo\...'))
('file', '///C:\\data\\local\\dataZoo\\...')
>>> print(decode_uri('C:\data\local\dataZoo\...'))
('file', 'C:\\data\\local\\dataZoo\\...')
>>> print(decode_uri('dataZoo/...'))
('file', 'dataZoo/...')
"""
if isinstance(uri, pathlib.PurePath):
uri = str(uri)
# make sure scheme has at least 2 letters to avoid windows drives
# put - last in the brackets so it refers to the character, not a range
# reference on valid schemes: http://tools.ietf.org/html/std66#section-3.1
match = re.match(r"^([a-zA-Z][a-zA-Z0-9+.-]+):(.+)", uri)
if match:
scheme = match.group(1)
part = match.group(2)
else:
# Catch bare UNIX and Windows paths
scheme = default
part = uri
return scheme, part
def expand_filespecs(file_specs):
"""
Find all matching file paths from a list of file-specs.
Args:
* file_specs (iterable of string):
File paths which may contain '~' elements or wildcards.
Returns:
A well-ordered list of matching absolute file paths.
If any of the file-specs match no existing files, an
exception is raised.
"""
# Remove any hostname component - currently unused
filenames = [
os.path.abspath(
os.path.expanduser(fn[2:] if fn.startswith("//") else fn)
)
for fn in file_specs
]
# Try to expand all filenames as globs
glob_expanded = OrderedDict(
[[fn, sorted(glob.glob(fn))] for fn in filenames]
)
# If any of the specs expanded to an empty list then raise an error
all_expanded = glob_expanded.values()
if not all(all_expanded):
msg = "One or more of the files specified did not exist:"
for pattern, expanded in glob_expanded.items():
if expanded:
msg += '\n - "{}" matched {} file(s)'.format(
pattern, len(expanded)
)
else:
msg += '\n * "{}" didn\'t match any files'.format(pattern)
raise IOError(msg)
return [fname for fnames in all_expanded for fname in fnames]
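# A hedged illustration of expand_filespecs (paths and matches are made up):
#
#   expand_filespecs(["~/data/*.pp", "/tmp/file.nc"])
#   # -> ["/home/user/data/a.pp", "/home/user/data/b.pp", "/tmp/file.nc"]
#   # An IOError is raised if any of the specs matches no existing file.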
def load_files(filenames, callback, constraints=None):
"""
Takes a list of filenames which may also be globs, and optionally a
constraint set and a callback function, and returns a
generator of Cubes from the given files.
.. note::
Typically, this function should not be called directly; instead, the
intended interface for loading is :func:`iris.load`.
"""
from iris.fileformats import FORMAT_AGENT
all_file_paths = expand_filespecs(filenames)
# Create default dict mapping iris format handler to its associated filenames
handler_map = collections.defaultdict(list)
for fn in all_file_paths:
with open(fn, "rb") as fh:
handling_format_spec = FORMAT_AGENT.get_spec(
os.path.basename(fn), fh
)
handler_map[handling_format_spec].append(fn)
    # Call each iris format handler with the appropriate filenames
for handling_format_spec in sorted(handler_map):
fnames = handler_map[handling_format_spec]
if handling_format_spec.constraint_aware_handler:
for cube in handling_format_spec.handler(
fnames, callback, constraints
):
yield cube
else:
for cube in handling_format_spec.handler(fnames, callback):
yield cube
def load_http(urls, callback):
"""
Takes a list of OPeNDAP URLs and a callback function, and returns a generator
of Cubes from the given URLs.
.. note::
Typically, this function should not be called directly; instead, the
intended interface for loading is :func:`iris.load`.
"""
# Create default dict mapping iris format handler to its associated filenames
from iris.fileformats import FORMAT_AGENT
handler_map = collections.defaultdict(list)
for url in urls:
handling_format_spec = FORMAT_AGENT.get_spec(url, None)
handler_map[handling_format_spec].append(url)
# Call each iris format handler with the appropriate filenames
for handling_format_spec in sorted(handler_map):
fnames = handler_map[handling_format_spec]
for cube in handling_format_spec.handler(fnames, callback):
yield cube
def _dot_save(cube, target):
# A simple wrapper for `iris.fileformats.dot.save` which allows the
# saver to be registered without triggering the import of
# `iris.fileformats.dot`.
from iris.fileformats.dot import save
return save(cube, target)
def _dot_save_png(cube, target, **kwargs):
# A simple wrapper for `iris.fileformats.dot.save_png` which allows the
# saver to be registered without triggering the import of
# `iris.fileformats.dot`.
from iris.fileformats.dot import save_png
return save_png(cube, target, **kwargs)
def _grib_save(cube, target, append=False, **kwargs):
# A simple wrapper for the grib save routine, which allows the saver to be
# registered without having the grib implementation installed.
try:
from iris_grib import save_grib2
except ImportError:
raise RuntimeError(
"Unable to save GRIB file - "
'"iris_grib" package is not installed.'
)
save_grib2(cube, target, append, **kwargs)
def _check_init_savers():
from iris.fileformats import netcdf, pp
if "pp" not in _savers:
_savers.update(
{
"pp": pp.save,
"nc": netcdf.save,
"dot": _dot_save,
"dotpng": _dot_save_png,
"grib2": _grib_save,
}
)
def add_saver(file_extension, new_saver):
"""
Add a custom saver to the Iris session.
Args:
* file_extension: A string such as "pp" or "my_format".
* new_saver: A function of the form ``my_saver(cube, target)``.
See also :func:`iris.io.save`
"""
# Make sure it's a func with 2+ args
if (
not hasattr(new_saver, "__call__")
or new_saver.__code__.co_argcount < 2
):
raise ValueError("Saver routines must be callable with 2+ arguments.")
# Try to add this saver. Invalid keys will be rejected.
_savers[file_extension] = new_saver
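# A hedged sketch of registering a custom saver with add_saver (the "csv"
# extension and the helper below are illustrative, not part of Iris):
#
#   import numpy as np
#
#   def save_csv(cube, target):
#       # Very naive: dump the cube's (1D or 2D) data payload as CSV.
#       np.savetxt(target, cube.data, delimiter=",")
#
#   add_saver("csv", save_csv)
#   # iris.save(my_cube, "output.csv") would then dispatch to save_csv.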
def find_saver(filespec):
"""
Find the saver function appropriate to the given filename or extension.
Args:
* filespec
A string such as "my_file.pp" or "PP".
Returns:
A save function or None.
Save functions can be passed to :func:`iris.io.save`.
"""
_check_init_savers()
matches = [
ext
for ext in _savers
if filespec.lower().endswith("." + ext) or filespec.lower() == ext
]
# Multiple matches could occur if one of the savers included a '.':
# e.g. _savers = {'.dot.png': dot_png_saver, '.png': png_saver}
if len(matches) > 1:
fmt = "Multiple savers found for %r: %s"
matches = ", ".join(map(repr, matches))
raise ValueError(fmt % (filespec, matches))
return _savers[matches[0]] if matches else None
def save(source, target, saver=None, **kwargs):
"""
Save one or more Cubes to file (or other writeable).
Iris currently supports three file formats for saving, which it can
recognise by filename extension:
* netCDF - the Unidata network Common Data Format:
* see :func:`iris.fileformats.netcdf.save`
* GRIB2 - the WMO GRIdded Binary data format:
* see :func:`iris_grib.save_grib2`.
* PP - the Met Office UM Post Processing Format:
* see :func:`iris.fileformats.pp.save`
A custom saver can be provided to the function to write to a different
file format.
Args:
* source:
:class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or
sequence of cubes.
* target:
A filename (or writeable, depending on file format).
When given a filename or file, Iris can determine the
file format. Filename can be given as a string or
:class:`pathlib.PurePath`.
Kwargs:
* saver:
Optional. Specifies the file format to save.
If omitted, Iris will attempt to determine the format.
If a string, this is the recognised filename extension
(where the actual filename may not have it).
Otherwise the value is a saver function, of the form:
``my_saver(cube, target)`` plus any custom keywords. It
is assumed that a saver will accept an ``append`` keyword
        if its file format can handle multiple cubes. See also
:func:`iris.io.add_saver`.
All other keywords are passed through to the saver function; see the
relevant saver documentation for more information on keyword arguments.
Examples::
# Save a cube to PP
iris.save(my_cube, "myfile.pp")
# Save a cube list to a PP file, appending to the contents of the file
# if it already exists
iris.save(my_cube_list, "myfile.pp", append=True)
# Save a cube to netCDF, defaults to NETCDF4 file format
iris.save(my_cube, "myfile.nc")
# Save a cube list to netCDF, using the NETCDF3_CLASSIC storage option
iris.save(my_cube_list, "myfile.nc", netcdf_format="NETCDF3_CLASSIC")
.. warning::
Saving a cube whose data has been loaded lazily
(if `cube.has_lazy_data()` returns `True`) to the same file it expects
to load data from will cause both the data in-memory and the data on
disk to be lost.
.. code-block:: python
cube = iris.load_cube("somefile.nc")
# The next line causes data loss in 'somefile.nc' and the cube.
iris.save(cube, "somefile.nc")
In general, overwriting a file which is the source for any lazily loaded
data can result in corruption. Users should proceed with caution when
attempting to overwrite an existing file.
"""
from iris.cube import Cube, CubeList
# Determine format from filename
if isinstance(target, pathlib.PurePath):
target = str(target)
if isinstance(target, str) and saver is None:
saver = find_saver(target)
elif hasattr(target, "name") and saver is None:
saver = find_saver(target.name)
elif isinstance(saver, str):
saver = find_saver(saver)
if saver is None:
raise ValueError("Cannot save; no saver")
# Single cube?
if isinstance(source, Cube):
saver(source, target, **kwargs)
# CubeList or sequence of cubes?
elif isinstance(source, CubeList) or (
isinstance(source, (list, tuple))
and all([isinstance(i, Cube) for i in source])
):
# Only allow cubelist saving for those fileformats that are capable.
if "iris.fileformats.netcdf" not in saver.__module__:
# Make sure the saver accepts an append keyword
if "append" not in saver.__code__.co_varnames:
raise ValueError(
"Cannot append cubes using saver function "
"'%s' in '%s'"
% (saver.__code__.co_name, saver.__code__.co_filename)
)
# Force append=True for the tail cubes. Don't modify the incoming
# kwargs.
kwargs = kwargs.copy()
for i, cube in enumerate(source):
if i != 0:
kwargs["append"] = True
saver(cube, target, **kwargs)
# Netcdf saver.
else:
saver(source, target, **kwargs)
else:
raise ValueError("Cannot save; non Cube found in source")
|
SciTools/iris
|
lib/iris/io/__init__.py
|
Python
|
lgpl-3.0
| 15,009
|
[
"NetCDF"
] |
502aa2cec08784ebe6a76ff2e13cfa314ace3eae235e3f89fa9298a63cb870d1
|
# hdfutil.py ---
#
# Filename: hdfutil.py
# Description:
# Author:
# Maintainer:
# Created: Thu Aug 23 17:34:55 2012 (+0530)
# Version:
# Last-Updated: Mon Sep 3 17:55:03 2012 (+0530)
# By: subha
# Update #: 618
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Utility function to save data in hdf5 format.
#
# In this utility we are trying to address a serialization
# problem. The ultimate goal is to be able to save a snapshot of
# complete simulator state in a portable format so that it can be
# loaded later to reach that state and continue from there.
#
# TODO: random number generators: the full RNG state has to be
# saved. MOOSE does not provide access to this at user level.
#
# TODO: what about internal variables? they affect the state of the
# simulation yet MOOSE does not allow access to these variables. Do we
# need a change in the API to make all internal variables accessible
# in a generic manner?
#
# TODO: How do we translate MOOSE tree to HDF5? MOOSE has vec and
# elements. vec is a container and each element belongs to a
# vec.
#
# em-0
# el-00 el-01 el-02
# / | \
# / | \
# em-1 em-2 em-3
# el-10 el-20 el-30 el-31 el-32 el-33
# / \
# / \
# em-4 em-5
# el-40 el-41 el-50
#
#
# Serializing MOOSE tree structure into an HDF5 tree structure has
# some issues to be resolved. Each vec is saved as an HDF
# group, and all the elements inside it as an HDF dataset. But the problem
# is that HDF datasets cannot have children, whereas in MOOSE the
# parent-child relation is the opposite: each element can have one or more
# ematrices as children.
#
# Serializing MOOSE tree structure into HDF5 tables for each class.
# This is the approach I took initially. This is possibly more space
# saving.
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from __future__ import print_function
from . import moose as moose__
import numpy as np
import h5py as h5
import time
from collections import defaultdict
size_step=256
# maps cpp data type names to numpy data types
cpptonp = {
'int': 'i4',
'long': 'i8',
'bool': 'b',
'unsigned int': 'u4',
'unsigned long': 'u8',
'float': 'f4',
'double': 'f8',
'string': 'S1024',
'Id': 'S1024',
'ObjId': 'S1024',
}
dtype_table = {}
# dims allows only 3 dims for the time being
em_dtype = np.dtype([('path', 'S1024'), ('class', 'S64'), ('dims', 'i4', (3,))])
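# For illustration, with a hypothetical field dict the record dtype built by
# get_rec_dtype() below would look like this (the field names are made up):
#
#   # fielddict = {'Vm': 'double', 'name': 'string'}
#   # -> np.dtype([('Vm', 'f8'), ('name', 'S1024')])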
def get_rec_dtype(em):
bad_fields = []
# Check if already have data type information for this class.
# Create it otherwise.
if em.className in dtype_table:
dtype = dtype_table[em.className]
else:
        print('Creating entries for class:', em.className)
        fielddict = moose__.getFieldDict(em.className, 'valueFinfo')
print(fielddict)
keys = sorted(list(fielddict.keys()))
fields = [] # [('path', 'S1024')]
for fieldname in keys:
ftype = fielddict[fieldname]
# If we do not have the type of this field in cpp-np data
# type map, skip it. We can handle vectors as nested
# table, but the problem with that is numpy requires all
# entries to have a fixed size for this table. Since
# vectors can have arbitrary length, we cannot get such a
# fixed size without checking the size of the vector field
# in all elements.
if ftype in cpptonp:
fields.append((fieldname, cpptonp[ftype]))
        dtype_table[em.className] = np.dtype(fields)
    return dtype_table[em.className]
def save_dataset(classname, rec, dtype, hdfnode):
"""Saves the data from rec into dataset"""
# Check if there is a dataset for this class. Create it
# otherwise.
if len(rec) == 0:
return
if classname in hdfnode:
ds = hdfnode[classname]
        oldlen = ds.len()
        newlen = oldlen + len(rec)
        ds.resize((newlen,))
    else:
        oldlen = 0
        newlen = len(rec)
        # maxshape=(None,) makes the dataset resizable so that later chunks of
        # records can be appended via resize().
        ds = hdfnode.create_dataset(classname,
                                    shape=(len(rec),),
                                    maxshape=(None,),
                                    dtype=dtype,
                                    compression='gzip',
                                    compression_opts=6)
    ds[oldlen:newlen] = np.rec.array(rec, dtype=dtype)
def savetree(moosenode, hdfnode):
"""Dump the MOOSE element tree rooted at moosenode as datasets
under hdfnode."""
# Keep track of total number of ematrices seen in each class.
obj_count = defaultdict(int)
# Buffer the last `size_step` object records for each class in
# this array.
obj_rec = defaultdict(list)
em_rec = []
    hdfnode.attrs['path'] = moosenode.path
    elements = hdfnode.create_group('elements')
    for em in moose__.wildcardFind(moosenode.path+'/##'):
em_rec.append((em.path, em.className, em.shape))
dtype = get_rec_dtype(em)
for obj in em:
fields = []
for fname in dtype.names:
f = obj.getField(fname)
                if isinstance(f, moose__.vec) or isinstance(f, moose__.element):
fields.append(f.path)
else:
fields.append(f)
obj_rec[em.className].append(fields)
obj_count[em.className] += 1
if obj_count[em.className] % size_step == 0:
save_dataset(em.className, obj_rec[em.className], dtype, elements)
obj_rec[em.className][:] = [] # clear the records after saving
# now save the remaining records (length < size_step)
for classname, rec in list(obj_rec.items()):
        save_dataset(classname, rec, dtype_table[classname], elements)
vec = hdfnode.create_dataset('vec', shape=(len(em_rec),), dtype=em_dtype)
vec[:] = em_rec
def loadtree(hdfnode, moosenode):
"""Load the element tree saved under the group `hdfnode` into `moosenode`"""
pathclass = {}
    basepath = hdfnode.attrs['path']
if basepath != '/':
basepath = basepath + '/'
emdata = hdfnode['vec'][:]
sorted_paths = sorted(emdata['path'], key=lambda x: x.count('/'))
    dims = dict(zip(emdata['path'], emdata['dims']))
    classes = dict(zip(emdata['path'], emdata['class']))
    current = moose__.getCwe()
    moose__.setCwe(moosenode)
# First create all the ematrices
for path in sorted_paths:
rpath = path[len(basepath):]
classname = classes[path]
shape = dims[path]
        em = moose__.vec(rpath, shape, classname)
wfields = {}
for cinfo in moose__.element('/classes').children:
cname = cinfo[0].name
wfields[cname] = [f[len('set_'):] for f in moose__.getFieldNames(cname, 'destFinfo')
if f.startswith('set_') and f not in ['set_this', 'set_name', 'set_lastDimension', 'set_runTime'] and not f.startswith('set_num_')]
for key in hdfnode['/elements']:
dset = hdfnode['/elements/'][key][:]
fieldnames = dset.dtype.names
for ii in range(len(dset)):
obj = moose__.element(dset['path'][ii][len(basepath):])
for f in wfields[obj.className]:
obj.setField(f, dset[f][ii])
def savestate(filename=None):
"""Dump the state of MOOSE in an hdf5 file.
The file will have a data set for each class.
Each such dataset will be a column of field values.
This function needs more work on moose serialization.
"""
if filename is None:
filename = 'moose_session_' + time.strftime('%Y%m%d_%H%M%S') + '.hdf5'
with h5.File(filename, 'w') as fd:
root = fd.create_group('/elements')
meta = fd.create_group('/metadata')
        typeinfo_dataset = meta.create_dataset('typeinfo', shape=(size_step,), maxshape=(None,), dtype=[('path', 'S1024'), ('class', 'S64'), ('dims', 'S64'), ('parent', 'S1024')], compression='gzip', compression_opts=6)
typeinfo = []
class_dataset_dict = {}
class_count_dict = {}
class_array_dict = {}
objcount = 0
for obj in moose__.wildcardFind("/##"):
if obj.path.startswith('/Msg') or obj.path.startswith('/class') or obj.className == 'Table' or obj.className == 'TableEntry':
continue
print('Processing:', obj.path, obj.className)
typeinfo.append((obj.path, obj.className, str(obj.shape), obj[0].parent.path))
objcount += 1
if len(typeinfo) == size_step:
                typeinfo_dataset.resize((objcount,))
typeinfo_dataset[objcount - size_step: objcount] = np.rec.array(typeinfo, typeinfo_dataset.dtype)
typeinfo = []
# If we do not yet have dataset for this class, create one and keep it in dict
if obj.className not in class_dataset_dict:
print('Creating entries for class:', obj.className)
fielddict = moose__.getFieldDict(obj.className, 'valueFinfo')
print(fielddict)
keys = sorted(list(fielddict.keys()))
fields = [] # [('path', 'S1024')]
for fieldname in keys:
ftype = fielddict[fieldname]
if ftype in cpptonp:
fields.append((fieldname, cpptonp[ftype]))
elif ftype == 'Id' or ftype == 'ObjId':
fields.append((fieldname, 'S1024'))
# print fields
                ds = root.create_dataset(obj.className, shape=(size_step,), maxshape=(None,), dtype=fields, compression='gzip', compression_opts=6)
class_dataset_dict[obj.className] = ds
class_array_dict[obj.className] = []
class_count_dict[obj.className] = 0
# Lookup the dataset for the class this object belongs to
ds = class_dataset_dict[obj.className]
for entry in obj:
fields = []
print(entry.path, end=' ')
for f in ds.dtype.names:
                    print('getting field:', f)
                    fields.append(entry.getField(f))
fields = [f.path if isinstance(f, moose__.vec) or isinstance(f, moose__.element) else f for f in fields]
class_array_dict[obj.className].append(fields)
# print 'fields:'
# print fields
# print 'length:', len(class_array_dict[obj.className])
class_count_dict[obj.className] += 1
if class_count_dict[obj.className] == size_step:
oldlen = ds.len()
if oldlen <= class_count_dict[obj.className]:
                        ds.resize((class_count_dict[obj.className],))
ds[oldlen: class_count_dict[obj.className]] = np.rec.array(class_array_dict[obj.className], dtype=ds.dtype)
class_array_dict[obj.className] = []
for classname in class_array_dict:
ds = class_dataset_dict[classname]
ds.resize((class_count_dict[classname], ))
if len(class_array_dict[classname]) > 0:
start = class_count_dict[classname] - len(class_array_dict[classname])
ds[start:] = np.rec.array(class_array_dict[classname], dtype=ds.dtype)
if len(typeinfo) > 0:
typeinfo_dataset.resize((objcount,))
typeinfo_dataset[objcount-len(typeinfo): objcount] = np.rec.array(typeinfo, dtype=typeinfo_dataset.dtype)
def restorestate(filename):
wfields = {}
for cinfo in moose__.element('/classes').children:
cname = cinfo[0].name
wfields[cname] = [f[len('set_'):] for f in moose__.getFieldNames(cname, 'destFinfo')
if f.startswith('set_') and f not in ['set_this', 'set_name', 'set_lastDimension', 'set_runTime'] and not f.startswith('set_num_')]
with h5.File(filename, 'r') as fd:
typeinfo = fd['/metadata/typeinfo'][:]
classdict = {}
dimsdict = dict(list(zip(typeinfo['path'], typeinfo['dims'])))
classdict = dict(list(zip(typeinfo['path'], typeinfo['class'])))
parentdict = dict(list(zip(typeinfo['path'], typeinfo['parent'])))
sorted_paths = sorted(typeinfo['path'], key=lambda x: x.count('/'))
for path in sorted_paths:
name = path.rpartition('/')[-1].partition('[')[0]
moose__.vec(parentdict[path] + '/' + name, eval(dimsdict[path]), classdict[path])
for key in fd['/elements']:
dset = fd['/elements/'][key][:]
fieldnames = dset.dtype.names
for ii in range(len(dset)):
obj = moose__.element(dset['path'][ii])
for f in wfields[obj.className]:
obj.setField(f, dset[f][ii])
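# A minimal usage sketch (assuming this module is importable as moose.hdfutil
# and a MOOSE model has already been built; the file name is a placeholder):
#
#   from moose import hdfutil
#
#   hdfutil.savestate("snapshot.hdf5")     # dump the element tree and fields
#   hdfutil.restorestate("snapshot.hdf5")  # rebuild the saved element tree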
#
# hdfutil.py ends here
|
dilawar/moose-full
|
moose-core/python/moose/hdfutil.py
|
Python
|
gpl-2.0
| 13,698
|
[
"MOOSE"
] |
b433a2d8cd8c76fd47698e4b954b4c920c56d2166b81833695d4babeb78be06d
|
import unittest
from test import support
from itertools import *
from weakref import proxy
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
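# Quick illustration of the helpers above (values follow directly from the
# definitions; count() comes from the itertools star-import at the top):
#
#   take(3, count())   # -> [0, 1, 2]
#   fact(4)            # -> 24
#   prod([2, 3, 4])    # -> 24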
class TestBasicOps(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
self.assertEqual(list(combinations('abc', 32)), []) # r > n
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
self.assertEqual(list(cwr('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
        self.assertRaises(TypeError, compress, range(6), None, 7) # too many args
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(list(islice(count(-maxsize-5), 10)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(list(islice(count(10, maxsize+5), 3)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(next(c), -8)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i)).replace('L', '')
r2 = 'count(%r)'.__mod__(i).replace('L', '')
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
self.assertEqual(next(pickle.loads(pickle.dumps(c))), value)
        #check proper internal error handling for large "step" sizes
count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
        self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i, j)).replace('L', '')
if j == 1:
r2 = ('count(%r)' % i).replace('L', '')
else:
r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
self.assertEqual(r1, r2)
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, lambda r:r[0]):
for ik, ig in groupby(g, lambda r:r[2]):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, lambda r:r[0])]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
def test_zip(self):
        # XXX This is rather silly now that the former itertools.izip() is just the builtin zip()...
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
# Check tuple re-use (implementation detail)
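        # Note (editorial): when the caller does not keep the previous result
        # tuple alive, CPython's zip() may recycle the same tuple object, so
        # identical id()s below indicate re-use, while materializing the whole
        # list first keeps every tuple alive and forces distinct objects.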
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
                self.fail('Did not raise TypeError in: ' + stmt)
# Check tuple re-use (implementation detail)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
        # Formerly, this code would fail in debug mode
        # with an undetected error and StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
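        # Note (editorial): product1 advances its index vector like an odometer,
        # bumping the rightmost position that is not yet at its limit and
        # resetting every position to its right, which reproduces product()'s
        # lexicographic ordering without materializing intermediate lists.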
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
# Test implementation detail: tuple re-use
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
def tupleize(*args):
return args
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
                ((10, 110, 3), (10, 100, 3)),
                ((10, 110), (10, 100)),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
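        # Note (editorial): islice(it, 3) pulls exactly three items from the
        # underlying iterator, so iteration over `it` resumes at 3 afterwards.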
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
self.assertRaises(TypeError, islice, range(10))
self.assertRaises(TypeError, islice, range(10), 1, 2, 3, 4)
self.assertRaises(ValueError, islice, range(10), -5, 10, 1)
self.assertRaises(ValueError, islice, range(10), 1, -5, -1)
self.assertRaises(ValueError, islice, range(10), 1, 10, -1)
self.assertRaises(ValueError, islice, range(10), 1, 10, 0)
self.assertRaises(ValueError, islice, range(10), 'a')
self.assertRaises(ValueError, islice, range(10), 'a', 1)
self.assertRaises(ValueError, islice, range(10), 1, 'a')
self.assertRaises(ValueError, islice, range(10), 'a', 1, 1)
self.assertRaises(ValueError, islice, range(10), 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
        # Issue #10323: Leave islice in a predictable state
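        # Note (editorial): once islice(c, 1, 3, 50) is exhausted it has advanced
        # the underlying count() exactly to the stop value, so next(c) yields 3.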
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
def test_tee(self):
n = 200
def irange(n):
for i in range(n):
yield i
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(range(10))
p = proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
class TestExamples(unittest.TestCase):
    def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
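        # Note (editorial): stashing each inner _grouper on the key function
        # builds a reference cycle (groupby -> keyfunc -> _grouper -> groupby)
        # that can only be reclaimed if _grouper participates in cyclic GC.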
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
from test.test_iterlen import len
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, zip, filter, filterfalse, chain, map,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError as err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
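# For example, with data starting [1, 4, 5, 6, ...], enumerate() pairs each
# value with its index; the run 4, 5, 6 at indices 1, 2, 3 gives the constant
# difference index - value = -3, so lambda t: t[0]-t[1] keys the whole run
# into a single group.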
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
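# (Editorial note: when one input runs dry its bound __next__ raises
# StopIteration; the recipe then rebuilds the cycle from the remaining
# `pending` callables via islice, dropping the exhausted iterator while the
# others keep rotating.)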
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
# doctest the examples in the library reference
support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
theheros/kbengine
|
kbe/res/scripts/common/Lib/test/test_itertools.py
|
Python
|
lgpl-3.0
| 71,090
|
[
"GULP"
] |
428e9737858a5ec2d42ebbfc60e811c93fc11aa9f5c2b6276a3d434c435c2e1f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Adrian Torrie'
SITENAME = 'adriantorrie.github.io'
SITESUBTITLE = 'Python Data Science and Machine Learning portfolio.'
SITEURL = 'https://adriantorrie.github.io'
RELATIVE_URLS = True
CATEGORY_URL = 'category/{slug}.html'
PATH = 'content'
TIMEZONE = 'Australia/Melbourne'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# LINKS = (('Pelican', 'http://getpelican.com/'),
# ('Python.org', 'http://python.org/'),
# ('Jinja2', 'http://jinja.pocoo.org/'),
# ('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('Github', 'https://github.com/adriantorrie'),
('Stackoverflow', 'http://stackoverflow.com/story/adriantorrie'),
('LinkedIn', 'https://au.linkedin.com/in/adriantorrie'),)
DEFAULT_PAGINATION = 5
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# Content admin
# STATIC_PATHS = ['images', 'figures', 'downloads', 'favicon.png']
STATIC_PATHS = ['images', 'figures', 'downloads']
IGNORE_FILES = ['*.ipynb_checkpoints']
# Theme and plugins
# For a list and description of plugins visit:
# https://github.com/getpelican/pelican-plugins/blob/master/Readme.rst
THEME = "themes/tuxlite_tbs"
MARKUP = ('md', )
PLUGIN_PATHS = ['pelican-plugins', 'plugins']
PLUGINS = ['ipynb.liquid', 'series', 'summary']
# other
TYPOGRIFY = False
DELETE_OUTPUT_DIRECTORY = False
# Following items are often useful when publishing
DISQUS_SITENAME = "adriantorrie-github-io"
GOOGLE_ANALYTICS = "UA-41099240-3"
GITHUB_URL = "https://github.com/adriantorrie/adriantorrie.github.io_src/content"
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
|
adriantorrie/adriantorrie.github.io_src
|
pelicanconf.py
|
Python
|
mit
| 1,942
|
[
"VisIt"
] |
09093445a7e91ec1eb74ec7d783c0f4f0f393eb5329258aa57c37f5d117f9755
|
import unittest
import numpy as np
from numpy.testing import assert_allclose
from doubly_stochastic_dgp.layers import GPMC_Layer, GPR_Layer
from gpflow import settings as _settings
from gpflow import session_manager as _session_manager
custom_config = _settings.get_settings()
custom_config.numerics.jitter_level = 1e-12
with _settings.temp_settings(custom_config),\
_session_manager.get_session().as_default():
from gpflow.models import SVGP, GPR
from gpflow.kernels import Matern52, RBF
from gpflow.likelihoods import Gaussian, Bernoulli, MultiClass
from gpflow.training import ScipyOptimizer
from gpflow.mean_functions import Zero, Identity, Linear, Constant
from gpflow.training import NatGradOptimizer
from gpflow import settings
from doubly_stochastic_dgp.model_zoo import DGP_Heinonen
from doubly_stochastic_dgp.dgp import DGP, DGP_Base, DGP_Quad
from doubly_stochastic_dgp.layer_initializations import init_layers_linear
np.random.seed(0)
class TestHeinonen(unittest.TestCase):
def setUp(self):
Ns, N, D_X, D_Y = 5, 6, 3, 2
self.X = np.random.uniform(size=(N, D_X))
self.Xs = self.X #np.random.uniform(size=(Ns, D_X))
self.D_Y = D_Y
def test_vs_single_layer(self):
lik = Gaussian()
lik_var = 0.01
lik.variance = lik_var
N, Ns, D_Y, D_X = self.X.shape[0], self.Xs.shape[0], self.D_Y, self.X.shape[1]
Y = np.random.randn(N, D_Y)
Ys = np.random.randn(Ns, D_Y)
kern = Matern52(self.X.shape[1], lengthscales=0.5)
# mf = Linear(A=np.random.randn(D_X, D_Y), b=np.random.randn(D_Y))
mf = Zero()
m_gpr = GPR(self.X, Y, kern, mean_function=mf)
m_gpr.likelihood.variance = lik_var
mean_gpr, var_gpr = m_gpr.predict_y(self.Xs)
test_lik_gpr = m_gpr.predict_density(self.Xs, Ys)
pred_m_gpr, pred_v_gpr = m_gpr.predict_f(self.Xs)
pred_mfull_gpr, pred_vfull_gpr = m_gpr.predict_f_full_cov(self.Xs)
kerns = []
kerns.append(Matern52(self.X.shape[1], lengthscales=0.5, variance=1e-1))
kerns.append(kern)
layer0 = GPMC_Layer(kerns[0], self.X.copy(), D_X, Identity())
layer1 = GPR_Layer(kerns[1], mf, D_Y)
m_dgp = DGP_Heinonen(self.X, Y, lik, [layer0, layer1])
mean_dgp, var_dgp = m_dgp.predict_y(self.Xs, 1)
test_lik_dgp = m_dgp.predict_density(self.Xs, Ys, 1)
pred_m_dgp, pred_v_dgp = m_dgp.predict_f(self.Xs, 1)
pred_mfull_dgp, pred_vfull_dgp = m_dgp.predict_f_full_cov(self.Xs, 1)
tol = 1e-4
assert_allclose(mean_dgp[0], mean_gpr, atol=tol, rtol=tol)
assert_allclose(test_lik_dgp, test_lik_gpr, atol=tol, rtol=tol)
assert_allclose(pred_m_dgp[0], pred_m_gpr, atol=tol, rtol=tol)
assert_allclose(pred_mfull_dgp[0], pred_mfull_gpr, atol=tol, rtol=tol)
assert_allclose(pred_vfull_dgp[0], pred_vfull_gpr, atol=tol, rtol=tol)
def test_vs_DGP2(self):
lik = Gaussian()
lik_var = 0.1
lik.variance = lik_var
N, Ns, D_Y, D_X = self.X.shape[0], self.Xs.shape[0], self.D_Y, self.X.shape[1]
q_mu = np.random.randn(N, D_X)
Y = np.random.randn(N, D_Y)
Ys = np.random.randn(Ns, D_Y)
kern1 = Matern52(self.X.shape[1], lengthscales=0.5)
kern2 = Matern52(self.X.shape[1], lengthscales=0.5)
kerns = [kern1, kern2]
# mf = Linear(A=np.random.randn(D_X, D_Y), b=np.random.randn(D_Y))
mf = Zero()
m_dgp = DGP(self.X, Y, self.X, kerns, lik, mean_function=mf, white=True)
m_dgp.layers[0].q_mu = q_mu
m_dgp.layers[0].q_sqrt = m_dgp.layers[0].q_sqrt.read_value() * 1e-24
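        # Editorial note (interpretation, not from the original authors): scaling
        # q_sqrt down to ~1e-24 makes the first layer's variational posterior
        # essentially deterministic, so a single sample through this DGP can be
        # compared against the DGP_Heinonen construction built below.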
Fs, ms, vs = m_dgp.predict_all_layers(self.Xs, 1)
Z = self.X.copy()
Z[:len(self.Xs)] = ms[0][0]
m_dgp.layers[1].feature.Z = Z # need to put the inducing points in the right place
var_list = [[m_dgp.layers[1].q_mu, m_dgp.layers[1].q_sqrt]]
NatGradOptimizer(gamma=1).minimize(m_dgp, var_list=var_list, maxiter=1)
mean_dgp, var_dgp = m_dgp.predict_y(self.Xs, 1)
test_lik_dgp = m_dgp.predict_density(self.Xs, Ys, 1)
pred_m_dgp, pred_v_gpr = m_dgp.predict_f(self.Xs, 1)
pred_mfull_dgp, pred_vfull_dgp = m_dgp.predict_f_full_cov(self.Xs, 1)
# mean_functions = [Identity(), mf]
layer0 = GPMC_Layer(kerns[0], self.X.copy(), D_X, Identity())
layer1 = GPR_Layer(kerns[1], mf, D_Y)
m_heinonen = DGP_Heinonen(self.X, Y, lik, [layer0, layer1])
m_heinonen.layers[0].q_mu = q_mu
mean_heinonen, var_heinonen = m_heinonen.predict_y(self.Xs, 1)
test_lik_heinonen = m_heinonen.predict_density(self.Xs, Ys, 1)
pred_m_heinonen, pred_v_heinonen = m_heinonen.predict_f(self.Xs, 1)
pred_mfull_heinonen, pred_vfull_heinonen = m_heinonen.predict_f_full_cov(self.Xs, 1)
tol = 1e-4
assert_allclose(mean_dgp, mean_heinonen, atol=tol, rtol=tol)
assert_allclose(test_lik_dgp, test_lik_heinonen, atol=tol, rtol=tol)
assert_allclose(pred_m_dgp, pred_m_heinonen, atol=tol, rtol=tol)
assert_allclose(pred_mfull_dgp, pred_mfull_heinonen, atol=tol, rtol=tol)
assert_allclose(pred_vfull_dgp, pred_vfull_heinonen, atol=tol, rtol=tol)
if __name__ == '__main__':
unittest.main()
|
ICL-SML/Doubly-Stochastic-DGP
|
tests/test_zoo_models.py
|
Python
|
apache-2.0
| 5,700
|
[
"Gaussian"
] |
66a38e3487a02a1cc20ef214e845fb25a73026c67470b9f0f1fe86508b56c36c
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import itertools
import unittest as ut
import unittest_decorators as utx
import tests_common
import espressomd.electrostatics
class ElectrostaticInteractionsTests:
MMM1D = None
# Handle to espresso system
system = espressomd.System(box_l=[10.0] * 3)
system.periodicity = [0, 0, 1]
system.time_step = 0.01
system.cell_system.skin = 0.4
system.cell_system.set_n_square()
system.thermostat.set_langevin(kT=0, gamma=1, seed=8)
data = np.loadtxt(tests_common.abspath("data/mmm1d_data.txt"))
p_pos = data[:, 1:4]
p_q = data[:, 4]
forces_target = data[:, 5:8]
energy_target = -7.156365298205383
allowed_error = 2e-5
def setUp(self):
self.system.periodicity = [0, 0, 1]
self.system.part.add(pos=self.p_pos, q=self.p_q)
self.mmm1d = self.MMM1D(prefactor=1.0, maxPWerror=1e-20)
self.system.actors.add(self.mmm1d)
self.system.integrator.run(steps=0)
def tearDown(self):
self.system.part.clear()
self.system.actors.clear()
def test_forces(self):
measured_f = np.copy(self.system.part[:].f)
np.testing.assert_allclose(measured_f, self.forces_target,
atol=self.allowed_error)
def test_energy(self):
measured_el_energy = self.system.analysis.energy()["coulomb"]
self.assertAlmostEqual(
measured_el_energy, self.energy_target, delta=self.allowed_error,
msg="Measured energy deviates too much from stored result")
def test_with_analytical_result(self, prefactor=1.0, accuracy=1e-4):
self.system.part.clear()
p = self.system.part.add(pos=[0, 0, 0], q=1)
self.system.part.add(pos=[0, 0, 1], q=1)
self.system.integrator.run(steps=0)
f_measured = p.f
energy_measured = self.system.analysis.energy()["total"]
target_energy_config = 1.00242505606 * prefactor
target_force_z_config = -0.99510759 * prefactor
self.assertAlmostEqual(
f_measured[0], 0, delta=self.allowed_error,
msg="Measured force in x deviates too much from analytical result")
self.assertAlmostEqual(
f_measured[1], 0, delta=self.allowed_error,
msg="Measured force in y deviates too much from analytical result")
self.assertAlmostEqual(
f_measured[2], target_force_z_config, delta=accuracy,
msg="Measured force in z deviates too much from analytical result")
self.assertAlmostEqual(
energy_measured, target_energy_config, delta=self.allowed_error,
msg="Measured energy deviates too much from analytical result")
def test_bjerrum_length_change(self):
self.system.part.clear()
self.system.actors.clear()
prefactor = 2
mmm1d = self.MMM1D(prefactor=prefactor, maxPWerror=1e-20)
self.system.actors.add(mmm1d)
self.test_with_analytical_result(prefactor=prefactor, accuracy=0.0017)
def test_exceptions(self):
self.system.actors.clear()
del self.mmm1d
# check periodicity exceptions
for periodicity in itertools.product(range(2), range(2), range(2)):
if periodicity == (0, 0, 1):
continue
self.system.periodicity = periodicity
with self.assertRaisesRegex(Exception, r"MMM1D requires periodicity \(0, 0, 1\)"):
mmm1d = self.MMM1D(prefactor=1.0, maxPWerror=1e-2)
self.system.actors.add(mmm1d)
self.system.periodicity = (0, 0, 1)
self.system.actors.clear()
@utx.skipIfMissingFeatures(["ELECTROSTATICS"])
class MMM1D_Test(ElectrostaticInteractionsTests, ut.TestCase):
def setUp(self):
self.MMM1D = espressomd.electrostatics.MMM1D
super().setUp()
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/mmm1d.py
|
Python
|
gpl-3.0
| 4,601
|
[
"ESPResSo"
] |
5bb31972b4c2deb7706b13c73db6a1bc246f65158b30179d1248181dbe781380
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
from espressopp.external.transformations import *
except ImportError:
print('Warning: numpy module not available')
|
espressopp/espressopp
|
src/external/__init__.py
|
Python
|
gpl-3.0
| 978
|
[
"ESPResSo"
] |
63d59a4f4155a9efa5ad2a5a30f8a50dad82a81634487dca20764767e14970fe
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from jinja2 import FileSystemLoader
from camelot.core.resources import resource_filename
import camelot.view
loader = FileSystemLoader(resource_filename(camelot.view.__name__, 'templates', 'CAMELOT_MAIN_DIRECTORY'))
|
kurtraschke/camelot
|
camelot/view/templates/__init__.py
|
Python
|
gpl-2.0
| 1,286
|
[
"VisIt"
] |
b6f8ea405b3a938559cb002fbc9024390d323512eb455b74689380029d8e381b
|