| text (string, 12-1.05M) | repo_name (string, 5-86) | path (string, 4-191) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for interfacing with phonopy, see https://atztogo.github.io/phonopy/
"""
from typing import Dict, List, Any
import numpy as np
from monty.dev import requires
from monty.serialization import loadfn
from scipy.interpolate import InterpolatedUnivariateSpline
from pymatgen.core import Lattice, Structure
from pymatgen.phonon.bandstructure import (
PhononBandStructure,
PhononBandStructureSymmLine,
)
from pymatgen.phonon.dos import CompletePhononDos, PhononDos
from pymatgen.phonon.gruneisen import GruneisenParameter, GruneisenPhononBandStructureSymmLine
from pymatgen.symmetry.bandstructure import HighSymmKpath
try:
from phonopy import Phonopy
from phonopy.file_IO import write_disp_yaml
from phonopy.structure.atoms import PhonopyAtoms
except ImportError:
Phonopy = None
write_disp_yaml = None
PhonopyAtoms = None
@requires(Phonopy, "phonopy not installed!") # type: ignore
def get_pmg_structure(phonopy_structure):
"""
Convert a PhonopyAtoms object to pymatgen Structure object.
Args:
phonopy_structure (PhonopyAtoms): A phonopy structure object.
"""
lattice = phonopy_structure.get_cell()
frac_coords = phonopy_structure.get_scaled_positions()
symbols = phonopy_structure.get_chemical_symbols()
masses = phonopy_structure.get_masses()
    mms = phonopy_structure.get_magnetic_moments()
    if mms is None:
        mms = [0] * len(symbols)
return Structure(
lattice,
symbols,
frac_coords,
site_properties={"phonopy_masses": masses, "magnetic_moments": mms},
)
@requires(Phonopy, "phonopy not installed!") # type: ignore
def get_phonopy_structure(pmg_structure):
"""
Convert a pymatgen Structure object to a PhonopyAtoms object.
Args:
pmg_structure (pymatgen Structure): A Pymatgen structure object.
"""
symbols = [site.specie.symbol for site in pmg_structure]
return PhonopyAtoms(
symbols=symbols,
cell=pmg_structure.lattice.matrix,
scaled_positions=pmg_structure.frac_coords,
)
def get_structure_from_dict(d):
"""
Extracts a structure from the dictionary extracted from the output
files of phonopy like phonopy.yaml or band.yaml.
Adds "phonopy_masses" in the site_properties of the structures.
Compatible with older phonopy versions.
"""
species = []
frac_coords = []
masses = []
if "points" in d:
for p in d["points"]:
species.append(p["symbol"])
frac_coords.append(p["coordinates"])
masses.append(p["mass"])
elif "atoms" in d:
for p in d["atoms"]:
species.append(p["symbol"])
frac_coords.append(p["position"])
masses.append(p["mass"])
else:
raise ValueError("The dict does not contain structural information")
return Structure(d["lattice"], species, frac_coords, site_properties={"phonopy_masses": masses})
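# Illustrative sketch (not part of the original module): the minimal dict layout
# that get_structure_from_dict() expects, with made-up NaCl-like values.
if __name__ == "__main__":
    _example_dict = {
        "lattice": [[5.6, 0.0, 0.0], [0.0, 5.6, 0.0], [0.0, 0.0, 5.6]],
        "points": [
            {"symbol": "Na", "coordinates": [0.0, 0.0, 0.0], "mass": 22.990},
            {"symbol": "Cl", "coordinates": [0.5, 0.5, 0.5], "mass": 35.453},
        ],
    }
    print(get_structure_from_dict(_example_dict).site_properties["phonopy_masses"])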
def eigvec_to_eigdispl(v, q, frac_coords, mass):
r"""
Converts a single eigenvector to an eigendisplacement in the primitive cell
according to the formula::
        exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
Compared to the modulation option in phonopy, here all the additional
multiplicative and phase factors are set to 1.
Args:
v: the vector that should be converted. A 3D complex numpy array.
q: the q point in fractional coordinates
frac_coords: the fractional coordinates of the atom
mass: the mass of the atom
"""
c = np.exp(2j * np.pi * np.dot(frac_coords, q)) / np.sqrt(mass)
return c * v
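# Illustrative sketch (hypothetical numbers, not part of the original module):
# converting one atomic eigenvector block to an eigendisplacement at q = (0.5, 0, 0).
if __name__ == "__main__":
    _v = np.array([0.1 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j])  # made-up eigenvector for one atom
    _disp = eigvec_to_eigdispl(_v, q=[0.5, 0.0, 0.0], frac_coords=[0.25, 0.25, 0.25], mass=28.085)
    print(_disp)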
def get_ph_bs_symm_line_from_dict(bands_dict, has_nac=False, labels_dict=None):
r"""
Creates a pymatgen PhononBandStructure object from the dictionary
extracted by the band.yaml file produced by phonopy. The labels
will be extracted from the dictionary, if present. If the 'eigenvector'
key is found the eigendisplacements will be calculated according to the
formula::
        exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
bands_dict: the dictionary extracted from the band.yaml file
        has_nac: True if the data have been obtained with the --nac option.
            Default is False.
labels_dict: dict that links a qpoint in frac coords to a label.
Its value will replace the data contained in the band.yaml.
"""
structure = get_structure_from_dict(bands_dict)
qpts = []
frequencies = []
eigendisplacements = []
phonopy_labels_dict = {}
for p in bands_dict["phonon"]:
q = p["q-position"]
qpts.append(q)
bands = []
eig_q = []
for b in p["band"]:
bands.append(b["frequency"])
if "eigenvector" in b:
eig_b = []
for i, eig_a in enumerate(b["eigenvector"]):
                    v = np.zeros(3, complex)
for x in range(3):
v[x] = eig_a[x][0] + eig_a[x][1] * 1j
eig_b.append(
eigvec_to_eigdispl(
v,
q,
structure[i].frac_coords,
structure.site_properties["phonopy_masses"][i],
)
)
eig_q.append(eig_b)
frequencies.append(bands)
if "label" in p:
phonopy_labels_dict[p["label"]] = p["q-position"]
if eig_q:
eigendisplacements.append(eig_q)
qpts = np.array(qpts)
# transpose to match the convention in PhononBandStructure
frequencies = np.transpose(frequencies)
if eigendisplacements:
eigendisplacements = np.transpose(eigendisplacements, (1, 0, 2, 3))
rec_latt = Lattice(bands_dict["reciprocal_lattice"])
labels_dict = labels_dict or phonopy_labels_dict
ph_bs = PhononBandStructureSymmLine(
qpts,
frequencies,
rec_latt,
has_nac=has_nac,
labels_dict=labels_dict,
structure=structure,
eigendisplacements=eigendisplacements,
)
return ph_bs
def get_ph_bs_symm_line(bands_path, has_nac=False, labels_dict=None):
r"""
Creates a pymatgen PhononBandStructure from a band.yaml file.
The labels will be extracted from the dictionary, if present.
If the 'eigenvector' key is found the eigendisplacements will be
calculated according to the formula:
        \\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
bands_path: path to the band.yaml file
        has_nac: True if the data have been obtained with the --nac option.
            Default is False.
labels_dict: dict that links a qpoint in frac coords to a label.
"""
return get_ph_bs_symm_line_from_dict(loadfn(bands_path), has_nac, labels_dict)
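# Illustrative sketch (assumes a phonopy band.yaml file is present in the working
# directory; not part of the original module):
if __name__ == "__main__":
    _bs = get_ph_bs_symm_line("band.yaml", has_nac=False)
    print(_bs.nb_bands, "bands at", _bs.nb_qpoints, "q-points")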
def get_ph_dos(total_dos_path):
"""
Creates a pymatgen PhononDos from a total_dos.dat file.
Args:
total_dos_path: path to the total_dos.dat file.
"""
a = np.loadtxt(total_dos_path)
return PhononDos(a[:, 0], a[:, 1])
def get_complete_ph_dos(partial_dos_path, phonopy_yaml_path):
"""
Creates a pymatgen CompletePhononDos from a partial_dos.dat and
phonopy.yaml files.
The second is produced when generating a Dos and is needed to extract
the structure.
Args:
partial_dos_path: path to the partial_dos.dat file.
phonopy_yaml_path: path to the phonopy.yaml file.
"""
a = np.loadtxt(partial_dos_path).transpose()
d = loadfn(phonopy_yaml_path)
structure = get_structure_from_dict(d["primitive_cell"])
total_dos = PhononDos(a[0], a[1:].sum(axis=0))
pdoss = {}
for site, pdos in zip(structure, a[1:]):
pdoss[site] = pdos.tolist()
return CompletePhononDos(structure, total_dos, pdoss)
@requires(Phonopy, "phonopy not installed!")
def get_displaced_structures(pmg_structure, atom_disp=0.01, supercell_matrix=None, yaml_fname=None, **kwargs):
r"""
Generate a set of symmetrically inequivalent displaced structures for
phonon calculations.
Args:
pmg_structure (Structure): A pymatgen structure object.
atom_disp (float): Atomic displacement. Default is 0.01 $\\AA$.
supercell_matrix (3x3 array): Scaling matrix for supercell.
yaml_fname (string): If not None, it represents the full path to
the outputting displacement yaml file, e.g. disp.yaml.
**kwargs: Parameters used in Phonopy.generate_displacement method.
Return:
A list of symmetrically inequivalent structures with displacements, in
which the first element is the perfect supercell structure.
"""
is_plusminus = kwargs.get("is_plusminus", "auto")
is_diagonal = kwargs.get("is_diagonal", True)
is_trigonal = kwargs.get("is_trigonal", False)
ph_structure = get_phonopy_structure(pmg_structure)
if supercell_matrix is None:
supercell_matrix = np.eye(3) * np.array((1, 1, 1))
phonon = Phonopy(unitcell=ph_structure, supercell_matrix=supercell_matrix)
phonon.generate_displacements(
distance=atom_disp,
is_plusminus=is_plusminus,
is_diagonal=is_diagonal,
is_trigonal=is_trigonal,
)
if yaml_fname is not None:
displacements = phonon.get_displacements()
write_disp_yaml(
displacements=displacements,
supercell=phonon.get_supercell(),
filename=yaml_fname,
)
# Supercell structures with displacement
disp_supercells = phonon.get_supercells_with_displacements()
# Perfect supercell structure
init_supercell = phonon.get_supercell()
# Structure list to be returned
structure_list = [get_pmg_structure(init_supercell)]
for c in disp_supercells:
if c is not None:
structure_list.append(get_pmg_structure(c))
return structure_list
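# Illustrative sketch (assumes phonopy is installed and a POSCAR file is available;
# the file name and 2x2x2 supercell are made up, not part of the original module):
if __name__ == "__main__":
    _struct = Structure.from_file("POSCAR")
    _sc = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
    _displaced = get_displaced_structures(_struct, atom_disp=0.01, supercell_matrix=_sc)
    print(len(_displaced), "structures; the first is the undistorted supercell")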
@requires(Phonopy, "phonopy is required to calculate phonon density of states")
def get_phonon_dos_from_fc(
structure: Structure,
supercell_matrix: np.ndarray,
force_constants: np.ndarray,
mesh_density: float = 100.0,
num_dos_steps: int = 200,
**kwargs,
) -> CompletePhononDos:
"""
Get a projected phonon density of states from phonopy force constants.
Args:
structure: A structure.
supercell_matrix: The supercell matrix used to generate the force
constants.
force_constants: The force constants in phonopy format.
mesh_density: The density of the q-point mesh. See the docstring
for the ``mesh`` argument in Phonopy.init_mesh() for more details.
num_dos_steps: Number of frequency steps in the energy grid.
**kwargs: Additional kwargs passed to the Phonopy constructor.
Returns:
The density of states.
"""
structure_phonopy = get_phonopy_structure(structure)
phonon = Phonopy(structure_phonopy, supercell_matrix=supercell_matrix, **kwargs)
phonon.set_force_constants(force_constants)
phonon.run_mesh(
mesh_density,
is_mesh_symmetry=False,
with_eigenvectors=True,
is_gamma_center=True,
)
# get min, max, step frequency
frequencies = phonon.get_mesh_dict()["frequencies"]
freq_min = frequencies.min()
freq_max = frequencies.max()
freq_pitch = (freq_max - freq_min) / num_dos_steps
phonon.run_projected_dos(freq_min=freq_min, freq_max=freq_max, freq_pitch=freq_pitch)
dos_raw = phonon.projected_dos.get_partial_dos()
pdoss = dict(zip(structure, dos_raw[1]))
total_dos = PhononDos(dos_raw[0], dos_raw[1].sum(axis=0))
return CompletePhononDos(structure, total_dos, pdoss)
@requires(Phonopy, "phonopy is required to calculate phonon band structures")
def get_phonon_band_structure_from_fc(
structure: Structure,
supercell_matrix: np.ndarray,
force_constants: np.ndarray,
mesh_density: float = 100.0,
**kwargs,
) -> PhononBandStructure:
"""
Get a uniform phonon band structure from phonopy force constants.
Args:
structure: A structure.
supercell_matrix: The supercell matrix used to generate the force
constants.
force_constants: The force constants in phonopy format.
mesh_density: The density of the q-point mesh. See the docstring
for the ``mesh`` argument in Phonopy.init_mesh() for more details.
**kwargs: Additional kwargs passed to the Phonopy constructor.
Returns:
The uniform phonon band structure.
"""
structure_phonopy = get_phonopy_structure(structure)
phonon = Phonopy(structure_phonopy, supercell_matrix=supercell_matrix, **kwargs)
phonon.set_force_constants(force_constants)
phonon.run_mesh(mesh_density, is_mesh_symmetry=False, is_gamma_center=True)
mesh = phonon.get_mesh_dict()
return PhononBandStructure(mesh["qpoints"], mesh["frequencies"], structure.lattice)
@requires(Phonopy, "phonopy is required to calculate phonon band structures")
def get_phonon_band_structure_symm_line_from_fc(
structure: Structure,
supercell_matrix: np.ndarray,
force_constants: np.ndarray,
line_density: float = 20.0,
symprec: float = 0.01,
**kwargs,
) -> PhononBandStructureSymmLine:
"""
Get a phonon band structure along a high symmetry path from phonopy force
constants.
Args:
structure: A structure.
supercell_matrix: The supercell matrix used to generate the force
constants.
force_constants: The force constants in phonopy format.
line_density: The density along the high symmetry path.
symprec: Symmetry precision passed to phonopy and used for determining
the band structure path.
**kwargs: Additional kwargs passed to the Phonopy constructor.
Returns:
The line mode band structure.
"""
structure_phonopy = get_phonopy_structure(structure)
phonon = Phonopy(structure_phonopy, supercell_matrix=supercell_matrix, symprec=symprec, **kwargs)
phonon.set_force_constants(force_constants)
kpath = HighSymmKpath(structure, symprec=symprec)
kpoints, labels = kpath.get_kpoints(line_density=line_density, coords_are_cartesian=False)
phonon.run_qpoints(kpoints)
frequencies = phonon.qpoints.get_frequencies().T
labels_dict = {a: k for a, k in zip(labels, kpoints) if a != ""}
return PhononBandStructureSymmLine(kpoints, frequencies, structure.lattice, labels_dict=labels_dict)
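# Illustrative sketch (assumptions: phonopy is installed, a POSCAR and a
# FORCE_CONSTANTS file exist, and phonopy.file_IO.parse_FORCE_CONSTANTS is used to
# read the force constants; none of this is part of the original module):
if __name__ == "__main__":
    from phonopy.file_IO import parse_FORCE_CONSTANTS

    _struct = Structure.from_file("POSCAR")
    _fc = parse_FORCE_CONSTANTS("FORCE_CONSTANTS")
    _sc = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
    _bs = get_phonon_band_structure_symm_line_from_fc(_struct, _sc, _fc, line_density=30)
    print(_bs.nb_bands)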
def get_gruneisenparameter(gruneisen_path, structure=None, structure_path=None) -> GruneisenParameter:
"""
Get Gruneisen object from gruneisen.yaml file, as obtained from phonopy (Frequencies in THz!).
The order is structure > structure path > structure from gruneisen dict.
Newer versions of phonopy include the structure in the yaml file,
the structure/structure_path is kept for compatibility.
Args:
gruneisen_path: Path to gruneisen.yaml file (frequencies have to be in THz!)
structure: pymatgen Structure object
structure_path: path to structure in a file (e.g., POSCAR)
Returns: GruneisenParameter object
"""
gruneisen_dict = loadfn(gruneisen_path)
    if structure_path and structure is None:
        structure = Structure.from_file(structure_path)
    elif structure is None:
        try:
            structure = get_structure_from_dict(gruneisen_dict)
        except ValueError:
            raise ValueError("Please provide a structure or a path to a structure file.")
qpts, multiplicities, frequencies, gruneisen = ([] for _ in range(4))
phonopy_labels_dict = {}
for p in gruneisen_dict["phonon"]:
q = p["q-position"]
qpts.append(q)
if "multiplicity" in p:
m = p["multiplicity"]
else:
m = 1
multiplicities.append(m)
bands, gruneisenband = ([] for _ in range(2))
for b in p["band"]:
bands.append(b["frequency"])
if "gruneisen" in b:
gruneisenband.append(b["gruneisen"])
frequencies.append(bands)
gruneisen.append(gruneisenband)
if "label" in p:
phonopy_labels_dict[p["label"]] = p["q-position"]
qpts_np = np.array(qpts)
multiplicities_np = np.array(multiplicities)
# transpose to match the convention in PhononBandStructure
frequencies_np = np.transpose(frequencies)
gruneisen_np = np.transpose(gruneisen)
return GruneisenParameter(
gruneisen=gruneisen_np,
qpoints=qpts_np,
multiplicities=multiplicities_np,
frequencies=frequencies_np,
structure=structure,
)
def get_gs_ph_bs_symm_line_from_dict(
gruneisen_dict, structure=None, structure_path=None, labels_dict=None, fit=False
) -> GruneisenPhononBandStructureSymmLine:
r"""
Creates a pymatgen GruneisenPhononBandStructure object from the dictionary
extracted by the gruneisen.yaml file produced by phonopy. The labels
will be extracted from the dictionary, if present. If the 'eigenvector'
key is found the eigendisplacements will be calculated according to the
formula::
        exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object. A fit algorithm can be used to replace diverging
Gruneisen values close to gamma.
Args:
gruneisen_dict (dict): the dictionary extracted from the gruneisen.yaml file
structure (Structure): pymatgen structure object
structure_path: path to structure file
labels_dict (dict): dict that links a qpoint in frac coords to a label.
Its value will replace the data contained in the band.yaml.
        fit (bool): Substitute Grueneisen parameters close to the gamma point
            with points obtained from a fit to a spline if they deviate from
            a smooth curve (i.e. if the slope changes by more than 200% in the
            range of 10% around the gamma point).
            These deviations occur because of very small frequencies
            (and therefore numerical inaccuracies) close to gamma.
"""
    if structure_path and structure is None:
        structure = Structure.from_file(structure_path)
    elif structure is None:
        try:
            structure = get_structure_from_dict(gruneisen_dict)
        except ValueError:
            raise ValueError("Please provide a structure or a path to a structure file.")
qpts, frequencies, gruneisenparameters = ([] for _ in range(3))
phonopy_labels_dict = {} # type: Dict[Any,Any]
if fit:
for pa in gruneisen_dict["path"]:
phonon = pa["phonon"] # This is a list
start = pa["phonon"][0]
end = pa["phonon"][-1]
if start["q-position"] == [0, 0, 0]: # Gamma at start of band
qpts_temp, frequencies_temp, gruneisen_temp, distance = (
[] for _ in range(4)
) # type: List[Any],List[Any],List[Any],List[Any]
for i in range(pa["nqpoint"]):
bands, gruneisenband = ([] for _ in range(2)) # type: List[Any], List[Any]
for b in phonon[pa["nqpoint"] - i - 1]["band"]:
bands.append(b["frequency"])
# Fraction of leftover points in current band
gruen = _extrapolate_grun(b, distance, gruneisen_temp, gruneisenband, i, pa)
gruneisenband.append(gruen)
q = phonon[pa["nqpoint"] - i - 1]["q-position"]
qpts_temp.append(q)
d = phonon[pa["nqpoint"] - i - 1]["distance"]
distance.append(d)
frequencies_temp.append(bands)
gruneisen_temp.append(gruneisenband)
if "label" in phonon[pa["nqpoint"] - i - 1]:
                        phonopy_labels_dict[phonon[pa["nqpoint"] - i - 1]["label"]] = phonon[pa["nqpoint"] - i - 1][
                            "q-position"
                        ]
qpts.extend(list(reversed(qpts_temp)))
frequencies.extend(list(reversed(frequencies_temp)))
gruneisenparameters.extend(list(reversed(gruneisen_temp)))
elif end["q-position"] == [0, 0, 0]: # Gamma at end of band
distance = []
for i in range(pa["nqpoint"]):
bands, gruneisenband = ([] for _ in range(2))
for b in phonon[i]["band"]:
bands.append(b["frequency"])
gruen = _extrapolate_grun(b, distance, gruneisenparameters, gruneisenband, i, pa)
gruneisenband.append(gruen)
q = phonon[i]["q-position"]
qpts.append(q)
d = phonon[i]["distance"]
distance.append(d)
frequencies.append(bands)
gruneisenparameters.append(gruneisenband)
if "label" in phonon[i]:
phonopy_labels_dict[phonon[i]["label"]] = phonon[i]["q-position"]
else: # No Gamma in band
distance = []
for i in range(pa["nqpoint"]):
bands, gruneisenband = ([] for _ in range(2))
for b in phonon[i]["band"]:
bands.append(b["frequency"])
gruneisenband.append(b["gruneisen"])
q = phonon[i]["q-position"]
qpts.append(q)
d = phonon[i]["distance"]
distance.append(d)
frequencies.append(bands)
gruneisenparameters.append(gruneisenband)
if "label" in phonon[i]:
phonopy_labels_dict[phonon[i]["label"]] = phonon[i]["q-position"]
else:
for pa in gruneisen_dict["path"]:
for p in pa["phonon"]:
q = p["q-position"]
qpts.append(q)
bands, gruneisen_bands = ([] for _ in range(2))
for b in p["band"]:
bands.append(b["frequency"])
gruneisen_bands.append(b["gruneisen"])
frequencies.append(bands)
gruneisenparameters.append(gruneisen_bands)
if "label" in p:
phonopy_labels_dict[p["label"]] = p["q-position"]
qpts_np = np.array(qpts)
# transpose to match the convention in PhononBandStructure
frequencies_np = np.transpose(frequencies)
gruneisenparameters_np = np.transpose(gruneisenparameters)
rec_latt = structure.lattice.reciprocal_lattice
labels_dict = labels_dict or phonopy_labels_dict
return GruneisenPhononBandStructureSymmLine(
qpoints=qpts_np,
frequencies=frequencies_np,
gruneisenparameters=gruneisenparameters_np,
lattice=rec_latt,
labels_dict=labels_dict,
structure=structure,
eigendisplacements=None,
)
def _extrapolate_grun(b, distance, gruneisenparameter, gruneisenband, i, pa):
leftover_fraction = (pa["nqpoint"] - i - 1) / pa["nqpoint"]
if leftover_fraction < 0.1:
diff = abs(b["gruneisen"] - gruneisenparameter[-1][len(gruneisenband)]) / abs(
gruneisenparameter[-2][len(gruneisenband)] - gruneisenparameter[-1][len(gruneisenband)]
)
if diff > 2:
x = list(range(len(distance)))
y = [i[len(gruneisenband)] for i in gruneisenparameter]
y = y[-len(x) :] # Only elements of current band
extrapolator = InterpolatedUnivariateSpline(x, y, k=5)
g_extrapolated = extrapolator(len(distance))
gruen = float(g_extrapolated)
else:
gruen = b["gruneisen"]
else:
gruen = b["gruneisen"]
return gruen
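# Illustrative sketch of the extrapolation idea used above (toy numbers, not part of
# the original module): fit a degree-5 spline to smooth values of one band and
# evaluate it one step past the last known point.
if __name__ == "__main__":
    _x = list(range(10))
    _y = [0.5 * xi**2 for xi in _x]  # smooth, made-up Grueneisen-like values
    _spline = InterpolatedUnivariateSpline(_x, _y, k=5)
    print(float(_spline(10)))  # extrapolated value at the next point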
def get_gruneisen_ph_bs_symm_line(gruneisen_path, structure=None, structure_path=None, labels_dict=None, fit=False):
r"""
Creates a pymatgen GruneisenPhononBandStructure from a band.yaml file.
The labels will be extracted from the dictionary, if present.
If the 'eigenvector' key is found the eigendisplacements will be
calculated according to the formula:
        \\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
gruneisen_path: path to the band.yaml file
        structure: pymatgen Structure object
structure_path: path to a structure file (e.g., POSCAR)
labels_dict: dict that links a qpoint in frac coords to a label.
        fit: Substitute Grueneisen parameters close to the gamma point
            with points obtained from a fit to a spline if they deviate from
            a smooth curve (i.e. if the slope changes by more than 200% in the
            range of 10% around the gamma point).
            These deviations occur because of very small frequencies
            (and therefore numerical inaccuracies) close to gamma.
"""
return get_gs_ph_bs_symm_line_from_dict(loadfn(gruneisen_path), structure, structure_path, labels_dict, fit)
| vorwerkc/pymatgen | pymatgen/io/phonopy.py | Python | mit | 25,292 | ["phonopy", "pymatgen"] | 189cce12a4724d9ba9083a272cb97726d2eaa8f3abe26f7c43c6bca9aefae542 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio-Client.
# Copyright (C) 2014 CERN.
#
# Invenio-Client is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio-Client is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Tool to connect to remote Invenio servers using Invenio APIs.
Example of use:
.. code-block:: python
from invenio_client import InvenioConnector
demo = InvenioConnector("http://demo.invenio-software.org")
results = demo.search("higgs")
for record in results:
        print(record["245__a"][0])
        print(record["520__b"][0])
        for author in record["100__"]:
            print(author["a"][0], author["u"][0])
FIXME:
- implement cache expiration
- exceptions handling
- parsing of ``<!-- Search-Engine-Total-Number-Of-Results: N -->``
- better checking of input parameters
"""
from __future__ import print_function
import os
import re
import requests
import json
import splinter
import sys
import tempfile
import time
import xml.sax
from requests.exceptions import (ConnectionError, InvalidSchema, InvalidURL,
MissingSchema, RequestException)
from ._compat import binary_type
from .version import __version__
CFG_USER_AGENT = "invenio_connector"
class InvenioConnectorError(Exception):
"""General connector error."""
def __init__(self, value):
"""Set the internal "value" attribute."""
super(InvenioConnectorError, self).__init__()
self.value = value
def __str__(self):
"""Return oneself as a string based on self.value."""
return str(self.value)
class InvenioConnectorAuthError(InvenioConnectorError):
"""Failed authentication during remote connections."""
class InvenioConnectorServerError(InvenioConnectorError):
"""Problem with connecting to Invenio server."""
class InvenioConnector(object):
"""Create an connector to a server running Invenio."""
def __init__(self, url, user="", password="", login_method="Local",
insecure_login=False):
"""
Initialize a new instance of the server at given URL.
If the server happens to be running on the local machine, the
access will be done directly using the Python APIs. In that case
you can choose from which base path to import the necessary file
specifying the local_import_path parameter.
:param url: the url to which this instance will be connected.
Defaults to CFG_SITE_URL, if available.
:type url: string
:param user: the optional username for interacting with the Invenio
instance in an authenticated way.
:type user: string
:param password: the corresponding password.
:type password: string
:param login_method: the name of the login method the Invenio instance
is expecting for this user (in case there is more than one).
:type login_method: string
"""
assert url is not None
self.server_url = url
self._validate_server_url()
self.cached_queries = {}
self.cached_records = {}
self.cached_baskets = {}
self.user = user
self.password = password
self.login_method = login_method
self.browser = None
self.cookies = {}
if self.user:
if not insecure_login and \
not self.server_url.startswith('https://'):
raise InvenioConnectorAuthError(
"You have to use a secure URL (HTTPS) to login")
self._init_browser()
self._check_credentials()
def _init_browser(self):
"""Overide in appropriate way to prepare a logged in browser."""
self.browser = splinter.Browser('phantomjs')
self.browser.visit(self.server_url + "/youraccount/login")
try:
self.browser.fill('nickname', self.user)
self.browser.fill('password', self.password)
        except Exception:
self.browser.fill('p_un', self.user)
self.browser.fill('p_pw', self.password)
self.browser.fill('login_method', self.login_method)
self.browser.find_by_css('input[type=submit]').click()
def _check_credentials(self):
if not len(self.browser.cookies.all()):
raise InvenioConnectorAuthError(
"It was not possible to successfully login with "
"the provided credentials")
self.cookies = self.browser.cookies.all()
def search(self, read_cache=True, ssl_verify=True, **kwparams):
"""
Returns records corresponding to the given search query.
See docstring of invenio.legacy.search_engine.perform_request_search()
for an overview of available parameters.
"""
parse_results = False
of = kwparams.get('of', "")
if of == "":
parse_results = True
of = "xm"
kwparams['of'] = of
params = kwparams
cache_key = (json.dumps(params), parse_results)
if cache_key not in self.cached_queries or \
not read_cache:
results = requests.get(self.server_url + "/search",
params=params, cookies=self.cookies,
stream=True, verify=ssl_verify)
if 'youraccount/login' in results.url:
# Current user not able to search collection
raise InvenioConnectorAuthError(
"You are trying to search a restricted collection. "
"Please authenticate yourself.\n")
else:
return self.cached_queries[cache_key]
if parse_results:
# FIXME: we should not try to parse if results is string
parsed_records = self._parse_results(results.raw,
self.cached_records)
self.cached_queries[cache_key] = parsed_records
return parsed_records
else:
# pylint: disable=E1103
# The whole point of the following code is to make sure we can
# handle two types of variable.
try:
res = results.content
except AttributeError:
res = results
# pylint: enable=E1103
if of == "id":
try:
if isinstance(res, binary_type):
# Transform to list
res = [int(recid.strip()) for recid in
res.decode('utf-8').strip("[]").split(",")
if recid.strip() != ""]
res.reverse()
except (ValueError, AttributeError):
res = []
self.cached_queries[cache_key] = res
return res
def search_with_retry(self, sleeptime=3.0, retrycount=3, **params):
"""Perform a search given a dictionary of ``search(...)`` parameters.
It accounts for server timeouts as necessary and will retry some number
of times.
:param sleeptime: number of seconds to sleep between retries
:param retrycount: number of times to retry given search
:param params: search parameters
:return: records in given format
"""
results = []
count = 0
while count < retrycount:
try:
results = self.search(**params)
break
except requests.exceptions.Timeout:
sys.stderr.write("Timeout while searching...Retrying\n")
time.sleep(sleeptime)
count += 1
else:
sys.stderr.write(
"Aborting search after %d attempts.\n" % (retrycount,))
return results
def search_similar_records(self, recid):
"""Return the records similar to the given one."""
return self.search(p="recid:" + str(recid), rm="wrd")
def search_records_cited_by(self, recid):
"""Return records cited by the given one."""
return self.search(p="recid:" + str(recid), rm="citation")
def get_records_from_basket(self, bskid, group_basket=False,
read_cache=True):
"""
Returns the records from the (public) basket with given bskid
"""
if bskid not in self.cached_baskets or not read_cache:
if self.user:
if group_basket:
group_basket = '&category=G'
else:
group_basket = ''
results = requests.get(
self.server_url + "/yourbaskets/display?of=xm&bskid=" +
str(bskid) + group_basket, cookies=self.cookies,
stream=True)
else:
results = requests.get(
self.server_url +
"/yourbaskets/display_public?of=xm&bskid=" + str(bskid),
stream=True)
else:
return self.cached_baskets[bskid]
parsed_records = self._parse_results(results.raw, self.cached_records)
self.cached_baskets[bskid] = parsed_records
return parsed_records
def get_record(self, recid, read_cache=True):
"""Return the record with given recid."""
if recid in self.cached_records or not read_cache:
return self.cached_records[recid]
else:
return self.search(p="recid:" + str(recid))
def upload_marcxml(self, marcxml, mode):
"""Upload a record to the server.
:param marcxml: the XML to upload.
:param mode: the mode to use for the upload.
- "-i" insert new records
- "-r" replace existing records
- "-c" correct fields of records
- "-a" append fields to records
- "-ir" insert record or replace if it exists
"""
if mode not in ["-i", "-r", "-c", "-a", "-ir"]:
raise NameError("Incorrect mode " + str(mode))
return requests.post(self.server_url + "/batchuploader/robotupload",
data={'file': marcxml, 'mode': mode},
headers={'User-Agent': CFG_USER_AGENT})
def _parse_results(self, results, cached_records):
"""
Parses the given results (in MARCXML format).
The given "cached_records" list is a pool of
already existing parsed records (in order to
avoid keeping several times the same records in memory)
"""
parser = xml.sax.make_parser()
handler = RecordsHandler(cached_records)
parser.setContentHandler(handler)
parser.parse(results)
return handler.records
def _validate_server_url(self):
"""Validates self.server_url"""
try:
request = requests.head(self.server_url)
if request.status_code >= 400:
raise InvenioConnectorServerError(
"Unexpected status code '%d' accessing URL: %s"
% (request.status_code, self.server_url))
except (InvalidSchema, MissingSchema) as err:
raise InvenioConnectorServerError(
"Bad schema, expecting http:// or https://:\n %s" % (err,))
except ConnectionError as err:
raise InvenioConnectorServerError(
"Couldn't establish connection to '%s':\n %s"
% (self.server_url, err))
except InvalidURL as err:
raise InvenioConnectorServerError(
"Invalid URL '%s':\n %s"
% (self.server_url, err))
except RequestException as err:
raise InvenioConnectorServerError(
"Unknown error connecting to '%s':\n %s"
% (self.server_url, err))
class Record(dict):
"""Represent an Invenio record."""
def __init__(self, recid=None, marcxml=None, server_url=None):
self.recid = recid
self.marcxml = ""
if marcxml is not None:
self.marcxml = marcxml
self.server_url = server_url
def __setitem__(self, item, value):
tag, ind1, ind2, subcode = decompose_code(item)
if subcode is not None:
super(Record, self).__setitem__(
tag + ind1 + ind2, [{subcode: [value]}])
else:
super(Record, self).__setitem__(tag + ind1 + ind2, value)
def __getitem__(self, item):
tag, ind1, ind2, subcode = decompose_code(item)
datafields = dict.__getitem__(self, tag + ind1 + ind2)
if subcode is not None:
subfields = []
for datafield in datafields:
if subcode in datafield:
subfields.extend(datafield[subcode])
return subfields
else:
return datafields
def __repr__(self):
return "Record(" + dict.__repr__(self) + ")"
def __str__(self):
return self.marcxml
def export(self, of="marcxml"):
"""
Returns the record in chosen format
"""
return self.marcxml
def url(self):
"""
Returns the URL to this record.
Returns None if not known
"""
if self.server_url is not None and \
self.recid is not None:
return '/'.join(
[self.server_url, CFG_SITE_RECORD, str(self.recid)])
else:
return None
class RecordsHandler(xml.sax.handler.ContentHandler):
"MARCXML Parser"
def __init__(self, records):
"""Initialize MARCXML Parser.
:param records: dictionary with an already existing cache of records
"""
self.cached_records = records
self.records = []
self.in_record = False
self.in_controlfield = False
self.in_datafield = False
self.in_subfield = False
self.cur_tag = None
self.cur_subfield = None
self.cur_controlfield = None
self.cur_datafield = None
self.cur_record = None
self.recid = 0
self.buffer = ""
self.counts = 0
def startElement(self, name, attributes):
if name == "record":
self.cur_record = Record()
self.in_record = True
elif name == "controlfield":
tag = attributes["tag"]
self.cur_datafield = ""
self.cur_tag = tag
self.cur_controlfield = []
if tag not in self.cur_record:
self.cur_record[tag] = self.cur_controlfield
self.in_controlfield = True
elif name == "datafield":
tag = attributes["tag"]
self.cur_tag = tag
ind1 = attributes["ind1"]
if ind1 == " ":
ind1 = "_"
ind2 = attributes["ind2"]
if ind2 == " ":
ind2 = "_"
if tag + ind1 + ind2 not in self.cur_record:
self.cur_record[tag + ind1 + ind2] = []
self.cur_datafield = {}
self.cur_record[tag + ind1 + ind2].append(self.cur_datafield)
self.in_datafield = True
elif name == "subfield":
subcode = attributes["code"]
if subcode not in self.cur_datafield:
self.cur_subfield = []
self.cur_datafield[subcode] = self.cur_subfield
else:
self.cur_subfield = self.cur_datafield[subcode]
self.in_subfield = True
def characters(self, data):
if self.in_subfield:
self.buffer += data
elif self.in_controlfield:
self.buffer += data
elif "Search-Engine-Total-Number-Of-Results:" in data:
print(data)
            match_obj = re.search(r"\d+", data)
if match_obj:
print(int(match_obj.group()))
self.counts = int(match_obj.group())
def endElement(self, name):
if name == "record":
self.in_record = False
elif name == "controlfield":
if self.cur_tag == "001":
self.recid = int(self.buffer)
if self.recid in self.cached_records:
# Record has already been parsed, no need to add
pass
else:
# Add record to the global cache
self.cached_records[self.recid] = self.cur_record
# Add record to the ordered list of results
self.records.append(self.cached_records[self.recid])
self.cur_controlfield.append(self.buffer)
self.in_controlfield = False
self.buffer = ""
elif name == "datafield":
self.in_datafield = False
elif name == "subfield":
self.in_subfield = False
self.cur_subfield.append(self.buffer)
self.buffer = ""
def decompose_code(code):
"""Decompose a MARC "code" into tag, ind1, ind2, subcode."""
code = "%-6s" % code
ind1 = code[3:4]
if ind1 == " ":
ind1 = "_"
ind2 = code[4:5]
if ind2 == " ":
ind2 = "_"
subcode = code[5:6]
if subcode == " ":
subcode = None
return (code[0:3], ind1, ind2, subcode)
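# Illustrative sketch (not part of the original module): decomposing MARC codes
# into (tag, ind1, ind2, subcode).
if __name__ == "__main__":
    print(decompose_code("245__a"))  # -> ('245', '_', '_', 'a')
    print(decompose_code("001"))     # -> ('001', '_', '_', None)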
| SamiHiltunen/invenio-client | invenio_client/connector.py | Python | gpl-2.0 | 18,049 | ["VisIt"] | 1258b93d0a77f154eab276fd132dd023f1232e6b9d6344f03d3d9a473a3cc215 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Calculates the Angular Harmonic potential as:
.. math::
U = K (\\theta - \\theta_0)^2,
where angle :math:`\\theta` is the planar angle formed by three bonded particles
(triplet or triple). The usual coefficient of :math:`1/2` is included in :math:`K`.
This potential is employed by:
.. py:class:: espressopp.interaction.AngularHarmonic (K = 1.0, theta0 = 0.0)
:param real K: energy amplitude
:param real theta0: angle in radians
:rtype: triple potential
A triple potential applied to every triple in the system creates an *interaction*.
This is done via:
.. py:class:: espressopp.interaction.FixedTripleListAngularHarmonic (system, fixed_triple_list, potential)
:param shared_ptr system: system object
:param list fixed_triple_list: a fixed list of all triples in the system
:param potential: triple potential (in this case, :py:class:`espressopp.interaction.AngularHarmonic`).
:rtype: interaction
**Methods**
.. py:method:: getFixedTripleList()
:rtype: A Python list of fixed triples (e.g., in the chains).
.. py:method:: setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
**Example 1.** Creating a fixed triple list by :py:class:`espressopp.FixedTripleList`.
>>> # we assume a polymer solution of n_chains of the length chain_len each.
>>> # At first, create a list_of_triples for the system:
>>> N = n_chains * chain_len # number of particles in the system
>>> list_of_triples = [] # empty list of triples
>>> for n in range (n_chains): # loop over chains
>>> for m in range (chain_len): # loop over chain beads
>>> pid = n * chain_len + m
>>> if (pid > 1) and (pid < N - 1):
>>> list_of_triples.append( (pid-1, pid, pid+1) )
>>>
>>> # create fixed triple list
>>> fixed_triple_list = espressopp.FixedTripleList(system.storage)
>>> fixed_triple_list.addTriples(list_of_triples)
**Example 2.** Employing an Angular Harmonic potential.
>>> # Note, the fixed_triple_list has to be generated in advance! (see Example 1)
>>>
>>> # set up the potential
>>> potAngHarm = espressopp.interaction.AngularHarmonic(K=0.5, theta0=0.0)
>>>
>>> # set up the interaction
>>> interAngHarm = espressopp.interaction.FixedTripleListAngularHarmonic(system, fixed_triple_list, potAngHarm)
>>>
>>> # finally, add the interaction to the system
>>> system.addInteraction(interAngHarm)
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.AngularPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_AngularHarmonic, interaction_FixedTripleListAngularHarmonic
class AngularHarmonicLocal(AngularPotentialLocal, interaction_AngularHarmonic):
def __init__(self, K=1.0, theta0=0.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_AngularHarmonic, K, theta0)
class FixedTripleListAngularHarmonicLocal(InteractionLocal, interaction_FixedTripleListAngularHarmonic):
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedTripleListAngularHarmonic, system, vl, potential)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
if pmi.isController:
class AngularHarmonic(AngularPotential):
pmiproxydefs = dict(
cls = 'espressopp.interaction.AngularHarmonicLocal',
pmiproperty = ['K', 'theta0']
)
class FixedTripleListAngularHarmonic(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleListAngularHarmonicLocal',
pmicall = ['setPotential', 'getFixedTripleList']
)
| govarguz/espressopp | src/interaction/AngularHarmonic.py | Python | gpl-3.0 | 5,154 | ["ESPResSo"] | a15c68f9a3c0ed3a619609d5eae6a54d3cd4577420c98c1dd02ae6f003c8c6f9 |
#!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001-05-20 12:51:29 $
Pearu Peterson
"""
__version__ = "$Id: Scalars.py,v 1.1 2001-05-20 12:51:29 pearu Exp $"
import DataSetAttr
import string
class Scalars(DataSetAttr.DataSetAttr):
"""Holds VTK scalars.
"""
def __init__(self,scalars,name=None,lookup_table=None):
self.name = self._get_name(name)
self.lookup_table = self._get_lookup_table(lookup_table)
self.scalars = self.get_seq(scalars,[])
def to_string(self,format='ascii'):
t = self.get_datatype(self.scalars)
ret = ['SCALARS %s %s %s'%(self.name,t,1),
'LOOKUP_TABLE %s'%(self.lookup_table)]
ret.append(self.seq_to_string(self.scalars,format,t))
return string.join(ret,'\n')
def get_size(self):
return len(self.scalars)
if __name__ == "__main__":
print Scalars([3,4,240]).to_string('binary')
| chunshen1987/iSS | utilities/for_paraview/lib152/Scalars.py | Python | mit | 1,178 | ["VTK"] | f1cf92e10982d1a7919e2e1c88f6eff08f488da0b9225fa5ff087841538e5470 |
#!/usr/bin/env python
# This is the script that runs a testing python script.
# The script to be run must be the first argument.
import sys
if len(sys.argv) < 2:
print "Usage %s <test script> [<addition arguments>]" % sys.argv[0]
sys.exit(1)
for i in range(2, len(sys.argv)):
if sys.argv[i] == '-A' and i < len(sys.argv)-1:
sys.path = sys.path + [sys.argv[i+1]]
import vtk
import math
#these are the modules that define methods/variables
#used by many scripts. We just include them always
from backdrop import *
from mccases import *
import expr
import catch
import info
import file
from vtk.util.colors import *
#implementation for lindex.
def lindex(list, index):
if type(list) == type("string"):
return list.split()[index]
return list[index]
#gets: with no varName it returns the read string; with varName and global_vars
#it stores the line in global_vars[varName] and returns its length.
def gets(file, varName=None, global_vars=None):
    line = file.readline()
    if line[-1] == "\n":
        line = line[:-1]
    if varName is None:
        return line
    global_vars[varName] = line
    return len(line)
def tcl_platform(what):
if what != "platform":
raise "Only platform supported as yet!"
plat = sys.platform
if plat[:5] == "linux":
return "unix"
return plat
def get_variable_name(*args):
var_name = ""
for arg in args:
if arg == "":
continue
# it is essential to qualify the scope of type since
# some test define type variable which messes up the
# bultin call.
if __builtins__.type(arg) == __builtins__.type("string"):
var_name += arg
else:
var_name += `arg`
return var_name
#init Tk
try:
import Tkinter
pythonTk = Tkinter.Tk()
pythonTk.withdraw()
except:
pythonTk = None
pass #no hassles if Tk is not present.
# setup some common things for testing
rtTempObject = vtk.vtkObject()
rtExMath = vtk.vtkMath()
rtExMath.RandomSeed(6)
# create the testing class to do the work
rtTester = vtk.vtkTesting()
for arg in sys.argv[2:]:
rtTester.AddArgument(arg)
VTK_DATA_ROOT = rtTester.GetDataRoot()
# load in the script
test_script = sys.argv[1]
# set the default threshold, the Tcl script may change this
threshold = -1
# we pass the locals over so that the test script has access to
# all the locals we have defined here.
execfile(test_script, globals(), locals())
local_variables_dict = locals()
if "iren" in local_variables_dict.keys():
renWin.Render()
if pythonTk:
# run the event loop quickly to map any tkwidget windows
pythonTk.update()
rtResult = 0
if rtTester.IsValidImageSpecified() != 0:
# look for a renderWindow ImageWindow or ImageViewer
# first check for some common names
if "renWin" in local_variables_dict.keys():
rtTester.SetRenderWindow(renWin)
if threshold == -1:
threshold = 10
else:
if threshold == -1:
threshold = 5
if "viewer" in local_variables_dict.keys():
rtTester.SetRenderWindow(viewer.GetRenderWindow())
viewer.Render()
elif "imgWin" in local_variables_dict.keys():
rtTester.SetRenderWindow(imgWin)
imgWin.Render()
rtResult = rtTester.RegressionTest(threshold)
if rtTester.IsInteractiveModeSpecified() != 0:
if "iren" in local_variables_dict.keys():
iren.Start()
if rtResult == 0:
sys.exit(1)
sys.exit(0)
| naucoin/VTKSlicerWidgets | Utilities/vtkTclTest2Py/rtImageTest.py | Python | bsd-3-clause | 3,453 | ["VTK"] | 2be6a5bfc9e80c5aca6a51afe066a8ac758813210b27c09b3f5f2a7c66ae5a02 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(value.values,
value.indices,
value.dense_shape[0],
name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
return pending_count, loop_state
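# Illustrative sketch (plain-Python toy analogue, not part of the original module):
# pending_count[op] is how many consumers of op lie between the xs and the ys. In
# this toy graph (x -> a -> y, x -> b -> y) every op is "between", so the count
# reduces to the number of consumers of each op.
if __name__ == "__main__":
    _toy_inputs = {"y": ["a", "b"], "a": ["x"], "b": ["x"], "x": []}  # op -> input ops
    _pending = collections.defaultdict(int)
    for _op, _ins in _toy_inputs.items():
        for _inp in _ins:
            _pending[_inp] += 1
    print(dict(_pending))  # {'a': 1, 'b': 1, 'x': 2}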
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If one of the grad_ys is invalid.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
if grad_y is None:
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
grad_ys[i] = array_ops.fill(
array_ops.shape(y),
constant_op.constant(1, dtype=y.dtype))
else:
if grad_y.dtype != y.dtype:
raise ValueError("Y and ys_grad must be of the same type, "
"not y: %s, ys_grad: %s " %
(dtypes.as_dtype(y.dtype).name,
dtypes.as_dtype(grad_y.dtype).name))
return grad_ys
def _IsFloat(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float32, dtypes.float64)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if the gradients are invalid.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is not None:
if not grad.dtype.is_compatible_with(inp.dtype):
raise ValueError("Gradient type %s generated for op %s does "
"not match input type %s" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the partial
derivatives of `ys` with respect to `xs`. It returns a list of
`Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
for an operations. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
"""
ys = _AsList(ys)
xs = _AsList(xs)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.op_scope(ys + xs + grad_ys, name, "gradients"):
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
pending_count, loop_state = _PendingCount(ops.get_default_graph(),
to_ops, from_ops,
colocate_gradients_with_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into an Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
if loop_state:
# The "unused" exits of the loops are added to ys. As an example,
# people often write:
# v1, _ = While(p, b, [x1, x2])
# result = gradients(v1, x1)
# The exit node of x2 is not included by the betweenness analysis.
# But we need it if x2 is involved in computing v1. So we add it
# back in backprop with a zeros_like gradient.
loop_exits = loop_state.GetAllLoopExits()
for y in loop_exits:
if pending_count[y.op._id] == 0 and y.op._id not in to_ops_set:
if _IsFloat(y):
# Floating-point outputs get a zero gradient.
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
# The set of 'from_ops'.
stop_ops = _StopOps(from_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
is_func_call = ops.get_default_graph()._is_function(op.type)
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op._id not in stop_ops):
if is_func_call:
grad_fn = ops.get_default_graph()._function_python_gradient.get(
op.type, None)
# pylint: enable=protected-access
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor)
and not out_grad) and _IsFloat(op.outputs[i]):
# Only floating-point outputs get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _AsList(grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
# pylint: disable=protected-access
in_grads = _AsList(functional_ops._symbolic_gradient(
f_in, f_types, op.type))
# pylint: enable=protected-access
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(
[x for x in in_grads if x is not None]) > 1:
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagates a list of None backwards.
in_grads = [None] * len(op.inputs)
for t_in, in_grad in zip(op.inputs, in_grads):
if in_grad is not None:
if isinstance(in_grad, ops.Tensor):
in_grad.set_shape(t_in.get_shape())
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# update pending count for the inputs of op and enqueue ready ops.
# pylint: disable=protected-access
for x in op.inputs:
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_ops.IsLoopSwitch(x.op))
if ready:
queue.append(x.op)
# pylint: enable=protected-access
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_ops.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(
g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops.colocate_with(tensors[0].op, ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
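# Illustrative sketch (hypothetical tensors): when a tensor feeds several consumers its
# gradient is the sum of their contributions, and `aggregation_method` only selects the
# graph used to form that sum.
#   x = tf.constant([1.0, 2.0])
#   y = x * 2.0 + x * 3.0          # x is consumed twice
#   g = gradients(y, [x], aggregation_method=AggregationMethod.EXPERIMENTAL_TREE)
#   # g[0] evaluates to [5.0, 5.0] with any method; ADD_N sums with a single AddN op,
#   # EXPERIMENTAL_TREE sums pairwise so intermediate tensors can be released earlier.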
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of gradients, one per each output of `op`. If the gradients
for a particular output is a list, this function aggregates it
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [AggregationMethod.ADD_N,
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]:
raise ValueError(
"Invalid aggregation_method specified %s." % aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_ops.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and
not all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in out_grad if g is not None])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list([g for g in out_grad
if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat(0, [x.values for x in out_grad]),
array_ops.concat(0, [x.indices
for x in out_grad]), out_grad[0].dense_shape)
else:
out_grads[i] = []
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
  as 1/2 (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [math_ops.mul(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v)
if grad_elem is not None]
# Second backprop
return gradients(elemwise_products, xs)
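# Minimal usage sketch (hypothetical tensors; names are illustrative only):
#   x = tf.Variable([1.0, 2.0])
#   A = tf.constant([[2.0, 0.0], [0.0, 4.0]])
#   y = 0.5 * tf.reduce_sum(x * tf.matmul(A, tf.reshape(x, [2, 1]))[:, 0])
#   hvp = _hessian_vector_product(y, [x], [tf.constant([1.0, 1.0])])
#   # With this symmetric A the Hessian is A itself, so the result evaluates to [2.0, 4.0].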
|
natanielruiz/android-yolo
|
jni-build/jni/include/tensorflow/python/ops/gradients.py
|
Python
|
apache-2.0
| 29,579
|
[
"VisIt"
] |
3a9b3cc989c5c3ddb8ec5bd030bd636dd28a68cab3397fcc3ee71d76d2963d24
|
import sys
sys.path.append('..')
import pytest
import os.path
import unittest
from nyctext import nycaddress as parser
class Address(unittest.TestCase):
def __init__(self, *args, **kwds):
super(Address, self).__init__(*args, **kwds)
self.cwd = os.path.dirname(__file__)
def checkExpectation(self, source, expected, verbose=False):
addresses = parser.parse(source, verbose)
if verbose:
print 'source: %s' % source
print 'expected: %s' % expected
print 'got: %s' % addresses
for loc in addresses:
self.assertIn(loc, expected)
expected.remove(loc)
self.assertEqual(expected, [])
def testBasicStreet(self):
'basic - street avenue place plaza boulevard'
lus = 'street avenue place plaza boulevard'.split(' ')
template = '12 Fleet %s Manhattan, NY'
for lu in lus:
source = template % lu
self.checkExpectation(source, [source])
def testWithNumberStreet(self):
'basic - with numbered street'
lus = 'street avenue place plaza boulevard'.split(' ')
template = '123 11 %s Manhattan, NY'
for lu in lus:
source = template % lu
self.checkExpectation(source, [source])
def testWithNumberStreetAndEnding(self):
'basic - with numbered street and -st ending'
lus = 'street avenue place plaza boulevard'.split(' ')
template = '123 1st %s Manhattan, NY'
for lu in lus:
source = template % lu
self.checkExpectation(source, [source])
def testWithNumberStreetWithDash(self):
'basic - with numbered street and dash'
lus = 'street avenue place plaza boulevard'.split(' ')
template = '123-45 Acacia %s Manhattan, NY'
for lu in lus:
source = template % lu
self.checkExpectation(source, [source])
def testWithCompassDirections(self):
'basic - with compass directions'
lus = 'street avenue place plaza boulevard'.split(' ')
points = 'E W S N'.split(' ')
template = '12 %s Fordham %%s Bronx, NY'
x_template = '12 %s Fordham %%s Bronx, NY'
for pt in points:
tp = template % pt
xt = x_template % pt
for lu in lus:
source = tp % lu
expect = [xt % lu]
self.checkExpectation(source, expect)
def testWithNumberStreetWithDashAndRoom(self):
'basic - test with dash and Room'
lus = 'street avenue place plaza boulevard'.split(' ')
template = '100 Gold %s, Room 5-O8, New York, NY'
x_template = '100 Gold %s, Room 5-O8, Manhattan, NY'
for lu in lus:
source = template % lu
expect = [x_template % lu]
self.checkExpectation(source, expect, verbose=True)
def testIT(self):
'basic - long island city '
source = '''30-30 Thomson Avenue Long Island City, NY 11101 '''
expect = ['30-30 Thomson Avenue, Long Island City, Queens, NY']
self.checkExpectation(source, expect)
def testNYNY(self):
'basic - NYNY'
source = '55 Water Street, 9th Floor SW, New York, NY 10041'
expect = ['55 Water Street, 9th Floor SW, Manhattan, NY']
self.checkExpectation(source, expect)
def testSaintAnneAvenue(self):
'name with apostrophe'
expected = [u"600 Saint Ann's Avenue Bronx, NY"]
text = "Academy of Science: 600 Saint Ann's Avenue Bronx, NY"
address = parser.parse(text)[0]
self.assertIn(address, expected)
def testStreetNamePreTypeAveOfAmericas(self):
'find Avenue of the Americas'
expected = "131 Avenue Of The Americas Manhattan, NY"
text = 'blab blah bleu %s foo fe hu' % expected
got = parser.parse(text)
self.assertIn(got[0], [expected])
def testStreetNamePreTypes(self):
'test Avenue xxx'
expected = [
"1600 Avenue L Brooklyn, NY",
"3000 Avenue X Brooklyn, NY",
"50 Avenue X Brooklyn, NY"
]
for text in expected:
text = 'blab blah bleu %s foo fe hu' % text
got = parser.parse(text)[0]
self.assertIn(got, expected)
def testAddressWithMultipleCity(self):
'test ... Queens, NY NY, NY finds first address'
text = "11 W. 19th Street, NY, NY 10011 , New York, NY"
expected = "11 W 19th Street, Manhattan, NY"
got = parser.parse(text)[0]
self.assertEqual(expected, got)
def testHighwayAbbreviations(self):
'hwy, expy'
# 3 IN 1 KITCHEN: 4902 FORT HAMILTON PARKWAY BROOKLYN, NY
text = "238 KINGS HWY BROOKLYN, NY"
expected = "238 KINGS Highway BROOKLYN, NY"
got = parser.parse(text)[0]
self.assertEqual(got, expected)
text = "3050 WHITESTONE EXPY QUEENS, NY"
expected = "3050 WHITESTONE Expressway QUEENS, NY"
got = parser.parse(text)[0]
self.assertEqual(got, expected)
def testAptAndSuite(self):
'handle ste and apt'
text = "35 WEST 89TH STREET APT. 1A NEW YORK, NY"
expected = "35 WEST 89TH STREET Apt 1A Manhattan, NY"
got = parser.parse(text)[0]
self.assertEqual(got, expected)
text = "35 WEST 89TH STREET STE. 1A NEW YORK, NY"
expected = "35 WEST 89TH STREET Suite 1A Manhattan, NY"
got = parser.parse(text)[0]
self.assertEqual(got, expected)
def testInferredStreet(self):
'infer street in manhattan'
text = "10 W 15th , New York, NY"
expected = "10 W 15th Street, Manhattan, NY"
got = parser.parse(text)[0]
self.assertEqual(got, expected)
def testPeriodBetweenDirectionAndStreet(self):
'period handled between direction and street'
text = "Decker Design: 14W.23rd Street 3rd Floor, New York, NY"
expected = "14 W 23rd Street 3rd Floor, Manhattan, NY"
got = parser.parse(text)[0]
self.assertEqual(got, expected)
def testSaintNotStreet(self):
#This is unimplemented
'701 St. Anns should resolve to Saint Anns instead of Street Anns'
expected = ['701 Saint Anns Avenue Bronx, NY']
for text in expected:
print text
text = 'blab blah bleu %s foo fe hu' % text
got = parser.parse(text, verbose=True)[0]
self.assertIn(got, expected)
def testInitials(self):
text = '''
1180 Reverend J.A. Polite Ave. Bronx, NY.
'''
expected = [
'1180 Reverend J A Polite Avenue Bronx, NY'
]
got = parser.parse(text)[0]
self.assertIn(got, expected)
def testColumbusCircle(self):
'basic - Columbus Circle'
lus = 'Circle Cir. Cir'.split(' ')
for lu in lus:
source = '4 Columbus %s NY, NY' % lu
expect = ['4 Columbus Circle Manhattan, NY']
self.checkExpectation(source, expect, True)
|
cds-amal/addressparser
|
tests/testAddresses.py
|
Python
|
mit
| 7,092
|
[
"COLUMBUS"
] |
2548d5f72017c7121b9bb49c39e389e47f047951ba655f325a9a57adbb170aee
|
"""
===========
Description
===========
bbcflib is a set of Python modules for accessing facilities used by
the Bioinformatics and Biostatistics Core Facility (BBCF) at the EPFL.
It provides modules for BBCF's
* *GenRep* (genome data repository),
* the LIMS of the DNA Array Facilities at UNIL and UNIGE,
* the standard frontend code for workflows written by Fabrice David,
* sending job completion emails.
* the *GDV* api for viewing genome tracks
* *mapseq* to map reads to reference genomes
* *chipseq* to run ChIP-seq analyses
* *rnaseq* to map reads to reference transcriptomes and compute statistics of differential expression
* *snp* to search for SNP with respect to a reference genome
All the functionality can be imported with::
import bbcflib.module
where module is one of:
* ``GenRep`` and ``Assembly`` from :doc:`bbcflib_genrep`
* ``EmailReport`` from :doc:`bbcflib_email`
* ``DAFLIMS`` from :doc:`bbcflib_daflims`
* ``Frontend`` from :doc:`bbcflib_frontend`
* ``mapseq`` from :doc:`bbcflib_mapseq`
* ``chipseq`` from :doc:`bbcflib_chipseq`
* ``rnaseq`` from :doc:`bbcflib_rnaseq`
* ``gdv`` from :doc:`bbcflib_gdv`
* ``snp`` from :doc:`bbcflib_snp`
============
Installation
============
bbcflib requires:
* Python >=2.6
* mailer >=0.6 (http://pypi.python.org/pypi/mailer)
* bein (http://bbcf.epfl.ch/bein/)
* numpy (http://numpy.scipy.org/)
* pysam (http://code.google.com/p/pysam/)
Latest source code is available from GitHub::
http://github.com/bbcf/bbcflib
by clicking on "Downloads", or by cloning the git repository with::
$ git clone https://github.com/bbcf/bbcflib.git
=======
License
=======
bbcflib is released under the GNU General Public License 3.0. A copy
of this license is in the LICENSE.txt file.
"""
b'This module needs Python 2.6 or later.'
__version__ = '2.0.0'
|
bbcf/bbcflib
|
bbcflib/__init__.py
|
Python
|
gpl-3.0
| 1,826
|
[
"pysam"
] |
e45a76589d8ece75ce5e6da1b17c260744b96cfcbbdd8f3d0cb05b4364124d42
|
import math
import random
import copy
import pickle
"""
multilayer neural network.
This version is designed for efficiency, so it is less object oriented.
The bias input is implemented by adding an extra weight to each set of inputs.
The weights are stored as a 3 dimensional array [layers][neurons_in_layer][weights_into_neuron]
e.g. 3 inputs 2 hidden neurons 1 output
The weights might be
in-hidden hidden-out
weights=[ [[1, 2, 3, 0], [3, 4, 5, -1]] , [[8, 9, 2]] ]
"""
def step(x):
"""
Threshold step function
"""
if x>0:
return 1
return 0
def sigmoid(x):
"""
transfer function with an output range 0 to 1
"""
if x < -100.0: # avoid math.exp(x) blowing up
return 0.0
return 1.0 / (1.0 + math.exp(-x))
def atan(x):
"""
transfer function with an output range -1 to 1
"""
if x < -100.:
return -1.0
if x > 100.:
return 1.0
ee=math.exp(-x)
return (1.0 -ee) / (1.0 + ee)
def randomSeed():
"""
Random number between -0.5 and 0.5
"""
return 0.5 - random.random()
class FeedForwardBrain:
"""
Basic feedforward neural net
"""
def __init__(self,size=None,func=None,weight=None):
"""
Create a multi layer network
Number of nodes in each layer is define by size
size[0] is the number of inputs
size[n] number of neurons in layer n
func is an array of activation functions (for each layer)
if this is None the activation defaults to sigmoid.
weight can be used to initialize the weights of the NN
in this case size can be None
otherwise if weight is None random values are assigned to the weights using size
Example for 2 inputs 2 hidden and 1 output
size=[2,2,1]
"""
self.layer_size = []
if weight != None:
self.weight=weight
self.layer_size.append(len(weight[0][0])-1)
for i in range(len(self.weight)):
self.layer_size.append(len(weight[i]))
else:
for i in range(len(size)):
self.layer_size.append(size[i])
# print self.layer_size
self.num_layer=len(self.layer_size)
if func == None:
func=[]
for _ in range(self.num_layer):
func.append(sigmoid)
self.func=func
#// allocate memory for output of each neuron
self.out = [] # new float[num_layer][];
for i in range(self.num_layer):
self.out.append([]);
a=self.out[i]
for _ in range(self.layer_size[i]):
a.append(0.0)
if weight == None:
self.weight=[]
for i in range(self.num_layer-1):
layer=[]
for _ in range(self.layer_size[i+1]):
w=[]
for _ in range(self.layer_size[i]):
w.append(randomSeed())
w.append(randomSeed())
layer.append(w)
self.weight.append(layer)
def ffwd(self,x):
"""
input: x list of input values
returns: list of output values.
"""
# assign content to input layer
for i in range(self.layer_size[0]):
self.out[0][i] = x[i] # output_from_neuron(layer,j) Jth neuron in Ith Layer
# assign output(activation) value
# to each neuron using sigmoid func
for layer in range(self.num_layer-1): # For each layer
for j in range(self.layer_size[layer+1]): # For each neuron in current layer
sum = 0.0;
                for k in range(self.layer_size[layer]): # For input from each neuron in preceding layer
sum += self.out[layer][k] * self.weight[layer][j][k]; # Apply weight to inputs and add to sum
sum += self.weight[layer][j][self.layer_size[layer]]; # Apply bias
self.out[layer+1][j] = self.func[layer](sum); # Apply transfer function
return self.out[self.num_layer - 1];
def copyWeights(self):
"""
Return a copy of the weights
"""
        return copy.deepcopy(self.weight)
def clone(self):
"""
Create a new brain which is the same as this one.
"""
clone=FeedForwardBrain(self.layer_size,self.func,self.weight)
return clone
#------------------ More advanced functionality (you can ignore this) ---------------------------------------------------
def resize_inputs(self,nIn):
"""
Add extra inputs to the network
"""
assert nIn > self.layer_size[0]
for _ in range(nIn-self.layer_size[0]):
self.out[0].append(0.0)
for a in self.weight[1]:
wLast=a.pop()
a.append(0.0)
for _ in range(nIn-self.layer_size[0]-1):
a.append(0.0)
a.append(wLast)
if self.layer_size[0]<nIn:
self.layer_size[0]=nIn
def mutate1(brain,amount):
"""
mutate *all* the weights by a random amount.
amount: range of mutation is amount*0.5
"""
# for all layers with inputs
for i in range(1,brain.num_layer):
a=brain.weight[i]
# for all neurons in the layer
for j in range(brain.layer_size[i]):
r=a[j]
for k in range(brain.layer_size[i-1]+1):
r[k]=r[k]+randomSeed()*amount
def dist(brain1,brain2):
"""
    WARNING UNTESTED --- might be useful for implementing niches
WARNING NO ERROR CHECKING (brains must be same topology)
sqrt(sum of diff weights squared)
compares brain1 and brain2 by calculating Euclidean distance between weight vectors
"""
sum=0.0
# for all layers with inputs
for i in range(1,brain1.num_layer):
a=brain1.weight[i]
b=brain2.weight[i]
# for all neurons in the layer
for j in range(brain1.layer_size[i]):
ra=a[j]
rb=b[j]
for k in range(brain1.layer_size[i-1]+1):
sum += (ra[k]-rb[k])**2
return math.sqrt(sum)
def mutate2(brain,amount):
"""
Only mutate a random section of each set of inputs
"""
for i in range(1,brain.num_layer):
a=brain.weight[i]
for j in range(brain.layer_size[i]):
r=a[j]
k=random.randint(0, brain.layer_size[i-1]+1)
z=random.randint(0, (brain.layer_size[i-1]+1))
while k < z:
r[k]=r[k]+randomSeed()*amount
k=k+1
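# Minimal usage sketch (illustrative numbers only):
#   brain = FeedForwardBrain(size=[2, 2, 1])   # 2 inputs, 2 hidden neurons, 1 output
#   out = brain.ffwd([0.5, -0.25])             # list holding one sigmoid output in (0, 1)
#   child = brain.clone()                      # new brain built from the same weight lists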
|
pauljohnleonard/pod-world
|
CI_2014/ATTIC/OLD_SIMULATION_CODE/CartWithIP/brain.py
|
Python
|
gpl-2.0
| 7,638
|
[
"NEURON"
] |
711480f6597589ea36b29853a20de7cc10ad17a2d25e932fcdce81a8b70b30fa
|
#!/usr/bin/python
"""
Copyright 2013 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import ghShared
class resourceStats:
def __init__(self):
self.CR = 0
self.CD = 0
self.DR = 0
self.FL = 0
self.HR = 0
self.MA = 0
self.PE = 0
self.OQ = 0
self.SR = 0
self.UT = 0
self.ER = 0
class resourcePlanet:
def __init__(self, planetID=0, planetName="", enteredBy=""):
self.planetID = planetID
self.planetName = planetName
self.enteredBy = enteredBy
class resourceSpawn:
def __init__(self):
self.spawnID = 0
self.spawnName = ""
self.spawnGalaxy = 0
self.resourceType = ""
self.resourceTypeName = ""
self.containerType = ""
self.favorite = 0
self.favGroup = ""
self.units = 0
self.inventoryType = ""
self.groupName = ""
self.groupList = ""
self.stats = resourceStats()
self.percentStats = resourceStats()
self.overallScore = 0
self.entered = None
self.enteredBy = ""
self.verified = None
self.verifiedBy = ""
self.unavailable = None
self.unavailableBy = ""
self.planets = []
self.maxWaypointConc = None
def getPlanetBar(self):
result = '<ul class="planetBar">'
criteriaStr = ''
for planet in self.planets:
result = result + ' <li class="planetBarBox'
if (planet.enteredBy != None):
result = result + ' ' + planet.planetName.replace(' ','')
result +='"'
if (planet.enteredBy != None):
result = result + ' title="'+planet.planetName+' marked available by '+planet.enteredBy+'"'
result = result + ' onclick="planetRemove(this,'+str(planet.planetID)+','+str(self.spawnID)+',\''+planet.planetName+'\');"'
else:
result = result + ' title="'+planet.planetName+' - not available"'
result = result + ' onclick="planetAdd(this,'+str(planet.planetID)+','+str(self.spawnID)+',\''+planet.planetName+'\');"'
result = result + '>'+planet.planetName[0]+'</li>'
result += '</ul>'
return result
def getHTML(self, editable, formatStyle, resBoxMargin):
result = ''
unPlanetStr = ",'all'"
statHeads = ""
statVals = ""
titleStr = ""
# style 0 is wide format
if formatStyle == 0:
resBoxStyle = "margin-left:" + resBoxMargin + ";min-width:500px;"
# style 2 is survey tool format
elif formatStyle == 2:
resBoxStyle = "padding-left:" + resBoxMargin + ";"
else:
# other is compact style
resBoxStyle = ""
# prepare stat value table contents
if (self.percentStats.ER != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>ER</span></td>"
if (self.stats.ER != None and self.percentStats.ER != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.ER)+"'><span>" + str(self.stats.ER) + "<br />(" + ("%.0f" % float(self.percentStats.ER)) + "%)</span></td>"
elif (self.percentStats.ER != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.CR != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>CR</span></td>"
if (self.stats.CR != None and self.percentStats.CR != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.CR)+"'><span>" + str(self.stats.CR) + "<br />(" + ("%.0f" % float(self.percentStats.CR)) + "%)</span></td>"
elif (self.percentStats.CR != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.CD != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>CD</span></td>"
if (self.stats.CD != None and self.percentStats.CD != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.CD)+"'><span>" + str(self.stats.CD) + "<br />(" + ("%.0f" % float(self.percentStats.CD)) + "%)</span></td>"
elif (self.percentStats.CD != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.DR != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>DR</span></td>"
if (self.stats.DR != None and self.percentStats.DR != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.DR)+"'><span>" + str(self.stats.DR) + "<br />(" + ("%.0f" % float(self.percentStats.DR)) + "%)</span></td>"
elif (self.percentStats.DR != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.FL != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>FL</span></td>"
if (self.stats.FL != None and self.percentStats.FL != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.FL)+"'><span>" + str(self.stats.FL) + "<br />(" + ("%.0f" % float(self.percentStats.FL)) + "%)</span></td>"
elif (self.percentStats.FL != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.HR != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>HR</span></td>"
if (self.stats.HR != None and self.percentStats.HR != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.HR)+"'><span>" + str(self.stats.HR) + "<br />(" + ("%.0f" % float(self.percentStats.HR)) + "%)</span></td>"
elif (self.percentStats.HR != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.MA != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>MA</span></td>"
if (self.stats.MA != None and self.percentStats.MA != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.MA)+"'><span>" + str(self.stats.MA) + "<br />(" + ("%.0f" % float(self.percentStats.MA)) + "%)</span></td>"
elif (self.percentStats.MA != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.PE != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>PE</span></td>"
if (self.stats.PE != None and self.percentStats.PE != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.PE)+"'><span>" + str(self.stats.PE) + "<br />(" + ("%.0f" % float(self.percentStats.PE)) + "%)</span></td>"
elif (self.percentStats.PE != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.OQ != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>OQ</span></td>"
if (self.stats.OQ != None and self.percentStats.OQ != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.OQ)+"'><span>" + str(self.stats.OQ) + "<br />(" + ("%.0f" % float(self.percentStats.OQ)) + "%)</span></td>"
elif (self.percentStats.OQ != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.SR != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>SR</span></td>"
if (self.stats.SR != None and self.percentStats.SR != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.SR)+"'><span>" + str(self.stats.SR) + "<br />(" + ("%.0f" % float(self.percentStats.SR)) + "%)</span></td>"
elif (self.percentStats.SR != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
if (self.percentStats.UT != None or formatStyle == 0):
statHeads = statHeads + "<td class='header'><span>UT</span></td>"
if (self.stats.UT != None and self.percentStats.UT != None):
statVals = statVals + "<td class='"+ghShared.percOfRangeColor(self.percentStats.UT)+"'><span>" + str(self.stats.UT) + "<br />(" + ("%.0f" % float(self.percentStats.UT)) + "%)</span></td>"
elif (self.percentStats.UT != None):
statVals = statVals + "<td>?</td>"
else:
statVals = statVals + "<td></td>"
# construct resource container
if self.overallScore != None and self.overallScore > 0:
titleStr = ' title="' + str("%.0f" % (float(self.overallScore))) + '"'
if formatStyle == 2:
result += ' <div id="cont_'+self.spawnName+'" class="boxBorderHidden" style="' + resBoxStyle + '"' + titleStr + ' onmouseover="$(this).removeClass(\'boxBorderHidden\');$(this).addClass(\'listSelected\');" onmouseout="$(this).removeClass(\'listSelected\');$(this).addClass(\'boxBorderHidden\');">'
else:
result += ' <div id="cont_'+self.spawnName+'" class="resourceBox" style="' + resBoxStyle + '"' + titleStr + '>'
if self.overallScore != None and self.overallScore > 0:
result += ' <div class="compareInfo"><span>Quality: ' + str("%.0f" % (float(self.overallScore))) + '</span></div>'
# resource title row
if formatStyle == 0:
result += ' <div style="text-align:left;"><div class="inlineBlock" style="width:55%;text-align:left;float:left;"><span style="font-size: 12px;font-weight: bold;"><a href="' + ghShared.BASE_SCRIPT_URL + 'resource.py/'+str(self.spawnGalaxy)+'/'+self.spawnName+'" class="nameLink">'+self.spawnName+'</a></span>'
elif formatStyle == 2:
result += ' <div style="margin-bottom:4px;text-align:left;"><span style="font-size: 12px;"><a href="' + ghShared.BASE_SCRIPT_URL + 'resource.py/'+str(self.spawnGalaxy)+'/'+self.spawnName+'" class="nameLink">'+self.spawnName+'</a></span>'
else:
result += ' <div style="margin-bottom:4px;text-align:left;"><span style="font-size: 12px;font-weight: bold;"><a href="' + ghShared.BASE_SCRIPT_URL + 'resource.py/'+str(self.spawnGalaxy)+'/'+self.spawnName+'" class="nameLink">'+self.spawnName+'</a></span>'
if (editable > 0):
if formatStyle != 2:
result += ' <a alt="Edit Stats" style="cursor: pointer;" onclick="editStats(this, \''+self.spawnName+'\');">[Edit]</a>'
if formatStyle != 1:
if formatStyle == 2:
result += ' <div style="width:100px;float:right;"><input type="checkbox" id="chkRemove_' + self.spawnName + '" />Remove</div>'
else:
result += ' <a alt="Mark Unavailable" style="cursor: pointer;" onclick="markUnavailable(this, \''+self.spawnName+'\', '+str(self.spawnGalaxy)+unPlanetStr+');"> [X]</a>'
# non-stat info
if formatStyle == 0:
result += ' <span style="color:#000033;"><a href="' + ghShared.BASE_SCRIPT_URL + 'resourceType.py/'+self.resourceType+'" title="View recent spawns of this type" class="nameLink">'+self.resourceTypeName+'</a></span></div>'
result += ' <div class="inlineBlock" style="margin-top:2px;margin-right:4px;width:44%;text-align:right;float:right;">'+self.getPlanetBar()+'</div>'
result += ' </div><div><div style="height:32px;float:left;"><img src="/images/resources/'+self.containerType+'.png"/></div>'
elif formatStyle == 1:
result += ' '+ghShared.timeAgo(self.entered)+' ago by <a href="user.py?uid='+self.enteredBy+'" class="nameLink">'+self.enteredBy+'</a>'
result += ' </div>'
result += ' <div>'
result += ' <div style="height:32px;float:left;"><img src="/images/resources/'+self.containerType+'.png" /></div>'
result += ' <div style="width:90px;float:left;"><a href="' + ghShared.BASE_SCRIPT_URL + 'resourceType.py/'+self.resourceType+'" title="View schematics and recent spawns of this type" class="nameLink">'+self.resourceTypeName+'</a></div>'
else:
result += ''
# favorite indicator
if editable > 0:
if self.favorite > 0:
result += ' <div class="inlineBlock" style="width:3%;float:left;"><a alt="Favorite" title="Favorite" style="cursor: pointer;" onclick="toggleFavorite(this, \''+str(self.spawnID)+'\');"><img src="/images/favorite16On.png" /></a></div>'
else:
result += ' <div class="inlineBlock" style="width:3%;float:left;"><a alt="Favorite" title="Favorite" style="cursor: pointer;" onclick="toggleFavorite(this, \''+str(self.spawnID)+'\');"><img src="/images/favorite16Off.png" /></a></div>'
# stats information table
if formatStyle == 0:
result += ' <div style="width:275px;float:left;">'
elif formatStyle == 1:
result += ' <div style="float:left;">'
else:
result += ''
if formatStyle != 2:
result += ' <table class="resAttr"><tr>' + statHeads + '</tr><tr>' + statVals + '</tr>'
result += ' </table></div></div>'
if formatStyle == 0:
# resource update information
result += ' <div style="clear:both;">'
# add waypoints indicator
if self.maxWaypointConc != None:
result += '<div style="float:right;"><a href="' + ghShared.BASE_SCRIPT_URL + 'resource.py/'+str(self.spawnGalaxy)+'/'+self.spawnName+'"><img src="/images/waypointMarker.png" alt="waypoint marker" title="waypoint(s) available (best is ' + str(self.maxWaypointConc) + '%)" width="20" /></a></div>'
# entered
result += ' <div class="inlineBlock" style="width:33%;float:right;"><img src="/images/circleBlue16.png" alt="Entered" title="Entered" /><span style="vertical-align:4px;">' + ghShared.timeAgo(self.entered)+' ago by <a href="/user.py?uid='+self.enteredBy+'">'+self.enteredBy+'</a></span></div>'
# verified
result += ' <div class="inlineBlock" style="width:33%;float:right;">'
if (self.verified != None):
result += ' <img src="/images/checkGreen16.png" alt="Verified" title="Verified" /><span style="vertical-align:4px;">' + ghShared.timeAgo(self.verified) + ' ago by <a href="/user.py?uid='+self.verifiedBy+'">'+self.verifiedBy+'</a></span>'
else:
if (self.unavailable == None and editable > 0):
result += ' <span id="cont_verify_'+self.spawnName+'"><img src="/images/checkGrey16.png" alt="Not Verified" title="Not Verified" /><span style="vertical-align:4px;"><a alt="Verify Resource" style="cursor: pointer;" onclick="quickAdd(null, \''+self.spawnName+'\');">[Verify]</a></span></span>'
# unavailable
result += ' </div><div class="inlineBlock" style="width:32%;float:right;">'
if (self.unavailable != None):
result += ' <img src="/images/xRed16.png" alt="Unavailable" title="Unavailable" /><span style="vertical-align:4px;">' + ghShared.timeAgo(self.unavailable) + ' ago by <a href="/user.py?uid='+self.unavailableBy+'">'+self.unavailableBy+'</a></span>'
result += ' </div>'
else:
result += ' <div style="width: 180px;clear:both;margin-left:64px;">'+self.getPlanetBar()+'</div>'
else:
if (self.unavailable == None and editable > 0 and ghShared.timeAgo(self.verified).find('minute') == -1):
result += ' <div id="cont_verify_'+self.spawnName+'" style="width:100px;float:right;"><input type="checkbox" id="chkVerify_' + self.spawnName + '" />Verify</div>'
result += ' </div>'
return result
def getMobileHTML(self, editable):
result = ''
statHeads = ""
statVals = ""
titleStr = ""
# prepare stat value table contents
if (self.stats.ER != None and self.percentStats.ER != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.ER)+"'> ER: " + str(self.stats.ER) + "(" + ("%.0f" % float(self.percentStats.ER)) + "%)</span>"
if (self.stats.CR != None and self.percentStats.CR != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.CR)+"'> CR: " + str(self.stats.CR) + "(" + ("%.0f" % float(self.percentStats.CR)) + "%)</span>"
if (self.stats.CD != None and self.percentStats.CD != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.CD)+"'> CD: " + str(self.stats.CD) + "(" + ("%.0f" % float(self.percentStats.CD)) + "%)</span>"
if (self.stats.DR != None and self.percentStats.DR != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.DR)+"'> DR: " + str(self.stats.DR) + "(" + ("%.0f" % float(self.percentStats.DR)) + "%)</span>"
if (self.stats.FL != None and self.percentStats.FL != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.FL)+"'> FL: " + str(self.stats.FL) + "(" + ("%.0f" % float(self.percentStats.FL)) + "%)</span>"
if (self.stats.HR != None and self.percentStats.HR != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.HR)+"'> HR: " + str(self.stats.HR) + "(" + ("%.0f" % float(self.percentStats.HR)) + "%)</span>"
if (self.stats.MA != None and self.percentStats.MA != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.MA)+"'> MA: " + str(self.stats.MA) + "(" + ("%.0f" % float(self.percentStats.MA)) + "%)</span>"
if (self.stats.PE != None and self.percentStats.PE != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.PE)+"'> PE: " + str(self.stats.PE) + "(" + ("%.0f" % float(self.percentStats.PE)) + "%)</span>"
if (self.stats.OQ != None and self.percentStats.OQ != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.OQ)+"'> OQ: " + str(self.stats.OQ) + "(" + ("%.0f" % float(self.percentStats.OQ)) + "%)</span>"
if (self.stats.SR != None and self.percentStats.SR != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.SR)+"'> SR: " + str(self.stats.SR) + "(" + ("%.0f" % float(self.percentStats.SR)) + "%)</span>"
if (self.stats.UT != None and self.percentStats.UT != None):
statVals = statVals + "<span class='"+ghShared.percOfRangeColor(self.percentStats.UT)+"'>" + str(self.stats.UT) + "(" + ("%.0f" % float(self.percentStats.UT)) + "%)</span>"
# construct resource container
if self.overallScore != None and self.overallScore > 0:
titleStr = ' title="' + str("%.0f" % (float(self.overallScore))) + '"'
result += ' <a href="' + ghShared.BASE_SCRIPT_URL + 'resource.py/'+str(self.spawnGalaxy)+'/'+self.spawnName+'" class="nameLink">'
result += ' <div id="cont_'+self.spawnName+'" class="control"' + titleStr + ' style="text-align:left;">'
# resource title row
result += ' <div style="margin-bottom:4px;">'
result += ' '+self.resourceTypeName+' ('+self.spawnName + ')'
# non-stat info
result += ' entered '+ghShared.timeAgo(self.entered)+' ago by '+self.enteredBy
if (editable > 0):
result += ' <span title="Mark Unavailable" style="cursor: pointer;float:right;" onclick="markUnavailable(this, \''+self.spawnName+'\', '+str(self.spawnGalaxy)+',\'all\');"> [X]</span>'
result += ' </div>'
# stats information
result += ' <div>'
result += statVals
result += ' </div></div></a>'
return result
def getRow(self, editable):
result = ''
statVals = ""
titleStr = ""
result += '<tr id="cont_'+self.spawnName+'" name="cont_'+self.spawnName+'" class="statRow ui-draggable">'
# favorite indicator
if editable > 0:
if self.favorite > 0:
result += ' <td class="dragColumn" style="width:20px;"><a alt="Favorite" title="Favorite" style="cursor: pointer;" onclick="toggleFavorite(this, \''+str(self.spawnID)+'\');"><img src="/images/favorite16On.png" /></a></td>'
else:
result += ' <td class="dragColumn" style="width:20px;"><a alt="Favorite" title="Favorite" style="cursor: pointer;" onclick="toggleFavorite(this, \''+str(self.spawnID)+'\');"><img src="/images/favorite16Off.png" /></a></td>'
# resource title row
if self.unavailable != None:
styleAdd = "background-image:url(/images/xRed16.png);background-repeat:no-repeat;background-position:2px 2px;"
elif self.verified != None:
styleAdd = "background-image:url(/images/checkGreen16.png);background-repeat:no-repeat;background-position:2px 2px;"
else:
styleAdd = "background-image:url(/images/circleBlue16.png);background-repeat:no-repeat;background-position:2px 2px;"
result += ' <td class="dragColumn" style="width:90px;' + styleAdd + '"><span style="font-size: 12px;font-weight: bold;"><a href="' + ghShared.BASE_SCRIPT_URL + 'resource.py/'+str(self.spawnGalaxy)+'/'+self.spawnName+'" class="nameLink">'+self.spawnName+'</a></td>'
result += ' <td class="dragColumn" style="width:160px"><a href="' + ghShared.BASE_SCRIPT_URL + 'resourceType.py/'+self.resourceType+'" title="View recent spawns of this type" class="nameLink">'+self.resourceTypeName+'</a></td>'
# prepare stat value table contents
if (self.stats.ER != None and self.percentStats.ER != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.ER)+"'><span title='(" + ("%.0f" % float(self.percentStats.ER)) + "%)'>" + str(self.stats.ER) + "</span></td>"
elif (self.percentStats.ER != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.CR != None and self.percentStats.CR != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.CR)+"'><span title='(" + ("%.0f" % float(self.percentStats.CR)) + "%)'>" + str(self.stats.CR) + "</span></td>"
elif (self.percentStats.CR != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.CD != None and self.percentStats.CD != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.CD)+"'><span title='(" + ("%.0f" % float(self.percentStats.CD)) + "%)'>" + str(self.stats.CD) + "</span></td>"
elif (self.percentStats.CD != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.DR != None and self.percentStats.DR != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.DR)+"'><span title='(" + ("%.0f" % float(self.percentStats.DR)) + "%)'>" + str(self.stats.DR) + "</span></td>"
elif (self.percentStats.DR != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.FL != None and self.percentStats.FL != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.FL)+"'><span title='(" + ("%.0f" % float(self.percentStats.FL)) + "%)'>" + str(self.stats.FL) + "</span></td>"
elif (self.percentStats.FL != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.HR != None and self.percentStats.HR != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.HR)+"'><span title='(" + ("%.0f" % float(self.percentStats.HR)) + "%)'>" + str(self.stats.HR) + "</span></td>"
elif (self.percentStats.HR != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.MA != None and self.percentStats.MA != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.MA)+"'><span title='(" + ("%.0f" % float(self.percentStats.MA)) + "%)'>" + str(self.stats.MA) + "</span></td>"
elif (self.percentStats.MA != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.PE != None and self.percentStats.PE != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.PE)+"'><span title='(" + ("%.0f" % float(self.percentStats.PE)) + "%)'>" + str(self.stats.PE) + "</span></td>"
elif (self.percentStats.PE != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.OQ != None and self.percentStats.OQ != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.OQ)+"'><span title='(" + ("%.0f" % float(self.percentStats.OQ)) + "%)'>" + str(self.stats.OQ) + "</span></td>"
elif (self.percentStats.OQ != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.SR != None and self.percentStats.SR != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.SR)+"'><span title='(" + ("%.0f" % float(self.percentStats.SR)) + "%)'>" + str(self.stats.SR) + "</span></td>"
elif (self.percentStats.SR != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
if (self.stats.UT != None and self.percentStats.UT != None):
statVals = statVals + "<td style='width:30px;' class='"+ghShared.percOfRangeColor(self.percentStats.UT)+"'><span title='(" + ("%.0f" % float(self.percentStats.UT)) + "%)'>" + str(self.stats.UT) + "</span></td>"
elif (self.percentStats.UT != None):
statVals = statVals + "<td style='width:30px;'>?</td>"
else:
statVals = statVals + "<td style='width:30px;'></td>"
result += statVals
result += "<td><input type='text' id='units_" + str(self.spawnID) + "' size='10' maxlength='13' tag='" + str(self.units) + "' value='" + str(self.units) + "' onblur='updateFavoriteAmount(this, \"" + str(self.spawnID) + "\",this.value);' onkeyup='if($(this).attr(\"tag\")==this.value){$(this).css(\"color\",\"black\");}else{$(this).css(\"color\",\"red\");}' /></td>"
result += "<td style='width:20px'><input type='checkbox' id='chkMove_" + str(self.spawnID) + "' /></td>"
return result
def getStatList(self):
statList = ""
if self.stats.ER != None:
statList += "<span class='altText'>Entangle Resistance:</span> " + str(self.stats.ER) + "<br />"
if self.stats.CR != None:
statList += "<span class='altText'>Cold Resistance:</span> " + str(self.stats.CR) + "<br />"
if self.stats.CD != None:
statList += "<span class='altText'>Conductivity:</span> " + str(self.stats.CD) + "<br />"
if self.stats.DR != None:
statList += "<span class='altText'>Decay Resistance:</span> " + str(self.stats.DR) + "<br />"
if self.stats.FL != None:
statList += "<span class='altText'>Flavor:</span> " + str(self.stats.FL) + "<br />"
if self.stats.HR != None:
statList += "<span class='altText'>Heat Resistance:</span> " + str(self.stats.HR) + "<br />"
if self.stats.MA != None:
statList += "<span class='altText'>Malleability:</span> " + str(self.stats.MA) + "<br />"
if self.stats.PE != None:
statList += "<span class='altText'>Potential Energy:</span> " + str(self.stats.PE) + "<br />"
if self.stats.OQ != None:
statList += "<span class='altText'>Overall Quality:</span> " + str(self.stats.OQ) + "<br />"
if self.stats.SR != None:
statList += "<span class='altText'>Shock Resistance:</span> " + str(self.stats.SR) + "<br />"
if self.stats.UT != None:
statList += "<span class='altText'>Unit Toughness:</span> " + str(self.stats.UT) + "<br />"
return statList
def getInventoryObject(self):
# Return HTML of resource in inventory box
result = "<div id='resInventory" + str(self.spawnName) + "' class='inventoryItem inlineBlock' style='background-image:url(/images/resources/inventory/inv_" + self.inventoryType + ".png);background-size:64px 64px;' tag='" + self.groupList + "," + self.resourceType + ",'>"
result += "<div style='float:right;'>" + ghShared.getNumberAbbr(self.units) + "</div>"
result += "<p style='display:none;'>Loaded with: " + self.spawnName + ", " + self.resourceTypeName + "<br />"
result += self.getStatList()
result += "</p>"
result += "<div id='stackDetails" + str(self.spawnID) + "' style='display:none;' class='resourceDetails' tag='ER:" + str(self.stats.ER) + ",CR:" + str(self.stats.CR) + ",CD:" + str(self.stats.CD) + ",DR:" + str(self.stats.DR) + ",FL:" + str(self.stats.FL) + ",HR:" + str(self.stats.HR) + ",MA:" + str(self.stats.MA) + ",PE:" + str(self.stats.PE) + ",OQ:" + str(self.stats.OQ) + ",SR:" + str(self.stats.SR) + ",UT:" + str(self.stats.UT) + "'>"
result += " <div style='text-align:center;width:100%;margin-bottom:14px;'>" + self.groupName + "</div>"
result += " <span class='altText'>Resource Type:</span> " + self.spawnName + "<br />"
result += " <span class='altText'>Resource Quantity:</span> " + str(self.units) + "<br />"
result += " <span class='altText'>Resource Class:</span> " + self.resourceTypeName + "<br />"
result += self.getStatList()
result += "</div>"
result += "<div style='position: absolute;bottom:0;width:100%'>" + self.groupName + "</div>"
result += "</div>"
return result
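# Minimal usage sketch (illustrative values only; real spawns are filled from the database):
#   spawn = resourceSpawn()
#   spawn.spawnName = 'example_spawn'
#   spawn.stats.OQ = 912
#   spawn.stats.DR = 740
#   print spawn.getStatList()   # one "<span class='altText'>...:</span> value<br />" line per stat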
|
clreinki/GalaxyHarvester
|
ghObjects.py
|
Python
|
agpl-3.0
| 29,278
|
[
"Galaxy"
] |
6f44a5ea1c97b2a696371056b28d42793093598b1d74c82356f113de155bad51
|
# -----------------------------------------------------------------------------
# Download data:
# - Browser:
# http://midas3.kitware.com/midas/folder/10409 => VisibleMale/vm_head_frozenct.mha
# - Terminal
# curl "http://midas3.kitware.com/midas/download?folders=&items=235235" -o vm_head_frozenct.mha
# -----------------------------------------------------------------------------
from vtk import *
from tonic.vtk import *
from tonic.vtk.dataset_builder import *
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
dataset_destination_path = '/Users/seb/Desktop/head_ct'
file_path = '/Users/seb/Downloads/vm_head_frozenct.mha'
field = 'MetaImage'
fieldRange = [0.0, 4096.0]
nbSteps = 3
features = [ { 'center': 200, 'halfSpread': 200 }, { 'center': 900, 'halfSpread': 200 }, { 'center': 2000, 'halfSpread': 900 }, ]
# -----------------------------------------------------------------------------
# VTK Helper methods
# -----------------------------------------------------------------------------
def updatePieceWise(pwf, dataRange, center, halfSpread):
    # Rebuild the piecewise opacity function passed in as `pwf` into a triangular
    # peak centred on `center`, clamped to the data range.
    pwf.RemoveAllPoints()
    if (center - halfSpread) <= dataRange[0]:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center, 1.0)
    else:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center - halfSpread, 0.0)
        pwf.AddPoint(center, 1.0)
    if (center + halfSpread) >= dataRange[1]:
        pwf.AddPoint(dataRange[1], 0.0)
    else:
        pwf.AddPoint(center + halfSpread, 0.0)
        pwf.AddPoint(dataRange[1], 0.0)
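# For example, updatePieceWise(scalarOpacity, [0.0, 4096.0], 900, 200) leaves the opacity
# function with the points (0,0), (700,0), (900,1), (1100,0), (4096,0): full opacity at
# the feature centre, fading to transparent 200 scalar units to either side.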
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
reader = vtkMetaImageReader()
reader.SetFileName(file_path)
mapper = vtkGPUVolumeRayCastMapper()
mapper.SetInputConnection(reader.GetOutputPort())
mapper.RenderToImageOn()
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(fieldRange[0], 1.0, 1.0, 1.0)
colorFunction.AddRGBPoint(fieldRange[1], 1.0, 1.0, 1.0)
halfSpread = (fieldRange[1] - fieldRange[0]) / float(2*nbSteps)
centers = [ fieldRange[0] + halfSpread*float(2*i+1) for i in range(nbSteps)]
scalarOpacity = vtkPiecewiseFunction()
volumeProperty = vtkVolumeProperty()
volumeProperty.ShadeOn()
volumeProperty.SetInterpolationType(VTK_LINEAR_INTERPOLATION)
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(scalarOpacity)
volume = vtkVolume()
volume.SetMapper(mapper)
volume.SetProperty(volumeProperty)
window = vtkRenderWindow()
window.SetSize(512, 512)
renderer = vtkRenderer()
window.AddRenderer(renderer)
renderer.AddVolume(volume)
renderer.ResetCamera()
window.Render()
# Camera setting
camera = {
'position': [-0.264, -890.168, -135.0],
'focalPoint': [-0.264, -30.264, -135.0],
'viewUp': [0,0,1]
}
update_camera(renderer, camera)
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
vcdsb = SortedCompositeDataSetBuilder(dataset_destination_path, {'type': 'spherical', 'phi': range(0, 360, 30), 'theta': range(-60, 61, 30)})
idx = 0
vcdsb.start(window, renderer)
for feature in features:
idx += 1
updatePieceWise(scalarOpacity, fieldRange, feature['center'], feature['halfSpread'])
# Capture layer
vcdsb.activateLayer(field, feature['center'])
# Write data
vcdsb.writeData(mapper)
vcdsb.stop()
|
Kitware/tonic-data-generator
|
scripts/vtk/medical/head-ct.py
|
Python
|
bsd-3-clause
| 3,738
|
[
"VTK"
] |
5f96f3812ffff6c9d0422535ec70c68e17583ffa998c7423e9dbef7a67387363
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects to inspect the status of the Abinit tasks at run-time.
by extracting information from the main output file (text format).
"""
from __future__ import unicode_literals, division, print_function
import os
import collections
import numpy as np
import yaml
import six
from six.moves import cStringIO, map, zip
from tabulate import tabulate
from monty.collections import AttrDict
from pymatgen.util.plotting import add_fig_kwargs
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
def _magic_parser(stream, magic):
"""
Parse the section with the SCF cycle
Returns:
dict where the key are the name of columns and
the values are list of numbers. Note if no section was found.
.. warning::
The parser is very fragile and should be replaced by YAML.
"""
#Example (SCF cycle, similar format is used for phonons):
#
# iter Etot(hartree) deltaE(h) residm vres2
# ETOT 1 -8.8604027880849 -8.860E+00 2.458E-02 3.748E+00
# At SCF step 5 vres2 = 3.53E-08 < tolvrs= 1.00E-06 =>converged.
in_doc, fields = 0, None
for line in stream:
line = line.strip()
if line.startswith(magic):
keys = line.split()
fields = collections.OrderedDict((k, []) for k in keys)
if fields is not None:
#print(line)
in_doc += 1
if in_doc == 1:
continue
# End of the section.
if not line: break
tokens = list(map(float, line.split()[1:]))
assert len(tokens) == len(keys)
for l, v in zip(fields.values(), tokens):
l.append(v)
# Convert values to numpy arrays.
if fields:
return collections.OrderedDict([(k, np.array(v)) for k, v in fields.items()])
else:
return None
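# Illustrative usage of _magic_parser (a sketch; the inline table mimics the SCF
# section shown in the comment above):
#
#   from six.moves import cStringIO
#   text = ("iter   Etot(hartree)      deltaE(h)  residm     vres2\n"
#           " ETOT  1  -8.8604027880849    -8.860E+00 2.458E-02  3.748E+00\n"
#           "\n")
#   fields = _magic_parser(cStringIO(text), magic="iter   Etot(hartree)")
#   # fields is an OrderedDict of numpy arrays keyed by "iter", "Etot(hartree)", ...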
def plottable_from_outfile(filepath):
"""
Factory function that returns a plottable object by inspecting the main output file of abinit
Returns None if it is not able to detect the class to instantiate.
"""
# TODO
# Figure out how to detect the type of calculations
# without having to parse the input. Possible approach: YAML doc
#with YamlTokenizer(filepath) as r:
# doc = r.next_doc_with_tag("!CalculationType")
# d = yaml.load(doc.text_notag)
# calc_type = d["calculation_type"]
#ctype2class = {
# "Ground State": GroundStateScfCycle,
# "Phonon": PhononScfCycle,
# "Relaxation": Relaxation,
#}
#obj = ctype2class.get(calc_type, None)
obj = GroundStateScfCycle
if obj is not None:
return obj.from_file(filepath)
else:
return None
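# Illustrative usage (a sketch; "run.abo" is a hypothetical Abinit main output file):
#
#   cycle = plottable_from_outfile("run.abo")
#   if cycle is not None:
#       fig = cycle.plot(show=False)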
class ScfCycle(collections.Mapping):
"""
    It essentially consists of a dictionary mapping column names
    to lists of floats with the data at the different iterations.
.. attributes::
num_iterations: Number of iterations performed.
"""
def __init__(self, fields):
self.fields = fields
#print(fields)
all_lens = [len(lst) for lst in self.values()]
self.num_iterations = all_lens[0]
assert all(n == self.num_iterations for n in all_lens)
def __getitem__(self, slice):
return self.fields.__getitem__(slice)
def __iter__(self):
return self.fields.__iter__()
def __len__(self):
return len(self.fields)
def __str__(self):
"""String representation."""
rows = [list(map(str, (self[k][it] for k in self.keys())))
for it in range(self.num_iterations)]
stream = cStringIO()
print(tabulate(rows, headers=list(self.keys())), file=stream)
stream.seek(0)
return "".join(stream)
@property
def last_iteration(self):
"""Returns a dictionary with the values of the last iteration."""
return {k: v[-1] for k, v in self.items()}
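    # Illustrative access (a sketch): for a GroundStateScfCycle instance `cycle`,
    # cycle.last_iteration["Etot(hartree)"] equals cycle.last_etotal.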
@classmethod
def from_file(cls, filepath):
"""Read the first occurrence of ScfCycle from file."""
with open(filepath, "rt") as stream:
return cls.from_stream(stream)
@classmethod
def from_stream(cls, stream):
"""
Read the first occurrence of ScfCycle from stream.
Returns:
None if no `ScfCycle` entry is found.
"""
fields = _magic_parser(stream, magic=cls.MAGIC)
if fields:
fields.pop("iter")
return cls(fields)
else:
return None
@add_fig_kwargs
def plot(self, axlist=None, **kwargs):
"""
        Uses matplotlib to plot the evolution of the SCF cycle.
Args:
axlist: List of axes. If None a new figure is produced.
Returns: matplotlib figure
"""
# Build grid of plots.
num_plots, ncols, nrows = len(self), 1, 1
if num_plots > 1:
ncols = 2
nrows = num_plots // ncols + num_plots % ncols
import matplotlib.pyplot as plt
if axlist is None:
fig, axlist = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, squeeze=False)
axlist = axlist.ravel()
else:
fig = plt.gcf()
# Use log scale for these variables.
use_logscale = set(["residm", "vres2"])
# Hard-coded y-range for selected variables.
has_yrange = {
"deltaE(h)": (-1e-3, +1e-3),
"deltaE(Ha)": (-1e-3, +1e-3),
}
iter_num = np.array(list(range(self.num_iterations)))
for (key, values), ax in zip(self.items(), axlist):
ax.grid(True)
ax.set_xlabel('Iteration')
ax.set_xticks(iter_num, minor=False)
ax.set_ylabel(key)
xx, yy = iter_num, values
if self.num_iterations > 1:
# Don't show the first iteration since it's not very useful.
xx, yy = xx[1:] + 1, values[1:]
ax.plot(xx, yy, "-o", lw=2.0)
if key in use_logscale and np.all(yy > 1e-22):
ax.set_yscale("log")
if key in has_yrange:
ymin, ymax = has_yrange[key]
val_min, val_max = np.min(yy), np.max(yy)
if abs(val_max - val_min) > abs(ymax - ymin):
ax.set_ylim(ymin, ymax)
# Get around a bug in matplotlib.
if num_plots % ncols != 0:
axlist[-1].plot(xx, yy, lw=0.0)
axlist[-1].axis('off')
#plt.legend(loc="best")
fig.tight_layout()
return fig
class GroundStateScfCycle(ScfCycle):
"""Result of the Ground State self-consistent cycle."""
MAGIC = "iter Etot(hartree)"
@property
def last_etotal(self):
"""The total energy at the last iteration."""
return self["Etot(hartree)"][-1]
class D2DEScfCycle(ScfCycle):
"""Result of the Phonon self-consistent cycle."""
MAGIC = "iter 2DEtotal(Ha)"
@property
def last_etotal(self):
"""The 2-nd order derivative of the energy at the last iteration."""
return self["2DEtotal(Ha)"][-1]
class PhononScfCycle(D2DEScfCycle):
"""Iterations of the DFPT SCF cycle for phonons."""
class Relaxation(collections.Iterable):
"""
A list of :class:`GroundStateScfCycle` objects.
.. note::
Forces, stresses and crystal structures are missing.
This object is mainly used to analyze the behavior of the Scf cycles
during the structural relaxation. A more powerful and detailed analysis
can be obtained by using the HIST.nc file.
"""
def __init__(self, cycles):
self.cycles = cycles
def __iter__(self):
return self.cycles.__iter__()
def __len__(self):
return self.cycles.__len__()
def __getitem__(self, slice):
return self.cycles[slice]
def __str__(self):
"""String representation."""
lines = []
app = lines.append
for i, cycle in enumerate(self):
app("")
app("RELAXATION STEP: %d" % i)
app(str(cycle))
app("")
return "\n".join(lines)
@classmethod
def from_file(cls, filepath):
"""Initialize the object from the Abinit main output file."""
with open(filepath, "rt") as stream:
return cls.from_stream(stream)
@classmethod
def from_stream(cls, stream):
"""
Extract data from stream. Returns None if some error occurred.
"""
cycles = []
while True:
scf_cycle = GroundStateScfCycle.from_stream(stream)
if scf_cycle is None: break
cycles.append(scf_cycle)
return cls(cycles) if cycles else None
@property
def history(self):
"""
Dictionary of lists with the evolution of the data as function of the relaxation step.
"""
try:
return self._history
except AttributeError:
self._history = history = collections.defaultdict(list)
for cycle in self:
d = cycle.last_iteration
for k, v in d.items():
history[k].append(v)
return self._history
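    # Illustrative access (a sketch): relax.history["Etot(hartree)"] is the list
    # of final total energies, one entry per relaxation step.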
@add_fig_kwargs
def plot(self, axlist=None, **kwargs):
"""
Uses matplotlib to plot the evolution of the structural relaxation.
Args:
axlist: List of axes. If None a new figure is produced.
Returns:
`matplotlib` figure
"""
import matplotlib.pyplot as plt
history = self.history
#print(history)
relax_step = list(range(len(self)))
# Build grid of plots.
num_plots, ncols, nrows = len(list(history.keys())), 1, 1
if num_plots > 1:
ncols = 2
nrows = (num_plots//ncols) + (num_plots % ncols)
fig, ax_list = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, squeeze=False)
ax_list = ax_list.ravel()
if num_plots % ncols != 0:
ax_list[-1].axis('off')
for (key, values), ax in zip(history.items(), ax_list):
ax.grid(True)
ax.set_xlabel('Relaxation Step')
ax.set_xticks(relax_step, minor=False)
ax.set_ylabel(key)
ax.plot(relax_step, values, "-o", lw=2.0)
return fig
# TODO
#class HaydockIterations(collections.Iterable):
# """This object collects info on the different steps of the Haydock technique used in the Bethe-Salpeter code"""
# @classmethod
# def from_file(cls, filepath):
# """Initialize the object from file."""
# with open(filepath, "rt") as stream:
# return cls.from_stream(stream)
#
# @classmethod
# def from_stream(cls, stream):
# """Extract data from stream. Returns None if some error occurred."""
# cycles = []
# while True:
# scf_cycle = GroundStateScfCycle.from_stream(stream)
# if scf_cycle is None: break
# cycles.append(scf_cycle)
#
# return cls(cycles) if cycles else None
#
# #def __init__(self):
#
# def plot(self, **kwargs):
# """
# Uses matplotlib to plot the evolution of the structural relaxation.
# ============== ==============================================================
# kwargs Meaning
# ============== ==============================================================
# title Title of the plot (Default: None).
# how True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps'* to save the figure to a file.
# ============== ==============================================================
# Returns:
# `matplotlib` figure
# """
# import matplotlib.pyplot as plt
# title = kwargs.pop("title", None)
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# if title: fig.suptitle(title)
# if savefig is not None: fig.savefig(savefig)
# if show: plt.show()
# return fig
##################
## Yaml parsers.
##################
class YamlTokenizerError(Exception):
"""Exceptions raised by :class:`YamlTokenizer`."""
class YamlTokenizer(collections.Iterator):
"""
Provides context-manager support so you can use it in a with statement.
"""
Error = YamlTokenizerError
def __init__(self, filename):
# The position inside the file.
self.linepos = 0
self.filename = filename
try:
self.stream = open(filename, "rt")
except IOError as exc:
# Look for associated error file.
root, ext = os.path.splitext(self.filename)
errfile = root + ".err"
if os.path.exists(errfile) and errfile != self.filename:
print("Found error file: %s" % errfile)
with open(errfile, "rt") as fh:
print(fh.read())
raise exc
def __iter__(self):
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
self.close()
def close(self):
try:
self.stream.close()
except:
print("Exception in YAMLTokenizer.close()")
print("Python traceback:")
print(straceback())
def seek(self, offset, whence=0):
"""
seek(offset[, whence]) -> None. Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
0 (offset from start of file, offset should be >= 0); other values are 1
(move relative to current position, positive or negative), and 2 (move
relative to end of file, usually negative, although many platforms allow
seeking beyond the end of a file). If the file is opened in text mode,
only offsets returned by tell() are legal. Use of other offsets causes
undefined behavior.
Note that not all file objects are seekable.
"""
assert offset == 0
self.linepos = 0
return self.stream.seek(offset, whence)
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
"""
Returns the first YAML document in stream.
.. warning::
            Assume that the YAML documents are closed explicitly with the sentinel '...'
"""
in_doc, lines, doc_tag = None, [], None
for i, line in enumerate(self.stream):
self.linepos += 1
#print(i, line)
if line.startswith("---"):
# Include only lines in the form:
# "--- !tag"
# "---"
# Other lines are spurious.
in_doc = False
l = line[3:].strip().lstrip()
if l.startswith("!"):
# "--- !tag"
doc_tag = l
in_doc = True
elif not l:
# "---"
in_doc = True
doc_tag = None
if in_doc:
lineno = self.linepos
if in_doc:
lines.append(line)
if in_doc and line.startswith("..."):
return YamlDoc(text="".join(lines), lineno=lineno, tag=doc_tag)
raise StopIteration("Cannot find next YAML document in %s" % self.filename)
def all_yaml_docs(self):
"""
Returns a list with all the YAML docs found in stream.
Seek the stream before returning.
.. warning::
Assume that all the YAML docs (with the exception of the last one)
            are closed explicitly with the sentinel '...'
"""
docs = [doc for doc in self]
self.seek(0)
return docs
def next_doc_with_tag(self, doc_tag):
"""
        Returns the next document with the specified tag. Raises StopIteration if no such document is found.
"""
while True:
try:
doc = six.advance_iterator(self)
if doc.tag == doc_tag:
return doc
except StopIteration:
raise
def all_docs_with_tag(self, doc_tag):
"""
Returns all the documents with the specified tag.
"""
docs = []
while True:
try:
                doc = self.next_doc_with_tag(doc_tag)
docs.append(doc)
except StopIteration:
break
self.seek(0)
return docs
def yaml_read_kpoints(filename, doc_tag="!Kpoints"):
"""Read the K-points from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.load(doc.text_notag)
return np.array(d["reduced_coordinates_of_qpoints"])
def yaml_read_irred_perts(filename, doc_tag="!IrredPerts"):
"""Read the list of irreducible perturbations from file."""
with YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag(doc_tag)
d = yaml.load(doc.text_notag)
return [AttrDict(**pert) for pert in d["irred_perts"]]
#return d["irred_perts"]
class YamlDoc(object):
"""
    Handy object that stores the YAML document, its main tag and the
position inside the file.
"""
__slots__ = [
"text",
"lineno",
"tag",
]
def __init__(self, text, lineno, tag=None):
"""
Args:
text: String with the YAML document.
lineno: The line number where the document is located.
            tag: The YAML tag associated with the document.
"""
        # Sanitize strings: use "ignore" to skip invalid characters in .encode/.decode.
        if isinstance(text, bytes):
            text = text.decode("utf-8", "ignore")
        text = text.strip()
self.text = text
self.lineno = lineno
if isinstance(tag, bytes):
tag = tag.decode("utf-8", "ignore")
self.tag = tag
def __str__(self):
return self.text
def __eq__(self, other):
if other is None: return False
return (self.text == other.text and
self.lineno == other.lineno and
self.tag == other.tag)
def __ne__(self, other):
return not self == other
@property
def text_notag(self):
"""
Returns the YAML text without the tag.
Useful if we don't have any constructor registered for the tag
(we used the tag just to locate the document).
"""
if self.tag is not None:
return self.text.replace(self.tag, "")
else:
return self.text
def as_dict(self):
"""Use Yaml to parse the text (without the tag) and returns a dictionary."""
return yaml.load(self.text_notag)
|
xhqu1981/pymatgen
|
pymatgen/io/abinit/abiinspect.py
|
Python
|
mit
| 19,214
|
[
"ABINIT",
"CRYSTAL",
"pymatgen"
] |
e92033f92cdd095bf33672189bd904de44bf6b02d576738aff9c146ee582077c
|
#!/usr/bin/env python
# The glassbrain class copied from The NeuroImaging Analysis Framework (NAF) repositories
# The code is covered under GNU GPL v2.
# Usage example.
'''
brain = ConnecBrain("fsaverage", "lh", "inflated")
coords = np.array([[-27., 23., 48.],
[-41.,-60., 29.],
[-64., -20., -9.],
[ -7., 49., 18.],
[ -7., -52., 26.]])
labels = ['MFG','AG','MTG','PCC','MPFC']
brain.add_coords(coords, color='green', labels=labels, scale_factor=1)
brain.add_arrow(coords[:2,:], color='red')
mlab.view(45,135)
'''
import numpy as np
from matplotlib.colors import colorConverter
from mayavi import mlab
from mayavi.mlab import pipeline as mp
import surfer
from surfer import utils
class ConnecBrain(surfer.Brain):
"""
    Subclass of surfer.Brain which allows adding co-ordinates and arrows
to denote directional connectivity estimates
"""
def __init__(self, subject_id, hemi, surf='inflated', curv=True,
title=None, config_opts={}, figure=None, subjects_dir=None,
views=['lat'], show_toolbar=False, offscreen=False,
opacity=0.3):
# Call our main constructor
surfer.Brain.__init__(self, subject_id, hemi, surf, views=views, curv=curv,
config_opts=config_opts, subjects_dir=subjects_dir)
#surfer.Brain.__init__(self, subject_id, hemi, surf, curv, title,
# config_opts, figure, subjects_dir,
# views, show_toolbar, offscreen)
# Initialise our arrows dictionary
self.arrows_dict = dict()
# Set all brain opacities
for b in self._brain_list:
b['brain']._geo_surf.actor.property.opacity = opacity
def arrows(self):
"""Wrap to arrows"""
return self._get_one_brain(self.arrows_dict, 'arrows')
def add_coords(self, coords, map_surface=None, scale_factor=1.5,
color="red", alpha=1, name=None, labels=None, hemi=None,
text_size=5, txt_pos=[1.4, 1.1, 1.1]):
"""
Plot locations onto the brain surface as spheres.
:param coords: list of co-ordinates or (n, 3) numpy array. Co-ordinate
space must match that of the underlying MRI image
:param map_surface: Freesurfer surf or None.
surface to map coordinates through, or None to use raw coords
:param scale_factor: int
controls the size of the foci spheres
:param color: matplotlib color code
HTML name, RGB tuple or hex code
:param alpha: float in [0, 1]
opacity of coordinate spheres
:param name: str
internal name to use (_foci and _labels will be appended)
:param labels:
List of text strings used to label co-ordinates
:param hemi: str | None
If None, assumed to belong to the hemisphere being shown.
            If two hemispheres are being shown, an error will be thrown
:param text_size: int
Text size of labels
"""
hemi = self._check_hemi(hemi)
if map_surface is None:
foci_vtxs = surfer.utils.find_closest_vertices(self.geo[hemi].coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
else:
foci_surf = utils.Surface(self.subject_id, hemi, map_surface,
subjects_dir=self.subjects_dir)
foci_surf.load_geometry()
foci_vtxs = utils.find_closest_vertices(foci_surf.coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
# Convert the color code
if not isinstance(color, tuple):
color = colorConverter.to_rgb(color)
if name is None:
name = "coords_%s" % (max(len(self.foci_dict) + 1,
len(self.labels_dict) + 1))
views = self._toggle_render(False)
# Store the coords in the foci list and the label in the labels list
fl = []
# Create the visualization
for brain in self._brain_list:
if brain['hemi'] == hemi:
fl.append(mlab.points3d(foci_coords[:, 0],
foci_coords[:, 1],
foci_coords[:, 2],
np.ones(foci_coords.shape[0]),
scale_factor=(10. * scale_factor),
color=color, opacity=alpha,
name=name + '_foci',
figure=brain['brain']._f))
self.foci_dict[name + '_foci'] = fl
if labels is not None:
tl = []
for i in xrange(coords.shape[0]):
tl.append(mlab.text3d(foci_coords[i, 0]*txt_pos[0],
foci_coords[i, 1]*txt_pos[1],
foci_coords[i, 2]*txt_pos[2],
labels[i],
color=(1.0, 1.0, 1.0),
scale=text_size,
name=name + '_label',
figure=brain['brain']._f))
            self.labels_dict[name + '_label'] = tl
self._toggle_render(True, views)
def add_arrow(self, coords, map_surface=None, tube_radius=3.0,
color="white", alpha=1, name=None, hemi=None):
"""
Add an arrow across the brain between two co-ordinates
:param coords: list of co-ordinates or (n, 3) numpy array. Co-ordinate
space must match that of the underlying MRI image
:param tube_radius: float
controls the size of the arrow
:param color: matplotlib color code
HTML name, RGB tuple or hex code
:param alpha: float in [0, 1]
opacity of coordinate spheres
:param name: str
internal name to use
:param hemi: str | None
If None, assumed to belong to the hemisphere being shown.
            If two hemispheres are being shown, an error will be thrown
"""
hemi = self._check_hemi(hemi)
if map_surface is None:
foci_vtxs = surfer.utils.find_closest_vertices(self.geo[hemi].coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
else:
foci_surf = utils.Surface(self.subject_id, hemi, map_surface,
subjects_dir=self.subjects_dir)
foci_surf.load_geometry()
foci_vtxs = utils.find_closest_vertices(foci_surf.coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
# foci_vtxs = surfer.utils.find_closest_vertices(self.geo[hemi].coords, coords)
# foci_coords = self.geo[hemi].coords[foci_vtxs]
# Convert the color code
if not isinstance(color, tuple):
color = colorConverter.to_rgb(color)
if name is None:
name = "arrow_%s" % (len(self.arrows_dict) + 1)
nsegs = 100
x = np.linspace(foci_coords[0, 0], foci_coords[1, 0], nsegs)
y = np.linspace(foci_coords[0, 1], foci_coords[1, 1], nsegs)
z = np.linspace(foci_coords[0, 2], foci_coords[1, 2], nsegs)
line_coords = np.vstack((x, y, z)).transpose()
step = 5
idx_a = range(0, nsegs+1, step)
idx_b = range(10, nsegs+1, step)
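        # Illustrative taper (worked from the seg_width formula below): with the
        # default tube_radius of 3.0, seg_width shrinks linearly from 3.0 at the
        # arrow base (start=0) to about 0.6 near the tip (start=95), which gives
        # the plotted line its arrow-like appearance.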
views = self._toggle_render(False)
al = []
for brain in self._brain_list:
if brain['hemi'] == hemi:
for start,end in zip(idx_a, idx_b):
seg_width = tube_radius - (start*(tube_radius-.5)/100.)
al.append(mlab.plot3d(line_coords[start:end, 0],
line_coords[start:end, 1],
line_coords[start:end, 2],
np.ones_like(line_coords[start:end, 0]),
color=color, opacity=alpha,
tube_radius=seg_width,
name=name,
figure=brain['brain']._f))
self.arrows_dict[name] = al
self._toggle_render(True, views)
|
fboers/jumegX
|
glassbrain.py
|
Python
|
bsd-3-clause
| 8,410
|
[
"Mayavi"
] |
409824126e482a66e2a99e041c1a6e0582a7ec7de9b7080dd9a500cd3c0dd87e
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 16:02:10 2018
@author: BallBlueMeercat
"""
import matplotlib.pyplot as plt
from emcee import EnsembleSampler
import numpy as np
import time
import os.path
import datasim
import tools
import ln
import plots
def stats(params, zpicks, mag, sigma, nsteps,
save_path, firstderivs_key):
"""
Takes in:
m_true = e_m(t)/ec(t0) at t=t0;
zpicks = list of z to match the interpolated dlmpc to;
        mag = list of n apparent magnitudes for the zpicks redshifts;
sigma = standard deviation used to generate Gaussian noise.
    Returns:
        propert = dict of the parameter traces, means, standard deviations and best-fit values;
        sampler = the emcee EnsembleSampler used for the run.
"""
# print('-stats has been called')
if firstderivs_key == 'LCDM':
params['gamma'] = 0
del params['gamma']
# emcee parameters:
ndim = len(params)
nwalkers = int(ndim * 2)
# Initializing walkers.
poslist = list(params.values())
pos = []
for i in poslist:
pos.append(i / 2)
startpos = np.array(pos)
pos = [startpos + 0.01*np.random.randn(ndim) for i in range(nwalkers)]
# Are walkers starting outside of prior?
i = 0
while i < nwalkers:
theta = pos[i]
lp = ln.lnprior(theta, firstderivs_key)
if not np.isfinite(lp):
print('~~~~~~~pos[%s] (outside of prior) = %s ~~~~~~~'%(i, theta))
i += 1
# Sampler setup.
times0 = time.time() # starting sampler timer
sampler = EnsembleSampler(nwalkers, ndim, ln.lnprob,
args=(zpicks, mag, sigma, firstderivs_key, ndim))
# Burnin.
burnin = int(nsteps/4) # steps to discard
print('_____ burnin start')
timeb0 = time.time() # starting burnin timer
pos, prob, state = sampler.run_mcmc(pos, burnin)
timeb1=time.time() # stopping burnin timer
print('_____ burnin end')
sampler.reset()
# Starting sampler after burnin.
print('_____ sampler start')
sampler.run_mcmc(pos, nsteps)
print('_____ sampler end')
times1=time.time() # stopping sampler timer
# Walker steps.
lnprob = sampler.flatlnprobability
# Index of best parameters found by emcee.
bi = np.argmax(sampler.flatlnprobability) # index with highest post prob
trace = sampler.chain[:, burnin:, :].reshape(-1, ndim)
# Extracting results:
thetabest = np.zeros(ndim)
parambest = {}
true = []
propert = {}
propert['trace'] = trace
for i in range(ndim):
if i == 0:
mbest = sampler.flatchain[bi,i]
thetabest[i] = mbest
parambest['m'] = mbest
# Input m = e_m(z)/ec(z=0).
m_true = params.get('m', 0)
true.append(m_true)
# Output m.
m = sampler.flatchain[:,i]
# Standard deviation and mean of the m distribution.
m_sd = np.std(m)
m_mean = np.mean(m)
propert['m_sd'] = m_sd
propert['m_mean'] = m_mean
propert['m'] = mbest
plots.stat('coral', m, m_true, 'Matter', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
elif i == 1:
gammabest = sampler.flatchain[bi,i]
thetabest[i] = gammabest
parambest['gamma'] = gammabest
# Input interaction term.
g_true = params.get('gamma',0)
true.append(g_true)
# Output gamma.
gamma = sampler.flatchain[:,i]
            # Standard deviation and mean of the gamma distribution.
gamma_sd = np.std(gamma)
gamma_mean = np.mean(gamma)
propert['gamma_sd'] = gamma_sd
propert['gamma_mean'] = gamma_mean
propert['gamma'] = gammabest
plots.stat('aquamarine', gamma, g_true, 'Gamma', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
elif i == 2:
debest = sampler.flatchain[bi,i]
thetabest[i] = debest
parambest['de'] = debest
H0 = 1
rho_c0 = H0**2 # critical density
# Input de = e_de(z)/ec(z=0).
de_true = params.get('de', rho_c0/rho_c0 - m_true)
true.append(de_true)
# Output de.
de = sampler.flatchain[:,i]
# Standard deviation and mean of the de distribution
de_sd = np.std(de)
de_mean = np.mean(de)
propert['de_sd'] = de_sd
propert['de_mean'] = de_mean
propert['de'] = debest
plots.stat('orchid', de, de_true, 'DE', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
# Checking if best found parameters are within prior.
lp = ln.lnprior(thetabest, firstderivs_key)
if not np.isfinite(lp):
print('')
print('best emcee parameters outside of prior (magbest calculation)')
print('')
# Plot of data mag and redshifts, overlayed with
# mag simulated using emcee best parameters and data redshifts.
magbest = datasim.magn(parambest, zpicks, firstderivs_key)
plt.figure()
plt.title('model: '+firstderivs_key
+'\n Evolution of magnitude with redshift \n nsteps: '
+str(nsteps)+', noise: '+str(sigma)+', npoints: '+str(len(zpicks)))
data = plt.errorbar(zpicks, mag, yerr=sigma, fmt='.', alpha=0.3)
best_fit = plt.scatter(zpicks, magbest, lw='1', c='xkcd:tomato')
plt.ylabel('magnitude')
plt.xlabel('z')
plt.legend([data, best_fit], ['LCDM', firstderivs_key])
stamp = str(int(time.time()))
filename = str(stamp)+'____magz__nsteps_'+str(nsteps)+'_nwalkers_' \
+str(nwalkers)+'_noise_'+str(sigma)+'_numpoints_'+str(len(zpicks))+'.pdf'
filename = os.path.join(save_path, filename)
plt.savefig(filename)
plt.show(block=False)
# # Corner plot (walkers' walk + histogram).
# import corner
## samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
# samples = sampler.chain[:, :, :].reshape((-1, ndim))
# corner.corner(samples, labels=["$m$"],
# truths=true)
# show(block=False)
# Results getting printed:
if bi == 0:
print('@@@@@@@@@@@@@@@@@')
print('best index =',str(bi))
print('@@@@@@@@@@@@@@@@@')
print('best parameters =',str(parambest.values()))
print('m.a.f.:', np.mean(sampler.acceptance_fraction))
print('nsteps:', str(nsteps))
print('sigma:', str(sigma))
print('npoints:', str(len(zpicks)))
print('model:', firstderivs_key)
tools.timer('burnin', timeb0, timeb1)
tools.timer('sampler', times0, times1)
return propert, sampler
|
lefthandedroo/Cosmo-models
|
zprev versions/Models_py_backup/stats.py
|
Python
|
mit
| 6,851
|
[
"Gaussian"
] |
b23e310e64dfd1a814f08ac236143878f95251b7765f2f7d2d32d0819aa8243c
|
""" Munch is a subclass of dict with attribute-style access.
>>> b = Munch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Munch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
It is safe to import * from this module:
    __all__ = ('Munch', 'munchify', 'DefaultMunch', 'unmunchify')
un/munchify provide dictionary conversion; Munches can also be
converted via Munch.to/fromDict().
"""
__version__ = '2.2.0'
VERSION = tuple(map(int, __version__.split('.')))
__all__ = ('Munch', 'munchify', 'DefaultMunch', 'unmunchify')
from .python3_compat import * # pylint: disable=wildcard-import
class Munch(dict):
""" A dictionary that provides attribute-style access.
>>> b = Munch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Munch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
A Munch is a subclass of dict; it supports all the methods a dict does...
>>> sorted(b.keys())
['foo', 'hello']
Including update()...
>>> b.update({ 'ponies': 'are pretty!' }, hello=42)
>>> print (repr(b))
Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
As well as iteration...
>>> sorted([ (k,b[k]) for k in b ])
[('foo', Munch({'lol': True})), ('hello', 42), ('ponies', 'are pretty!')]
And "splats".
>>> "The {knights} who say {ni}!".format(**Munch(knights='lolcats', ni='can haz'))
'The lolcats who say can haz!'
See unmunchify/Munch.toDict, munchify/Munch.fromDict for notes about conversion.
"""
# only called if k not found in normal places
def __getattr__(self, k):
""" Gets key if it exists, otherwise throws AttributeError.
nb. __getattr__ is only called if key is not found in normal places.
>>> b = Munch(bar='baz', lol={})
>>> b.foo
Traceback (most recent call last):
...
AttributeError: foo
>>> b.bar
'baz'
>>> getattr(b, 'bar')
'baz'
>>> b['bar']
'baz'
>>> b.lol is b['lol']
True
>>> b.lol is getattr(b, 'lol')
True
"""
try:
# Throws exception if not in prototype chain
return object.__getattribute__(self, k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError(k)
def __setattr__(self, k, v):
""" Sets attribute k if it exists, otherwise sets key k. A KeyError
raised by set-item (only likely if you subclass Munch) will
propagate as an AttributeError instead.
>>> b = Munch(foo='bar', this_is='useful when subclassing')
>>> hasattr(b.values, '__call__')
True
>>> b.values = 'uh oh'
>>> b.values
'uh oh'
>>> b['values']
Traceback (most recent call last):
...
KeyError: 'values'
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
self[k] = v
except:
raise AttributeError(k)
else:
object.__setattr__(self, k, v)
def __delattr__(self, k):
""" Deletes attribute k if it exists, otherwise deletes key k. A KeyError
raised by deleting the key--such as when the key is missing--will
propagate as an AttributeError instead.
>>> b = Munch(lol=42)
>>> del b.lol
>>> b.lol
Traceback (most recent call last):
...
AttributeError: lol
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
del self[k]
except KeyError:
raise AttributeError(k)
else:
object.__delattr__(self, k)
def toDict(self):
""" Recursively converts a munch back into a dictionary.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> sorted(b.toDict().items())
[('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
See unmunchify for more info.
"""
return unmunchify(self)
@property
def __dict__(self):
return self.toDict()
def __repr__(self):
""" Invertible* string-form of a Munch.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> print (repr(b))
Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
>>> eval(repr(b))
Munch({'ponies': 'are pretty!', 'foo': Munch({'lol': True}), 'hello': 42})
>>> with_spaces = Munch({1: 2, 'a b': 9, 'c': Munch({'simple': 5})})
>>> print (repr(with_spaces))
Munch({'a b': 9, 1: 2, 'c': Munch({'simple': 5})})
>>> eval(repr(with_spaces))
Munch({'a b': 9, 1: 2, 'c': Munch({'simple': 5})})
(*) Invertible so long as collection contents are each repr-invertible.
"""
return '{0}({1})'.format(self.__class__.__name__, dict.__repr__(self))
def __dir__(self):
return list(iterkeys(self))
__members__ = __dir__ # for python2.x compatibility
@classmethod
def fromDict(cls, d):
""" Recursively transforms a dictionary into a Munch via copy.
>>> b = Munch.fromDict({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
See munchify for more info.
"""
return munchify(d, cls)
def copy(self):
return type(self).fromDict(self)
class DefaultMunch(Munch):
"""
A Munch that returns a user-specified value for missing keys.
"""
def __init__(self, *args, **kwargs):
""" Construct a new DefaultMunch. Like collections.defaultdict, the
first argument is the default value; subsequent arguments are the
same as those for dict.
"""
# Mimic collections.defaultdict constructor
if args:
default = args[0]
args = args[1:]
else:
default = None
super(DefaultMunch, self).__init__(*args, **kwargs)
self.__default__ = default
def __getattr__(self, k):
""" Gets key if it exists, otherwise returns the default value."""
try:
return super(DefaultMunch, self).__getattr__(k)
except AttributeError:
return self.__default__
def __setattr__(self, k, v):
if k == '__default__':
object.__setattr__(self, k, v)
else:
return super(DefaultMunch, self).__setattr__(k, v)
def __getitem__(self, k):
""" Gets key if it exists, otherwise returns the default value."""
try:
return super(DefaultMunch, self).__getitem__(k)
except KeyError:
return self.__default__
@classmethod
def fromDict(cls, d, default=None):
# pylint: disable=arguments-differ
return munchify(d, factory=lambda d_: cls(default, d_))
def copy(self):
return type(self).fromDict(self, default=self.__default__)
def __repr__(self):
return '{0}({1!r}, {2})'.format(
            type(self).__name__, self.__default__, dict.__repr__(self))
# While we could convert abstract types like Mapping or Iterable, I think
# munchify is more likely to "do what you mean" if it is conservative about
# casting (ex: isinstance(str,Iterable) == True ).
#
# Should you disagree, it is not difficult to duplicate this function with
# more aggressive coercion to suit your own purposes.
def munchify(x, factory=Munch):
""" Recursively transforms a dictionary into a Munch via copy.
>>> b = munchify({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
munchify can handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = munchify({ 'lol': ('cats', {'hah':'i win again'}),
... 'hello': [{'french':'salut', 'german':'hallo'}] })
>>> b.hello[0].french
'salut'
>>> b.lol[1].hah
'i win again'
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
if isinstance(x, dict):
return factory((k, munchify(v, factory)) for k, v in iteritems(x))
elif isinstance(x, (list, tuple)):
return type(x)(munchify(v, factory) for v in x)
else:
return x
def unmunchify(x):
""" Recursively converts a Munch into a dictionary.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> sorted(unmunchify(b).items())
[('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
unmunchify will handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42,
... ponies=('are pretty!', Munch(lies='are trouble!')))
>>> sorted(unmunchify(b).items()) #doctest: +NORMALIZE_WHITESPACE
[('foo', ['bar', {'lol': True}]), ('hello', 42), ('ponies', ('are pretty!', {'lies': 'are trouble!'}))]
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
if isinstance(x, dict):
return dict((k, unmunchify(v)) for k, v in iteritems(x))
elif isinstance(x, (list, tuple)):
return type(x)(unmunchify(v) for v in x)
else:
return x
# Serialization
try:
try:
import json
except ImportError:
import simplejson as json
def toJSON(self, **options):
""" Serializes this Munch to JSON. Accepts the same keyword options as `json.dumps()`.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> json.dumps(b) == b.toJSON()
True
"""
return json.dumps(self, **options)
Munch.toJSON = toJSON
except ImportError:
pass
try:
# Attempt to register ourself with PyYAML as a representer
import yaml
from yaml.representer import Representer, SafeRepresenter
def from_yaml(loader, node):
""" PyYAML support for Munches using the tag `!munch` and `!munch.Munch`.
>>> import yaml
>>> yaml.load('''
... Flow style: !munch.Munch { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki }
... Block style: !munch
... Clark : Evans
... Brian : Ingerson
... Oren : Ben-Kiki
... ''') #doctest: +NORMALIZE_WHITESPACE
{'Flow style': Munch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki'),
'Block style': Munch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki')}
This module registers itself automatically to cover both Munch and any
subclasses. Should you want to customize the representation of a subclass,
simply register it with PyYAML yourself.
"""
data = Munch()
yield data
value = loader.construct_mapping(node)
data.update(value)
def to_yaml_safe(dumper, data):
""" Converts Munch to a normal mapping node, making it appear as a
dict in the YAML output.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
"""
return dumper.represent_dict(data)
def to_yaml(dumper, data):
""" Converts Munch to a representation node.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42)
>>> import yaml
>>> yaml.dump(b, default_flow_style=True)
'!munch.Munch {foo: [bar, !munch.Munch {lol: true}], hello: 42}\\n'
"""
return dumper.represent_mapping(u('!munch.Munch'), data)
yaml.add_constructor(u('!munch'), from_yaml)
yaml.add_constructor(u('!munch.Munch'), from_yaml)
SafeRepresenter.add_representer(Munch, to_yaml_safe)
SafeRepresenter.add_multi_representer(Munch, to_yaml_safe)
Representer.add_representer(Munch, to_yaml)
Representer.add_multi_representer(Munch, to_yaml)
# Instance methods for YAML conversion
def toYAML(self, **options):
""" Serializes this Munch to YAML, using `yaml.safe_dump()` if
no `Dumper` is provided. See the PyYAML documentation for more info.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> b.toYAML(default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> yaml.dump(b, default_flow_style=True)
'!munch.Munch {foo: [bar, !munch.Munch {lol: true}], hello: 42}\\n'
>>> b.toYAML(Dumper=yaml.Dumper, default_flow_style=True)
'!munch.Munch {foo: [bar, !munch.Munch {lol: true}], hello: 42}\\n'
"""
opts = dict(indent=4, default_flow_style=False)
opts.update(options)
if 'Dumper' not in opts:
return yaml.safe_dump(self, **opts)
else:
return yaml.dump(self, **opts)
def fromYAML(*args, **kwargs):
return munchify(yaml.load(*args, **kwargs))
Munch.toYAML = toYAML
Munch.fromYAML = staticmethod(fromYAML)
except ImportError:
pass
|
pombredanne/bunch
|
munch/__init__.py
|
Python
|
mit
| 14,119
|
[
"Brian"
] |
266bd0eec3d86b55e7b3201c9a2a421d70ba120c5449703793090480ec2f6492
|
""" Refresh local CS (if needed)
Used each time you call gConfig. It keep your configuration up-to-date with the configuration server
"""
import threading
import time
import os
import _thread
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.private.RefresherBase import RefresherBase
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities import LockRing
class Refresher(RefresherBase, threading.Thread):
"""
The refresher
    A long time ago, in a code far, far away...
    someone wrote this class to auto-refresh the configuration.
    To prepare the transition to HTTPS, the logic has been separated
    from the implementation of the background tasks; this is the
    original version, for DISET, using a thread.
"""
def __init__(self):
threading.Thread.__init__(self)
RefresherBase.__init__(self)
self._triggeredRefreshLock = LockRing.LockRing().getLock()
def _refreshInThread(self):
"""
Refreshing configuration in the background. By default it uses a thread but it can be
run also in the IOLoop
"""
retVal = self._refresh()
if not retVal["OK"]:
gLogger.error("Error while updating the configuration", retVal["Message"])
def refreshConfigurationIfNeeded(self):
"""
Refresh the configuration if automatic updates are disabled, refresher is enabled and servers are defined
"""
if not self._refreshEnabled or self._automaticUpdate or not gConfigurationData.getServers():
return
# To improve performance, skip acquiring the lock if possible
if not self._lastRefreshExpired():
return
self._triggeredRefreshLock.acquire()
try:
if not self._lastRefreshExpired():
return
self._lastUpdateTime = time.time()
finally:
try:
self._triggeredRefreshLock.release()
except _thread.error:
pass
# Launch the refresh
thd = threading.Thread(target=self._refreshInThread)
thd.setDaemon(1)
thd.start()
def autoRefreshAndPublish(self, sURL):
"""
Start the autorefresh background task
:param str sURL: URL of the configuration server
"""
gLogger.debug("Setting configuration refresh as automatic")
if not gConfigurationData.getAutoPublish():
gLogger.debug("Slave server won't auto publish itself")
if not gConfigurationData.getName():
import DIRAC
DIRAC.abort(10, "Missing configuration name!")
self._url = sURL
self._automaticUpdate = True
self.setDaemon(1)
self.start()
def run(self):
while self._automaticUpdate:
time.sleep(gConfigurationData.getPropagationTime())
if self._refreshEnabled:
if not self._refreshAndPublish():
gLogger.error("Can't refresh configuration from any source")
def daemonize(self):
"""
Daemonize the background tasks
"""
self.setDaemon(1)
self.start()
# Here we define the refresher which should be used.
# By default we use the original refresher.
# Be careful, if you never start the IOLoop (with a TornadoServer for example)
# the TornadoRefresher will not work. IOLoop can be started after refresher
# but background tasks will be delayed until IOLoop start.
# DIRAC_USE_TORNADO_IOLOOP is defined by starting scripts
if os.environ.get("DIRAC_USE_TORNADO_IOLOOP", "false").lower() in ("yes", "true"):
from DIRAC.ConfigurationSystem.private.TornadoRefresher import TornadoRefresher
gRefresher = TornadoRefresher()
else:
gRefresher = Refresher()
if __name__ == "__main__":
time.sleep(0.1)
gRefresher.daemonize()
|
DIRACGrid/DIRAC
|
src/DIRAC/ConfigurationSystem/private/Refresher.py
|
Python
|
gpl-3.0
| 3,919
|
[
"DIRAC"
] |
360cb88d0b67c45a5b85ff7053a946c34b1f2c6f845dc7e7d42355351e8e291e
|
# -*- coding: utf-8 -*-
__author__ = 'shreejoy'
import unittest
import neuroelectro.models as m
from db_functions.normalize_ephys_data import check_data_val_range, normalize_nedm_val
class DataRangeTest(unittest.TestCase):
def test_check_data_val_range_in(self):
ephys_prop = m.EphysProp.objects.create(name = 'input resistance', min_range = .1, max_range = 10000)
data_val = 100
output_bool = check_data_val_range(data_val, ephys_prop)
expected_bool = True
self.assertEqual(output_bool, expected_bool)
def test_check_data_val_range_out(self):
ephys_prop = m.EphysProp.objects.create(name = 'input resistance', min_range = .1, max_range = 10000)
data_val = -10
output_bool = check_data_val_range(data_val, ephys_prop)
expected_bool = False
self.assertEqual(output_bool, expected_bool)
class NormalizeValTest(unittest.TestCase):
def test_normalize_nedm_val_ir(self):
with open('tests/test_html_data_tables/example_html_table_exp_facts.html', mode='rb') as f:
exp_fact_table_text = f.read()
article_ob = m.Article.objects.create(title='asdf', pmid='456')
data_table_ob = m.DataTable.objects.create(table_html=exp_fact_table_text, article=article_ob)
data_source_ob = m.DataSource.objects.create(data_table=data_table_ob)
# create ephys concept maps
ephys_unit = m.Unit.objects.create(name=u'Ω', prefix = 'M')
ir_ephys_ob = m.EphysProp.objects.create(name='input resistance', units = ephys_unit)
ecm = m.EphysConceptMap.objects.create(dt_id='td-68', source=data_source_ob, ephys_prop=ir_ephys_ob, ref_text = u'blah (GΩ)')
neuron_ob = m.Neuron.objects.get_or_create(name='Other')[0]
ncm = m.NeuronConceptMap.objects.create(dt_id='th-2', source=data_source_ob, neuron=neuron_ob,
neuron_long_name='thalamus parafascicular nucleus')
input_value = .2
nedm = m.NeuronEphysDataMap(neuron_concept_map = ncm, ephys_concept_map = ecm, dt_id = 'td-3', source = data_source_ob, val = input_value)
normalized_dict = normalize_nedm_val(nedm)
expected_value = 200
self.assertEqual(normalized_dict['value'], expected_value)
def test_normalize_nedm_val_ahp_amp(self):
with open('tests/test_html_data_tables/example_html_table_exp_facts.html', mode='rb') as f:
exp_fact_table_text = f.read()
article_ob = m.Article.objects.create(title='asdf', pmid='456')
data_table_ob = m.DataTable.objects.create(table_html=exp_fact_table_text, article=article_ob)
data_source_ob = m.DataSource.objects.create(data_table=data_table_ob)
# create ephys concept maps
ephys_unit = m.Unit.objects.create(name=u'V', prefix = 'm')
ahp_amp_ephys_ob = m.EphysProp.objects.create(name='AHP amplitude', units = ephys_unit,
min_range = 0, max_range = 50)
ecm = m.EphysConceptMap.objects.create(dt_id='td-68', source=data_source_ob, ephys_prop=ahp_amp_ephys_ob, ref_text = u'blah (mV)')
neuron_ob = m.Neuron.objects.get_or_create(name='Other')[0]
ncm = m.NeuronConceptMap.objects.create(dt_id='th-2', source=data_source_ob, neuron=neuron_ob,
neuron_long_name='thalamus parafascicular nucleus')
input_value = -9
error_value = 1.0
nedm = m.NeuronEphysDataMap(neuron_concept_map = ncm, ephys_concept_map = ecm, dt_id = 'td-3',
source = data_source_ob, val = input_value, err = error_value)
normalized_dict = normalize_nedm_val(nedm)
expected_value = 9
expected_error = 1.0
self.assertEqual(normalized_dict['value'], expected_value)
self.assertEqual(normalized_dict['error'], expected_error)
|
neuroelectro/neuroelectro_org
|
tests/test_normalize_ephys_data.py
|
Python
|
gpl-2.0
| 3,931
|
[
"NEURON"
] |
732ebd6a203f97beb5e57c0b434f6e08d36ab9e9f712648b48cc146acb6854f0
|
# Part of Neubot <https://neubot.nexacenter.org/>.
# Neubot is free software. See AUTHORS and LICENSE for more
# information on the copying conditions.
''' Path management utils '''
import collections
import logging
import os
from .third_party import six
from .third_party.six.moves.urllib import parse as urlparse
def depth_visit(prefix, components, visit):
''' Visit the subtree prefix/components[0]/components[1]... '''
#
# Append() guarantees that the result is always below prefix,
# so the result of this function is below prefix as well.
#
# It is not an error to pass a component that contains one or
# more path separators, except that subcomponents are not visited
# in that case.
#
# The boolean second argument to visit is to distinguish between
# leaf and ordinary nodes.
#
# This function is more strict than needed and generates an
# error for input like '/var/www', ['a/b/c', '../d'], but we
# don't care because that case doesn't happen in Neubot.
#
components = collections.deque(components)
while components:
prefix = append(prefix, components.popleft(), False)
        if prefix is None:
raise RuntimeError("utils_path: depth_visit(): append() failed")
visit(prefix, not components)
return prefix
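# Illustrative walk (a sketch; the concrete paths are assumptions): calling
# depth_visit("/var/www", ["a", "b"], visit) invokes visit("/var/www/a", False)
# and then visit("/var/www/a/b", True), and returns "/var/www/a/b", provided
# each append() call keeps the result below "/var/www".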
STRING_CLASS = six.u("").__class__
def decode(string, encoding):
""" Decode STRING from ENCODING to UNICODE """
logging.debug("utils_path: decode('%s', '%s')", string, encoding)
try:
string = string.decode(encoding)
except (KeyboardInterrupt, SystemExit):
raise
except:
logging.warning("utils_path: decode() error", exc_info=1)
else:
return string
def encode(string, encoding):
""" Encode STRING to ENCODING from UNICODE """
logging.debug("utils_path: encode('%s', '%s')", string, encoding)
try:
string = string.encode(encoding)
except (KeyboardInterrupt, SystemExit):
raise
except:
logging.warning("utils_path: encode() error", exc_info=1)
else:
return string
def possibly_decode(string, encoding):
""" If needed, decode STRING from ENCODING to UNICODE """
if string.__class__.__name__ == STRING_CLASS.__name__:
return string
return decode(string, encoding)
def append(rootdir, path, unquote_path):
""" Append path to rootdir """
logging.debug("utils_path: rootdir \"%s\"", rootdir)
logging.debug("utils_path: path \"%s\"", path)
#
# ROOTDIR
#
rootdir = possibly_decode(rootdir, "utf-8")
logging.debug("utils_path: unicode(rootdir): %s", rootdir)
if not rootdir:
return
rootdir = os.path.normpath(rootdir)
logging.debug("utils_path: normpath(rootdir): %s", rootdir)
rootdir = os.path.realpath(rootdir)
logging.debug("utils_path: realpath(rootdir): %s", rootdir)
#
# PATH
#
# 1) Neubot only and always uses ASCII paths;
#
# 2) after we unquote, the unicode string can contain some
# non-ASCII characters;
#
# 3) therefore we encode and decode again to make sure
# that we have an ASCII only path.
#
path = possibly_decode(path, "ascii")
logging.debug("utils_path: ascii(path): %s", path)
if not path:
return
if unquote_path:
path = urlparse.unquote(path)
logging.debug("utils_path: unquote(path): %s", path)
#
# Note: we encode() and decode() IN ANY CASE, because the original
# path string can also be unicode, which means that the above
# possibly_decode() invocation just returns the unicode string.
#
# BTW we MUST perform this step after we unquote(), because unquote()
# may introduce non-ASCII chars into the string.
#
path = encode(path, "ascii")
if not path:
return
path = decode(path, "ascii")
if not path:
return
logging.debug("utils_path: make_sure_really_ascii(path): %s", path)
#
# JOINED
#
joined = join(rootdir, path)
logging.debug("utils_path: joined = join(rootdir, path): %s", joined)
joined = os.path.normpath(joined)
logging.debug("utils_path: normpath(joined): %s", joined)
joined = os.path.realpath(joined)
logging.debug("utils_path: realpath(joined): %s", joined)
if not joined.startswith(rootdir):
logging.warning("utils_path: '%s' IS NOT below '%s'", joined, rootdir)
return
return joined
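# Illustrative calls (a sketch; assumes "/var/www" is a real, non-symlinked directory):
#   append("/var/www", "foo/bar.txt", False)   -> "/var/www/foo/bar.txt"
#   append("/var/www", "../etc/passwd", False) -> None (the result escapes rootdir)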
def normalize(string):
''' Normalize a pathname '''
return os.path.normpath(string)
def join(left, right):
''' Join two paths '''
return os.sep.join([left, right])
|
neubot/neubot-runtime
|
neubot_runtime/utils_path.py
|
Python
|
gpl-3.0
| 4,653
|
[
"VisIt"
] |
3b2e269e573eeb20aa3dc3d30a423b6c6ec675ddb5cae3bec1a80181ae218b7f
|
# this code is used to test the Python2Vtk.py read and write functions
import tempfile as TP
import numpy as np
from MNNparcellation import Python2Vtk as PY
def co_shape(A, B):
if np.shape(A) == np.shape(B):
return A, B, 1
elif np.shape(A) == np.shape(B.T):
return A, B.T, 1
else:
return A, B, 0
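# Illustrative behaviour (a sketch): co_shape(np.zeros((3, 2)), np.zeros((2, 3)))
# returns the second array transposed together with a flag of 1, while two arrays
# whose shapes cannot be reconciled are returned unchanged with a flag of 0.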
def test_python2vtk():
" Test read and write vtk files "
nbr_sources = 100
vertices = np.random.randint(nbr_sources, size=(nbr_sources, 3))
faces = np.random.randint(nbr_sources, size=(nbr_sources*2, 3))
normal = np.array(np.random.randn(nbr_sources), dtype=np.float64)
scalar = range(nbr_sources)
f = TP.NamedTemporaryFile(delete=True, suffix='.vtk', dir='./data/')
# write vtk file
PY.WritePython2Vtk(f.name, vertices, faces, normal, scalar,
name_of_scalar="Parcels")
# read vtk file
Coordinates, Faces, Scalers, Normal = PY.ReadVtk2Python(f.name)
Coordinates, vertices, t_c = co_shape(Coordinates, vertices)
Faces, faces, t_f = co_shape(Faces, faces)
dec = 4
if t_f*t_c == 0:
return False
else:
np.testing.assert_almost_equal(Scalers, scalar, decimal=dec)
np.testing.assert_almost_equal(Faces, faces, decimal=dec)
np.testing.assert_almost_equal(Coordinates, vertices, decimal=dec)
np.testing.assert_almost_equal(Normal, normal, decimal=dec)
return True
if __name__ == "__main__":
if test_python2vtk():
print("Write and Read vtk files .....Ok")
else:
print("Write and Read vtk files .....Failed")
|
BBELAOUCHA/dMRIParcellation
|
test/test_vtk_readwrite.py
|
Python
|
bsd-3-clause
| 1,591
|
[
"VTK"
] |
ad7758ea72f2191bb970098931b3ab550ca7349004f1d568e1830c25e0f64733
|
import json
import pkg_resources
import re
import string
import time
from pymongo import MongoClient
import misc
import parser_helpers
import utils
# Initialize global variables
phone_num_sets = misc.phone_num_lists()
countries = utils.countries()
post_id_bp_groups = utils.post_id_backpage_groups()
# read ethnicities into memory for parser to use
with open('dataFiles/ethnicities.json', 'rU') as df:
eths_dict = json.load(df)
def parse_ethnicity(parts):
"""
Parse the ethnicity from the Backpage ad. Returns the higher level ethnicities associated with an ethnicity.
For example, if "russian" is found in the ad, this function will return ["russian", "eastern_european", "white_non_hispanic"].
    This allows us to look at ethnicities numerically and uniformly.
Note: The code for this function is pretty old and messy, but still works well enough for our purposes.
    parts -> The backpage ad's posting_body, separated into substrings
"""
eastern_european = ['russian', 'ukrainian', 'moldova', 'bulgarian', 'slovakian', 'hungarian', 'romanian',
'polish', 'latvian', 'lithuanian', 'estonia', 'czech', 'croatian', 'bosnian', 'montenegro', 'macedonian', 'albanian',
'slovenian', 'serbian', 'kosovo', 'armenian', 'siberian', 'belarusian']
western_european = ['british', 'german', 'france', 'greek', 'italian', 'belgian', 'netherlands', 'swiss', 'irish',
'danish', 'sweden', 'finnish', 'norwegian', 'portugese', 'austrian', 'sanmarino', 'turkish', 'liechtenstein', 'australian',
'newzealand', 'andorra', 'luxembourg', 'israeli', 'jewish']
caribbean = ['bahamian', 'haitian', 'dominican', 'puertorican', 'jamaican', 'cuban', 'caymanislands', 'trinidad', 'caribbean',
'guadeloupe', 'martinique', 'barbados', 'saintlucia', 'stlucia', 'curacao', 'aruban', 'saintvincent', 'stvincent', 'creole',
'grenadines', 'grenada', 'barbuda', 'saintkitts', 'saintmartin', 'anguilla', 'virginislands', 'montserrat', 'saintbarthelemy']
south_central_american = ['guatemalan', 'belizean', 'honduras', 'nicaraguan', 'elsalvador', 'panamanian', 'costarican',
'colombian', 'columbian', 'venezuelan', 'ecuadorian', 'peruvian', 'bolivian', 'chilean', 'argentine', 'uruguayan', 'paraguayan',
'brazilian', 'guyana', 'suriname']
mexican = ['mexican']
spanish = ['spanish']
east_asian = ['thai', 'vietnamese', 'cambodian', 'malaysian', 'filipino', 'singaporean', 'indonesian', 'japanese',
'chinese', 'taiwanese', 'northkorean', 'southkorean', 'korean']
korean = ['northkorean', 'southkorean', 'korean']
south_asian = ['nepalese', 'bangladeshi', 'bhutanese', 'indian']
hawaiian_pacific_islanders = ['hawaiian', 'guamanian', 'newguinea', 'fiji', 'marianaislands', 'solomonislands', 'micronesia',
'tuvalu', 'samoan', 'vanuata', 'polynesia', 'cookislands', 'pitcaimislands', 'marshallese']
middle_eastern = ['iraqi', 'iranian', 'pakistani', 'afghan', 'kazakhstan', 'uzbekistan', 'tajikistan', 'turkmenistan',
'azerbaijan', 'kyrgyzstan', 'syrian', 'lebanese', 'jordanian', 'saudiarabian', 'unitedarabemirates', 'bahrain', 'kuwait',
'persian', 'kurdish', 'middleeastern']
north_african = ['egyptian', 'libyan', 'algerian', 'tunisian', 'moroccan', 'westernsaharan', 'mauritanian', 'senegal', 'djibouti']
# Broad, high level ethnicity classes
white_non_hispanic = eastern_european + western_european
hispanic_latino = caribbean + south_central_american + mexican + spanish
# Get tribe names!!!
american_indian = ['nativeamerican', 'canadian', 'alaskan', 'apache', 'aztec', 'cherokee', 'chinook', 'comanche',
'eskimo', 'incan', 'iroquois', 'kickapoo', 'mayan', 'mohave', 'mojave', 'navaho', 'navajo', 'seminole']
asian = east_asian + south_asian + hawaiian_pacific_islanders
midEast_nAfrica = middle_eastern + north_african
african_american = ['black', 'african american']
ss_african = ['gambia', 'bissau', 'guinea', 'sierraleone', 'liberian', 'ghana', 'malian', 'burkinafaso', 'beninese', 'nigerian',
'sudanese', 'eritrea', 'ethiopian', 'cameroon', 'centralafricanrepublic', 'somalian', 'gabon', 'congo', 'ugandan', 'kenyan',
'tanzanian', 'rwandan', 'burundi', 'angola', 'zambian', 'mozambique', 'malawi', 'zimbabwe', 'namibia', 'botswana',
'lesotho', 'southafrican', 'swaziland', 'madagascar', 'comoros', 'mauritius', 'saintdenis', 'seychelles', 'saotome']
# Add various identifying values to Category lists
white_non_hispanic.append('european')
white_non_hispanic.append('white')
hispanic_latino.extend(['hispanic', 'latina'])
asian.extend(['asian', 'oriental'])
midEast_nAfrica.extend(['arabian', 'muslim'])
# "from ____" to handle false positives as names
from_names = ['malaysia']
# One massive ethnicities list
ethnicities = white_non_hispanic + hispanic_latino + american_indian + asian + midEast_nAfrica + african_american + ss_african
num = 0
found = []
# Check each part of the body
clean_parts = []
for p in parts:
part = parser_helpers.clean_part_ethn(p)
clean_parts.append(part)
# handle "from _____" ethnicities to avoid false positives in names
for name in from_names:
if re.compile(r'from +' + re.escape(name)).search(part):
found.append(name)
if any(eth in part for eth in ethnicities):
# At least one ethnicity was found
for ethn in ethnicities:
if ethn in part:
index=part.index(ethn)
if (' no ' in part and part.index(' no ')+4==index) or ('no ' in part and part.index('no')==0 and part.index('no ') + 3==index) or ('.no ' in part and part.index('.no ') + 4==index):
pass
else:
# Found the current ethnicity
if ethn in ['black', 'african american']:
ethn = "african_american"
if ethn == 'white':
ethn = 'white_non_hispanic'
if ethn not in found:
# Add to Found list, check for subsets
found.append(ethn)
if ethn in eastern_european:
found.append("eastern_european")
if ethn in western_european:
found.append("western_european")
if ethn in caribbean:
found.append("caribbean")
if ethn in south_central_american:
found.append("south_central_american")
if ethn in east_asian:
found.append("east_asian")
if ethn in south_asian:
found.append("south_asian_indian")
if ethn in hawaiian_pacific_islanders:
found.append("hawaiian_pacific_islanders")
if ethn in middle_eastern:
found.append("middle_eastern")
if ethn in north_african:
found.append("north_african")
# Check the most general ethnicity categories
if ethn in white_non_hispanic and "white_non_hispanic" not in found:
found.append("white_non_hispanic")
num += 1
if ethn in hispanic_latino and "hispanic_latino" not in found:
found.append("hispanic_latino")
num += 1
if ethn in american_indian and "american_indian" not in found:
found.append("american_indian")
num += 1
if ethn in asian and "asian" not in found:
if ethn != "asian":
found.append("asian")
num += 1
if ethn in midEast_nAfrica and "midEast_nAfrican" not in found:
found.append("midEast_nAfrican")
num += 1
if ethn in ss_african and "subsaharan_african" not in found:
found.append("subsaharan_african")
num += 1
if ethn == "african_american":
num += 1
# Remove ethnicity from all parts
output_parts = []
for p in clean_parts:
part = p
if any(eth in part for eth in found):
# Ethnicity(s) found in this part
for eth in found:
if eth in part:
# Remove ethnicity
part = re.sub(eth, "", part)
# Add part to output
if len(part) > 2:
output_parts.append(part)
# Check if there was more than one general ethnicity. If so, the ad is multi-racial.
if num > 1:
found.append("multiracial")
found = list(set(found))
return (found, output_parts)
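# A standalone sketch (not part of the original parser) of the negation guard used
# above: an ethnicity mention is skipped when it immediately follows "no " (e.g.
# "no asian"), which the long index comparison inside parse_ethnicity encodes.
def _mention_is_negated(part, ethn):
    """Return True if the first occurrence of `ethn` in `part` directly follows 'no '."""
    index = part.index(ethn)
    return ((' no ' in part and part.index(' no ') + 4 == index)
            or (part.startswith('no ') and index == 3)
            or ('.no ' in part and part.index('.no ') + 4 == index))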
def parse_indicators(parts, ethnicity):
"""
Parses terms that may or may not be "indicators" of trafficking. Some terms are used for
non-trafficking related purposes (e.g. matching or identification problems). Also good to
note is that this list has been growing slowly for about 2 years and some indicators have
been removed/combined. Thus, you may notice that some numeric values are non-existent.
TODO: Move logic from hard-coded into JSON config file(s).
parts -> The backpage ad's posting_body, separated into substrings
ethnicity -> The ethnicity list that we parsed for the ad
"""
ret_val=[]
for part in parts:
part=part.lower()
part = part.replace('virginia', '').replace('fresh pot', '')
part = re.sub(r'virgin ?island', '', part)
part = re.sub(r'no teen', '', part)
if re.compile(r'new ?to ?the ?(usa?|country)').search(part):
ret_val.append(1)
if "natasha" in part or "svetlana" in part:
ret_val.append(2)
if 'young' in part:
ret_val.append(3)
if re.compile(r'just *(hit|turned) *18').search(part):
ret_val.append(5)
if re.compile(r'fresh *meat').search(part):
ret_val.append(6)
if 'virgin' in part:
ret_val.append(7)
if 'foreign' in part:
ret_val.append(8)
if re.compile(r'(just|fresh)( *off *)?( *the *)boat').search(part):
ret_val.append(9)
if re.compile(r'fresh from over ?sea').search(part):
ret_val.append(9)
if re.compile(r'easy *sex').search(part):
ret_val.append(10)
if re.compile(r'come *chat *with *me').search(part):
ret_val.append(11)
if re.compile(r'\b(massage|nuru)\b').search(part):
ret_val.append(12)
if re.compile(r'escort *agency').search(part):
ret_val.append(13)
if re.compile(r'((https?)|www)\.\w{5,30}?.com').search(part):
ret_val.append(14)
if (re.compile(r'world( )*series').search(part) or re.compile(r'grand( )*slam').search(part) or
re.compile(r'base( )?ball').search(part) or re.compile(r'double( )?play').search(part) or
'cws' in part or re.compile(r'home( )?run').search(part) or re.compile(r'batter( )?up').search(part) or
re.compile(r'triple( )?play').search(part) or re.compile(r'strike( )?out').search(part) or
'sports' in part):
ret_val.append(15)
if (re.compile(r'new ?girls').search(part) or re.compile(r'new ?arrivals').search(part) or
re.compile(r'just ?in ?from ? \w{3,15}\W').search(part) or re.compile(r'new \w{3,9} staff').search(part)):
ret_val.append(17)
if re.compile(r'brand *new').search(part):
ret_val.append(18)
if re.compile(r'coll(e|a)ge').search(part) and 15 not in ret_val:
ret_val.append(19)
if 'teen' in part:
ret_val.append(20)
if re.compile(r'high ?school').search(part):
ret_val.append(21)
if re.compile(r'daddy\'?s? ?little ?girl').search(part):
ret_val.append(22)
if 'fresh' in part:
ret_val.append(23)
phrases = [(r'100%' + re.escape(eth)) for eth in ethnicity]
if any(re.compile(phrase).search(part) for phrase in phrases):
ret_val.append(24)
if re.compile(r'speaks? \d\d? language').search(part):
ret_val.append(25)
if re.compile(r'new to the (country|us)').search(part):
ret_val.append(26)
if re.compile(r'massage ?parlou?r').search(part):
ret_val.append(27)
if re.compile(r'come see us at ').search(part):
ret_val.append(28)
if (re.compile(r'420 ?friendly').search(part) or re.compile(r'party ?friendly').search(part) or
re.compile(r'420 ?sp').search(part) or ' 420 ' in part):
ret_val.append(30)
if 'under 35' in part:
ret_val.append(31)
if re.compile(r'\b(avail(able)?|open) *24(/|\\|-)7\b').search(part):
ret_val.append(33)
        if re.compile(r'no ?(indian)').search(part) or re.compile(r'indians? not ((allow)|(welcome))').search(part):
            ret_val.append(36)
        if re.compile(r'no ?(hispanic|mexican)').search(part) or re.compile(r'(hispanic|mexican)s? not ((allow)|(welcome))').search(part):
            ret_val.append(37)
if 'incall' in part:
ret_val.append(38)
if 'outcall' in part:
ret_val.append(39)
parts = part.split('from ')
parts.pop(0)
for p in parts:
p = re.sub(r' +', ' ', p)
if p.split(' ')[0].lower() in countries:
ret_val.append(27)
break
eastern_euro_countries = ['estonia', 'latvia', 'lithuania', 'armenia', 'russia', 'kazakhstan', 'ukrain', 'belarus',
'moldova', 'czech', 'austria', 'croatia', 'hungary', 'poland', 'slovakia', 'slovenia',
'albania', 'bosnia', 'bulgaria', 'greece', 'macedonia', 'romania']
if any(c in part for c in eastern_euro_countries):
ret_val.append(28)
ret_val = list(set(ret_val))
return ret_val
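# Note: each check above rebuilds its regex via re.compile(...).search(...) on every
# call. A sketch of the usual refactor (an assumption, not the original code) is to
# compile the patterns once at module level and reuse the compiled objects:
_NEW_TO_USA_RE = re.compile(r'new ?to ?the ?(usa?|country)')
def _has_new_to_usa_indicator(part):
    """Return True if the 'new to the USA/country' indicator appears in `part`."""
    return _NEW_TO_USA_RE.search(part) is not None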
def parse_military_friendly(parts):
"""
Parse whether or not the ad indicates "military friendly".
parts -> The backpage ad's posting_body, separated into substrings
"""
for part in parts:
if 'military' in part:
return 1
return 0
def parse_name(parts, main_names, common_names, debug=0):
"""
Parse all name(s) from a Backpage ad.
parts -> The backpage ad's posting_body, separated into substrings
main_names -> "Regular" names (jessica, gabriel, etc.) that can be trusted as names simply by its existence
common_names -> Names such as "pleasure" or "sexy" that should only be parsed if surrounded by an "intro"
main_names and common_names can both be either sets or dictionaries. Current version uses dictionaries.
"""
lowercase_parts = [re.sub(r'(in|out)call', '', p.lower()) for p in parts]
start = time.time()
# Intros to common names
intros = {
'pre': ['my name is', 'i am', 'call me', 'call', 'text', 'my names', 'my name', 'known as', 'go by', 'Intro',
'ask for', 'call for', 'ask', 'this is', 'one and only', 'prevphonespothti', 'called'],
'post': ['is back', 'is here', 'in town', 'prevphonespothti', 'is ready', 'is available']
}
spanish_intros = ['hola soy', 'me llamo', 'mi nombre es', 'yo soy', 'pregunta por', 'encantada de conocerlo', 'hola papi']
intros['pre'].extend(spanish_intros)
# Regex intros to common names
rgx_intros = {
'pre': [r'\b(?:it\'s)\b', r'\b(?:it s)\b', r'\b(?:its)\b', r'\b(?:soy)\b', r'\b(?:es)\b', r'\b(?:hola)\b', r'\b(?:y?o?ur girl)\b', r'\b(?:i\'m)\b',
r'\b(?:im)\b', r'\b(?:y?o?ur favorite girl)\b', r'\b(?:y?o?ur most favorite girl)\b', r'\bmy ?fr(?:i|e)(?:i|e)nd\b', r'\bm(?:s|z)\.',
r'\bmi(?:s{1,2}|z{1,2})'],
'post': [r'\b(?:here)\b', r'\b(?:(?:i|\')s (?:the|my) name)\b']
}
# These words shouldn't follow common name matched from an intro
false_positives = set(['complexion', 'skin', 'hair', 'locks', 'eyes', 'st', 'ave', 'street', 'avenue', 'blvd', 'boulevard',
'highway', 'circle', 'hwy', 'road', 'rd'])
vowels_with_y = set(list('aeiouy'))
uniques = set([])
for p in lowercase_parts:
part = p.lower()
part = re.sub(r"(^| )i ?'? ?m ", " Intro ", part).strip()
part = part.replace('<br>', ' ').replace('&', ' and ')
part = re.sub(r'\.+', ' ', part)
part = re.sub(r'x+', 'x', part)
part = re.sub(r'y+', 'y', part)
# Retain 'part' to be used for separating comma-separated names
part = re.sub(r',+', ' ', part)
part = re.sub(r' +', ' ', part)
builder = []
for pt in part.split():
if len(pt) > 2:
lastc = pt[len(pt)-1]
# Convert names that have repeated last letters and the last letters aren't "E" and aren't two consonants following a vowel
if lastc == pt[len(pt)-2] and not (lastc == 'e' or (pt[len(pt)-3] in vowels_with_y and lastc not in vowels_with_y)):
builder.append(pt[:len(pt)-1])
else:
builder.append(pt)
else:
builder.append(pt)
part = ' '.join(builder)
# Check if the part is entirely just a common word
ageless_title = re.sub(r' - \d\d', '', part.lower())
ageless_title = re.sub(r'\W+', '', ageless_title)
if ageless_title in common_names or ageless_title in main_names:
uniques.add(ageless_title)
continue;
# Find common names that come immediately before or after a "both-side intro"
for k in intros:
for intro in intros[k]:
if intro in part:
pts = part.split(intro)
for i in range(1, len(pts)):
if k == 'post':
# Check left side of intro
ptl = re.sub(r'\W', ' ', pts[i-1])
ptl = re.sub(r' +', ' ', ptl)
tokenized = ptl.split()
if tokenized and tokenized[len(tokenized)-1] and tokenized[len(tokenized)-1] in common_names:
uniques.add(tokenized[len(tokenized)-1])
break
else:
# Check right side of intro
ptr = re.sub(r'\W', ' ', pts[i])
ptr = re.sub(r' +', ' ', ptr)
tokenized = ptr.split()
if tokenized and tokenized[0] in common_names:
if not (len(tokenized) > 1 and tokenized[1] in false_positives or (len(tokenized) > 2 and tokenized[2] in false_positives)):
# Next 2 words are not false positives
uniques.add(tokenized[0])
break
# Check intros that include regexes
for k in rgx_intros:
for intro in rgx_intros[k]:
matches = list(re.findall(intro, part))
for match in matches:
pts = part.split(match)
for i in range(1, len(pts)):
if k == 'post':
# Check left side of intro
ptl = re.sub(r'\W', ' ', pts[i-1])
ptl = re.sub(r' +', ' ', ptl)
tokenized = ptl.split()
if tokenized and tokenized[len(tokenized)-1] and tokenized[len(tokenized)-1] in common_names:
uniques.add(tokenized[len(tokenized)-1])
break
else:
# Check right side of intro
ptr = re.sub(r'\W', ' ', pts[i])
ptr = re.sub(r' +', ' ', ptr)
tokenized = ptr.split()
if tokenized and tokenized[0] in common_names:
if not (len(tokenized) > 1 and tokenized[1] in false_positives or (len(tokenized) > 2 and tokenized[2] in false_positives)):
# Next 2 words are not false positives
uniques.add(tokenized[0])
break
# Find regular names
tokens = list(re.split(r'\W+', part))
for i in range(len(tokens)):
if not tokens[i]:
continue
curr = tokens[i]
# Check if current token has an 's' at the end (ex: "brittanys beautiful body")
if curr not in main_names and curr[len(curr)-1] == 's' and curr[:-1] in main_names:
curr = curr[:-1]
if curr in main_names:
# Check if name is a two-part name
if i > 0 and (''.join([tokens[i-1], curr]) in main_names or ''.join([tokens[i-1], curr]) in common_names):
# Prev token was a prefix to current
uniques.add(' '.join([tokens[i-1], curr]))
uniques.discard(tokens[i-1])
elif (i < len(tokens)-1 and tokens[i+1] and (''.join([tokens[i], tokens[i+1]]) in main_names or
''.join([tokens[i], tokens[i+1]]) in common_names)):
# Current token has a suffix
uniques.add(' '.join([tokens[i], tokens[i+1]]))
elif (i < len(tokens)-1 and tokens[i+1] and tokens[i+1][len(tokens[i+1])-1] == 's' and (''.join([tokens[i], tokens[i+1][:-1]]) in main_names or
''.join([tokens[i], tokens[i+1][:-1]]) in common_names)):
# Current token has a suffix with plural ending ('s')
uniques.add(' '.join([tokens[i], tokens[i+1][:-1]]))
else:
# Only single-word name
uniques.add(curr)
# Find common words that are part of "pairing" phrases, paired with names that we found already
pairings = set(['and', 'plus', 'with'])
for i in range(len(tokens)):
if tokens[i] not in uniques and tokens[i] in common_names:
if i > 1 and tokens[i-2] in uniques and tokens[i-1] in pairings:
# ex: "jessica and diamond"
uniques.add(tokens[i])
elif i < len(tokens)-2 and tokens[i+2] in uniques and tokens[i+1] in pairings:
# ex: "diamond and jessica"
uniques.add(tokens[i])
# Odd cases
if ('mary' in uniques or 'jane' in uniques or 'mary jane' in uniques) and re.search(r'mary\W+jane', part):
uniques.discard('jane')
uniques.discard('mary')
uniques.discard('mary jane')
if 'crystal' in uniques and re.search(r'crystal ?(blue|spa|massage|parlor|city|stone)', part):
uniques.discard('crystal')
# Remove names that are substrings of larger names
names_final = set([])
if isinstance(main_names, set):
# Name datasets are raw sets of names
for match in uniques:
if not any(match in name for name in [v for v in uniques if v != match]) and match:
names_final.add(match.strip())
else:
# Name datasets are misspelled names mapped to properly spelled names
for match in uniques:
nosp_match = match.replace(' ', '')
if not any(nosp_match in name for name in [v for v in uniques if v != nosp_match]) and nosp_match:
# add the parsed name, not the converted one (ex: don't change "mickey" to "mikey")
names_final.add(nosp_match)
if debug == 1:
        print('parse_name time taken: {} seconds'.format(time.time() - start))
return list(names_final)
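# Usage sketch for parse_name with tiny, hypothetical name dictionaries (the real
# dictionaries are loaded elsewhere in the pipeline and are much larger).
def _parse_name_example():
    """In this toy input, 'jessica' is accepted on sight because it is a main name,
    while 'diamond' is a common name that is only kept because it follows the
    pairing word 'and' after an already-found name."""
    toy_main = {'jessica': 'jessica'}
    toy_common = {'diamond': 'diamond'}
    return parse_name(['hi i am jessica and diamond is here'], toy_main, toy_common)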
def parse_no_blacks(parts):
"""
Parse whether or not an ad indicates that there are "no black clients" allowed.
Returns a tuple containing:
[0]: Binary result of whether or not ad specifies "no blacks allowed"
[1]: The input strings, minus the sections indicating "no blacks allowed"
"""
match_patterns = [r'no ?black', r'no ?african', r'no ?aa', r'white ?(guys|men|clients) ?only',
r'only ?white ?(guys|men|clients)']
remove_patterns = [r'no ?black ?or ?african', r'no ?african ?or ?black', r'men', r'guys']
output_parts = []
output_val = 0
# Check each part
for part in parts:
o_part = part
# Check all patterns
for m in match_patterns:
found = re.search(m, part)
if found != None:
# Found a 'no black allowed' phrase
output_val = 1
# Remove all relevant phrases
for p in remove_patterns:
o_part = re.sub(p, '', o_part)
o_part = re.sub(m, '', o_part)
# Append part to output (if it's not empty)
if len(o_part) > 2:
output_parts.append(o_part)
return (output_val, output_parts)
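# Usage sketch (hypothetical ad text). The returned flag marks the ad and the matched
# phrase is stripped from the text handed back for further parsing.
def _parse_no_blacks_example():
    flag, cleaned = parse_no_blacks(['no black clients please, thanks'])
    return flag, cleaned  # flag == 1; the matched 'no black' phrase is removed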
def parse_phone(parts, allow_multiple=False):
"""
Parse the phone number from the ad's parts
parts -> The backpage ad's posting_body, separated into substrings
allow_multiple -> If false, arbitrarily chooses the most commonly occurring phone
"""
# Get text substitutions (ex: 'three' -> '3')
text_subs = misc.phone_text_subs()
Small = text_subs['Small']
Magnitude = text_subs['Magnitude']
Others = text_subs['Others']
phone_pattern = r'1?(?:[2-9][0-8][0-9])\s?(?:[2-9][0-9]{2})\s?(?:[0-9]{2})\s?(?:[0-9]{2})'
phone_pattern_spaces = r'1?\W?[2-9]\W?[0-8]\W?[0-9]\W?[2-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]'
found_phones = []
return_parts = []
# Check each part for phone # and remove from parts if found
for part in parts:
body = part
# remove '420' references to avoid false positives
body = re.sub(r'420 ?friendly', '', body)
body = body.replace(' 420 ', '')
body = body.replace('420 sp', '')
# Replace all disguising characters in the body
for key in Small:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Small[key]), body)
for key in Magnitude:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Magnitude[key]), body)
for key in Others:
body = re.sub(r'-?'+re.escape(key)+r'-?', str(Others[key]), body)
body = re.sub(r'\W', ' ', body)
body = re.sub(r' +', ' ', body)
if len(re.sub(r'\D', '', body)) < 10:
# Less than 10 numeric digits in part - no phone number here
return_parts.append(part)
continue;
phones = re.findall(phone_pattern, body)
if len(phones) == 0:
# No phone number in standard format
phones = re.findall(phone_pattern_spaces, body)
if len(phones) > 0:
# Phone number had spaces between digits
for found in phones:
found_phones.append(re.sub(r'\D', '', found))
else:
# Found phone in standard format
for found in phones:
found_phones.append(re.sub(r'\D', '', found))
if found_phones:
# Phone has been found, remove from part)
for found in found_phones:
filtered_part = parser_helpers.remove_phone(part, found)
if re.sub(r'\W', '', filtered_part):
# get rid of now-empty parts
return_parts.append(filtered_part)
else:
# Phone not found yet, add part to output
return_parts.append(part)
if not allow_multiple:
# Get most commonly occurring phone
found_phone = ''
if len(found_phones) > 0:
found_phone = max(set(found_phones), key=found_phones.count)
# Return the phone along with the original parts (minus any occurrences of the phone number)
return (found_phone, return_parts)
else:
# return all phones
return (list(set(found_phones)), return_parts)
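# Standalone sketch of the word-to-digit substitution idea used by parse_phone. The
# real mappings come from misc.phone_text_subs(), which is not shown in this file,
# so the dictionary below is an assumed, minimal stand-in.
def _words_to_digits_example(text):
    """Collapse spelled-out digits (e.g. 'three one zero') into numerals so the
    phone regexes above can match disguised numbers."""
    small = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4',
             'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9'}
    for word, digit in small.items():
        text = re.sub(r'-?' + word + r'-?', digit, text)
    return text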
def parse_posting_id(text, city):
"""
Parse the posting ID from the Backpage ad.
    text -> The ad's HTML (or a substring containing the "Post ID:" section)
city -> The Backpage city of the ad
"""
parts = text.split('Post ID: ')
if len(parts) == 2:
post_id = parts[1].split(' ')[0]
if post_id:
return post_id + post_id_bp_groups[city]
def parse_time(text):
"""
Parse the time of the ad.
text -> The 'posted_date' field of the Backpage ad as input.
"""
data = text[-15:-7]
data = data.replace(' ', '')
return data
def parse_truckers(parts):
"""
Parse for phrases that indicate a form of "trucker friendly".
parts -> The backpage ad's posting_body, separated into substrings
Returns a tuple containing:
[0]: Binary result of whether or not ad is trucker-friendly
[1]: The input strings, minus the sections indicating trucker-friendly
"""
match_terms = ['truck', 'cabs', 'flying j']
    match_patterns = [r'exit \d{1,4}', r'interstate \d{0,3}', r'i-\d\d?\d?', r'iowa ?80']
output_val = 0
output_parts = []
for p in parts:
part = p
if any(term in part for term in match_terms):
# found trucker string
output_val = 1
for term in match_terms:
# remove matched term
part = part.replace(term, '')
else:
# check for trucker patterns
for patt in match_patterns:
found = re.search(patt, part)
if found != None:
# found match pattern
output_val = 1
part = re.sub(patt, '', part) # remove matched term
if len(part) > 2:
output_parts.append(part)
return (output_val, output_parts)
|
usc-isi-i2/etk
|
etk/data_extractors/htiExtractors/parser.py
|
Python
|
mit
| 27,555
|
[
"CRYSTAL"
] |
c115db4b515cba4c4bc029dd8310eb68a2f3ce6ab42749250bf44f3addbe58bb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# gauss2d.py
"""
Class for generating and fitting 2D Gaussian peaks
Supports both least squares and MLE fitting and gaussian peaks
parameterized by a single width, widths along each axis and widths
along arbitrary axes. Fitting can be done with manually specified
guesses or initial guesses will be estimated from the data.
Supports parameter passing through dicts and tuples.
Copyright (c) 2016, David Hoffman
"""
import logging
# need to be able to deal with warnings
import warnings
# numpy for numerical
import numpy as np
from dphtools.utils.lm import curve_fit
# need basic curve fitting
from scipy.optimize import OptimizeWarning
# need measure to take image moments
from skimage.measure import moments
# need to detrend data before estimating parameters
from .utils import detrend, find_real_root_near_zero
# Eventually we'll want to abstract the useful, abstract bits of this
# class to a parent class called peak that will allow for multiple types
# of fits
# rho = cos(theta)
logger = logging.getLogger(__name__)
class Gauss2D(object):
"""
A class that encapsulates experimental data that is best modeled by a 2D
gaussian peak. It can estimate model parameters and perform a fit to the
data. Best fit parameters are stored in a dictionary that can be accessed
by helper functions.
Right now the class assumes that `data` has constant spacing
"""
def __init__(self, data, **kwargs):
"""
Holds experimental equi-spaced 2D-data best represented by a Gaussian
Parameters
----------
data : array_like
An array holding the experimental data, for now data is assumed to
have equal spacing
Returns
-------
out : object
            A Gauss2D object holding the specified data. All other internal
            variables are initialized to `None`
"""
# Note that we are only passing a reference to the original data here
# so DO NOT modify this field
self._data = data
# set all internal fields to point to NONE
self._guess_params = None
self._popt = None
self._pcov = None
self.errmsg = None
self.ier = None
self.noise = None
self.residuals = None
super().__init__(**kwargs)
########################
# PROPERTY DEFINITIONS #
########################
@property
def data(self):
"""
Internal data
"""
# This attribute should be read-only, which means that it should return
# a copy of the data not a pointer.
return self._data.copy()
@property
def opt_params(self):
"""
Optimized parameters from the fit
"""
# This attribute should be read-only, which means that it should return
# a copy of the data not a pointer.
return self._popt.copy()
@property
def pcov(self):
"""
Covariance matrix of model parameters from the fit
"""
# This attribute should be read-only, which means that it should return
# a copy of the data not a pointer.
return self._pcov.copy()
@property
def error(self):
"""Gives whether there's an error or not."""
if self.ier in [1, 2, 3, 4]:
return False
else:
return True
@property
def guess_params(self):
"""Guessed parameters"""
return self._guess_params.copy()
#############################
# STATIC METHOD DEFINITIONS #
#############################
@classmethod
def gauss2D(cls, xdata_tuple, amp, mu0, mu1, sigma0, sigma1, rho, offset):
"""
A model function for a bivariate normal distribution (not normalized)
see http://mathworld.wolfram.com/BivariateNormalDistribution.html for
details
Parameters
----------
xdata_tuple : tuple of array_like objects
First element is x0 and second is x1, each usually from np.meshgrid
x0 and x1 must have the same shape
amp : float
Amplitude
mu0 : float
center x position
mu1 : float
center y position
sigma0 : float
x width
sigma1 : float
y width
rho : float
correlation between x and y (defines the angle the distributions
major axes make with the coordinate system)
offset : float
offset
Returns
-------
g : array_like
A matrix of values that represent a 2D Gaussian peak. `g` will have
the same dimensions as `x0` and `x1`
Note: Area = 2*amp*np.pi*sigma_x*sigma_y*np.sqrt(1-rho**2)
"""
(x0, x1) = xdata_tuple
if x0.shape != x1.shape:
# All functions assume that data is 2D
raise RuntimeError("Grid is mishapen")
if not abs(rho) < 1:
            logger.warning(
                "rho cannot be greater than 1 or less than -1. Here rho is {}.".format(rho)
                + "\nCoercing to {}".format(np.sign(rho) * 0.9999)
            )
            rho = np.sign(rho) * 0.9999
z = (
((x0 - mu0) / sigma0) ** 2
- 2 * rho * (x0 - mu0) * (x1 - mu1) / (sigma0 * sigma1)
+ ((x1 - mu1) / sigma1) ** 2
)
g = offset + amp * np.exp(-z / (2 * (1 - rho ** 2)))
return g
@classmethod
def gauss2D_norot(cls, xdata_tuple, amp, x0, y0, sigma_x, sigma_y, offset):
"""A special case of gauss2D with rho = 0"""
# return the general form with a rho of 0
return cls.gauss2D(xdata_tuple, amp, x0, y0, sigma_x, sigma_y, 0.0, offset)
@classmethod
def gauss2D_sym(cls, xdata_tuple, amp, x0, y0, sigma_x, offset):
"""A special case of gauss2D_norot with sigma_x = sigma_y"""
# return the no rotation form with same sigmas
return cls.gauss2D_norot(xdata_tuple, amp, x0, y0, sigma_x, sigma_x, offset)
@classmethod
def model(cls, xdata_tuple, *args):
"""
Chooses the correct model function to use based on the number of
arguments passed to it
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
modeldata :
Other Parameters
----------------
*args : model parameters
"""
num_args = len(args)
if num_args == 5:
return cls.gauss2D_sym(xdata_tuple, *args)
elif num_args == 6:
return cls.gauss2D_norot(xdata_tuple, *args)
elif num_args == 7:
return cls.gauss2D(xdata_tuple, *args)
else:
raise ValueError("len(args) = {}, number out of range!".format(num_args))
@classmethod
def gauss2D_jac(cls, params, xdata):
"""Jacobian for full model"""
x0 = xdata[0].ravel()
x1 = xdata[1].ravel()
amp, mu0, mu1, sigma0, sigma1, rho, offset = params
# calculate the main value, minus offset
# (derivative of constant is zero)
value = cls.gauss2D(xdata, *params).ravel() - offset
dydamp = value / amp
dydmu0 = (
value
* ((2 * (x0 - mu0)) / sigma0 ** 2 - (2 * rho * (x1 - mu1)) / (sigma0 * sigma1))
/ (2 * (1 - rho ** 2))
)
dydmu1 = (
value
* ((2 * (x1 - mu1)) / sigma1 ** 2 - (2 * rho * (x0 - mu0)) / (sigma0 * sigma1))
/ (2 * (1 - rho ** 2))
)
dydsigma0 = (
value
* (
((x0 - mu0) ** 2 / sigma0 ** 3)
- ((2 * rho * (x0 - mu0) * (x1 - mu1)) / (sigma0 ** 2 * sigma1))
)
/ (2 * (1 - rho ** 2))
)
dydsigma1 = (
value
* (
((x1 - mu1) ** 2 / sigma1 ** 3)
- ((2 * rho * (x0 - mu0) * (x1 - mu1)) / (sigma1 ** 2 * sigma0))
)
/ (2 * (1 - rho ** 2))
)
dydrho = value * (
((x0 - mu0) * (x1 - mu1)) / ((1 - rho ** 2) * sigma0 * sigma1)
+ (
rho
* (
-((x0 - mu0) ** 2 / sigma0 ** 2)
+ (2 * rho * (x0 - mu0) * (x1 - mu1)) / (sigma0 * sigma1)
- (x1 - mu1) ** 2 / sigma1 ** 2
)
)
/ ((1 - rho ** 2) ** 2)
)
# now return
return np.vstack(
(dydamp, dydmu0, dydmu1, dydsigma0, dydsigma1, dydrho, np.ones_like(value))
).T
@classmethod
def gauss2D_norot_jac(cls, params, xdata):
"""Jacobian for no rotation model"""
x = xdata[0].ravel()
y = xdata[1].ravel()
amp, x0, y0, sigma_x, sigma_y, offset = params
value = cls.gauss2D_norot(xdata, *params).ravel() - offset
dydamp = value / amp
dydx0 = value * (x - x0) / sigma_x ** 2
dydsigmax = value * (x - x0) ** 2 / sigma_x ** 3
dydy0 = value * (y - y0) / sigma_y ** 2
dydsigmay = value * (y - y0) ** 2 / sigma_y ** 3
return np.vstack((dydamp, dydx0, dydy0, dydsigmax, dydsigmay, np.ones_like(value))).T
# the below works, but speed up only for above
# new_params = np.insert(params, 5, 0)
# return np.delete(cls.gauss2D_jac(new_params, xdata), 5, axis=0)
@classmethod
def gauss2D_sym_jac(cls, params, xdata):
"""Jacobian for symmetric model"""
x = xdata[0].ravel()
y = xdata[1].ravel()
amp, x0, y0, sigma_x, offset = params
value = cls.gauss2D_sym(xdata, *params).ravel() - offset
dydamp = value / amp
dydx0 = value * (x - x0) / sigma_x ** 2
dydsigmax = value * (x - x0) ** 2 / sigma_x ** 3
dydy0 = value * (y - y0) / sigma_x ** 2
return np.vstack((dydamp, dydx0, dydy0, dydsigmax, np.ones_like(value))).T
# new_params = np.insert(params, 4, 0)
# new_params = np.insert(new_params, 4, params[3])
# return np.delete(cls.gauss2D_jac(new_params, xdata), (4, 5), axis=0)
@classmethod
def model_jac(cls, xdata_tuple, *params):
"""Chooses the correct model jacobian function to use based on the
number of arguments passed to it
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
modeldata :
Other Parameters
----------------
*args : model parameters
"""
num_args = len(params)
if num_args == 5:
return cls.gauss2D_sym_jac(params, xdata_tuple)
elif num_args == 6:
return cls.gauss2D_norot_jac(params, xdata_tuple)
elif num_args == 7:
return cls.gauss2D_jac(params, xdata_tuple)
else:
raise RuntimeError("len(params) = {}, number out of range!".format(num_args))
@classmethod
def gen_model(cls, data, *args):
"""
A helper method to generate a fit if needed, useful for generating
residuals
Parameters
----------
*args : tuple
passed directly to `model`
Returns
-------
out : ndarray
Fit generated by the model.
"""
# generate data grid
yy, xx = np.indices(data.shape)
xdata_tuple = (xx, yy)
# return model
return cls.model(xdata_tuple, *args)
@property
def fit_model(self):
"""
        Generate the model from this instance; if the fit hasn't been performed
        yet, an error will be raised
"""
return self.gen_model(self._data, *self._popt)
def area(self, **kwargs):
"""
A function for calculating the area of the model peak
Area = 2*amp*np.pi*sigma_x*sigma_y*np.sqrt(1-rho**2)
Parameters
----------
kwargs : dictionary
            keyword arguments to pass to `optimize_params`, only used if
            `opt_params` has not been calculated yet.
Returns
-------
Area of the peak based on fit parameters.
"""
# this is for convenience so that the area can
# be returned quickly, i.e. a = Gauss2D(data).area()
if self._popt is None:
self.optimize_params(**kwargs)
# extract the optimal parameters
opt_params = self.opt_params
num_params = len(opt_params)
# depending on the specified model the area is calculated
if num_params == 7:
return abs(
2
* np.pi
* opt_params[0]
* opt_params[3]
* opt_params[4]
* np.sqrt(1 - opt_params[5] ** 2)
)
elif num_params == 6:
return abs(2 * np.pi * opt_params[0] * opt_params[3] * opt_params[4])
else:
return abs(2 * np.pi * opt_params[0] * opt_params[3] ** 2)
def optimize_params(
self,
guess_params=None,
modeltype="norot",
quiet=False,
bounds=None,
checkparams=True,
detrenddata=False,
fittype="ls",
):
"""
A function that will optimize the parameters for a 2D Gaussian model
using either a least squares or maximum likelihood method
Parameters
----------
guess_params : numeric sequence, or dict (optional)
The initial guesses for the model parameters. The number of
parameters determines the modeltype (see notes). If no
guesses are provided they will be estimated from the data.
The estimation is only valid for positive data
modeltype : {'sym', 'norot', 'full'}, default 'norot'
Determines the model to guess parameters for
fittype : {'ls', 'mle'}, default 'ls'
Specifies if a least squares fit ('ls') or maximum likelihood
estimation ('mle') should be performed
quiet : bool
Determines the verbosity of the output
bounds : (-np.inf, np.inf)
See `scipy.optimize.curve_fit` for details, if modeltype is
'full' then the bounds for $\rho$ are automatically set to
(-1, 1) while the rest are left as is
checkparams : bool
Checks the parameters for validity after the fit, maybe replaced
in the future by more intelligent default bounding
detrenddata : bool
Determines if the data should be detrended before parameter
estimation, may be removed in the future.
Returns
-------
opt_params : ndarray
The optimized parameters from the fit. If the fit wasn't
successful a series of np.nan's will be returned.
Notes
-----
This function will call scipy.optimize to optimize the parameters of
the model function
MLE is for poisson noise model while LS is for gaussian noise model.
"""
# Test if we've been provided guess parameters
# Need to test if the variable is good or not.
if guess_params is None:
# if not we generate them
guess_params = self.estimate_params(detrenddata=detrenddata)
if modeltype.lower() == "sym":
guess_params = np.delete(guess_params, (4, 5))
elif modeltype.lower() == "norot":
guess_params = np.delete(guess_params, 5)
elif modeltype.lower() == "full":
pass
else:
raise RuntimeError("modeltype is not one of: 'sym', 'norot', 'full'")
# handle the case where the user passes a dictionary of values.
if isinstance(guess_params, dict):
guess_params = self.dict_to_params(guess_params)
self._guess_params = guess_params
# pull the data attribute for use
data = self._data
# We need to generate the x an y coordinates for the fit
# remember that image data generally has the higher dimension first
# as do most python objects
yy, xx = np.indices(data.shape)
# define our function for fitting
def model_ravel(*args):
return self.model(*args).ravel()
# TODO: We also need a function to clear nan values from data and the
# associated xx and yy points.
# Here we fit the data but we catch any errors and instead set the
# optimized parameters to nan.
# full_output is an undocumented key word of `curve_fit` if set to true
# it returns the same output as leastsq's would, if False, as it is by
# default it returns only popt and pcov.
# we need to set the bounds if rho is available
if bounds is None:
# TODO: we can make better defaults, keep sigma_x/sigma_y positive,
# make sure amp is positive, etc...
# set to default for all params
if len(guess_params) == 7:
# make sure rho is restricted
ub = np.array((np.inf,) * 5 + (1, np.inf))
bounds = (-1 * ub, ub)
else:
bounds = (-np.inf, np.inf)
with warnings.catch_warnings():
# we'll catch this error later and alert the user with a printout
warnings.simplefilter("ignore", OptimizeWarning)
if fittype.lower() == "mle":
meth = "mle"
elif fittype.lower() == "ls":
# default to scipy
meth = None
else:
raise RuntimeError("fittype is not one of: 'ls', 'mle'")
try:
popt, pcov, infodict, errmsg, ier = curve_fit(
model_ravel,
(xx, yy),
data.ravel(),
p0=guess_params,
bounds=bounds,
full_output=True,
jac=self.model_jac,
method=meth,
)
except RuntimeError as e:
# print(e)
# now we need to re-parse the error message to set all the
# flags pull the message
self.errmsg = e.args[0].replace("Optimal parameters not found: ", "")
# run through possibilities for failure
errors = {
0: "Improper",
5: "maxfev",
6: "ftol",
7: "xtol",
8: "gtol",
"unknown": "Unknown",
}
# set the error flag correctly
for k, v in errors.items():
if v in self.errmsg:
self.ier = k
except ValueError as e:
# This except is for bounds checking gone awry
self.errmsg = str(e)
self.ier = -1
else:
# if we save the infodict as well then we'll start using a lot
# of memory
# self.infodict = infodict
self.errmsg = errmsg
self.ier = ier
if checkparams:
self._check_params(popt)
# check to see if the covariance is bunk
if not np.isfinite(pcov).all():
self.errmsg = """
Covariance of the parameters could not be estimated
"""
self.ier = 9
# save parameters for later use
# if the error flag is good, proceed
if self.ier in [1, 2, 3, 4]:
# make sure sigmas are positive
# if popt.size > 5:
# popt[3:5] = abs(popt[3:5])
# else:
# popt[3] = abs(popt[3])
self._popt = popt
self._pcov = pcov
else:
if not quiet:
logger.warning("Fitting error: " + self.errmsg)
self._popt = guess_params * np.nan
self._pcov = np.zeros((len(guess_params), len(guess_params))) * np.nan
if not self.error:
# if no fitting error calc residuals and noise
self.residuals = self.data - self.fit_model
self.noise = self.residuals.std()
else:
# if there is an error set the noise to nan
self.noise = np.nan
return self.opt_params
def _check_params(self, popt):
"""
A method that checks if optimized parameters are valid
and sets the fit flag
"""
data = self.data
# check to see if the gaussian is bigger than its fitting window by a
# large amount, generally the user is advised to enlarge the fitting
# window or disregard the results of the fit.
sigma_msg = "Sigma larger than ROI"
max_s = max(data.shape)
if len(popt) < 6:
if abs(popt[3]) > max_s:
self.errmsg = sigma_msg
self.ier = 10
else:
if abs(popt[3]) > max_s or abs(popt[4]) > max_s:
self.errmsg = sigma_msg
self.ier = 10
# check to see if the amplitude makes sense
# it must be greater than 0 but it can't be too much larger than the
# entire range of data values
if not (0 < popt[0] < (data.max() - data.min()) * 5):
self.errmsg = "Amplitude unphysical, amp = {:.3f}," " data range = {:.3f}"
# cast to float to avoid memmap problems
            self.errmsg = self.errmsg.format(popt[0], float(data.max() - data.min()))
self.ier = 11
def estimate_params(self, detrenddata=False):
"""
        Estimate the parameters that best model the data using its moments
Parameters
----------
detrenddata : bool
a keyword that determines whether data should be detrended first.
Detrending takes *much* longer than not. Probably only useful for
large fields of view.
Returns
-------
params : array_like
params[0] = amp
params[1] = x0
params[2] = y0
params[3] = sigma_x
params[4] = sigma_y
params[5] = rho
params[6] = offset
Notes
-----
Bias is removed from data using detrend in the util module.
"""
# initialize the parameter array
params = np.zeros(7)
# iterate at most 10 times
for i in range(10):
# detrend data
if detrenddata:
                # only try to remove a plane; anything more should be done
                # before the object is instantiated
data, bg = detrend(self._data.copy(), degree=1)
offset = bg.mean()
amp = data.max()
else:
data = self._data.astype(float)
offset = data.min()
amp = data.max() - offset
# calculate the moments up to second order
M = moments(data, 2)
# calculate model parameters from the moments
            # https://en.wikipedia.org/wiki/Image_moment#Central_moments
xbar = M[1, 0] / M[0, 0]
ybar = M[0, 1] / M[0, 0]
xvar = M[2, 0] / M[0, 0] - xbar ** 2
yvar = M[0, 2] / M[0, 0] - ybar ** 2
covar = M[1, 1] / M[0, 0] - xbar * ybar
# place the model parameters in the return array
params[:3] = amp, xbar, ybar
params[3] = np.sqrt(np.abs(xvar))
params[4] = np.sqrt(np.abs(yvar))
params[5] = covar / np.sqrt(np.abs(xvar * yvar))
params[6] = offset
if abs(params[5]) < 1 or not detrenddata:
# if the rho is valid or we're not detrending data,
# break the loop.
break
# save estimate for later use
self._guess_params = params
# return parameters to the caller as a `copy`, we don't want them to
# change the internal state
return params.copy()
@classmethod
def _params_dict(cls, params):
"""
Helper function to return a version of params in dictionary form to
make the user interface a little more friendly
Examples
--------
>>> Gauss2D._params_dict((1, 2, 3, 4, 5, 6, 7)) == {
... 'amp': 1,
... 'x0': 2,
... 'y0': 3,
... 'sigma_x': 4,
... 'sigma_y': 5,
... 'rho': 6,
... 'offset': 7}
True
"""
keys = ["amp", "x0", "y0", "sigma_x", "sigma_y", "rho", "offset"]
num_params = len(params)
# adjust the dictionary size
if num_params < 7:
keys.remove("rho")
if num_params < 6:
keys.remove("sigma_y")
return {k: p for k, p in zip(keys, params)}
def params_errors_dict(self):
"""Return a dictionary of errors"""
keys = ["amp_e", "x0_e", "y0_e", "sigma_x_e", "sigma_y_e", "rho_e", "offset_e"]
# pull the variances of the parameters from the covariance matrix
# take the sqrt to get the errors
with np.errstate(invalid="ignore"):
params = np.sqrt(np.diag(self.pcov))
num_params = len(params)
# adjust the dictionary size
if num_params < 7:
keys.remove("rho_e")
if num_params < 6:
keys.remove("sigma_y_e")
return {k: p for k, p in zip(keys, params)}
@classmethod
def dict_to_params(cls, d):
"""
Helper function to return a version of params in dictionary form
to make the user interface a little more friendly
>>> Gauss2D.dict_to_params({
... 'amp': 1,
... 'x0': 2,
... 'y0': 3,
... 'sigma_x': 4,
... 'sigma_y': 5,
... 'rho': 6,
... 'offset': 7})
array([1, 2, 3, 4, 5, 6, 7])
"""
keys = ["amp", "x0", "y0", "sigma_x", "sigma_y", "rho", "offset"]
values = []
for k in keys:
try:
values.append(d[k])
except KeyError:
pass
return np.array(values)
def opt_params_dict(self):
return self._params_dict(self.opt_params)
def all_params_dict(self):
"""Return the parameters and there estimated errors all in one dictionary
the errors will have the same key plus a '_e'"""
params_dict = self.opt_params_dict()
params_dict.update(self.params_errors_dict())
return params_dict
def guess_params_dict(self):
"""
>>> import numpy as np
>>> myg = Gauss2D(np.random.randn(10, 10))
        >>> myg._guess_params = np.array([1, 2, 3, 4, 5, 6, 7])
>>> myg.guess_params_dict() == {
... 'amp': 1,
... 'x0': 2,
... 'y0': 3,
... 'sigma_x': 4,
... 'sigma_y': 5,
... 'rho': 6,
... 'offset': 7}
True
"""
return self._params_dict(self.guess_params)
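def _gauss2d_example():
    """A minimal usage sketch (added for illustration; not part of the original API).
    It builds a noiseless symmetric peak with the class's own model and returns the
    moment-based parameter estimates. Values are illustrative only; a real workflow
    would follow this with `Gauss2D(data).optimize_params()`."""
    yy, xx = np.indices((64, 64))
    data = Gauss2D.gauss2D_sym((xx, yy), 2.0, 32.0, 30.0, 5.0, 0.1)
    g = Gauss2D(data)
    # estimate_params returns [amp, x0, y0, sigma_x, sigma_y, rho, offset]
    return g.estimate_params()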
class Gauss2Dz(Gauss2D):
"""
A class that encapsulates experimental data that is best modeled by a 2D
gaussian peak. It can estimate model parameters and perform a fit to the
data. Best fit parameters are stored in a dictionary that can be accessed
by helper functions.
Right now the class assumes that `data` has constant spacing
"""
def __init__(self, data, poly_coefs_df):
"""
Holds experimental equi-spaced 2D-data best represented by a Gaussian
Parameters
----------
data : array_like
An array holding the experimental data, for now data is assumed to
have equal spacing
poly_coefs_df : pd.DataFrame
A data frame holding the coefficients of polynomials
Returns
-------
out : object
            A Gauss2D object holding the specified data. All other internal
            variables are initialized to `None`
"""
# Note that we are only passing a reference to the original data here
# so DO NOT modify this field
super().__init__(data)
# set up polynomial functions for relating z to sigmax and y
self.sigma_x_poly = np.poly1d(poly_coefs_df.sigma_x)
self.sigma_y_poly = np.poly1d(poly_coefs_df.sigma_y)
# we need their derivatives too for the jacobian
self.sigma_x_polyd = self.sigma_x_poly.deriv()
self.sigma_y_polyd = self.sigma_y_poly.deriv()
@property
def fit_model(self):
yy, xx = np.indices(self.data.shape)
xdata_tuple = (xx, yy)
# return model
return self.model(xdata_tuple, *self._popt)
def model(self, xdata_tuple, amp, x0, y0, z0, offset):
"""
Chooses the correct model function to use based on the number of
arguments passed to it
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
modeldata :
Other Parameters
----------------
*args : model parameters
"""
args = amp, x0, y0, self.sigma_x_poly(z0), self.sigma_y_poly(z0), offset
return self.gauss2D_norot(xdata_tuple, *args)
def model_jac(self, xdata_tuple, *params):
"""Chooses the correct model jacobian function to use based on the
number of arguments passed to it
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
modeldata :
Other Parameters
----------------
*args : model parameters
"""
x = xdata_tuple[0].ravel()
y = xdata_tuple[1].ravel()
amp, x0, y0, z0, offset = params
sigma_x, sigma_y = self.sigma_x_poly(z0), self.sigma_y_poly(z0)
sigma_xd, sigma_yd = self.sigma_x_polyd(z0), self.sigma_y_polyd(z0)
value = self.model(xdata_tuple, *params).ravel() - offset
dydamp = value / amp
dydx0 = value * (x - x0) / sigma_x ** 2
dydsigmax = value * (x - x0) ** 2 / sigma_x ** 3
dydy0 = value * (y - y0) / sigma_y ** 2
dydsigmay = value * (y - y0) ** 2 / sigma_y ** 3
dydz0 = dydsigmax * sigma_xd + dydsigmay * sigma_yd
return np.vstack((dydamp, dydx0, dydy0, dydz0, np.ones_like(value))).T
# the below works, but speed up only for above
# new_params = np.insert(params, 5, 0)
# return np.delete(cls.gauss2D_jac(new_params, xdata), 5, axis=0)
def area(self, **kwargs):
raise NotImplementedError
def optimize_params(
self,
guess_params=None,
modeltype="norot",
quiet=False,
bounds=None,
checkparams=True,
detrenddata=False,
fittype="ls",
):
# Test if we've been provided guess parameters
# Need to test if the variable is good or not.
if guess_params is None:
# if not we generate them
guess_params = self.estimate_params(detrenddata=detrenddata)
# handle the case where the user passes a dictionary of values.
if isinstance(guess_params, dict):
guess_params = self.dict_to_params(guess_params)
return super().optimize_params(
guess_params=guess_params,
quiet=quiet,
bounds=bounds,
checkparams=checkparams,
detrenddata=detrenddata,
fittype=fittype,
)
optimize_params.__doc__ = Gauss2D.optimize_params.__doc__
def _check_params(self, popt):
"""
A method that checks if optimized parameters are valid
and sets the fit flag
"""
data = self.data
# check to see if the amplitude makes sense
# it must be greater than 0 but it can't be too much larger than the
# entire range of data values
if not (0 < popt[0] < (data.max() - data.min()) * 5):
self.errmsg = "Amplitude unphysical, amp = {:.3f}," " data range = {:.3f}"
# cast to float to avoid memmap problems
            self.errmsg = self.errmsg.format(popt[0], float(data.max() - data.min()))
self.ier = 11
def estimate_params(self, detrenddata=False):
"""
        Estimate the parameters that best model the data using its moments
Parameters
----------
detrenddata : bool
a keyword that determines whether data should be detrended first.
Detrending takes *much* longer than not. Probably only useful for
large fields of view.
Returns
-------
params : array_like
params[0] = amp
params[1] = x0
params[2] = y0
params[3] = z0
params[4] = offset
Notes
-----
Bias is removed from data using detrend in the util module.
"""
gauss2d_params = super().estimate_params(detrenddata)
amp, x0, y0, sigma_x, sigma_y, rho, offset = gauss2d_params
# find z estimates based on sigmas
zx = find_real_root_near_zero(self.sigma_x_poly - sigma_x)
zy = find_real_root_near_zero(self.sigma_y_poly - sigma_y)
possible_z = np.array((zx, zy))
# remove nans
possible_z = possible_z[np.isfinite(possible_z)]
# choose the estimate closest to zero.
if len(possible_z):
z0 = possible_z[np.abs(possible_z).argmin()]
else:
z0 = 0
# save estimate for later use
params = self._guess_params = np.array([amp, x0, y0, z0, offset])
# return parameters to the caller as a `copy`, we don't want them to
# change the internal state
return params.copy()
def gen_model(self, *args):
"""
A helper method to generate a fit if needed, useful for generating
residuals
Parameters
----------
*args : tuple
passed directly to `model`
Returns
-------
out : ndarray
Fit generated by the model.
"""
# generate data grid
yy, xx = np.indices(self.data.shape)
xdata_tuple = (xx, yy)
# return model
return self.model(xdata_tuple, *args)
@classmethod
def _params_dict(cls, params):
"""
Helper function to return a version of params in dictionary form to
make the user interface a little more friendly
Examples
--------
        >>> Gauss2Dz._params_dict((1, 2, 3, 4, 5)) == {
        ... 'amp': 1,
        ... 'x0': 2,
        ... 'y0': 3,
        ... 'z0': 4,
        ... 'offset': 5}
        True
"""
keys = ["amp", "x0", "y0", "z0", "offset"]
return {k: p for k, p in zip(keys, params)}
def params_errors_dict(self):
"""Return a dictionary of errors"""
keys = ["amp_e", "x0_e", "y0_e", "z0_e", "offset_e"]
# pull the variances of the parameters from the covariance matrix
# take the sqrt to get the errors
with np.errstate(invalid="ignore"):
params = np.sqrt(np.diag(self.pcov))
return {k: p for k, p in zip(keys, params)}
@classmethod
def dict_to_params(cls, d):
"""
Helper function to return a version of params in dictionary form
to make the user interface a little more friendly
        >>> Gauss2Dz.dict_to_params({
        ... 'amp': 1,
        ... 'x0': 2,
        ... 'y0': 3,
        ... 'z0': 4,
        ... 'offset': 5})
        array([1, 2, 3, 4, 5])
"""
keys = ["amp", "x0", "y0", "z0", "offset"]
values = []
for k in keys:
try:
values.append(d[k])
except KeyError:
pass
return np.array(values)
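def _gauss2dz_example():
    """Usage sketch (illustrative only; not part of the original API) for Gauss2Dz.
    The calibration object only needs `sigma_x` and `sigma_y` attributes holding
    polynomial coefficients (highest power first); a pandas DataFrame provides this
    in the original workflow, and a namedtuple stands in for it here."""
    from collections import namedtuple
    Coefs = namedtuple("Coefs", ["sigma_x", "sigma_y"])
    coefs = Coefs(sigma_x=(0.05, 3.0), sigma_y=(-0.04, 3.0))  # sigma(z) = a*z + b
    yy, xx = np.indices((32, 32))
    data = Gauss2D.gauss2D_norot((xx, yy), 1.0, 16.0, 16.0, 3.2, 2.9, 0.0)
    g = Gauss2Dz(data, coefs)
    # estimate_params returns [amp, x0, y0, z0, offset]
    return g.estimate_params()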
if __name__ == "__main__":
# TODO: Make data, add noise, estimate, fit. Plot all 4 + residuals
raise NotImplementedError
|
david-hoffman/peaks
|
peaks/gauss2d.py
|
Python
|
apache-2.0
| 36,593
|
[
"Gaussian"
] |
14685466c4c67affe5b943dda4b6f75b7c0c7996728a5b1bd1caf2a4ae769a9e
|
# ----------------------------------------
# USAGE:
# python water_positioning.py config_file
# ----------------------------------------
# PREAMBLE:
import numpy as np
import sys
import os
import MDAnalysis
from MDAnalysis.analysis.align import *
from distance_functions import *
flush = sys.stdout.flush
zeros = np.zeros
sqrt = np.sqrt
sums = np.sum
square = np.square
dot = np.dot
arccos = np.arccos
mean = np.mean
unit_conversion = 180./np.pi
config_file = sys.argv[1]
# ----------------------------------------
# FUNCTIONS:
def ffprint(string):
print '%s' %(string)
flush()
necessary_parameters = ['pdb','traj_loc','start','end','pocket_selection','pocket_radius','wat_resname','wat_O_name','substrate_atom1','substrate_atom2']
all_parameters = ['pdb','traj_loc','start','end','pocket_selection','pocket_radius','wat_resname','wat_O_name','substrate_atom1','substrate_atom2','Wrapped','write_summary','summary_filename','nucl_wat_outputname','avg_wat_outputname','center_of_geometry_filename']
def config_parser(config_file): # Function to take config file and create/fill the parameter dictionary
for i in range(len(necessary_parameters)):
parameters[necessary_parameters[i]] = ''
# SETTING DEFAULT PARAMETERS FOR OPTIONAL PARAMETERS:
parameters['write_summary'] = False
parameters['summary_filename'] = None
parameters['nucl_wat_outputname'] = 'nucleophilic_waters.dat'
parameters['avg_wat_outputname'] = 'average_waters.dat'
    parameters['center_of_geometry_filename'] = 'COG.xyz'
# GRABBING PARAMETER VALUES FROM THE CONFIG FILE:
execfile(config_file,parameters)
for key, value in parameters.iteritems():
if value == '':
print '%s has not been assigned a value. This variable is necessary for the script to run. Please declare this variable within the config file.' %(key)
sys.exit()
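# Example of the Python-syntax config file consumed by config_parser via execfile.
# All values below are hypothetical placeholders, not taken from the original repo.
_EXAMPLE_CONFIG = """
pdb = 'system.pdb'
traj_loc = 'production.%s/production.%s.dcd'   # filled in with (start, start)
start = 1
end = 10
pocket_selection = 'protein and resid 100-120'
pocket_radius = 5
wat_resname = 'resname TIP3'
wat_O_name = 'OH2'
substrate_atom1 = 'resname ATP and name PG'
substrate_atom2 = 'resname ATP and name O3B'
Wrapped = True
write_summary = True
summary_filename = 'water_positioning.summary'
"""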
def summary():
with open('%s' %(parameters['summary_filename']),'w') as f:
f.write('Using MDAnalysis version: %s\n' %(MDAnalysis.version.__version__))
f.write('To recreate this analysis, run this line:\n')
for i in range(len(sys.argv)):
f.write('%s ' %(sys.argv[i]))
f.write('\n\n')
f.write('Parameters used:\n')
for i in all_parameters:
f.write('%s = %s \n' %(i,parameters[i]))
# ----------------------------------------
# MAIN:
# CREATING PARAMETER DICTIONARY
parameters = {}
config_parser(config_file)
# ----------------------------------------
# LOAD IN THE ANALYSIS UNIVERSE AND CREATE THE NECESSARY ATOM SELECTIONS
ffprint('Loading Analysis Universe.')
u = MDAnalysis.Universe(parameters['pdb'])
u_all = u.select_atoms('all')
wat = u.select_atoms(parameters['wat_resname'])
u_pocket = u.select_atoms(parameters['pocket_selection'])
atom1 = u.select_atoms(parameters['substrate_atom1'])
atom2 = u.select_atoms(parameters['substrate_atom2'])
# ----------------------------------------
# ANALYSIS OF TRAJECTORIES
start = int(parameters['start'])
end = int(parameters['end'])
timestep = 0
with open(parameters['nucl_wat_outputname'],'w') as W, open(parameters['avg_wat_outputname'],'w') as X, open(parameters['center_of_geometry_filename'],'w') as Y:
ffprint('Beginning trajectory analysis')
while start <= end:
ffprint('Loading trajectory %s' %(start))
u.load_new(parameters['traj_loc'] %(start,start))
# Loop through trajectory
for ts in u.trajectory:
# Obtaining COG of pocket; moving origin to this point
t = u_pocket.center_of_geometry()
Y.write('1\n generated by MDAnalysis and RBD\n X %10.4f %10.4f %10.4f\n' %(t[0], t[1], t[2])) #Writing an xyz trajectory of the center of geometry of the binding pocket; the COG particle is labeled as a dummy atom X
u_all.translate(-t)
# Wrap waters around the center of the NTPase active site if trajectories are not wrapped already
if not parameters['Wrapped']:
dims = u.dimensions[:3] # obtain dimension values to be used for wrapping atoms
dims2 = dims/2.0
                for i in range(wat.n_residues):
                    temp = wat.residues[i].atoms[0].position
                    t = wrapping(temp,dims,dims2)
                    wat.residues[i].translate(t)
pocket_waters = wat.select_atoms('byres point 0 0 0 %d'%(parameters['pocket_radius']))
nRes = pocket_waters.n_residues
X.write('%d\n'%(nRes))
pos1 = atom1.positions[0]
pos2 = atom2.positions[0]
dist,dist2 = euclid_dist(pos1,pos2)
bond_vector = (pos2 - pos1)/dist
for i in range(nRes):
res = pocket_waters.residues[i]
ox_pos = res.select_atoms('name %s'%(parameters['wat_O_name'])).positions[0]
dist, dist2 = euclid_dist(pos2,ox_pos)
attack_vector = (pos2 - ox_pos)/dist
attack_angle = arccos(dot(bond_vector,attack_vector))*unit_conversion
W.write('%d %d %f %f\n'%(timestep,i,dist,attack_angle))
timestep += 1
start += 1
if parameters['write_summary']:
summary()
|
rbdavid/DENV-NS3h
|
Water_positioning/water_positioning.py
|
Python
|
gpl-3.0
| 4,826
|
[
"MDAnalysis"
] |
e6695d5f6bafcf5bf295d8c037d2913414e5123bb4eab834dc74137def1f0b36
|
""" VirtualMachineDB Integration Tests
"""
import unittest
import DIRAC
DIRAC.initialize() # Initialize configuration
from DIRAC.WorkloadManagementSystem.DB.VirtualMachineDB import VirtualMachineDB
class VirtualMachineDBInstanceTests(unittest.TestCase):
"""This tests the instance related database functions.
It creates two entries, one is used for the tests and the second
is checked after each test to check it hasn't changed.
"""
INST_UUID = "1111-2222-3333"
INST_NAME = "TestInst"
INST_IMAGE = "TestImage"
INST_EP = "MyCloud::mycloud.domain"
INST_POD = "testvo" # pod is generally just set to VO name and not really used
def setUp(self):
"""Adds an instance which we can then check the properties of"""
self.__db = VirtualMachineDB()
# Start by clearing the database so we don't get any surprises
for vmTable in VirtualMachineDB.tablesDesc:
res = self.__db._query("DELETE FROM `%s`" % vmTable)
self.assertTrue(res["OK"])
# Now create our test instances
res = self.__db.insertInstance(self.INST_UUID, self.INST_IMAGE, self.INST_NAME, self.INST_EP, self.INST_POD)
self.assertTrue(res["OK"])
# Most functions will need the internal DB instance ID
res = self.__db.getInstanceID(self.INST_UUID)
self.assertTrue(res["OK"])
self.__id = res["Value"]
# Create the second "canary" instance entry that should always be unchanged
res = self.__db.insertInstance(
self.INST_UUID + "2", self.INST_IMAGE + "2", self.INST_NAME + "2", self.INST_EP + "2", self.INST_POD + "2"
)
self.assertTrue(res["OK"])
def tearDown(self):
"""Checks that the second instance is unchanged and then clears the DB tables
so that any changes the test made aren't retained for future tests"""
test_id = self.__db.getInstanceID(self.INST_UUID + "2")["Value"]
for paramName, paramValue in [
("UniqueID", self.INST_UUID + "2"),
("InstanceID", test_id),
("Name", self.INST_NAME + "2"),
("Endpoint", self.INST_EP + "2"),
("RunningPod", self.INST_POD + "2"),
("Status", "Submitted"),
]:
res = self.__db.getInstanceParameter(paramName, test_id)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], paramValue)
res = self.__db.checkImageStatus(self.INST_IMAGE + "2")
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], "New")
def test_imageStatus(self):
"""Adding an instance should automatically add its image in the "New" state"""
res = self.__db.checkImageStatus(self.INST_IMAGE)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], "New")
        # An image is considered validated when an instance using it starts running
        # and returns at least one heartbeat
self.assertTrue(self.__db.declareInstanceRunning(self.INST_UUID, "127.0.0.1")["OK"])
self.assertTrue(self.__db.instanceIDHeartBeat(self.INST_UUID, 0.0, 0, 0, 0, 60)["OK"])
res = self.__db.checkImageStatus(self.INST_IMAGE)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], "Validated")
def test_instanceParam(self):
"""Check we can get all of the instance parameters and that unknown properties fail"""
for paramName, paramValue in [
("UniqueID", self.INST_UUID),
("InstanceID", self.__id),
("Name", self.INST_NAME),
("Endpoint", self.INST_EP),
("RunningPod", self.INST_POD),
("Status", "Submitted"),
]:
res = self.__db.getInstanceParameter(paramName, self.__id)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], paramValue)
res = self.__db.getInstanceParameter("BadParam", self.__id)
self.assertFalse(res["OK"])
# Some parameters have dedicated access functions too
res = self.__db.getEndpointFromInstance(self.INST_UUID)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], self.INST_EP)
res = self.__db.getInstanceStatus(self.__id)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], "Submitted")
        # A running instance with a heartbeat has a few extra parameters
# Bytes and files are stored in the history table and not tested here
INST_LOAD = 4.0
INST_JOBS = 5
INST_FILES = 6
INST_BYTES = 7
INST_UPTIME = 8
self.assertTrue(self.__db.declareInstanceRunning(self.INST_UUID, "127.0.0.1")["OK"])
res = self.__db.instanceIDHeartBeat(self.INST_UUID, INST_LOAD, INST_JOBS, INST_FILES, INST_BYTES, INST_UPTIME)
self.assertTrue(res["OK"])
for paramName, paramValue in [("Load", INST_LOAD), ("Jobs", INST_JOBS), ("Uptime", INST_UPTIME)]:
res = self.__db.getInstanceParameter(paramName, self.__id)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], paramValue)
# There is also a function to get all fields in one go
# Check a small selection of the fields in that
res = self.__db.getAllInfoForUniqueID(self.INST_UUID)
self.assertTrue(res["OK"])
inst = res["Value"]["Instance"]
self.assertEqual(inst["Name"], self.INST_NAME)
self.assertEqual(inst["Endpoint"], self.INST_EP)
self.assertEqual(inst["RunningPod"], self.INST_POD)
self.assertEqual(inst["Status"], "Running")
def test_changeInstanceID(self):
"""Check we can update an instance's ID."""
NEW_ID = "4444-5555-6666"
res = self.__db.setInstanceUniqueID(self.__id, NEW_ID)
self.assertTrue(res["OK"])
# Check it changed and that we can get it by the new ID
res = self.__db.getUniqueID(self.__id)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], NEW_ID)
res = self.__db.getInstanceID(NEW_ID)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], self.__id)
def test_getByName(self):
"""Check we can get an instance by name."""
res = self.__db.getUniqueIDByName(self.INST_NAME)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], self.INST_UUID)
def test_instanceStates(self):
"""Swap the instance through all of the states and check it updates."""
self.assertTrue(self.__db.declareInstanceRunning(self.INST_UUID, "127.0.0.1")["OK"])
self.assertEqual(self.__db.getInstanceStatus(self.__id)["Value"], "Running")
self.assertTrue(self.__db.declareInstanceStopping(self.__id)["OK"])
self.assertEqual(self.__db.getInstanceStatus(self.__id)["Value"], "Stopping")
self.assertTrue(self.__db.declareInstanceHalting(self.INST_UUID, 0.0)["OK"])
self.assertEqual(self.__db.getInstanceStatus(self.__id)["Value"], "Halted")
# We don't test all invalid transitions, but an instance cannot go from halted
# back to running, so this should not update the state
self.__db.declareInstanceRunning(self.INST_UUID, "127.0.0.1")
self.assertEqual(self.__db.getInstanceStatus(self.__id)["Value"], "Halted")
def test_recordDBHalt(self):
"""This function lets an admin declare an instance in the halting state,
test that it does update the DB. This only works on a running instance.
"""
        # We should get an error as we can't halt a submitted instance
self.assertFalse(self.__db.recordDBHalt(self.__id, 0.0)["OK"])
# Put it into the running state and try again
self.assertTrue(self.__db.declareInstanceRunning(self.INST_UUID, "127.0.0.1")["OK"])
self.assertTrue(self.__db.recordDBHalt(self.__id, 0.0)["OK"])
self.assertEqual(self.__db.getInstanceStatus(self.__id)["Value"], "Halted")
def test_instanceIPs(self):
"""Check instance IP assignment functions."""
PUBLIC_IP = "123.123.123.123"
PRIVATE_IP = "127.123.123.123"
# This function should strip an IPv6 mapping from the public IP
# Test this at the same time
res = self.__db.declareInstanceRunning(self.INST_UUID, "::ffff:" + PUBLIC_IP, PRIVATE_IP)
self.assertTrue(res["OK"])
res = self.__db.getInstanceParameter("PrivateIP", self.__id)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], PRIVATE_IP)
res = self.__db.getInstanceParameter("PublicIP", self.__id)
self.assertTrue(res["OK"])
self.assertEqual(res["Value"], PUBLIC_IP)
def test_heartbeats(self):
"""Test that the heartbeat mechanism works as expected."""
# Put the instance into the running state and check that it doesn't get pruned
self.assertTrue(self.__db.declareInstanceRunning(self.INST_UUID, "127.0.0.1")["OK"])
self.assertTrue(self.__db.instanceIDHeartBeat(self.INST_UUID, 0.0, 0, 0, 0, 60)["OK"])
self.assertTrue(self.__db.declareStalledInstances()["OK"])
self.assertEqual(self.__db.getInstanceStatus(self.__id)["Value"], "Running")
# We now manually patch the heartbeat we added so that it's further in the past
        # Far enough back that the instance appears stuck, so we can check it gets marked as stalled
exp_time = self.__db.stallingInterval * 2 # Use twice the interval for safety
sql = "UPDATE vm_Instances SET LastUpdate = LastUpdate - INTERVAL %u SECOND WHERE InstanceID = %s" % (
exp_time,
self.__id,
)
self.assertTrue(self.__db._query(sql)["OK"])
res = self.__db.getAllInfoForUniqueID(self.INST_UUID)
self.assertTrue(self.__db.declareStalledInstances()["OK"])
self.assertEqual(self.__db.getInstanceStatus(self.__id)["Value"], "Stalled")
def test_getByState(self):
"""Tests the getInstancesByStatus function."""
# First check that a bad status isn't accepted by the function
self.assertFalse(self.__db.getInstancesByStatus("BadState")["OK"])
# Now do the tests with valid inputs
res = self.__db.getInstancesByStatus("Submitted")
self.assertTrue(res["OK"])
images = res["Value"]
# We should have two instances
        # Check that the structure looks correct {image: [instances]}
self.assertEqual(len(images), 2)
self.assertIn(self.INST_IMAGE, images)
self.assertIn(self.INST_UUID, images[self.INST_IMAGE])
# Mark the test instance as running and check it doesn't appear in the output
self.assertTrue(self.__db.declareInstanceRunning(self.INST_UUID, "127.0.0.1")["OK"])
res = self.__db.getInstancesByStatus("Submitted")
self.assertTrue(res["OK"])
images = res["Value"]
self.assertEqual(len(images), 1)
self.assertNotIn(self.INST_IMAGE, images)
|
DIRACGrid/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_VirtualMachineDB.py
|
Python
|
gpl-3.0
| 10,957
|
[
"DIRAC"
] |
2eddbd96805c2cdc9e7559eecd96822ba96649fe9d4ce67d53079fd51712d719
|
import vtk, ctk, slicer
import logging
import time
import gc
from slicer.util import VTKObservationMixin
from slicer.ScriptedLoadableModule import *
class LogicMixin(ScriptedLoadableModuleLogic, VTKObservationMixin):
def __init__(self, ModuleNodeName):
VTKObservationMixin.__init__(self)
self.InputCases = list()
self.allCaseStartTime = 0
# Dictionaries
self.pipeline = {}
self.completed = {}
# Status
self.Node = slicer.vtkMRMLCommandLineModuleNode()
self.Node.SetStatus(self.Node.Idle)
self.Node.SetName(ModuleNodeName)
self.ProgressBar = slicer.qSlicerCLIProgressBar()
self.ProgressBar.setCommandLineModuleNode(self.Node)
self.ProgressBar.setNameVisibility(slicer.qSlicerCLIProgressBar.AlwaysVisible)
self.ErrorMessage = 'Unexpected error'
def startPipeline(self, id):
self.pipeline[id].setup()
self.pipeline[id].Node.SetStatus(self.Node.Scheduled)
self.pipeline[id].runFirstCLIModule()
def onPipelineModified(self, pipeline_node, event):
pipeline_id = None
current_pipeline = None
for key, pipeline in self.pipeline.items():
if pipeline.Node == pipeline_node:
pipeline_id = key
current_pipeline = pipeline
if pipeline_id is None:
logging.error('Error: Unidentified pipeline modified')
return -1
status = pipeline_node.GetStatusString()
logging.info('-- %s: Case %d: %s', status, pipeline_id, current_pipeline.CaseInput)
if not pipeline_node.IsBusy():
self.removeObserver(pipeline_node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
self.onPipelineModified)
# Report time taken to get to the non-busy state for the pipeline
logging.info('Case %d (%s) took %d sec to run', pipeline_id, current_pipeline.CaseInput,
current_pipeline.getPipelineComputationTime())
statusForNode = None
# If canceled, stop everything
if pipeline_node.GetStatusString() == 'Cancelled':
self.ErrorMessage = current_pipeline.ErrorMessage
logging.error(current_pipeline.ErrorMessage)
statusForNode = pipeline_node.GetStatus()
# If completed, with errors or not
else:
# If completed with errors, inform user
if pipeline_node.GetStatusString() == 'Completed with errors':
self.ErrorMessage = current_pipeline.ErrorMessage
logging.error(current_pipeline.ErrorMessage)
        # Then start the next case if it exists
self.completed[pipeline_id] = True
        # If there are no more cases
if self.areAllPipelineCompleted():
logging.info('All pipelines took: %d sec to run', time.time() - self.allCaseStartTime)
statusForNode = pipeline_node.GetStatus()
if statusForNode is None:
# Run next pipeline
self.startPipeline(pipeline_id + 1)
else:
self.Node.SetStatus(statusForNode)
def areAllPipelineCompleted(self):
for i in range(len(self.completed)):
if not self.completed[i]:
return False
return True
def Cancel(self):
self.Node.SetStatus(self.Node.Cancelling)
for i in range(len(self.pipeline)):
self.pipeline[i].Cancel()
|
NIRALUser/SPHARM-PDM
|
Modules/Scripted/ShapeAnalysisModule/CommonUtilities/LogicMixin.py
|
Python
|
apache-2.0
| 3,223
|
[
"VTK"
] |
4ac7730d55e026c8ee3ca11bf117226e07e9f102eabad8e1495a6b15298a140d
|
"""
Unit tests for ASP solving.
"""
import unittest
from collections import defaultdict
from neural_world.tests import NeuralNetworkTester
class TestASPSolving(NeuralNetworkTester):
def test_no_atoms(self):
atoms = ''
expected_result = ''
self.assert_running(atoms, expected_result)
def test_two_neurons_down(self):
atoms = 'neuron(1,i). neuron(2,o). output(2,up).'
expected_result = ()
self.assert_running(atoms, expected_result)
def test_two_neurons_up(self):
atoms = 'neuron(1,i). neuron(2,o). output(2). up(1). edge(1,2).'
expected_result = ('up(2)',)
self.assert_running(atoms, expected_result)
def test_three_neurons_up(self):
atoms = ('neuron(1,i). neuron(2,i). neuron(3,a). output(3). '
'up(1). up(2). edge(1,3). edge(2,3).')
expected_result = 'up(3)'
self.assert_running(atoms, expected_result)
def test_two_paths(self):
"""Test on a complex network.
1 --> 3 (NOT) ->\
\ 6 (XOR, output)
4 (OR) ------->|
/ 7 (AND, output)
2 --> 5 (NOT) ->/
All cases of input states (none, up 1, up 2, up 1 & up 2) are tested.
"""
atoms = ('neuron(1,i). neuron(2,i). ' # two input neurons
'neuron(3,n). neuron(4,o). neuron(5,n). ' # (not, or, not) neurons
'neuron(6,x). output(6). neuron(7,a). output(7). ' # (xor, and) output neurons
'edge(1,3). edge(1,4). edge(2,4). edge(2,5). '
'edge(3,6). edge(4,6). edge(4,7). edge(5,7). ')
expected_results = { # up states: expected result
'' : 'up(6)',
'up(1).' : 'up(6) up(7)',
'up(2).' : '',
'up(1).up(2).' : 'up(6) ',
}
for up_states, expected_result in expected_results.items():
self.assert_running(atoms + up_states, expected_result)
def test_basic(self):
self.assert_running(
'up(1). neuron(1,i). edge(1,2). neuron(2,o). output(2).',
'up(2)'
)
class TestNeuronLogicalGates(NeuralNetworkTester):
"""Test all logical gates, with all possible cases of
predecessor neurons states.
"""
def setUp(self):
"""Create three input neurons, predecessors of a single neuron and all
possible cases of input."""
self.atoms = ('neuron(1,i). neuron(2,i). neuron(3,i).'
# neuron 4 will be tested with all possible gates
'neuron(4,{}). output(4).'
'edge(1,4). edge(2,4). edge(3,4).')
self.cases = {
# input states : {expected gates leading to activated direction}
'' : {'not'},
'up(1).' : {'or', 'xor'},
'up(2).' : {'or', 'xor'},
'up(3).' : {'or', 'xor'},
'up(1).up(2).' : {'or'},
'up(1).up(3).' : {'or'},
'up(2).up(3).' : {'or'},
'up(1).up(2).up(3).': {'or', 'and'},
}
def test_gates(self):
"""Test for all gates all cases of up states, and verify that all
results match with the expected ones."""
for gate in ('and', 'or', 'xor', 'not'):
for states, expected_result in self.cases.items():
self.assert_running(
self.atoms.format(gate[0]) + states,
'up(4)' if gate in expected_result else '',
)
|
Aluriak/neural_world
|
neural_world/tests/test_asp_solving.py
|
Python
|
gpl-2.0
| 3,626
|
[
"NEURON"
] |
610a73f426bfd8500be7d57cb8503484e1e30e8b3dbd44792e699115e2fc1769
|
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import sys
import numpy as np
class Train(object):
""" Basic training class. Allows for either minimization or maximization, though all implementations may not
support both.
"""
def __init__(self, goal_minimize=True):
self.max_iterations = 100000
self.position = []
self.best_score = 0
self.goal_minimize = goal_minimize
self.display_final = True
self.display_iteration = False
self.stop_score = None
def better_than(self, is_this, than_that):
"""Determine if one score is better than the other, based on minimization settings.
@param is_this: The first score to compare.
@param than_that: The second score to compare.
@return: True, if the first score is better than the second.
"""
if self.goal_minimize:
return is_this < than_that
else:
return is_this > than_that
def should_stop(self, iteration, best_score):
""" Determine if we should stop.
@param iteration: The current iteration.
@param best_score: The current best score.
@return: True, if we should stop.
"""
if iteration > self.max_iterations:
return True
if self.stop_score is not None:
if self.better_than(best_score, self.stop_score):
return True
return False
class TrainGreedRandom(Train):
"""
The Greedy Random learning algorithm is a very primitive random-walk algorithm that only takes steps that serve
to move the Machine Learning algorithm to a more optimal position. This learning algorithm essentially chooses
random locations for the long term memory until a better set is found.
http://en.wikipedia.org/wiki/Random_walk
"""
def __init__(self, low, high, goal_minimize=True):
"""
Construct a greedy random trainer.
@param low: The low end of random numbers to generate.
@param high: The high end of random numbers to generate.
@param goal_minimize: Is the goal to minimize?
"""
        self.low = low
        self.high = high
Train.__init__(self, goal_minimize)
def train(self, x0, funct):
"""
Train with the specified score function.
@param x0: The initial vector for long-term memory.
@param funct: The score function. We attempt to minimize or maximize this.
@return: The trained long-term memory vector.
"""
iteration_number = 1
self.position = list(x0)
self.best_score = funct(self.position)
while not self.should_stop(iteration_number, self.best_score):
# Clone current position, create a new array of same size.
trial_position = list(self.position)
# Randomize trial position.
self.perform_randomization(trial_position)
# Obtain new trial score.
trial_score = funct(trial_position)
if self.better_than(trial_score, self.best_score):
self.best_score = trial_score
self.position = trial_position
current = funct(self.position)
if self.display_iteration:
print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
iteration_number += 1
if self.display_final:
print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
return self.position
def perform_randomization(self, vec):
for i in range(0, len(vec)):
vec[i] = np.random.uniform(self.low, self.high)
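# --- Hedged usage sketch (added for illustration; not part of the original AIFH source) ---
# A minimal example of how TrainGreedRandom might be driven: minimize a simple
# sum-of-squares score over a 3-element vector. The bounds (-10, 10), the start
# vector and the score function are illustrative assumptions, not values from the book.
def _example_greedy_random():
    trainer = TrainGreedRandom(-10.0, 10.0)
    trainer.max_iterations = 1000
    trainer.display_final = False
    # The score function receives the candidate long-term memory vector.
    return trainer.train([5.0, 5.0, 5.0], lambda pos: sum(x * x for x in pos))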
class TrainHillClimb(Train):
"""
Train using hill climbing. Hill climbing can be used to optimize the long term memory of a Machine Learning
Algorithm. This is done by moving the current long term memory values to a new location if that new location
gives a better score from the scoring function.
http://en.wikipedia.org/wiki/Hill_climbing
"""
def __init__(self, goal_minimize=True):
Train.__init__(self, goal_minimize)
def train(self, x0, funct, acceleration=1.2, step_size=1.0):
"""
Train up to the specified maximum number of iterations using hill climbing.
@param x0: The initial vector for long-term memory.
@param funct: The score function. We attempt to minimize or maximize this.
@param acceleration: The acceleration (default=1.2)
@param step_size: The step size (default=1.0)
@return: The trained long-term memory vector.
"""
iteration_number = 1
self.position = list(x0)
self.best_score = funct(self.position)
step_size = [step_size] * len(x0)
candidate = [0] * 5
candidate[0] = -acceleration
candidate[1] = -1 / acceleration
candidate[2] = 0
candidate[3] = 1 / acceleration
candidate[4] = acceleration
while not self.should_stop(iteration_number, self.best_score):
if self.goal_minimize:
best_step_score = sys.float_info.max
else:
                best_step_score = -sys.float_info.max  # most negative finite float, so any trial score can beat it when maximizing
for dimension in range(0, len(self.position)):
best = -1
for i in range(0, len(candidate)):
# Take a step
self.position[dimension] += candidate[i] * step_size[dimension]
# Obtain new trial score.
trial_score = funct(self.position)
# Step back, we only want to try movement in one dimension.
self.position[dimension] -= candidate[i] * step_size[dimension]
# Record best step taken
if self.better_than(trial_score, best_step_score):
best_step_score = trial_score
best = i
if best != -1:
self.best_score = best_step_score
self.position[dimension] += candidate[best] * step_size[dimension]
step_size[dimension] += candidate[best]
if self.display_iteration:
print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
iteration_number += 1
if self.display_final:
print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
return self.position
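# --- Hedged usage sketch (added for illustration; not part of the original AIFH source) ---
# Hill climbing on the same kind of score function: each iteration tries
# accelerated/decelerated steps in every dimension and keeps the best one.
# The start vector, step size and acceleration below are illustrative assumptions.
def _example_hill_climb():
    trainer = TrainHillClimb()
    trainer.max_iterations = 200
    trainer.display_final = False
    return trainer.train([3.0, -4.0], lambda pos: sum(x * x for x in pos),
                         acceleration=1.2, step_size=1.0)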
class TrainAnneal(Train):
"""
    Train a Machine Learning Algorithm using Simulated Annealing. Simulated Annealing is a Monte Carlo algorithm
    that is based on annealing in metallurgy, a technique involving heating and controlled cooling of a
    material to increase the size of its crystals and reduce their defects, both of which are attributes of the
    material that depend on its thermodynamic free energy.
The Simulated Annealing algorithm works by randomly changing a vector of doubles. This is the long term memory
of the Machine Learning algorithm. While this happens a temperature is slowly decreased. When this
temperature is higher, the Simulated Annealing algorithm is more likely to accept changes that have a higher
error (or energy) than the current state.
    There are several important components to any Simulated Annealing algorithm:
    First, the randomization technique. This is performed by the method perform_randomization. To randomize
    differently, override this method.
    Secondly, the cooling schedule. This determines how quickly the current temperature will fall. This is
    controlled by cooling_schedule. To define a different cooling schedule, override this method.
    Finally, the probability of accepting a higher-error (energy) solution. This is defined by a Probability
    Distribution Function (PDF) contained in calc_probability. To define a different PDF, override this method.
http://en.wikipedia.org/wiki/Simulated_annealing
"""
def __init__(self, max_iterations=100, starting_temperature=400, ending_temperature=0.0001):
"""
Create a simulated annealing trainer.
@param max_iterations: The maximum number of iterations.
@param starting_temperature: The starting temperature.
@param ending_temperature: The ending temperature.
"""
Train.__init__(self, True)
self.max_iterations = max_iterations
self.starting_temperature = starting_temperature
self.ending_temperature = ending_temperature
self.cycles = 100
self.last_probability = 0
def train(self, x0, funct):
"""
        Train for the specified number of iterations using simulated annealing. The temperature will be lowered
        from the starting temperature to the ending temperature as the iterations progress. You can also use the
        cycles property to set how many cycles are executed at each iteration. Simulated annealing can only be
        used to minimize the score function.
@param x0: The initial long-term memory.
@param funct: The score function.
@return: The trained long-term memory.
"""
iteration_number = 1
self.position = list(x0)
self.best_score = funct(self.position)
current_score = self.best_score
current_position = list(x0)
while not self.should_stop(iteration_number, self.best_score):
# Clone current position, create a new array of same size.
current_temperature = self.cooling_schedule(iteration_number)
for c in range(0, self.cycles):
trial_position = list(current_position)
# Randomize trial position.
self.perform_randomization(trial_position)
# Obtain new trial score.
trial_score = funct(trial_position)
keep = False
if self.better_than(trial_score, current_score):
keep = True
else:
self.last_probability = self.calc_probability(current_score, trial_score, current_temperature)
if self.last_probability > np.random.uniform():
keep = True
if keep:
current_score = trial_score
current_position = list(trial_position)
if self.better_than(current_score, self.best_score):
self.best_score = current_score
self.position = list(current_position)
if self.display_iteration:
print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score)
+ ",k=" + str(iteration_number)
+ ",kMax=" + str(self.max_iterations)
+ ",t=" + str(current_temperature) + ",prob=" + str(self.last_probability) + ","
+ str(current_score))
iteration_number += 1
if self.display_final:
print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
return self.position
def calc_probability(self, error_current, error_new, t):
"""
        Calculate the probability of accepting a worse position. This can be overridden to provide other
implementations.
@param error_current: The current error (score).
@param error_new: The new error (score)
@param t: The temperature.
@return: The probability of accepting the worse score.
"""
return np.exp(-(np.abs(error_new - error_current) / t))
def cooling_schedule(self, current_iteration):
"""
        Determine the temperature for the specified iteration. This method can be overridden to provide other cooling
schedules.
@param current_iteration: The iteration number.
@return: The temperature.
"""
ex = float(current_iteration) / float(self.max_iterations)
return self.starting_temperature * (self.ending_temperature / self.starting_temperature) ** ex
def perform_randomization(self, vec):
"""
        Randomize the provided position to move to a neighbor position. The provided method perturbs each vector
        element by one tenth of a normally distributed random number. This works for many continuous problems;
        however, a different method must be used for discrete problems.
        @param vec: The long-term memory vector to perturb in place.
        @return: Nothing; vec is modified in place.
"""
for i in range(0, len(vec)):
d = np.random.randn() / 10
vec[i] += d
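# --- Hedged usage sketch (added for illustration; not part of the original AIFH source) ---
# Simulated annealing on a small continuous problem, plus a subclass showing one of the
# override points described in the class docstring (here only cooling_schedule is replaced,
# with a simple linear schedule). All concrete numbers are illustrative assumptions.
class _LinearCoolingAnneal(TrainAnneal):
    def cooling_schedule(self, current_iteration):
        # Linear interpolation from starting_temperature down to ending_temperature.
        frac = float(current_iteration) / float(self.max_iterations)
        return self.starting_temperature + frac * (self.ending_temperature - self.starting_temperature)
def _example_anneal():
    trainer = _LinearCoolingAnneal(max_iterations=100, starting_temperature=400, ending_temperature=0.0001)
    trainer.display_final = False
    return trainer.train([5.0, 5.0, 5.0], lambda pos: sum(x * x for x in pos))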
|
trenton3983/Artificial_Intelligence_for_Humans
|
vol1/python-examples/lib/aifh/train.py
|
Python
|
apache-2.0
| 13,693
|
[
"VisIt"
] |
b184dd812ea017ab1b5cd046d8468e04bb0b4c4db0069068385bacf4426b89c9
|
from django.db import models
class Locator(models.Model):
subject_identifier = models.CharField(
verbose_name="Subject Identifier",
null=True, blank=True,
max_length=100)
subject_cell = models.CharField(
verbose_name='Cell number',
max_length=100,
blank=True,
null=True)
subject_cell_alt = models.CharField(
verbose_name='Cell number (alternate)',
max_length=100,
blank=True,
null=True)
class Meta:
app_label = "edc_sms"
class Consent(models.Model):
subject_identifier = models.CharField(
verbose_name="Subject Identifier",
null=True, blank=True,
max_length=100)
first_name = models.CharField(
blank=True,
null=True,
max_length=100)
last_name = models.CharField(
blank=True,
null=True,
max_length=100)
class Meta:
app_label = "edc_sms"
class SMS(models.Model):
subject_identifier = models.CharField(
verbose_name="Subject Identifier",
max_length=100,
null=True, blank=True,)
date_time_form_filled = models.DateTimeField(
verbose_name='Date SMS form filled',
null=True, blank=True)
next_ap_date = models.DateField(
verbose_name='Date of next appointment (referral or return)',
null=True, blank=True,)
date_reminder_sent = models.DateField(
verbose_name='Date visit reminder SMS sent',
null=True, blank=True,)
sms_outcome = models.CharField(
verbose_name='Outcome of reminder SMS',
max_length=50,
null=True, blank=True)
class Meta:
app_label = 'edc_sms'
|
botswana-harvard/edc-sms
|
edc_sms/tests/models.py
|
Python
|
gpl-2.0
| 1,708
|
[
"VisIt"
] |
f07067b7971afe4fe85c63db781d9b7e617edaf4c5f9afa5e364c28ecee7c231
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.build_data import DownloadableFile
import parlai.core.build_data as build_data
import os
RESOURCES = [
DownloadableFile(
'https://raw.githubusercontent.com/kushalchawla/CaSiNo/main/data/split/casino_train.json',
'casino_train.json',
'6b953d153fc8c78f27e911c1439b93b9b3519357e3ba825091b2e567845ba3a7',
zipped=False,
),
DownloadableFile(
'https://raw.githubusercontent.com/kushalchawla/CaSiNo/main/data/split/casino_valid.json',
'casino_valid.json',
'91f2d1f09accedf98667ac081fd5083752738390734e991601b036643da077e0',
zipped=False,
),
DownloadableFile(
'https://raw.githubusercontent.com/kushalchawla/CaSiNo/main/data/split/casino_test.json',
'casino_test.json',
'bf6da2d7c105396300d85a65819c04d99304ac9abb8a590ba342fd0c86b4dd12',
zipped=False,
),
]
def build(opt):
dpath = os.path.join(opt['datapath'], 'casino')
version = "v1.1"
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
# make a clean directory if needed
if build_data.built(dpath):
# an older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark as done
build_data.mark_done(dpath, version_string=version)
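# Hedged usage sketch (added for illustration; not part of the original ParlAI task code).
# ParlAI normally calls build(opt) itself when the CaSiNo teacher is first used; calling it
# directly only needs a dict carrying a 'datapath' entry. The path below is an assumption;
# on a second call the version marker written by build_data.mark_done() makes this a no-op.
def _example_build(datapath='/tmp/ParlAI/data'):
    build({'datapath': datapath})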
|
facebookresearch/ParlAI
|
parlai/tasks/casino/build.py
|
Python
|
mit
| 1,722
|
[
"CASINO"
] |
afdd28fb03185bd04cf1c888eddb720e1488638baff3dc253633ba4a45473e00
|
"""User-friendly public interface to polynomial functions. """
from __future__ import print_function, division
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple
)
from sympy.core.mul import _keep_coeff
from sympy.core.symbol import Symbol
from sympy.core.basic import preorder_traversal
from sympy.core.relational import Relational
from sympy.core.sympify import sympify
from sympy.core.decorators import _sympifyit
from sympy.core.function import Derivative
from sympy.logic.boolalg import BooleanAtom
from sympy.polys.polyclasses import DMP
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.monomials import Monomial
from sympy.polys.orderings import monomial_key
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.utilities import group, sift, public
import sympy.polys
import mpmath
from mpmath.libmp.libhyper import NoConvergence
from sympy.polys.domains import FF, QQ, ZZ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
from sympy.core.compatibility import iterable, range
@public
class Poly(Expr):
"""Generic class for representing polynomial expressions. """
__slots__ = ['rep', 'gens']
is_commutative = True
is_Poly = True
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if iterable(rep, exclude=str):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError(
"invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens) - 1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
obj = Basic.__new__(cls)
obj.rep = rep
obj.gens = gens
return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'dict' without generators")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.items():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError(
"'list' representation not supported")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = list(map(domain.convert, rep))
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep, self.gens)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols
set([x])
>>> Poly(x**2 + y).free_symbols
set([x, y])
>>> Poly(x**2 + y, x).free_symbols
set([x, y])
"""
symbols = set([])
for gen in self.gens:
symbols |= gen.free_symbols
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
set([y])
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.symbols:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def args(self):
"""
Don't mess up with the core.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).args
(x**2 + 1,)
"""
return (self.as_expr(),)
@property
def gen(self):
"""
Return the principal generator.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(
f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs]
F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(
g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs]
G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if domain.is_FiniteField:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None):
"""
Replace ``x`` with ``y`` in generators list.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError(
"syntax supported only in univariate case")
if x == y:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.symbols:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError(
"generators list can differ only up to order of elements")
rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens))))
return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from the "left" of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.items():
monom = monom[j:]
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("can't left trim %s" % f)
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
indices = set([])
for gen in gens:
try:
index = f.gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
Examples
========
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
Examples
========
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True),
field=field, composite=f.domain.is_Composite or None)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
See Also
========
all_coeffs
coeff_monomial
nth
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
See Also
========
all_monoms
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
See Also
========
all_terms
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
See Also
========
all_terms
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func(k, coeff):
... k = k[0]
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError(
"%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
def as_expr(f, *gens):
"""
Convert a Poly instance to an Expr instance.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.items():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.symbols + f.gens
else:
gens = f.gens + dom.symbols
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
n, k = len(f.gens), len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:-k], False
else:
raise NotImplementedError(
"can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
def mul_ground(f, coeff):
"""
        Multiply ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).mul_ground(2)
Poly(2*x + 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'mul_ground'):
result = f.rep.mul_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul_ground')
return f.per(result)
def quo_ground(f, coeff):
"""
        Quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).quo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).quo_ground(2)
Poly(x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'quo_ground'):
result = f.rep.quo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo_ground')
return f.per(result)
def exquo_ground(f, coeff):
"""
        Exact quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).exquo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).exquo_ground(2)
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 3 in ZZ
"""
if hasattr(f.rep, 'exquo_ground'):
result = f.rep.exquo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo_ground')
return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
def prem(f, g):
"""
Polynomial pseudo-remainder of ``f`` by ``g``.
        Caveat: The function prem(f, g, x) can be safely used to compute
        in Z[x] _only_ subresultant polynomial remainder sequences (prs's).
        To safely compute Euclidean and Sturmian prs's in Z[x]
        employ any one of the corresponding functions found in
        the module sympy.polys.subresultants_qq_zz. The functions
        in the module with suffix _pg compute prs's in Z[x] employing
        rem(f, g, x), whereas the functions with suffix _amv
        compute prs's in Z[x] employing rem_z(f, g, x).
        The function rem_z(f, g, x) differs from prem(f, g, x) in that
        to compute the remainder polynomials in Z[x] it premultiplies
        the dividend by the absolute value of the leading coefficient
        of the divisor raised to the power degree(f, x) - degree(g, x) + 1.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
Poly(20, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'prem'):
result = F.prem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'prem')
return per(result)
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
See the Caveat note in the function prem(f, g).
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
def div(f, g, auto=True):
"""
Polynomial division with remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
(Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
(Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'div'):
q, r = F.div(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'div')
if retract:
try:
Q, R = q.to_ring(), r.to_ring()
except CoercionFailed:
pass
else:
q, r = Q, R
return per(q), per(r)
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" %
(length, length, gen))
else:
try:
return f.gens.index(sympify(gen))
except ValueError:
raise PolynomialError(
"a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
>>> Poly(0, x).degree()
-oo
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
2
>>> Poly(x + y**5, x, y).total_degree()
5
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def homogenize(f, s):
"""
Returns the homogeneous polynomial of ``f``.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. If you only
want to check if a polynomial is homogeneous, then use
:func:`Poly.is_homogeneous`. If you want not only to check if a
polynomial is homogeneous but also compute its homogeneous order,
then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3)
>>> f.homogenize(z)
Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ')
"""
if not isinstance(s, Symbol):
raise TypeError("``Symbol`` expected, got %s" % type(s))
if s in f.gens:
i = f.gens.index(s)
gens = f.gens
else:
i = len(f.gens)
gens = f.gens + (s,)
if hasattr(f.rep, 'homogenize'):
return f.per(f.rep.homogenize(i), gens=gens)
raise OperationNotSupported(f, 'homogenize')  # pragma: no cover
def homogeneous_order(f):
"""
Returns the homogeneous order of ``f``.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. This degree is
the homogeneous order of ``f``. If you only want to check if a
polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
>>> f.homogeneous_order()
5
"""
if hasattr(f.rep, 'homogeneous_order'):
return f.rep.homogeneous_order()
else: # pragma: no cover
raise OperationNotSupported(f, 'homogeneous_order')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
Returns the trailing coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
def coeff_monomial(f, monom):
"""
Returns the coefficient of ``monom`` in ``f`` if present, else zero.
Examples
========
>>> from sympy import Poly, exp
>>> from sympy.abc import x, y
>>> p = Poly(24*x*y*exp(8) + 23*x, x, y)
>>> p.coeff_monomial(x)
23
>>> p.coeff_monomial(y)
0
>>> p.coeff_monomial(x*y)
24*exp(8)
Note that ``Expr.coeff()`` behaves differently, collecting terms
if possible; the Poly must be converted to an Expr to use that
method, however:
>>> p.as_expr().coeff(x)
24*y*exp(8) + 23
>>> p.as_expr().coeff(y)
24*x*exp(8)
>>> p.as_expr().coeff(x*y)
24*exp(8)
See Also
========
nth: more efficient query using exponents of the monomial's generators
"""
return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f`` where ``N`` are the
exponents of the generators in the term of interest.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
>>> Poly(4*sqrt(x)*y)
Poly(4*y*(sqrt(x)), y, sqrt(x), domain='ZZ')
>>> _.nth(1, 1)
4
See Also
========
coeff_monomial
"""
if hasattr(f.rep, 'nth'):
if len(N) != len(f.gens):
raise ValueError('exponent of each generator must be specified')
result = f.rep.nth(*list(map(int, N)))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
# the semantics of coeff_monomial and Expr.coeff are different;
# if someone is working with a Poly, they should be aware of the
# differences and chose the method best suited for the query.
# Alternatively, a pure-polys method could be written here but
# at this time the ``right`` keyword would be ignored because Poly
# doesn't work with non-commutatives.
raise NotImplementedError(
'Either convert to Expr with `as_expr` method '
'to use Expr\'s coeff method or else use the '
'`coeff_monomial` method of Polys.')
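# A brief sketch of the behaviour explained in the comments above:
# ``Poly.coeff`` is deliberately unsupported, and the query should go through
# either ``coeff_monomial`` or ``as_expr().coeff`` instead.
#
#     >>> from sympy import Poly
#     >>> from sympy.abc import x, y
#     >>> p = Poly(x*y + 2*x, x, y)
#     >>> p.coeff_monomial(x*y)
#     1
#     >>> p.as_expr().coeff(x)
#     y + 2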
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
The leading monomial is the monomial having
the highest power of the principal generator in the
expression ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
x**2*y**0
"""
return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
x**0*y**1
"""
return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
The leading term is the term having
the highest power of the principal generator in the
expression ``f``, together with its coefficient.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
def max_norm(f):
"""
Returns maximum norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(self, convert=False):
"""
Clear denominators, but keep the ground domain.
Examples
========
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
f = self
if not f.rep.dom.has_Field:
return S.One, f
dom = f.get_domain()
if dom.has_assoc_Ring:
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert or not dom.has_assoc_Ring:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(self, g):
"""
Clear denominators in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
f = self
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.has_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(self, *specs, **args):
"""
Computes indefinite integral of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
f = self
if args.get('auto', True) and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs, **kwargs):
"""
Computes partial derivative of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if not kwargs.get('evaluate', True):
return Derivative(f, *specs, **kwargs)
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
_eval_derivative = diff
_eval_diff = diff
def eval(self, x, a=None, auto=True):
"""
Evaluate ``f`` at ``a`` in the given variable.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 2*x + 3, x).eval(2)
11
>>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
Poly(5*y + 8, y, domain='ZZ')
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f.eval({x: 2})
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f.eval({x: 2, y: 5})
Poly(2*z + 31, z, domain='ZZ')
>>> f.eval({x: 2, y: 5, z: 7})
45
>>> f.eval((2, 5))
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
"""
f = self
if a is None:
if isinstance(x, dict):
mapping = x
for gen, value in mapping.items():
f = f.eval(gen, value)
return f
elif isinstance(x, (tuple, list)):
values = x
if len(values) > len(f.gens):
raise ValueError("too many values provided")
for gen, value in zip(f.gens, values):
f = f.eval(gen, value)
return f
else:
j, a = 0, x
else:
j = f._gen_to_level(x)
if not hasattr(f.rep, 'eval'): # pragma: no cover
raise OperationNotSupported(f, 'eval')
try:
result = f.rep.eval(a, j)
except CoercionFailed:
if not auto:
raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
else:
a_domain, [a] = construct_domain([a])
new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens)
f = f.set_domain(new_domain)
a = new_domain.convert(a, a_domain)
result = f.rep.eval(a, j)
return f.per(result, remove=j)
def __call__(f, *values):
"""
Evaluate ``f`` at the given values.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f(2)
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5, 7)
45
"""
return f.eval(values)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
"""
Compute ``f**(-1)`` mod ``x**n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(1, x).revert(2)
Poly(1, x, domain='ZZ')
>>> Poly(1 + x, x).revert(1)
Poly(1, x, domain='ZZ')
>>> Poly(x**2 - 1, x).revert(1)
Traceback (most recent call last):
...
NotReversible: only unity is reversible in a ring
>>> Poly(1/x, x).revert(1)
Traceback (most recent call last):
...
PolynomialError: 1/x contains an element of the generators set
"""
if hasattr(f.rep, 'revert'):
result = f.rep.revert(int(n))
else: # pragma: no cover
raise OperationNotSupported(f, 'revert')
return f.per(result)
def subresultants(f, g):
"""
Computes the subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
[Poly(x**2 + 1, x, domain='ZZ'),
Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')]
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'subresultants'):
result = F.subresultants(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'subresultants')
return list(map(per, result))
def resultant(f, g, includePRS=False):
"""
Computes the resultant of ``f`` and ``g`` via PRS.
If includePRS=True, it includes the subresultant PRS in the result.
Because the PRS is used to calculate the resultant, this is more
efficient than calling :func:`subresultants` separately.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x)
>>> f.resultant(Poly(x**2 - 1, x))
4
>>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
(4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')])
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'resultant'):
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'resultant')
if includePRS:
return (per(result, remove=0), list(map(per, R)))
return per(result, remove=0)
def discriminant(f):
"""
Computes the discriminant of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x + 3, x).discriminant()
-8
"""
if hasattr(f.rep, 'discriminant'):
result = f.rep.discriminant()
else: # pragma: no cover
raise OperationNotSupported(f, 'discriminant')
return f.per(result, remove=0)
def dispersionset(f, g=None):
r"""Compute the *dispersion set* of two polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:
.. math::
\operatorname{J}(f, g)
& := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
& = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}
For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersion
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersionset
return dispersionset(f, g)
def dispersion(f, g=None):
r"""Compute the *dispersion* of polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:
.. math::
\operatorname{dis}(f, g)
& := \max\{ J(f,g) \cup \{0\} \} \\
& = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}
and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersionset
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersion
return dispersion(f, g)
def cofactors(f, g):
"""
Returns the GCD of ``f`` and ``g`` and their cofactors.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
(Poly(x - 1, x, domain='ZZ'),
Poly(x + 1, x, domain='ZZ'),
Poly(x - 2, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'cofactors'):
h, cff, cfg = F.cofactors(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'cofactors')
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
def lcm(f, g):
"""
Returns polynomial LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'lcm'):
result = F.lcm(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'lcm')
return per(result)
def trunc(f, p):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
Poly(-x**3 - x + 1, x, domain='ZZ')
"""
p = f.rep.dom.convert(p)
if hasattr(f.rep, 'trunc'):
result = f.rep.trunc(p)
else: # pragma: no cover
raise OperationNotSupported(f, 'trunc')
return f.per(result)
def monic(self, auto=True):
"""
Divides all coefficients by ``LC(f)``.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
Poly(x**2 + 2*x + 3, x, domain='QQ')
>>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
"""
f = self
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'monic'):
result = f.rep.monic()
else: # pragma: no cover
raise OperationNotSupported(f, 'monic')
return f.per(result)
def content(f):
"""
Returns the GCD of polynomial coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(6*x**2 + 8*x + 12, x).content()
2
"""
if hasattr(f.rep, 'content'):
result = f.rep.content()
else: # pragma: no cover
raise OperationNotSupported(f, 'content')
return f.rep.dom.to_sympy(result)
def primitive(f):
"""
Returns the content and a primitive form of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 8*x + 12, x).primitive()
(2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
"""
if hasattr(f.rep, 'primitive'):
cont, result = f.rep.primitive()
else: # pragma: no cover
raise OperationNotSupported(f, 'primitive')
return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
"""
Computes the functional composition of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
Poly(x**2 - x, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'compose'):
result = F.compose(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'compose')
return per(result)
def decompose(f):
"""
Computes a functional decomposition of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
[Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
"""
if hasattr(f.rep, 'decompose'):
result = f.rep.decompose()
else: # pragma: no cover
raise OperationNotSupported(f, 'decompose')
return list(map(f.per, result))
def shift(f, a):
"""
Efficiently compute Taylor shift ``f(x + a)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).shift(2)
Poly(x**2 + 2*x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'shift'):
result = f.rep.shift(a)
else: # pragma: no cover
raise OperationNotSupported(f, 'shift')
return f.per(result)
def sturm(self, auto=True):
"""
Computes the Sturm sequence of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
[Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
Poly(2/9*x + 25/9, x, domain='QQ'),
Poly(-2079/4, x, domain='QQ')]
"""
f = self
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'sturm'):
result = f.rep.sturm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sturm')
return list(map(f.per, result))
def gff_list(f):
"""
Computes greatest factorial factorization of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> Poly(f).gff_list()
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'gff_list'):
result = f.rep.gff_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'gff_list')
return [(f.per(g), k) for g, k in result]
def sqf_norm(f):
"""
Computes square-free norm of ``f``.
Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is a generator of the algebraic extension of the ground domain.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> s, g, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
>>> s
1
>>> g
Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
>>> r
Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
"""
if hasattr(f.rep, 'sqf_norm'):
s, g, r = f.rep.sqf_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_norm')
return s, f.per(g), f.per(r)
def sqf_part(f):
"""
Computes square-free part of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 3*x - 2, x).sqf_part()
Poly(x**2 - x - 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqf_part'):
result = f.rep.sqf_part()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_part')
return f.per(result)
def sqf_list(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
>>> Poly(f).sqf_list()
(2, [(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
>>> Poly(f).sqf_list(all=True)
(2, [(Poly(1, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
"""
if hasattr(f.rep, 'sqf_list'):
coeff, factors = f.rep.sqf_list(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def sqf_list_include(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly, expand
>>> from sympy.abc import x
>>> f = expand(2*(x + 1)**3*x**4)
>>> f
2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
>>> Poly(f).sqf_list_include()
[(Poly(2, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
>>> Poly(f).sqf_list_include(all=True)
[(Poly(2, x, domain='ZZ'), 1),
(Poly(1, x, domain='ZZ'), 2),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'sqf_list_include'):
factors = f.rep.sqf_list_include(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list_include')
return [(f.per(g), k) for g, k in factors]
def factor_list(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list()
(2, [(Poly(x + y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
"""
if hasattr(f.rep, 'factor_list'):
try:
coeff, factors = f.rep.factor_list()
except DomainError:
return S.One, [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def factor_list_include(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list_include()
[(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
"""
if hasattr(f.rep, 'factor_list_include'):
try:
factors = f.rep.factor_list_include()
except DomainError:
return [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list_include')
return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used.
References
==========
1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
Isolation Methods. Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
Performance of the Continued Fractions Method Using New Bounds of Positive Roots. Nonlinear
Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).intervals()
[((-2, -1), 1), ((1, 2), 1)]
>>> Poly(x**2 - 3, x).intervals(eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = QQ.convert(inf)
if sup is not None:
sup = QQ.convert(sup)
if hasattr(f.rep, 'intervals'):
result = f.rep.intervals(
all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else: # pragma: no cover
raise OperationNotSupported(f, 'intervals')
if sqf:
def _real(interval):
s, t = interval
return (QQ.to_sympy(s), QQ.to_sympy(t))
if not all:
return list(map(_real, result))
def _complex(rectangle):
(u, v), (s, t) = rectangle
return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t))
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
else:
def _real(interval):
(s, t), k = interval
return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
if not all:
return list(map(_real, result))
def _complex(rectangle):
((u, v), (s, t)), k = rectangle
return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
(19/11, 26/15)
"""
if check_sqf and not f.is_sqf:
raise PolynomialError("only square-free polynomials supported")
s, t = QQ.convert(s), QQ.convert(t)
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if steps is not None:
steps = int(steps)
elif eps is None:
steps = 1
if hasattr(f.rep, 'refine_root'):
S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
else: # pragma: no cover
raise OperationNotSupported(f, 'refine_root')
return QQ.to_sympy(S), QQ.to_sympy(T)
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in the ``[inf, sup]`` interval.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**4 - 4, x).count_roots(-3, 3)
2
>>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
1
"""
inf_real, sup_real = True, True
if inf is not None:
inf = sympify(inf)
if inf is S.NegativeInfinity:
inf = None
else:
re, im = inf.as_real_imag()
if not im:
inf = QQ.convert(inf)
else:
inf, inf_real = list(map(QQ.convert, (re, im))), False
if sup is not None:
sup = sympify(sup)
if sup is S.Infinity:
sup = None
else:
re, im = sup.as_real_imag()
if not im:
sup = QQ.convert(sup)
else:
sup, sup_real = list(map(QQ.convert, (re, im))), False
if inf_real and sup_real:
if hasattr(f.rep, 'count_real_roots'):
count = f.rep.count_real_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_real_roots')
else:
if inf_real and inf is not None:
inf = (inf, QQ.zero)
if sup_real and sup is not None:
sup = (sup, QQ.zero)
if hasattr(f.rep, 'count_complex_roots'):
count = f.rep.count_complex_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_complex_roots')
return Integer(count)
def root(f, index, radicals=True):
"""
Get an indexed root of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
>>> f.root(0)
-1/2
>>> f.root(1)
2
>>> f.root(2)
2
>>> f.root(3)
Traceback (most recent call last):
...
IndexError: root index out of [-3, 2] range, got 3
>>> Poly(x**5 + x + 1).root(0)
CRootOf(x**3 - x**2 + 1, 0)
"""
return sympy.polys.rootoftools.rootof(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
"""
Return a list of real roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).real_roots()
[CRootOf(x**3 + x + 1, 0)]
"""
reals = sympy.polys.rootoftools.CRootOf.real_roots(f, radicals=radicals)
if multiple:
return reals
else:
return group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
"""
Return a list of real and complex roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).all_roots()
[CRootOf(x**3 + x + 1, 0),
CRootOf(x**3 + x + 1, 1),
CRootOf(x**3 + x + 1, 2)]
"""
roots = sympy.polys.rootoftools.CRootOf.all_roots(f, radicals=radicals)
if multiple:
return roots
else:
return group(roots, multiple=False)
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Parameters
==========
n ... the number of digits to calculate
maxsteps ... the maximum number of iterations to do
If the accuracy ``n`` cannot be reached in ``maxsteps`` iterations, an
exception is raised and you need to rerun with a higher ``maxsteps`` value.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3).nroots(n=15)
[-1.73205080756888, 1.73205080756888]
>>> Poly(x**2 - 3).nroots(n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute numerical roots of %s" % f)
if f.degree() <= 0:
return []
# For integer and rational coefficients, convert them to integers only
# (for accuracy). Otherwise just try to convert the coefficients to
# mpmath.mpc and raise an exception if the conversion fails.
if f.rep.dom is ZZ:
coeffs = [int(coeff) for coeff in f.all_coeffs()]
elif f.rep.dom is QQ:
denoms = [coeff.q for coeff in f.all_coeffs()]
from sympy.core.numbers import ilcm
fac = ilcm(*denoms)
coeffs = [int(coeff*fac) for coeff in f.all_coeffs()]
else:
coeffs = [coeff.evalf(n=n).as_real_imag()
for coeff in f.all_coeffs()]
try:
coeffs = [mpmath.mpc(*coeff) for coeff in coeffs]
except TypeError:
raise DomainError("Numerical domain expected, got %s" % \
f.rep.dom)
dps = mpmath.mp.dps
mpmath.mp.dps = n
try:
# We need to add extra precision to guard against losing accuracy.
# 10 times the degree of the polynomial seems to work well.
roots = mpmath.polyroots(coeffs, maxsteps=maxsteps,
cleanup=cleanup, error=False, extraprec=f.degree()*10)
# Mpmath puts real roots first, then complex ones (as does all_roots)
# so we make sure this convention holds here, too.
roots = list(map(sympify,
sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, r.imag))))
except NoConvergence:
raise NoConvergence(
'convergence to root failed; try n < %s or maxsteps > %s' % (
n, maxsteps))
finally:
mpmath.mp.dps = dps
return roots
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute ground roots of %s" % f)
roots = {}
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must be an integer and n >= 1, got %s" % n)
x = f.gen
t = Dummy('t')
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
def cancel(f, g, include=False):
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
(1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
(Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
if hasattr(F, 'cancel'):
result = F.cancel(G, include=include)
else: # pragma: no cover
raise OperationNotSupported(f, 'cancel')
if not include:
if dom.has_assoc_Ring:
dom = dom.get_ring()
cp, cq, p, q = result
cp = dom.to_sympy(cp)
cq = dom.to_sympy(cq)
return cp/cq, per(p), per(q)
else:
return tuple(map(per, result))
@property
def is_zero(f):
"""
Returns ``True`` if ``f`` is a zero polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_zero
True
>>> Poly(1, x).is_zero
False
"""
return f.rep.is_zero
@property
def is_one(f):
"""
Returns ``True`` if ``f`` is a unit polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_one
False
>>> Poly(1, x).is_one
True
"""
return f.rep.is_one
@property
def is_sqf(f):
"""
Returns ``True`` if ``f`` is a square-free polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).is_sqf
False
>>> Poly(x**2 - 1, x).is_sqf
True
"""
return f.rep.is_sqf
@property
def is_monic(f):
"""
Returns ``True`` if the leading coefficient of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 2, x).is_monic
True
>>> Poly(2*x + 2, x).is_monic
False
"""
return f.rep.is_monic
@property
def is_primitive(f):
"""
Returns ``True`` if GCD of the coefficients of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 6*x + 12, x).is_primitive
False
>>> Poly(x**2 + 3*x + 6, x).is_primitive
True
"""
return f.rep.is_primitive
@property
def is_ground(f):
"""
Returns ``True`` if ``f`` is an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x, x).is_ground
False
>>> Poly(2, x).is_ground
True
>>> Poly(y, x).is_ground
True
"""
return f.rep.is_ground
@property
def is_linear(f):
"""
Returns ``True`` if ``f`` is linear in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x + y + 2, x, y).is_linear
True
>>> Poly(x*y + 2, x, y).is_linear
False
"""
return f.rep.is_linear
@property
def is_quadratic(f):
"""
Returns ``True`` if ``f`` is quadratic in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + 2, x, y).is_quadratic
True
>>> Poly(x*y**2 + 2, x, y).is_quadratic
False
"""
return f.rep.is_quadratic
@property
def is_monomial(f):
"""
Returns ``True`` if ``f`` is zero or has only one term.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(3*x**2, x).is_monomial
True
>>> Poly(3*x**2 + 1, x).is_monomial
False
"""
return f.rep.is_monomial
@property
def is_homogeneous(f):
"""
Returns ``True`` if ``f`` is a homogeneous polynomial.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. If you want not
only to check if a polynomial is homogeneous but also compute its
homogeneous order, then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y, x, y).is_homogeneous
True
>>> Poly(x**3 + x*y, x, y).is_homogeneous
False
"""
return f.rep.is_homogeneous
@property
def is_irreducible(f):
"""
Returns ``True`` if ``f`` has no factors over its domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
True
>>> Poly(x**2 + 1, x, modulus=2).is_irreducible
False
"""
return f.rep.is_irreducible
@property
def is_univariate(f):
"""
Returns ``True`` if ``f`` is a univariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_univariate
True
>>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
False
>>> Poly(x*y**2 + x*y + 1, x).is_univariate
True
>>> Poly(x**2 + x + 1, x, y).is_univariate
False
"""
return len(f.gens) == 1
@property
def is_multivariate(f):
"""
Returns ``True`` if ``f`` is a multivariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_multivariate
False
>>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
True
>>> Poly(x*y**2 + x*y + 1, x).is_multivariate
False
>>> Poly(x**2 + x + 1, x, y).is_multivariate
True
"""
return len(f.gens) != 1
@property
def is_cyclotomic(f):
"""
Returns ``True`` if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> Poly(f).is_cyclotomic
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> Poly(g).is_cyclotomic
True
"""
return f.rep.is_cyclotomic
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
@_sympifyit('g', NotImplemented)
def __add__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() + g
return f.add(g)
@_sympifyit('g', NotImplemented)
def __radd__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g + f.as_expr()
return g.add(f)
@_sympifyit('g', NotImplemented)
def __sub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() - g
return f.sub(g)
@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g - f.as_expr()
return g.sub(f)
@_sympifyit('g', NotImplemented)
def __mul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr()*g
return f.mul(g)
@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g*f.as_expr()
return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
if n.is_Integer and n >= 0:
return f.pow(n)
else:
return f.as_expr()**n
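# A small sketch of the fallback behaviour above: non-negative integer
# exponents stay in the polynomial representation, anything else degrades to
# an ordinary expression.
#
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> Poly(x + 1, x)**2
#     Poly(x**2 + 2*x + 1, x, domain='ZZ')
#     >>> Poly(x, x)**(-1)
#     1/x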
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.div(f)
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.rem(g)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.quo(g)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
return g.as_expr()/f.as_expr()
__truediv__ = __div__
__rtruediv__ = __rdiv__
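# A sketch of how the arithmetic dunders above map onto the division API:
# ``divmod`` goes through ``div``, ``%`` through ``rem``, ``//`` through
# ``quo``, while true division always falls back to plain expressions.
#
#     >>> from sympy import Poly
#     >>> from sympy.abc import x
#     >>> divmod(Poly(x**2 + 1, x), Poly(x - 1, x))
#     (Poly(x + 1, x, domain='ZZ'), Poly(2, x, domain='ZZ'))
#     >>> Poly(x**2, x) / Poly(x, x)
#     x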
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if f.gens != g.gens:
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
return not f.__eq__(g)
def __nonzero__(f):
return not f.is_zero
__bool__ = __nonzero__
def eq(f, g, strict=False):
if not strict:
return f.__eq__(g)
else:
return f._strict_eq(sympify(g))
def ne(f, g, strict=False):
return not f.eq(g, strict=strict)
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
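# A short sketch of the equality semantics implemented above: two Poly
# instances compare equal only if their generators agree (after unifying the
# ground domains).
#
#     >>> from sympy import Poly
#     >>> from sympy.abc import x, y
#     >>> Poly(x**2 + 1, x) == Poly(x**2 + 1, x)
#     True
#     >>> Poly(x**2 + 1, x) == Poly(x**2 + 1, x, y)
#     False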
@public
class PurePoly(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super(PurePoly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
Examples
========
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
set([y])
"""
return self.free_symbols_in_domain
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if len(f.gens) != len(g.gens):
raise UnificationFailed("can't unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
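# A sketch contrasting PurePoly with Poly: because only the internal
# representation is compared (and unified), the names of the generators do
# not matter for PurePoly equality.
#
#     >>> from sympy import Poly, PurePoly
#     >>> from sympy.abc import x, y
#     >>> PurePoly(x**2 + 1) == PurePoly(y**2 + 1)
#     True
#     >>> Poly(x**2 + 1) == Poly(y**2 + 1)
#     False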
@public
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
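# A usage sketch for the public constructor above: it returns the built
# polynomial together with the options that were inferred from the input.
#
#     >>> from sympy import poly_from_expr
#     >>> from sympy.abc import x
#     >>> p, opt = poly_from_expr(x**2 + 1)
#     >>> p
#     Poly(x**2 + 1, x, domain='ZZ')
#     >>> opt.gens
#     (x,)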
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt.gens = poly.gens
opt.domain = poly.domain
if opt.polys is None:
opt.polys = True
return poly, opt
elif opt.expand:
expr = expr.expand()
rep, opt = _dict_from_expr(expr, opt)
if not opt.gens:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = list(zip(*list(rep.items())))
domain = opt.domain
if domain is None:
opt.domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = list(map(domain.from_sympy, coeffs))
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
if opt.polys is None:
opt.polys = False
return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
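# A usage sketch for the parallel constructor above: all expressions are
# polified over a common set of generators and a common domain.
#
#     >>> from sympy import parallel_poly_from_expr
#     >>> from sympy.abc import x, y
#     >>> (p, q), opt = parallel_poly_from_expr([x**2 + y, x - 1])
#     >>> p
#     Poly(x**2 + y, x, y, domain='ZZ')
#     >>> q
#     Poly(x - 1, x, y, domain='ZZ')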
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
from sympy.functions.elementary.piecewise import Piecewise
if len(exprs) == 2:
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt.gens = f.gens
opt.domain = f.domain
if opt.polys is None:
opt.polys = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
reps, opt = _parallel_dict_from_expr(exprs, opt)
if not opt.gens:
raise PolificationFailed(opt, origs, exprs, True)
for k in opt.gens:
if isinstance(k, Piecewise):
raise PolynomialError("Piecewise generators do not make sense")
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
for rep in reps:
monoms, coeffs = list(zip(*list(rep.items())))
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = list(map(domain.from_sympy, coeffs_list))
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys = []
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
polys.append(poly)
if opt.polys is None:
opt.polys = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
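# A minimal sketch of the helper above: the ``(key, value)`` pair is only
# added when the key is not already present, so explicitly supplied
# arguments win.
#
#     >>> args = _update_args({'gen': 1}, 'gen', 0)
#     >>> args['gen']
#     1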
@public
def degree(f, *gens, **args):
"""
Return the degree of ``f`` in the given variable.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
>>> degree(0, x)
-oo
"""
options.allowed_flags(args, ['gen', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree', 1, exc)
return sympify(F.degree(opt.gen))
@public
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
@public
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
@public
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
@public
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pquo', 2, exc)
try:
q = F.pquo(G)
except ExactQuotientFailed:
raise ExactQuotientFailed(f, g)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
@public
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
@public
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert, S
>>> from sympy.core.numbers import mod_inverse
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
For more efficient inversion of Rationals,
use the ``mod_inverse`` function:
>>> mod_inverse(3, 5)
2
>>> (S(2)/5).invert(S(7)/3)
5/2
See Also
========
sympy.core.numbers.mod_inverse
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
@public
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
includePRS = args.pop('includePRS', False)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('resultant', 2, exc)
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
if not opt.polys:
if includePRS:
return result.as_expr(), [r.as_expr() for r in R]
return result.as_expr()
else:
if includePRS:
return result, R
return result
@public
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
@public
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
Examples
========
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
seq = sympify(seq)
def try_non_polynomial_gcd(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.zero
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.gcd(result, number)
if domain.is_one(result):
break
return domain.to_sympy(result)
return None
result = try_non_polynomial_gcd(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
result = try_non_polynomial_gcd(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
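# A minimal sketch (not an upstream doctest) of the numeric fallback handled
# by ``try_non_polynomial_gcd`` above: plain numbers are reduced in the
# constructed ground domain instead of being polified.
#
# >>> from sympy import gcd_list
# >>> gcd_list([6, 8, 12])
# 2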
@public
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
elif g is None:
raise TypeError("gcd() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.gcd(a, b))
except NotImplementedError:
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
Examples
========
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
seq = sympify(seq)
def try_non_polynomial_lcm(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.one
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.lcm(result, number)
return domain.to_sympy(result)
return None
result = try_non_polynomial_lcm(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
result = try_non_polynomial_lcm(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
elif g is None:
raise TypeError("lcm() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.lcm(a, b))
except NotImplementedError:
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
If the ``deep`` flag is True, then the arguments of ``f`` will have
terms_gcd applied to them.
If a fraction is factored out of ``f`` and ``f`` is an Add, then
an unevaluated Mul will be returned so that automatic simplification
does not redistribute it. The hint ``clear``, when set to False, can be
used to prevent such factoring when all coefficients are not fractions.
Examples
========
>>> from sympy import terms_gcd, cos
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
The default action of polys routines is to expand the expression
given to them. terms_gcd follows this behavior:
>>> terms_gcd((3+3*x)*(x+x*y))
3*x*(x*y + x + y + 1)
If this is not desired then the hint ``expand`` can be set to False.
In this case the expression will be treated as though it were comprised
of one or more terms:
>>> terms_gcd((3+3*x)*(x+x*y), expand=False)
(3*x + 3)*(x*y + x)
In order to traverse factors of a Mul or the arguments of other
functions, the ``deep`` hint can be used:
>>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
3*x*(x + 1)*(y + 1)
>>> terms_gcd(cos(x + x*y), deep=True)
cos(x*(y + 1))
Rationals are factored out by default:
>>> terms_gcd(x + y/2)
(2*x + y)/2
Only the y-term had a coefficient that was a fraction; if one
does not want to factor out the 1/2 in cases like this, the
flag ``clear`` can be set to False:
>>> terms_gcd(x + y/2, clear=False)
x + y/2
>>> terms_gcd(x*y/2 + y**2, clear=False)
y*(x/2 + y)
The ``clear`` flag is ignored if all coefficients are fractions:
>>> terms_gcd(x/3 + y/2, clear=False)
(2*x + 3*y)/6
See Also
========
sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
"""
from sympy.core.relational import Equality
orig = sympify(f)
if not isinstance(f, Expr) or f.is_Atom:
return orig
if args.get('deep', False):
new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
args.pop('deep')
args['expand'] = False
return terms_gcd(new, *gens, **args)
if isinstance(f, Equality):
return f
clear = args.pop('clear', True)
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
J, f = F.terms_gcd()
if opt.domain.has_Ring:
if opt.domain.has_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.has_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[x**j for x, j in zip(f.gens, J)])
if coeff == 1:
coeff = S.One
if term == 1:
return orig
if clear:
return _keep_coeff(coeff, term*f.as_expr())
# base the clearing on the form of the original expression, not
# the (perhaps) Mul that we have now
coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import trunc
>>> from sympy.abc import x
>>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
-x**3 - x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('trunc', 1, exc)
result = F.trunc(sympify(p))
if not opt.polys:
return result.as_expr()
else:
return result
@public
def monic(f, *gens, **args):
"""
Divide all coefficients of ``f`` by ``LC(f)``.
Examples
========
>>> from sympy import monic
>>> from sympy.abc import x
>>> monic(3*x**2 + 4*x + 2)
x**2 + 4*x/3 + 2/3
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('monic', 1, exc)
result = F.monic(auto=opt.auto)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def content(f, *gens, **args):
"""
Compute GCD of coefficients of ``f``.
Examples
========
>>> from sympy import content
>>> from sympy.abc import x
>>> content(6*x**2 + 8*x + 12)
2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('content', 1, exc)
return F.content()
@public
def primitive(f, *gens, **args):
"""
Compute content and the primitive form of ``f``.
Examples
========
>>> from sympy.polys.polytools import primitive
>>> from sympy.abc import x
>>> primitive(6*x**2 + 8*x + 12)
(2, 3*x**2 + 4*x + 6)
>>> eq = (2 + 2*x)*x + 2
Expansion is performed by default:
>>> primitive(eq)
(2, x**2 + x + 1)
Set ``expand`` to False to shut this off. Note that the
extraction will not be recursive; use the as_content_primitive method
for recursive, non-destructive Rational extraction.
>>> primitive(eq, expand=False)
(1, x*(2*x + 2) + 2)
>>> eq.as_content_primitive()
(2, x*(x + 1) + 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('primitive', 1, exc)
cont, result = F.primitive()
if not opt.polys:
return cont, result.as_expr()
else:
return cont, result
@public
def compose(f, g, *gens, **args):
"""
Compute functional composition ``f(g)``.
Examples
========
>>> from sympy import compose
>>> from sympy.abc import x
>>> compose(x**2 + x, x - 1)
x**2 - x
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('compose', 2, exc)
result = F.compose(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def decompose(f, *gens, **args):
"""
Compute functional decomposition of ``f``.
Examples
========
>>> from sympy import decompose
>>> from sympy.abc import x
>>> decompose(x**4 + 2*x**3 - x - 1)
[x**2 - x - 1, x**2 + x]
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('decompose', 1, exc)
result = F.decompose()
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def sturm(f, *gens, **args):
"""
Compute Sturm sequence of ``f``.
Examples
========
>>> from sympy import sturm
>>> from sympy.abc import x
>>> sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sturm', 1, exc)
result = F.sturm(auto=opt.auto)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def gff_list(f, *gens, **args):
"""
Compute a list of greatest factorial factors of ``f``.
Examples
========
>>> from sympy import gff_list, ff
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> gff_list(f)
[(x, 1), (x + 2, 4)]
>>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
True
>>> f = x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \
1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x
>>> gff_list(f)
[(x**3 + 7, 2), (x**2 + 5*x, 3)]
>>> ff(x**3 + 7, 2)*ff(x**2 + 5*x, 3) == f
True
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('gff_list', 1, exc)
factors = F.gff_list()
if not opt.polys:
return [(g.as_expr(), k) for g, k in factors]
else:
return factors
@public
def gff(f, *gens, **args):
"""Compute greatest factorial factorization of ``f``. """
raise NotImplementedError('symbolic falling factorial')
@public
def sqf_norm(f, *gens, **args):
"""
Compute square-free norm of ``f``.
Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over the ground
domain ``K``, where ``a`` generates the algebraic extension of ``K``.
Examples
========
>>> from sympy import sqf_norm, sqrt
>>> from sympy.abc import x
>>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
(1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_norm', 1, exc)
s, g, r = F.sqf_norm()
if not opt.polys:
return Integer(s), g.as_expr(), r.as_expr()
else:
return Integer(s), g, r
@public
def sqf_part(f, *gens, **args):
"""
Compute square-free part of ``f``.
Examples
========
>>> from sympy import sqf_part
>>> from sympy.abc import x
>>> sqf_part(x**3 - 3*x - 2)
x**2 - x - 2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_part', 1, exc)
result = F.sqf_part()
if not opt.polys:
return result.as_expr()
else:
return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), len(poly.gens), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), len(poly.gens), exp, rep)
return sorted(factors, key=key)
def _factors_product(factors):
"""Multiply a list of ``(expr, exp)`` pairs. """
return Mul(*[f.as_expr()**k for f, k in factors])
def _symbolic_factor_list(expr, opt, method):
"""Helper function for :func:`_symbolic_factor`. """
coeff, factors = S.One, []
args = [i._eval_factor() if hasattr(i, '_eval_factor') else i
for i in Mul.make_args(expr)]
for arg in args:
if arg.is_Number:
coeff *= arg
continue
if arg.is_Mul:
args.extend(arg.args)
continue
if arg.is_Pow:
base, exp = arg.args
if base.is_Number and exp.is_Number:
coeff *= arg
continue
if base.is_Number:
factors.append((base, exp))
continue
else:
base, exp = arg, S.One
try:
poly, _ = _poly_from_expr(base, opt)
except PolificationFailed as exc:
factors.append((exc.expr, exp))
else:
func = getattr(poly, method + '_list')
_coeff, _factors = func()
if _coeff is not S.One:
if exp.is_Integer:
coeff *= _coeff**exp
elif _coeff.is_positive:
factors.append((_coeff, exp))
else:
_factors.append((_coeff, S.One))
if exp is S.One:
factors.extend(_factors)
elif exp.is_integer:
factors.extend([(f, k*exp) for f, k in _factors])
else:
other = []
for f, k in _factors:
if f.as_expr().is_positive:
factors.append((f, k*exp))
else:
other.append((f, k))
factors.append((_factors_product(other), exp))
return coeff, factors
def _symbolic_factor(expr, opt, method):
"""Helper function for :func:`_factor`. """
if isinstance(expr, Expr) and not expr.is_Relational:
if hasattr(expr,'_eval_factor'):
return expr._eval_factor()
coeff, factors = _symbolic_factor_list(together(expr), opt, method)
return _keep_coeff(coeff, _factors_product(factors))
elif hasattr(expr, 'args'):
return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args])
elif hasattr(expr, '__iter__'):
return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])
else:
return expr
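# A small illustrative sketch (expected behaviour, not an upstream doctest) of
# the container branch of ``_symbolic_factor``: non-polynomial wrappers such as
# relationals are rebuilt with their arguments factored individually.
#
# >>> from sympy import factor, Eq
# >>> from sympy.abc import x
# >>> factor(Eq(x**2 - 1, 0)) == Eq((x - 1)*(x + 1), 0)
# True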
def _generic_factor_list(expr, gens, args, method):
"""Helper function for :func:`sqf_list` and :func:`factor_list`. """
options.allowed_flags(args, ['frac', 'polys'])
opt = options.build_options(gens, args)
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
numer, denom = together(expr).as_numer_denom()
cp, fp = _symbolic_factor_list(numer, opt, method)
cq, fq = _symbolic_factor_list(denom, opt, method)
if fq and not opt.frac:
raise PolynomialError("a polynomial expected, got %s" % expr)
_opt = opt.clone(dict(expand=True))
for factors in (fp, fq):
for i, (f, k) in enumerate(factors):
if not f.is_Poly:
f, _ = _poly_from_expr(f, _opt)
factors[i] = (f, k)
fp = _sorted_factors(fp, method)
fq = _sorted_factors(fq, method)
if not opt.polys:
fp = [(f.as_expr(), k) for f, k in fp]
fq = [(f.as_expr(), k) for f, k in fq]
coeff = cp/cq
if not opt.frac:
return coeff, fp
else:
return coeff, fp, fq
else:
raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
"""Helper function for :func:`sqf` and :func:`factor`. """
options.allowed_flags(args, [])
opt = options.build_options(gens, args)
return _symbolic_factor(sympify(expr), opt, method)
def to_rational_coeffs(f):
"""
Try to transform a polynomial so that it has rational coefficients.
First a rescaling ``x = alpha*y`` is attempted, so that
``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with
rational coefficients and ``lc`` is the leading coefficient of ``f``.
If this fails, a translation ``x = y + beta`` is tried, so that
``f(x) = g(y)``.
Returns ``None`` if no such ``g`` is found;
``(lc, alpha, None, g)`` in case of rescaling,
``(None, None, beta, g)`` in case of translation.
Notes
=====
Currently only polynomials whose irrational coefficients involve square roots (no radicals of index greater than 2) are handled.
Examples
========
>>> from sympy import sqrt, Poly, simplify
>>> from sympy.polys.polytools import to_rational_coeffs
>>> from sympy.abc import x
>>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
>>> lc, r, _, g = to_rational_coeffs(p)
>>> lc, r
(7 + 5*sqrt(2), -2*sqrt(2) + 2)
>>> g
Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
>>> r1 = simplify(1/r)
>>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
True
"""
from sympy.simplify.simplify import simplify
def _try_rescale(f, f1=None):
"""
try rescaling ``x -> alpha*x`` to convert f to a polynomial
with rational coefficients.
Returns ``lc, alpha, f`` if the rescaling is successful, where
``lc`` is the leading coefficient, ``alpha`` is the rescaling factor
and ``f`` is the rescaled polynomial; otherwise ``None`` is returned.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None
n = f.degree()
lc = f.LC()
f1 = f1 or f.monic()
coeffs = f1.all_coeffs()[1:]
coeffs = [simplify(coeffx) for coeffx in coeffs]
if coeffs[-2]:
rescale1_x = simplify(coeffs[-2]/coeffs[-1])
coeffs1 = []
for i in range(len(coeffs)):
coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
if not coeffx.is_rational:
break
coeffs1.append(coeffx)
else:
rescale_x = simplify(1/rescale1_x)
x = f.gens[0]
v = [x**n]
for i in range(1, n + 1):
v.append(coeffs1[i - 1]*x**(n - i))
f = Add(*v)
f = Poly(f)
return lc, rescale_x, f
return None
def _try_translate(f, f1=None):
"""
try translating ``x -> x + alpha`` to convert f to a polynomial
with rational coefficients.
Returns ``alpha, f`` if the translation is successful, where
``alpha`` is the translation factor and ``f`` is the shifted
polynomial; otherwise ``None`` is returned.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None
n = f.degree()
f1 = f1 or f.monic()
coeffs = f1.all_coeffs()[1:]
c = simplify(coeffs[0])
if c and not c.is_rational:
func = Add
if c.is_Add:
args = c.args
func = c.func
else:
args = [c]
sifted = sift(args, lambda z: z.is_rational)
c1, c2 = sifted[True], sifted[False]
alpha = -func(*c2)/n
f2 = f1.shift(alpha)
return alpha, f2
return None
def _has_square_roots(p):
"""
Return True if the coefficients of ``p`` involve square roots but no radicals of higher index
"""
from sympy.core.exprtools import Factors
coeffs = p.coeffs()
has_sq = False
for y in coeffs:
for x in Add.make_args(y):
f = Factors(x).factors
r = [wx.q for b, wx in f.items() if
b.is_number and wx.is_Rational and wx.q >= 2]
if not r:
continue
if min(r) == 2:
has_sq = True
if max(r) > 2:
return False
return has_sq
if f.get_domain().is_EX and _has_square_roots(f):
f1 = f.monic()
r = _try_rescale(f, f1)
if r:
return r[0], r[1], None, r[2]
else:
r = _try_translate(f, f1)
if r:
return None, None, r[0], r[1]
return None
def _torational_factor_list(p, x):
"""
helper function to factor polynomial using to_rational_coeffs
Examples
========
>>> from sympy.polys.polytools import _torational_factor_list
>>> from sympy.abc import x
>>> from sympy import sqrt, expand, Mul
>>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
>>> factors = _torational_factor_list(p, x); factors
(-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
>>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
>>> factors = _torational_factor_list(p, x); factors
(1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
"""
from sympy.simplify.simplify import simplify
p1 = Poly(p, x, domain='EX')
n = p1.degree()
res = to_rational_coeffs(p1)
if not res:
return None
lc, r, t, g = res
factors = factor_list(g.as_expr())
if lc:
c = simplify(factors[0]*lc*r**n)
r1 = simplify(1/r)
a = []
for z in factors[1:][0]:
a.append((simplify(z[0].subs({x: x*r1})), z[1]))
else:
c = factors[0]
a = []
for z in factors[1:][0]:
a.append((z[0].subs({x: x - t}), z[1]))
return (c, a)
@public
def sqf_list(f, *gens, **args):
"""
Compute a list of square-free factors of ``f``.
Examples
========
>>> from sympy import sqf_list
>>> from sympy.abc import x
>>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
(2, [(x + 1, 2), (x + 2, 3)])
"""
return _generic_factor_list(f, gens, args, method='sqf')
@public
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
Examples
========
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(x + 1)**2*(x + 2)**3
"""
return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (x**2 + 1, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, **args):
"""
Compute the factorization of expression, ``f``, into irreducibles. (To
factor an integer into primes, use ``factorint``.)
There are two modes implemented: symbolic and formal. If ``f`` is not an
instance of :class:`Poly` and generators are not specified, then the
former mode is used. Otherwise, the formal mode is used.
In symbolic mode, :func:`factor` will traverse the expression tree and
factor its components without any prior expansion, unless an instance
of :class:`Add` is encountered (in this case formal factorization is
used). This way :func:`factor` can handle large or symbolic exponents.
By default, the factorization is computed over the rationals. To factor
over another domain, e.g. an algebraic or finite field, use the appropriate
options: ``extension``, ``modulus`` or ``domain``.
Examples
========
>>> from sympy import factor, sqrt
>>> from sympy.abc import x, y
>>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
2*(x + y)*(x**2 + 1)**2
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, gaussian=True)
(x - I)*(x + I)
>>> factor(x**2 - 2, extension=sqrt(2))
(x - sqrt(2))*(x + sqrt(2))
>>> factor((x**2 - 1)/(x**2 + 4*x + 4))
(x - 1)*(x + 1)/(x + 2)**2
>>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
(x + 2)**20000000*(x**2 + 1)
By default, factor deals with an expression as a whole:
>>> eq = 2**(x**2 + 2*x + 1)
>>> factor(eq)
2**(x**2 + 2*x + 1)
If the ``deep`` flag is True then subexpressions will
be factored:
>>> factor(eq, deep=True)
2**((x + 1)**2)
See Also
========
sympy.ntheory.factor_.factorint
"""
f = sympify(f)
if args.pop('deep', False):
partials = {}
muladd = f.atoms(Mul, Add)
for p in muladd:
fac = factor(p, *gens, **args)
if (fac.is_Mul or fac.is_Pow) and fac != p:
partials[p] = fac
return f.xreplace(partials)
try:
return _generic_factor(f, gens, args, method='factor')
except PolynomialError as msg:
if not f.is_commutative:
from sympy.core.exprtools import factor_nc
return factor_nc(f)
else:
raise PolynomialError(msg)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
Examples
========
>>> from sympy import intervals
>>> from sympy.abc import x
>>> intervals(x**2 - 3)
[((-2, -1), 1), ((1, 2), 1)]
>>> intervals(x**2 - 3, eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if not hasattr(F, '__iter__'):
try:
F = Poly(F)
except GeneratorsNeeded:
return []
return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else:
polys, opt = parallel_poly_from_expr(F, domain='QQ')
if len(opt.gens) > 1:
raise MultivariatePolynomialError
for i, poly in enumerate(polys):
polys[i] = poly.rep.rep
if eps is not None:
eps = opt.domain.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = opt.domain.convert(inf)
if sup is not None:
sup = opt.domain.convert(sup)
intervals = dup_isolate_real_roots_list(polys, opt.domain,
eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
result = []
for (s, t), indices in intervals:
s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
result.append(((s, t), indices))
return result
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import refine_root
>>> from sympy.abc import x
>>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
(19/11, 26/15)
"""
try:
F = Poly(f)
except GeneratorsNeeded:
raise PolynomialError(
"can't refine a root of %s, not a polynomial" % f)
return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
If one of ``inf`` or ``sup`` is complex, it will return the number of roots
in the complex rectangle with corners at ``inf`` and ``sup``.
Examples
========
>>> from sympy import count_roots, I
>>> from sympy.abc import x
>>> count_roots(x**4 - 4, -3, 3)
2
>>> count_roots(x**4 - 4, 0, 1 + 3*I)
1
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't count roots of %s, not a polynomial" % f)
return F.count_roots(inf=inf, sup=sup)
@public
def real_roots(f, multiple=True):
"""
Return a list of real roots with multiplicities of ``f``.
Examples
========
>>> from sympy import real_roots
>>> from sympy.abc import x
>>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
[-1/2, 2, 2]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute real roots of %s, not a polynomial" % f)
return F.real_roots(multiple=multiple)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Examples
========
>>> from sympy import nroots
>>> from sympy.abc import x
>>> nroots(x**2 - 3, n=15)
[-1.73205080756888, 1.73205080756888]
>>> nroots(x**2 - 3, n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute numerical roots of %s, not a polynomial" % f)
return F.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cancel(f, *gens, **args):
"""
Cancel common factors in a rational function ``f``.
Examples
========
>>> from sympy import cancel, sqrt, Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
>>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
sqrt(6)/2
"""
from sympy.core.exprtools import factor_terms
from sympy.functions.elementary.piecewise import Piecewise
options.allowed_flags(args, ['polys'])
f = sympify(f)
if not isinstance(f, (tuple, Tuple)):
if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
return f
f = factor_terms(f, radical=True)
p, q = f.as_numer_denom()
elif len(f) == 2:
p, q = f
elif isinstance(f, Tuple):
return factor_terms(f)
else:
raise ValueError('unexpected argument: %s' % f)
try:
(F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
except PolificationFailed:
if not isinstance(f, (tuple, Tuple)):
return f
else:
return S.One, p, q
except PolynomialError as msg:
if f.is_commutative and not f.has(Piecewise):
raise PolynomialError(msg)
# Handling of noncommutative and/or piecewise expressions
if f.is_Add or f.is_Mul:
sifted = sift(f.args, lambda x: x.is_commutative is True and not x.has(Piecewise))
c, nc = sifted[True], sifted[False]
nc = [cancel(i) for i in nc]
return f.func(cancel(f.func._from_args(c)), *nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
# XXX: This should really skip anything that's not Expr.
if isinstance(e, (tuple, Tuple, BooleanAtom)):
continue
try:
reps.append((e, cancel(e)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
c, P, Q = F.cancel(G)
if not isinstance(f, (tuple, Tuple)):
return c*(P.as_expr()/Q.as_expr())
else:
if not opt.polys:
return c, P.as_expr(), Q.as_expr()
else:
return c, P, Q
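# A minimal sketch (expected values, not an upstream doctest) of the tuple form
# accepted by ``cancel``: passing ``(p, q)`` returns the canceling coefficient
# together with the reduced numerator and denominator.
#
# >>> from sympy import cancel
# >>> from sympy.abc import x
# >>> cancel((x**2 - 2*x + 1, x**2 - 1))
# (1, x - 1, x + 1)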
@public
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys', 'auto'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('reduced', 0, exc)
domain = opt.domain
retract = False
if opt.auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
@public
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
For more information on Groebner bases, see the references and the docstring
of `solve_poly_system()`.
Examples
========
Example taken from [1].
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> F = [x*y - 2*y, 2*y**2 - x**2]
>>> groebner(F, x, y, order='lex')
GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
domain='ZZ', order='lex')
>>> groebner(F, x, y, order='grlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grlex')
>>> groebner(F, x, y, order='grevlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grevlex')
By default, an improved implementation of the Buchberger algorithm is
used. Optionally, an implementation of the F5B algorithm can be used.
The algorithm can be set using ``method`` flag or with the :func:`setup`
function from :mod:`sympy.polys.polyconfig`:
>>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
>>> groebner(F, x, y, method='buchberger')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
>>> groebner(F, x, y, method='f5b')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
References
==========
1. [Buchberger01]_
2. [Cox97]_
"""
return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
return GroebnerBasis(F, *gens, **args).is_zero_dimensional
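# A small illustrative sketch (expected values, not an upstream doctest): the
# ideal generated by ``[x**2 - 1, y - 1]`` bounds the degree in every variable,
# whereas ``[x*y - 1]`` alone does not.
#
# >>> from sympy import is_zero_dimensional
# >>> from sympy.abc import x, y
# >>> is_zero_dimensional([x**2 - 1, y - 1], x, y)
# True
# >>> is_zero_dimensional([x*y - 1], x, y)
# False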
@public
class GroebnerBasis(Basic):
"""Represents a reduced Groebner basis. """
def __new__(cls, F, *gens, **args):
"""Compute a reduced Groebner basis for a system of polynomials. """
options.allowed_flags(args, ['polys', 'method'])
try:
polys, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('groebner', len(F), exc)
from sympy.polys.rings import PolyRing
ring = PolyRing(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
polys[i] = ring.from_dict(poly.rep.to_dict())
G = _groebner(polys, ring, method=opt.method)
G = [Poly._from_dict(g, opt) for g in G]
return cls._new(G, opt)
@classmethod
def _new(cls, basis, options):
obj = Basic.__new__(cls)
obj._basis = tuple(basis)
obj._options = options
return obj
@property
def args(self):
return (Tuple(*self._basis), Tuple(*self._options.gens))
@property
def exprs(self):
return [poly.as_expr() for poly in self._basis]
@property
def polys(self):
return list(self._basis)
@property
def gens(self):
return self._options.gens
@property
def domain(self):
return self._options.domain
@property
def order(self):
return self._options.order
def __len__(self):
return len(self._basis)
def __iter__(self):
if self._options.polys:
return iter(self.polys)
else:
return iter(self.exprs)
def __getitem__(self, item):
if self._options.polys:
basis = self.polys
else:
basis = self.exprs
return basis[item]
def __hash__(self):
return hash((self._basis, tuple(self._options.items())))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._basis == other._basis and self._options == other._options
elif iterable(other):
return self.polys == list(other) or self.exprs == list(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_zero_dimensional(self):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
def single_var(monomial):
return sum(map(bool, monomial)) == 1
exponents = Monomial([0]*len(self.gens))
order = self._options.order
for poly in self.polys:
monomial = poly.LM(order=order)
if single_var(monomial):
exponents *= monomial
# If any element of the exponents vector is zero, then there's
# a variable for which there's no degree bound and the ideal
# generated by this Groebner basis isn't zero-dimensional.
return all(exponents)
def fglm(self, order):
"""
Convert a Groebner basis from one ordering to another.
The FGLM algorithm converts reduced Groebner bases of zero-dimensional
ideals from one ordering to another. This method is often used when it
is infeasible to compute a Groebner basis with respect to a particular
ordering directly.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import groebner
>>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
>>> G = groebner(F, x, y, order='grlex')
>>> list(G.fglm('lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
>>> list(groebner(F, x, y, order='lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
References
==========
J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
Computation of Zero-dimensional Groebner Bases by Change of
Ordering
"""
opt = self._options
src_order = opt.order
dst_order = monomial_key(order)
if src_order == dst_order:
return self
if not self.is_zero_dimensional:
raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
polys = list(self._basis)
domain = opt.domain
opt = opt.clone(dict(
domain=domain.get_field(),
order=dst_order,
))
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, src_order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
G = matrix_fglm(polys, _ring, dst_order)
G = [Poly._from_dict(dict(g), opt) for g in G]
if not domain.has_Field:
G = [g.clear_denoms(convert=True)[1] for g in G]
opt.domain = domain
return self._new(G, opt)
def reduce(self, expr, auto=True):
"""
Reduces a polynomial modulo a Groebner basis.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import groebner, expand
>>> from sympy.abc import x, y
>>> f = 2*x**4 - x**2 + y**3 + y**2
>>> G = groebner([x**3 - x, y**3 - y])
>>> G.reduce(f)
([2*x, 1], x**2 + y**2 + y)
>>> Q, r = _
>>> expand(sum(q*g for q, g in zip(Q, G)) + r)
2*x**4 - x**2 + y**3 + y**2
>>> _ == f
True
"""
poly = Poly._from_expr(expr, self._options)
polys = [poly] + list(self._basis)
opt = self._options
domain = opt.domain
retract = False
if auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
def contains(self, poly):
"""
Check if ``poly`` belongs the ideal generated by ``self``.
Examples
========
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> f = 2*x**3 + y**3 + 3*y
>>> G = groebner([x**2 + y**2 - 1, x*y - 2])
>>> G.contains(f)
True
>>> G.contains(f + 1)
False
"""
return self.reduce(poly)[1] == 0
@public
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
poly_factors.append(
_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get('gens', ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if 'expand' not in args:
args['expand'] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
| antepsis/anteplahmacun | sympy/polys/polytools.py | Python | bsd-3-clause | 174,979 | ["Gaussian"] | 847d20c8e0b8801413e2a17fb6244f1289f2fdd98be65dd384f48fd38ca39503 |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import idl_schema
import json_parse
from js_externs_generator import JsExternsGenerator
from datetime import datetime
import model
import sys
import unittest
COPYRIGHT = ("""// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
""" % datetime.now().year)
INFO = """// This file was generated by:
// %s.
// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.%s.FooType'.
// Please run the closure compiler before committing changes.
// See https://code.google.com/p/chromium/wiki/ClosureCompilation.
"""
# The contents of a fake idl file.
fake_idl = """
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A totally fake API.
namespace fakeApi {
enum Greek {
ALPHA,
BETA,
GAMMA,
DELTA
};
dictionary Bar {
long num;
};
dictionary Baz {
DOMString str;
long num;
boolean b;
Greek letter;
Greek? optionalLetter;
long[] arr;
Bar[]? optionalObjArr;
Greek[] enumArr;
any[] anythingGoes;
Bar obj;
long? maybe;
(DOMString or Greek or long[]) choice;
object plainObj;
};
callback VoidCallback = void();
callback BazGreekCallback = void(Baz baz, Greek greek);
interface Functions {
// Does something exciting! And what's more, this is a multiline function
// comment! It goes onto multiple lines!
// |baz| : The baz to use.
static void doSomething(Baz baz, VoidCallback callback);
// |callback| : The callback which will most assuredly in all cases be
// called; that is, of course, iff such a callback was provided and is
// not at all null.
static void bazGreek(optional BazGreekCallback callback);
[deprecated="Use a new method."] static DOMString returnString();
};
interface Events {
// Fired when we realize it's a trap!
static void onTrapDetected(Baz baz);
};
};
"""
# The output we expect from our fake idl file.
expected_output = COPYRIGHT + "\n" + (INFO % (sys.argv[0], "fakeApi")) + """
/** @fileoverview Externs generated from namespace: fakeApi */
/**
* @const
*/
chrome.fakeApi = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeApi#type-Greek
*/
chrome.fakeApi.Greek = {
ALPHA: 'ALPHA',
BETA: 'BETA',
GAMMA: 'GAMMA',
DELTA: 'DELTA',
};
/**
* @typedef {{
* num: number
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Bar
*/
chrome.fakeApi.Bar;
/**
* @typedef {{
* str: string,
* num: number,
* b: boolean,
* letter: !chrome.fakeApi.Greek,
* optionalLetter: (!chrome.fakeApi.Greek|undefined),
* arr: !Array<number>,
* optionalObjArr: (!Array<!chrome.fakeApi.Bar>|undefined),
* enumArr: !Array<!chrome.fakeApi.Greek>,
* anythingGoes: !Array<*>,
* obj: !chrome.fakeApi.Bar,
* maybe: (number|undefined),
* choice: (string|!chrome.fakeApi.Greek|!Array<number>),
* plainObj: Object
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Baz
*/
chrome.fakeApi.Baz;
/**
* Does something exciting! And what's more, this is a multiline function
* comment! It goes onto multiple lines!
* @param {!chrome.fakeApi.Baz} baz The baz to use.
* @param {function():void} callback
* @see https://developer.chrome.com/extensions/fakeApi#method-doSomething
*/
chrome.fakeApi.doSomething = function(baz, callback) {};
/**
* @param {function(!chrome.fakeApi.Baz, !chrome.fakeApi.Greek):void=} callback
* The callback which will most assuredly in all cases be called; that is,
* of course, iff such a callback was provided and is not at all null.
* @see https://developer.chrome.com/extensions/fakeApi#method-bazGreek
*/
chrome.fakeApi.bazGreek = function(callback) {};
/**
* @return {string}
* @deprecated Use a new method.
* @see https://developer.chrome.com/extensions/fakeApi#method-returnString
*/
chrome.fakeApi.returnString = function() {};
/**
* Fired when we realize it's a trap!
* @type {!ChromeEvent}
* @see https://developer.chrome.com/extensions/fakeApi#event-onTrapDetected
*/
chrome.fakeApi.onTrapDetected;
"""
fake_json = """// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
[
{
"namespace": "fakeJson",
"description": "Fake JSON API Stuff",
"types": [ {
"id": "CrazyEnum",
"type": "string",
"enum": ["camelCaseEnum", "Non-Characters", "5NumFirst", \
"3Just-plainOld_MEAN"]
} ],
"functions": [ {
"name": "funcWithInlineObj",
"type": "function",
"parameters": [
{
"type": "object",
"name": "inlineObj",
"description": "Evil inline object! With a super duper duper long\
string description that causes problems!",
"properties": {
"foo": {
"type": "boolean",
"optional": "true",
"description": "The foo."
},
"bar": {
"type": "integer",
"description": "The bar."
},
"baz": {
"type": "object",
"description": "Inception object.",
"properties": {
"depth": {
"type": "integer"
}
}
}
}
},
{
"name": "callback",
"type": "function",
"parameters": [
{
"type": "object",
"name": "returnObj",
"properties": {
"str": { "type": "string"}
}
}
],
"description": "The callback to this heinous method"
}
],
"returns": {
"type": "object",
"properties": {
"str": { "type": "string" },
"int": { "type": "number" }
}
}
} ]
}
]"""
json_expected = COPYRIGHT + "\n" + (INFO % (sys.argv[0], "fakeJson")) + """
/** @fileoverview Externs generated from namespace: fakeJson */
/**
* @const
*/
chrome.fakeJson = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeJson#type-CrazyEnum
*/
chrome.fakeJson.CrazyEnum = {
CAMEL_CASE_ENUM: 'camelCaseEnum',
NON_CHARACTERS: 'Non-Characters',
_5NUM_FIRST: '5NumFirst',
_3JUST_PLAIN_OLD_MEAN: '3Just-plainOld_MEAN',
};
/**
* @param {{
* foo: (boolean|undefined),
* bar: number,
* baz: {
* depth: number
* }
* }} inlineObj Evil inline object! With a super duper duper long string
* description that causes problems!
* @param {function({
* str: string
* }):void} callback The callback to this heinous method
* @return {{
* str: string,
* int: number
* }}
* @see https://developer.chrome.com/extensions/fakeJson#method-funcWithInlineObj
*/
chrome.fakeJson.funcWithInlineObj = function(inlineObj, callback) {};
"""
class JsExternGeneratorTest(unittest.TestCase):
def _GetNamespace(self, fake_content, filename, is_idl):
"""Returns a namespace object for the given content"""
api_def = (idl_schema.Process(fake_content, filename) if is_idl
else json_parse.Parse(fake_content))
m = model.Model()
return m.AddNamespace(api_def[0], filename)
def setUp(self):
self.maxDiff = None # Lets us see the full diff when unequal.
def testBasic(self):
namespace = self._GetNamespace(fake_idl, 'fake_api.idl', True)
self.assertMultiLineEqual(expected_output,
JsExternsGenerator().Generate(namespace).Render())
def testJsonWithInlineObjects(self):
namespace = self._GetNamespace(fake_json, 'fake_api.json', False)
self.assertMultiLineEqual(json_expected,
JsExternsGenerator().Generate(namespace).Render())
if __name__ == '__main__':
unittest.main()
| ltilve/ChromiumGStreamerBackend | tools/json_schema_compiler/js_externs_generator_test.py | Python | bsd-3-clause | 8,190 | ["exciting"] | cb52cf2478e902799c26b31a7b98115d543c4994482deda09af59d1b1ee3c92e |
# This example demonstrates a multiscale model with synaptic input, Ca
# entry to the spine, receptor modulation following phosphorylation and
# Ca diffusion from spine to the dendrite. Lots going on.
# System switches to a potentiated state after a 1s strong synaptic input.
#
# Ca+CaM <===> Ca_CaM; Ca_CaM + CaMKII <===> Ca_CaM_CaMKII (all in
# spine head, except that the Ca_CaM_CaMKII translocates to the PSD)
# chan ------Ca_CaM_CaMKII-----> chan_p; chan_p ------> chan (all in PSD)
#
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3.
# Converted to doctest by Dilawar Singh
import os
import moose
import numpy as np
import rdesigneur as rd
sdir_ = os.path.dirname(os.path.realpath(__file__))
A = np.array([3.522e-05, 3.298e-04, 1.752e-05, 1.879e-02, 1.629e-02, 1.533e-04,
1.538e-04, 1.546e-04, 1.559e-04, 1.576e-04, 1.597e-04, 1.623e-04,
1.655e-04, 1.693e-04, 1.738e-04, 1.791e-04, 1.852e-04, 1.922e-04,
2.002e-04, 2.094e-04, 2.197e-04, 2.314e-04, 2.446e-04, 2.596e-04,
2.765e-04, 2.955e-04, 3.171e-04, 3.415e-04, 3.693e-04, 4.008e-04,
4.369e-04, 4.783e-04, 5.259e-04, 5.811e-04, 6.455e-04, 7.212e-04,
8.076e-04, 8.993e-04, 9.039e-04, 9.105e-04, 9.189e-04, 9.293e-04,
9.417e-04, 9.562e-04, 9.729e-04, 9.918e-04, 1.013e-03, 1.037e-03,
1.063e-03, 1.092e-03, 1.123e-03, 1.157e-03, 1.193e-03, 1.233e-03,
1.275e-03, 1.320e-03, 1.305e-03, 1.293e-03, 1.285e-03, 1.279e-03,
1.276e-03, 1.029e-03, 1.035e-03, 3.114e-02, 2.892e-02, 4.087e-03,
3.402e-03])
B = np.array([6.761e-06, 1.049e-04, 4.196e-06, 7.230e-02, 6.142e-02, 8.928e-05,
8.956e-05, 9.012e-05, 9.096e-05, 9.209e-05, 9.350e-05, 9.524e-05,
9.735e-05, 9.990e-05, 1.029e-04, 1.065e-04, 1.106e-04, 1.154e-04,
1.208e-04, 1.271e-04, 1.342e-04, 1.422e-04, 1.514e-04, 1.618e-04,
1.735e-04, 1.869e-04, 2.021e-04, 2.195e-04, 2.394e-04, 2.625e-04,
2.894e-04, 3.210e-04, 3.586e-04, 4.041e-04, 4.601e-04, 5.305e-04,
6.183e-04, 7.234e-04, 6.871e-04, 6.725e-04, 6.715e-04, 6.796e-04,
6.937e-04, 7.114e-04, 7.310e-04, 7.515e-04, 7.720e-04, 7.922e-04,
8.118e-04, 8.311e-04, 8.504e-04, 8.702e-04, 8.913e-04, 9.150e-04,
9.429e-04, 9.780e-04, 9.673e-04, 9.636e-04, 9.632e-04, 9.640e-04,
9.647e-04, 2.049e-03, 1.984e-03, 1.499e-02, 1.382e-02, 2.415e-03,
1.918e-03])
def test():
"""Test
"""
rdes = rd.rdesigneur(
elecDt = 50e-6,
chemDt = 0.002,
diffDt = 0.002,
chemPlotDt = 0.02,
useGssa = False,
# cellProto syntax: ['ballAndStick', 'name', somaDia, somaLength, dendDia, dendLength, numDendSegments ]
cellProto = [['ballAndStick', 'soma', 12e-6, 12e-6, 4e-6, 100e-6, 2 ]],
chemProto = [[os.path.join(sdir_,
'../py_rdesigneur/chem/chanPhosph3compt.g'), 'chem']],
spineProto = [['makeActiveSpine()', 'spine']],
chanProto = [
['make_Na()', 'Na'],
['make_K_DR()', 'K_DR'],
['make_K_A()', 'K_A' ],
['make_Ca()', 'Ca' ],
['make_Ca_conc()', 'Ca_conc' ]
],
passiveDistrib = [['soma', 'CM', '0.01', 'Em', '-0.06']],
spineDistrib = [['spine', '#dend#', '50e-6', '1e-6']],
chemDistrib = [['chem', '#', 'install', '1' ]],
chanDistrib = [
['Na', 'soma', 'Gbar', '300' ],
['K_DR', 'soma', 'Gbar', '250' ],
['K_A', 'soma', 'Gbar', '200' ],
['Ca_conc', 'soma', 'tau', '0.0333' ],
['Ca', 'soma', 'Gbar', '40' ]
],
adaptorList = [
[ 'psd/chan_p', 'n', 'glu', 'modulation', 0.1, 1.0 ],
[ 'Ca_conc', 'Ca', 'spine/Ca', 'conc', 0.00008, 8 ]
],
# Syn input baseline 1 Hz, with a 40 Hz burst for 1 sec starting at t=10
# (see the expression below). Syn weight is 0.5, given as the 2nd
# argument as a special case in stimList.
stimList = [['head#', '0.5','glu', 'periodicsyn', '1 + 40*(t>10 && t<11)']],
plotList = [
['soma', '1', '.', 'Vm', 'Membrane potential'],
['#', '1', 'spine/Ca', 'conc', 'Ca in Spine'],
['#', '1', 'dend/DEND/Ca', 'conc', 'Ca in Dend'],
['#', '1', 'spine/Ca_CaM', 'conc', 'Ca_CaM'],
['head#', '1', 'psd/chan_p', 'conc', 'Phosph gluR'],
['head#', '1', 'psd/Ca_CaM_CaMKII', 'conc', 'Active CaMKII'],
]
)
moose.seed(123)
rdes.buildModel()
moose.reinit()
moose.start(25)
data = []
v = moose.wildcardFind('/##[TYPE=Table]')[0].vector
assert np.allclose( (v.mean(), v.std()), (-0.06777396715033643,
0.008550767915889)), (v.mean(), v.std())
for t in moose.wildcardFind('/##[TYPE=Table2]'):
data.append(t.vector)
m = np.mean(data, axis=1)
u = np.std(data, axis=1)
assert m.shape[0] == 67
assert np.allclose(m, A, atol=1e-5), m - A
assert np.allclose(u, B, atol=1e-5), u - B
if __name__ == '__main__':
test()
| dilawar/moose-core | devel/fixme/test_82_multiscale_gluR_phosph_3compt.py | Python | gpl-3.0 | 5,024 | ["MOOSE"] | a213e5e3f4357a5b71fc6b0bc698c21c18e3fda3e62baf8dbae289a1fa705de5 |
# -*- coding: utf-8 -*-
import os, codecs
import copy
from structs import Tincture, Fieldless, Charge
TINCTURES = {
'(fieldless)': Fieldless(),
'(tinctureless)': Fieldless(),
'multicolor': Tincture('multicolor'),
}
TINCTURE_ALIASES = {
'azure': ['de larmes'],
'sable': ['de poix'],
'purpure': ['de vin'],
'gules': ['de sangue', 'de sang'],
'vert': ["d'huile"],
'argent': ["d'eau"],
'or': ["d'or"],
'counterermine': ['counter-ermine'],
}
FURS = {
'ermined',
}
COUNTERCHANGEDS = {
'counterchanged',
'counter-changed',
}
ANDS = {
'and',
'and for augmentation',
'conjoined with',
'conjoined in pale with',
'conjoined to',
'conjoined at the base to',
'surrounded by and conjoined to',
'enfiling',
'between the head and tail',
'suspended from',
'pierced by',
'pierced through by',
'and on the sinister with', # after "charged on the dexter with"
    'nowed in', # More of an "in the shape of" than an "and", but this works.
}
SUSTAININGS = {
'sustaining',
'sustained by',
'conjoined to and sustaining',
'conjoined in base to and sustaining',
'rising from',
'transfixed by',
}
# Maintained charges now count for difference.
MAINTAININGS = {
'maintaining': None,
'maintaining between them': None,
'maintaining on the outer swirl': None,
'gorged of': None,
'perched upon': None,
'distilling': None,
'attired of': None,
'grasping': None,
'sinister forepaw resting on a maintained': None,
'each tentacle maintaining': None,
'playing a maintained': 1,
'topped of': None,
'vorant of': None,
'wearing': None,
}
# "Charged withs" or "withins". NOT "conjoined with"s.
# Value is implied number, if any.
WITHS = {
'in': None,
'within': None,
'all within': None,
'within and conjoined to': None,
'within the horns of': None,
'between the horns of': None,
# charged withs
'charged with': None,
'charged in base with': None,
'each arm charged with a': 4, # for crosses
'each arm charged with an': 4, # for crosses
'decorated with': None,
'braced with': None,
'interlaced with': None,
'surmounted by': None,
'surrounding': None,
}
ATOPS = {
'atop',
'fastened to',
'hanging from',
'on', # Can also be used in other ways.
'beneath',
'surmounting',
}
NUMBERS = {
'a': 1,
'an': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'a pair of': 2,
'a sheaf of': 3,
'two sheaves of': 6,
'three sheaves of': 9,
'two pairs of': 4,
'three pairs of': 6,
}
for i in xrange(1, 11):
NUMBERS[str(i)] = i
OF_CHARGES = {
'triskelion': 3,
}
CHARGES = {}
DESC_TO_CHARGE = {}
LOCATIONS = {
'base',
'chief',
'dexter',
'sinister',
'dexter chief',
'sinister chief',
'canton',
}
DEPRIM = {
'in fess point', # To disambig vs "in fess"
}
ARRANGEMENTS = {
'one and two', 'two and one',
'two and two',
'two , two and two',
'two , two , and two',
'two , two , and one',
'two , one , and two',
}
ORIENTATIONS = {}
PERIPHERALS = {'chief', 'base', 'bordure', 'orle', 'gore', 'flaunch', 'fillet'}
CENTRAL_ORDINARIES = {'bend', 'bend sinister', 'chevron', 'chevron inverted',
'fess', 'pale', 'pall', 'pall inverted', 'saltire'}
BETWEEN = frozenset(('between',))
MAJOR_DETAILS = {
'winged': 'winged object',
}
DETAILS = set()
CONTOURNYS = {
'contourny',
'contourney',
'countourny',
}
DETAILS.update(CONTOURNYS)
LINES = {
'grady': 'indented',
u'ployé': 'ploye',
'doubly-enarched': 'enarched',
'endorsed': 'cotised',
'flory counter-flory': 'complex line',
'flory counterflory': 'complex line',
'flory': 'complex line',
'embowed to base': 'embowed',
'raguly bretessed': 'raguly',
}
BIRD_POSTURES = {}
BIRD_POSTURE_ALIASES = {
'rising': ['rousant', 'hovering'],
'volant': ['volant guardant'],
'other bird posture': ['volant in chevron addorsed'],
'close': ['statant'],
'close to sinister': ['statant contourny'],
}
BIRD_TYPES = {}
FISH_POSTURES = {}
FISH_POSTURE_ALIASES = {
'hauriant': 'haurient',
}
POSTURES = {}
POSTURE_ALIASES = {
'rampant': ['segreant', 'salient', 'clymant', 'springing'],
'affronte': ['affronty', 'sejant affronty'],
'combattant': ['combatant'],
}
CROSS_FAMILIES = {}
CROSS_ALIASES = {
'crosslet': ['of saint julian'],
'doubled': ['russian orthodox'],
'flory': ['patonce'],
'other cross': ['of saint brigid', 'canterbury'],
}
STAR_TYPES = {}
DEFAULT_CHARGE = Charge('?', '?')
#SYMBOLS = {'elder futhark'}
ALSOS = {'flower, few petals'}
CATEGORIES = {}
EXPLICIT_CATEGORIES = {'human figure': 'human'}
IMPLIED_NUMBER = {
'flames': 1,
'flaunches': 2,
'jessant-de-lys': 'the',
}
IMPLIED_TINCTURES = {
'bezant': 'or',
'plate': 'argent',
'hurt': 'azure',
'torteau': 'gules',
'pellet': 'sable',
'pomme': 'vert',
'golpe': 'purpure',
'fountain': 'multicolor',
}
MULTIPLE_TINCTURES = {
'flower, rose', # for double rose
}
SEMYS = {
'billety': 'billet',
'billetty': 'billet',
'bezanty': 'bezant',
'crescenty': 'crescent',
'crusily': 'cross',
'delphy': 'delf',
'escallopy': 'escallop',
'estencely': 'spark',
'estoilly': 'estoile',
'fleury': 'fleur de lys',
'semy-de-lys': 'fleur de lys',
'goutty': 'goute',
'goutte': 'goute',
'mullety': 'mullet',
'platy': 'plate',
}
# vairy is a field treatment that's written like a field division
VAIRYS = {
'vairy',
'counter-vairy',
'papellony', 'papelonny',
'potenty',
}
CHARGE_ADJ = {
'winged': 'monster, winged',
'bat-winged': 'monster, winged'
}
DETAIL_ADJ = {
'empty',
'bottom-whorl',
'elder futhark',
'lower case',
'greek',
'brunette',
'caucasian',
'hexagonal',
}
BLACKLIST = {
'throughout',
'cross of',
'respectant',
'fish', # From head, fish
'enflamed', # Can be a line, but usually isn't.
'sceptre', # In aliases.txt to also consider mace
}
# These we want to treat as words for purposes of spellchecking, but we only
# understand them in certain manually-coded contexts.
#
# Small words are those that can't likely be the end of an alias or desc,
# misc words are others.
SMALL_WORDS = {'of', 'on', 'to', 'her', 'his', 'its', 'at', 'with',
'in', 'the', 'de', 'each', 'it'}
MISC_WORDS = {'sinister', 'respectant', 'each of'}
PRIMTAGS_WHITELIST = {'primary'}
ALL_WORDS = set()
LOADED = False
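# Sketch of the my.cat line formats handled by loadwords() below (inferred from
# the parsing code, not from upstream documentation; example names are
# illustrative):
#   |tincture:azure<...          -> register a tincture (plus any aliases)
#   |bird_posture:rising<...     -> register a bird posture (and its variants)
#   beast, lion|lion rampant     -> "name|desc" charge entry, category "beast"
#   lyon - see beast, lion       -> alias entry resolved via CHARGES/CATEGORIES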
def loadwords():
global LOADED
if LOADED:
return
with codecs.open(os.path.join(os.path.dirname(__file__), 'details.txt'),
encoding='utf-8') as fil:
for l in fil:
if l.strip() and not l.startswith('#'):
DETAILS.add(l.strip())
with open(os.path.join(os.path.dirname(__file__), 'my.cat')) as mydotcat:
for l in mydotcat:
l = l.strip()
if l.startswith('|'):
if l.startswith('|tincture:') and '=' not in l:
typ, tinct = l.split(':')
if '<' in tinct:
tinct, rest = tinct.split('<', 1)
if tinct not in TINCTURES and tinct != 'brown':
TINCTURES[tinct] = Tincture(tinct)
if tinct in TINCTURE_ALIASES:
for a in TINCTURE_ALIASES[tinct]:
TINCTURES[a] = Tincture(tinct)
elif l.startswith('|bird_type:'):
typ, post = l.split(':')
if '<' in post:
post, rest = post.split('<', 1)
if post.endswith(' shaped'):
key = post[:-len(' shaped')]
else:
key = post
BIRD_TYPES[key] = post
elif l.startswith('|bird_posture:'):
typ, post = l.split(':')
if '<' in post:
post, rest = post.split('<', 1)
name = post
if name.startswith('bird '):
name = name[len('bird '):]
names = [name]
for a in BIRD_POSTURE_ALIASES:
if name.startswith(a):
for v in BIRD_POSTURE_ALIASES[a]:
names.append(v + name[len(a):])
for n in list(names):
if n.endswith(' to dexter'):
names.append(n[:-len(' to dexter')])
for n in names:
BIRD_POSTURES[n] = post
elif l.startswith('|fish_posture:'):
typ, post = l.split(':')
if '<' in post:
post, rest = post.split('<', 1)
name = post
if name.startswith('fish '):
name = name[len('fish '):]
names = [name]
for a in FISH_POSTURE_ALIASES:
if name.startswith(a):
names.append(FISH_POSTURE_ALIASES[a]
+ name[len(a):])
for n in list(names):
if n.endswith(' to dexter'):
names.append(n[:-len(' to dexter')])
for n in names:
FISH_POSTURES[n] = post
elif l.startswith('|posture:'):
typ, post = l.split(':')
if '<' in post:
post, _ = post.split('<', 1)
if '=' in post:
post, _ = post.split('=', 1)
name = post
names = [name]
for a in POSTURE_ALIASES:
if name.startswith(a):
for v in POSTURE_ALIASES[a]:
names.append(v + name[len(a):])
if name.endswith(' to sinister'):
stem = name[:-len(' to sinister')]
for c in CONTOURNYS:
names.append(stem + ' ' + c)
else:
names.append(name + ' guardant')
for n in names:
POSTURES[n] = post
elif l.startswith('|cross_family:'):
typ, fam = l.split(':')
CROSS_FAMILIES[fam] = fam
if fam in CROSS_ALIASES:
for a in CROSS_ALIASES[fam]:
CROSS_FAMILIES[a] = fam
elif l.startswith('|star_type:'):
_, fam = l.split(':')
STAR_TYPES[fam] = fam
elif l.startswith('|orientation:'):
typ, orient = l.split(':')
if '<' in orient:
orient, rest = orient.split('<', 1)
ORIENTATIONS[orient] = orient
elif l.startswith('|line:'):
typ, ln = l.split(':')
if '<' in ln:
ln, rest = ln.split('<', 1)
if ln not in BLACKLIST:
LINES[ln] = ln
else:
continue
elif '|' in l:
name, desc = l.split('|')
cat = None
if ', ' in name:
cat, typ = name.rsplit(', ', 1)
charge = Charge(name, desc, category=cat)
if typ in BLACKLIST:
pass
elif typ not in CHARGES:
CHARGES[typ] = charge
else:
if CHARGES[typ] is None or CHARGES[typ].category != typ:
CHARGES[typ] = None
if cat not in CATEGORIES:
CATEGORIES[cat] = [charge]
else:
CATEGORIES[cat].append(charge)
elif name in EXPLICIT_CATEGORIES:
charge = Charge(name, desc,
category=EXPLICIT_CATEGORIES[name])
else:
charge = Charge(name, desc)
CHARGES[name] = charge
if name.startswith('head, beast, '):
CHARGES["%s's head" % name[len('head, beast, '):]] = charge
assert desc not in DESC_TO_CHARGE, desc
DESC_TO_CHARGE[desc] = charge
elif ' - see ' in l:
name, see = l.split(' - see ')
name = name.lower()
if name in BLACKLIST:
continue
if see.startswith('also '):
see = see[len('also '):]
also = True
else:
also = name in ALSOS
seenames = see.split(' and ')
sees = []
seesdone = set()
for n in seenames:
# We don't handle sees referring to a see later in the
# alphabet; so sees referring to other sees need correction.
CORRECTIONS = {
'fish, lobster': 'arthropod, lobster',
'peripheral on ly': 'peripheral only',
'bird': 'bird, whole',
'sun': 'sun, whole',
'roundel': 'roundel, whole',
'portcullis': 'gate',
'gridiron': 'tool, other',
'monster, composite': 'monster, other',
                        'sun': 'mullet',  # NB: duplicate 'sun' key; this later entry wins
'tree, branch': 'tree branch',
'bird, penguin': 'penguin',
'sun, whole, charged': 'mullet, charged',
'wheel, heraldic': 'wheel',
}
if n in CORRECTIONS:
n = CORRECTIONS[n]
if n in CATEGORIES and n not in CHARGES:
sees += CATEGORIES[n]
assert None not in sees, (sees, n)
else:
if n not in CHARGES:
assert ', ' in n, n
most, lastbit = n.rsplit(', ', 1)
assert most in CHARGES, n
chargemod = copy.deepcopy(CHARGES[most])
if lastbit == 'seme':
chargemod.number = 'seme'
elif lastbit in LINES and most in PERIPHERALS:
chargemod.tags.append(LINES[lastbit])
elif lastbit == 'charged':
chargemod.tags.append('charged')
else:
assert most in (
'cross, as charge',
'saltire, as charge'), n
assert lastbit in CROSS_FAMILIES, n
chargemod.tags.append(CROSS_FAMILIES[lastbit])
sees.append(chargemod)
else:
assert CHARGES[n] is not None, n
if CHARGES[n].desc not in seesdone:
seesdone.add(CHARGES[n].desc)
sees.append(CHARGES[n])
assert None not in sees, (sees, n)
if also:
assert name in CHARGES, name
else:
assert (name not in CHARGES or CHARGES[name] is None
#or CHARGES[name].name == sees[0].name
or CHARGES[name].category), (name, CHARGES[name], sees[0])
first = sees.pop(0)
CHARGES[name] = copy.deepcopy(first)
CHARGES[name].maintags = []
# Copying the seealso of just the first is weird
CHARGES[name].seealso = []
if 'bird, whole' in seenames and name in BIRD_TYPES:
CHARGES[name].tags.append(BIRD_TYPES[name])
if name in STAR_TYPES:
CHARGES[name].maintags.append(STAR_TYPES[name])
#assert name != 'mullet', (name, CHARGES[name].tags, sees)
for s in sees:
CHARGES[name].seealso.append(s)
if ', ' in name:
cat, typ = name.rsplit(', ', 1)
if cat == 'tree':
CHARGES['%s leaf' % typ] = CHARGES['leaf']
CHARGES['%s leaves' % typ] = CHARGES['leaves']
else:
assert False, l
with codecs.open(os.path.join(os.path.dirname(__file__), 'aliases.txt'),
encoding='utf-8') as fil:
for l in fil:
if l.strip() and not l.startswith('#'):
k, val = l.strip().split(': ')
assert (k not in CHARGES or CHARGES[k] is None
or CHARGES[k].name == val), (k, val, CHARGES[k].name)
charges = []
for v in val.split(' & '):
if '*' in v:
v, multiplier = v.split('*')
else:
multiplier = None
tags = v.split(':')
v = tags.pop(0)
assert v in CHARGES, v
chg = copy.deepcopy(CHARGES[v])
for t in tags:
if t == 'seme':
chg.number = t
elif t.isdigit():
chg.number = int(t)
else:
chg.tags.append(t)
if multiplier:
chg.multiplier = int(multiplier)
charges.append(chg)
CHARGES[k] = charges[0]
for a in charges[1:]:
CHARGES[k].seealso.append(a)
for n in TINCTURES:
tinct = TINCTURES[n]
if tinct.fielddesc is None:
key = "%s field" % tinct.tincture
if key in CHARGES:
tinct.fielddesc = CHARGES[key].desc
else:
ftkey = 'field treatment, %s' % tinct.tincture
if ftkey in CHARGES:
tinct.fielddesc = CHARGES[ftkey].desc
ALL_WORDS.update(TINCTURES,
CHARGES, DETAILS, ARRANGEMENTS, ORIENTATIONS, POSTURES,
LINES, LOCATIONS, DEPRIM,
BIRD_POSTURES, NUMBERS, ANDS, SUSTAININGS, MAINTAININGS,
WITHS, CROSS_FAMILIES, ATOPS,
DETAIL_ADJ, COUNTERCHANGEDS,
SMALL_WORDS, MISC_WORDS)
LOADED = True
| xavidotron/blazon | words.py | Python | mit | 19,553 | ["FLEUR"] | 0845d154c7459f73eabd98b020a77e2124ad82fcb95cd0c04481fed841943247 |
r"""
This module is a VTK Web server application.
The following command line illustrates how to use it::
    $ vtkpython .../vtk_web_filebrowser.py --data-dir /.../server_directory_to_share
Any VTK Web executable script comes with a set of standard arguments that
can be overridden if need be::
    --host localhost
        Interface on which the HTTP server will listen.
    --port 8080
        Port number on which the HTTP server will listen.
    --content /path-to-web-content/
        Directory that you want to serve as static web content.
        By default, this variable is empty, which means that we rely on another
        server to deliver the static content and the current process only
        focuses on the WebSocket connectivity of clients.
    --authKey vtkweb-secret
        Secret key that should be provided by the client to allow it to make any
        WebSocket communication. The client will assume, if none is given, that
        the server expects "vtkweb-secret" as the secret key.
"""
# import to process args
import sys
import os
# import vtk modules.
import vtk
from vtk.web import protocols, server
from vtk.web import wamp as vtk_wamp
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
from vtk.util import _argparse as argparse
# =============================================================================
# Create custom File Opener class to handle clients requests
# =============================================================================
class _WebFileBrowser(vtk_wamp.ServerProtocol):
# Application configuration
authKey = "vtkweb-secret"
basedir = ""
def initialize(self):
# Bring used components
self.registerVtkWebProtocol(protocols.vtkWebFileBrowser(_WebFileBrowser.basedir, "Home"))
# Update authentication key to use
self.updateSecret(_WebFileBrowser.authKey)
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="VTK/Web FileBrowser web-application")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--data-dir", help="Base directory to list", dest="basedir", default=".")
# Extract arguments
args = parser.parse_args()
# Configure our current application
_WebFileBrowser.authKey = args.authKey
_WebFileBrowser.basedir = args.basedir
# Start server
server.start_webserver(options=args, protocol=_WebFileBrowser)
| hlzz/dotfiles | graphics/VTK-7.0.0/Web/Applications/FileBrowser/server/vtk_web_filebrowser.py | Python | bsd-3-clause | 2,963 | ["VTK"] | ff2033c1f18598e27991d4de429c14ccc80b57a08f9bcf5e5bee114c4a29c29b |
'''
This module performs a few early syntax checks on the input AST.
It checks the conformance of the input code to Pythran-specific
constraints.
'''
from pythran.tables import MODULES
from pythran.intrinsic import Class
import ast
class PythranSyntaxError(SyntaxError):
def __init__(self, msg, node=None):
SyntaxError.__init__(self, msg)
if node:
self.filename = getattr(node, 'filename', None)
self.lineno = node.lineno
self.offset = node.col_offset
class SyntaxChecker(ast.NodeVisitor):
"""
Visit an AST and raise a PythranSyntaxError upon unsupported construct.
Attributes
----------
attributes : {str}
Possible attributes from Pythonic modules/submodules.
"""
def __init__(self):
""" Gather attributes from MODULES content. """
self.attributes = set()
def save_attribute(module):
""" Recursively save Pythonic keywords as possible attributes. """
self.attributes.update(module.iterkeys())
for signature in module.itervalues():
if isinstance(signature, dict):
save_attribute(signature)
elif isinstance(signature, Class):
save_attribute(signature.fields)
for module in MODULES.itervalues():
save_attribute(module)
def visit_Module(self, node):
err = ("Top level statements can only be strings, functions, comments"
" or imports")
for n in node.body:
if isinstance(n, ast.Expr) and isinstance(n.value, ast.Str):
continue
else:
if not any(isinstance(n, getattr(ast, t))
for t in ('FunctionDef', 'Import', 'ImportFrom',)):
raise PythranSyntaxError(err, n)
self.generic_visit(node)
def visit_Interactive(self, node):
raise PythranSyntaxError("Interactive session not supported", node)
def visit_Expression(self, node):
raise PythranSyntaxError("Interactive expressions not supported", node)
def visit_Suite(self, node):
raise PythranSyntaxError(
"Suites are specific to Jython and not supported", node)
def visit_ClassDef(self, node):
raise PythranSyntaxError("Classes not supported")
def visit_Print(self, node):
self.generic_visit(node)
if node.dest:
raise PythranSyntaxError(
"Printing to a specific stream not supported", node.dest)
def visit_With(self, node):
raise PythranSyntaxError("With statements not supported")
def visit_Call(self, node):
self.generic_visit(node)
if node.starargs:
raise PythranSyntaxError("Call with star arguments not supported",
node)
if node.kwargs:
raise PythranSyntaxError("Call with kwargs not supported", node)
def visit_FunctionDef(self, node):
self.generic_visit(node)
if node.args.vararg:
raise PythranSyntaxError("Varargs not supported", node)
if node.args.kwarg:
raise PythranSyntaxError("Keyword arguments not supported",
node)
def visit_Raise(self, node):
self.generic_visit(node)
if node.tback:
raise PythranSyntaxError(
"Traceback in raise statements not supported",
node)
def visit_Attribute(self, node):
self.generic_visit(node)
if node.attr not in self.attributes:
raise PythranSyntaxError(
"Attribute '{0}' unknown".format(node.attr),
node)
def visit_Import(self, node):
""" Check if imported module exists in MODULES. """
for alias in node.names:
current_module = MODULES
# Recursive check for submodules
for path in alias.name.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(alias.name),
node)
else:
current_module = current_module[path]
def visit_ImportFrom(self, node):
"""
Check validity of imported functions.
Check:
- no level specific value are provided.
- a module is provided
- module/submodule exists in MODULES
- imported function exists in the given module/submodule
"""
if node.level != 0:
raise PythranSyntaxError("Specifying a level in an import", node)
if not node.module:
raise PythranSyntaxError("import from without module", node)
module = node.module
current_module = MODULES
# Check if module exists
for path in module.split('.'):
if path not in current_module:
raise PythranSyntaxError(
"Module '{0}' unknown.".format(module),
node)
else:
current_module = current_module[path]
# Check if imported functions exist
for alias in node.names:
if alias.name == '*':
continue
elif alias.name not in current_module:
raise PythranSyntaxError(
"identifier '{0}' not found in module '{1}'".format(
alias.name,
module),
node)
def visit_Exec(self, node):
raise PythranSyntaxError("Exec statement not supported", node)
def visit_Global(self, node):
raise PythranSyntaxError("Global variables not supported", node)
def check_syntax(node):
'''Does nothing but raising PythranSyntaxError when needed'''
SyntaxChecker().visit(node)
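# Illustrative usage of check_syntax (a sketch, not part of the original module):
#
#   import ast
#   tree = ast.parse(open("kernel.py").read())   # "kernel.py" is a placeholder
#   check_syntax(tree)  # raises PythranSyntaxError on unsupported constructs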
class SpecsChecker(ast.NodeVisitor):
'''
Verify the arity of each function (incl. defaults)
and raise a PythranSyntaxError if they are incompatible with the
#pythran export specs
'''
def __init__(self, specs, renamings):
self.specs = {renamings.get(k, k): v for k, v in specs.items()}
self.funcs = set()
def visit_FunctionDef(self, node):
fname = node.name
self.funcs.add(fname)
max_arg_count = len(node.args.args)
min_arg_count = max_arg_count - len(node.args.defaults)
signatures = self.specs.get(fname, ())
for signature in signatures:
# just verify the arity
arg_count = len(signature)
if min_arg_count <= arg_count <= max_arg_count:
pass
else:
msg = 'export for function {} incompatible with its definition'
raise PythranSyntaxError(msg.format(node.name), node)
def visit_Module(self, node):
self.generic_visit(node)
for fname, _ in self.specs.items():
if fname not in self.funcs:
msg = 'exporting undefined function {}'
raise PythranSyntaxError(msg.format(fname))
def check_specs(mod, specs, renamings):
'''
Does nothing but raising PythranSyntaxError if specs
are incompatible with the actual code
'''
SpecsChecker(specs, renamings).visit(mod)
| artas360/pythran | pythran/syntax.py | Python | bsd-3-clause | 7,358 | ["VisIt"] | 9ba82675685767024809a65fa87cd9ca8b2430d43ef38ede232f02a9e986f1cf |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_asm_policy
short_description: Manage BIG-IP ASM policies
description:
- Manage BIG-IP ASM policies.
version_added: 2.5
deprecated:
removed_in: '2.12'
alternative: bigip_asm_policy_manage
why: >
    The bigip_asm_policy module has been split into three new modules to handle import, export and general policy
    management. This allows ASM policy management to scale better and eases maintenance.
    Additionally, to further reduce the burden of having multiple smaller modules, F5 has created an asm_policy
    role in Ansible Galaxy for a more declarative way of ASM policy management.
options:
active:
description:
      - If C(yes), will apply and activate an existing inactive policy. If C(no), it will
        deactivate an existing active policy. Generally should be C(yes) only in cases where
        you want to activate a new or existing policy.
default: no
type: bool
name:
description:
- The ASM policy to manage or create.
required: True
state:
description:
      - When C(state) is C(present) and a C(file) or C(template) parameter is provided,
        a new ASM policy is imported and created with the given C(name).
      - When C(state) is C(present) and no C(file) or C(template) parameter is provided,
        a new blank ASM policy is created with the given C(name).
- When C(state) is C(absent), ensures that the policy is removed, even if it is
currently active.
choices:
- present
- absent
default: present
file:
description:
- Full path to a policy file to be imported into the BIG-IP ASM.
- Policy files exported from newer versions of BIG-IP cannot be imported into older
versions of BIG-IP. The opposite, however, is true; you can import older into
newer.
template:
description:
- An ASM policy built-in template. If the template does not exist we will raise an error.
- Once the policy has been created, this value cannot change.
- The C(Comprehensive), C(Drupal), C(Fundamental), C(Joomla),
C(Vulnerability Assessment Baseline), and C(Wordpress) templates are only available
on BIG-IP versions >= 13.
choices:
- ActiveSync v1.0 v2.0 (http)
- ActiveSync v1.0 v2.0 (https)
- Comprehensive
- Drupal
- Fundamental
- Joomla
- LotusDomino 6.5 (http)
- LotusDomino 6.5 (https)
- OWA Exchange 2003 (http)
- OWA Exchange 2003 (https)
- OWA Exchange 2003 with ActiveSync (http)
- OWA Exchange 2003 with ActiveSync (https)
- OWA Exchange 2007 (http)
- OWA Exchange 2007 (https)
- OWA Exchange 2007 with ActiveSync (http)
- OWA Exchange 2007 with ActiveSync (https)
- OWA Exchange 2010 (http)
- OWA Exchange 2010 (https)
- Oracle 10g Portal (http)
- Oracle 10g Portal (https)
- Oracle Applications 11i (http)
- Oracle Applications 11i (https)
- PeopleSoft Portal 9 (http)
- PeopleSoft Portal 9 (https)
- Rapid Deployment Policy
- SAP NetWeaver 7 (http)
- SAP NetWeaver 7 (https)
- SharePoint 2003 (http)
- SharePoint 2003 (https)
- SharePoint 2007 (http)
- SharePoint 2007 (https)
- SharePoint 2010 (http)
- SharePoint 2010 (https)
- Vulnerability Assessment Baseline
- Wordpress
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Import and activate ASM policy
bigip_asm_policy:
name: new_asm_policy
file: /root/asm_policy.xml
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import ASM policy from template
bigip_asm_policy:
name: new_sharepoint_policy
template: SharePoint 2007 (http)
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create blank ASM policy
bigip_asm_policy:
name: new_blank_policy
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create blank ASM policy and activate
bigip_asm_policy:
name: new_blank_policy
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Activate ASM policy
bigip_asm_policy:
name: inactive_policy
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Deactivate ASM policy
bigip_asm_policy:
name: active_policy
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import and activate ASM policy in Role
bigip_asm_policy:
name: new_asm_policy
file: "{{ role_path }}/files/asm_policy.xml"
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import ASM binary policy
bigip_asm_policy:
name: new_asm_policy
file: "/root/asm_policy.plc"
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
active:
description: Set when activating/deactivating ASM policy
returned: changed
type: bool
sample: yes
state:
description: Action performed on the target device.
returned: changed
type: str
sample: absent
file:
description: Local path to ASM policy file.
returned: changed
type: str
sample: /root/some_policy.xml
template:
description: Name of the built-in ASM policy template
returned: changed
type: str
sample: OWA Exchange 2007 (https)
name:
description: Name of the ASM policy to be managed/created
returned: changed
type: str
sample: Asm_APP1_Transparent
'''
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import upload_file
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import upload_file
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
updatables = [
'active',
]
returnables = [
'name',
'template',
'file',
'active',
]
api_attributes = [
'name',
'file',
'active',
]
api_map = {
'filename': 'file',
}
@property
def template_link(self):
if self._values['template_link'] is not None:
return self._values['template_link']
collection = self._templates_from_device()
for resource in collection['items']:
if resource['name'] == self.template.upper():
return dict(link=resource['selfLink'])
return None
@property
def full_path(self):
return fq_name(self.name)
def _templates_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/asm/policy-templates/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class V1Parameters(Parameters):
@property
def template(self):
if self._values['template'] is None:
return None
template_map = {
'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP',
'ActiveSync v1.0 v2.0 (https)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS',
'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP',
'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS',
'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP',
'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS',
'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP',
'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS',
'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP',
'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS',
'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP',
'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS',
'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP',
'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS',
'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP',
'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS',
'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT',
'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP',
'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS',
'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP',
'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS',
'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP',
'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS',
'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP',
'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS'
}
if self._values['template'] in template_map:
return template_map[self._values['template']]
else:
raise F5ModuleError(
"The specified template is not valid for this version of BIG-IP."
)
class V2Parameters(Parameters):
@property
def template(self):
if self._values['template'] is None:
return None
template_map = {
'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP',
'ActiveSync v1.0 v2.0 (https)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS',
'Comprehensive': 'POLICY_TEMPLATE_COMPREHENSIVE', # v13
'Drupal': 'POLICY_TEMPLATE_DRUPAL', # v13
'Fundamental': 'POLICY_TEMPLATE_FUNDAMENTAL', # v13
'Joomla': 'POLICY_TEMPLATE_JOOMLA', # v13
'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP',
'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS',
'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP',
'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS',
'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP',
'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS',
'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP',
'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS',
'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP',
'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS',
'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP',
'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS',
'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP',
'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS',
'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT',
'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP',
'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS',
'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP',
'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS',
'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP',
'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS',
'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP',
'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS',
'Vulnerability Assessment Baseline': 'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT', # v13
'Wordpress': 'POLICY_TEMPLATE_WORDPRESS' # v13
}
return template_map[self._values['template']]
class Changes(Parameters):
@property
def template(self):
if self._values['template'] is None:
return None
template_map = {
'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP': 'ActiveSync v1.0 v2.0 (http)',
'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS': 'ActiveSync v1.0 v2.0 (https)',
'POLICY_TEMPLATE_COMPREHENSIVE': 'Comprehensive',
'POLICY_TEMPLATE_DRUPAL': 'Drupal',
'POLICY_TEMPLATE_FUNDAMENTAL': 'Fundamental',
'POLICY_TEMPLATE_JOOMLA': 'Joomla',
'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP': 'LotusDomino 6.5 (http)',
'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS': 'LotusDomino 6.5 (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP': 'OWA Exchange 2003 (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS': 'OWA Exchange 2003 (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2003 with ActiveSync (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2003 with ActiveSync (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP': 'OWA Exchange 2007 (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS': 'OWA Exchange 2007 (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2007 with ActiveSync (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2007 with ActiveSync (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP': 'OWA Exchange 2010 (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS': 'OWA Exchange 2010 (https)',
'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP': 'Oracle 10g Portal (http)',
'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS': 'Oracle 10g Portal (https)',
'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP': 'Oracle Applications 11i (http)',
'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS': 'Oracle Applications 11i (https)',
'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP': 'PeopleSoft Portal 9 (http)',
'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS': 'PeopleSoft Portal 9 (https)',
'POLICY_TEMPLATE_RAPID_DEPLOYMENT': 'Rapid Deployment Policy',
'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP': 'SAP NetWeaver 7 (http)',
'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS': 'SAP NetWeaver 7 (https)',
'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP': 'SharePoint 2003 (http)',
'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS': 'SharePoint 2003 (https)',
'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP': 'SharePoint 2007 (http)',
'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS': 'SharePoint 2007 (https)',
'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP': 'SharePoint 2010 (http)',
'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS': 'SharePoint 2010 (https)',
'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT': 'Vulnerability Assessment Baseline',
'POLICY_TEMPLATE_WORDPRESS': 'Wordpress',
}
return template_map[self._values['template']]
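# Difference dispatches each parameter comparison to a same-named property when
# one exists (e.g. 'active' below); otherwise __default() falls back to a plain
# want-vs-have inequality check.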
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def active(self):
if self.want.active is True and self.have.active is False:
return True
if self.want.active is False and self.have.active is True:
return False
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
self.have = None
self.changes = Changes()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if not self.exists():
return False
else:
return self.remove()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if any(p['name'] == self.want.name and p['partition'] == self.want.partition for p in response['items']):
return True
return False
def _file_is_missing(self):
if self.want.template and self.want.file is None:
return False
if self.want.template is None and self.want.file is None:
return False
if not os.path.exists(self.want.file):
return True
return False
def create(self):
if self.want.active is None:
self.want.update(dict(active=False))
if self._file_is_missing():
raise F5ModuleError(
"The specified ASM policy file does not exist"
)
self._set_changed_options()
if self.module.check_mode:
return True
if self.want.template is None and self.want.file is None:
self.create_blank()
else:
if self.want.template is not None:
self.create_from_template()
elif self.want.file is not None:
self.create_from_file()
if self.want.active:
self.activate()
return True
else:
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
if self.changes.active:
self.activate()
return True
def activate(self):
self.have = self.read_current_from_device()
task_id = self.apply_on_device()
if self.wait_for_task(task_id, 'apply'):
return True
else:
raise F5ModuleError('Apply policy task failed.')
def wait_for_task(self, task_id, task):
uri = ''
if task == 'apply':
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
task_id
)
elif task == 'import':
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
task_id
)
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if response['status'] in ['COMPLETED', 'FAILURE']:
break
time.sleep(1)
if response['status'] == 'FAILURE':
return False
if response['status'] == 'COMPLETED':
return True
def _get_policy_id(self):
name = self.want.name
partition = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
policy_id = next(
(p['id'] for p in response['items'] if p['name'] == name and p['partition'] == partition), None
)
if not policy_id:
raise F5ModuleError("The policy was not found")
return policy_id
def update_on_device(self):
params = self.changes.api_params()
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
if not params['active']:
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_blank(self):
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError(
'Failed to create ASM policy: {0}'.format(self.want.name)
)
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError(
'Failed to delete ASM policy: {0}'.format(self.want.name)
)
return True
def is_activated(self):
if self.want.active is True:
return True
else:
return False
def read_current_from_device(self):
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
response.update((dict(self_link=response['selfLink'])))
return Parameters(params=response)
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def import_to_device(self):
name = os.path.split(self.want.file)[1]
self.upload_file_to_device(self.want.file, name)
time.sleep(2)
full_name = fq_name(self.want.partition, self.want.name)
cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1}'.format(full_name, name)
uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(cmd)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
if 'commandResult' in response:
if 'Unexpected Error' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def remove_temp_policy_from_device(self):
name = os.path.split(self.want.file)[1]
tpath_name = '/var/config/rest/downloads/{0}'.format(name)
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs=tpath_name
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def apply_on_device(self):
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
params = dict(policyReference={'link': self.have.self_link})
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['id']
def create_from_template_on_device(self):
full_name = fq_name(self.want.partition, self.want.name)
cmd = 'tmsh create asm policy {0} policy-template {1}'.format(full_name, self.want.template)
uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(cmd)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
if 'commandResult' in response:
if 'Unexpected Error' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
        # We need to remove 'active' from params, as the API will raise an error if active is set to True;
        # policies can only be activated via the apply-policy task endpoint.
params.pop('active')
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 401, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
time.sleep(2)
return response['selfLink']
def remove_from_device(self):
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
response = self.client.api.delete(uri)
if response.status in [200, 201]:
return True
raise F5ModuleError(response.content)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if not module_provisioned(self.client, 'asm'):
raise F5ModuleError(
"ASM must be provisioned to use this module."
)
if self.version_is_less_than_13():
manager = self.get_manager('v1')
else:
manager = self.get_manager('v2')
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return V1Manager(**self.kwargs)
elif type == 'v2':
return V2Manager(**self.kwargs)
def version_is_less_than_13(self):
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('13.0.0'):
return True
else:
return False
class V1Manager(BaseManager):
def __init__(self, *args, **kwargs):
client = kwargs.get('client', None)
module = kwargs.get('module', None)
super(V1Manager, self).__init__(client=client, module=module)
self.want = V1Parameters(params=module.params, client=client)
def create_from_file(self):
self.import_to_device()
self.remove_temp_policy_from_device()
def create_from_template(self):
self.create_from_template_on_device()
class V2Manager(BaseManager):
def __init__(self, *args, **kwargs):
client = kwargs.get('client', None)
module = kwargs.get('module', None)
super(V2Manager, self).__init__(client=client, module=module)
self.want = V2Parameters(params=module.params, client=client)
def create_from_template(self):
if not self.create_from_template_on_device():
return False
def create_from_file(self):
if not self.import_to_device():
return False
self.remove_temp_policy_from_device()
class ArgumentSpec(object):
def __init__(self):
self.template_map = [
'ActiveSync v1.0 v2.0 (http)',
'ActiveSync v1.0 v2.0 (https)',
'Comprehensive',
'Drupal',
'Fundamental',
'Joomla',
'LotusDomino 6.5 (http)',
'LotusDomino 6.5 (https)',
'OWA Exchange 2003 (http)',
'OWA Exchange 2003 (https)',
'OWA Exchange 2003 with ActiveSync (http)',
'OWA Exchange 2003 with ActiveSync (https)',
'OWA Exchange 2007 (http)',
'OWA Exchange 2007 (https)',
'OWA Exchange 2007 with ActiveSync (http)',
'OWA Exchange 2007 with ActiveSync (https)',
'OWA Exchange 2010 (http)',
'OWA Exchange 2010 (https)',
'Oracle 10g Portal (http)',
'Oracle 10g Portal (https)',
'Oracle Applications 11i (http)',
'Oracle Applications 11i (https)',
'PeopleSoft Portal 9 (http)',
'PeopleSoft Portal 9 (https)',
'Rapid Deployment Policy',
'SAP NetWeaver 7 (http)',
'SAP NetWeaver 7 (https)',
'SharePoint 2003 (http)',
'SharePoint 2003 (https)',
'SharePoint 2007 (http)',
'SharePoint 2007 (https)',
'SharePoint 2010 (http)',
'SharePoint 2010 (https)',
'Vulnerability Assessment Baseline',
'Wordpress',
]
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True,
),
file=dict(type='path'),
template=dict(
choices=self.template_map
),
active=dict(
type='bool'
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=[
['file', 'template']
]
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| brandond/ansible | lib/ansible/modules/network/f5/_bigip_asm_policy.py | Python | gpl-3.0 | 38,806 | ["Galaxy"] | 224dc877708cdea9903ae0638b57be819dca67aeb49c46a4c86218fac643c6df |
"""
@name: Modules/House/Lighting/Buttons/__init__.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2020-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Feb 5, 2020
"""
__updated__ = '2020-02-09'
__version_info__ = (20, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
CONFIG_NAME = 'buttons'
class ButtonInformation:
def __init__(self):
self.Name = None
self.Comment = None
self.DeviceType = 'Lighting'
self.DeviceSubType = 'Button'
self.Type = None # Remote, Slave
self.Family = None # FamilyInformation()
self.Room = None # RoomInformation()
self.Buttons = None
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/House/Lighting/Buttons/__init__.py | Python | mit | 727 | ["Brian"] | 328a44b94fc92df383f6a545a328f39370eb7fd366a2cc25c82faa0c2177c7ba |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.chronos.model.tcmf_model import TCMFNdarrayModelWrapper, \
TCMFXshardsModelWrapper
from bigdl.orca.data import SparkXShards
from bigdl.chronos.forecaster.abstract import Forecaster
class TCMFForecaster(Forecaster):
"""
Example:
>>> import numpy as np
>>> model = TCMFForecaster()
>>> fit_params = dict(val_len=12,
start_date="2020-1-1",
freq="5min",
y_iters=1,
init_FX_epoch=1,
max_FX_epoch=1,
max_TCN_epoch=1,
alt_iters=2)
>>> ndarray_input = {'id': np.arange(300), 'y': np.random.rand(300, 480)}
>>> model.fit(ndarray_input, fit_params)
>>> horizon = np.random.randint(1, 50)
>>> yhat = model.predict(horizon=horizon)
>>> model.save({tempdirname})
>>> loaded_model = TCMFForecaster.load({tempdirname}, is_xshards_distributed=False)
>>> data_new = np.random.rand(300, horizon)
>>> model.evaluate(target_value=dict({"y": data_new}), metric=['mse'])
>>> model.fit_incremental({"y": data_new})
>>> yhat_incr = model.predict(horizon=horizon)
"""
def __init__(self,
vbsize=128,
hbsize=256,
num_channels_X=[32, 32, 32, 32, 32, 1],
num_channels_Y=[16, 16, 16, 16, 16, 1],
kernel_size=7,
dropout=0.1,
rank=64,
kernel_size_Y=7,
learning_rate=0.0005,
normalize=False,
use_time=True,
svd=True,):
"""
Build a TCMF Forecast Model.
:param vbsize: int, default is 128.
Vertical batch size, which is the number of cells per batch.
:param hbsize: int, default is 256.
Horizontal batch size, which is the number of time series per batch.
:param num_channels_X: list, default=[32, 32, 32, 32, 32, 1].
List containing channel progression of temporal convolution network for local model
:param num_channels_Y: list, default=[16, 16, 16, 16, 16, 1]
List containing channel progression of temporal convolution network for hybrid model.
:param kernel_size: int, default is 7.
Kernel size for local models
:param dropout: float, default is 0.1.
Dropout rate during training
:param rank: int, default is 64.
The rank in matrix factorization of global model.
:param kernel_size_Y: int, default is 7.
Kernel size of hybrid model
:param learning_rate: float, default is 0.0005
:param normalize: boolean, false by default.
Whether to normalize input data for training.
:param use_time: boolean, default is True.
Whether to use time covariates.
:param svd: boolean, default is True.
Whether factor matrices are initialized by NMF.
"""
self.internal = None
self.config = {
"vbsize": vbsize,
"hbsize": hbsize,
"num_channels_X": num_channels_X,
"num_channels_Y": num_channels_Y,
"kernel_size": kernel_size,
"dropout": dropout,
"rank": rank,
"kernel_size_Y": kernel_size_Y,
"learning_rate": learning_rate,
"normalize": normalize,
"use_time": use_time,
"svd": svd,
}
def fit(self,
x,
val_len=24,
start_date="2020-4-1",
freq="1H",
covariates=None,
dti=None,
period=24,
y_iters=10,
init_FX_epoch=100,
max_FX_epoch=300,
max_TCN_epoch=300,
alt_iters=10,
num_workers=None):
"""
Fit the model on x from scratch
:param x: the input for fit. Only dict of ndarray and SparkXShards of dict of ndarray
are supported. Example: {'id': id_arr, 'y': data_ndarray}, and data_ndarray
is of shape (n, T), where n is the number of target time series and T is the
number of time steps.
:param val_len: int, default is 24.
Validation length. We will use the last val_len time points as validation data.
:param start_date: str or datetime-like.
Start date time for the time-series. e.g. "2020-01-01"
:param freq: str or DateOffset, default is '1H'.
Frequency of data.
:param covariates: 2-D ndarray or None. The shape of ndarray should be (r, T), where r is
the number of covariates and T is the number of time points.
Global covariates for all time series. If None, only default time covariates will be
used while use_time is True. If not, the time covariates used are the stack of input
covariates and default time covariates.
:param dti: DatetimeIndex or None.
If None, use default fixed frequency DatetimeIndex generated with start_date and freq.
:param period: int, default is 24.
Periodicity of input time series, leave it out if not known
:param y_iters: int, default is 10.
Number of iterations while training the hybrid model.
:param init_FX_epoch: int, default is 100.
Number of iterations while initializing factors
:param max_FX_epoch: int, default is 300.
Max number of iterations while training factors.
:param max_TCN_epoch: int, default is 300.
Max number of iterations while training the local model.
:param alt_iters: int, default is 10.
Number of iterations while alternate training.
:param num_workers: the number of workers you want to use for fit. If None, it defaults to
num_ray_nodes in the created RayContext or 1 if there is no active RayContext.
"""
if self.internal is None:
if isinstance(x, SparkXShards):
self.internal = TCMFXshardsModelWrapper(self.config)
elif isinstance(x, dict):
self.internal = TCMFNdarrayModelWrapper(self.config)
else:
raise ValueError("value of x should be a dict of ndarray or "
"an xShards of dict of ndarray")
try:
self.internal.fit(x,
num_workers=num_workers,
val_len=val_len,
start_date=start_date,
freq=freq,
covariates=covariates,
dti=dti,
period=period,
y_iters=y_iters,
init_FX_epoch=init_FX_epoch,
max_FX_epoch=max_FX_epoch,
max_TCN_epoch=max_TCN_epoch,
alt_iters=alt_iters,
)
except Exception as inst:
self.internal = None
raise inst
else:
raise Exception("This model has already been fully trained, "
"you can only run full training once.")
def fit_incremental(self, x_incr, covariates_incr=None, dti_incr=None):
"""
Incrementally fit the model. Note that we only incrementally fit X_seq (TCN in global model)
:param x_incr: incremental data to be fitted. It should be of the same format as input x in
fit, which is a dict of ndarray or SparkXShards of dict of ndarray.
Example: {'id': id_arr, 'y': incr_ndarray}, and incr_ndarray is of shape (n, T_incr)
, where n is the number of target time series, T_incr is the number of time steps
incremented. You can choose not to input 'id' in x_incr, but if you do, the elements
of id in x_incr should be the same as id in x of fit.
:param covariates_incr: covariates corresponding to x_incr. 2-D ndarray or None.
The shape of ndarray should be (r, T_incr), where r is the number of covariates.
Global covariates for all time series. If None, only default time covariates will be
used while use_time is True. If not, the time covariates used are the stack of input
covariates and default time covariates.
:param dti_incr: dti corresponding to the x_incr. DatetimeIndex or None.
If None, use default fixed frequency DatetimeIndex generated with the last date of x in
fit and freq.
"""
self.internal.fit_incremental(x_incr,
covariates_incr=covariates_incr,
dti_incr=dti_incr)
def evaluate(self,
target_value,
metric=['mae'],
target_covariates=None,
target_dti=None,
num_workers=None,
):
"""
Evaluate the model
:param target_value: target value for evaluation. We interpret its second dimension
as the horizon length for evaluation.
:param metric: the metrics. A list of metric names.
:param target_covariates: covariates corresponding to target_value.
2-D ndarray or None.
The shape of ndarray should be (r, horizon), where r is the number of covariates.
Global covariates for all time series. If None, only default time covariates will be
used while use_time is True. If not, the time covariates used are the stack of input
covariates and default time covariates.
:param target_dti: dti corresponding to target_value.
DatetimeIndex or None.
If None, use default fixed frequency DatetimeIndex generated with the last date of x in
fit and freq.
:param num_workers: the number of workers to use in evaluate. If None, it defaults to
num_ray_nodes in the created RayContext or 1 if there is no active RayContext.
:return: A list of evaluation results. Each item represents a metric.
"""
return self.internal.evaluate(y=target_value,
metric=metric,
target_covariates=target_covariates,
target_dti=target_dti,
num_workers=num_workers)
def predict(self,
horizon=24,
future_covariates=None,
future_dti=None,
num_workers=None,
):
"""
Predict using a trained forecaster.
:param horizon: horizon length to look forward.
:param future_covariates: covariates corresponding to future horizon steps data to predict.
2-D ndarray or None.
The shape of ndarray should be (r, horizon), where r is the number of covariates.
Global covariates for all time series. If None, only default time covariates will be
used while use_time is True. If not, the time covariates used are the stack of input
covariates and default time covariates.
:param future_dti: dti corresponding to future horizon steps data to predict.
DatetimeIndex or None.
If None, use default fixed frequency DatetimeIndex generated with the last date of x in
fit and freq.
:param num_workers: the number of workers to use in predict. If None, it defaults to
num_ray_nodes in the created RayContext or 1 if there is no active RayContext.
:return: A numpy ndarray with shape of (nd, horizon), where nd is the same number
of time series as input x in fit.
"""
if self.internal is None:
raise Exception("You should run fit before calling predict()")
else:
return self.internal.predict(horizon,
future_covariates=future_covariates,
future_dti=future_dti,
num_workers=num_workers)
def save(self, path):
"""
Save the forecaster.
:param path: Path to target saved file.
"""
if self.internal is None:
raise Exception("You should run fit before calling save()")
else:
self.internal.save(path)
def is_xshards_distributed(self):
"""
Check whether model is distributed by input xshards.
:return: True if the model is distributed by input xshards
"""
if self.internal is None:
raise ValueError(
"You should run fit before calling is_xshards_distributed()")
else:
return self.internal.is_xshards_distributed()
@classmethod
def load(cls, path, is_xshards_distributed=False, minPartitions=None):
"""
Load a saved model.
:param path: The location of the saved forecaster to load.
:param is_xshards_distributed: Whether the model is distributed trained with
input of dict of SparkXshards.
:param minPartitions: The minimum partitions for the XShards.
:return: the model loaded
"""
loaded_model = TCMFForecaster()
if is_xshards_distributed:
loaded_model.internal = TCMFXshardsModelWrapper(
loaded_model.config)
loaded_model.internal.load(path, minPartitions=minPartitions)
else:
loaded_model.internal = TCMFNdarrayModelWrapper(
loaded_model.config)
loaded_model.internal.load(path)
return loaded_model
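# A minimal end-to-end sketch of the local (ndarray) workflow documented above. It is
# illustrative only: it assumes a working bigdl.chronos installation and an initialized
# orca/ray context where required, and the toy sizes (8 series, 96 steps) are arbitrary.
# The input layout follows the fit() docstring: 'y' has shape (n, T).
if __name__ == "__main__":
    import numpy as np

    toy_input = {"id": np.arange(8), "y": np.random.rand(8, 96)}
    forecaster = TCMFForecaster(rank=8)
    forecaster.fit(toy_input, val_len=12, y_iters=1,
                   init_FX_epoch=1, max_FX_epoch=1, max_TCN_epoch=1, alt_iters=2)
    yhat = forecaster.predict(horizon=24)
    print(yhat.shape)  # expected: (8, 24)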
|
intel-analytics/BigDL
|
python/chronos/src/bigdl/chronos/forecaster/tcmf_forecaster.py
|
Python
|
apache-2.0
| 14,725
|
[
"ORCA"
] |
15ea11b84fad06ef87037087ed815b669d364a6141fc2b994e0545f80217068e
|
""" Decorators for DIRAC.
"""
from __future__ import print_function
import os
import inspect
import functools
import traceback
__RCSID__ = "$Id$"
def deprecated(reason, onlyOnce=False):
""" A decorator to mark a class or function as deprecated.
This will cause a warning to be generated in the usual log if the item
is used (instantiated or called).
If the environment variable ``DIRAC_DEPRECATED_FAIL`` is set to a non-empty value, an exception will be
raised when the function or class is used.
The decorator can be used before a class or function, giving a reason,
for example::
@deprecated("Use functionTwo instead")
def functionOne(...):
If `onlyOnce` is set to true then the warning will only be generated on the
first call or creation of the item. This is useful for things that are
likely to get called repeatedly (to prevent generating massive log files);
for example::
@deprecated("Use otherClass instead", onlyOnce=True)
class MyOldClass:
If used on a classmethod, it should be used after the `@classmethod` decorator
for example::
@classmethod
@deprecated("Do not put me before @classmethod")
def methodX(cls):
Parameters
----------
reason : str
Message to display to the user when the deprecated item is used. This should specify
what should be used instead.
onlyOnce : bool
If set, the deprecation warning will only be displayed on the first use.
Returns
-------
function
A double-function wrapper around the decorated object as required by the python
interpreter.
"""
def decFunc(func, clsName=None):
""" Inner function generator.
Returns a function which wraps the given "func" function,
which prints a deprecation notice as it is called.
clsName is used internally for class handling, and should be left
set to None otherwise.
Parameters
----------
func : function
The function to call from the wrapper.
clsName : string
If set, the wrapped object is assumed to be the __init__ function of
a class called "clsName". Set to None for wrapping a normal function.
Returns
-------
function
A function wrapper which prints the deprecation warning and calls
func.
"""
decFunc.warningEn = True
if func.__doc__ is None:
func.__doc__ = '\n\n**Deprecated**: ' + reason
else:
func.__doc__ += '\n\n**Deprecated**: ' + reason
@functools.wraps(func)
def innerFunc(*args, **kwargs):
""" Prints a suitable deprectaion notice and calls
the constructor/function/method.
All arguments are passed through to the target function.
"""
# fail calling the function if environment variable is set
if os.environ.get("DIRAC_DEPRECATED_FAIL", None):
raise NotImplementedError("ERROR: using deprecated function or class: %s" % reason)
# Get the details of the deprecated object
if clsName:
objName = clsName
objType = "class"
else:
objName = func.__name__
objType = "object"
if inspect.isfunction(func):
objType = "function"
if decFunc.warningEn:
# We take the second to last stack frame,
# which will be the place which called the deprecated item
# callDetails is a tuple of (file, lineNum, function, text)
callDetails = traceback.extract_stack()[-2]
print("NOTE: %s %s is deprecated (%s)." % (objName, objType, reason))
print("NOTE: Used at %s:%u" % (callDetails[0], callDetails[1]))
if onlyOnce:
decFunc.warningEn = False
return func(*args, **kwargs)
# Classes are special, we can decorate them directly,
# but then calling super( class, inst ) doesn't work as the reference
# to class becomes a function. Instead we decorate the class __init__
# function, but then have to override the name otherwise just "__init__ is
# deprecated" will be printed.
if inspect.isclass(func):
func.__init__ = decFunc(func.__init__, clsName=func.__name__)
return func
return innerFunc
return decFunc
def executeOnlyIf(attrName, returnedError, attrVal=None):
""" A decorator to test the value of the attribute of a class before executing a method.
We often have classes in DIRAC that set an attribute to True when the object
has been successfully instantiated. Each and every method then tests this parameter
at the start.
This is yet another (very) poor man's solution to avoid using exceptions.
This decorator will do the test for you.
Pity, but it breaks the doc, as functools.wraps does not propagate the default attribute discovery
:param attrName: the name of the attribute to test. If undefined, equivalent to its value being None
:param returnedError: what to return if the attribute value is not what we expect
:param attrVal: if set to anything other than None, we check that the attribute value is what is given.
If set to None, we just evaluate __bool__ on the attribute.
For example::
class ExceptionsAreEvil(object):
_hasBeenInitialized = False
def __init__(self):
self._hasBeenInitialized = 'OfCourse'
@executeOnlyIf("_hasBeenInitialized", S_ERROR("How could I not be initialized ?"))
def functionOne(...):
doTheActualWork()
def stupidMethod(...):
print "I don't like decorator"
if not self._hasBeenInitialized:
return S_ERROR("How could I not be initialized ?")
finallyDoSomething()
"""
def specificOnlyIf(meth):
""" onlyIf method aplied to a specigic case """
# This utility allows preserving the original help
# of the method being decorated
@functools.wraps(meth)
def innerFunc(*args, **kwargs):
""" Test the attribute before executing the method"""
self = args[0]
# First condition is if we have been given a specific attrVal
# second condition is if we have not been given it
if ((attrVal is not None and getattr(self, attrName, None) != attrVal) or
(attrVal is None and not getattr(self, attrName, None))):
return returnedError
return meth(*args, **kwargs)
return innerFunc
return specificOnlyIf
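# A minimal sketch showing how the two decorators above compose. The class and method
# names are illustrative only, and a plain dict stands in for DIRAC's S_ERROR result so
# the sketch has no dependencies beyond this module. It assumes DIRAC_DEPRECATED_FAIL
# is not set, so the deprecation only prints a notice instead of raising.
if __name__ == "__main__":

    @deprecated("Use NewClient instead", onlyOnce=True)
    class OldClient(object):
        def __init__(self):
            self._initialized = True

        @executeOnlyIf("_initialized", {"OK": False, "Message": "OldClient is not initialized"})
        def ping(self):
            return {"OK": True, "Value": "pong"}

    client = OldClient()   # prints the deprecation notice (once, because onlyOnce=True)
    print(client.ping())   # the attribute is truthy, so the real method body runs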
|
fstagni/DIRAC
|
Core/Utilities/Decorators.py
|
Python
|
gpl-3.0
| 6,569
|
[
"DIRAC"
] |
a8fac8b05fd839568a33ff64fb5a668adc907001adbb26fc82d81e7167580d03
|
from ase import *
from ase.parallel import paropen
from gpaw import *
from gpaw.wavefunctions.pw import PW
from gpaw.xc.rpa import RPACorrelation
from gpaw.xc.fxc import FXCCorrelation
# LDA --------------------------------------
H = Atoms('H', [(0, 0, 0)])
H.set_pbc(True)
H.center(vacuum=2.0)
calc = GPAW(mode=PW(300),
hund=True,
dtype=complex,
xc='LDA')
H.set_calculator(calc)
E_lda = H.get_potential_energy()
E_c_lda = -calc.get_xc_difference('LDA_X')
print('LDA correlation: ', E_c_lda, 'eV')
print()
calc.diagonalize_full_hamiltonian()
calc.write('H_lda.gpw', mode='all')
rpa = RPACorrelation('H_lda.gpw')
rpa.calculate(ecut=300)
fxc = FXCCorrelation('H_lda.gpw', xc='rALDA')
fxc.calculate(ecut=300)
# PBE --------------------------------------
H = Atoms('H', [(0, 0, 0)])
H.set_pbc(True)
H.center(vacuum=2.0)
calc = GPAW(mode=PW(300),
hund=True,
dtype=complex,
xc='PBE')
H.set_calculator(calc)
E_pbe = H.get_potential_energy()
E_c_pbe = -calc.get_xc_difference('GGA_X_PBE')
print('PBE correlation: ', E_c_pbe, 'eV')
print()
calc.diagonalize_full_hamiltonian()
calc.write('H_pbe.gpw', mode='all')
rpa = RPACorrelation('H_pbe.gpw')
rpa.calculate(ecut=300)
fxc = FXCCorrelation('H_pbe.gpw', xc='rAPBE')
fxc.calculate(ecut=300)
|
robwarm/gpaw-symm
|
doc/tutorials/fxc_correlation/hydrogen.py
|
Python
|
gpl-3.0
| 1,311
|
[
"ASE",
"GPAW"
] |
689664682b6911316a979e90f48ba7e7f0e22b6328d4f34e5d4c3dc18fc8b3c1
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlMoose(PerlPackage):
"""A postmodern object system for Perl 5"""
homepage = "http://search.cpan.org/~ether/Moose-2.2006/lib/Moose.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Moose-2.2006.tar.gz"
version('2.2006', '929c6b3877a6054ef617cf7ef1e220b5')
depends_on('perl-cpan-meta-check', type=('build', 'run'))
depends_on('perl-test-cleannamespaces', type=('build', 'run'))
depends_on('perl-devel-overloadinfo', type=('build', 'run'))
depends_on('perl-class-load-xs', type=('build', 'run'))
depends_on('perl-devel-stacktrace', type=('build', 'run'))
depends_on('perl-eval-closure', type=('build', 'run'))
depends_on('perl-sub-name', type=('build', 'run'))
depends_on('perl-module-runtime-conflicts', type=('build', 'run'))
depends_on('perl-devel-globaldestruction', type=('build', 'run'))
depends_on('perl-package-deprecationmanager', type=('build', 'run'))
depends_on('perl-package-stash-xs', type=('build', 'run'))
|
lgarren/spack
|
var/spack/repos/builtin/packages/perl-moose/package.py
|
Python
|
lgpl-2.1
| 2,263
|
[
"MOOSE"
] |
1f7dd52e3294a0e341280695c476bdb46c0ab1865b33e80f237f4fc471517544
|
#!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.widgets as widgets
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import e_field_gen as e_field
import odeint_solve as ode
import sys as sys
ELEC_MASS = 9.10938356E-31
ELEC_CHARGE = -1.60217662E-19
FUND_FREQ = 3.7474057E14
SP_LIGHT = 3E8
PL_FWHM = 25E-15
FOCUS_RADIUS = 30E-6
PULSE_ENERGY = 0.6E-3
EPSILON_o = 8.85418782E-12
TIME_GRID = 200
INTENSITY = 1.88*(PULSE_ENERGY/(FOCUS_RADIUS**2*PL_FWHM))/np.pi #gaussian
FIELD_AMP = np.sqrt(2*INTENSITY/(EPSILON_o*SP_LIGHT))
FIELD_TOLERANCE = FIELD_AMP*1E-1
FIELD_AMP_ION = np.sqrt(2E14/(EPSILON_o*SP_LIGHT))
def plot(*args):
# x = np.linspace(slider_12.val*PL_FWHM,(slider_12.val + slider_11.val)*PL_FWHM,200)
# y_field = args[0][0](x)
# z_field = args[0][1](x)
# t2 = np.linspace(args[1], args[2][-1], len(args[-1]))
# t = np.linspace(args[1] +
# (len(args[-1]) - len(args[-2]))*(args[2][-1] - args[1])/len(args[-1]),
# args[2][-1], len(args[-2]))
# y = args[0][0]
# z = args[0][1]
# dist = args[-1]
fig1 = plt.figure(1)
ax1 = plt.axes([0.05, 0.15, 0.9, 0.80])
#fig2 = plt.figure(2)
#ax2 = plt.axes([0.05, 0.15, 0.9, 0.80], projection='3d')
closest = args[0][:,0]
mask = [0 if np.ma.is_masked(i) else np.NaN for i in np.ma.masked_invalid(closest)]
time = args[0][:,1]
ax1.clear()
l = ax1.plot(time, closest, 'b.')
l2 = ax1.plot(time, mask, 'r.')
# l = ax1.plot(t2, dist, 'r-', t, args[-2], 'b.')
# l = ax1.plot(x,y_field,z_field)
ax1.set_ylim(-1E-9, 2E-9)
ax1.set_xlim(-1E-16, 3E-14)
plt.savefig(str(np.random.rand(1)[0])+'foo.png')
def update():
qwp_1 = np.random.rand(1)[0]*360
hwp_2 = np.random.rand(1)[0]*360
qwp_2 = np.random.rand(1)[0]*360
hwp_3 = np.random.rand(1)[0]*360
qwp_3 = np.random.rand(1)[0]*360
delay_1 = (np.random.rand(1)[0]-0.5)*4
delay_2 = (np.random.rand(1)[0]-0.5)*4
ampl_1 = np.random.rand(1)
ampl_2 = np.random.rand(1)
ampl_3 = np.random.rand(1)
closeness = np.random.rand(1)
a = e_field.e_field_gen(3, True, ampl_1*FIELD_AMP, ampl_2*FIELD_AMP,
ampl_3*FIELD_AMP, 0,
delay_1/FUND_FREQ, delay_2/FUND_FREQ,
FUND_FREQ, 2*FUND_FREQ, 3*FUND_FREQ,
[[qwp_1], [hwp_2, qwp_2], [hwp_3, qwp_3]],
PL_FWHM, PL_FWHM, PL_FWHM,
b1='q', b2='hq', b3='hq')
t = np.linspace(-5*PL_FWHM, 5*PL_FWHM, 50)
y_field = a[0](t)
z_field = a[1](t)
tot = np.sqrt(y_field**2 + z_field**2)
times = [j for i,j in zip(tot,t) if i > FIELD_AMP_ION]
b = ode.solve_path(a, times[0], times[-1], True, True, closeness*1E-9)
plot(b)
# plot(b[1:-2], b[0], times, b[-1], b[-2])
# plot(a)
if __name__ == '__main__':
#start
qwp_1 = np.random.rand(1)[0]*360
hwp_2 = np.random.rand(1)[0]*360
qwp_2 = np.random.rand(1)[0]*360
hwp_3 = np.random.rand(1)[0]*360
qwp_3 = np.random.rand(1)[0]*360
delay_1 = (np.random.rand(1)[0]-0.5)*4
delay_2 = (np.random.rand(1)[0]-0.5)*4
ampl_1 = np.random.rand(1)
ampl_2 = np.random.rand(1)
ampl_3 = np.random.rand(1)
closeness = np.random.rand(1)
a = e_field.e_field_gen(3, True, 0*FIELD_AMP, 0*FIELD_AMP,
ampl_3*FIELD_AMP, 0,
delay_1/FUND_FREQ, delay_2/FUND_FREQ,
FUND_FREQ, 2*FUND_FREQ, 3*FUND_FREQ,
[[qwp_1], [hwp_2, qwp_2], [hwp_3, qwp_3]],
PL_FWHM, PL_FWHM, PL_FWHM,
b1='q', b2='hq', b3='hq')
t = np.linspace(-5*PL_FWHM, 5*PL_FWHM, 50)
y_field = a[0](t)
z_field = a[1](t)
tot = np.sqrt(y_field**2 + z_field**2)
times = [j for i,j in zip(tot,t) if i > FIELD_AMP_ION]
b = ode.solve_path(a, times[0], times[-1], True, True, closeness*1E-9)
plot(b)
# plot(b[1:-2], b[0], times, b[-1], b[-2])
# plot(a)
plt.show()
for i in range(10):
update()
|
KavuriG/classical-calc-three-color
|
no_gui_tester.py
|
Python
|
gpl-3.0
| 4,304
|
[
"Gaussian"
] |
2b67ebfeff23823887deb55ebd45ae22e536b0ec9204d45367d44ed7a552fb3c
|
#!/usr/bin/env python
#code is same as ~/tools/stats/wiggle_to_simple.py
"""
Read a wiggle track and print out a series of lines containing
"chrom position score". Ignores track lines, handles bed, variableStep
and fixedStep wiggle lines.
"""
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.wiggle
from galaxy.tools.exception_handling import *
def stop_err( msg ):
sys.stderr.write( msg )
sys.exit()
def main():
if len( sys.argv ) > 1:
in_file = open( sys.argv[1] )
else:
in_file = sys.stdin
if len( sys.argv ) > 2:
out_file = open( sys.argv[2], "w" )
else:
out_file = sys.stdout
try:
for fields in bx.wiggle.IntervalReader( UCSCOutWrapper( in_file ) ):
out_file.write( "%s\n" % "\t".join( map( str, fields ) ) )
except UCSCLimitException:
# Wiggle data was truncated, at the very least need to warn the user.
print('Encountered message from UCSC: "Reached output limit of 100000 data values", so be aware your data was truncated.')
except ValueError as e:
in_file.close()
out_file.close()
stop_err( str( e ) )
in_file.close()
out_file.close()
if __name__ == "__main__": main()
|
volpino/Yeps-EURAC
|
lib/galaxy/datatypes/converters/wiggle_to_simple_converter.py
|
Python
|
mit
| 1,292
|
[
"Galaxy"
] |
5ba7146e128040a38eb259a2ad167a1f1eb17239fdff7f78d8e9486f69de62c1
|
"""
This is a typical input script that runs a simulation of
laser-wakefield acceleration using Warp 2D / Circ.
Usage
-----
- Modify the parameters below to suit your needs
- Type "python -i warp_script.py" in a terminal
- When the simulation finishes, the python session will *not* quit.
Therefore the simulation can be continued by running step()
Otherwise, one can just type exit()
"""
# Import warp-specific packages
from warp.init_tools import *
# -----------------------------------------------------------------------------
# Parameters (Modify the values below to suit your needs)
# -----------------------------------------------------------------------------
# General parameters
# ------------------
# Dimension of simulation ("3d", "circ", "2d", "1d")
dim = "2d"
# Number of azimuthal modes beyond m=0, for "circ" (not used for "2d" and "3d")
circ_m = 1
# Total number of timesteps in the simulation
N_steps = 401
# Whether to run the simulation interactively (0:off, 1:on)
interactive = 0
# Simulation box
# --------------
# Number of grid cells in the longitudinal direction
Nz = 200
# Number of grid cells in transverse direction (represents Nr in "circ")
Nx = 50
# Number of grid cells in the 3rd dimension (not used for "2d" and "circ")
Ny = 50
# Dimension of the box in longitudinal direction (meters)
zmin = -15.e-6
zmax = 5.e-6
# Dimension of the box in transverse direction (box ranges from -xmax to xmax)
xmax = 15.e-6
# Dimension of the box in 3rd direction (not used for "2d" and "circ")
ymax = 15.e-6
# Field boundary conditions (longitudinal and transverse respectively)
f_boundz = openbc
f_boundxy = absorb
# Particles boundary conditions (longitudinal and transverse respectively)
p_boundz = absorb
p_boundxy = reflect
# Moving window (0:off, 1:on)
use_moving_window = 1
# Speed of the moving window (ignored if use_moving_window = 0)
v_moving_window = clight
# Diagnostics
# -----------
# Period of diagnostics (in number of timesteps)
diag_period = 5
# Whether to write the fields
write_fields = 1
# Whether to write the particles
write_particles = 1
# Whether to write the diagnostics in parallel
parallel_output = False
# Numerical parameters
# --------------------
# Field solver (0:Yee, 1:Karkkainen on EF,B, 3:Lehe)
stencil = 0
# Particle shape (1:linear, 2:quadratic, 3:cubic)
depos_order = 3
# Gathering mode (1:from cell centers, 4:from Yee mesh)
efetch = 1
# Particle pusher (0:Boris, 1:Vay)
particle_pusher = 1
# Current smoothing parameters
# ----------------------------
# Turn current smoothing on or off (0:off; 1:on)
use_smooth = 1
# Number of passes of smoother and compensator in each direction (x, y, z)
npass_smooth = array([[ 0 , 0 ], [ 0 , 0 ], [ 1 , 1 ]])
# Smoothing coefficients in each direction (x, y, z)
alpha_smooth = array([[ 0.5, 3.], [ 0.5, 3.], [0.5, 3./2]])
# Stride in each direction (x, y, z)
stride_smooth = array([[ 1 , 1 ], [ 1 , 1 ], [ 1 , 1 ]])
# Laser parameters
# ----------------
# Initialize laser (0:off, 1:on)
use_laser = 1
# Position of the antenna (meters)
laser_source_z = 0.e-6
# Polarization angle with respect to the x axis (rad)
laser_polangle = pi/2
# Laser file:
# When using a laser profile that was experimentally
# measured, provide a string with the path to an HDF5 laser file,
# otherwise provide None and a Gaussian pulse will be initialized
laser_file = None
laser_file_energy = 2. # When using a laser file, energy in Joule of the pulse
# Gaussian pulse:
# Laser amplitude at focus
laser_a0 = 1.
# Waist at focus (meters)
laser_w0 = 4.e-6
# Length of the pulse (length from the peak to 1/e of the amplitude ; meters)
laser_ctau = 3.e-6
# Initial position of the centroid (meters)
laser_z0 = -2 * laser_ctau
# Focal position
laser_zfoc = 4.5e-05
# Plasma macroparticles
# ---------------------
# Initialize some preexisting plasmas electrons (0:off, 1:on)
# (Can be used in order to neutralize pre-ionized ions, if any,
# or in order to simulate a plasma without having to initialize ions)
use_preexisting_electrons = 1
# Initialize plasma ions (0:off, 1:on)
use_ions = 1
# Number of macroparticles per cell in each direction
# In Circ, nppcelly is the number of particles along the
# azimuthal direction. Use a multiple of 4*circ_m
plasma_nx = 1
plasma_ny = 4
plasma_nz = 1
# Plasma content and profile
# --------------------------
# Reference plasma density (in number of particles per m^3)
n_plasma = 2.5e25
# Relative density of the preexisting electrons (relative to n_plasma)
rel_dens_preexisting_electrons = 1.
# The different elements used. (Only used if use_ions is different than 0.)
# relative_density is the density relative to n_plasma.
# q_start is the ionization state of the ions at the beginning of the simulation
# q_max is the maximum ionization state
# If q_start is not equal to q_max, ionization between states will be computed.
ion_states = { 'Hydrogen': {'relative_density':1., 'q_start':1, 'q_max':1 } }
# Positions between which the plasma is initialized
# (Transversally, the plasma is initialized between -plasma_xmax and
# plasma_xmax, along x, and -plasma_ymax and plasma_ymax along y)
plasma_zmin = 1.e-6
plasma_zmax = 1500.e-6
plasma_xmax = xmax
plasma_ymax = ymax
# Define your own profile and profile parameters below
ramp_start = 0.e-6
ramp_length = 20.e-6
ramp_plateau = 20.e-6
def plasma_dens_func( x, y, z ):
"""
User-defined function: density profile of the plasma
It should return the relative density with respect to n_plasma,
at the position x, y, z (i.e. return a number between 0 and 1)
Parameters
----------
x, y, z: 1darrays of floats
Arrays with one element per macroparticle
Returns
-------
n : 1d array of floats
Array of relative density, with one element per macroparticle
"""
# Allocate relative density
n = ones_like(z)
# Make linear ramp
n = where( z<ramp_start+ramp_length, (z-ramp_start)/ramp_length, n )
# Suppress density before the ramp
n = where( z<ramp_start, 0., n )
# Reduce density by half after the plateau
n = where( z> ramp_start+ramp_length+ramp_plateau, 0.5*n, n )
# Put the density to 0 later
n = where( z> ramp_start+ramp_length+2*ramp_plateau, 0., n )
return(n)
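# Optional quick check (a sketch, not part of the original input script): evaluate the
# profile above at a few hypothetical longitudinal positions to confirm the intended
# shape -- zero before the ramp, a linear rise over ramp_length, 1 on the plateau,
# 0.5 after it, and zero beyond. numpy is imported explicitly so the check is
# self-contained.
import numpy as _np
_z_check = _np.array([-1.e-6, 10.e-6, 30.e-6, 50.e-6, 70.e-6])
# Expected relative densities with the settings above: [0., 0.5, 1., 0.5, 0.]
print(plasma_dens_func(_np.zeros_like(_z_check), _np.zeros_like(_z_check), _z_check))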
# Relativistic beam
# -----------------
# Initialize beam electrons (0:off, 1:on)
# (Please be aware that initializing a beam in 2D geometry makes very little
# physical sense, because of the long range of its space-charge fields)
use_beam = 0
# Longitudinal momentum of the beam
beam_uz = 100.
# Beam density
n_beam = 1.e26
# Number of macroparticles per cell in each direction
beam_nx = 2*plasma_nx
beam_ny = 2*plasma_ny
beam_nz = 2*plasma_nz
# Positions between which the beam is initialized
# (Transversally, the plasma is initialized between -plasma_xmax and
# plasma_xmax, along x, and -plasma_ymax and plasma_ymax along y)
beam_zmin = -12.e-6
beam_zmax = -10.e-6
beam_xmax = 3.e-6
beam_ymax = 3.e-6
# Define your own profile and profile parameters below
beam_rmax = beam_xmax
def beam_dens_func(x, y, z):
"""
User-defined function: density profile of the beam
It should return the relative density with respect to n_beam,
at the position x, y, z (i.e. return a number between 0 and 1)
Parameters
----------
x, y, z: 1darrays of floats
Arrays with one element per macroparticle
Returns
-------
n : 1d array of floats
Array of relative density, with one element per macroparticle
"""
# Allocate relative density
n = ones_like(z)
# Longitudinal profile: parabolic
n = n*(z - beam_zmin)*(beam_zmax - z) * 4/(beam_zmax - beam_zmin)**2
# Transverse profile: parabolic
r = sqrt( x**2 + y**2)
n = n*(1 - (r/beam_rmax)**2 )
# Put the density above rmax to 0
n[r > beam_rmax] = 0.
return(n)
# -----------------------------------------------------------------------------
# Initialization of the simulation (Normal users should not modify this part.)
# -----------------------------------------------------------------------------
# Set some general options for warp
set_diagnostics( interactive )
set_boundary_conditions( f_boundz, f_boundxy, p_boundz, p_boundxy )
set_simulation_box( Nz, Nx, Ny, zmin, zmax, xmax, ymax, dim )
set_moving_window( use_moving_window, v_moving_window )
# See smoothing.py
set_smoothing_parameters( use_smooth, dim, npass_smooth,
alpha_smooth, stride_smooth )
# Creation of the species
# -----------------------
elec = None
ions = None
elec_from_ions = None
beam = None
# Create the plasma species
# Reference weight for plasma species
plasma_weight = prepare_weights( n_plasma, plasma_nx, plasma_ny,
plasma_nz, dim, circ_m )
if use_preexisting_electrons:
elec_weight = rel_dens_preexisting_electrons * plasma_weight
elec = Species(type=Electron, weight=elec_weight, name='electrons')
if use_ions:
ions, elec_from_ions = initialize_ion_dict( ion_states, plasma_weight,
group_elec_by_element=True )
# Create the beam
if use_beam:
beam_weight = prepare_weights( n_beam, beam_nx, beam_ny,
beam_nz, dim, circ_m )
beam = Species(type=Electron, weight=beam_weight, name='beam')
# Set the numerical parameters only now: they affect the newly created species
top.ssnpid = nextpid()
set_numerics( depos_order, efetch, particle_pusher, dim)
# Setup the field solver object
# -----------------------------
em = EM3D(
stencil=stencil,
npass_smooth=npass_smooth,
alpha_smooth=alpha_smooth,
stride_smooth=stride_smooth,
l_2dxz= (dim in ["2d", "circ"]),
l_2drz= (dim in ["circ"]),
l_1dz = (dim =="1d" ),
l_getrho=True,
circ_m = (dim =="circ")*circ_m,
type_rz_depose=1 )
registersolver(em)
# Introduce the laser
# -------------------
if use_laser==1:
add_laser( em, dim, laser_a0, laser_w0, laser_ctau, laser_z0,
zf=laser_zfoc, theta_pol=laser_polangle, source_z=laser_source_z,
laser_file=laser_file, laser_file_energy=laser_file_energy )
# Introduce the beam
# ------------------
# Load the beam
if use_beam:
PlasmaInjector( beam, None, w3d, top, dim, beam_nx, beam_ny, beam_nz,
beam_zmin, beam_zmax, beam_xmax, beam_ymax,
dens_func = beam_dens_func, uz_m=beam_uz )
initialize_beam_fields( em, dim, beam, w3d, top )
# Introduce the plasma
# --------------------
# Create an object to store the information about plasma injection
plasma_injector = PlasmaInjector( elec, ions, w3d, top, dim,
plasma_nx, plasma_ny, plasma_nz, plasma_zmin,
plasma_zmax, plasma_xmax, plasma_ymax, plasma_dens_func )
# Continuously inject the plasma, if the moving window is on
if use_moving_window :
installuserinjection( plasma_injector.continuous_injection )
# Setup the diagnostics
# ---------------------
if write_fields == 1:
diag1 = FieldDiagnostic( period=diag_period, top=top, w3d=w3d, em=em,
comm_world=comm_world, lparallel_output=parallel_output,
write_dir='./example-2d', fieldtypes=["E", "rho"] )
if write_particles == 1:
species_dict = { species.name : species for species in listofallspecies \
if species.name == "electrons" }
diag2 = ParticleDiagnostic( period=diag_period, top=top, w3d=w3d,
species=species_dict, write_dir='./example-2d',
particle_data={"position","momentum","weighting","id"},
comm_world=comm_world, lparallel_output=parallel_output )
species_dict = { species.name : species for species in listofallspecies \
if species.name == "Hydrogen1+" }
diag3 = ParticleDiagnostic( period=diag_period, top=top, w3d=w3d,
species=species_dict, write_dir='./example-2d',
sub_sample=10,
comm_world=comm_world, lparallel_output=parallel_output )
print('\nInitialization complete\n')
# -----------------------------------------------------------------------------
# Simulation loop (Normal users should not modify this part either.)
# -----------------------------------------------------------------------------
step(250)
installafterstep( diag1.write )
installafterstep( diag2.write )
installafterstep( diag3.write )
step(151)
|
openPMD/openPMD-example-datasets
|
scripts/warp_2d_script.py
|
Python
|
cc0-1.0
| 12,337
|
[
"Gaussian"
] |
dc3fc01676ca7b906513f17b11d64cba62de0ab3d42f399f83819d2dc7b5cbf4
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010- Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Class:
SurnamePage - creates list of individuals with same surname
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.display.name import displayer as _nd
from gramps.gen.plug.report import utils
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (name_to_md5, _NAME_STYLE_FIRST,
_find_birth_date, _find_death_date,
FULLCLEAR, html_escape)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# create the page from SurnameListPage
#
#################################################
class SurnamePage(BasePage):
"""
This will create a list of individuals with the same surname
"""
def __init__(self, report, title, surname, ppl_handle_list):
"""
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: surname -- The surname to use
@param: ppl_handle_list -- The list of people for whom we need to create
a page.
"""
BasePage.__init__(self, report, title)
# module variables
showbirth = report.options['showbirth']
showdeath = report.options['showdeath']
showpartner = report.options['showpartner']
showparents = report.options['showparents']
if surname == '':
surname = self._("<absent>")
output_file, sio = self.report.create_file(name_to_md5(surname), "srn")
self.uplink = True
result = self.write_header("%s - %s" % (self._("Surname"), surname))
surnamepage, dummy_head, dummy_body, outerwrapper = result
ldatec = 0
# begin SurnameDetail division
with Html("div", class_="content", id="SurnameDetail") as surnamedetail:
outerwrapper += surnamedetail
# section title
# In case the user chooses a format name like "*SURNAME*",
# we must display this field in upper case. So we use
# the English format of format_name to find out whether this is
# the case.
name_format = self.report.options['name_format']
nme_format = _nd.name_formats[name_format][1]
if "SURNAME" in nme_format:
surnamed = surname.upper()
else:
surnamed = surname
surnamedetail += Html("h3", html_escape(surnamed), inline=True)
# feature request 2356: avoid genitive form
msg = self._("This page contains an index of all the individuals "
"in the database with the surname of %s. "
"Selecting the person’s name "
"will take you to that person’s "
"individual page.") % html_escape(surname)
surnamedetail += Html("p", msg, id="description")
# begin surname table and thead
with Html("table", class_="infolist primobjlist surname") as table:
surnamedetail += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
# Name Column
trow += Html("th", self._("Given Name"), class_="ColumnName",
inline=True)
if showbirth:
trow += Html("th", self._("Birth"), class_="ColumnDate",
inline=True)
if showdeath:
trow += Html("th", self._("Death"), class_="ColumnDate",
inline=True)
if showpartner:
trow += Html("th", self._("Partner"),
class_="ColumnPartner",
inline=True)
if showparents:
trow += Html("th", self._("Parents"),
class_="ColumnParents",
inline=True)
# begin table body
tbody = Html("tbody")
table += tbody
for person_handle in sorted(ppl_handle_list,
key=self.sort_on_given_and_birth):
person = self.r_db.get_person_from_handle(person_handle)
if person.get_change_time() > ldatec:
ldatec = person.get_change_time()
trow = Html("tr")
tbody += trow
# firstname column
link = self.new_person_link(person_handle, uplink=True,
person=person,
name_style=_NAME_STYLE_FIRST)
trow += Html("td", link, class_="ColumnName")
# birth column
if showbirth:
tcell = Html("td", class_="ColumnBirth", inline=True)
trow += tcell
birth_date = _find_birth_date(self.r_db, person)
if birth_date is not None:
if birth_date.fallback:
tcell += Html('em',
self.rlocale.get_date(birth_date),
inline=True)
else:
tcell += self.rlocale.get_date(birth_date)
else:
tcell += " "
# death column
if showdeath:
tcell = Html("td", class_="ColumnDeath", inline=True)
trow += tcell
death_date = _find_death_date(self.r_db, person)
if death_date is not None:
if death_date.fallback:
tcell += Html('em',
self.rlocale.get_date(death_date),
inline=True)
else:
tcell += self.rlocale.get_date(death_date)
else:
tcell += " "
# partner column
if showpartner:
tcell = Html("td", class_="ColumnPartner")
trow += tcell
family_list = person.get_family_handle_list()
if family_list:
fam_count = 0
for family_handle in family_list:
fam_count += 1
family = self.r_db.get_family_from_handle(
family_handle)
partner_handle = utils.find_spouse(
person, family)
if partner_handle:
link = self.new_person_link(partner_handle,
uplink=True)
if fam_count < len(family_list):
if isinstance(link, Html):
link.inside += ","
else:
link += ','
tcell += link
else:
tcell += " "
# parents column
if showparents:
parent_hdl_list = person.get_parent_family_handle_list()
if parent_hdl_list:
parent_hdl = parent_hdl_list[0]
fam = self.r_db.get_family_from_handle(parent_hdl)
f_id = fam.get_father_handle()
m_id = fam.get_mother_handle()
mother = father = None
if f_id:
father = self.r_db.get_person_from_handle(f_id)
if father:
father_name = self.get_name(father)
if m_id:
mother = self.r_db.get_person_from_handle(m_id)
if mother:
mother_name = self.get_name(mother)
if mother and father:
tcell = Html("span", father_name,
class_="father fatherNmother")
tcell += Html("span", mother_name,
class_="mother")
elif mother:
tcell = Html("span", mother_name,
class_="mother", inline=True)
elif father:
tcell = Html("span", father_name,
class_="father", inline=True)
samerow = False
else:
tcell = " "
samerow = True
trow += Html("td", tcell,
class_="ColumnParents", inline=samerow)
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
outerwrapper += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(surnamepage, output_file, sio, ldatec)
|
sam-m888/gramps
|
gramps/plugins/webreport/surname.py
|
Python
|
gpl-2.0
| 12,157
|
[
"Brian"
] |
806704dc9a0e38540b05c05a708897f93d6524b436571023b6e95495b10595ab
|
#
# Copyright 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ase.lattice import bulk
import ase.units as units
# ********** Bulk unit cell ************
# 8-atom diamond cubic unit cell for silicon
a0 = 5.44 # guess at lattice constant for Si - we will minimize
cryst = bulk('Si', 'diamond', a=a0, cubic=True)
surf_ny = 4 # number of cells needed to get accurate surface energy
# ********* System parameters **********
# There are three possible crack systems, choose one and uncomment it
# System 1. (111)[0-11]
crack_direction = (-2, 1, 1) # Miller index of x-axis
cleavage_plane = (1, 1, 1) # Miller index of y-axis
crack_front = (0, 1, -1) # Miller index of z-axis
# # System 2. (110)[001]
# crack_direction = (1,-1,0)
# cleavage_plane = (1,1,0)
# crack_front = (0,0,1)
# # System 3. (110)[1-10]
# crack_direction = (0,0,-1)
# cleavage_plane = (1,1,0)
# crack_front = (1,-1,0)
check_rotated_elastic_constants = False
width = 200.0*units.Ang # Width of crack slab
height = 100.0*units.Ang # Height of crack slab
vacuum = 100.0*units.Ang # Amount of vacuum around slab
crack_seed_length = 40.0*units.Ang # Length of seed crack
strain_ramp_length = 30.0*units.Ang # Distance over which strain is ramped up
initial_G = 5.0*(units.J/units.m**2) # Initial energy flow to crack tip
relax_bulk = True # If True, relax initial bulk cell
bulk_fmax = 1e-6*units.eV/units.Ang # Max force for bulk, C_ij and surface energy
relax_slab = True # If True, relax notched slab with calculator
relax_fmax = 0.025*units.eV/units.Ang # Maximum force criteria for relaxation
# ******* Molecular dynamics parameters ***********
sim_T = 300.0*units.kB # Simulation temperature
nsteps = 10000 # Total number of timesteps to run for
timestep = 1.0*units.fs # Timestep (NB: time base units are not fs!)
cutoff_skin = 2.0*units.Ang # Amount by which potential cutoff is increased
# for neighbour calculations
tip_move_tol = 10.0 # Distance tip has to move before crack
# is taken to be running
strain_rate = 1e-5*(1/units.fs) # Strain rate
traj_file = 'traj.nc' # Trajectory output file (NetCDF format)
traj_interval = 10 # Number of time steps between
# writing output frames
# ********** Setup calculator ************
# Stillinger-Weber (SW) classical interatomic potential, from QUIP
from quippy import Potential
calc = Potential('IP SW', 'params.xml')
# Screened Kumagai potential, from Atomistica
#import atomistica
#calc = atomistica.KumagaiScr()
|
libAtoms/matscipy
|
examples/fracture_mechanics/make_crack_thin_strip/params.py
|
Python
|
lgpl-2.1
| 3,464
|
[
"ASE",
"Matscipy",
"NetCDF"
] |
c6ee7b6aa285fbb31f0624f4c77e712f248e14e0685e92f60a8405d07ec0cb35
|
# French / Français - Translations - Python 3 Only!
from seleniumbase import BaseCase
from seleniumbase import MasterQA
class CasDeBase(BaseCase):
def __init__(self, *args, **kwargs):
super(CasDeBase, self).__init__(*args, **kwargs)
self._language = "French"
def ouvrir(self, *args, **kwargs):
# open(url)
return self.open(*args, **kwargs)
def ouvrir_url(self, *args, **kwargs):
# open_url(url)
return self.open_url(*args, **kwargs)
def cliquer(self, *args, **kwargs):
# click(selector)
return self.click(*args, **kwargs)
def double_cliquer(self, *args, **kwargs):
# double_click(selector)
return self.double_click(*args, **kwargs)
def cliquer_lentement(self, *args, **kwargs):
# slow_click(selector)
return self.slow_click(*args, **kwargs)
def cliquer_si_affiché(self, *args, **kwargs): # noqa
# click_if_visible(selector, by=By.CSS_SELECTOR)
return self.click_if_visible(*args, **kwargs)
def cliquer_texte_du_lien(self, *args, **kwargs):
# click_link_text(link_text)
return self.click_link_text(*args, **kwargs)
def modifier_texte(self, *args, **kwargs):
# update_text(selector, text)
return self.update_text(*args, **kwargs)
def taper(self, *args, **kwargs):
# type(selector, text) # Same as update_text()
return self.type(*args, **kwargs)
def ajouter_texte(self, *args, **kwargs):
# add_text(selector, text)
return self.add_text(*args, **kwargs)
def obtenir_texte(self, *args, **kwargs):
# get_text(selector, text)
return self.get_text(*args, **kwargs)
def vérifier_texte(self, *args, **kwargs):
# assert_text(text, selector)
return self.assert_text(*args, **kwargs)
def vérifier_texte_exactement(self, *args, **kwargs):
# assert_exact_text(text, selector)
return self.assert_exact_text(*args, **kwargs)
def vérifier_texte_du_lien(self, *args, **kwargs):
# assert_link_text(link_text)
return self.assert_link_text(*args, **kwargs)
def vérifier_élément(self, *args, **kwargs):
# assert_element(selector)
return self.assert_element(*args, **kwargs)
def vérifier_élément_affiché(self, *args, **kwargs):
# assert_element_visible(selector) # Same as self.assert_element()
return self.assert_element_visible(*args, **kwargs)
def vérifier_élément_pas_affiché(self, *args, **kwargs):
# assert_element_not_visible(selector)
return self.assert_element_not_visible(*args, **kwargs)
def vérifier_élément_présent(self, *args, **kwargs):
# assert_element_present(selector)
return self.assert_element_present(*args, **kwargs)
def vérifier_élément_pas_présent(self, *args, **kwargs):
# assert_element_absent(selector)
return self.assert_element_absent(*args, **kwargs)
def vérifier_titre(self, *args, **kwargs):
# assert_title(title)
return self.assert_title(*args, **kwargs)
def obtenir_titre(self, *args, **kwargs):
# get_title()
return self.get_title(*args, **kwargs)
def vérifier_vrai(self, *args, **kwargs):
# assert_true(expr)
return self.assert_true(*args, **kwargs)
def vérifier_faux(self, *args, **kwargs):
# assert_false(expr)
return self.assert_false(*args, **kwargs)
def vérifier_égal(self, *args, **kwargs):
# assert_equal(first, second)
return self.assert_equal(*args, **kwargs)
def vérifier_non_égal(self, *args, **kwargs):
# assert_not_equal(first, second)
return self.assert_not_equal(*args, **kwargs)
def rafraîchir_la_page(self, *args, **kwargs):
# refresh_page()
return self.refresh_page(*args, **kwargs)
def obtenir_url_actuelle(self, *args, **kwargs):
# get_current_url()
return self.get_current_url(*args, **kwargs)
def obtenir_html_de_la_page(self, *args, **kwargs):
# get_page_source()
return self.get_page_source(*args, **kwargs)
def retour(self, *args, **kwargs):
# go_back()
return self.go_back(*args, **kwargs)
def en_avant(self, *args, **kwargs):
# go_forward()
return self.go_forward(*args, **kwargs)
def est_texte_affiché(self, *args, **kwargs):
# is_text_visible(text, selector="html")
return self.is_text_visible(*args, **kwargs)
def est_un_élément_affiché(self, *args, **kwargs):
# is_element_visible(selector)
return self.is_element_visible(*args, **kwargs)
def est_un_élément_présent(self, *args, **kwargs):
# is_element_present(selector)
return self.is_element_present(*args, **kwargs)
def attendre_le_texte(self, *args, **kwargs):
# wait_for_text(text, selector)
return self.wait_for_text(*args, **kwargs)
def attendre_un_élément(self, *args, **kwargs):
# wait_for_element(selector)
return self.wait_for_element(*args, **kwargs)
def attendre_un_élément_affiché(self, *args, **kwargs):
# wait_for_element_visible(selector) # Same as wait_for_element()
return self.wait_for_element_visible(*args, **kwargs)
def attendre_un_élément_pas_affiché(self, *args, **kwargs):
# wait_for_element_not_visible(selector)
return self.wait_for_element_not_visible(*args, **kwargs)
def attendre_un_élément_présent(self, *args, **kwargs):
# wait_for_element_present(selector)
return self.wait_for_element_present(*args, **kwargs)
def attendre_un_élément_pas_présent(self, *args, **kwargs):
# wait_for_element_absent(selector)
return self.wait_for_element_absent(*args, **kwargs)
def dormir(self, *args, **kwargs):
# sleep(seconds)
return self.sleep(*args, **kwargs)
def attendre(self, *args, **kwargs):
# wait(seconds) # Same as sleep(seconds)
return self.wait(*args, **kwargs)
def soumettre(self, *args, **kwargs):
# submit(selector)
return self.submit(*args, **kwargs)
def effacer(self, *args, **kwargs):
# clear(selector)
return self.clear(*args, **kwargs)
def js_cliquer(self, *args, **kwargs):
# js_click(selector)
return self.js_click(*args, **kwargs)
def js_modifier_texte(self, *args, **kwargs):
# js_update_text(selector, text)
return self.js_update_text(*args, **kwargs)
def js_taper(self, *args, **kwargs):
# js_type(selector, text)
return self.js_type(*args, **kwargs)
def vérifier_html(self, *args, **kwargs):
# inspect_html()
return self.inspect_html(*args, **kwargs)
def enregistrer_capture_d_écran(self, *args, **kwargs):
# save_screenshot(name)
return self.save_screenshot(*args, **kwargs)
def sélectionner_fichier(self, *args, **kwargs):
# choose_file(selector, file_path)
return self.choose_file(*args, **kwargs)
def exécuter_script(self, *args, **kwargs):
# execute_script(script)
return self.execute_script(*args, **kwargs)
def exécuter_script_sans_risque(self, *args, **kwargs):
# safe_execute_script(script)
return self.safe_execute_script(*args, **kwargs)
def activer_jquery(self, *args, **kwargs):
# activate_jquery()
return self.activate_jquery(*args, **kwargs)
def annonces_de_bloc(self, *args, **kwargs):
# ad_block()
return self.ad_block(*args, **kwargs)
def sauter(self, *args, **kwargs):
# skip(reason="")
return self.skip(*args, **kwargs)
def vérifier_les_liens_rompus(self, *args, **kwargs):
# assert_no_404_errors()
return self.assert_no_404_errors(*args, **kwargs)
def vérifier_les_erreurs_js(self, *args, **kwargs):
# assert_no_js_errors()
return self.assert_no_js_errors(*args, **kwargs)
def passer_au_cadre(self, *args, **kwargs):
# switch_to_frame(frame)
return self.switch_to_frame(*args, **kwargs)
def passer_au_contenu_par_défaut(self, *args, **kwargs):
# switch_to_default_content()
return self.switch_to_default_content(*args, **kwargs)
def ouvrir_une_nouvelle_fenêtre(self, *args, **kwargs):
# open_new_window()
return self.open_new_window(*args, **kwargs)
def passer_à_fenêtre(self, *args, **kwargs):
# switch_to_window(window)
return self.switch_to_window(*args, **kwargs)
def passer_à_fenêtre_par_défaut(self, *args, **kwargs):
# switch_to_default_window()
return self.switch_to_default_window(*args, **kwargs)
def maximiser_fenêtre(self, *args, **kwargs):
# maximize_window()
return self.maximize_window(*args, **kwargs)
def illuminer(self, *args, **kwargs):
# highlight(selector)
return self.highlight(*args, **kwargs)
def illuminer_cliquer(self, *args, **kwargs):
# highlight_click(selector)
return self.highlight_click(*args, **kwargs)
def déménager_à(self, *args, **kwargs):
# scroll_to(selector)
return self.scroll_to(*args, **kwargs)
def faites_défiler_vers_le_haut(self, *args, **kwargs):
# scroll_to_top()
return self.scroll_to_top(*args, **kwargs)
def faites_défiler_vers_le_bas(self, *args, **kwargs):
# scroll_to_bottom()
return self.scroll_to_bottom(*args, **kwargs)
def planer_au_dessus_et_cliquer(self, *args, **kwargs):
# hover_and_click(hover_selector, click_selector)
return self.hover_and_click(*args, **kwargs)
def est_il_sélectionné(self, *args, **kwargs):
# is_selected(selector)
return self.is_selected(*args, **kwargs)
def appuyer_sur_flèche_haut(self, *args, **kwargs):
# press_up_arrow(selector="html", times=1)
return self.press_up_arrow(*args, **kwargs)
def appuyer_sur_flèche_bas(self, *args, **kwargs):
# press_down_arrow(selector="html", times=1)
return self.press_down_arrow(*args, **kwargs)
def appuyer_sur_flèche_gauche(self, *args, **kwargs):
# press_left_arrow(selector="html", times=1)
return self.press_left_arrow(*args, **kwargs)
def appuyer_sur_flèche_droite(self, *args, **kwargs):
# press_right_arrow(selector="html", times=1)
return self.press_right_arrow(*args, **kwargs)
def cliquer_éléments_visibles(self, *args, **kwargs):
# click_visible_elements(selector)
return self.click_visible_elements(*args, **kwargs)
def sélectionner_option_par_texte(self, *args, **kwargs):
# select_option_by_text(dropdown_selector, option)
return self.select_option_by_text(*args, **kwargs)
def sélectionner_option_par_index(self, *args, **kwargs):
# select_option_by_index(dropdown_selector, option)
return self.select_option_by_index(*args, **kwargs)
def sélectionner_option_par_valeur(self, *args, **kwargs):
# select_option_by_value(dropdown_selector, option)
return self.select_option_by_value(*args, **kwargs)
def créer_une_présentation(self, *args, **kwargs):
# create_presentation(name=None, theme="default", transition="default")
return self.create_presentation(*args, **kwargs)
def ajouter_une_diapositive(self, *args, **kwargs):
# add_slide(content=None, image=None, code=None, iframe=None,
# content2=None, notes=None, transition=None, name=None)
return self.add_slide(*args, **kwargs)
def enregistrer_la_présentation(self, *args, **kwargs):
# save_presentation(name=None, filename=None,
# show_notes=False, interval=0)
return self.save_presentation(*args, **kwargs)
def démarrer_la_présentation(self, *args, **kwargs):
# begin_presentation(name=None, filename=None,
# show_notes=False, interval=0)
return self.begin_presentation(*args, **kwargs)
def créer_un_graphique_à_secteurs(self, *args, **kwargs):
# create_pie_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_pie_chart(*args, **kwargs)
def créer_un_graphique_à_barres(self, *args, **kwargs):
# create_bar_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_bar_chart(*args, **kwargs)
def créer_un_graphique_à_colonnes(self, *args, **kwargs):
# create_column_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_column_chart(*args, **kwargs)
def créer_un_graphique_linéaire(self, *args, **kwargs):
# create_line_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, zero=False, libs=True)
return self.create_line_chart(*args, **kwargs)
def créer_un_graphique_en_aires(self, *args, **kwargs):
# create_area_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, zero=False, libs=True)
return self.create_area_chart(*args, **kwargs)
def ajouter_séries_au_graphique(self, *args, **kwargs):
# add_series_to_chart(data_name=None, chart_name=None)
return self.add_series_to_chart(*args, **kwargs)
def ajouter_un_point_de_données(self, *args, **kwargs):
# add_data_point(label, value, color=None, chart_name=None)
return self.add_data_point(*args, **kwargs)
def enregistrer_le_graphique(self, *args, **kwargs):
# save_chart(chart_name=None, filename=None)
return self.save_chart(*args, **kwargs)
def afficher_le_graphique(self, *args, **kwargs):
# display_chart(chart_name=None, filename=None, interval=0)
return self.display_chart(*args, **kwargs)
def extraire_le_graphique(self, *args, **kwargs):
# extract_chart(chart_name=None)
return self.extract_chart(*args, **kwargs)
def créer_une_visite(self, *args, **kwargs):
# create_tour(name=None, theme=None)
return self.create_tour(*args, **kwargs)
def créer_une_visite_shepherd(self, *args, **kwargs):
# create_shepherd_tour(name=None, theme=None)
return self.create_shepherd_tour(*args, **kwargs)
def créer_une_visite_bootstrap(self, *args, **kwargs):
# create_bootstrap_tour(name=None, theme=None)
return self.create_bootstrap_tour(*args, **kwargs)
def créer_une_visite_driverjs(self, *args, **kwargs):
# create_driverjs_tour(name=None, theme=None)
return self.create_driverjs_tour(*args, **kwargs)
def créer_une_visite_hopscotch(self, *args, **kwargs):
# create_hopscotch_tour(name=None, theme=None)
return self.create_hopscotch_tour(*args, **kwargs)
def créer_une_visite_introjs(self, *args, **kwargs):
# create_introjs_tour(name=None, theme=None)
return self.create_introjs_tour(*args, **kwargs)
def ajouter_étape_à_la_visite(self, *args, **kwargs):
# add_tour_step(message, selector=None, name=None,
# title=None, theme=None, alignment=None)
return self.add_tour_step(*args, **kwargs)
def jouer_la_visite(self, *args, **kwargs):
# play_tour(name=None)
return self.play_tour(*args, **kwargs)
def exporter_la_visite(self, *args, **kwargs):
# export_tour(name=None, filename="my_tour.js", url=None)
return self.export_tour(*args, **kwargs)
def obtenir_texte_pdf(self, *args, **kwargs):
# get_pdf_text(pdf, page=None, maxpages=None, password=None,
# codec='utf-8', wrap=False, nav=False, override=False)
return self.get_pdf_text(*args, **kwargs)
def vérifier_texte_pdf(self, *args, **kwargs):
# assert_pdf_text(pdf, text, page=None, maxpages=None, password=None,
# codec='utf-8', wrap=True, nav=False, override=False)
return self.assert_pdf_text(*args, **kwargs)
def vérifier_fichier_téléchargé(self, *args, **kwargs):
# assert_downloaded_file(file)
return self.assert_downloaded_file(*args, **kwargs)
def échouer(self, *args, **kwargs):
# fail(msg=None) # Inherited from "unittest"
return self.fail(*args, **kwargs)
def obtenir(self, *args, **kwargs):
# get(url) # Same as open(url)
return self.get(*args, **kwargs)
def visiter(self, *args, **kwargs):
# visit(url) # Same as open(url)
return self.visit(*args, **kwargs)
def visiter_url(self, *args, **kwargs):
# visit_url(url) # Same as open(url)
return self.visit_url(*args, **kwargs)
def obtenir_élément(self, *args, **kwargs):
# get_element(selector) # Element can be hidden
return self.get_element(*args, **kwargs)
def trouver_élément(self, *args, **kwargs):
# find_element(selector) # Element must be visible
return self.find_element(*args, **kwargs)
def supprimer_élément(self, *args, **kwargs):
# remove_element(selector)
return self.remove_element(*args, **kwargs)
def supprimer_éléments(self, *args, **kwargs):
# remove_elements(selector)
return self.remove_elements(*args, **kwargs)
def trouver_texte(self, *args, **kwargs):
# find_text(text, selector="html") # Same as wait_for_text
return self.find_text(*args, **kwargs)
def définir_texte(self, *args, **kwargs):
# set_text(selector, text)
return self.set_text(*args, **kwargs)
def obtenir_attribut(self, *args, **kwargs):
# get_attribute(selector, attribute)
return self.get_attribute(*args, **kwargs)
def définir_attribut(self, *args, **kwargs):
# set_attribute(selector, attribute, value)
return self.set_attribute(*args, **kwargs)
def définir_attributs(self, *args, **kwargs):
# set_attributes(selector, attribute, value)
return self.set_attributes(*args, **kwargs)
def écriver(self, *args, **kwargs):
# write(selector, text) # Same as update_text()
return self.write(*args, **kwargs)
def définir_thème_du_message(self, *args, **kwargs):
# set_messenger_theme(theme="default", location="default")
return self.set_messenger_theme(*args, **kwargs)
def afficher_message(self, *args, **kwargs):
# post_message(message, duration=None, pause=True, style="info")
return self.post_message(*args, **kwargs)
def imprimer(self, *args, **kwargs):
# _print(msg) # Same as Python print()
return self._print(*args, **kwargs)
def reporté_vérifier_élément(self, *args, **kwargs):
# deferred_assert_element(selector)
return self.deferred_assert_element(*args, **kwargs)
def reporté_vérifier_texte(self, *args, **kwargs):
# deferred_assert_text(text, selector="html")
return self.deferred_assert_text(*args, **kwargs)
def effectuer_vérifications_reportées(self, *args, **kwargs):
# process_deferred_asserts(print_only=False)
return self.process_deferred_asserts(*args, **kwargs)
def accepter_alerte(self, *args, **kwargs):
# accept_alert(timeout=None)
return self.accept_alert(*args, **kwargs)
def rejeter_alerte(self, *args, **kwargs):
# dismiss_alert(timeout=None)
return self.dismiss_alert(*args, **kwargs)
def passer_à_alerte(self, *args, **kwargs):
# switch_to_alert(timeout=None)
return self.switch_to_alert(*args, **kwargs)
def glisser_et_déposer(self, *args, **kwargs):
# drag_and_drop(drag_selector, drop_selector)
return self.drag_and_drop(*args, **kwargs)
def charger_html_fichier(self, *args, **kwargs):
# load_html_file(html_file, new_page=True)
return self.load_html_file(*args, **kwargs)
def ouvrir_html_fichier(self, *args, **kwargs):
# open_html_file(html_file)
return self.open_html_file(*args, **kwargs)
def supprimer_tous_les_cookies(self, *args, **kwargs):
# delete_all_cookies()
return self.delete_all_cookies(*args, **kwargs)
def obtenir_agent_utilisateur(self, *args, **kwargs):
# get_user_agent()
return self.get_user_agent(*args, **kwargs)
def obtenir_code_de_langue(self, *args, **kwargs):
# get_locale_code()
return self.get_locale_code(*args, **kwargs)
class MasterQA_Français(MasterQA, CasDeBase):
def vérifier(self, *args, **kwargs):
# "Manual Check"
self.DEFAULT_VALIDATION_TITLE = "Vérification manuelle"
# "Does the page look good?"
self.DEFAULT_VALIDATION_MESSAGE = "La page est-elle bonne?"
# verify(QUESTION)
return self.verify(*args, **kwargs)
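# --- Illustrative sketch, not part of the original french.py: how the
# translated wrapper methods above might be used from a test class.
# "CasDeBase" is the translated base class defined earlier in this module;
# the URL and the expected text below are hypothetical placeholders.
class ExempleDeTest(CasDeBase):
    def test_exemple(self):
        self.visiter("https://example.com")        # visit(url)
        self.trouver_texte("Example Domain")       # same as wait_for_text(text)
        self.faites_défiler_vers_le_bas()          # scroll_to_bottom()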
|
mdmintz/SeleniumBase
|
seleniumbase/translate/french.py
|
Python
|
mit
| 21,271
|
[
"VisIt"
] |
3bcbd3d2ed279b64bdc9fb1d05986809cd3807f41a6be55354b2cf09afaf9ab8
|
# Copyright (c) 2021 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os
from typing import Dict, List, Optional, Set
from PyQt5.QtNetwork import QNetworkReply
from PyQt5.QtWidgets import QMessageBox
from UM import i18nCatalog
from UM.Logger import Logger # To log errors talking to the API.
from UM.Message import Message
from UM.Settings.Interfaces import ContainerInterface
from UM.Signal import Signal
from UM.Util import parseBool
from cura.API import Account
from cura.API.Account import SyncState
from cura.CuraApplication import CuraApplication
from cura.Settings.CuraContainerRegistry import CuraContainerRegistry # To update printer metadata with information received about cloud printers.
from cura.Settings.CuraStackBuilder import CuraStackBuilder
from cura.Settings.GlobalStack import GlobalStack
from cura.UltimakerCloud.UltimakerCloudConstants import META_CAPABILITIES, META_UM_LINKED_TO_ACCOUNT
from .CloudApiClient import CloudApiClient
from .CloudOutputDevice import CloudOutputDevice
from ..Models.Http.CloudClusterResponse import CloudClusterResponse
class CloudOutputDeviceManager:
"""The cloud output device manager is responsible for using the Ultimaker Cloud APIs to manage remote clusters.
Keeping all cloud related logic in this class instead of the UM3OutputDevicePlugin results in more readable code.
API spec is available on https://api.ultimaker.com/docs/connect/spec/.
"""
META_CLUSTER_ID = "um_cloud_cluster_id"
META_HOST_GUID = "host_guid"
META_NETWORK_KEY = "um_network_key"
SYNC_SERVICE_NAME = "CloudOutputDeviceManager"
# The translation catalog for this device.
i18n_catalog = i18nCatalog("cura")
# Signal emitted when the list of discovered devices changed.
discoveredDevicesChanged = Signal()
def __init__(self) -> None:
# Persistent dict containing the remote clusters for the authenticated user.
self._remote_clusters = {} # type: Dict[str, CloudOutputDevice]
# Dictionary containing all the cloud printers loaded in Cura
self._um_cloud_printers = {} # type: Dict[str, GlobalStack]
self._account = CuraApplication.getInstance().getCuraAPI().account # type: Account
self._api = CloudApiClient(CuraApplication.getInstance(), on_error = lambda error: Logger.log("e", str(error)))
self._account.loginStateChanged.connect(self._onLoginStateChanged)
self._removed_printers_message = None # type: Optional[Message]
# Ensure we don't start twice.
self._running = False
self._syncing = False
CuraApplication.getInstance().getContainerRegistry().containerRemoved.connect(self._printerRemoved)
def start(self):
"""Starts running the cloud output device manager, thus periodically requesting cloud data."""
if self._running:
return
if not self._account.isLoggedIn:
return
self._running = True
self._getRemoteClusters()
self._account.syncRequested.connect(self._getRemoteClusters)
def stop(self):
"""Stops running the cloud output device manager."""
if not self._running:
return
self._running = False
self._onGetRemoteClustersFinished([]) # Make sure we remove all cloud output devices.
def refreshConnections(self) -> None:
"""Force refreshing connections."""
self._connectToActiveMachine()
def _onLoginStateChanged(self, is_logged_in: bool) -> None:
"""Called when the uses logs in or out"""
if is_logged_in:
self.start()
else:
self.stop()
def _getRemoteClusters(self) -> None:
"""Gets all remote clusters from the API."""
if self._syncing:
return
self._syncing = True
self._account.setSyncState(self.SYNC_SERVICE_NAME, SyncState.SYNCING)
self._api.getClusters(self._onGetRemoteClustersFinished, self._onGetRemoteClusterFailed)
def _onGetRemoteClustersFinished(self, clusters: List[CloudClusterResponse]) -> None:
"""Callback for when the request for getting the clusters is successful and finished."""
self._um_cloud_printers = {m.getMetaDataEntry(self.META_CLUSTER_ID): m for m in
CuraApplication.getInstance().getContainerRegistry().findContainerStacks(
type = "machine") if m.getMetaDataEntry(self.META_CLUSTER_ID, None)}
new_clusters = []
all_clusters = {c.cluster_id: c for c in clusters} # type: Dict[str, CloudClusterResponse]
online_clusters = {c.cluster_id: c for c in clusters if c.is_online} # type: Dict[str, CloudClusterResponse]
# Add the new printers in Cura.
for device_id, cluster_data in all_clusters.items():
if device_id not in self._remote_clusters:
new_clusters.append(cluster_data)
if device_id in self._um_cloud_printers:
# Existing cloud printers may not have the host_guid meta-data entry. If that's the case, add it.
if not self._um_cloud_printers[device_id].getMetaDataEntry(self.META_HOST_GUID, None):
self._um_cloud_printers[device_id].setMetaDataEntry(self.META_HOST_GUID, cluster_data.host_guid)
# If a printer was previously not linked to the account and is rediscovered, mark the printer as linked
# to the current account
if not parseBool(self._um_cloud_printers[device_id].getMetaDataEntry(META_UM_LINKED_TO_ACCOUNT, "true")):
self._um_cloud_printers[device_id].setMetaDataEntry(META_UM_LINKED_TO_ACCOUNT, True)
if not self._um_cloud_printers[device_id].getMetaDataEntry(META_CAPABILITIES, None):
self._um_cloud_printers[device_id].setMetaDataEntry(META_CAPABILITIES, ",".join(cluster_data.capabilities))
self._onDevicesDiscovered(new_clusters)
self._updateOnlinePrinters(all_clusters)
# Hide the current removed_printers_message, if there is any
if self._removed_printers_message:
self._removed_printers_message.actionTriggered.disconnect(self._onRemovedPrintersMessageActionTriggered)
self._removed_printers_message.hide()
# Remove the CloudOutput device for offline printers
offline_device_keys = set(self._remote_clusters.keys()) - set(online_clusters.keys())
for device_id in offline_device_keys:
self._onDiscoveredDeviceRemoved(device_id)
# Handle devices that were previously added in Cura but do not exist in the account anymore (i.e. they were
# removed from the account)
removed_device_keys = set(self._um_cloud_printers.keys()) - set(all_clusters.keys())
if removed_device_keys:
self._devicesRemovedFromAccount(removed_device_keys)
if new_clusters or offline_device_keys or removed_device_keys:
self.discoveredDevicesChanged.emit()
if offline_device_keys:
# If the removed device was active we should connect to the new active device
self._connectToActiveMachine()
self._syncing = False
self._account.setSyncState(self.SYNC_SERVICE_NAME, SyncState.SUCCESS)
Logger.debug("Synced cloud printers with account.")
def _onGetRemoteClusterFailed(self, reply: QNetworkReply, error: QNetworkReply.NetworkError) -> None:
self._syncing = False
self._account.setSyncState(self.SYNC_SERVICE_NAME, SyncState.ERROR)
def _onDevicesDiscovered(self, clusters: List[CloudClusterResponse]) -> None:
"""**Synchronously** create machines for discovered devices
Any new machines are made available to the user.
May take a long time to complete. As this code needs access to the Application
and blocks the GIL, creating a Job for this would not make sense.
Shows a Message informing the user of progress.
"""
new_devices = []
remote_clusters_added = False
host_guid_map = {machine.getMetaDataEntry(self.META_HOST_GUID): device_cluster_id
for device_cluster_id, machine in self._um_cloud_printers.items()
if machine.getMetaDataEntry(self.META_HOST_GUID)}
machine_manager = CuraApplication.getInstance().getMachineManager()
for cluster_data in clusters:
device = CloudOutputDevice(self._api, cluster_data)
# If the machine already existed before, it will be present in the host_guid_map
if cluster_data.host_guid in host_guid_map:
machine = machine_manager.getMachine(device.printerType, {self.META_HOST_GUID: cluster_data.host_guid})
if machine and machine.getMetaDataEntry(self.META_CLUSTER_ID) != device.key:
# If the retrieved device has a different cluster_id than the existing machine, bring the existing
# machine up-to-date.
self._updateOutdatedMachine(outdated_machine = machine, new_cloud_output_device = device)
# Create a machine if we don't already have it. Do not make it the active machine.
# We only need to add it if it wasn't already added by "local" network or by cloud.
if machine_manager.getMachine(device.printerType, {self.META_CLUSTER_ID: device.key}) is None \
and machine_manager.getMachine(device.printerType, {self.META_NETWORK_KEY: cluster_data.host_name + "*"}) is None: # The host name is part of the network key.
new_devices.append(device)
elif device.getId() not in self._remote_clusters:
self._remote_clusters[device.getId()] = device
remote_clusters_added = True
# If a printer that was removed from the account is re-added, change its metadata to mark it not removed
# from the account
elif not parseBool(self._um_cloud_printers[device.key].getMetaDataEntry(META_UM_LINKED_TO_ACCOUNT, "true")):
self._um_cloud_printers[device.key].setMetaDataEntry(META_UM_LINKED_TO_ACCOUNT, True)
# Inform the Cloud printers model about new devices.
new_devices_list_of_dicts = [{
"key": d.getId(),
"name": d.name,
"machine_type": d.printerTypeName,
"firmware_version": d.firmwareVersion} for d in new_devices]
discovered_cloud_printers_model = CuraApplication.getInstance().getDiscoveredCloudPrintersModel()
discovered_cloud_printers_model.addDiscoveredCloudPrinters(new_devices_list_of_dicts)
if not new_devices:
if remote_clusters_added:
self._connectToActiveMachine()
return
# Sort new_devices on online status first, alphabetical second.
# Since the first device might be activated in case there is no active printer yet,
# it would be nice to prioritize online devices
online_cluster_names = {c.friendly_name.lower() for c in clusters if c.is_online and c.friendly_name is not None}
new_devices.sort(key = lambda x: ("a{}" if x.name.lower() in online_cluster_names else "b{}").format(x.name.lower()))
message = Message(
title = self.i18n_catalog.i18ncp(
"info:status",
"New printer detected from your Ultimaker account",
"New printers detected from your Ultimaker account",
len(new_devices)
),
progress = 0,
lifetime = 0,
message_type = Message.MessageType.POSITIVE
)
message.show()
for idx, device in enumerate(new_devices):
message_text = self.i18n_catalog.i18nc("info:status Filled in with printer name and printer model.", "Adding printer {name} ({model}) from your account").format(name = device.name, model = device.printerTypeName)
message.setText(message_text)
if len(new_devices) > 1:
message.setProgress((idx / len(new_devices)) * 100)
CuraApplication.getInstance().processEvents()
self._remote_clusters[device.getId()] = device
# If there is no active machine, activate the first available cloud printer
activate = not CuraApplication.getInstance().getMachineManager().activeMachine
self._createMachineFromDiscoveredDevice(device.getId(), activate = activate)
message.setProgress(None)
max_disp_devices = 3
if len(new_devices) > max_disp_devices:
num_hidden = len(new_devices) - max_disp_devices
device_name_list = ["<li>{} ({})</li>".format(device.name, device.printerTypeName) for device in new_devices[0:max_disp_devices]]
device_name_list.append("<li>" + self.i18n_catalog.i18ncp("info:{0} gets replaced by a number of printers", "... and {0} other", "... and {0} others", num_hidden) + "</li>")
device_names = "".join(device_name_list)
else:
device_names = "".join(["<li>{} ({})</li>".format(device.name, device.printerTypeName) for device in new_devices])
message_text = self.i18n_catalog.i18nc("info:status", "Printers added from Digital Factory:") + "<ul>" + device_names + "</ul>"
message.setText(message_text)
def _updateOnlinePrinters(self, printer_responses: Dict[str, CloudClusterResponse]) -> None:
"""
Update the metadata of the printers to store whether they are online or not.
:param printer_responses: The responses received from the API about the printer statuses.
"""
for container_stack in CuraContainerRegistry.getInstance().findContainerStacks(type = "machine"):
cluster_id = container_stack.getMetaDataEntry("um_cloud_cluster_id", "")
if cluster_id in printer_responses:
container_stack.setMetaDataEntry("is_online", printer_responses[cluster_id].is_online)
def _updateOutdatedMachine(self, outdated_machine: GlobalStack, new_cloud_output_device: CloudOutputDevice) -> None:
"""
Update the cloud metadata of a pre-existing machine that is rediscovered (e.g. if the printer was removed and
re-added to the account) and delete the old CloudOutputDevice related to this machine.
:param outdated_machine: The cloud machine that needs to be brought up-to-date with the new data received from
the account
:param new_cloud_output_device: The new CloudOutputDevice that should be linked to the pre-existing machine
:return: None
"""
old_cluster_id = outdated_machine.getMetaDataEntry(self.META_CLUSTER_ID)
outdated_machine.setMetaDataEntry(self.META_CLUSTER_ID, new_cloud_output_device.key)
outdated_machine.setMetaDataEntry(META_UM_LINKED_TO_ACCOUNT, True)
# Clean up the remnants of the old CloudOutputDevice(old_cluster_id)
self._um_cloud_printers[new_cloud_output_device.key] = self._um_cloud_printers.pop(old_cluster_id)
output_device_manager = CuraApplication.getInstance().getOutputDeviceManager()
if old_cluster_id in output_device_manager.getOutputDeviceIds():
output_device_manager.removeOutputDevice(old_cluster_id)
if old_cluster_id in self._remote_clusters:
# We need to close the device so that it stops checking for its status
self._remote_clusters[old_cluster_id].close()
del self._remote_clusters[old_cluster_id]
self._remote_clusters[new_cloud_output_device.key] = new_cloud_output_device
def _devicesRemovedFromAccount(self, removed_device_ids: Set[str]) -> None:
"""
Removes the CloudOutputDevice from the received device ids and marks the specific printers as "removed from
account". In addition, it generates a message to inform the user about the printers that are no longer linked to
his/her account. The message is not generated if all the printers have been previously reported as not linked
to the account.
:param removed_device_ids: Set of device ids, whose CloudOutputDevice needs to be removed
:return: None
"""
if not CuraApplication.getInstance().getCuraAPI().account.isLoggedIn:
return
# Do not report device ids which have been previously marked as non-linked to the account
ignored_device_ids = set()
for device_id in removed_device_ids:
if not parseBool(self._um_cloud_printers[device_id].getMetaDataEntry(META_UM_LINKED_TO_ACCOUNT, "true")):
ignored_device_ids.add(device_id)
# Keep the reported_device_ids list in a class variable, so that the message button actions can access it and
# take the necessary steps to fulfill their purpose.
self.reported_device_ids = removed_device_ids - ignored_device_ids
if not self.reported_device_ids:
return
# Generate message
self._removed_printers_message = Message(
title = self.i18n_catalog.i18ncp(
"info:status",
"A cloud connection is not available for a printer",
"A cloud connection is not available for some printers",
len(self.reported_device_ids)
),
message_type = Message.MessageType.WARNING
)
device_names = "".join(["<li>{} ({})</li>".format(self._um_cloud_printers[device].name, self._um_cloud_printers[device].definition.name) for device in self.reported_device_ids])
message_text = self.i18n_catalog.i18ncp(
"info:status",
"This printer is not linked to the Digital Factory:",
"These printers are not linked to the Digital Factory:",
len(self.reported_device_ids)
)
message_text += "<br/><ul>{}</ul><br/>".format(device_names)
digital_factory_string = self.i18n_catalog.i18nc("info:name", "Ultimaker Digital Factory")
message_text += self.i18n_catalog.i18nc(
"info:status",
"To establish a connection, please visit the {website_link}".format(website_link = "<a href='https://digitalfactory.ultimaker.com?utm_source=cura&utm_medium=software&utm_campaign=change-account-connect-printer'>{}</a>.".format(digital_factory_string))
)
self._removed_printers_message.setText(message_text)
self._removed_printers_message.addAction("keep_printer_configurations_action",
name = self.i18n_catalog.i18nc("@action:button", "Keep printer configurations"),
icon = "",
description = "Keep cloud printers in Ultimaker Cura when not connected to your account.",
button_align = Message.ActionButtonAlignment.ALIGN_RIGHT)
self._removed_printers_message.addAction("remove_printers_action",
name = self.i18n_catalog.i18nc("@action:button", "Remove printers"),
icon = "",
description = "Remove cloud printer(s) which aren't linked to your account.",
button_style = Message.ActionButtonStyle.SECONDARY,
button_align = Message.ActionButtonAlignment.ALIGN_LEFT)
self._removed_printers_message.actionTriggered.connect(self._onRemovedPrintersMessageActionTriggered)
output_device_manager = CuraApplication.getInstance().getOutputDeviceManager()
# Remove the output device from the printers
for device_id in removed_device_ids:
device = self._um_cloud_printers.get(device_id, None) # type: Optional[GlobalStack]
if not device:
continue
if device_id in output_device_manager.getOutputDeviceIds():
output_device_manager.removeOutputDevice(device_id)
if device_id in self._remote_clusters:
del self._remote_clusters[device_id]
# Update the printer's metadata to mark it as not linked to the account
device.setMetaDataEntry(META_UM_LINKED_TO_ACCOUNT, False)
self._removed_printers_message.show()
def _onDiscoveredDeviceRemoved(self, device_id: str) -> None:
device = self._remote_clusters.pop(device_id, None) # type: Optional[CloudOutputDevice]
if not device:
return
device.close()
output_device_manager = CuraApplication.getInstance().getOutputDeviceManager()
if device.key in output_device_manager.getOutputDeviceIds():
output_device_manager.removeOutputDevice(device.key)
def _createMachineFromDiscoveredDevice(self, key: str, activate: bool = True) -> None:
device = self._remote_clusters[key]
if not device:
return
# Create a new machine.
# We do not use MachineManager.addMachine here because we need to set the cluster ID before activating it.
new_machine = CuraStackBuilder.createMachine(device.name, device.printerType)
if not new_machine:
Logger.log("e", "Failed creating a new machine")
return
self._setOutputDeviceMetadata(device, new_machine)
if activate:
CuraApplication.getInstance().getMachineManager().setActiveMachine(new_machine.getId())
def _connectToActiveMachine(self) -> None:
"""Callback for when the active machine was changed by the user"""
active_machine = CuraApplication.getInstance().getGlobalContainerStack()
if not active_machine:
return
output_device_manager = CuraApplication.getInstance().getOutputDeviceManager()
stored_cluster_id = active_machine.getMetaDataEntry(self.META_CLUSTER_ID)
local_network_key = active_machine.getMetaDataEntry(self.META_NETWORK_KEY)
for device in list(self._remote_clusters.values()): # Make a copy of the remote devices list, to prevent modifying the list while iterating, if a device gets added asynchronously.
if device.key == stored_cluster_id:
# Connect to it if the stored ID matches.
self._connectToOutputDevice(device, active_machine)
elif local_network_key and device.matchesNetworkKey(local_network_key):
# Connect to it if we can match the local network key that was already present.
self._connectToOutputDevice(device, active_machine)
elif device.key in output_device_manager.getOutputDeviceIds():
# Remove device if it is not meant for the active machine.
output_device_manager.removeOutputDevice(device.key)
def _setOutputDeviceMetadata(self, device: CloudOutputDevice, machine: GlobalStack):
machine.setName(device.name)
machine.setMetaDataEntry(self.META_CLUSTER_ID, device.key)
machine.setMetaDataEntry(self.META_HOST_GUID, device.clusterData.host_guid)
machine.setMetaDataEntry("group_name", device.name)
machine.setMetaDataEntry("group_size", device.clusterSize)
digital_factory_string = self.i18n_catalog.i18nc("info:name", "Ultimaker Digital Factory")
digital_factory_link = "<a href='https://digitalfactory.ultimaker.com?utm_source=cura&utm_medium=software&utm_campaign=change-account-remove-printer'>{digital_factory_string}</a>".format(digital_factory_string = digital_factory_string)
removal_warning_string = self.i18n_catalog.i18nc("@message {printer_name} is replaced with the name of the printer", "{printer_name} will be removed until the next account sync.").format(printer_name = device.name) \
+ "<br>" + self.i18n_catalog.i18nc("@message {printer_name} is replaced with the name of the printer", "To remove {printer_name} permanently, visit {digital_factory_link}").format(printer_name = device.name, digital_factory_link = digital_factory_link) \
+ "<br><br>" + self.i18n_catalog.i18nc("@message {printer_name} is replaced with the name of the printer", "Are you sure you want to remove {printer_name} temporarily?").format(printer_name = device.name)
machine.setMetaDataEntry("removal_warning", removal_warning_string)
machine.addConfiguredConnectionType(device.connectionType.value)
def _connectToOutputDevice(self, device: CloudOutputDevice, machine: GlobalStack) -> None:
"""Connects to an output device and makes sure it is registered in the output device manager."""
self._setOutputDeviceMetadata(device, machine)
if not device.isConnected():
device.connect()
output_device_manager = CuraApplication.getInstance().getOutputDeviceManager()
if device.key not in output_device_manager.getOutputDeviceIds():
output_device_manager.addOutputDevice(device)
def _printerRemoved(self, container: ContainerInterface) -> None:
"""
Callback connected to the containerRemoved signal. Invoked when a cloud printer is removed from Cura to remove
the printer's reference from the _remote_clusters.
:param container: The ContainerInterface passed to this function whenever the ContainerRemoved signal is emitted
:return: None
"""
if isinstance(container, GlobalStack):
container_cluster_id = container.getMetaDataEntry(self.META_CLUSTER_ID, None)
if container_cluster_id in self._remote_clusters.keys():
del self._remote_clusters[container_cluster_id]
def _onRemovedPrintersMessageActionTriggered(self, removed_printers_message: Message, action: str) -> None:
if action == "keep_printer_configurations_action":
removed_printers_message.hide()
elif action == "remove_printers_action":
machine_manager = CuraApplication.getInstance().getMachineManager()
remove_printers_ids = {self._um_cloud_printers[i].getId() for i in self.reported_device_ids}
all_ids = {m.getId() for m in CuraApplication.getInstance().getContainerRegistry().findContainerStacks(type = "machine")}
question_title = self.i18n_catalog.i18nc("@title:window", "Remove printers?")
question_content = self.i18n_catalog.i18ncp(
"@label",
"You are about to remove {0} printer from Cura. This action cannot be undone.\nAre you sure you want to continue?",
"You are about to remove {0} printers from Cura. This action cannot be undone.\nAre you sure you want to continue?",
len(remove_printers_ids)
)
if remove_printers_ids == all_ids:
question_content = self.i18n_catalog.i18nc("@label", "You are about to remove all printers from Cura. This action cannot be undone.\nAre you sure you want to continue?")
result = QMessageBox.question(None, question_title, question_content)
if result == QMessageBox.No:
return
for machine_cloud_id in self.reported_device_ids:
machine_manager.setActiveMachine(self._um_cloud_printers[machine_cloud_id].getId())
machine_manager.removeMachine(self._um_cloud_printers[machine_cloud_id].getId())
removed_printers_message.hide()
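# --- Illustrative sketch, not part of the original plugin file: the lifecycle
# the manager above implements. It only calls methods defined in this class and
# requires a running CuraApplication, so this helper is never invoked here.
def _example_cloud_manager_lifecycle():
    manager = CloudOutputDeviceManager()
    manager.start()               # begin periodic cluster syncs (no-op when logged out)
    manager.refreshConnections()  # re-evaluate output devices for the active machine
    manager.stop()                # remove all cloud output devices again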
|
Ultimaker/Cura
|
plugins/UM3NetworkPrinting/src/Cloud/CloudOutputDeviceManager.py
|
Python
|
lgpl-3.0
| 27,748
|
[
"VisIt"
] |
6f3c92ca4f37237d25090824768ff805501e2dd89c15b6d01f2013a27cceddce
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured # noqa
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import empty
from django.utils.functional import SimpleLazyObject # noqa
from django.utils.module_loading import module_has_submodule # noqa
from django.utils.translation import ugettext_lazy as _
from importlib import import_module
import six
from horizon import conf
from horizon.decorators import _current_component # noqa
from horizon.decorators import require_auth # noqa
from horizon.decorators import require_perms # noqa
from horizon import loaders
# Name of the panel group for panels to be displayed without a group.
DEFAULT_PANEL_GROUP = 'default'
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
pattern._callback = decorator(pattern.callback, *args, **kwargs)
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
# FIXME(lhcheng): We need to find a better way to cache the result.
# Rather than storing it in the session, we could leverage the Django
# cache. Currently, this has been causing issues with the cookie backend,
# adding 1600+ bytes to the cookie size.
def access_cached(func):
def inner(self, context):
session = context['request'].session
try:
if session['allowed']['valid_for'] != session.get('token'):
raise KeyError()
except KeyError:
session['allowed'] = {"valid_for": session.get('token')}
key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if key not in session['allowed']:
session['allowed'][key] = func(self, context)
session.modified = True
return session['allowed'][key]
return inner
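# --- Illustrative sketch, not part of the original module: how access_cached
# is meant to be applied to a component's can_access(context) method (see the
# commented-out decorator on HorizonComponent.can_access below). The component
# class here is a hypothetical stand-in.
def _example_access_cached_usage():
    class _FakeComponent(object):
        @access_cached
        def can_access(self, context):
            # Pretend this is an expensive policy check; the result is cached
            # in the session, keyed by "<module>.<class>" and invalidated when
            # the session token changes.
            return True
    return _FakeComponent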
class NotRegistered(Exception):
pass
@python_2_unicode_compatible
class HorizonComponent(object):
policy_rules = None
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __str__(self):
name = getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
return name
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
# FIXME(lhcheng): Removed the access_cached decorator for now until
# a better implementation has been figured out. This has been causing
# issues with the cookie backend, adding 1600+ bytes to the cookie size.
# @access_cached
def can_access(self, context):
"""Return whether the user has role based access to this component.
This method is not intended to be overridden.
The result of the method is stored in per-session cache.
"""
return self.allowed(context)
def allowed(self, context):
"""Checks if the user is allowed to access this component.
This method should be overridden to return the result of
any policy checks required for the user to access this component
when more complex checks are required.
"""
return self._can_access(context['request'])
def _can_access(self, request):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
# This check is an OR check rather than the AND check that is the
# default in the policy engine, so we call each rule individually.
if policy_check and self.policy_rules:
for rule in self.policy_rules:
if policy_check((rule,), request):
return True
return False
# default to allowed
return True
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class.__name__)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
found = self._registry.get(cls, None)
if found:
return found
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.slug})
else:
slug = getattr(cls, "slug", cls)
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.' % {"type": class_name,
"slug": slug})
class Panel(HorizonComponent):
"""A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing permission-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any permissions required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
.. staticmethod:: can_register
This optional static method can be used to specify conditions that
need to be satisfied to load this panel. Unlike ``permissions`` and
``allowed`` this method is intended to handle settings based
conditions rather than user based permission and policy checks.
The return value is boolean. If the method returns ``True``, then the
panel will be registered and available to user (if ``permissions`` and
``allowed`` runtime checks are also satisfied). If the method returns
``False``, then the panel will not be registered and will not be
available via normal navigation or direct URL access.
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.slug
def get_absolute_url(self):
"""Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except Exception as exc:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.info("Error reversing absolute URL for %s: %s" % (self, exc))
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
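# --- Illustrative sketch, not part of the original module: a minimal Panel
# subclass using the attributes documented in the Panel docstring above. The
# slug, permission string, and setting name are hypothetical placeholders.
class _ExamplePanel(Panel):
    name = _("Example")
    slug = "example"
    permissions = ("openstack.roles.member",)

    @staticmethod
    def can_register():
        # Settings-based load condition, as described for can_register above.
        return getattr(settings, "ENABLE_EXAMPLE_PANEL", True)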
@six.python_2_unicode_compatible
class PanelGroup(object):
"""A container for a set of :class:`~horizon.Panel` classes.
When iterated, it will yield each of the ``Panel`` instances it
contains.
.. attribute:: slug
A unique string to identify this panel group. Required.
.. attribute:: name
A user-friendly name which will be used as the group heading in
places such as the navigation. Default: ``None``.
.. attribute:: panels
A list of panel module names which should be contained within this
grouping.
"""
def __init__(self, dashboard, slug=None, name=None, panels=None):
self.dashboard = dashboard
self.slug = slug or getattr(self, "slug", DEFAULT_PANEL_GROUP)
self.name = name or getattr(self, "name", None)
# Our panels must be mutable so they can be extended by others.
self.panels = list(panels or getattr(self, "panels", []))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return self.name
def __iter__(self):
panel_instances = []
for name in self.panels:
try:
panel_instances.append(self.dashboard.get_panel(name))
except NotRegistered as e:
LOG.debug(e)
return iter(panel_instances)
class Dashboard(Registry, HorizonComponent):
"""A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing
permission-based access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a flat list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a list of :class:`~horizon.PanelGroup` classes which
define groups of panels as in the following example::
class SystemPanels(horizon.PanelGroup):
slug = "syspanel"
name = _("System")
panels = ('overview', 'instances', ...)
class Syspanel(horizon.Dashboard):
panels = (SystemPanels,)
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any permissions required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this dashboard should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.slug
def __init__(self, *args, **kwargs):
super(Dashboard, self).__init__(*args, **kwargs)
self._panel_groups = None
def get_panel(self, panel):
"""Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order, without any panel groupings.
"""
all_panels = []
panel_groups = self.get_panel_groups()
for panel_group in panel_groups.values():
all_panels.extend(panel_group)
return all_panels
def get_panel_group(self, slug):
"""Returns the specified :class:~horizon.PanelGroup
or None if not registered
"""
return self._panel_groups.get(slug)
def get_panel_groups(self):
registered = copy.copy(self._registry)
panel_groups = []
# Gather our known panels
if self._panel_groups is not None:
for panel_group in self._panel_groups.values():
for panel in panel_group:
registered.pop(panel.__class__)
panel_groups.append((panel_group.slug, panel_group))
# Deal with leftovers (such as add-on registrations)
if len(registered):
slugs = [panel.slug for panel in registered.values()]
new_group = PanelGroup(self,
slug="other",
name=_("Other"),
panels=slugs)
panel_groups.append((new_group.slug, new_group))
return collections.OrderedDict(panel_groups)
def get_absolute_url(self):
"""Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except Exception:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
url_slug = panel.slug.replace('.', '/')
urlpatterns += patterns('',
url(r'^%s/' % url_slug,
include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns += patterns('',
url(r'',
include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, require_auth)
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
"""Discovers panels to register from the current dashboard module."""
if getattr(self, "_autodiscover_complete", False):
return
panels_to_discover = []
panel_groups = []
# If we have a flat iterable of panel names, wrap it again so
# we have a consistent structure for the next step.
if all([isinstance(i, six.string_types) for i in self.panels]):
self.panels = [self.panels]
# Now iterate our panel sets.
default_created = False
for panel_set in self.panels:
# Instantiate PanelGroup classes.
if not isinstance(panel_set, collections.Iterable) and \
issubclass(panel_set, PanelGroup):
panel_group = panel_set(self)
# Check for nested tuples, and convert them to PanelGroups
elif not isinstance(panel_set, PanelGroup):
panel_group = PanelGroup(self, panels=panel_set)
# Put our results into their appropriate places
panels_to_discover.extend(panel_group.panels)
panel_groups.append((panel_group.slug, panel_group))
if panel_group.slug == DEFAULT_PANEL_GROUP:
default_created = True
# Plugin panels can be added to a default panel group. Make sure such a
# default group exists.
if not default_created:
default_group = PanelGroup(self)
panel_groups.insert(0, (default_group.slug, default_group))
self._panel_groups = collections.OrderedDict(panel_groups)
# Do the actual discovery
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
for panel in panels_to_discover:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
self._autodiscover_complete = True
@classmethod
def register(cls, panel):
"""Registers a :class:`~horizon.Panel` with this dashboard."""
panel_class = Horizon.register_panel(cls, panel)
# Support template loading from panel template directories.
panel_mod = import_module(panel.__module__)
panel_dir = os.path.dirname(panel_mod.__file__)
template_dir = os.path.join(panel_dir, "templates")
if os.path.exists(template_dir):
key = os.path.join(cls.slug, panel.slug)
loaders.panel_template_dirs[key] = template_dir
return panel_class
@classmethod
def unregister(cls, panel):
"""Unregisters a :class:`~horizon.Panel` from this dashboard."""
success = Horizon.unregister_panel(cls, panel)
if success:
# Remove the panel's template directory.
key = os.path.join(cls.slug, panel.slug)
if key in loaders.panel_template_dirs:
del loaders.panel_template_dirs[key]
return success
def allowed(self, context):
"""Checks for role based access for this dashboard.
Checks for access to any panels in the dashboard and to the
dashboard itself.
This method should be overridden to return the result of
any policy checks required for the user to access this dashboard
when more complex checks are required.
"""
# if the dashboard has policy rules, honor those above individual
# panels
if not self._can_access(context['request']):
return False
# check if access is allowed to a single panel,
# the default for each panel is True
for panel in self.get_panels():
if panel.can_access(context):
return True
return False
class Workflow(object):
pass
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is empty:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is empty:
self._setup()
return reversed(self._wrapped)
def __len__(self):
if self._wrapped is empty:
self._setup()
return len(self._wrapped)
def __getitem__(self, idx):
if self._wrapped is empty:
self._setup()
return self._wrapped[idx]
class Site(Registry, HorizonComponent):
"""The overarching class which encompasses all dashboards and panels."""
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.slug
@property
def _conf(self):
return conf.HORIZON_CONFIG
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
"""Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
"""Unregisters a :class:`~horizon.Dashboard` from Horizon."""
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
"""Returns the specified :class:`~horizon.Dashboard` instance."""
return self._registered(dashboard)
def get_dashboards(self):
"""Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = sorted(registered.values())
dashboards.extend(extra)
return dashboards
else:
return sorted(self._registry.values())
def get_default_dashboard(self):
"""Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
"""Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying either a URL or a function which returns a URL via
the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
{"user_home": None,} # Will always return the default dashboard
This can be useful if the default dashboard may not be accessible
to all users. When user_home is missing from HORIZON_CONFIG,
it will default to the settings.LOGIN_REDIRECT_URL value.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, six.string_types):
# Assume we've got a URL if there's a slash in it
if '/' in user_home:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
"""Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns += patterns('',
url(r'^%s/' % dash.slug,
include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
"""Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
def _load_panel_customization(self):
"""Applies the plugin-based panel configurations.
This method parses the panel customization from the ``HORIZON_CONFIG``
        and makes changes to the dashboard accordingly.
It supports adding, removing and setting default panels on the
        dashboard. It also supports registering a panel group.
"""
panel_customization = self._conf.get("panel_customization", [])
# Process all the panel groups first so that they exist before panels
# are added to them and Dashboard._autodiscover() doesn't wipe out any
# panels previously added when its panel groups are instantiated.
panel_configs = []
for config in panel_customization:
if config.get('PANEL'):
panel_configs.append(config)
elif config.get('PANEL_GROUP'):
self._process_panel_group_configuration(config)
else:
LOG.warning("Skipping %s because it doesn't have PANEL or "
"PANEL_GROUP defined.", config.__name__)
# Now process the panels.
for config in panel_configs:
self._process_panel_configuration(config)
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError:
LOG.warning("Could not load panel: %s", mod_path)
return
panel = getattr(mod, panel_cls)
                # Test whether a can_register method is present and, if so,
                # call it to determine if the panel should be loaded.
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e})
def _process_panel_group_configuration(self, config):
"""Adds a panel group to the dashboard."""
panel_group_slug = config.get('PANEL_GROUP')
try:
dashboard = config.get('PANEL_GROUP_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_DASHBOARD defined.", config.__name__)
return
dashboard_cls = self.get_dashboard(dashboard)
panel_group_name = config.get('PANEL_GROUP_NAME')
if not panel_group_name:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_NAME defined.", config.__name__)
return
# Create the panel group class
panel_group = type(panel_group_slug,
(PanelGroup, ),
{'slug': panel_group_slug,
'name': panel_group_name,
'panels': []},)
# Add the panel group to dashboard
panels = list(dashboard_cls.panels)
panels.append(panel_group)
dashboard_cls.panels = tuple(panels)
# Trigger the autodiscovery to completely load the new panel group
dashboard_cls._autodiscover_complete = False
dashboard_cls._autodiscover()
except Exception as e:
LOG.warning('Could not process panel group %(panel_group)s: '
'%(exc)s',
{'panel_group': panel_group_slug, 'exc': e})
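    # Illustrative sketch (not part of the original module): the two methods
    # above consume HORIZON_CONFIG["panel_customization"] entries of roughly
    # the following hypothetical shape; the keys are the ones actually read
    # here, the values are placeholders only:
    #     {'PANEL_GROUP': 'example_group',
    #      'PANEL_GROUP_NAME': 'Example Group',
    #      'PANEL_GROUP_DASHBOARD': 'project'},
    #     {'PANEL': 'example_panel',
    #      'PANEL_DASHBOARD': 'project',
    #      'PANEL_GROUP': 'example_group',
    #      'ADD_PANEL': 'example_plugin.panel.ExamplePanel'},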
class HorizonSite(Site):
"""A singleton implementation of Site such that all dealings with horizon
get the same instance no matter what. There can be only one.
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
return cls._instance
# The one true Horizon
Horizon = HorizonSite()
|
ankur-gupta91/horizon-net-ip
|
horizon/base.py
|
Python
|
apache-2.0
| 39,012
|
[
"VisIt"
] |
18ef71d627e1718809a18e5130b53f0f8508163881ee04677b332e9a6a3313c2
|
#! /usr/bin/env python
# Copyright Ivan Sovic, 2015. www.sovic.org
#
# Creates a pileup from a given SAM/BAM file, and calls consensus bases (or variants).
import os;
import sys;
import operator;
import subprocess;
def increase_in_dict(dict_counter, value):
try:
dict_counter[value] += 1;
except:
dict_counter[value] = 1;
def process_mpileup_line(line, line_number, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False):
# Split the line, and perform a sanity check.
split_line = line.strip().split('\t');
if (len(split_line) < 5 or len(split_line) > 6):
sys.stderr.write(line + '\n');
return 0;
ref_name = split_line[0];
position = split_line[1];
ref_base = split_line[2];
coverage = split_line[3];
original_bases = split_line[4];
if (len(split_line) == 6):
qualities = split_line[5];
bases = '';
# Replace the '.' and ',' signs with the actual reference base.
i = 0;
while (i < len(original_bases)):
if (original_bases[i] == '.' or original_bases[i] == ','):
bases += ref_base;
else:
bases += original_bases[i];
i += 1;
base_counts = {};
insertion_count = 0;
current_base_deletion_count = 0;
deletion_count = 0;
insertion_event_counts = {};
deletion_event_counts = {};
end_counts = 0;
# print 'position: %s' % position;
# print 'bases: "%s"' % bases;
# print 'line_number: %d' % line_number;
# print line;
# print '';
# sys.stdout.flush();
i = 0;
while (i < len(bases)):
base = bases[i];
if (base == r'^'):
# This is the starting position of a read. It encodes two
# symbols: '^' marking the read start and a char marking the
# mapping quality of the read.
#increase_in_dict(base_counts, bases[i + 1].upper());
i += 1; # Increase only by 1, because we have i += 1 down there.
elif (base == r'$'):
# This marks the end of a read.
end_counts += 1;
elif (base == r'*'):
# This is a deletion, just count it.
current_base_deletion_count += 1;
elif (base == r'-'):
			# This marks the occurrence of deletions. It is a composite object
# consisting of: the special character '-', the number of the deleted bases
# and the actual bases that are deleted (these bases follow the current position).
# In our approach, we ignore this case, because we count deletions one by one
# through the '*' character.
# Get the number of bases that need to be skipped in the string.
j = (i + 1);
while (bases[j] in '0123456789'):
j += 1;
num_bases = int(bases[(i + 1):j]);
skip_bases = (j - i) + num_bases - 1;
deletion_count += 1;
deletion = bases[j : (j + num_bases)].upper();
increase_in_dict(deletion_event_counts, deletion);
# Skip the length of the numeric entry plus the actual number of bases
# that need to be skipped.
i += skip_bases;
elif (base == r'+'):
			# This marks the occurrence of an insertion. It is a composite object
# consisting of: the special character '+', the number of the inserted bases
# and the actual bases that are inserted (these bases follow the current position).
# Similar to the deletion marking, but here we actually care about the bases,
# and we need to make an allele aware count.
# Get the number of bases that are inserted;
j = (i + 1);
while (bases[j] in '0123456789'):
j += 1;
num_bases = int(bases[(i + 1):j]);
skip_bases = (j - i) + num_bases - 1;
insertion_count += 1;
insertion = bases[j : (j + num_bases)].upper();
increase_in_dict(insertion_event_counts, insertion);
i += skip_bases;
else:
increase_in_dict(base_counts, bases[i].upper());
i += 1;
# TODO: An additional problematic case, discovered this on 03.11.2014., when analyzing BWA-MEM's mpileup.
# There are pileup bases that do not have any actual bases, but only the '*' symbols. How should this be handled properly?
# Example line from the mpileup file:
# gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D-
# I chose to handle them as undercovered bases.
non_indel_coverage_current_base = int(coverage) - current_base_deletion_count;
if (verbose == True):
sys.stdout.write('%s\nbase_counts: %s\n' % (line.strip(), str(base_counts)));
	# EDIT: Previously I compared the total coverage of the current base with the coverage threshold.
	# However, the total coverage also accounts for the deletions denoted with the '*' sign, which I think
	# isn't relevant, as deletions are counted prior to occurring, and at that point it is already decided whether
	# there will be a deletion event. If we wound up at this base (i.e. this base didn't get skipped because of a
	# deletion consensus), then the deletions on this base are ignored.
#if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count):
# if (non_indel_coverage_current_base < coverage_threshold):
if (int(coverage) < coverage_threshold):
ret_num_undercovered_bases[0] += 1;
# ret_coverage_sum[0] += 0;
ret_coverage_sum[0] += int(coverage); # TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
sorted_base_counts = [['A', 0], ['C', 0], ['T', 0], ['G', 0]];
sorted_base_counts = sorted(base_counts.items(), key=operator.itemgetter(1));
try:
most_common_base_count = sorted_base_counts[-1][1];
except Exception, e:
most_common_base_count = 0;
pass;
#variant_line = 'undercovered1\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
#ret_variant_list.append(variant_line);
variant_line = 'undercovered1\tpos = %s\tref = %s\tcoverage = %d\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s' % (position, ref_name, int(coverage), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts));
ret_variant_list.append(variant_line);
### VCF output ###
qual = 1000;
info = 'DP=%s;TYPE=snp' % (coverage);
ref_field = ref_base;
alt_field = 'N';
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
else:
ret_num_called_bases[0] += 1;
ret_coverage_sum[0] += int(coverage); # TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
most_common_base_count = 0;
### Handling base consensus.
sorted_base_counts = sorted(base_counts.items(), key=operator.itemgetter(1));
try:
most_common_base_count = sorted_base_counts[-1][1];
except Exception, e:
pass;
# sys.stderr.write(str(e) + '\n');
# sys.stderr.write('sorted_base_counts:\n');
# sys.stderr.write(str(sorted_base_counts) + '\n');
# sys.stderr.write('base_counts:\n');
# sys.stderr.write(str(base_counts) + '\n');
# sys.stderr.write('original_bases:\n');
# sys.stderr.write(str(original_bases) + '\n');
# sys.stderr.write('line:\n');
# sys.stderr.write(line.strip() + '\n');
# most_common_base_count = 0;
# Allow for the case where there are multiple equally good choices.
# In this case, we prefer the choice which is equal to the reference.
is_good = False;
for base_count in sorted_base_counts:
if (base_count[1] == most_common_base_count):
if (base_count[0] == ref_base):
is_good = True;
break;
if (is_good == False):
if (len(sorted_base_counts) > 0):
ret_snp_count[0] += 1;
# ret_variant_list.append(line_number);
variant_line = 'SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
ret_variant_list.append(variant_line);
### VCF output ###
alt_base = ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0]));
qual = 1000;
info = 'DP=%s;TYPE=snp' % (coverage);
ref_field = ref_base;
alt_field = alt_base;
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
else:
sys.stderr.write('\nWarning: a SNP was detected, but there were no bases in the sorted_base_counts!')
variant_line = 'SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
sys.stderr.write('\n');
else:
ret_num_correct_bases[0] += 1;
if (verbose == True):
sys.stdout.write('Reference base: %s\n' % (ref_base));
sys.stdout.write('Consensus base: %s\n\n' % (base_count[0]));
#if (int(position) == 100000 or int(position) == 1000000 or int(position) == 2000000 or int(position) == 3000000 or int(position) == 4000000):
#print '\nTEST\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
### Handling indel consensus.
### Put a different coverage threshold. Here we are interested even in the reads
### which had a '*' at the current position (because we don't know where it ends).
non_indel_coverage_next_base = int(coverage) - end_counts - deletion_count - insertion_count;
if ((non_indel_coverage_next_base + deletion_count + insertion_count) > coverage_threshold):
# Sanity check, just to see if there actually were any insertions (to avoid index out of bounds error).
# If there are insertions, get the most common one.
if (len(insertion_event_counts.keys()) > 0):
sorted_insertion_counts = sorted(insertion_event_counts.items(), key=operator.itemgetter(1));
most_common_insertion_count = sorted_insertion_counts[-1][1];
most_common_insertion_length = len(sorted_insertion_counts[-1][0]);
insertion_unique = True if (sum([int(insertion_count[1] == most_common_insertion_count) for insertion_count in sorted_insertion_counts]) == 1) else False;
else:
most_common_insertion_count = 0;
most_common_insertion_length = 0;
insertion_unique = False;
# Sanity check, just to see if there actually were any deletions (to avoid index out of bounds error).
# If there are deletions, get the most common one.
if (len(deletion_event_counts.keys()) > 0):
sorted_deletion_counts = sorted(deletion_event_counts.items(), key=operator.itemgetter(1));
most_common_deletion_count = sorted_deletion_counts[-1][1];
most_common_deletion_length = len(sorted_deletion_counts[-1][0]);
deletion_unique = True if (sum([int(deletion_count[1] == most_common_deletion_count) for deletion_count in sorted_deletion_counts]) == 1) else False;
else:
most_common_deletion_count = 0;
most_common_deletion_length = 0;
deletion_unique = False;
if (most_common_insertion_count > most_common_deletion_count and most_common_insertion_count > non_indel_coverage_next_base):
# In this case, insertions are a clear winner.
if (insertion_unique == True):
#ret_insertion_count[0] += most_common_insertion_length;
ret_insertion_count[0] += 1;
ret_num_called_bases[0] += most_common_insertion_length;
#variant_line = 'insertion\t%d\t%s\t%s\t%s\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
#ret_variant_list.append(variant_line);
try:
temp_sorted_bc = sorted_base_counts[-1][0];
except:
temp_sorted_bc = 0;
indel_length = most_common_insertion_length;
variant_line = 'ins\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_insertion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count, ref_base, temp_sorted_bc, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
ret_variant_list.append(variant_line);
					### An insertion in the VCF format is reported at the position where it occurs: the ref field contains the reference base itself, while the alt field contains the ref base followed by the inserted bases.
### VCF output ###
alt_base = ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0]));
qual = 1000;
info = 'DP=%s;TYPE=ins' % (coverage);
ref_field = ref_base;
alt_field = '%s%s' % (ref_base, sorted_insertion_counts[-1][0]);
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
elif (most_common_deletion_count > most_common_insertion_count and most_common_deletion_count > non_indel_coverage_next_base):
# In this case, deletions are a clear winner.
if (deletion_unique == True):
#ret_deletion_count[0] += most_common_deletion_length;
ret_deletion_count[0] += 1;
#variant_line = 'deletion\t%d\t%s\t%s\t%s\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
#ret_variant_list.append(variant_line);
#return most_common_deletion_length;
variant_line = 'del\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_deletion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
ret_variant_list.append(variant_line);
					### A deletion in the VCF format is reported at the position where it occurs: the ref field contains the reference base followed by the deleted bases, while the alt field contains only the first (non-deleted) base.
### VCF output ###
alt_base = ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0]));
qual = 1000;
info = 'DP=%s;TYPE=del' % (coverage);
ref_field = '%s%s' % (ref_base, sorted_deletion_counts[-1][0]);
alt_field = ref_base;
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
return most_common_deletion_length;
else:
# In this case, either the base count consensus wins, or the
# insertion/deletion count is ambiguous.
pass;
return 0;
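# Illustrative walk-through of the parsing above (values are made up, not taken
# from a real mpileup file): with ref_base = 'G' and original_bases = '.,+2ACt$*'
# the loop ends with base_counts = {'G': 2, 'T': 1}, insertion_event_counts =
# {'AC': 1}, end_counts = 1 and current_base_deletion_count = 1; '.' and ','
# are replaced by the reference base, '+2AC' is one two-base insertion event,
# '$' marks a read end and '*' a deletion placeholder on the current base.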
def process_mpileup(alignments_path, reference_path, mpileup_path, coverage_threshold, output_prefix, thread_id=0, bed_position=''):
fp = None;
try:
fp = open(mpileup_path, 'r');
except IOError:
sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % mpileup_path);
return None;
ret_variant_list = [];
ret_vcf_list = [];
ret_snp_count = [0];
ret_insertion_count = [0];
ret_deletion_count = [0];
ret_num_undercovered_bases = [0];
ret_num_called_bases = [0];
ret_num_correct_bases = [0];
ret_coverage_sum = [0];
# lines = fp.readlines();
fp_variant = None;
fp_vcf = None;
if (output_prefix != ''):
if (not os.path.exists(os.path.dirname(output_prefix))):
os.makedirs(os.path.dirname(output_prefix));
variant_file = ('%s-cov_%d.variant.csv' % (output_prefix, coverage_threshold));
fp_variant = open(variant_file, 'w');
vcf_file = ('%s-cov_%d.variant.vcf' % (output_prefix, coverage_threshold));
fp_vcf = open(vcf_file, 'w');
fp_vcf.write('##fileformat=VCFv4.0\n');
fp_vcf.write('##fileDate=20150409\n');
fp_vcf.write('##source=%s\n' % (' '.join(sys.argv)));
fp_vcf.write('##reference=%s\n' % reference_path);
fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">\n');
fp_vcf.write('##INFO=<ID=TYPE,Number=A,Type=String,Description="Type of each allele (snp, ins, del, mnp, complex)">\n');
fp_vcf.write('##INFO=<ID=AF,Number=1,Type=Float,Description="Allele Frequency">\n');
fp_vcf.write('##INFO=<ID=SB,Number=1,Type=Integer,Description="Phred-scaled strand bias at this position">\n');
fp_vcf.write('##INFO=<ID=DP4,Number=4,Type=Integer,Description="Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">\n');
fp_vcf.write('##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n');
fp_vcf.write('##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description="Indicates that the variant is a consensus variant (as opposed to a low frequency variant).">\n');
fp_vcf.write('##INFO=<ID=HRUN,Number=1,Type=Integer,Description="Homopolymer length to the right of report indel position">\n');
fp_vcf.write('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n');
fp_vcf.flush();
use_bed = False;
bed_chromosome = "";
bed_pos_start = 0;
# bed_pos_end = len(lines);
bed_pos_end = -1;
if (bed_position != ""):
bed_split = bed_position.split(':');
if (len(bed_split) != 2):
use_bed = False;
else:
bed_chromosome = bed_split[0];
bed_pos_split = bed_split[1].split('-');
if (len(bed_pos_split) != 2):
use_bed = False;
else:
bed_pos_start = int(bed_pos_split[0]);
bed_pos_end = int(bed_pos_split[1]);
use_bed = True;
sys.stderr.write('Using location specified through commandline:\n');
sys.stderr.write('\tChromosome: "%s"\n' % bed_chromosome);
sys.stderr.write('\tStart: %d\n' % bed_pos_start);
sys.stderr.write('\tEnd: %d\n\n' % bed_pos_end);
# i = 0;
i = 0 if (use_bed == False) else max((bed_pos_start - 10), 0);
j = 0;
# while (i < bed_pos_end): # len(lines)):
num_bases_to_skip = 0;
for line in fp:
# line = lines[i];
if (num_bases_to_skip > 0):
num_bases_to_skip -= 1;
continue;
if (use_bed == True):
line_split = line.strip().split('\t');
if (len(line_split) > 2 and line_split[0] == bed_chromosome):
current_pos = int(line_split[1]);
if (current_pos < bed_pos_start or current_pos >= bed_pos_end):
i += 1;
j += 1;
continue;
else:
# print line_split[0];
# print bed_chromosome;
i += 1;
j += 1;
continue;
if (thread_id == 0):
if ((j % 1000) == 0):
sys.stderr.write('\r[%d] snps = %d, insertions = %d, deletions = %d, undercovered = %d, coverage = %.2f' % (i, ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0], ret_num_undercovered_bases[0], (float(ret_coverage_sum[0])/float((i + 1)))));
sys.stderr.flush();
variant_list_length = len(ret_variant_list);
vcf_list_length = len(ret_vcf_list);
num_bases_to_skip = process_mpileup_line(line, i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed);
if (len(ret_variant_list) > variant_list_length and fp_variant != None):
fp_variant.write('\n'.join(ret_variant_list[variant_list_length:]) + '\n');
fp_variant.flush();
if (len(ret_vcf_list) > vcf_list_length and fp_vcf != None):
fp_vcf.write('\n'.join(ret_vcf_list[vcf_list_length:]) + '\n');
fp_vcf.flush();
i += num_bases_to_skip;
i += 1;
j += 1;
#if (i > 10000):
#break;
fp.close();
sys.stderr.write('\n')
if (fp_variant != None):
fp_variant.close();
if (fp_vcf != None):
fp_vcf.close();
summary_lines = '';
summary_lines += 'alignments_file: %s\n' % alignments_path;
summary_lines += 'mpileup_file: %s\n' % mpileup_path;
summary_lines += 'coverage_threshold: %d\n' % coverage_threshold;
summary_lines += 'snp_count: %d\n' % ret_snp_count[0];
summary_lines += 'insertion_count: %d\n' % ret_insertion_count[0];
summary_lines += 'deletion_count: %d\n' % ret_deletion_count[0];
summary_lines += 'num_undercovered_bases: %d\n' % ret_num_undercovered_bases[0];
summary_lines += 'num_called_bases: %d\n' % ret_num_called_bases[0];
summary_lines += 'num_correct_bases: %d\n' % ret_num_correct_bases[0];
summary_lines += 'average_coverage: %.2f\n' % ((float(ret_coverage_sum[0])/float((i + 1))));
sys.stderr.write(summary_lines + '\n');
sys.stderr.write('\n');
if (output_prefix != ''):
#summary_file = output_prefix + '.conssum';
summary_file = ('%s-cov_%d.variant.sum' % (output_prefix, coverage_threshold));
try:
fp_sum = open(summary_file, 'w');
fp_sum.write(summary_lines);
fp_sum.close();
return summary_file;
except IOError:
sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % (summary_file));
return None;
return None;
def main(alignments_path, reference_path, coverage_threshold, output_prefix, thread_id=0, bed_position=""):
# Sanity checking the existence of the file, and the correctness of its extension.
# Also, if input file is a SAM file, then convert it to a sorted BAM.
alignments_path_bam = alignments_path;
if (os.path.exists(alignments_path) == False):
sys.stderr.write('ERROR: File "%s" does not exist!\n' % alignments_path);
return;
if (alignments_path.endswith('sam')):
# Determine the path where the new BAM file will be generated.
dir_name = os.path.dirname(alignments_path);
if (dir_name == ''):
dir_name = '.';
alignments_path_bam = dir_name + '/' + os.path.splitext(os.path.basename(alignments_path))[0] + '.bam'
alignments_path_bam_exists = os.path.exists(alignments_path_bam);
# Check if a BAM file with the given name already exists.
if (alignments_path_bam_exists == False or (alignments_path_bam_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam))):
# Convert the SAM file to a sorted BAM file.
command = 'samtools view -bS %s | samtools sort - %s' % (alignments_path, os.path.splitext(alignments_path_bam)[0]);
sys.stderr.write(command + '\n')
subprocess.call(command, shell='True');
# Create the BAM index file.
command = 'samtools index %s %s.bai' % (alignments_path_bam, alignments_path_bam);
subprocess.call(command, shell='True');
elif (alignments_path.endswith('bam') == False):
sys.stderr.write('ERROR: File extension needs to be either .sam or .bam! Input file path: "%s".\n' % alignments_path);
return;
# Convert the sorted BAM file to a mpileup file if it doesn't exist yet.
mpileup_path = ('%s.mpileup' % alignments_path_bam);
mpileup_exists = os.path.exists(mpileup_path);
if (mpileup_exists == False or (mpileup_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path))):
command = 'samtools mpileup -B -d 1000000 -Q 0 -A -f %s %s > %s.mpileup' % (reference_path, alignments_path_bam, alignments_path_bam);
subprocess.call(command, shell='True');
sys.stderr.write('Processing file "%s"...\n' % alignments_path);
sys.stderr.write('Reference file "%s"...\n' % reference_path);
sys.stderr.write('Coverage threshold: %d\n' % coverage_threshold);
summary_file = process_mpileup(alignments_path, reference_path, ('%s.mpileup' % alignments_path_bam), coverage_threshold, output_prefix, thread_id, bed_position);
def CollectSummaries(sam_files, prefix_for_intermediate_results, collective_output_file):
fp_collect = None;
try:
fp_collect = open(collective_output_file, 'w');
except IOError:
sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % collective_output_file);
return;
for sam_file in sam_files:
summary_file = prefix_for_intermediate_results + '.sum';
try:
fp_sum = open(summary_file, 'r');
lines = fp_sum.readlines();
fp_sum.close();
except IOError:
sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % summary_file);
continue;
fp_collect.write(''.join(lines) + '\n');
fp_collect.close();
if __name__ == "__main__":
# if (len(sys.argv) < 5):
# sys.stderr.write('Usage:\n');
# sys.stderr.write('\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\n' % sys.argv[0]);
# sys.stderr.write('\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n');
# exit(1);
if (len(sys.argv) < 5):
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\n' % sys.argv[0]);
sys.stderr.write('\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n');
sys.stderr.write('\tPosition parameter is a string specifying "chromosome:start-end"\n\n');
exit(1);
reference_file = sys.argv[1];
coverage_threshold = int(sys.argv[2]);
output_prefix = sys.argv[3];
sam_file = sys.argv[4];
bed_position = '';
if (len(sys.argv) > 5):
bed_position = sys.argv[5];
# sys.stderr.write('bed_position: "%s"\n\n' % bed_position);
processes = [];
if (output_prefix == '-'):
output_prefix = os.path.splitext(sam_file)[0];
main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position);
# if (output_prefix != '-'):
# CollectSummaries([sam_file], output_prefix, output_prefix + '.variant.sum');
|
isovic/samscripts
|
src/consensus.py
|
Python
|
mit
| 26,304
|
[
"BWA"
] |
eb917c71a4197b7df4b46abefdc13051081a72924504e0cf31c1518337a70048
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 19:12:47 2017
@author: roger
"""
# hyperparameter variance: 0.015
import sys
sys.path.append("/Users/roger/Dropbox/TVpgGLM-v1/TVpgGLM")
sys.path.append("/Users/roger/Dropbox/TVpgGLM-v1/TVpgGLM/libs")
import numpy as np
np.random.seed(100)
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
sns.set_style("white")
paper_rc = {'lines.linewidth': 1, 'lines.markersize': 10,
'font.size': 15, 'axes.labelsize':15,
'xtick.labelsize': 15, 'ytick.labelsize': 15}
sns.set_context("paper", rc = paper_rc)
plt.ion()
from pybasicbayes.util.text import progprint_xrange
from pyglm.utils.basis import cosine_basis
from pyglm.plotting import plot_glm
from models_tv import SparseBernoulliGLM_f
T = 1000 # Number of time bins to generate
N = 10 # Number of neurons
B = 1 # Number of "basis functions"
L = 2 # Autoregressive window of influence
# Create a cosine basis to model smooth influence of
# spikes on one neuron on the later spikes of others.
basis = cosine_basis(B=B, L=L, a=1.0 / 2) / L
# Generate some data from a model with self inhibition
# The model structure has the info for the network and regression we used
true_model = \
SparseBernoulliGLM_f(T, N, B, basis=basis,
regression_kwargs=dict(rho=1, mu_w=0,
S_w=0.001, mu_b=-2, S_b=0.0001))
# Generate simulated weights
_, Y = true_model.generate(T=T, keep=True)
# Plot the true model
fig, axs, handles = true_model.plot()
plt.figure()
sns.heatmap(np.transpose(Y), xticklabels=False)
# Create a test model for fitting
test_model = \
SparseBernoulliGLM_f(T, N, B, basis=basis,
regression_kwargs=dict(rho=1, mu_w=0, S_w=0.001, mu_b=-2, S_b=0.0001))
test_model.add_data(Y)
def _collect(m):
return m.log_likelihood(), m.weights, m.adjacency, m.biases, m.means[0]
def _update(m, itr):
m.resample_model()
return _collect(m)
N_samples = 100
samples = []
for itr in progprint_xrange(N_samples):
samples.append(_update(test_model, itr))
# Unpack the samples
samples = zip(*samples)
lps, W_smpls, A_smpls, b_smpls, fr_smpls = tuple(map(np.array, samples))
# Plot the posterior mean and variance
W_mean = W_smpls[N_samples // 2:].mean(0)
A_mean = A_smpls[N_samples // 2:].mean(0)
fr_mean = fr_smpls[N_samples // 2:].mean(0)
fr_std = fr_smpls[N_samples // 2:].std(0)
fig, _, _ = plot_glm(Y, W_mean[:, 0, :, :], A_mean, fr_mean,
std_firingrates=3 * fr_std, title="Posterior Mean")
# Saving the objects:
with open('/Users/roger/Dropbox/TVpgGLM-v1/TVpgGLM/results/sythetic_tv_N10.pickle', 'wb') as f:
pickle.dump([true_model.means[0], true_model.weights, fr_mean, fr_std, W_smpls, Y], f)
|
sheqi/TVpgGLM
|
runtime/syn_tv_demo1.py
|
Python
|
mit
| 2,813
|
[
"NEURON"
] |
aa4355f6c275421a3637e43d4962a32b2d85877edafdc61966f9e1626ee6a04e
|
import os
import sys
from gpaw.test import equal
from ase import Atom
from gpaw import GPAW, MixerDif
from gpaw.cluster import Cluster
h = .25
q = 0
box = 3.
spin=True
# B should not have spin contamination
s = Cluster([Atom('B')])
s.minimal_box(box, h=h)
s.set_initial_magnetic_moments([-1])
c = GPAW(xc='LDA', nbands=-3,
charge=q, spinpol=spin, h=h,
mixer=MixerDif(beta=0.05, nmaxold=5, weight=50.0),
convergence={'eigenstates': 0.078, 'density': 1e-2, 'energy': 0.1},
)
c.calculate(s)
equal(c.density.get_spin_contamination(s, 1), 0., 0.01)
# setup H2 at large distance with different spins for the atoms
s = Cluster([Atom('H'), Atom('H',[0,0,3.0])])
s.minimal_box(box, h=h)
s.set_initial_magnetic_moments([-1,1])
c = GPAW(xc='LDA', nbands=-3,
charge=q, spinpol=spin, h=h,
convergence={'eigenstates': 0.078, 'density': 1e-2, 'energy': 0.1},
)
c.calculate(s)
scont_s = [c.density.get_spin_contamination(s),
c.density.get_spin_contamination(s, 1)]
equal(scont_s[0], scont_s[1], 1.e-4) # symmetry
equal(scont_s[0], 0.9655, 1.e-3)
|
ajylee/gpaw-rtxs
|
gpaw/test/spin_contamination.py
|
Python
|
gpl-3.0
| 1,116
|
[
"ASE",
"GPAW"
] |
d76a00896efb002e2607b9e5cd8b2effea8509f1bc252791ed041576d30abdad
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.277575
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/powerstate.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class powerstate(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(powerstate, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_35114545 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2powerstate>
\t<e2instandby>
''')
if VFFSL(SL,"instandby",True) : # generated from line 5, col 3
_v = "true"
if _v is not None: write(_filter(_v))
else:
_v = "false"
if _v is not None: write(_filter(_v))
write(u'''\t</e2instandby>
</e2powerstate>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_35114545
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_powerstate= 'respond'
## END CLASS DEFINITION
if not hasattr(powerstate, '_initCheetahAttributes'):
templateAPIClass = getattr(powerstate, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(powerstate)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=powerstate()).run()
|
pli3/Openwebif
|
plugin/controllers/views/web/powerstate.py
|
Python
|
gpl-2.0
| 5,095
|
[
"VisIt"
] |
29f972396c38d9a560692d82ad5974e14a49cb8b8b04075b78c9aba973413de7
|
import numpy as np
import pylab as pl
#--------------------------------------------------------------------
def lens_equation_sie(x1,x2,lpar):
xc1 = lpar[0] #x coordinate of the center of the lens (in units of Einstein radius).
xc2 = lpar[1] #y coordinate of the center of the lens (in units of Einstein radius).
q = lpar[2] #Ellipticity of the lens.
rc = lpar[3] #Core size of the lens (in units of Einstein radius).
re = lpar[4] #Einstein radius of the lens.
pha = lpar[5] #Orientation of lens.
phirad = np.deg2rad(pha)
cosa = np.cos(phirad)
sina = np.sin(phirad)
xt1 = (x1-xc1)*cosa+(x2-xc2)*sina
xt2 = (x2-xc2)*cosa-(x1-xc1)*sina
phi = np.sqrt(xt2*xt2+xt1*q*xt1*q+rc*rc)
sq = np.sqrt(1.0-q*q)
pd1 = phi+rc/q
pd2 = phi+rc*q
fx1 = sq*xt1/pd1
fx2 = sq*xt2/pd2
qs = np.sqrt(q)
a1 = qs/sq*np.arctan(fx1)
a2 = qs/sq*np.arctanh(fx2)
xt11 = cosa
xt22 = cosa
xt12 = sina
xt21 =-sina
fx11 = xt11/pd1-xt1*(xt1*q*q*xt11+xt2*xt21)/(phi*pd1*pd1)
fx22 = xt22/pd2-xt2*(xt1*q*q*xt12+xt2*xt22)/(phi*pd2*pd2)
fx12 = xt12/pd1-xt1*(xt1*q*q*xt12+xt2*xt22)/(phi*pd1*pd1)
fx21 = xt21/pd2-xt2*(xt1*q*q*xt11+xt2*xt21)/(phi*pd2*pd2)
a11 = qs/(1.0+fx1*fx1)*fx11
a22 = qs/(1.0-fx2*fx2)*fx22
a12 = qs/(1.0+fx1*fx1)*fx12
a21 = qs/(1.0-fx2*fx2)*fx21
rea11 = (a11*cosa-a21*sina)*re
rea22 = (a22*cosa+a12*sina)*re
rea12 = (a12*cosa-a22*sina)*re
rea21 = (a21*cosa+a11*sina)*re
y11 = 1.0-rea11
y22 = 1.0-rea22
y12 = 0.0-rea12
y21 = 0.0-rea21
jacobian = y11*y22-y12*y21
mu = 1.0/jacobian
res1 = (a1*cosa-a2*sina)*re
res2 = (a2*cosa+a1*sina)*re
return res1,res2,mu
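# Minimal usage sketch for lens_equation_sie (grid size and lens parameters are
# illustrative only): evaluate the deflection field of a q = 0.7 SIE lens
# rotated by 45 degrees and map image-plane points back to the source plane.
#   xg2, xg1 = np.meshgrid(np.linspace(-2.0, 2.0, 5), np.linspace(-2.0, 2.0, 5))
#   a1, a2, mu = lens_equation_sie(xg1, xg2, [0.0, 0.0, 0.7, 0.0, 1.0, 45.0])
#   yg1, yg2 = xg1 - a1, xg2 - a2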
#--------------------------------------------------------------------
def xy_rotate(x, y, xcen, ycen, phi):
phirad = np.deg2rad(phi)
xnew = (x-xcen)*np.cos(phirad)+(y-ycen)*np.sin(phirad)
ynew = (y-ycen)*np.cos(phirad)-(x-xcen)*np.sin(phirad)
return (xnew,ynew)
def gauss_2d(x, y, par):
(xnew,ynew) = xy_rotate(x, y, par[2], par[3], par[5])
res0 = ((xnew**2)*par[4]+(ynew**2)/par[4])/np.abs(par[1])**2
res = par[0]*np.exp(-0.5*res0)
return res
#--------------------------------------------------------------------
# This function shows how to move points on grids to the local peaks of the luminosity map.
#--------------------------------------------------------------------
def pixels_trans(x1,x2,xx1,xx2,matrix,ntmp):
    ntmp = int(ntmp)
dr = 1000
nnx,nny = np.shape(matrix)
dsx = xx1[1,1]-xx1[0,0]
bsz = nnx*dsx
    buf = np.zeros((2*ntmp,2*ntmp))
    buf_x1 = np.zeros((2*ntmp,2*ntmp))
    buf_x2 = np.zeros((2*ntmp,2*ntmp))
kk = 0
while (dr >dsx) :
i = int((x2+bsz*0.5)/dsx)
j = int((x1+bsz*0.5)/dsx)
idx1 = i-ntmp;sidx1 = 0
idx2 = i+ntmp;sidx2 = 2*ntmp
idy1 = j-ntmp;sidy1 = 0
idy2 = j+ntmp;sidy2 = 2*ntmp
if ((idx1<0)|(idx2>=nnx)|(idy1<0)|(idy2>=nny)):
break
buf[sidx1:sidx2,sidy1:sidy2] = matrix[idx1:idx2,idy1:idy2]
buf_x1[sidx1:sidx2,sidy1:sidy2] = xx1[idx1:idx2,idy1:idy2]
buf_x2[sidx1:sidx2,sidy1:sidy2] = xx2[idx1:idx2,idy1:idy2]
buf_tot = np.sum(buf)
buf_bar = buf_tot/(4.0*ntmp*ntmp)
buf_nrm = buf-buf_bar
buf_nrm[buf_nrm <=0] = 0
buf_nrm_tot = np.sum(buf_nrm)
if buf_nrm_tot == 0.0 :
break
xc2 = np.sum(buf_nrm*buf_x1)/buf_nrm_tot
xc1 = np.sum(buf_nrm*buf_x2)/buf_nrm_tot
dx1 = x1-xc1
dx2 = x2-xc2
dr = np.sqrt(dx1*dx1+dx2*dx2)
x1 = xc1
x2 = xc2
kk = kk+1
if kk > 100 :
break
return x1,x2
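# In short, the loop above repeatedly recentres a (2*ntmp x 2*ntmp) pixel window
# on the brightness-weighted centroid of the mean-subtracted cutout, and stops
# once the centre moves by less than one pixel (or after 100 iterations), which
# is what "moving points on grids to the local peaks" refers to above.
# Illustrative call (starting point is a placeholder):
#   xpk1, xpk2 = pixels_trans(0.5, -0.3, xi1, xi2, g_lensimage, 16)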
#------------------------------------------------------------------------------
if __name__ == '__main__':
re = 1.0 # in units of arcsec
boxsize = 6.0*re # in the units of arcsec
nnn = 1024
dsx = boxsize/nnn # arcsec
xx01 = np.linspace(-boxsize/2.0,boxsize/2.0,nnn)+0.5*dsx
xx02 = np.linspace(-boxsize/2.0,boxsize/2.0,nnn)+0.5*dsx
xi2,xi1 = np.meshgrid(xx01,xx02)
#----------------------------------------------------------------------
# Set the parameters of the image of the source galaxy
#
g_amp = 1.0 # peak brightness value
g_sig = 0.02 # Gaussian "sigma" (i.e., size)
g_xcen = 0.03 # x position of the center of the source (also try (0.0,0.14)
g_ycen = 0.1 # y position of the center of the source
g_axrat = 1.0 # minor-to-major axis ratio
g_pa = 0.0 # major-axis position angle (degrees) c.c.w. from x axis
gpar = np.asarray([g_amp,g_sig,g_xcen,g_ycen,g_axrat,g_pa])
#----------------------------------------------------------------------
# Set the parameters of the lens galaxy
#
xc1 = 0.0 #x coordinate of the center of the lens (in units of Einstein radius).
xc2 = 0.0 #y coordinate of the center of the lens (in units of Einstein radius).
q = 0.7 #Ellipticity of the lens.
rc = 0.0 #Core size of the lens (in units of Einstein radius).
re = 1.0 #Einstein radius of lens.
pha = 45.0 #Orientation of lens.
lpar = np.asarray([xc1,xc2,q,rc,re,pha])
#----------------------------------------------------------------------
# Calculate deflection angles
#
ai1,ai2,mua = lens_equation_sie(xi1,xi2,lpar)
yi1 = xi1-ai1
yi2 = xi2-ai2
#----------------------------------------------------------------------
# Produce Lensed images
#
gpar = np.asarray([g_amp,g_sig,g_xcen,g_ycen,g_axrat,g_pa])
g_lensimage = gauss_2d(yi1,yi2,gpar)
#----------------------------------------------------------------------
# Set the parameters of the image of the lens galaxy
#
g_amp = 5.0 # peak brightness value
g_sig = 0.5 # Gaussian "sigma" (i.e., size)
g_xcen = 0.0 # x position of center (also try (0.0,0.14)
g_ycen = 0.0 # y position of center
g_axrat = 0.7 # minor-to-major axis ratio
g_pa = 45.0 # major-axis position angle (degrees) c.c.w. from x axis
gpar = np.asarray([g_amp,g_sig,g_xcen,g_ycen,g_axrat,g_pa])
g_lens = gauss_2d(xi1,xi2,gpar)
g_lensimage = g_lensimage+g_lens
#----------------------------------------------------------------------
# Generate noises
#
g_noise = np.random.normal(0,1,[nnn,nnn])*1.0
#----------------------------------------------------------------------
# Generate the final mock images of the lensing system
#
g_lensimage = g_lensimage+g_noise
#----------------------------------------------------------------------
# Plot the final images
#
levels = [0.0,1.0,2.0,3.0,4.0,5.0,6.0]
pl.figure(figsize=(10,10))
pl.contourf(g_lensimage,levels)
#----------------------------------------------------------------------
# Sample the positions of the points on grids
#
nns = 16
xp01 = np.linspace(-boxsize/2.0,boxsize/2.0,nnn/nns)+0.5*dsx*nns
xp02 = np.linspace(-boxsize/2.0,boxsize/2.0,nnn/nns)+0.5*dsx*nns
xp2,xp1 = np.meshgrid(xp01,xp02)
xp2 = xp2.reshape((nnn/nns*nnn/nns))
xp1 = xp1.reshape((nnn/nns*nnn/nns))
#----------------------------------------------------------------------
# Draw the initial position of points on grids
#
pl.figure(figsize=(10,10))
pl.xlim(-3,3)
pl.ylim(-3,3)
pl.plot(xp1,xp2,'ko')
#----------------------------------------------------------------------
# Move the points to the local peaks
#
xr1 = xp1*0.0
xr2 = xp2*0.0
for i in xrange(len(xp2)):
xr1[i],xr2[i] = pixels_trans(xp1[i],xp2[i],xi1,xi2,g_lensimage,nns)
X = np.vstack((xr1,xr2)).T
#----------------------------------------------------------------------
# Plot final positions of the points
#
pl.figure(figsize=(10,10))
pl.xlim(-3,3)
pl.ylim(-3,3)
pl.plot(X[:, 0], X[:, 1], 'bo')
#----------------------------------------------------------------------
# Colorize different structures of the points
#
from sklearn.cluster import DBSCAN
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
dbscan = DBSCAN(eps=0.14,min_samples=6)
dbscan.fit(X)
y_pred = dbscan.labels_.astype(np.int)
pl.figure(figsize=(10,10))
pl.xlim(-3,3)
pl.ylim(-3,3)
pl.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=22)
pl.show()
|
cavestruz/StrongCNN
|
image_processing/feature_extraction_methods/mesh_transfer/mesh_trans.py
|
Python
|
mit
| 8,777
|
[
"Galaxy",
"Gaussian"
] |
20665bd447da07fad830b86276de050d5598457e067b049a9a3bf0e5d7a49c91
|
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
(c) 2017 Julian Rees
License: GNU GPLv3
Description: Plot the performance of the SCF convergence in ORCA.
Run: python scfconvergence.py filename [scfnum skip]
Arguments: filename - file name with extension;
there should be at least one SCF cycle present
scfnum - optional: if more than one SCF cycle (e.g. a geometry
optimization), the desired cycle to plot
skip - optional: SCF iterations to truncate from start;
to better visualize late-stage convergence
Dependencies: matplotlib
--------------------------------------------------------------------------------
"""
print(__doc__)
import sys
import math
import matplotlib.pyplot as plt
# check for correct number of inputs
if len(sys.argv) < 2:
print(' ')
sys.exit("You must supply exactly one filename!")
elif len(sys.argv) == 2:
print(' ')
print('- - !! ONLY THE FIRST SCF WILL BE PRINTED !! - -')
elif (len(sys.argv) == 3 and sys.argv[2].isdigit() == True):
pass
elif (len(sys.argv) == 4 and sys.argv[2].isdigit() == True and
      sys.argv[3][1].isdigit() == True and sys.argv[3][0] == '-'):
pass
else:
print(' ')
str1 = 'You must supply exactly one filename, '
str2 = 'and an optional SCF number and pre-cutoff (negative)!'
sys.exit(str1 + str2)
# define search string and clear the list
searchfor = "SCF ITERATIONS"
energies = []
delta_energies = []
# optionally assign the SCF to print
if len(sys.argv) == 3:
scfnum = int(sys.argv[2])
skip = 0
elif len(sys.argv) == 4:
scfnum = int(sys.argv[2])
skip = int(float(sys.argv[3])*-1)
else:
scfnum = 1
skip = 0
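# Hypothetical invocations (output file names are placeholders, not from the
# original script): plot the only SCF of a single-point run, or the third SCF
# of a geometry optimization with the first five iterations truncated:
#   python scfconvergence.py job.out
#   python scfconvergence.py opt.out 3 -5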
# open filename
fname = str(sys.argv[1])
try:
with open(fname) as f:
# search lines for string and move down two lines to get energy
i = 1
for line in f:
if searchfor in line:
next(f)
try:
line = f.next()
except:
print(' ')
sys.exit('- - !! REACHED THE END OF THE OUTPUT FILE !! - -')
# check if i = scfnum
if i < scfnum:
i = i + 1
else:
# run a loop over the first SCF convergence
while "SUCCESS" not in line:
if not line.strip():
break
# check to see if the line is an iteration
elif line.split()[0].isdigit():
# get the energy as a number and add it to the list
try:
energy = float(line.split()[1])
energies.append(energy)
delta_energies.append(float(line.split()[2]))
except ValueError:
pass
try:
line = f.next()
except:
print(' ')
print('- - !! THE SCF IS NOT YET CONVERGED !! - -')
break
break
except IOError:
sys.exit("The specified file does not exist!")
# truncate the list if needed
if skip == 0:
pass
else:
energies[0:skip] = []
delta_energies[0:skip] = []
# plot energies
x_axis = range(1+skip, 1+len(energies)+skip)
plt.plot(x_axis, energies,'o-')
plt.title('%d SCF Iterations' %len(energies))
plt.xlabel('SCF Iteration')
plt.ylabel('SCF Energy')
plt.show()
x_axis = range(1+skip, 1+len(delta_energies)+skip)
plt.plot(x_axis, delta_energies,'o-')
plt.title('%d SCF Iterations' %len(delta_energies))
plt.xlabel('SCF Iteration')
plt.ylabel('SCF Energy Change')
plt.show()
|
julianrees/scripts
|
python/scfconvergence.py
|
Python
|
gpl-3.0
| 3,926
|
[
"ORCA"
] |
04265faf3b14af635ce2380da00ed4e3cbf71ef0b72e1b03d9e3793697ba40fe
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
# $HeadURL$
# File : dirac-distribution
# Author : Adria Casajus
########################################################################
"""
Create tarballs for a given DIRAC release
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities import List, File, Distribution, Platform, Subprocess, CFG
import sys, os, re, urllib2, tempfile, getpass, imp
try:
import hashlib as md5
except ImportError:
import md5
###
# Load release manager from dirac-install
##
diracInstallLocation = os.path.join( os.path.dirname( __file__ ), "dirac-install" )
if not os.path.isfile( diracInstallLocation ):
diracInstallLocation = os.path.join( os.path.dirname( __file__ ), "dirac-install.py" )
try:
diFile = open( diracInstallLocation, "r" )
DiracInstall = imp.load_module( "DiracInstall", diFile, diracInstallLocation, ( "", "r", imp.PY_SOURCE ) )
diFile.close()
except Exception, excp:
  gLogger.fatal( "Cannot find dirac-install! Aborting (%s)" % str( excp ) )
  sys.exit( 1 )
##END OF LOAD
class Params:
def __init__( self ):
self.releasesToBuild = []
self.projectName = 'DIRAC'
self.debug = False
self.externalsBuildType = [ 'client' ]
self.ignoreExternals = False
self.forceExternals = False
self.ignorePackages = False
self.relcfg = False
self.externalsPython = '26'
self.destination = ""
self.externalsLocation = ""
self.makeJobs = 1
self.globalDefaults = ""
self.forcedLocations = {}
def setReleases( self, optionValue ):
self.releasesToBuild = List.fromChar( optionValue )
return S_OK()
def setProject( self, optionValue ):
self.projectName = optionValue
return S_OK()
def setDebug( self, optionValue ):
self.debug = True
return S_OK()
def setExternalsBuildType( self, optionValue ):
self.externalsBuildType = List.fromChar( optionValue )
return S_OK()
def setForceExternals( self, optionValue ):
self.forceExternals = True
return S_OK()
def setIgnoreExternals( self, optionValue ):
self.ignoreExternals = True
return S_OK()
def setDestination( self, optionValue ):
self.destination = optionValue
return S_OK()
def setPythonVersion( self, optionValue ):
self.externalsPython = optionValue
return S_OK()
def setIgnorePackages( self, optionValue ):
self.ignorePackages = True
return S_OK()
def setExternalsLocation( self, optionValue ):
self.externalsLocation = optionValue
return S_OK()
def setMakeJobs( self, optionValue ):
self.makeJobs = max( 1, int( optionValue ) )
return S_OK()
def setReleasesCFG( self, optionValue ):
self.relcfg = optionValue
return S_OK()
def setGlobalDefaults( self, value ):
self.globalDefaults = value
return S_OK()
def overWriteLocation( self, value ):
locSplit = value.split( ":" )
if len( locSplit ) < 2:
return S_ERROR( "Invalid location. It has to have format <moduleName>:<url> insteaf of %s" % value )
modName = locSplit[0]
location = ":".join( locSplit[1:] )
gLogger.notice( "Forcing location of %s to %s" % ( modName, location ) )
self.forcedLocations[ modName ] = location
return S_OK()
def registerSwitches( self ):
Script.registerSwitch( "r:", "releases=", "releases to build (mandatory, comma separated)", cliParams.setReleases )
Script.registerSwitch( "l:", "project=", "Project to build the release for (DIRAC by default)", cliParams.setProject )
Script.registerSwitch( "D:", "destination", "Destination where to build the tar files", cliParams.setDestination )
Script.registerSwitch( "i:", "pythonVersion", "Python version to use (25/26)", cliParams.setPythonVersion )
Script.registerSwitch( "P", "ignorePackages", "Do not make tars of python packages", cliParams.setIgnorePackages )
Script.registerSwitch( "C:", "relcfg=", "Use <file> as the releases.cfg", cliParams.setReleasesCFG )
Script.registerSwitch( "b", "buildExternals", "Force externals compilation even if already compiled", cliParams.setForceExternals )
Script.registerSwitch( "B", "ignoreExternals", "Skip externals compilation", cliParams.setIgnoreExternals )
Script.registerSwitch( "t:", "buildType=", "External type to build (client/server)", cliParams.setExternalsBuildType )
Script.registerSwitch( "x:", "externalsLocation=", "Use externals location instead of downloading them", cliParams.setExternalsLocation )
Script.registerSwitch( "j:", "makeJobs=", "Make jobs (default is 1)", cliParams.setMakeJobs )
Script.registerSwitch( 'M:', 'defaultsURL=', 'Where to retrieve the global defaults from', cliParams.setGlobalDefaults )
Script.registerSwitch( 'O:', 'overwriteLocation=', 'Force location of modules from where to make the release. Format <moduleName>:<url>', cliParams.overWriteLocation )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'\nUsage:',
' %s [option|cfgfile] ...\n' % Script.scriptName ] ) )
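# Hypothetical invocation sketch (release tag and destination path are
# placeholders only), using the switches registered above: build the tarballs
# for one release of the DIRAC project with 4 parallel make jobs:
#   dirac-distribution -r v1r0 -l DIRAC -D /tmp/diracDist -j 4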
class DistributionMaker:
def __init__( self, cliParams ):
self.cliParams = cliParams
self.relConf = DiracInstall.ReleaseConfig( projectName = cliParams.projectName,
globalDefaultsURL = cliParams.globalDefaults )
self.relConf.setDebugCB( gLogger.info )
self.relConf.loadProjectDefaults()
def isOK( self ):
if not self.cliParams.releasesToBuild:
gLogger.error( "Missing releases to build!" )
Script.showHelp()
return False
if not self.cliParams.destination:
self.cliParams.destination = tempfile.mkdtemp( 'DiracDist' )
else:
try:
os.makedirs( self.cliParams.destination )
except:
pass
gLogger.notice( "Will generate tarballs in %s" % self.cliParams.destination )
return True
def loadReleases( self ):
gLogger.notice( "Loading releases.cfg" )
return self.relConf.loadProjectRelease( self.cliParams.releasesToBuild, releaseMode = True, relLocation = self.cliParams.relcfg )
def createModuleTarballs( self ):
for version in self.cliParams.releasesToBuild:
result = self.__createReleaseTarballs( version )
if not result[ 'OK' ]:
return result
return S_OK()
def __createReleaseTarballs( self, releaseVersion ):
result = self.relConf.getModulesForRelease( releaseVersion )
if not result[ 'OK' ]:
return result
modsToTar = result[ 'Value' ]
for modName in modsToTar:
modVersion = modsToTar[ modName ]
dctArgs = [ '-A' ] #Leave a copy of the release notes outside the tarballs
#Version
dctArgs.append( "-n '%s'" % modName )
dctArgs.append( "-v '%s'" % modVersion )
gLogger.notice( "Creating tar for %s version %s" % ( modName, modVersion ) )
#Source
if modName in cliParams.forcedLocations:
location = cliParams.forcedLocations[ modName ]
gLogger.notice( "Source is forced to %s" % location )
dctArgs.append( "-u '%s'" % location )
else:
result = self.relConf.getModSource( releaseVersion, modName )
if not result[ 'OK' ]:
return result
modSrcTuple = result[ 'Value' ]
if modSrcTuple[0]:
logMsgVCS = modSrcTuple[0]
dctArgs.append( "-z '%s'" % modSrcTuple[0] )
else:
logMsgVCS = "autodiscover"
dctArgs.append( "-u '%s'" % modSrcTuple[1] )
gLogger.notice( "Sources will be retrieved from %s (%s)" % ( modSrcTuple[1], logMsgVCS ) )
#Tar destination
dctArgs.append( "-D '%s'" % self.cliParams.destination )
if cliParams.debug:
dctArgs.append( "-dd" )
#Script location discovery
scriptName = os.path.join( os.path.dirname( __file__ ), "dirac-create-distribution-tarball" )
if not os.path.isfile( scriptName ):
scriptName = os.path.join( os.path.dirname( __file__ ), "dirac-create-distribution-tarball.py" )
cmd = "'%s' %s" % ( scriptName, " ".join( dctArgs ) )
gLogger.verbose( "Executing %s" % cmd )
if os.system( cmd ) != 0:
return S_ERROR( "Failed creating tarball for module %s. Aborting" % modName )
gLogger.notice( "Tarball for %s version %s created" % ( modName, modVersion ) )
return S_OK()
def getAvailableExternals( self ):
packagesURL = "http://lhcbproject.web.cern.ch/lhcbproject/dist/DIRAC3/installSource/tars.list"
try:
remoteFile = urllib2.urlopen( packagesURL )
except urllib2.URLError:
gLogger.exception()
return []
remoteData = remoteFile.read()
remoteFile.close()
versionRE = re.compile( "Externals-([a-zA-Z]*)-([a-zA-Z0-9]*(?:-pre[0-9]+)*)-(.*)-(python[0-9]+)\.tar\.gz" )
availableExternals = []
for line in remoteData.split( "\n" ):
res = versionRE.search( line )
if res:
availableExternals.append( res.groups() )
return availableExternals
def createExternalsTarballs( self ):
extDone = []
for releaseVersion in self.cliParams.releasesToBuild:
if releaseVersion in extDone:
continue
if not self.tarExternals( releaseVersion ):
return False
extDone.append( releaseVersion )
return True
def tarExternals( self, releaseVersion ):
externalsVersion = self.relConf.getExtenalsVersion( releaseVersion )
platform = Platform.getPlatformString()
availableExternals = self.getAvailableExternals()
if not externalsVersion:
gLogger.notice( "Externals is not defined for release %s" % releaseVersion )
return False
for externalType in self.cliParams.externalsBuildType:
requestedExternals = ( externalType, externalsVersion, platform, 'python%s' % self.cliParams.externalsPython )
requestedExternalsString = "-".join( list( requestedExternals ) )
gLogger.notice( "Trying to compile %s externals..." % requestedExternalsString )
if not self.cliParams.forceExternals and requestedExternals in availableExternals:
gLogger.notice( "Externals %s is already compiled, skipping..." % ( requestedExternalsString ) )
continue
compileScript = os.path.join( os.path.dirname( __file__ ), "dirac-compile-externals" )
if not os.path.isfile( compileScript ):
compileScript = os.path.join( os.path.dirname( __file__ ), "dirac-compile-externals.py" )
compileTarget = os.path.join( self.cliParams.destination, platform )
cmdArgs = []
cmdArgs.append( "-D '%s'" % compileTarget )
cmdArgs.append( "-t '%s'" % externalType )
cmdArgs.append( "-v '%s'" % externalsVersion )
cmdArgs.append( "-i '%s'" % self.cliParams.externalsPython )
if cliParams.externalsLocation:
cmdArgs.append( "-e '%s'" % self.cliParams.externalsLocation )
if cliParams.makeJobs:
cmdArgs.append( "-j '%s'" % self.cliParams.makeJobs )
compileCmd = "%s %s" % ( compileScript, " ".join( cmdArgs ) )
gLogger.info( compileCmd )
if os.system( compileCmd ):
gLogger.error( "Error while compiling externals!" )
sys.exit( 1 )
tarfilePath = os.path.join( self.cliParams.destination, "Externals-%s.tar.gz" % ( requestedExternalsString ) )
result = Distribution.createTarball( tarfilePath,
compileTarget,
os.path.join( self.cliParams.destination, "mysql" ) )
if not result[ 'OK' ]:
gLogger.error( "Could not generate tarball for package %s" % requestedExternalsString, result[ 'Error' ] )
sys.exit( 1 )
os.system( "rm -rf '%s'" % compileTarget )
return True
def doTheMagic( self ):
if not distMaker.isOK():
gLogger.fatal( "There was an error with the release description" )
return False
result = distMaker.loadReleases()
if not result[ 'OK' ]:
gLogger.fatal( "There was an error when loading the release.cfg file: %s" % result[ 'Message' ] )
return False
#Module tars
if self.cliParams.ignorePackages:
gLogger.notice( "Skipping creating module tarballs" )
else:
result = self.createModuleTarballs()
if not result[ 'OK' ]:
gLogger.fatal( "There was a problem when creating the module tarballs: %s" % result[ 'Message' ] )
return False
#Externals
if self.cliParams.ignoreExternals or cliParams.projectName != "DIRAC":
gLogger.notice( "Skipping creating externals tarball" )
else:
if not self.createExternalsTarballs():
gLogger.fatal( "There was a problem when creating the Externals tarballs" )
return False
#Write the releases files
for relVersion in self.cliParams.releasesToBuild:
projectCFG = self.relConf.getReleaseCFG( self.cliParams.projectName, relVersion )
projectCFGData = projectCFG.toString() + "\n"
try:
relFile = file( os.path.join( self.cliParams.destination, "release-%s-%s.cfg" % ( self.cliParams.projectName, relVersion ) ), "w" )
relFile.write( projectCFGData )
relFile.close()
except Exception, exc:
gLogger.fatal( "Could not write the release info: %s" % str( exc ) )
return False
try:
relFile = file( os.path.join( self.cliParams.destination, "release-%s-%s.md5" % ( self.cliParams.projectName, relVersion ) ), "w" )
relFile.write( md5.md5( projectCFGData ).hexdigest() )
relFile.close()
except Exception, exc:
gLogger.fatal( "Could not write the release info: %s" % str( exc ) )
return False
#Check deps
if 'DIRAC' != self.cliParams.projectName:
deps = self.relConf.getReleaseDependencies( self.cliParams.projectName, relVersion )
if 'DIRAC' not in deps:
gLogger.notice( "Release %s doesn't depend on DIRAC. Check it's what you really want" % relVersion )
else:
gLogger.notice( "Release %s depends on DIRAC %s" % ( relVersion, deps[ 'DIRAC'] ) )
return True
def getUploadCmd( self ):
result = self.relConf.getUploadCommand()
upCmd = False
if result['OK']:
upCmd = result[ 'Value' ]
filesToCopy = []
for fileName in os.listdir( cliParams.destination ):
for ext in ( ".tar.gz", ".md5", ".cfg", ".html", ".pdf" ):
if fileName.find( ext ) == len( fileName ) - len( ext ):
filesToCopy.append( os.path.join( cliParams.destination, fileName ) )
outFiles = " ".join( filesToCopy )
outFileNames = " ".join( [ os.path.basename( filePath ) for filePath in filesToCopy ] )
if not upCmd:
return "Upload to your installation source:\n'%s'\n" % "' '".join( filesToCopy )
for inRep, outRep in ( ( "%OUTLOCATION%", self.cliParams.destination ),
( "%OUTFILES%", outFiles ),
( "%OUTFILENAMES%", outFileNames ) ):
upCmd = upCmd.replace( inRep, outRep )
return upCmd
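#Illustrative note (added; not part of the original script): the upload
#command retrieved from releases.cfg may contain the placeholders handled
#just above. A hypothetical template such as
#  ( cd %OUTLOCATION% ; tar -cf - %OUTFILENAMES% ) | ssh host 'tar -xf -'
#would get %OUTLOCATION%, %OUTFILES% and %OUTFILENAMES% replaced with the
#local destination directory, the full tarball paths and their basenames.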
if __name__ == "__main__":
cliParams = Params()
Script.disableCS()
Script.addDefaultOptionValue( "/DIRAC/Setup", "Dummy" )
cliParams.registerSwitches()
Script.parseCommandLine( ignoreErrors = False )
if Script.localCfg.getDebugMode():
cliParams.debug = True
distMaker = DistributionMaker( cliParams )
if not distMaker.doTheMagic():
sys.exit( 1 )
gLogger.notice( "Everything seems ok. Tarballs generated in %s" % cliParams.destination )
upCmd = distMaker.getUploadCmd()
gLogger.always( upCmd )
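#Example invocation (illustrative only; it uses switches registered above,
#while the switch that selects the releases to build is registered earlier
#in this script and is omitted here):
#  dirac-distribution.py -C /path/to/releases.cfg -t client -j 4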
|
miloszz/DIRAC
|
Core/scripts/dirac-distribution.py
|
Python
|
gpl-3.0
| 15,635
|
[
"DIRAC"
] |
5f1348077a7b4b753d386ef82011ff0aa8f9b16f42f6393bd92a77a18208562e
|
#-*- coding: utf-8 -*-
import re, string
import fileinput, codecs, sys
#Reference recognition.
apeNomFecTitR = re.compile(ur'(?P<ape>[^,]+), (?P<nom>[^(]+?) \((?P<fec>[0-9]+)\):? (?:"|\'\')(?P<tit>.+?)(?:"|\'\')')
volR = re.compile(ur' vol\. ([-0-9]+)[,.]')
numR = re.compile(ur' no\. ([-0-9]+)[,.]')
ppR = re.compile(ur' pp\. ([-0-9]+)[,.]')
edR = re.compile(ur' ed\. by ([^,.]+)[,.]')
inR = re.compile(ur' In (?:"|\'\')([^\'"]+)(?:"|\'\')')
def crearRef(ref):
apeNomFecTitL = apeNomFecTitR.findall(ref)
volL = volR.findall(ref)
numL = numR.findall(ref)
ppL = ppR.findall(ref)
edL = edR.findall(ref)
inL = inR.findall(ref)
apeV = nomV = fecV = titV = volV = numV = ppV = otrosV = inV = ur''
if apeNomFecTitL:
apeV = apeNomFecTitL[0][0]
nomV = apeNomFecTitL[0][1]
fecV = apeNomFecTitL[0][2]
titV = apeNomFecTitL[0][3]
if volL:
volV = volL[0]
if numL:
numV = numL[0]
if ppL:
ppV = ppL[0]
if edL:
otrosV = ur'|nombre-editor='+edL[0]
if inL:
inV = inL[0]
# cita = ur'cita publicación |apellidos={ape} |nombre={nom} |enlaceautor= |año={fec} |título={tit} |publicación={in_} |volumen={vol} |número={num} |páginas={pp} |ubicación= |editorial= |issn= |url= |fechaacceso= {otros}'
return ur'{{cita publicación |apellidos='+apeV+ur' |nombre='+nomV+ur' |enlaceautor= |año='+fecV+ur' |título='+titV+ur' |publicación='+inV+ur' |volumen='+volV+ur' |número='+numV+ur' |páginas='+ppV+ur' |ubicación= |editorial= |issn= |url= |fechaacceso= '+otrosV+ur'}}'
UTF8Reader = codecs.getreader('utf8')
sys.stdin = UTF8Reader(sys.stdin)
lineaR = re.compile(ur'#? *\*(.*)')
def leer():
for line in fileinput.input():
res = lineaR.findall(line)
if res:
print line
print crearRef(res[0])
#Tests
# * Barbeau, Marius (1950) ''Totem Poles.'' 2 vols. (Anthropology Series 30, National Museum of Canada Bulletin 119.) Ottawa: National Museum of Canada.
# * Garfield, Viola E. (1939) "Tsimshian Clan and Society." ''University of Washington Publications in Anthropology,'' vol. 7, no. 3, pp. 167-340.
# *Beynon, William (1992) "The Feast of Nisyaganaat, Chief of the Gitsiis." In ''Na Amwaaltga Tsmsiyeen: The Tsimshian, Trade, and the Northwest Coast Economy,'' ed. by [[Susan Marsden]], pp. 45-54. (Suwilaay\'msga Na Ga'niiyatgm, Teachings of Our Grandfathers, vol. 1.) Prince Rupert, B.C.: First Nations Advisory Council of School District #52.
# *Helin, Calvin (2006) ''Dances with Dependency: Indigenous Success through Self-Reliance.'' Vancouver: Orca Spirit Publishing and Communications.
# * [[Jorge Basadre|Basadre Grohmann, Jorge]]: ''Historia de la República del Perú (1822 - 1933)'', Tomo 10, pp. 264-265; y Tomo 17, pp. 56-57. Editada por la Empresa Editora El Comercio S. A. Lima, 2005. ISBN 9972-205-72-X (V.10) / ISBN 9972-205-79-7 (V.17)
# * Sobrevilla, David (1982): ''Las ideas en el Perú contemporáneo''. Tomo XI de la “Historia del Perú” (Procesos e Instituciones), pp. 152-153. Cuarta Edición. Lima, Editorial Mejía Baca. ISBN 84-499-1616-X
# * [[Alberto Tauro del Pino|Tauro del Pino, Alberto]] (2001): ''Enciclopedia Ilustrada del Perú''. Tercera Edición. Tomo 17. VAC/ZUZ. Lima, PEISA. ISBN 9972-40-166-9
# * Varios autores (2000): ''Grandes Forjadores del Perú''. Artículo: <small>VILLARREAL, Federico.</small> Lima, Lexus Editores. ISBN 9972-625-50-8
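#Illustrative usage (added sketch; not part of the original script): running
#crearRef() on one of the sample references above yields a "cita publicación"
#template with the captured fields, roughly:
# >>> print crearRef(u'Garfield, Viola E. (1939) "Tsimshian Clan and Society." vol. 7, no. 3, pp. 167-340.')
# {{cita publicación |apellidos=Garfield |nombre=Viola E. |enlaceautor= |año=1939 |título=Tsimshian Clan and Society. |publicación= |volumen=7 |número=3 |páginas=167-340 |ubicación= |editorial= |issn= |url= |fechaacceso= }}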
|
races1986/SafeLanguage
|
CEM/crearRef.py
|
Python
|
epl-1.0
| 3,470
|
[
"ORCA"
] |
fb7819d01ce8a9dfdec5f3a3021d8fa7cd1edd5099e1dd598c68a9af138ea335
|
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import filecmp
import os
import stat
from .i18n import _
from .node import (
hex,
nullrev,
short,
)
from .pycompat import (
getattr,
open,
)
from . import (
dagop,
encoding,
error,
fileset,
match as matchmod,
mergestate as mergestatemod,
metadata,
obsolete as obsmod,
patch,
pathutil,
phases,
pycompat,
repoview,
scmutil,
sparse,
subrepo,
subrepoutil,
util,
)
from .utils import (
dateutil,
stringutil,
)
propertycache = util.propertycache
class basectx(object):
"""A basectx object represents the common logic for its children:
changectx: read-only context that is already present in the repo,
workingctx: a context that represents the working directory and can
be committed,
memctx: a context that represents changes in-memory and can also
be committed."""
def __init__(self, repo):
self._repo = repo
def __bytes__(self):
return short(self.node())
__str__ = encoding.strmethod(__bytes__)
def __repr__(self):
return "<%s %s>" % (type(self).__name__, str(self))
def __eq__(self, other):
try:
return type(self) == type(other) and self._rev == other._rev
except AttributeError:
return False
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self._manifest
def __getitem__(self, key):
return self.filectx(key)
def __iter__(self):
return iter(self._manifest)
def _buildstatusmanifest(self, status):
"""Builds a manifest that includes the given status results, if this is
a working copy context. For non-working copy contexts, it just returns
the normal manifest."""
return self.manifest()
def _matchstatus(self, other, match):
"""This internal method provides a way for child objects to override the
match operator.
"""
return match
def _buildstatus(
self, other, s, match, listignored, listclean, listunknown
):
"""build a status with respect to another context"""
# Load earliest manifest first for caching reasons. More specifically,
# if you have revisions 1000 and 1001, 1001 is probably stored as a
# delta against 1000. Thus, if you read 1000 first, we'll reconstruct
# 1000 and cache it so that when you read 1001, we just need to apply a
# delta to what's in the cache. So that's one full reconstruction + one
# delta application.
mf2 = None
if self.rev() is not None and self.rev() < other.rev():
mf2 = self._buildstatusmanifest(s)
mf1 = other._buildstatusmanifest(s)
if mf2 is None:
mf2 = self._buildstatusmanifest(s)
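# Illustrative note (added; not upstream code): e.g. for
# repo[1000].status(repo[1001]) this block reads manifest 1000 before 1001,
# so 1001 can be rebuilt from the cached 1000 plus a single delta instead of
# a full reconstruction.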
modified, added = [], []
removed = []
clean = []
deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
deletedset = set(deleted)
d = mf1.diff(mf2, match=match, clean=listclean)
for fn, value in pycompat.iteritems(d):
if fn in deletedset:
continue
if value is None:
clean.append(fn)
continue
(node1, flag1), (node2, flag2) = value
if node1 is None:
added.append(fn)
elif node2 is None:
removed.append(fn)
elif flag1 != flag2:
modified.append(fn)
elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
# When comparing files between two commits, we save time by
# not comparing the file contents when the nodeids differ.
# Note that this means we incorrectly report a reverted change
# to a file as a modification.
modified.append(fn)
elif self[fn].cmp(other[fn]):
modified.append(fn)
else:
clean.append(fn)
if removed:
# need to filter files if they are already reported as removed
unknown = [
fn
for fn in unknown
if fn not in mf1 and (not match or match(fn))
]
ignored = [
fn
for fn in ignored
if fn not in mf1 and (not match or match(fn))
]
# if they're deleted, don't report them as removed
removed = [fn for fn in removed if fn not in deletedset]
return scmutil.status(
modified, added, removed, deleted, unknown, ignored, clean
)
@propertycache
def substate(self):
return subrepoutil.state(self, self._repo.ui)
def subrev(self, subpath):
return self.substate[subpath][1]
def rev(self):
return self._rev
def node(self):
return self._node
def hex(self):
return hex(self.node())
def manifest(self):
return self._manifest
def manifestctx(self):
return self._manifestctx
def repo(self):
return self._repo
def phasestr(self):
return phases.phasenames[self.phase()]
def mutable(self):
return self.phase() > phases.public
def matchfileset(self, cwd, expr, badfn=None):
return fileset.match(self, cwd, expr, badfn=badfn)
def obsolete(self):
"""True if the changeset is obsolete"""
return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
def extinct(self):
"""True if the changeset is extinct"""
return self.rev() in obsmod.getrevs(self._repo, b'extinct')
def orphan(self):
"""True if the changeset is not obsolete, but its ancestor is"""
return self.rev() in obsmod.getrevs(self._repo, b'orphan')
def phasedivergent(self):
"""True if the changeset tries to be a successor of a public changeset
Only non-public and non-obsolete changesets may be phase-divergent.
"""
return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
def contentdivergent(self):
"""Is a successor of a changeset with multiple possible successor sets
Only non-public and non-obsolete changesets may be content-divergent.
"""
return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
def isunstable(self):
"""True if the changeset is either orphan, phase-divergent or
content-divergent"""
return self.orphan() or self.phasedivergent() or self.contentdivergent()
def instabilities(self):
"""return the list of instabilities affecting this changeset.
Instabilities are returned as strings. Possible values are:
- orphan,
- phase-divergent,
- content-divergent.
"""
instabilities = []
if self.orphan():
instabilities.append(b'orphan')
if self.phasedivergent():
instabilities.append(b'phase-divergent')
if self.contentdivergent():
instabilities.append(b'content-divergent')
return instabilities
def parents(self):
"""return contexts for each parent changeset"""
return self._parents
def p1(self):
return self._parents[0]
def p2(self):
parents = self._parents
if len(parents) == 2:
return parents[1]
return self._repo[nullrev]
def _fileinfo(self, path):
if '_manifest' in self.__dict__:
try:
return self._manifest.find(path)
except KeyError:
raise error.ManifestLookupError(
self._node or b'None', path, _(b'not found in manifest')
)
if '_manifestdelta' in self.__dict__ or path in self.files():
if path in self._manifestdelta:
return (
self._manifestdelta[path],
self._manifestdelta.flags(path),
)
mfl = self._repo.manifestlog
try:
node, flag = mfl[self._changeset.manifest].find(path)
except KeyError:
raise error.ManifestLookupError(
self._node or b'None', path, _(b'not found in manifest')
)
return node, flag
def filenode(self, path):
return self._fileinfo(path)[0]
def flags(self, path):
try:
return self._fileinfo(path)[1]
except error.LookupError:
return b''
@propertycache
def _copies(self):
return metadata.computechangesetcopies(self)
def p1copies(self):
return self._copies[0]
def p2copies(self):
return self._copies[1]
def sub(self, path, allowcreate=True):
'''return a subrepo for the stored revision of path, never wdir()'''
return subrepo.subrepo(self, path, allowcreate=allowcreate)
def nullsub(self, path, pctx):
return subrepo.nullsubrepo(self, path, pctx)
def workingsub(self, path):
"""return a subrepo for the stored revision, or wdir if this is a wdir
context.
"""
return subrepo.subrepo(self, path, allowwdir=True)
def match(
self,
pats=None,
include=None,
exclude=None,
default=b'glob',
listsubrepos=False,
badfn=None,
cwd=None,
):
r = self._repo
if not cwd:
cwd = r.getcwd()
return matchmod.match(
r.root,
cwd,
pats,
include,
exclude,
default,
auditor=r.nofsauditor,
ctx=self,
listsubrepos=listsubrepos,
badfn=badfn,
)
def diff(
self,
ctx2=None,
match=None,
changes=None,
opts=None,
losedatafn=None,
pathfn=None,
copy=None,
copysourcematch=None,
hunksfilterfn=None,
):
"""Returns a diff generator for the given contexts and matcher"""
if ctx2 is None:
ctx2 = self.p1()
if ctx2 is not None:
ctx2 = self._repo[ctx2]
return patch.diff(
self._repo,
ctx2,
self,
match=match,
changes=changes,
opts=opts,
losedatafn=losedatafn,
pathfn=pathfn,
copy=copy,
copysourcematch=copysourcematch,
hunksfilterfn=hunksfilterfn,
)
def dirs(self):
return self._manifest.dirs()
def hasdir(self, dir):
return self._manifest.hasdir(dir)
def status(
self,
other=None,
match=None,
listignored=False,
listclean=False,
listunknown=False,
listsubrepos=False,
):
"""return status of files between two nodes or node and working
directory.
If other is None, compare this node with working directory.
ctx1.status(ctx2) returns the status of change from ctx1 to ctx2.
Returns a mercurial.scmutil.status object.
Data can be accessed using either tuple notation:
(modified, added, removed, deleted, unknown, ignored, clean)
or direct attribute access:
s.modified, s.added, ...
"""
ctx1 = self
ctx2 = self._repo[other]
# This next code block is, admittedly, fragile logic that tests for
# reversing the contexts and wouldn't need to exist if it weren't for
# the fast (and common) code path of comparing the working directory
# with its first parent.
#
# What we're aiming for here is the ability to call:
#
# workingctx.status(parentctx)
#
# If we always built the manifest for each context and compared those,
# then we'd be done. But the special case of the above call means we
# just copy the manifest of the parent.
reversed = False
if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
reversed = True
ctx1, ctx2 = ctx2, ctx1
match = self._repo.narrowmatch(match)
match = ctx2._matchstatus(ctx1, match)
r = scmutil.status([], [], [], [], [], [], [])
r = ctx2._buildstatus(
ctx1, r, match, listignored, listclean, listunknown
)
if reversed:
# Reverse added and removed. Clear deleted, unknown and ignored as
# these make no sense to reverse.
r = scmutil.status(
r.modified, r.removed, r.added, [], [], [], r.clean
)
if listsubrepos:
for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
try:
rev2 = ctx2.subrev(subpath)
except KeyError:
# A subrepo that existed in node1 was deleted between
# node1 and node2 (inclusive). Thus, ctx2's substate
# won't contain that subpath. The best we can do is ignore it.
rev2 = None
submatch = matchmod.subdirmatcher(subpath, match)
s = sub.status(
rev2,
match=submatch,
ignored=listignored,
clean=listclean,
unknown=listunknown,
listsubrepos=True,
)
for k in (
'modified',
'added',
'removed',
'deleted',
'unknown',
'ignored',
'clean',
):
rfiles, sfiles = getattr(r, k), getattr(s, k)
rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
r.modified.sort()
r.added.sort()
r.removed.sort()
r.deleted.sort()
r.unknown.sort()
r.ignored.sort()
r.clean.sort()
return r
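# Illustrative note (added; not upstream code): per the docstring above, the
# returned scmutil.status can be consumed by tuple unpacking,
#   modified, added, removed, deleted, unknown, ignored, clean = ctx1.status(ctx2)
# or through attribute access, e.g. ctx1.status(ctx2).modified.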
def mergestate(self, clean=False):
"""Get a mergestate object for this context."""
raise NotImplementedError(
'%s does not implement mergestate()' % self.__class__
)
def isempty(self):
return not (
len(self.parents()) > 1
or self.branch() != self.p1().branch()
or self.closesbranch()
or self.files()
)
class changectx(basectx):
"""A changecontext object makes access to data related to a particular
changeset convenient. It represents a read-only context already present in
the repo."""
def __init__(self, repo, rev, node, maybe_filtered=True):
super(changectx, self).__init__(repo)
self._rev = rev
self._node = node
# When maybe_filtered is True, the revision might be affected by
# changelog filtering, so operations must go through the filtered changelog.
#
# When maybe_filtered is False, the revision has already been checked
# against filtering and is not filtered. Operations through the
# unfiltered changelog may be used in some cases.
self._maybe_filtered = maybe_filtered
def __hash__(self):
try:
return hash(self._rev)
except AttributeError:
return id(self)
def __nonzero__(self):
return self._rev != nullrev
__bool__ = __nonzero__
@propertycache
def _changeset(self):
if self._maybe_filtered:
repo = self._repo
else:
repo = self._repo.unfiltered()
return repo.changelog.changelogrevision(self.rev())
@propertycache
def _manifest(self):
return self._manifestctx.read()
@property
def _manifestctx(self):
return self._repo.manifestlog[self._changeset.manifest]
@propertycache
def _manifestdelta(self):
return self._manifestctx.readdelta()
@propertycache
def _parents(self):
repo = self._repo
if self._maybe_filtered:
cl = repo.changelog
else:
cl = repo.unfiltered().changelog
p1, p2 = cl.parentrevs(self._rev)
if p2 == nullrev:
return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
return [
changectx(repo, p1, cl.node(p1), maybe_filtered=False),
changectx(repo, p2, cl.node(p2), maybe_filtered=False),
]
def changeset(self):
c = self._changeset
return (
c.manifest,
c.user,
c.date,
c.files,
c.description,
c.extra,
)
def manifestnode(self):
return self._changeset.manifest
def user(self):
return self._changeset.user
def date(self):
return self._changeset.date
def files(self):
return self._changeset.files
def filesmodified(self):
modified = set(self.files())
modified.difference_update(self.filesadded())
modified.difference_update(self.filesremoved())
return sorted(modified)
def filesadded(self):
filesadded = self._changeset.filesadded
compute_on_none = True
if self._repo.filecopiesmode == b'changeset-sidedata':
compute_on_none = False
else:
source = self._repo.ui.config(b'experimental', b'copies.read-from')
if source == b'changeset-only':
compute_on_none = False
elif source != b'compatibility':
# filelog mode, ignore any changelog content
filesadded = None
if filesadded is None:
if compute_on_none:
filesadded = metadata.computechangesetfilesadded(self)
else:
filesadded = []
return filesadded
def filesremoved(self):
filesremoved = self._changeset.filesremoved
compute_on_none = True
if self._repo.filecopiesmode == b'changeset-sidedata':
compute_on_none = False
else:
source = self._repo.ui.config(b'experimental', b'copies.read-from')
if source == b'changeset-only':
compute_on_none = False
elif source != b'compatibility':
# filelog mode, ignore any changelog content
filesremoved = None
if filesremoved is None:
if compute_on_none:
filesremoved = metadata.computechangesetfilesremoved(self)
else:
filesremoved = []
return filesremoved
@propertycache
def _copies(self):
p1copies = self._changeset.p1copies
p2copies = self._changeset.p2copies
compute_on_none = True
if self._repo.filecopiesmode == b'changeset-sidedata':
compute_on_none = False
else:
source = self._repo.ui.config(b'experimental', b'copies.read-from')
# If config says to get copy metadata only from changeset, then
# return that, defaulting to {} if there was no copy metadata. In
# compatibility mode, we return copy data from the changeset if it
# was recorded there, and otherwise we fall back to getting it from
# the filelogs (below).
#
# If we are in compatibility mode and there is no data in the
# changeset, we get the copy metadata from the filelogs.
#
# Otherwise, when the config says to read only from the filelog, we get
# the copy metadata from the filelogs.
if source == b'changeset-only':
compute_on_none = False
elif source != b'compatibility':
# filelog mode, ignore any changelog content
p1copies = p2copies = None
if p1copies is None:
if compute_on_none:
p1copies, p2copies = super(changectx, self)._copies
else:
if p1copies is None:
p1copies = {}
if p2copies is None:
p2copies = {}
return p1copies, p2copies
def description(self):
return self._changeset.description
def branch(self):
return encoding.tolocal(self._changeset.extra.get(b"branch"))
def closesbranch(self):
return b'close' in self._changeset.extra
def extra(self):
"""Return a dict of extra information."""
return self._changeset.extra
def tags(self):
"""Return a list of byte tag names"""
return self._repo.nodetags(self._node)
def bookmarks(self):
"""Return a list of byte bookmark names."""
return self._repo.nodebookmarks(self._node)
def phase(self):
return self._repo._phasecache.phase(self._repo, self._rev)
def hidden(self):
return self._rev in repoview.filterrevs(self._repo, b'visible')
def isinmemory(self):
return False
def children(self):
"""return list of changectx contexts for each child changeset.
This returns only the immediate child changesets. Use descendants() to
recursively walk children.
"""
c = self._repo.changelog.children(self._node)
return [self._repo[x] for x in c]
def ancestors(self):
for a in self._repo.changelog.ancestors([self._rev]):
yield self._repo[a]
def descendants(self):
"""Recursively yield all children of the changeset.
For just the immediate children, use children()
"""
for d in self._repo.changelog.descendants([self._rev]):
yield self._repo[d]
def filectx(self, path, fileid=None, filelog=None):
"""get a file context from this changeset"""
if fileid is None:
fileid = self.filenode(path)
return filectx(
self._repo, path, fileid=fileid, changectx=self, filelog=filelog
)
def ancestor(self, c2, warn=False):
"""return the "best" ancestor context of self and c2
If there are multiple candidates, it will show a message and check
merge.preferancestor configuration before falling back to the
revlog ancestor."""
# deal with workingctxs
n2 = c2._node
if n2 is None:
n2 = c2._parents[0]._node
cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
if not cahs:
anc = self._repo.nodeconstants.nullid
elif len(cahs) == 1:
anc = cahs[0]
else:
# experimental config: merge.preferancestor
for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
try:
ctx = scmutil.revsymbol(self._repo, r)
except error.RepoLookupError:
continue
anc = ctx.node()
if anc in cahs:
break
else:
anc = self._repo.changelog.ancestor(self._node, n2)
if warn:
self._repo.ui.status(
(
_(b"note: using %s as ancestor of %s and %s\n")
% (short(anc), short(self._node), short(n2))
)
+ b''.join(
_(
b" alternatively, use --config "
b"merge.preferancestor=%s\n"
)
% short(n)
for n in sorted(cahs)
if n != anc
)
)
return self._repo[anc]
def isancestorof(self, other):
"""True if this changeset is an ancestor of other"""
return self._repo.changelog.isancestorrev(self._rev, other._rev)
def walk(self, match):
'''Generates matching file names.'''
# Wrap match.bad method to have message with nodeid
def bad(fn, msg):
# The manifest doesn't know about subrepos, so don't complain about
# paths into valid subrepos.
if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
return
match.bad(fn, _(b'no such file in rev %s') % self)
m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
return self._manifest.walk(m)
def matches(self, match):
return self.walk(match)
class basefilectx(object):
"""A filecontext object represents the common logic for its children:
filectx: read-only access to a filerevision that is already present
in the repo,
workingfilectx: a filecontext that represents files from the working
directory,
memfilectx: a filecontext that represents files in-memory,
"""
@propertycache
def _filelog(self):
return self._repo.file(self._path)
@propertycache
def _changeid(self):
if '_changectx' in self.__dict__:
return self._changectx.rev()
elif '_descendantrev' in self.__dict__:
# this file context was created from a revision with a known
# descendant, we can (lazily) correct for linkrev aliases
return self._adjustlinkrev(self._descendantrev)
else:
return self._filelog.linkrev(self._filerev)
@propertycache
def _filenode(self):
if '_fileid' in self.__dict__:
return self._filelog.lookup(self._fileid)
else:
return self._changectx.filenode(self._path)
@propertycache
def _filerev(self):
return self._filelog.rev(self._filenode)
@propertycache
def _repopath(self):
return self._path
def __nonzero__(self):
try:
self._filenode
return True
except error.LookupError:
# file is missing
return False
__bool__ = __nonzero__
def __bytes__(self):
try:
return b"%s@%s" % (self.path(), self._changectx)
except error.LookupError:
return b"%s@???" % self.path()
__str__ = encoding.strmethod(__bytes__)
def __repr__(self):
return "<%s %s>" % (type(self).__name__, str(self))
def __hash__(self):
try:
return hash((self._path, self._filenode))
except AttributeError:
return id(self)
def __eq__(self, other):
try:
return (
type(self) == type(other)
and self._path == other._path
and self._filenode == other._filenode
)
except AttributeError:
return False
def __ne__(self, other):
return not (self == other)
def filerev(self):
return self._filerev
def filenode(self):
return self._filenode
@propertycache
def _flags(self):
return self._changectx.flags(self._path)
def flags(self):
return self._flags
def filelog(self):
return self._filelog
def rev(self):
return self._changeid
def linkrev(self):
return self._filelog.linkrev(self._filerev)
def node(self):
return self._changectx.node()
def hex(self):
return self._changectx.hex()
def user(self):
return self._changectx.user()
def date(self):
return self._changectx.date()
def files(self):
return self._changectx.files()
def description(self):
return self._changectx.description()
def branch(self):
return self._changectx.branch()
def extra(self):
return self._changectx.extra()
def phase(self):
return self._changectx.phase()
def phasestr(self):
return self._changectx.phasestr()
def obsolete(self):
return self._changectx.obsolete()
def instabilities(self):
return self._changectx.instabilities()
def manifest(self):
return self._changectx.manifest()
def changectx(self):
return self._changectx
def renamed(self):
return self._copied
def copysource(self):
return self._copied and self._copied[0]
def repo(self):
return self._repo
def size(self):
return len(self.data())
def path(self):
return self._path
def isbinary(self):
try:
return stringutil.binary(self.data())
except IOError:
return False
def isexec(self):
return b'x' in self.flags()
def islink(self):
return b'l' in self.flags()
def isabsent(self):
"""whether this filectx represents a file not in self._changectx
This is mainly for merge code to detect change/delete conflicts. This is
expected to be True for all subclasses of basectx."""
return False
_customcmp = False
def cmp(self, fctx):
"""compare with other file context
returns True if different than fctx.
"""
if fctx._customcmp:
return fctx.cmp(self)
if self._filenode is None:
raise error.ProgrammingError(
b'filectx.cmp() must be reimplemented if not backed by revlog'
)
if fctx._filenode is None:
if self._repo._encodefilterpats:
# can't rely on size() because wdir content may be decoded
return self._filelog.cmp(self._filenode, fctx.data())
if self.size() - 4 == fctx.size():
# size() can match:
# if file data starts with '\1\n', empty metadata block is
# prepended, which adds 4 bytes to filelog.size().
return self._filelog.cmp(self._filenode, fctx.data())
if self.size() == fctx.size() or self.flags() == b'l':
# size() matches: need to compare content
# issue6456: Always compare symlinks because size can represent
# encrypted string for EXT-4 encryption(fscrypt).
return self._filelog.cmp(self._filenode, fctx.data())
# size() differs
return True
def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
"""return the first ancestor of <srcrev> introducing <fnode>
If the linkrev of the file revision does not point to an ancestor of
srcrev, we'll walk down the ancestors until we find one introducing
this file revision.
:srcrev: the changeset revision we search ancestors from
:inclusive: if true, the src revision will also be checked
:stoprev: an optional revision to stop the walk at. If no introduction
of this file content could be found before this floor
revision, the function will return "None" and stop its
iteration.
"""
repo = self._repo
cl = repo.unfiltered().changelog
mfl = repo.manifestlog
# fetch the linkrev
lkr = self.linkrev()
if srcrev == lkr:
return lkr
# hack to reuse ancestor computation when searching for renames
memberanc = getattr(self, '_ancestrycontext', None)
iteranc = None
if srcrev is None:
# wctx case, used by workingfilectx during mergecopy
revs = [p.rev() for p in self._repo[None].parents()]
inclusive = True # we skipped the real (revless) source
else:
revs = [srcrev]
if memberanc is None:
memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
# check if this linkrev is an ancestor of srcrev
if lkr not in memberanc:
if iteranc is None:
iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
fnode = self._filenode
path = self._path
for a in iteranc:
if stoprev is not None and a < stoprev:
return None
ac = cl.read(a) # get changeset data (we avoid object creation)
if path in ac[3]: # checking the 'files' field.
# The file has been touched, check if the content is
# similar to the one we search for.
if fnode == mfl[ac[0]].readfast().get(path):
return a
# In theory, we should never get out of that loop without a result.
# But if the manifest uses a buggy file revision (not a child of the
# one it replaces), we could. Such a buggy situation will likely
# result in a crash somewhere else at some point.
return lkr
def isintroducedafter(self, changelogrev):
"""True if a filectx has been introduced after a given floor revision"""
if self.linkrev() >= changelogrev:
return True
introrev = self._introrev(stoprev=changelogrev)
if introrev is None:
return False
return introrev >= changelogrev
def introrev(self):
"""return the rev of the changeset which introduced this file revision
This method is different from linkrev because it take into account the
changeset the filectx was created from. It ensures the returned
revision is one of its ancestors. This prevents bugs from
'linkrev-shadowing' when a file revision is used by multiple
changesets.
"""
return self._introrev()
def _introrev(self, stoprev=None):
"""
Same as `introrev`, but with an extra argument to limit the changelog
iteration range in some internal use cases.
If `stoprev` is set, the `introrev` will not be searched past that
`stoprev` revision and "None" might be returned. This is useful to
limit the iteration range.
"""
toprev = None
attrs = vars(self)
if '_changeid' in attrs:
# We have a cached value already
toprev = self._changeid
elif '_changectx' in attrs:
# We know which changelog entry we are coming from
toprev = self._changectx.rev()
if toprev is not None:
return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
elif '_descendantrev' in attrs:
introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
# be nice and cache the result of the computation
if introrev is not None:
self._changeid = introrev
return introrev
else:
return self.linkrev()
def introfilectx(self):
"""Return filectx having identical contents, but pointing to the
changeset revision where this filectx was introduced"""
introrev = self.introrev()
if self.rev() == introrev:
return self
return self.filectx(self.filenode(), changeid=introrev)
def _parentfilectx(self, path, fileid, filelog):
"""create parent filectx keeping ancestry info for _adjustlinkrev()"""
fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
if '_changeid' in vars(self) or '_changectx' in vars(self):
# If self is associated with a changeset (probably explicitly
# fed), ensure the created filectx is associated with a
# changeset that is an ancestor of self.changectx.
# This lets us later use _adjustlinkrev to get a correct link.
fctx._descendantrev = self.rev()
fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
elif '_descendantrev' in vars(self):
# Otherwise propagate _descendantrev if we have one associated.
fctx._descendantrev = self._descendantrev
fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
return fctx
def parents(self):
_path = self._path
fl = self._filelog
parents = self._filelog.parents(self._filenode)
pl = [
(_path, node, fl)
for node in parents
if node != self._repo.nodeconstants.nullid
]
r = fl.renamed(self._filenode)
if r:
# - In the simple rename case, both parents are nullid and pl is empty.
# - In case of merge, only one of the parents is nullid and should
#   be replaced with the rename information. This parent is -always-
#   the first one.
#
# As nullid parents have always been filtered out in the previous list
# comprehension, inserting at 0 will always result in replacing the
# first nullid parent with the rename information.
pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
def p1(self):
return self.parents()[0]
def p2(self):
p = self.parents()
if len(p) == 2:
return p[1]
return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
def annotate(self, follow=False, skiprevs=None, diffopts=None):
"""Returns a list of annotateline objects for each line in the file
- line.fctx is the filectx of the node where that line was last changed
- line.lineno is the line number at the first appearance in the managed
file
- line.text is the data on that line (including newline character)
"""
getlog = util.lrucachefunc(lambda x: self._repo.file(x))
def parents(f):
# Cut _descendantrev here to mitigate the penalty of lazy linkrev
# adjustment. Otherwise, p._adjustlinkrev() would walk changelog
# from the topmost introrev (= srcrev) down to p.linkrev() if it
# isn't an ancestor of the srcrev.
f._changeid
pl = f.parents()
# Don't return renamed parents if we aren't following.
if not follow:
pl = [p for p in pl if p.path() == f.path()]
# renamed filectx won't have a filelog yet, so set it
# from the cache to save time
for p in pl:
if not '_filelog' in p.__dict__:
p._filelog = getlog(p.path())
return pl
# use linkrev to find the first changeset where self appeared
base = self.introfilectx()
if getattr(base, '_ancestrycontext', None) is None:
# it is safe to use an unfiltered repository here because we are
# walking ancestors only.
cl = self._repo.unfiltered().changelog
if base.rev() is None:
# wctx is not inclusive, but works because _ancestrycontext
# is used to test filelog revisions
ac = cl.ancestors(
[p.rev() for p in base.parents()], inclusive=True
)
else:
ac = cl.ancestors([base.rev()], inclusive=True)
base._ancestrycontext = ac
return dagop.annotate(
base, parents, skiprevs=skiprevs, diffopts=diffopts
)
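# Illustrative note (added; not upstream code): as the docstring above
# describes, and assuming fctx is a filectx instance, each yielded
# annotateline can be consumed as e.g.
#   for line in fctx.annotate(follow=True):
#       print(line.fctx.rev(), line.lineno, line.text)
# where line.fctx identifies the changeset that last changed the line.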
def ancestors(self, followfirst=False):
visit = {}
c = self
if followfirst:
cut = 1
else:
cut = None
while True:
for parent in c.parents()[:cut]:
visit[(parent.linkrev(), parent.filenode())] = parent
if not visit:
break
c = visit.pop(max(visit))
yield c
def decodeddata(self):
"""Returns `data()` after running repository decoding filters.
This is often equivalent to how the data would be expressed on disk.
"""
return self._repo.wwritedata(self.path(), self.data())
class filectx(basefilectx):
"""A filecontext object makes access to data related to a particular
filerevision convenient."""
def __init__(
self,
repo,
path,
changeid=None,
fileid=None,
filelog=None,
changectx=None,
):
"""changeid must be a revision number, if specified.
fileid can be a file revision or node."""
self._repo = repo
self._path = path
assert (
changeid is not None or fileid is not None or changectx is not None
), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
changeid,
fileid,
changectx,
)
if filelog is not None:
self._filelog = filelog
if changeid is not None:
self._changeid = changeid
if changectx is not None:
self._changectx = changectx
if fileid is not None:
self._fileid = fileid
@propertycache
def _changectx(self):
try:
return self._repo[self._changeid]
except error.FilteredRepoLookupError:
# Linkrev may point to any revision in the repository. When the
# repository is filtered this may lead to `filectx` trying to build
# `changectx` for filtered revision. In such case we fallback to
# creating `changectx` on the unfiltered version of the repository.
# This fallback should not be an issue because `changectx` from
# `filectx` are not used in complex operations that care about
# filtering.
#
# This fallback is a cheap and dirty fix that prevents several
# crashes. It does not ensure the behavior is correct. However the
# behavior was not correct before filtering either, and "incorrect
# behavior" is seen as better than a "crash".
#
# Linkrevs have several serious troubles with filtering that are
# complicated to solve. Proper handling of the issue here should be
# considered when solving the linkrev issue is on the table.
return self._repo.unfiltered()[self._changeid]
def filectx(self, fileid, changeid=None):
"""opens an arbitrary revision of the file without
opening a new filelog"""
return filectx(
self._repo,
self._path,
fileid=fileid,
filelog=self._filelog,
changeid=changeid,
)
def rawdata(self):
return self._filelog.rawdata(self._filenode)
def rawflags(self):
"""low-level revlog flags"""
return self._filelog.flags(self._filerev)
def data(self):
try:
return self._filelog.read(self._filenode)
except error.CensoredNodeError:
if self._repo.ui.config(b"censor", b"policy") == b"ignore":
return b""
raise error.Abort(
_(b"censored node: %s") % short(self._filenode),
hint=_(b"set censor.policy to ignore errors"),
)
def size(self):
return self._filelog.size(self._filerev)
@propertycache
def _copied(self):
"""check if file was actually renamed in this changeset revision
If rename logged in file revision, we report copy for changeset only
if file revisions linkrev points back to the changeset in question
or both changeset parents contain different file revisions.
"""
renamed = self._filelog.renamed(self._filenode)
if not renamed:
return None
if self.rev() == self.linkrev():
return renamed
name = self.path()
fnode = self._filenode
for p in self._changectx.parents():
try:
if fnode == p.filenode(name):
return None
except error.LookupError:
pass
return renamed
def children(self):
# hard for renames
c = self._filelog.children(self._filenode)
return [
filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
for x in c
]
class committablectx(basectx):
"""A committablectx object provides common functionality for a context that
wants the ability to commit, e.g. workingctx or memctx."""
def __init__(
self,
repo,
text=b"",
user=None,
date=None,
extra=None,
changes=None,
branch=None,
):
super(committablectx, self).__init__(repo)
self._rev = None
self._node = None
self._text = text
if date:
self._date = dateutil.parsedate(date)
if user:
self._user = user
if changes:
self._status = changes
self._extra = {}
if extra:
self._extra = extra.copy()
if branch is not None:
self._extra[b'branch'] = encoding.fromlocal(branch)
if not self._extra.get(b'branch'):
self._extra[b'branch'] = b'default'
def __bytes__(self):
return bytes(self._parents[0]) + b"+"
def hex(self):
return self._repo.nodeconstants.wdirhex
__str__ = encoding.strmethod(__bytes__)
def __nonzero__(self):
return True
__bool__ = __nonzero__
@propertycache
def _status(self):
return self._repo.status()
@propertycache
def _user(self):
return self._repo.ui.username()
@propertycache
def _date(self):
ui = self._repo.ui
date = ui.configdate(b'devel', b'default-date')
if date is None:
date = dateutil.makedate()
return date
def subrev(self, subpath):
return None
def manifestnode(self):
return None
def user(self):
return self._user or self._repo.ui.username()
def date(self):
return self._date
def description(self):
return self._text
def files(self):
return sorted(
self._status.modified + self._status.added + self._status.removed
)
def modified(self):
return self._status.modified
def added(self):
return self._status.added
def removed(self):
return self._status.removed
def deleted(self):
return self._status.deleted
filesmodified = modified
filesadded = added
filesremoved = removed
def branch(self):
return encoding.tolocal(self._extra[b'branch'])
def closesbranch(self):
return b'close' in self._extra
def extra(self):
return self._extra
def isinmemory(self):
return False
def tags(self):
return []
def bookmarks(self):
b = []
for p in self.parents():
b.extend(p.bookmarks())
return b
def phase(self):
phase = phases.newcommitphase(self._repo.ui)
for p in self.parents():
phase = max(phase, p.phase())
return phase
def hidden(self):
return False
def children(self):
return []
def flags(self, path):
if '_manifest' in self.__dict__:
try:
return self._manifest.flags(path)
except KeyError:
return b''
try:
return self._flagfunc(path)
except OSError:
return b''
def ancestor(self, c2):
"""return the "best" ancestor context of self and c2"""
return self._parents[0].ancestor(c2) # punt on two parents for now
def ancestors(self):
for p in self._parents:
yield p
for a in self._repo.changelog.ancestors(
[p.rev() for p in self._parents]
):
yield self._repo[a]
def markcommitted(self, node):
"""Perform post-commit cleanup necessary after committing this ctx
Specifically, this updates backing stores this working context
wraps to reflect the fact that the changes reflected by this
workingctx have been committed. For example, it marks
modified and added files as normal in the dirstate.
"""
def dirty(self, missing=False, merge=True, branch=True):
return False
class workingctx(committablectx):
"""A workingctx object makes access to data related to
the current working directory convenient.
date - any valid date string or (unixtime, offset), or None.
user - username string, or None.
extra - a dictionary of extra values, or None.
changes - a list of file lists as returned by localrepo.status()
or None to use the repository status.
"""
def __init__(
self, repo, text=b"", user=None, date=None, extra=None, changes=None
):
branch = None
if not extra or b'branch' not in extra:
try:
branch = repo.dirstate.branch()
except UnicodeDecodeError:
raise error.Abort(_(b'branch name not in UTF-8!'))
super(workingctx, self).__init__(
repo, text, user, date, extra, changes, branch=branch
)
def __iter__(self):
d = self._repo.dirstate
for f in d:
if d[f] != b'r':
yield f
def __contains__(self, key):
return self._repo.dirstate[key] not in b"?r"
def hex(self):
return self._repo.nodeconstants.wdirhex
@propertycache
def _parents(self):
p = self._repo.dirstate.parents()
if p[1] == self._repo.nodeconstants.nullid:
p = p[:-1]
# use unfiltered repo to delay/avoid loading obsmarkers
unfi = self._repo.unfiltered()
return [
changectx(
self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
)
for n in p
]
def setparents(self, p1node, p2node=None):
if p2node is None:
p2node = self._repo.nodeconstants.nullid
dirstate = self._repo.dirstate
with dirstate.parentchange():
copies = dirstate.setparents(p1node, p2node)
pctx = self._repo[p1node]
if copies:
# Adjust copy records; the dirstate cannot do it, as it
# requires access to the parents' manifests. Preserve them
# only for entries added to the first parent.
for f in copies:
if f not in pctx and copies[f] in pctx:
dirstate.copy(copies[f], f)
if p2node == self._repo.nodeconstants.nullid:
for f, s in sorted(dirstate.copies().items()):
if f not in pctx and s not in pctx:
dirstate.copy(None, f)
def _fileinfo(self, path):
# populate __dict__['_manifest'] as workingctx has no _manifestdelta
self._manifest
return super(workingctx, self)._fileinfo(path)
def _buildflagfunc(self):
# Create a fallback function for getting file flags when the
# filesystem doesn't support them
copiesget = self._repo.dirstate.copies().get
parents = self.parents()
if len(parents) < 2:
# when we have one parent, it's easy: copy from parent
man = parents[0].manifest()
def func(f):
f = copiesget(f, f)
return man.flags(f)
else:
# merges are tricky: we try to reconstruct the unstored
# result from the merge (issue1802)
p1, p2 = parents
pa = p1.ancestor(p2)
m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
def func(f):
f = copiesget(f, f) # may be wrong for merges with copies
fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
if fl1 == fl2:
return fl1
if fl1 == fla:
return fl2
if fl2 == fla:
return fl1
return b'' # punt for conflicts
return func
@propertycache
def _flagfunc(self):
return self._repo.dirstate.flagfunc(self._buildflagfunc)
def flags(self, path):
try:
return self._flagfunc(path)
except OSError:
return b''
def filectx(self, path, filelog=None):
"""get a file context from the working directory"""
return workingfilectx(
self._repo, path, workingctx=self, filelog=filelog
)
def dirty(self, missing=False, merge=True, branch=True):
"""check whether a working directory is modified"""
# check subrepos first
for s in sorted(self.substate):
if self.sub(s).dirty(missing=missing):
return True
# check current working dir
return (
(merge and self.p2())
or (branch and self.branch() != self.p1().branch())
or self.modified()
or self.added()
or self.removed()
or (missing and self.deleted())
)
def add(self, list, prefix=b""):
with self._repo.wlock():
ui, ds = self._repo.ui, self._repo.dirstate
uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
rejected = []
lstat = self._repo.wvfs.lstat
for f in list:
# ds.pathto() returns an absolute file when this is invoked from
# the keyword extension. That gets flagged as non-portable on
# Windows, since it contains the drive letter and colon.
scmutil.checkportable(ui, os.path.join(prefix, f))
try:
st = lstat(f)
except OSError:
ui.warn(_(b"%s does not exist!\n") % uipath(f))
rejected.append(f)
continue
limit = ui.configbytes(b'ui', b'large-file-limit')
if limit != 0 and st.st_size > limit:
ui.warn(
_(
b"%s: up to %d MB of RAM may be required "
b"to manage this file\n"
b"(use 'hg revert %s' to cancel the "
b"pending addition)\n"
)
% (f, 3 * st.st_size // 1000000, uipath(f))
)
if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
ui.warn(
_(
b"%s not added: only files and symlinks "
b"supported currently\n"
)
% uipath(f)
)
rejected.append(f)
elif not ds.set_tracked(f):
ui.warn(_(b"%s already tracked!\n") % uipath(f))
return rejected
def forget(self, files, prefix=b""):
with self._repo.wlock():
ds = self._repo.dirstate
uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
rejected = []
for f in files:
if not ds.set_untracked(f):
self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
rejected.append(f)
return rejected
def copy(self, source, dest):
try:
st = self._repo.wvfs.lstat(dest)
except OSError as err:
if err.errno != errno.ENOENT:
raise
self._repo.ui.warn(
_(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
)
return
if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
self._repo.ui.warn(
_(b"copy failed: %s is not a file or a symbolic link\n")
% self._repo.dirstate.pathto(dest)
)
else:
with self._repo.wlock():
ds = self._repo.dirstate
ds.set_tracked(dest)
ds.copy(source, dest)
def match(
self,
pats=None,
include=None,
exclude=None,
default=b'glob',
listsubrepos=False,
badfn=None,
cwd=None,
):
r = self._repo
if not cwd:
cwd = r.getcwd()
# Only a case insensitive filesystem needs magic to translate user input
# to actual case in the filesystem.
icasefs = not util.fscasesensitive(r.root)
return matchmod.match(
r.root,
cwd,
pats,
include,
exclude,
default,
auditor=r.auditor,
ctx=self,
listsubrepos=listsubrepos,
badfn=badfn,
icasefs=icasefs,
)
def _filtersuspectsymlink(self, files):
if not files or self._repo.dirstate._checklink:
return files
# Symlink placeholders may get non-symlink-like contents
# via user error or dereferencing by NFS or Samba servers,
# so we filter out any placeholders that don't look like a
# symlink
sane = []
for f in files:
if self.flags(f) == b'l':
d = self[f].data()
if (
d == b''
or len(d) >= 1024
or b'\n' in d
or stringutil.binary(d)
):
self._repo.ui.debug(
b'ignoring suspect symlink placeholder "%s"\n' % f
)
continue
sane.append(f)
return sane
def _checklookup(self, files):
# check for any possibly clean files
if not files:
return [], [], []
modified = []
deleted = []
fixup = []
pctx = self._parents[0]
# do a full compare of any files that might have changed
for f in sorted(files):
try:
# This will return True for a file that got replaced by a
# directory in the interim, but fixing that is pretty hard.
if (
f not in pctx
or self.flags(f) != pctx.flags(f)
or pctx[f].cmp(self[f])
):
modified.append(f)
else:
fixup.append(f)
except (IOError, OSError):
# A file became inaccessible in between? Mark it as deleted,
# matching dirstate behavior (issue5584).
# The dirstate has more complex behavior around whether a
# missing file matches a directory, etc, but we don't need to
# bother with that: if f has made it to this point, we're sure
# it's in the dirstate.
deleted.append(f)
return modified, deleted, fixup
def _poststatusfixup(self, status, fixup):
"""update dirstate for files that are actually clean"""
poststatus = self._repo.postdsstatus()
if fixup or poststatus or self._repo.dirstate._dirty:
try:
oldid = self._repo.dirstate.identity()
# updating the dirstate is optional
# so we don't wait on the lock
# wlock can invalidate the dirstate, so cache normal _after_
# taking the lock
with self._repo.wlock(False):
dirstate = self._repo.dirstate
if dirstate.identity() == oldid:
if fixup:
if dirstate.pendingparentchange():
normal = lambda f: dirstate.update_file(
f, p1_tracked=True, wc_tracked=True
)
else:
normal = dirstate.set_clean
for f in fixup:
normal(f)
# write changes out explicitly, because nesting
# wlock at runtime may prevent 'wlock.release()'
# after this block from doing so for subsequent
# changing files
tr = self._repo.currenttransaction()
self._repo.dirstate.write(tr)
if poststatus:
for ps in poststatus:
ps(self, status)
else:
# in this case, writing changes out breaks
# consistency, because .hg/dirstate was
# already changed simultaneously after last
# caching (see also issue5584 for detail)
self._repo.ui.debug(
b'skip updating dirstate: identity mismatch\n'
)
except error.LockError:
pass
finally:
# Even if the wlock couldn't be grabbed, clear out the list.
self._repo.clearpostdsstatus()
def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
'''Gets the status from the dirstate -- internal use only.'''
subrepos = []
if b'.hgsub' in self:
subrepos = sorted(self.substate)
cmp, s = self._repo.dirstate.status(
match, subrepos, ignored=ignored, clean=clean, unknown=unknown
)
# check for any possibly clean files
fixup = []
if cmp:
modified2, deleted2, fixup = self._checklookup(cmp)
s.modified.extend(modified2)
s.deleted.extend(deleted2)
if fixup and clean:
s.clean.extend(fixup)
self._poststatusfixup(s, fixup)
if match.always():
# cache for performance
if s.unknown or s.ignored or s.clean:
# "_status" is cached with list*=False in the normal route
self._status = scmutil.status(
s.modified, s.added, s.removed, s.deleted, [], [], []
)
else:
self._status = s
return s
@propertycache
def _copies(self):
p1copies = {}
p2copies = {}
parents = self._repo.dirstate.parents()
p1manifest = self._repo[parents[0]].manifest()
p2manifest = self._repo[parents[1]].manifest()
changedset = set(self.added()) | set(self.modified())
narrowmatch = self._repo.narrowmatch()
for dst, src in self._repo.dirstate.copies().items():
if dst not in changedset or not narrowmatch(dst):
continue
if src in p1manifest:
p1copies[dst] = src
elif src in p2manifest:
p2copies[dst] = src
return p1copies, p2copies
@propertycache
def _manifest(self):
"""generate a manifest corresponding to the values in self._status
This reuses the file nodeids from the parent, but uses special node
identifiers for added and modified files. This is used by the manifest
merge to see that files are different and by the update logic to avoid
deleting newly added files.
"""
return self._buildstatusmanifest(self._status)
def _buildstatusmanifest(self, status):
"""Builds a manifest that includes the given status results."""
parents = self.parents()
man = parents[0].manifest().copy()
ff = self._flagfunc
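# Note (comment added for clarity): added and modified files are mapped to
# the sentinel addednodeid/modifiednodeid values below, so manifest
# comparison treats them as different from any real filelog node.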
for i, l in (
(self._repo.nodeconstants.addednodeid, status.added),
(self._repo.nodeconstants.modifiednodeid, status.modified),
):
for f in l:
man[f] = i
try:
man.setflag(f, ff(f))
except OSError:
pass
for f in status.deleted + status.removed:
if f in man:
del man[f]
return man
def _buildstatus(
self, other, s, match, listignored, listclean, listunknown
):
"""build a status with respect to another context
This includes logic for maintaining the fast path of status when
comparing the working directory against its parent, which is to skip
building a new manifest if self (working directory) is not comparing
against its parent (repo['.']).
"""
s = self._dirstatestatus(match, listignored, listclean, listunknown)
# Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
# might have accidentally ended up with the entire contents of the file
# they are supposed to be linking to.
s.modified[:] = self._filtersuspectsymlink(s.modified)
if other != self._repo[b'.']:
s = super(workingctx, self)._buildstatus(
other, s, match, listignored, listclean, listunknown
)
return s
def _matchstatus(self, other, match):
"""override the match method with a filter for directory patterns
We use inheritance to customize the match.bad method only in cases of
workingctx since it belongs only to the working directory when
comparing against the parent changeset.
If we aren't comparing against the working directory's parent, then we
just use the default match object sent to us.
"""
if other != self._repo[b'.']:
def bad(f, msg):
# 'f' may be a directory pattern from 'match.files()',
# so 'f not in ctx1' is not enough
if f not in other and not other.hasdir(f):
self._repo.ui.warn(
b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
)
match.bad = bad
return match
def walk(self, match):
'''Generates matching file names.'''
return sorted(
self._repo.dirstate.walk(
self._repo.narrowmatch(match),
subrepos=sorted(self.substate),
unknown=True,
ignored=False,
)
)
def matches(self, match):
match = self._repo.narrowmatch(match)
ds = self._repo.dirstate
return sorted(f for f in ds.matches(match) if ds[f] != b'r')
def markcommitted(self, node):
with self._repo.dirstate.parentchange():
for f in self.modified() + self.added():
self._repo.dirstate.update_file(
f, p1_tracked=True, wc_tracked=True
)
for f in self.removed():
self._repo.dirstate.update_file(
f, p1_tracked=False, wc_tracked=False
)
self._repo.dirstate.setparents(node)
self._repo._quick_access_changeid_invalidate()
sparse.aftercommit(self._repo, node)
# write changes out explicitly, because nesting wlock at
# runtime may prevent 'wlock.release()' in 'repo.commit()'
# from immediately doing so for subsequent changing files
self._repo.dirstate.write(self._repo.currenttransaction())
def mergestate(self, clean=False):
if clean:
return mergestatemod.mergestate.clean(self._repo)
return mergestatemod.mergestate.read(self._repo)
class committablefilectx(basefilectx):
"""A committablefilectx provides common functionality for a file context
that wants the ability to commit, e.g. workingfilectx or memfilectx."""
def __init__(self, repo, path, filelog=None, ctx=None):
self._repo = repo
self._path = path
self._changeid = None
self._filerev = self._filenode = None
if filelog is not None:
self._filelog = filelog
if ctx:
self._changectx = ctx
def __nonzero__(self):
return True
__bool__ = __nonzero__
def linkrev(self):
# linked to self._changectx no matter if file is modified or not
return self.rev()
def renamed(self):
path = self.copysource()
if not path:
return None
return (
path,
self._changectx._parents[0]._manifest.get(
path, self._repo.nodeconstants.nullid
),
)
def parents(self):
'''return parent filectxs, following copies if necessary'''
def filenode(ctx, path):
return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
path = self._path
fl = self._filelog
pcl = self._changectx._parents
renamed = self.renamed()
if renamed:
pl = [renamed + (None,)]
else:
pl = [(path, filenode(pcl[0], path), fl)]
for pc in pcl[1:]:
pl.append((path, filenode(pc, path), fl))
return [
self._parentfilectx(p, fileid=n, filelog=l)
for p, n, l in pl
if n != self._repo.nodeconstants.nullid
]
def children(self):
return []
class workingfilectx(committablefilectx):
"""A workingfilectx object makes access to data related to a particular
file in the working directory convenient."""
def __init__(self, repo, path, filelog=None, workingctx=None):
super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
@propertycache
def _changectx(self):
return workingctx(self._repo)
def data(self):
return self._repo.wread(self._path)
def copysource(self):
return self._repo.dirstate.copied(self._path)
def size(self):
return self._repo.wvfs.lstat(self._path).st_size
def lstat(self):
return self._repo.wvfs.lstat(self._path)
def date(self):
t, tz = self._changectx.date()
try:
return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
except OSError as err:
if err.errno != errno.ENOENT:
raise
return (t, tz)
def exists(self):
return self._repo.wvfs.exists(self._path)
def lexists(self):
return self._repo.wvfs.lexists(self._path)
def audit(self):
return self._repo.wvfs.audit(self._path)
def cmp(self, fctx):
"""compare with other file context
returns True if different than fctx.
"""
# fctx should be a filectx (not a workingfilectx)
# invert comparison to reuse the same code path
return fctx.cmp(self)
def remove(self, ignoremissing=False):
"""wraps unlink for a repo's working directory"""
rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
self._repo.wvfs.unlinkpath(
self._path, ignoremissing=ignoremissing, rmdir=rmdir
)
def write(self, data, flags, backgroundclose=False, **kwargs):
"""wraps repo.wwrite"""
return self._repo.wwrite(
self._path, data, flags, backgroundclose=backgroundclose, **kwargs
)
def markcopied(self, src):
"""marks this file a copy of `src`"""
self._repo.dirstate.copy(src, self._path)
def clearunknown(self):
"""Removes conflicting items in the working directory so that
``write()`` can be called successfully.
"""
wvfs = self._repo.wvfs
f = self._path
wvfs.audit(f)
if self._repo.ui.configbool(
b'experimental', b'merge.checkpathconflicts'
):
# remove files under the directory as they should already be
# warned and backed up
if wvfs.isdir(f) and not wvfs.islink(f):
wvfs.rmtree(f, forcibly=True)
for p in reversed(list(pathutil.finddirs(f))):
if wvfs.isfileorlink(p):
wvfs.unlink(p)
break
else:
# don't remove files if path conflicts are not processed
if wvfs.isdir(f) and not wvfs.islink(f):
wvfs.removedirs(f)
def setflags(self, l, x):
self._repo.wvfs.setflags(self._path, l, x)
class overlayworkingctx(committablectx):
"""Wraps another mutable context with a write-back cache that can be
converted into a commit context.
self._cache[path] maps to a dict with keys: {
'exists': bool?
'date': date?
'data': str?
'flags': str?
'copied': str? (path or None)
}
If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
is `False`, the file was deleted.
"""
def __init__(self, repo):
super(overlayworkingctx, self).__init__(repo)
self.clean()
def setbase(self, wrappedctx):
self._wrappedctx = wrappedctx
self._parents = [wrappedctx]
# Drop old manifest cache as it is now out of date.
# This is necessary when, e.g., rebasing several nodes with one
# ``overlayworkingctx`` (e.g. with --collapse).
util.clearcachedproperty(self, b'_manifest')
def setparents(self, p1node, p2node=None):
if p2node is None:
p2node = self._repo.nodeconstants.nullid
assert p1node == self._wrappedctx.node()
self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
def data(self, path):
if self.isdirty(path):
if self._cache[path][b'exists']:
if self._cache[path][b'data'] is not None:
return self._cache[path][b'data']
else:
# Must fallback here, too, because we only set flags.
return self._wrappedctx[path].data()
else:
raise error.ProgrammingError(
b"No such file or directory: %s" % path
)
else:
return self._wrappedctx[path].data()
@propertycache
def _manifest(self):
parents = self.parents()
man = parents[0].manifest().copy()
flag = self._flagfunc
for path in self.added():
man[path] = self._repo.nodeconstants.addednodeid
man.setflag(path, flag(path))
for path in self.modified():
man[path] = self._repo.nodeconstants.modifiednodeid
man.setflag(path, flag(path))
for path in self.removed():
del man[path]
return man
@propertycache
def _flagfunc(self):
def f(path):
return self._cache[path][b'flags']
return f
def files(self):
return sorted(self.added() + self.modified() + self.removed())
def modified(self):
return [
f
for f in self._cache.keys()
if self._cache[f][b'exists'] and self._existsinparent(f)
]
def added(self):
return [
f
for f in self._cache.keys()
if self._cache[f][b'exists'] and not self._existsinparent(f)
]
def removed(self):
return [
f
for f in self._cache.keys()
if not self._cache[f][b'exists'] and self._existsinparent(f)
]
def p1copies(self):
copies = {}
narrowmatch = self._repo.narrowmatch()
for f in self._cache.keys():
if not narrowmatch(f):
continue
copies.pop(f, None) # delete if it exists
source = self._cache[f][b'copied']
if source:
copies[f] = source
return copies
def p2copies(self):
copies = {}
narrowmatch = self._repo.narrowmatch()
for f in self._cache.keys():
if not narrowmatch(f):
continue
copies.pop(f, None) # delete if it exists
source = self._cache[f][b'copied']
if source:
copies[f] = source
return copies
def isinmemory(self):
return True
def filedate(self, path):
if self.isdirty(path):
return self._cache[path][b'date']
else:
return self._wrappedctx[path].date()
def markcopied(self, path, origin):
self._markdirty(
path,
exists=True,
date=self.filedate(path),
flags=self.flags(path),
copied=origin,
)
def copydata(self, path):
if self.isdirty(path):
return self._cache[path][b'copied']
else:
return None
def flags(self, path):
if self.isdirty(path):
if self._cache[path][b'exists']:
return self._cache[path][b'flags']
else:
raise error.ProgrammingError(
b"No such file or directory: %s" % path
)
else:
return self._wrappedctx[path].flags()
def __contains__(self, key):
if key in self._cache:
return self._cache[key][b'exists']
return key in self.p1()
def _existsinparent(self, path):
try:
# ``commitctx`` raises a ``ManifestLookupError`` if a path does not
# exist, unlike ``workingctx``, which returns a ``workingfilectx``
# with an ``exists()`` function.
self._wrappedctx[path]
return True
except error.ManifestLookupError:
return False
def _auditconflicts(self, path):
"""Replicates conflict checks done by wvfs.write().
Since we never write to the filesystem and never call `applyupdates` in
IMM, we'll never check that a path is actually writable -- e.g., because
it adds `a/foo`, but `a` is actually a file in the other commit.
"""
def fail(path, component):
# p1() is the base and we're receiving "writes" for p2()'s
# files.
if b'l' in self.p1()[component].flags():
raise error.Abort(
b"error: %s conflicts with symlink %s "
b"in %d." % (path, component, self.p1().rev())
)
else:
raise error.Abort(
b"error: '%s' conflicts with file '%s' in "
b"%d." % (path, component, self.p1().rev())
)
# Test that each new directory to be created to write this path from p2
# is not a file in p1.
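# For example (illustrative): writing b'a/b/c' makes this loop check the
# prefixes b'', b'a' and b'a/b' against this overlay context.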
components = path.split(b'/')
for i in pycompat.xrange(len(components)):
component = b"/".join(components[0:i])
if component in self:
fail(path, component)
# Test the other direction -- that this path from p2 isn't a directory
# in p1 (test that p1 doesn't have any paths matching `path/*`).
match = self.match([path], default=b'path')
mfiles = list(self.p1().manifest().walk(match))
if len(mfiles) > 0:
if len(mfiles) == 1 and mfiles[0] == path:
return
# omit the files which are deleted in current IMM wctx
mfiles = [m for m in mfiles if m in self]
if not mfiles:
return
raise error.Abort(
b"error: file '%s' cannot be written because "
b" '%s/' is a directory in %s (containing %d "
b"entries: %s)"
% (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
)
def write(self, path, data, flags=b'', **kwargs):
if data is None:
raise error.ProgrammingError(b"data must be non-None")
self._auditconflicts(path)
self._markdirty(
path, exists=True, data=data, date=dateutil.makedate(), flags=flags
)
def setflags(self, path, l, x):
flag = b''
if l:
flag = b'l'
elif x:
flag = b'x'
self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
def remove(self, path):
self._markdirty(path, exists=False)
def exists(self, path):
"""exists behaves like `lexists`, but needs to follow symlinks and
return False if they are broken.
"""
if self.isdirty(path):
# If this path exists and is a symlink, "follow" it by calling
# exists on the destination path.
if (
self._cache[path][b'exists']
and b'l' in self._cache[path][b'flags']
):
return self.exists(self._cache[path][b'data'].strip())
else:
return self._cache[path][b'exists']
return self._existsinparent(path)
def lexists(self, path):
"""lexists returns True if the path exists"""
if self.isdirty(path):
return self._cache[path][b'exists']
return self._existsinparent(path)
def size(self, path):
if self.isdirty(path):
if self._cache[path][b'exists']:
return len(self._cache[path][b'data'])
else:
raise error.ProgrammingError(
b"No such file or directory: %s" % path
)
return self._wrappedctx[path].size()
def tomemctx(
self,
text,
branch=None,
extra=None,
date=None,
parents=None,
user=None,
editor=None,
):
"""Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
committed.
``text`` is the commit message.
``parents`` (optional) are rev numbers.
"""
# Default parents to the wrapped context if not passed.
if parents is None:
parents = self.parents()
if len(parents) == 1:
parents = (parents[0], None)
# ``parents`` is passed as rev numbers; convert to ``commitctxs``.
if parents[1] is None:
parents = (self._repo[parents[0]], None)
else:
parents = (self._repo[parents[0]], self._repo[parents[1]])
files = self.files()
def getfile(repo, memctx, path):
if self._cache[path][b'exists']:
return memfilectx(
repo,
memctx,
path,
self._cache[path][b'data'],
b'l' in self._cache[path][b'flags'],
b'x' in self._cache[path][b'flags'],
self._cache[path][b'copied'],
)
else:
# Returning None, but including the path in `files`, is
# necessary for memctx to register a deletion.
return None
if branch is None:
branch = self._wrappedctx.branch()
return memctx(
self._repo,
parents,
text,
files,
getfile,
date=date,
extra=extra,
user=user,
branch=branch,
editor=editor,
)
def tomemctx_for_amend(self, precursor):
extra = precursor.extra().copy()
extra[b'amend_source'] = precursor.hex()
return self.tomemctx(
text=precursor.description(),
branch=precursor.branch(),
extra=extra,
date=precursor.date(),
user=precursor.user(),
)
def isdirty(self, path):
return path in self._cache
def clean(self):
self._mergestate = None
self._cache = {}
def _compact(self):
"""Removes keys from the cache that are actually clean, by comparing
them with the underlying context.
This can occur during the merge process, e.g. by passing --tool :local
to resolve a conflict.
"""
keys = []
# This won't be perfect, but can help performance significantly when
# using things like remotefilelog.
scmutil.prefetchfiles(
self.repo(),
[
(
self.p1().rev(),
scmutil.matchfiles(self.repo(), self._cache.keys()),
)
],
)
for path in self._cache.keys():
cache = self._cache[path]
try:
underlying = self._wrappedctx[path]
if (
underlying.data() == cache[b'data']
and underlying.flags() == cache[b'flags']
):
keys.append(path)
except error.ManifestLookupError:
# Path not in the underlying manifest (created).
continue
for path in keys:
del self._cache[path]
return keys
def _markdirty(
self, path, exists, data=None, date=None, flags=b'', copied=None
):
# data not provided, let's see if we already have some; if not, let's
# grab it from our underlying context, so that we always have data if
# the file is marked as existing.
if exists and data is None:
oldentry = self._cache.get(path) or {}
data = oldentry.get(b'data')
if data is None:
data = self._wrappedctx[path].data()
self._cache[path] = {
b'exists': exists,
b'data': data,
b'date': date,
b'flags': flags,
b'copied': copied,
}
util.clearcachedproperty(self, b'_manifest')
def filectx(self, path, filelog=None):
return overlayworkingfilectx(
self._repo, path, parent=self, filelog=filelog
)
def mergestate(self, clean=False):
if clean or self._mergestate is None:
self._mergestate = mergestatemod.memmergestate(self._repo)
return self._mergestate
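# Illustrative sketch (not part of the original module): the shape of the
# write-back cache entries that ``overlayworkingctx._markdirty`` records, as
# described in the class docstring above. Paths and contents are invented.
def _example_overlay_cache_entries():
    """Return example cache entries for an added and a removed file."""
    added = {
        b'exists': True,
        b'data': b'new file contents\n',
        b'date': (0, 0),  # a (unixtime, tzoffset) pair such as dateutil.makedate() returns
        b'flags': b'',
        b'copied': None,
    }
    removed = {
        b'exists': False,  # exists=False records a deletion (see remove())
        b'data': None,
        b'date': None,
        b'flags': b'',
        b'copied': None,
    }
    return {b'added.txt': added, b'removed.txt': removed}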
class overlayworkingfilectx(committablefilectx):
"""Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
cache, which can be flushed through later by calling ``flush()``."""
def __init__(self, repo, path, filelog=None, parent=None):
super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
self._repo = repo
self._parent = parent
self._path = path
def cmp(self, fctx):
return self.data() != fctx.data()
def changectx(self):
return self._parent
def data(self):
return self._parent.data(self._path)
def date(self):
return self._parent.filedate(self._path)
def exists(self):
return self.lexists()
def lexists(self):
return self._parent.exists(self._path)
def copysource(self):
return self._parent.copydata(self._path)
def size(self):
return self._parent.size(self._path)
def markcopied(self, origin):
self._parent.markcopied(self._path, origin)
def audit(self):
pass
def flags(self):
return self._parent.flags(self._path)
def setflags(self, islink, isexec):
return self._parent.setflags(self._path, islink, isexec)
def write(self, data, flags, backgroundclose=False, **kwargs):
return self._parent.write(self._path, data, flags, **kwargs)
def remove(self, ignoremissing=False):
return self._parent.remove(self._path)
def clearunknown(self):
pass
class workingcommitctx(workingctx):
"""A workingcommitctx object makes access to data related to
the revision being committed convenient.
This hides changes in the working directory, if they aren't
committed in this context.
"""
def __init__(
self, repo, changes, text=b"", user=None, date=None, extra=None
):
super(workingcommitctx, self).__init__(
repo, text, user, date, extra, changes
)
def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
"""Return matched files only in ``self._status``
Uncommitted files appear "clean" via this context, even if
they aren't actually so in the working directory.
"""
if clean:
clean = [f for f in self._manifest if f not in self._changedset]
else:
clean = []
return scmutil.status(
[f for f in self._status.modified if match(f)],
[f for f in self._status.added if match(f)],
[f for f in self._status.removed if match(f)],
[],
[],
[],
clean,
)
@propertycache
def _changedset(self):
"""Return the set of files changed in this context"""
changed = set(self._status.modified)
changed.update(self._status.added)
changed.update(self._status.removed)
return changed
def makecachingfilectxfn(func):
"""Create a filectxfn that caches based on the path.
We can't use util.cachefunc because it uses all arguments as the cache
key and this creates a cycle since the arguments include the repo and
memctx.
"""
cache = {}
def getfilectx(repo, memctx, path):
if path not in cache:
cache[path] = func(repo, memctx, path)
return cache[path]
return getfilectx
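# Hedged usage sketch (not part of the original module): wrapping a plain
# filectxfn with makecachingfilectxfn so repeated lookups of the same path
# reuse a single result; the file contents here are invented.
def _example_caching_filectxfn():
    def plainfilectxfn(repo, memctx, path):
        return memfilectx(repo, memctx, path, b'contents of %s\n' % path)

    return makecachingfilectxfn(plainfilectxfn)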
def memfilefromctx(ctx):
"""Given a context return a memfilectx for ctx[path]
This is a convenience method for building a memctx based on another
context.
"""
def getfilectx(repo, memctx, path):
fctx = ctx[path]
copysource = fctx.copysource()
return memfilectx(
repo,
memctx,
path,
fctx.data(),
islink=fctx.islink(),
isexec=fctx.isexec(),
copysource=copysource,
)
return getfilectx
def memfilefrompatch(patchstore):
"""Given a patch (e.g. patchstore object) return a memfilectx
This is a convenience method for building a memctx based on a patchstore.
"""
def getfilectx(repo, memctx, path):
data, mode, copysource = patchstore.getfile(path)
if data is None:
return None
islink, isexec = mode
return memfilectx(
repo,
memctx,
path,
data,
islink=islink,
isexec=isexec,
copysource=copysource,
)
return getfilectx
class memctx(committablectx):
"""Use memctx to perform in-memory commits via localrepo.commitctx().
Revision information is supplied at initialization time, while related
file data is made available through a callback mechanism. 'repo' is the
current localrepo, 'parents' is a sequence of two parent revision
identifiers (pass None for every
missing parent), 'text' is the commit message and 'files' lists
names of files touched by the revision (normalized and relative to
repository root).
filectxfn(repo, memctx, path) is a callable receiving the
repository, the current memctx object and the normalized path of
requested file, relative to repository root. It is fired by the
commit function for every file in 'files', but calls order is
undefined. If the file is available in the revision being
committed (updated or added), filectxfn returns a memfilectx
object. If the file was removed, filectxfn returns None for recent
Mercurial. Moved files are represented by marking the source file
removed and the new file added with copy information (see
memfilectx).
user receives the committer name and defaults to current
repository username, date is the commit date in any format
supported by dateutil.parsedate() and defaults to current date, extra
is a dictionary of metadata or is left empty.
"""
# Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
# Extensions that need to retain compatibility across Mercurial 3.1 can use
# this field to determine what to do in filectxfn.
_returnnoneformissingfiles = True
def __init__(
self,
repo,
parents,
text,
files,
filectxfn,
user=None,
date=None,
extra=None,
branch=None,
editor=None,
):
super(memctx, self).__init__(
repo, text, user, date, extra, branch=branch
)
self._rev = None
self._node = None
parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
p1, p2 = parents
self._parents = [self._repo[p] for p in (p1, p2)]
files = sorted(set(files))
self._files = files
self.substate = {}
if isinstance(filectxfn, patch.filestore):
filectxfn = memfilefrompatch(filectxfn)
elif not callable(filectxfn):
# if store is not callable, wrap it in a function
filectxfn = memfilefromctx(filectxfn)
# memoizing increases performance for e.g. vcs convert scenarios.
self._filectxfn = makecachingfilectxfn(filectxfn)
if editor:
self._text = editor(self._repo, self, [])
self._repo.savecommitmessage(self._text)
def filectx(self, path, filelog=None):
"""get a file context from the working directory
Returns None if file doesn't exist and should be removed."""
return self._filectxfn(self._repo, self, path)
def commit(self):
"""commit context to the repo"""
return self._repo.commitctx(self)
@propertycache
def _manifest(self):
"""generate a manifest based on the return values of filectxfn"""
# keep this simple for now; just worry about p1
pctx = self._parents[0]
man = pctx.manifest().copy()
for f in self._status.modified:
man[f] = self._repo.nodeconstants.modifiednodeid
for f in self._status.added:
man[f] = self._repo.nodeconstants.addednodeid
for f in self._status.removed:
if f in man:
del man[f]
return man
@propertycache
def _status(self):
"""Calculate exact status from ``files`` specified at construction"""
man1 = self.p1().manifest()
p2 = self._parents[1]
# "1 < len(self._parents)" can't be used for checking
# existence of the 2nd parent, because "memctx._parents" is
# explicitly initialized by a list whose length is 2.
if p2.rev() != nullrev:
man2 = p2.manifest()
managing = lambda f: f in man1 or f in man2
else:
managing = lambda f: f in man1
modified, added, removed = [], [], []
for f in self._files:
if not managing(f):
added.append(f)
elif self[f]:
modified.append(f)
else:
removed.append(f)
return scmutil.status(modified, added, removed, [], [], [], [])
def parents(self):
if self._parents[1].rev() == nullrev:
return [self._parents[0]]
return self._parents
class memfilectx(committablefilectx):
"""memfilectx represents an in-memory file to commit.
See memctx and committablefilectx for more details.
"""
def __init__(
self,
repo,
changectx,
path,
data,
islink=False,
isexec=False,
copysource=None,
):
"""
path is the normalized file path relative to repository root.
data is the file content as a string.
islink is True if the file is a symbolic link.
isexec is True if the file is executable.
copysource is the source file path if the current file was copied in the
revision being committed, or None."""
super(memfilectx, self).__init__(repo, path, None, changectx)
self._data = data
if islink:
self._flags = b'l'
elif isexec:
self._flags = b'x'
else:
self._flags = b''
self._copysource = copysource
def copysource(self):
return self._copysource
def cmp(self, fctx):
return self.data() != fctx.data()
def data(self):
return self._data
def remove(self, ignoremissing=False):
"""wraps unlink for a repo's working directory"""
# need to figure out what to do here
del self._changectx[self._path]
def write(self, data, flags, **kwargs):
"""wraps repo.wwrite"""
self._data = data
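# Hedged usage sketch (not part of the original module): assembling an
# in-memory commit with memctx/memfilectx using the signatures defined above.
# ``repo`` is assumed to be an open localrepo; the file name, contents and
# user are invented for illustration.
def _example_inmemory_commit(repo):
    files = [b'example.txt']

    def getfilectx(repo, memctx, path):
        # Returning a memfilectx marks the file as added/modified; returning
        # None instead would record a deletion (see the memctx docstring).
        return memfilectx(repo, memctx, path, b'example contents\n')

    ctx = memctx(
        repo,
        [repo[b'.'].node(), None],  # second parent omitted
        b'example in-memory commit',
        files,
        getfilectx,
        user=b'Example <example@example.com>',
    )
    return ctx.commit()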
class metadataonlyctx(committablectx):
"""Like memctx but it's reusing the manifest of different commit.
Intended to be used by lightweight operations that are creating
metadata-only changes.
Revision information is supplied at initialization time. 'repo' is the
current localrepo, 'ctx' is the original revision whose manifest we're reusing,
'parents' is a sequence of two parent revision identifiers (pass None for
every missing parent), 'text' is the commit message.
user receives the committer name and defaults to current repository
username, date is the commit date in any format supported by
dateutil.parsedate() and defaults to current date, extra is a dictionary of
metadata or is left empty.
"""
def __init__(
self,
repo,
originalctx,
parents=None,
text=None,
user=None,
date=None,
extra=None,
editor=None,
):
if text is None:
text = originalctx.description()
super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
self._rev = None
self._node = None
self._originalctx = originalctx
self._manifestnode = originalctx.manifestnode()
if parents is None:
parents = originalctx.parents()
else:
parents = [repo[p] for p in parents if p is not None]
parents = parents[:]
while len(parents) < 2:
parents.append(repo[nullrev])
p1, p2 = self._parents = parents
# sanity check to ensure that the reused manifest parents are
# manifests of our commit parents
mp1, mp2 = self.manifestctx().parents
if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
raise RuntimeError(
r"can't reuse the manifest: its p1 "
r"doesn't match the new ctx p1"
)
if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
raise RuntimeError(
r"can't reuse the manifest: "
r"its p2 doesn't match the new ctx p2"
)
self._files = originalctx.files()
self.substate = {}
if editor:
self._text = editor(self._repo, self, [])
self._repo.savecommitmessage(self._text)
def manifestnode(self):
return self._manifestnode
@property
def _manifestctx(self):
return self._repo.manifestlog[self._manifestnode]
def filectx(self, path, filelog=None):
return self._originalctx.filectx(path, filelog=filelog)
def commit(self):
"""commit context to the repo"""
return self._repo.commitctx(self)
@property
def _manifest(self):
return self._originalctx.manifest()
@propertycache
def _status(self):
"""Calculate exact status from ``files`` specified in the ``origctx``
and parents manifests.
"""
man1 = self.p1().manifest()
p2 = self._parents[1]
# "1 < len(self._parents)" can't be used for checking
# existence of the 2nd parent, because "metadataonlyctx._parents" is
# explicitly initialized by a list whose length is 2.
if p2.rev() != nullrev:
man2 = p2.manifest()
managing = lambda f: f in man1 or f in man2
else:
managing = lambda f: f in man1
modified, added, removed = [], [], []
for f in self._files:
if not managing(f):
added.append(f)
elif f in self:
modified.append(f)
else:
removed.append(f)
return scmutil.status(modified, added, removed, [], [], [], [])
class arbitraryfilectx(object):
"""Allows you to use filectx-like functions on a file in an arbitrary
location on disk, possibly not in the working directory.
"""
def __init__(self, path, repo=None):
# Repo is optional because contrib/simplemerge uses this class.
self._repo = repo
self._path = path
def cmp(self, fctx):
# filecmp follows symlinks whereas `cmp` should not, so skip the fast
# path if either side is a symlink.
symlinks = b'l' in self.flags() or b'l' in fctx.flags()
if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
# Add a fast-path for merge if both sides are disk-backed.
# Note that filecmp uses the opposite return values (True if same)
# from our cmp functions (True if different).
return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
return self.data() != fctx.data()
def path(self):
return self._path
def flags(self):
return b''
def data(self):
return util.readfile(self._path)
def decodeddata(self):
with open(self._path, b"rb") as f:
return f.read()
def remove(self):
util.unlink(self._path)
def write(self, data, flags, **kwargs):
assert not flags
with open(self._path, b"wb") as f:
f.write(data)
| smmribeiro/intellij-community | plugins/hg4idea/testData/bin/mercurial/context.py | Python | apache-2.0 | 102,025 | ["VisIt"] | 9c77d53cf1dd4669e821b650c0879d1322057816b6e18bd11e1238ffd602dca3 |
# Autodetecting setup.py script for building the Python extensions
#
import sys, os, importlib.machinery, re, optparse
from glob import glob
import importlib._bootstrap
import importlib.util
import sysconfig
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.build_scripts import build_scripts
from distutils.spawn import find_executable
cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ
def get_platform():
# cross build
if "_PYTHON_HOST_PLATFORM" in os.environ:
return os.environ["_PYTHON_HOST_PLATFORM"]
# Get value of sys.platform
if sys.platform.startswith('osf1'):
return 'osf1'
return sys.platform
host_platform = get_platform()
# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS"))
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
"""Add the directory 'dir' to the list 'dirlist' (after any relative
directories) if:
1) 'dir' is not already in 'dirlist'
2) 'dir' actually exists, and is a directory.
"""
if dir is None or not os.path.isdir(dir) or dir in dirlist:
return
for i, path in enumerate(dirlist):
if not os.path.isabs(path):
dirlist.insert(i + 1, dir)
return
dirlist.insert(0, dir)
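# Illustrative example (not from the original script): with
# dirlist == ['.', '/usr/lib'] and an existing directory '/usr/local/lib',
# add_dir_to_list gives ['.', '/usr/local/lib', '/usr/lib'] -- the new entry
# is inserted right after the first relative entry.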
def macosx_sdk_root():
"""
Return the directory of the current OSX SDK,
or '/' if no SDK was specified.
"""
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
return sysroot
def is_macosx_sdk_path(path):
"""
Returns True if 'path' can be located in an OSX SDK
"""
return ( (path.startswith('/usr/') and not path.startswith('/usr/local'))
or path.startswith('/System/')
or path.startswith('/Library/') )
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
and returns a possibly-empty list of additional directories, or None
if the file couldn't be found at all.
'filename' is the name of a file, such as readline.h or libcrypto.a.
'std_dirs' is the list of standard system directories; if the
file is found in one of them, no additional directives are needed.
'paths' is a list of additional locations to check; if the file is
found in one of them, the resulting list will contain the directory.
"""
if host_platform == 'darwin':
# Honor the MacOSX SDK setting when one was specified.
# An SDK is a directory with the same structure as a real
# system, but with only header files and libraries.
sysroot = macosx_sdk_root()
# Check the standard locations
for dir in std_dirs:
f = os.path.join(dir, filename)
if host_platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f): return []
# Check the additional directories
for dir in paths:
f = os.path.join(dir, filename)
if host_platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f):
return [dir]
# Not found anywhere
return None
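# Illustrative summary (not from the original script) of find_file's return
# values: [] when the file lives in one of 'std_dirs' (no extra directives
# needed), [dir] when it is found in one of the additional 'paths', and None
# when it cannot be found at all.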
def find_library_file(compiler, libname, std_dirs, paths):
result = compiler.find_library_file(std_dirs + paths, libname)
if result is None:
return None
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
# Check whether the found file is in one of the standard directories
dirname = os.path.dirname(result)
for p in std_dirs:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if host_platform == 'darwin' and is_macosx_sdk_path(p):
if os.path.join(sysroot, p[1:]) == dirname:
return [ ]
if p == dirname:
return [ ]
# Otherwise, it must have been in one of the additional directories,
# so we have to figure out which one.
for p in paths:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if host_platform == 'darwin' and is_macosx_sdk_path(p):
if os.path.join(sysroot, p[1:]) == dirname:
return [ p ]
if p == dirname:
return [p]
else:
assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
"""Returns whether the module 'modname' is present in the list
of extensions 'extlist'."""
extlist = [ext for ext in extlist if ext.name == modname]
return len(extlist)
def find_module_file(module, dirlist):
"""Find a module in a set of possible folders. If it is not found
return the unadorned filename"""
list = find_file(module, [], dirlist)
if not list:
return module
if len(list) > 1:
log.info("WARNING: multiple copies of %s found"%module)
return os.path.join(list[0], module)
class PyBuildExt(build_ext):
def __init__(self, dist):
build_ext.__init__(self, dist)
self.failed = []
def build_extensions(self):
# Detect which modules should be compiled
missing = self.detect_modules()
# Remove modules that are present on the disabled list
extensions = [ext for ext in self.extensions
if ext.name not in disabled_module_list]
# move ctypes to the end, it depends on other modules
ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
if "_ctypes" in ext_map:
ctypes = extensions.pop(ext_map["_ctypes"])
extensions.append(ctypes)
self.extensions = extensions
# Fix up the autodetected modules, prefixing all the source files
# with Modules/.
srcdir = sysconfig.get_config_var('srcdir')
if not srcdir:
# Maybe running on Windows but not using CYGWIN?
raise ValueError("No source directory; cannot proceed.")
srcdir = os.path.abspath(srcdir)
moddirlist = [os.path.join(srcdir, 'Modules')]
# Fix up the paths for scripts, too
self.distribution.scripts = [os.path.join(srcdir, filename)
for filename in self.distribution.scripts]
# Python header files
headers = [sysconfig.get_config_h_filename()]
headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))
for ext in self.extensions[:]:
ext.sources = [ find_module_file(filename, moddirlist)
for filename in ext.sources ]
if ext.depends is not None:
ext.depends = [find_module_file(filename, moddirlist)
for filename in ext.depends]
else:
ext.depends = []
# re-compile extensions if a header file has been changed
ext.depends.extend(headers)
# If a module has already been built statically,
# don't build it here
if ext.name in sys.builtin_module_names:
self.extensions.remove(ext)
# Parse Modules/Setup and Modules/Setup.local to figure out which
# modules are turned on in the file.
remove_modules = []
for filename in ('Modules/Setup', 'Modules/Setup.local'):
input = text_file.TextFile(filename, join_lines=1)
while 1:
line = input.readline()
if not line: break
line = line.split()
remove_modules.append(line[0])
input.close()
for ext in self.extensions[:]:
if ext.name in remove_modules:
self.extensions.remove(ext)
# When you run "make CC=altcc" or something similar, you really want
# those environment variables passed into the setup.py phase. Here's
# a small set of useful ones.
compiler = os.environ.get('CC')
args = {}
# unfortunately, distutils doesn't let us provide separate C and C++
# compilers
if compiler is not None:
(ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
self.compiler.set_executables(**args)
build_ext.build_extensions(self)
longest = max([len(e.name) for e in self.extensions])
if self.failed:
longest = max(longest, max([len(name) for name in self.failed]))
def print_three_column(lst):
lst.sort(key=str.lower)
# guarantee zip() doesn't drop anything
while len(lst) % 3:
lst.append("")
for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
print("%-*s %-*s %-*s" % (longest, e, longest, f,
longest, g))
if missing:
print()
print("Python build finished successfully!")
print("The necessary bits to build these optional modules were not "
"found:")
print_three_column(missing)
print("To find the necessary bits, look in setup.py in"
" detect_modules() for the module's name.")
print()
if self.failed:
failed = self.failed[:]
print()
print("Failed to build these modules:")
print_three_column(failed)
print()
def build_extension(self, ext):
if ext.name == '_ctypes':
if not self.configure_ctypes(ext):
return
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsError) as why:
self.announce('WARNING: building of extension "%s" failed: %s' %
(ext.name, sys.exc_info()[1]))
self.failed.append(ext.name)
return
# Workaround for Mac OS X: The Carbon-based modules cannot be
# reliably imported into a command-line Python
if 'Carbon' in ext.extra_link_args:
self.announce(
'WARNING: skipping import check for Carbon-based "%s"' %
ext.name)
return
if host_platform == 'darwin' and (
sys.maxsize > 2**32 and '-arch' in ext.extra_link_args):
# Don't bother doing an import check when an extension was
# built with an explicit '-arch' flag on OSX. That's currently
# only used to build 32-bit only extensions in a 4-way
# universal build and loading 32-bit code into a 64-bit
# process will fail.
self.announce(
'WARNING: skipping import check for "%s"' %
ext.name)
return
# Workaround for Cygwin: Cygwin currently has fork issues when many
# modules have been imported
if host_platform == 'cygwin':
self.announce('WARNING: skipping import check for Cygwin-based "%s"'
% ext.name)
return
ext_filename = os.path.join(
self.build_lib,
self.get_ext_filename(self.get_ext_fullname(ext.name)))
# If the build directory didn't exist when setup.py was
# started, sys.path_importer_cache has a negative result
# cached. Clear that cache before trying to import.
sys.path_importer_cache.clear()
# Don't try to load extensions for cross builds
if cross_compiling:
return
loader = importlib.machinery.ExtensionFileLoader(ext.name, ext_filename)
spec = importlib.util.spec_from_file_location(ext.name, ext_filename,
loader=loader)
try:
importlib._bootstrap._SpecMethods(spec).load()
except ImportError as why:
self.failed.append(ext.name)
self.announce('*** WARNING: renaming "%s" since importing it'
' failed: %s' % (ext.name, why), level=3)
assert not self.inplace
basename, tail = os.path.splitext(ext_filename)
newname = basename + "_failed" + tail
if os.path.exists(newname):
os.remove(newname)
os.rename(ext_filename, newname)
# XXX -- This relies on a Vile HACK in
# distutils.command.build_ext.build_extension(). The
# _built_objects attribute is stored there strictly for
# use here.
# If there is a failure, _built_objects may not be there,
# so catch the AttributeError and move on.
try:
for filename in self._built_objects:
os.remove(filename)
except AttributeError:
self.announce('unable to remove files (ignored)')
except:
exc_type, why, tb = sys.exc_info()
self.announce('*** WARNING: importing extension "%s" '
'failed with %s: %s' % (ext.name, exc_type, why),
level=3)
self.failed.append(ext.name)
def add_multiarch_paths(self):
# Debian/Ubuntu multiarch support.
# https://wiki.ubuntu.com/MultiarchSpec
cc = sysconfig.get_config_var('CC')
tmpfile = os.path.join(self.build_temp, 'multiarch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system(
'%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
multiarch_path_component = ''
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
multiarch_path_component = fp.readline().strip()
finally:
os.unlink(tmpfile)
if multiarch_path_component != '':
add_dir_to_list(self.compiler.library_dirs,
'/usr/lib/' + multiarch_path_component)
add_dir_to_list(self.compiler.include_dirs,
'/usr/include/' + multiarch_path_component)
return
if not find_executable('dpkg-architecture'):
return
opt = ''
if cross_compiling:
opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE')
tmpfile = os.path.join(self.build_temp, 'multiarch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system(
'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
(opt, tmpfile))
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
multiarch_path_component = fp.readline().strip()
add_dir_to_list(self.compiler.library_dirs,
'/usr/lib/' + multiarch_path_component)
add_dir_to_list(self.compiler.include_dirs,
'/usr/include/' + multiarch_path_component)
finally:
os.unlink(tmpfile)
def add_gcc_paths(self):
gcc = sysconfig.get_config_var('CC')
tmpfile = os.path.join(self.build_temp, 'gccpaths')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile))
is_gcc = False
in_incdirs = False
inc_dirs = []
lib_dirs = []
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
for line in fp.readlines():
if line.startswith("gcc version"):
is_gcc = True
elif line.startswith("#include <...>"):
in_incdirs = True
elif line.startswith("End of search list"):
in_incdirs = False
elif is_gcc and line.startswith("LIBRARY_PATH"):
for d in line.strip().split("=")[1].split(":"):
d = os.path.normpath(d)
if '/gcc/' not in d:
add_dir_to_list(self.compiler.library_dirs,
d)
elif is_gcc and in_incdirs and '/gcc/' not in line:
add_dir_to_list(self.compiler.include_dirs,
line.strip())
finally:
os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used, but the local build
# directories (i.e. '.' and 'Include') must be first. See issue
# 10520.
if not cross_compiling:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
# only change this for cross builds for 3.3, issues on Mageia
if cross_compiling:
self.add_gcc_paths()
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the values were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
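# Illustrative example (comment added): with LDFLAGS='-L/opt/foo/lib' in the
# Makefile, optparse yields dirs == ['/opt/foo/lib'] and that directory is
# handed to add_dir_to_list for self.compiler.library_dirs (it is only kept
# if it actually exists on disk).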
if os.path.normpath(sys.base_prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR when building a framework
# (PYTHONFRAMEWORK is set) to avoid linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
if not cross_compiling:
lib_dirs = self.compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs = self.compiler.include_dirs + ['/usr/include']
else:
lib_dirs = self.compiler.library_dirs[:]
inc_dirs = self.compiler.include_dirs[:]
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
with open(config_h) as file:
config_h_vars = sysconfig.parse_config_h(file)
srcdir = sysconfig.get_config_var('srcdir')
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if host_platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
#
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses
# directories with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if host_platform == 'darwin':
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# time libraries: librt may be needed for clock_gettime()
time_libs = []
lib = sysconfig.get_config_var('TIMEMODULE_LIB')
if lib:
time_libs.append(lib)
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=time_libs) )
exts.append( Extension('_datetime', ['_datetimemodule.c']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# C-optimized pickle replacement
exts.append( Extension("_pickle", ["_pickle.c"]) )
# atexit
exts.append( Extension("atexit", ["atexitmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# Python PEP-3118 (buffer protocol) test module
exts.append( Extension('_testbuffer', ['_testbuffer.c']) )
# Test loading multiple modules from one compiled file (http://bugs.python.org/issue16421)
exts.append( Extension('_testimportmultiple', ['_testimportmultiple.c']) )
# profiler (_lsprof is for cProfile.py)
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c']) )
# _opcode module
exts.append( Extension('_opcode', ['_opcode.c']) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# Memory-mapped files (also works on Win32).
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Cannot use os.popen here in py3k.
tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Determine if readline is already linked against curses or tinfo.
if do_readline:
if cross_compiling:
ret = os.system("%s -d %s | grep '(NEEDED)' > %s" \
% (sysconfig.get_config_var('READELF'),
do_readline, tmpfile))
elif find_executable('ldd'):
ret = os.system("ldd %s > %s" % (do_readline, tmpfile))
else:
ret = 256
if ret >> 8 == 0:
with open(tmpfile) as fp:
for ln in fp:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
# termcap interface split out from ncurses
if 'tinfo' in ln:
readline_termcap_library = 'tinfo'
break
if os.path.exists(tmpfile):
os.unlink(tmpfile)
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if dep_target and dep_target.split('.') < ['10', '5']:
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('_crypt', ['_cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# POSIX subprocess module helper.
exts.append( Extension('_posixsubprocess', ['_posixsubprocess.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
os.getenv("KBE_ROOT") + '/kbe/src/lib/dependencies/openssl/include/',
os.getenv("KBE_ROOT") + 'kbe/src/lib/dependencies/openssl/include/',
os.getcwd()[0 : os.getcwd().find("kbe/src/lib") + len("kbe/src/lib")] + '/dependencies/openssl/include/',
]
ssl_incs = find_file('openssl/ssl.h', [],
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', search_for_ssl_incs_in,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',[],
[os.getenv("KBE_ROOT") + '/kbe/src/libs/',
os.getenv("KBE_ROOT") + 'kbe/src/libs/',
os.getcwd()[0 : os.getcwd().find("kbe/src/") + len("kbe/src/")] + 'libs/',
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
r'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if host_platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
with open(name, 'r') as incfile:
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = int(m.group(1), 16)
break
except IOError as msg:
print("IOError while reading opensshv.h:", msg)
#print('openssl_ver = 0x%08x' % openssl_ver)
min_openssl_ver = 0x00907000
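# (comment added) OPENSSL_VERSION_NUMBER 0x00907000 corresponds to
# OpenSSL 0.9.7, the minimum this script accepts for building _hashlib.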
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
depends = ['hashlib.h'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
# We always compile these even when OpenSSL is available (issue #14693).
# It's harmless and the object code is tiny (40-50 KB per module,
# only loaded when actually used).
exts.append( Extension('_sha256', ['sha256module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha512', ['sha512module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_md5', ['md5module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha1', ['sha1module.c'],
depends=['hashlib.h']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module dbm/__init__.py provides an
# implementation independent wrapper for these; dbm/dumb.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 3)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
return True
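        # For example, with min_db_ver == (3, 3) and max_db_ver == (5, 3):
        #   allow_db_ver((4, 7)) -> True, allow_db_ver((3, 2)) -> False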
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
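        # With the limits above, list(gen_db_minor_ver_nums(4)) == [0, 1, 2, 3]
        # and list(gen_db_minor_ver_nums(3)) == [3].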
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
if cross_compiling:
db_inc_paths = []
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print("db: looking for db.h in", f)
if os.path.exists(f):
with open(f, 'rb') as file:
f = file.read()
m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print("db.h:", db_ver, "patch", db_patch,
"being ignored (4.6.x must be >= 4.6.21)")
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print("db.h: found", db_ver, "in", d)
else:
# we already found a header for this library version
if db_setup_debug: print("db.h: ignoring", d)
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
                            print("db.h: no version number in", d)
db_found_vers = list(db_ver_inc_map.keys())
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if host_platform != 'darwin':
db_dirs_to_check = list(filter(os.path.isdir, db_dirs_to_check))
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
                    db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print("db lib: ", dblib, "not found")
except db_found:
if db_setup_debug:
print("bsddb using BerkeleyDB lib:", db_ver, dblib)
print("bsddb lib dir:", dblib_dir, " inc dir:", db_incdir)
dblibs = [dblib]
# Only add the found library and include directories if they aren't
# already being searched. This avoids an explicit runtime library
# dependency.
if db_incdir in inc_dirs:
db_incs = None
else:
db_incs = [db_incdir]
if dblib_dir[0] in lib_dirs:
dblib_dir = None
else:
if db_setup_debug: print("db: no appropriate library found")
db_incs = None
dblibs = []
dblib_dir = None
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
if cross_compiling:
sqlite_inc_paths = []
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
for d_ in inc_dirs + sqlite_inc_paths:
d = d_
if host_platform == 'darwin' and is_macosx_sdk_path(d):
d = os.path.join(sysroot, d[1:])
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print("sqlite: found %s"%f)
with open(f) as file:
incf = file.read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
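                # The pattern matches lines such as: #define SQLITE_VERSION "3.8.2"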
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print("%s/sqlite3.h: version %s"%(d, sqlite_version))
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
                            print("%s: version %s is too old, need >= %s"%(d,
                                        sqlite_version, MIN_SQLITE_VERSION))
elif sqlite_setup_debug:
print("sqlite: %s had no SQLITE_VERSION"%(f,))
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if host_platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Enable support for loadable extensions in the sqlite3 module
# if --enable-loadable-sqlite-extensions configure option is used.
if '--enable-loadable-sqlite-extensions' not in sysconfig.get_config_var("CONFIG_ARGS"):
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if host_platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
include_dirs = ["Modules/_sqlite"]
# Only include the directory where sqlite was found if it does
# not already exist in set include directories, otherwise you
# can end up with a bad search path order.
if sqlite_incdir not in self.compiler.include_dirs:
include_dirs.append(sqlite_incdir)
# avoid a runtime library path for a system library dir
if sqlite_libdir and sqlite_libdir[0] in lib_dirs:
sqlite_libdir = None
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=include_dirs,
library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
dbm_setup_debug = False # verbose debug prints from this script?
dbm_order = ['gdbm']
# The standard Unix dbm module:
if host_platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
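                # e.g. --with-dbmliborder=gdbm:ndbm gives dbm_order == ['gdbm', 'ndbm']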
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others have -lgdbm_compat,
# others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
elif self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
if dbm_setup_debug: print("building dbm using ndbm")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if dblibs:
if dbm_setup_debug: print("building dbm using bdb")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('_dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('_gdbm', ['_gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('_gdbm')
# Unix-only modules
if host_platform != 'win32':
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
exts.append( Extension('resource', ['resource.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if (host_platform not in ['cygwin', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
curses_defines = []
curses_includes = []
panel_library = 'panel'
if curses_library == 'ncursesw':
curses_defines.append(('HAVE_NCURSESW', '1'))
curses_includes.append('/usr/include/ncursesw')
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
if host_platform == 'darwin':
# On OS X, there is no separate /usr/lib/libncursesw nor
# libpanelw. If we are here, we found a locally-supplied
                # version of libncursesw. There should also be a
# libpanelw. _XOPEN_SOURCE defines are usually excluded
# for OS X but we need _XOPEN_SOURCE_EXTENDED here for
# ncurses wide char support
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
elif host_platform == 'darwin' and curses_library == 'ncurses':
            # Building with the system-supplied combined libncurses/libpanel
curses_defines.append(('HAVE_NCURSESW', '1'))
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
if curses_library.startswith('ncurses'):
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = curses_libs) )
elif curses_library == 'curses' and host_platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
define_macros=curses_defines,
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h):
zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:])
with open(zlib_h) as fp:
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if host_platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if host_platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('_bz2', ['_bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('_bz2')
# LZMA compression support.
if self.compiler.find_library_file(lib_dirs, 'lzma'):
exts.append( Extension('_lzma', ['_lzmamodule.c'],
libraries = ['lzma']) )
else:
missing.append('_lzma')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
expat_depends = ['expat/ascii.h',
'expat/asciitab.h',
'expat/expat.h',
'expat/expat_config.h',
'expat/expat_external.h',
'expat/internal.h',
'expat/latin1tab.h',
'expat/utf8tab.h',
'expat/xmlrole.h',
'expat/xmltok.h',
'expat/xmltok_impl.h'
]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources,
depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
depends = ['pyexpat.c'] + expat_sources +
expat_depends,
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
# Stefan Krah's _decimal module
exts.append(self._decimal_ext())
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if host_platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif host_platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif host_platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif host_platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif host_platform.startswith('openbsd'):
macros = dict()
libraries = []
elif host_platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if host_platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=list(macros.items()),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if host_platform.startswith(('linux', 'freebsd', 'gnukfreebsd')):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if host_platform == 'darwin':
exts.append(
Extension('_scproxy', ['_scproxy.c'],
extra_link_args=[
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation',
]))
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
if 'd' not in sys.abiflags:
ext = Extension('xxlimited', ['xxlimited.c'],
define_macros=[('Py_LIMITED_API', '0x03040000')])
self.extensions.append(ext)
return missing
def detect_tkinter_explicitly(self):
# Build _tkinter using explicit locations for Tcl/Tk.
#
# This is enabled when both arguments are given to ./configure:
#
# --with-tcltk-includes="-I/path/to/tclincludes \
# -I/path/to/tkincludes"
# --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \
# -L/path/to/tklibs -ltkm.n"
#
# These values can also be specified or overriden via make:
# make TCLTK_INCLUDES="..." TCLTK_LIBS="..."
#
# This can be useful for building and testing tkinter with multiple
# versions of Tcl/Tk. Note that a build of Tk depends on a particular
# build of Tcl so you need to specify both arguments and use care when
# overriding.
# The _TCLTK variables are created in the Makefile sharedmods target.
tcltk_includes = os.environ.get('_TCLTK_INCLUDES')
tcltk_libs = os.environ.get('_TCLTK_LIBS')
if not (tcltk_includes and tcltk_libs):
# Resume default configuration search.
return 0
extra_compile_args = tcltk_includes.split()
extra_link_args = tcltk_libs.split()
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
extra_compile_args = extra_compile_args,
extra_link_args = extra_link_args,
)
self.extensions.append(ext)
return 1
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/Library/Frameworks',
'/System/Library/Frameworks/',
            join(os.getenv('HOME') or '', 'Library/Frameworks')
]
sysroot = macosx_sdk_root()
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if is_macosx_sdk_path(F):
if not exists(join(sysroot, F[1:], fw + '.framework')):
break
else:
if not exists(join(F, fw + '.framework')):
break
else:
                # ok, F is now a directory with both frameworks. Continue
# building
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
# For 8.4a2, we must add -I options that point inside the Tcl and Tk
# frameworks. In later release we should hopefully be able to pass
# the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in ('Tcl', 'Tk')
for H in ('Headers', 'Versions/Current/PrivateHeaders')
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
# All existing framework builds of Tcl/Tk don't support 64-bit
# architectures.
cflags = sysconfig.get_config_vars('CFLAGS')[0]
        archs = re.findall(r'-arch\s+(\w+)', cflags)
tmpfile = os.path.join(self.build_temp, 'tk.arch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Note: cannot use os.popen or subprocess here, that
# requires extensions that are not available here.
if is_macosx_sdk_path(F):
os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(os.path.join(sysroot, F[1:]), tmpfile))
else:
os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(F, tmpfile))
with open(tmpfile) as fp:
detected_archs = []
for ln in fp:
a = ln.split()[-1]
if a in archs:
detected_archs.append(ln.split()[-1])
os.unlink(tmpfile)
for a in detected_archs:
frameworks.append('-arch')
frameworks.append(a)
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks[2:],
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
# The _tkinter module.
# Check whether --with-tcltk-includes and --with-tcltk-libs were
# configured or passed into the make target. If so, use these values
# to build tkinter and bypass the searches for Tcl and TK in standard
# locations.
if self.detect_tkinter_explicitly():
return
# Rather than complicate the code below, detecting and building
# AquaTk is a separate method. Only one Tkinter will be built on
# Darwin - either AquaTk, if it is found, or X11 based Tk.
if (host_platform == 'darwin' and
self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
return
# Assume we haven't found any of the libraries or include files
# The versions with dots are used on Unix, and the versions without
# dots on Windows, for detection by cygwin.
tcllib = tklib = tcl_includes = tk_includes = None
for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
'8.2', '82', '8.1', '81', '8.0', '80']:
tklib = self.compiler.find_library_file(lib_dirs,
'tk' + version)
tcllib = self.compiler.find_library_file(lib_dirs,
'tcl' + version)
if tklib and tcllib:
# Exit the loop when we've found the Tcl/Tk libraries
break
# Now check for the header files
if tklib and tcllib:
# Check for the include files on Debian and {Free,Open}BSD, where
# they're put in /usr/include/{tcl,tk}X.Y
dotversion = version
if '.' not in dotversion and "bsd" in host_platform.lower():
# OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
# but the include subdirs are named like .../include/tcl8.3.
dotversion = dotversion[:-1] + '.' + dotversion[-1]
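            # e.g. version '83' becomes dotversion '8.3', matching /usr/include/tcl8.3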
tcl_include_sub = []
tk_include_sub = []
for dir in inc_dirs:
tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
tk_include_sub += [dir + os.sep + "tk" + dotversion]
tk_include_sub += tcl_include_sub
tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
if (tcllib is None or tklib is None or
tcl_includes is None or tk_includes is None):
self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
return
# OK... everything seems to be present for Tcl/Tk.
include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
for dir in tcl_includes + tk_includes:
if dir not in include_dirs:
include_dirs.append(dir)
# Check for various platform-specific directories
if host_platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
elif os.path.exists('/usr/X11R6/include'):
include_dirs.append('/usr/X11R6/include')
added_lib_dirs.append('/usr/X11R6/lib64')
added_lib_dirs.append('/usr/X11R6/lib')
elif os.path.exists('/usr/X11R5/include'):
include_dirs.append('/usr/X11R5/include')
added_lib_dirs.append('/usr/X11R5/lib')
else:
# Assume default location for X11
include_dirs.append('/usr/X11/include')
added_lib_dirs.append('/usr/X11/lib')
# If Cygwin, then verify that X is installed before proceeding
if host_platform == 'cygwin':
x11_inc = find_file('X11/Xlib.h', [], include_dirs)
if x11_inc is None:
return
# Check for BLT extension
if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT8.0'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT8.0')
elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT')
# Add the Tcl/Tk libraries
libs.append('tk'+ version)
libs.append('tcl'+ version)
if host_platform in ['aix3', 'aix4']:
libs.append('ld')
# Finally, link with the X11 libraries (not appropriate on cygwin)
if host_platform != "cygwin":
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)] + defs,
include_dirs = include_dirs,
libraries = libs,
library_dirs = added_lib_dirs,
)
self.extensions.append(ext)
# XXX handle these, but how to detect?
# *** Uncomment and edit for PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
# *** Uncomment and edit for TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment these for TOGL extension only:
# -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
# Darwin (OS X) uses preconfigured files, in
# the Modules/_ctypes/libffi_osx directory.
srcdir = sysconfig.get_config_var('srcdir')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi_osx'))
sources = [os.path.join(ffi_srcdir, p)
for p in ['ffi.c',
'x86/darwin64.S',
'x86/x86-darwin.S',
'x86/x86-ffi_darwin.c',
'x86/x86-ffi64.c',
'powerpc/ppc-darwin.S',
'powerpc/ppc-darwin_closure.S',
'powerpc/ppc-ffi_darwin.c',
'powerpc/ppc64-darwin_closure.S',
]]
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_srcdir, 'include'),
os.path.join(ffi_srcdir, 'powerpc')]
ext.include_dirs.extend(include_dirs)
ext.sources.extend(sources)
return True
def configure_ctypes(self, ext):
if not self.use_system_libffi:
if host_platform == 'darwin':
return self.configure_ctypes_darwin(ext)
srcdir = sysconfig.get_config_var('srcdir')
ffi_builddir = os.path.join(self.build_temp, 'libffi')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi'))
ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
from distutils.dep_util import newer_group
config_sources = [os.path.join(ffi_srcdir, fname)
for fname in os.listdir(ffi_srcdir)
if os.path.isfile(os.path.join(ffi_srcdir, fname))]
if self.force or newer_group(config_sources,
ffi_configfile):
from distutils.dir_util import mkpath
mkpath(ffi_builddir)
config_args = [arg for arg in sysconfig.get_config_var("CONFIG_ARGS").split()
if (('--host=' in arg) or ('--build=' in arg))]
if not self.verbose:
config_args.append("-q")
# Pass empty CFLAGS because we'll just append the resulting
# CFLAGS to Python's; -g or -O2 is to be avoided.
cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
% (ffi_builddir, ffi_srcdir, " ".join(config_args))
res = os.system(cmd)
if res or not os.path.exists(ffi_configfile):
print("Failed to configure _ctypes module")
return False
fficonfig = {}
with open(ffi_configfile) as f:
exec(f.read(), globals(), fficonfig)
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_builddir, 'include'),
ffi_builddir,
os.path.join(ffi_srcdir, 'src')]
extra_compile_args = fficonfig['ffi_cflags'].split()
ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
fficonfig['ffi_sources'])
ext.include_dirs.extend(include_dirs)
ext.extra_compile_args.extend(extra_compile_args)
return True
def detect_ctypes(self, inc_dirs, lib_dirs):
self.use_system_libffi = False
include_dirs = []
extra_compile_args = []
extra_link_args = []
sources = ['_ctypes/_ctypes.c',
'_ctypes/callbacks.c',
'_ctypes/callproc.c',
'_ctypes/stgdict.c',
'_ctypes/cfield.c']
depends = ['_ctypes/ctypes.h']
if host_platform == 'darwin':
sources.append('_ctypes/malloc_closure.c')
sources.append('_ctypes/darwin/dlfcn_simple.c')
extra_compile_args.append('-DMACOSX')
include_dirs.append('_ctypes/darwin')
# XXX Is this still needed?
## extra_link_args.extend(['-read_only_relocs', 'warning'])
elif host_platform == 'sunos5':
# XXX This shouldn't be necessary; it appears that some
# of the assembler code is non-PIC (i.e. it has relocations
# when it shouldn't. The proper fix would be to rewrite
# the assembler code to be PIC.
# This only works with GCC; the Sun compiler likely refuses
# this option. If you want to compile ctypes with the Sun
# compiler, please research a proper solution, instead of
# finding some -z option for the Sun compiler.
extra_link_args.append('-mimpure-text')
elif host_platform.startswith('hp-ux'):
extra_link_args.append('-fPIC')
ext = Extension('_ctypes',
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
libraries=[],
sources=sources,
depends=depends)
ext_test = Extension('_ctypes_test',
sources=['_ctypes/_ctypes_test.c'])
self.extensions.extend([ext, ext_test])
if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
return
if host_platform == 'darwin':
# OS X 10.5 comes with libffi.dylib; the include files are
# in /usr/include/ffi
inc_dirs.append('/usr/include/ffi')
ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
if not ffi_inc or ffi_inc[0] == '':
ffi_inc = find_file('ffi.h', [], inc_dirs)
if ffi_inc is not None:
ffi_h = ffi_inc[0] + '/ffi.h'
with open(ffi_h) as fp:
while 1:
line = fp.readline()
if not line:
ffi_inc = None
break
if line.startswith('#define LIBFFI_H'):
break
ffi_lib = None
if ffi_inc is not None:
for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
if (self.compiler.find_library_file(lib_dirs, lib_name)):
ffi_lib = lib_name
break
if ffi_inc and ffi_lib:
ext.include_dirs.extend(ffi_inc)
ext.libraries.append(ffi_lib)
self.use_system_libffi = True
def _decimal_ext(self):
extra_compile_args = []
undef_macros = []
if '--with-system-libmpdec' in sysconfig.get_config_var("CONFIG_ARGS"):
include_dirs = []
libraries = [':libmpdec.so.2']
sources = ['_decimal/_decimal.c']
depends = ['_decimal/docstrings.h']
else:
srcdir = sysconfig.get_config_var('srcdir')
include_dirs = [os.path.abspath(os.path.join(srcdir,
'Modules',
'_decimal',
'libmpdec'))]
libraries = []
sources = [
'_decimal/_decimal.c',
'_decimal/libmpdec/basearith.c',
'_decimal/libmpdec/constants.c',
'_decimal/libmpdec/context.c',
'_decimal/libmpdec/convolute.c',
'_decimal/libmpdec/crt.c',
'_decimal/libmpdec/difradix2.c',
'_decimal/libmpdec/fnt.c',
'_decimal/libmpdec/fourstep.c',
'_decimal/libmpdec/io.c',
'_decimal/libmpdec/memory.c',
'_decimal/libmpdec/mpdecimal.c',
'_decimal/libmpdec/numbertheory.c',
'_decimal/libmpdec/sixstep.c',
'_decimal/libmpdec/transpose.c',
]
depends = [
'_decimal/docstrings.h',
'_decimal/libmpdec/basearith.h',
'_decimal/libmpdec/bits.h',
'_decimal/libmpdec/constants.h',
'_decimal/libmpdec/convolute.h',
'_decimal/libmpdec/crt.h',
'_decimal/libmpdec/difradix2.h',
'_decimal/libmpdec/fnt.h',
'_decimal/libmpdec/fourstep.h',
'_decimal/libmpdec/io.h',
'_decimal/libmpdec/memory.h',
'_decimal/libmpdec/mpdecimal.h',
'_decimal/libmpdec/numbertheory.h',
'_decimal/libmpdec/sixstep.h',
'_decimal/libmpdec/transpose.h',
'_decimal/libmpdec/typearith.h',
'_decimal/libmpdec/umodarith.h',
]
config = {
'x64': [('CONFIG_64','1'), ('ASM','1')],
'uint128': [('CONFIG_64','1'), ('ANSI','1'), ('HAVE_UINT128_T','1')],
'ansi64': [('CONFIG_64','1'), ('ANSI','1')],
'ppro': [('CONFIG_32','1'), ('PPRO','1'), ('ASM','1')],
'ansi32': [('CONFIG_32','1'), ('ANSI','1')],
'ansi-legacy': [('CONFIG_32','1'), ('ANSI','1'),
('LEGACY_COMPILER','1')],
'universal': [('UNIVERSAL','1')]
}
cc = sysconfig.get_config_var('CC')
sizeof_size_t = sysconfig.get_config_var('SIZEOF_SIZE_T')
machine = os.environ.get('PYTHON_DECIMAL_WITH_MACHINE')
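        # e.g. PYTHON_DECIMAL_WITH_MACHINE=uint128 selects the ('CONFIG_64', '1'),
        # ('ANSI', '1'), ('HAVE_UINT128_T', '1') entry from the config table above.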
if machine:
# Override automatic configuration to facilitate testing.
define_macros = config[machine]
elif host_platform == 'darwin':
# Universal here means: build with the same options Python
# was built with.
define_macros = config['universal']
elif sizeof_size_t == 8:
if sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X64'):
define_macros = config['x64']
elif sysconfig.get_config_var('HAVE_GCC_UINT128_T'):
define_macros = config['uint128']
else:
define_macros = config['ansi64']
elif sizeof_size_t == 4:
ppro = sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X87')
if ppro and ('gcc' in cc or 'clang' in cc) and \
not 'sunos' in host_platform:
# solaris: problems with register allocation.
# icc >= 11.0 works as well.
define_macros = config['ppro']
extra_compile_args.append('-Wno-unknown-pragmas')
else:
define_macros = config['ansi32']
else:
raise DistutilsError("_decimal: unsupported architecture")
# Workarounds for toolchain bugs:
if sysconfig.get_config_var('HAVE_IPA_PURE_CONST_BUG'):
# Some versions of gcc miscompile inline asm:
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46491
# http://gcc.gnu.org/ml/gcc/2010-11/msg00366.html
extra_compile_args.append('-fno-ipa-pure-const')
if sysconfig.get_config_var('HAVE_GLIBC_MEMMOVE_BUG'):
# _FORTIFY_SOURCE wrappers for memmove and bcopy are incorrect:
# http://sourceware.org/ml/libc-alpha/2010-12/msg00009.html
undef_macros.append('_FORTIFY_SOURCE')
# Faster version without thread local contexts:
if not sysconfig.get_config_var('WITH_THREAD'):
define_macros.append(('WITHOUT_THREADS', 1))
# Increase warning level for gcc:
if 'gcc' in cc:
cmd = ("echo '' | %s -Wextra -Wno-missing-field-initializers -E - "
"> /dev/null 2>&1" % cc)
ret = os.system(cmd)
if ret >> 8 == 0:
extra_compile_args.extend(['-Wextra',
'-Wno-missing-field-initializers'])
# Uncomment for extra functionality:
#define_macros.append(('EXTRA_FUNCTIONALITY', 1))
ext = Extension (
'_decimal',
include_dirs=include_dirs,
libraries=libraries,
define_macros=define_macros,
undef_macros=undef_macros,
extra_compile_args=extra_compile_args,
sources=sources,
depends=depends
)
return ext
class PyBuildInstall(install):
# Suppress the warning about installation into the lib_dynload
# directory, which is not in sys.path when running Python during
# installation:
def initialize_options (self):
install.initialize_options(self)
self.warn_dir=0
# Customize subcommands to not install an egg-info file for Python
sub_commands = [('install_lib', install.has_lib),
('install_headers', install.has_headers),
('install_scripts', install.has_scripts),
('install_data', install.has_data)]
class PyBuildInstallLib(install_lib):
# Do exactly what install_lib does but make sure correct access modes get
    # set on installed directories and files. All installed files will get
# mode 644 unless they are a shared library in which case they will get
# mode 755. All installed directories will get mode 755.
    # this works for EXT_SUFFIX too, which ends with SHLIB_SUFFIX
shlib_suffix = sysconfig.get_config_var("SHLIB_SUFFIX")
def install(self):
outfiles = install_lib.install(self)
self.set_file_modes(outfiles, 0o644, 0o755)
self.set_dir_modes(self.install_dir, 0o755)
return outfiles
def set_file_modes(self, files, defaultMode, sharedLibMode):
if not self.is_chmod_supported(): return
if not files: return
for filename in files:
if os.path.islink(filename): continue
mode = defaultMode
if filename.endswith(self.shlib_suffix): mode = sharedLibMode
log.info("changing mode of %s to %o", filename, mode)
if not self.dry_run: os.chmod(filename, mode)
def set_dir_modes(self, dirname, mode):
if not self.is_chmod_supported(): return
for dirpath, dirnames, fnames in os.walk(dirname):
if os.path.islink(dirpath):
continue
log.info("changing mode of %s to %o", dirpath, mode)
if not self.dry_run: os.chmod(dirpath, mode)
def is_chmod_supported(self):
return hasattr(os, 'chmod')
class PyBuildScripts(build_scripts):
def copy_scripts(self):
outfiles, updated_files = build_scripts.copy_scripts(self)
fullversion = '-{0[0]}.{0[1]}'.format(sys.version_info)
minoronly = '.{0[1]}'.format(sys.version_info)
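        # e.g. for Python 3.4: fullversion == '-3.4' and minoronly == '.4', so
        # 'pyvenv' is renamed to 'pyvenv-3.4' and 'pydoc3' to 'pydoc3.4'.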
newoutfiles = []
newupdated_files = []
for filename in outfiles:
if filename.endswith(('2to3', 'pyvenv')):
newfilename = filename + fullversion
else:
newfilename = filename + minoronly
log.info('renaming {} to {}'.format(filename, newfilename))
os.rename(filename, newfilename)
newoutfiles.append(newfilename)
if filename in updated_files:
newupdated_files.append(newfilename)
return newoutfiles, newupdated_files
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
# turn off warnings when deprecated modules are imported
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
setup(# PyPI Metadata (PEP 301)
name = "Python",
version = sys.version.split()[0],
url = "http://www.python.org/%s" % sys.version[:3],
maintainer = "Guido van Rossum and the Python community",
maintainer_email = "python-dev@python.org",
description = "A high-level object-oriented programming language",
long_description = SUMMARY.strip(),
license = "PSF license",
classifiers = [x for x in CLASSIFIERS.split("\n") if x],
platforms = ["Many"],
# Build info
cmdclass = {'build_ext': PyBuildExt,
'build_scripts': PyBuildScripts,
'install': PyBuildInstall,
'install_lib': PyBuildInstallLib},
# The struct module is defined here, because build_ext won't be
# called unless there's at least one extension module defined.
ext_modules=[Extension('_struct', ['_struct.c'])],
# If you change the scripts installed here, you also need to
# check the PyBuildScripts command above, and change the links
# created by the bininstall target in Makefile.pre.in
scripts = ["Tools/scripts/pydoc3", "Tools/scripts/idle3",
"Tools/scripts/2to3", "Tools/scripts/pyvenv"]
)
# --install-platlib
if __name__ == '__main__':
main()
|
amyvmiwei/kbengine
|
kbe/src/lib/python/setup.py
|
Python
|
lgpl-3.0
| 97,472
|
[
"VisIt"
] |
19054475a41e2bd28c219cc3f8b553e4d9db70cfa14cc4c84bc70f9035665579
|
from ase import Atom, Atoms
from ase.calculators.test import numeric_force
from gpaw import GPAW, Mixer
from gpaw.test import equal
a = 4.0
n = 16
atoms = Atoms([Atom('H', [1.234, 2.345, 3.456])],
cell=(a, a, a), pbc=True)
calc = GPAW(nbands=1,
gpts=(n, n, n),
txt=None,
mixer=Mixer(0.25, 3, 1),
convergence={'energy': 1e-7})
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
f1 = atoms.get_forces()[0]
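# Compare the analytic force on atom 0 with a finite-difference estimate from
# numeric_force for each Cartesian component.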
for i in range(3):
f2i = numeric_force(atoms, 0, i)
    print(f1[i] - f2i)
equal(f1[i], f2i, 0.00025)
energy_tolerance = 0.00006
force_tolerance = 0.0001
niter_tolerance = 0
equal(e1, -0.531042, energy_tolerance)
f1_ref = [-0.291893, -0.305174, -0.35329]
for i in range(3):
equal(f1[i], f1_ref[i], force_tolerance)
assert 34 <= niter1 <= 35, niter1
|
qsnake/gpaw
|
gpaw/test/H_force.py
|
Python
|
gpl-3.0
| 883
|
[
"ASE",
"GPAW"
] |
f3de906dcbb09749cf0d267db942f88f79d880dca3a37f7bb59b7b3e8f3e9c1a
|
"""
Data providers for genome visualizations.
"""
import os, sys, re
import pkg_resources
import itertools
import random
import math
pkg_resources.require( "numpy" )
pkg_resources.require( "bx-python" )
pkg_resources.require( "pysam" )
from bx.interval_index_file import Indexes
from bx.bbi.bigwig_file import BigWigFile
from bx.bbi.bigbed_file import BigBedFile
from pysam import csamtools, ctabix
from galaxy.datatypes.util.gff_util import convert_gff_coords_to_bed, GFFFeature, GFFInterval, GFFReaderWrapper, parse_gff_attributes
from galaxy.util.json import loads
from galaxy.visualization.data_providers.basic import BaseDataProvider
from galaxy.visualization.data_providers.cigar import get_ref_based_read_seq_and_cigar
from galaxy.datatypes.interval import Bed, Gff, Gtf
#
# Utility functions.
#
def float_nan(n):
'''
Return None instead of NaN to pass jQuery 1.4's strict JSON
'''
if n != n: # NaN != NaN
return None
else:
return float(n)
def get_bounds( reads, start_pos_index, end_pos_index ):
'''
Returns the minimum and maximum position for a set of reads.
'''
max_low = sys.maxint
max_high = -sys.maxint
for read in reads:
if read[ start_pos_index ] < max_low:
max_low = read[ start_pos_index ]
if read[ end_pos_index ] > max_high:
max_high = read[ end_pos_index ]
return max_low, max_high
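# e.g. get_bounds([[5, 10], [3, 12]], 0, 1) -> (3, 12)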
def _convert_between_ucsc_and_ensemble_naming( chrom ):
'''
Convert between UCSC chromosome ('chr1') naming conventions and Ensembl
naming conventions ('1')
'''
if chrom.startswith( 'chr' ):
# Convert from UCSC to Ensembl
return chrom[ 3: ]
else:
# Convert from Ensembl to UCSC
return 'chr' + chrom
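# e.g. 'chr1' -> '1' and '1' -> 'chr1'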
def _chrom_naming_matches( chrom1, chrom2 ):
return ( chrom1.startswith( 'chr' ) and chrom2.startswith( 'chr' ) ) or ( not chrom1.startswith( 'chr' ) and not chrom2.startswith( 'chr' ) )
class FeatureLocationIndexDataProvider( BaseDataProvider ):
"""
Reads/writes/queries feature location index (FLI) datasets.
"""
def __init__( self, converted_dataset ):
self.converted_dataset = converted_dataset
def get_data( self, query ):
# Init.
textloc_file = open( self.converted_dataset.file_name, 'r' )
line_len = int( textloc_file.readline() )
file_len = os.path.getsize( self.converted_dataset.file_name )
query = query.lower()
# Find query in file using binary search.
low = 0
        high = file_len // line_len
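        # Each record in the FLI file occupies exactly line_len bytes, so the file
        # can be binary-searched by seeking straight to mid * line_len.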
while low < high:
mid = ( low + high ) // 2
position = mid * line_len
textloc_file.seek( position )
# Compare line with query and update low, high.
line = textloc_file.readline()
if line < query:
low = mid + 1
else:
high = mid
# Need to move back one line because last line read may be included in
# results.
position = low * line_len
textloc_file.seek( position )
# At right point in file, generate hits.
result = []
while True:
line = textloc_file.readline()
if not line.startswith( query ):
break
if line[ -1: ] == '\n':
line = line[ :-1 ]
result.append( line.split()[1:] )
textloc_file.close()
return result
class GenomeDataProvider( BaseDataProvider ):
"""
Base class for genome data providers. All genome providers use BED coordinate
format (0-based, half-open coordinates) for both queries and returned data.
"""
dataset_type = None
"""
Mapping from column name to payload data; this mapping is used to create
filters. Key is column name, value is a dict with mandatory key 'index' and
optional key 'name'. E.g. this defines column 4
col_name_data_attr_mapping = {4 : { index: 5, name: 'Score' } }
"""
col_name_data_attr_mapping = {}
def __init__( self, converted_dataset=None, original_dataset=None, dependencies=None,
error_max_vals="Only the first %i %s in this region are displayed." ):
super( GenomeDataProvider, self ).__init__( converted_dataset=converted_dataset,
original_dataset=original_dataset,
dependencies=dependencies,
error_max_vals=error_max_vals )
# File/pointer where data is obtained from. It is useful to set this for repeated
# queries, such as is necessary for genome-wide data.
# TODO: add functions to (a) create data_file and (b) clean up data_file.
self.data_file = None
def write_data_to_file( self, regions, filename ):
"""
Write data in region defined by chrom, start, and end to a file.
"""
raise Exception( "Unimplemented Function" )
def valid_chroms( self ):
"""
Returns chroms/contigs that the dataset contains
"""
return None # by default
def has_data( self, chrom, start, end, **kwargs ):
"""
Returns true if dataset has data in the specified genome window, false
otherwise.
"""
raise Exception( "Unimplemented Function" )
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end
"""
raise Exception( "Unimplemented Function" )
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Process data from an iterator to a format that can be provided to client.
"""
raise Exception( "Unimplemented Function" )
def get_data( self, chrom=None, low=None, high=None, start_val=0, max_vals=sys.maxint, **kwargs ):
"""
Returns data in region defined by chrom, start, and end. start_val and
max_vals are used to denote the data to return: start_val is the first element to
return and max_vals indicates the number of values to return.
Return value must be a dictionary with the following attributes:
dataset_type, data
"""
start, end = int( low ), int( high )
iterator = self.get_iterator( chrom, start, end, **kwargs )
return self.process_data( iterator, start_val, max_vals, start=start, end=end, **kwargs )
def get_genome_data( self, chroms_info, **kwargs ):
"""
Returns data for complete genome.
"""
genome_data = []
for chrom_info in chroms_info[ 'chrom_info' ]:
chrom = chrom_info[ 'chrom' ]
chrom_len = chrom_info[ 'len' ]
chrom_data = self.get_data( chrom, 0, chrom_len, **kwargs )
# FIXME: data providers probably should never return None.
# Some data providers return None when there's no data, so
# create a dummy dict if necessary.
if not chrom_data:
chrom_data = {
'data': None
}
chrom_data[ 'region' ] = "%s:%i-%i" % ( chrom, 0, chrom_len )
genome_data.append( chrom_data )
return {
'data': genome_data,
'dataset_type': self.dataset_type
}
def get_filters( self ):
"""
Returns filters for provider's data. Return value is a list of
filters; each filter is a dictionary with the keys 'name', 'index', 'type'.
NOTE: This method uses the original dataset's datatype and metadata to
create the filters.
"""
# Get column names.
try:
column_names = self.original_dataset.datatype.column_names
except AttributeError:
try:
column_names = range( self.original_dataset.metadata.columns )
except: # Give up
return []
# Dataset must have column types; if not, cannot create filters.
try:
column_types = self.original_dataset.metadata.column_types
except AttributeError:
return []
# Create and return filters.
filters = []
if self.original_dataset.metadata.viz_filter_cols:
for viz_col_index in self.original_dataset.metadata.viz_filter_cols:
# Some columns are optional, so can't assume that a filter
# column is in dataset.
if viz_col_index >= len( column_names ):
                    continue
col_name = column_names[ viz_col_index ]
# Make sure that column has a mapped index. If not, do not add filter.
try:
attrs = self.col_name_data_attr_mapping[ col_name ]
except KeyError:
continue
filters.append(
{ 'name' : attrs[ 'name' ], 'type' : column_types[viz_col_index], \
'index' : attrs[ 'index' ] } )
return filters
def get_default_max_vals( self ):
return 5000
#
# -- Base mixins and providers --
#
class FilterableMixin:
def get_filters( self ):
""" Returns a dataset's filters. """
# is_ functions taken from Tabular.set_meta
def is_int( column_text ):
try:
int( column_text )
return True
except:
return False
def is_float( column_text ):
try:
float( column_text )
return True
except:
if column_text.strip().lower() == 'na':
return True #na is special cased to be a float
return False
#
# Get filters.
# TODOs:
# (a) might be useful to move this into each datatype's set_meta method;
# (b) could look at first N lines to ensure GTF attribute types are consistent.
#
filters = []
# HACK: first 8 fields are for drawing, so start filter column index at 9.
filter_col = 8
if isinstance( self.original_dataset.datatype, Gff ):
# Can filter by score and GTF attributes.
filters = [ { 'name': 'Score',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c6' } ]
filter_col += 1
if isinstance( self.original_dataset.datatype, Gtf ):
# Create filters based on dataset metadata.
for name, a_type in self.original_dataset.metadata.attribute_types.items():
if a_type in [ 'int', 'float' ]:
filters.append(
{ 'name': name,
'type': 'number',
'index': filter_col,
'tool_id': 'gff_filter_by_attribute',
'tool_exp_name': name } )
filter_col += 1
'''
# Old code: use first line in dataset to find attributes.
for i, line in enumerate( open(self.original_dataset.file_name) ):
if not line.startswith('#'):
# Look at first line for attributes and types.
attributes = parse_gff_attributes( line.split('\t')[8] )
for attr, value in attributes.items():
# Get attribute type.
if is_int( value ):
attr_type = 'int'
elif is_float( value ):
attr_type = 'float'
else:
attr_type = 'str'
# Add to filters.
if attr_type is not 'str':
filters.append( { 'name': attr, 'type': attr_type, 'index': filter_col } )
filter_col += 1
break
'''
elif isinstance( self.original_dataset.datatype, Bed ):
# Can filter by score column only.
filters = [ { 'name': 'Score',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c5'
} ]
return filters
class TabixDataProvider( FilterableMixin, GenomeDataProvider ):
dataset_type = 'tabix'
"""
Tabix index data provider for the Galaxy track browser.
"""
col_name_data_attr_mapping = { 4 : { 'index': 4 , 'name' : 'Score' } }
def get_iterator( self, chrom, start, end, **kwargs ):
start, end = int(start), int(end)
if end >= (2<<29):
            end = (2 << 29) - 1  # Tabix-enforced maximum
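            # Classic tabix (.tbi) indexes only address positions below 2**29, so
            # larger requests are clamped to the maximum representable coordinate.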
bgzip_fname = self.dependencies['bgzip'].file_name
if not self.data_file:
self.data_file = ctabix.Tabixfile(bgzip_fname, index_filename=self.converted_dataset.file_name)
# Get iterator using either naming scheme.
iterator = iter( [] )
if chrom in self.data_file.contigs:
iterator = self.data_file.fetch(reference=chrom, start=start, end=end)
else:
# Try alternative naming scheme.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
if chrom in self.data_file.contigs:
iterator = self.data_file.fetch(reference=chrom, start=start, end=end)
return iterator
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
for region in regions:
# Write data in region.
chrom = region.chrom
start = region.start
end = region.end
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
out.close()
#
# -- Interval data providers --
#
class IntervalDataProvider( GenomeDataProvider ):
dataset_type = 'interval_index'
"""
Processes interval data from native format to payload format.
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
def get_iterator( self, chrom, start, end, **kwargs ):
raise Exception( "Unimplemented Function" )
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
        Process interval data from the iterator into the payload format described below.
"""
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand> ]
#
# First three entries are mandatory, others are optional.
#
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
rval = []
message = None
# Subtract one b/c columns are 1-based but indices are 0-based.
col_fn = lambda col: None if col is None else col - 1
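        # e.g. a 1-based strandCol of 6 maps to list index 5; unset columns stay None.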
start_col = self.original_dataset.metadata.startCol - 1
end_col = self.original_dataset.metadata.endCol - 1
strand_col = col_fn( self.original_dataset.metadata.strandCol )
name_col = col_fn( self.original_dataset.metadata.nameCol )
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
feature = line.split()
length = len(feature)
# Unique id is just a hash of the line
payload = [ hash(line), int( feature[start_col] ), int( feature [end_col] ) ]
if no_detail:
rval.append( payload )
continue
# Name, strand.
if name_col:
payload.append( feature[name_col] )
if strand_col:
# Put empty name as placeholder.
if not name_col: payload.append( "" )
payload.append( feature[strand_col] )
# Score (filter data)
if length >= 5 and filter_cols and filter_cols[0] == "Score":
try:
payload.append( float( feature[4] ) )
except:
payload.append( feature[4] )
rval.append( payload )
return { 'data': rval, 'message': message }
def write_data_to_file( self, regions, filename ):
raise Exception( "Unimplemented Function" )
class IntervalTabixDataProvider( TabixDataProvider, IntervalDataProvider ):
"""
Provides data from a BED file indexed via tabix.
"""
pass
#
# -- BED data providers --
#
class BedDataProvider( GenomeDataProvider ):
"""
Processes BED data from native format to payload format.
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
dataset_type = 'interval_index'
def get_iterator( self, chrom, start, end, **kwargs ):
raise Exception( "Unimplemented Method" )
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
        Process BED data from the iterator into the payload format described above.
"""
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
#
# First three entries are mandatory, others are optional.
#
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
rval = []
message = None
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
# TODO: can we use column metadata to fill out payload?
# TODO: use function to set payload data
feature = line.split()
length = len(feature)
# Unique id is just a hash of the line
payload = [ hash(line), int(feature[1]), int(feature[2]) ]
if no_detail:
rval.append( payload )
continue
# Name, strand, thick start, thick end.
if length >= 4:
payload.append(feature[3])
if length >= 6:
payload.append(feature[5])
if length >= 8:
payload.append(int(feature[6]))
payload.append(int(feature[7]))
# Blocks.
if length >= 12:
block_sizes = [ int(n) for n in feature[10].split(',') if n != '']
block_starts = [ int(n) for n in feature[11].split(',') if n != '' ]
blocks = zip( block_sizes, block_starts )
payload.append( [ ( int(feature[1]) + block[1], int(feature[1]) + block[1] + block[0] ) for block in blocks ] )
# Score (filter data)
if length >= 5 and filter_cols and filter_cols[0] == "Score":
# If dataset doesn't have name/strand/thick start/thick end/blocks,
# add placeholders. There should be 8 entries if all attributes
# are present.
payload.extend( [ None for i in range( 8 - len( payload ) ) ] )
try:
payload.append( float( feature[4] ) )
except:
payload.append( feature[4] )
rval.append( payload )
return { 'data': rval, 'dataset_type': self.dataset_type, 'message': message }
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
for region in regions:
# Write data in region.
chrom = region.chrom
start = region.start
end = region.end
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
out.close()
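# Illustrative sketch (not part of Galaxy): how the BED12 block fields handled
# in BedDataProvider.process_data() above map to the absolute ( start, end )
# block tuples placed in the payload. Field 10 holds comma-separated block
# sizes and field 11 holds block starts relative to the feature start.
def _example_bed12_blocks( chrom_start, block_sizes_field, block_starts_field ):
    """ Return absolute ( start, end ) tuples for the blocks of a BED12 line. """
    sizes = [ int( n ) for n in block_sizes_field.split( ',' ) if n != '' ]
    starts = [ int( n ) for n in block_starts_field.split( ',' ) if n != '' ]
    return [ ( chrom_start + s, chrom_start + s + size ) for size, s in zip( sizes, starts ) ]

# Example: _example_bed12_blocks( 1000, "50,100,", "0,200," )
#   -> [ ( 1000, 1050 ), ( 1200, 1300 ) ]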
class BedTabixDataProvider( TabixDataProvider, BedDataProvider ):
"""
Provides data from a BED file indexed via tabix.
"""
pass
class RawBedDataProvider( BedDataProvider ):
"""
Provide data from BED file.
NOTE: this data provider does not use indices, and hence will be very slow
for large datasets.
"""
def get_iterator( self, source, chrom=None, start=None, end=None, **kwargs ):
# Read first line in order to match chrom naming format.
line = source.readline()
dataset_chrom = line.split()[0]
if not _chrom_naming_matches( chrom, dataset_chrom ):
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
# Undo read.
source.seek( 0 )
def line_filter_iter():
for line in open( self.original_dataset.file_name ):
if line.startswith( "track" ) or line.startswith( "browser" ):
continue
feature = line.split()
feature_chrom = feature[0]
feature_start = int( feature[1] )
feature_end = int( feature[2] )
if ( chrom is not None and feature_chrom != chrom ) \
or ( start is not None and feature_start > end ) \
or ( end is not None and feature_end < start ):
continue
yield line
return line_filter_iter()
#
# -- VCF data providers --
#
class VcfDataProvider( GenomeDataProvider ):
"""
Abstract class that processes VCF data from native format to payload format.
Payload format: An array of entries for each locus in the file. Each array
has the following entries:
1. GUID (unused)
2. location (0-based)
3. reference base(s)
4. alternative base(s)
5. quality score
6. whether variant passed filter
7. sample genotypes -- a single string with samples separated by commas; empty string
denotes the reference genotype
8-end: allele counts for each alternative
"""
col_name_data_attr_mapping = { 'Qual' : { 'index': 6 , 'name' : 'Qual' } }
dataset_type = 'variant'
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Returns a dict with the following attributes::
data - a list of variants with the format
.. raw:: text
[<guid>, <start>, <end>, <name>, cigar, seq]
message - error/informative message
"""
data = []
message = None
def get_mapping( ref, alt ):
"""
Returns ( offset, new_seq, cigar ) tuple that defines mapping of
alt to ref. Cigar format is an array of [ op_index, length ] pairs
where op_index is the 0-based index into the string "MIDNSHP=X"
"""
cig_ops = "MIDNSHP=X"
ref_len = len( ref )
alt_len = len( alt )
# Substitutions?
if ref_len == alt_len:
return 0, alt, [ [ cig_ops.find( "M" ), ref_len ] ]
# Deletions?
alt_in_ref_index = ref.find( alt )
if alt_in_ref_index != -1:
return alt_in_ref_index, ref[ alt_in_ref_index + 1: ], [ [ cig_ops.find( "D" ), ref_len - alt_len ] ]
# Insertions?
ref_in_alt_index = alt.find( ref )
if ref_in_alt_index != -1:
return ref_in_alt_index, alt[ ref_in_alt_index + 1: ], [ [ cig_ops.find( "I" ), alt_len - ref_len ] ]
# Pack data.
genotype_re = re.compile( '/|\|' )
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
# Split line and aggregate data.
feature = line.split()
pos, c_id, ref, alt, qual, c_filter, info = feature[ 1:8 ]
# Format and samples data are optional.
format = None
samples_data = []
if len( feature ) > 8:
format = feature[ 8 ]
                samples_data = feature[ 9: ]
# VCF is 1-based but provided position is 0-based.
pos = int( pos ) - 1
# FIXME: OK to skip?
if alt == '.':
count -= 1
continue
# Set up array to track allele counts.
allele_counts = [ 0 for i in range ( alt.count( ',' ) + 1 ) ]
sample_gts = []
if samples_data:
# Process and pack samples' genotype and count alleles across samples.
alleles_seen = {}
has_alleles = False
for i, sample in enumerate( samples_data ):
# Parse and count alleles.
genotype = sample.split( ':' )[ 0 ]
has_alleles = False
alleles_seen.clear()
for allele in genotype_re.split( genotype ):
try:
# This may throw a ValueError if allele is missing.
allele = int( allele )
# Only count allele if it hasn't been seen yet.
if allele != 0 and allele not in alleles_seen:
allele_counts[ allele - 1 ] += 1
alleles_seen[ allele ] = True
has_alleles = True
except ValueError:
pass
# If no alleles, use empty string as proxy.
if not has_alleles:
genotype = ''
sample_gts.append( genotype )
else:
# No samples, so set allele count and sample genotype manually.
allele_counts = [ 1 ]
sample_gts = [ '1/1' ]
# Add locus data.
locus_data = [
-1,
pos,
c_id,
ref,
alt,
qual,
c_filter,
','.join( sample_gts )
]
locus_data.extend( allele_counts )
data.append( locus_data )
return { 'data': data, 'message': message }
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
for region in regions:
# Write data in region.
chrom = region.chrom
start = region.start
end = region.end
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
out.close()
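# Illustrative sketch (not part of Galaxy): a standalone restatement of the
# nested get_mapping() helper defined in VcfDataProvider.process_data() above,
# so its three branches can be exercised in isolation. Cigar ops are
# [ op_index, length ] pairs indexing into "MIDNSHP=X".
def _example_ref_alt_mapping( ref, alt ):
    cig_ops = "MIDNSHP=X"
    if len( ref ) == len( alt ):
        # Substitution.
        return 0, alt, [ [ cig_ops.find( "M" ), len( ref ) ] ]
    idx = ref.find( alt )
    if idx != -1:
        # Deletion.
        return idx, ref[ idx + 1: ], [ [ cig_ops.find( "D" ), len( ref ) - len( alt ) ] ]
    idx = alt.find( ref )
    if idx != -1:
        # Insertion.
        return idx, alt[ idx + 1: ], [ [ cig_ops.find( "I" ), len( alt ) - len( ref ) ] ]

# Examples:
#   _example_ref_alt_mapping( "A", "T" )    -> ( 0, "T",   [ [ 0, 1 ] ] )  # 1M
#   _example_ref_alt_mapping( "ACGT", "A" ) -> ( 0, "CGT", [ [ 2, 3 ] ] )  # 3D
#   _example_ref_alt_mapping( "A", "ACG" )  -> ( 0, "CG",  [ [ 1, 2 ] ] )  # 2I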
class VcfTabixDataProvider( TabixDataProvider, VcfDataProvider ):
"""
Provides data from a VCF file indexed via tabix.
"""
dataset_type = 'variant'
class RawVcfDataProvider( VcfDataProvider ):
"""
Provide data from VCF file.
NOTE: this data provider does not use indices, and hence will be very slow
for large datasets.
"""
def get_iterator( self, chrom, start, end, **kwargs ):
source = open( self.original_dataset.file_name )
# Skip comments.
pos = 0
line = None
for line in source:
if not line.startswith("#"):
break
else:
pos = source.tell()
# If last line is a comment, there are no data lines.
if line.startswith( "#" ):
return []
# Match chrom naming format.
if line:
dataset_chrom = line.split()[0]
if not _chrom_naming_matches( chrom, dataset_chrom ):
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
def line_in_region( vcf_line, chrom, start, end ):
""" Returns true if line is in region. """
variant_chrom, variant_start = vcf_line.split()[ 0:2 ]
# VCF format is 1-based.
variant_start = int( variant_start ) - 1
return variant_chrom == chrom and variant_start >= start and variant_start <= end
def line_filter_iter():
""" Yields lines in source that are in region chrom:start-end """
# Yield data line read above.
if line_in_region( line, chrom, start, end ):
yield line
# Search for and yield other data lines.
for data_line in source:
if line_in_region( data_line, chrom, start, end ):
yield data_line
return line_filter_iter()
class BamDataProvider( GenomeDataProvider, FilterableMixin ):
"""
Provides access to intervals from a sorted indexed BAM file. Coordinate
data is reported in BED format: 0-based, half-open.
"""
dataset_type = 'bai'
def get_filters( self ):
"""
Returns filters for dataset.
"""
# HACK: first 7 fields are for drawing, so start filter column index at 7.
filter_col = 7
filters = []
filters.append( { 'name': 'Mapping Quality',
'type': 'number',
'index': filter_col
} )
return filters
def write_data_to_file( self, regions, filename ):
"""
Write reads in regions to file.
"""
# Open current BAM file using index.
bamfile = csamtools.Samfile( filename=self.original_dataset.file_name, mode='rb', \
index_filename=self.converted_dataset.file_name )
# TODO: write headers as well?
new_bamfile = csamtools.Samfile( template=bamfile, filename=filename, mode='wb' )
for region in regions:
# Write data from region.
chrom = region.chrom
start = region.start
end = region.end
try:
data = bamfile.fetch(start=start, end=end, reference=chrom)
except ValueError, e:
# Try alternative chrom naming.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
try:
data = bamfile.fetch( start=start, end=end, reference=chrom )
except ValueError:
return None
# Write reads in region.
for i, read in enumerate( data ):
new_bamfile.write( read )
# Cleanup.
new_bamfile.close()
bamfile.close()
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end
"""
start, end = int( start ), int( end )
orig_data_filename = self.original_dataset.file_name
index_filename = self.converted_dataset.file_name
# Attempt to open the BAM file with index
bamfile = csamtools.Samfile( filename=orig_data_filename, mode='rb', index_filename=index_filename )
try:
data = bamfile.fetch( start=start, end=end, reference=chrom )
except ValueError, e:
# Try alternative chrom naming.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
try:
data = bamfile.fetch( start=start, end=end, reference=chrom )
except ValueError:
return None
return data
def process_data( self, iterator, start_val=0, max_vals=None, ref_seq=None,
iterator_type='nth', mean_depth=None, start=0, end=0, **kwargs ):
"""
Returns a dict with the following attributes::
data - a list of reads with the format
[<guid>, <start>, <end>, <name>, <read_1>, <read_2>, [empty], <mapq_scores>]
where <read_1> has the format
[<start>, <end>, <cigar>, <strand>, <read_seq>]
and <read_2> has the format
[<start>, <end>, <cigar>, <strand>, <read_seq>]
Field 7 is empty so that mapq scores' location matches that in single-end reads.
For single-end reads, read has format:
[<guid>, <start>, <end>, <name>, <cigar>, <strand>, <seq>, <mapq_score>]
NOTE: read end and sequence data are not valid for reads outside of
requested region and should not be used.
max_low - lowest coordinate for the returned reads
max_high - highest coordinate for the returned reads
message - error/informative message
"""
# No iterator indicates no reads.
if iterator is None:
return { 'data': [], 'message': None }
#
# Helper functions.
#
def decode_strand( read_flag, mask ):
""" Decode strand from read flag. """
strand_flag = ( read_flag & mask == 0 )
if strand_flag:
return "+"
else:
return "-"
def _random_read_iterator( read_iterator, threshold ):
"""
An iterator that returns a random stream of reads from the read_iterator
as well as corresponding pairs for returned reads.
threshold is a value in [0,1] that denotes the percentage of reads
to return.
"""
for e in read_iterator:
if e.qname in paired_pending or random.uniform( 0, 1 ) <= threshold:
yield e
def _nth_read_iterator( read_iterator, threshold ):
"""
An iterator that returns every nth read.
"""
# Convert threshold to N for stepping through iterator.
n = int( 1/threshold )
return itertools.islice( read_iterator, None, None, n )
            # Alternate and much slower implementation that looks for pending pairs.
'''
for i, e in enumerate( read_iterator ):
if e.qname in paired_pending or ( i % n ) == 0:
yield e
'''
# -- Choose iterator. --
# Calculate threshold for non-sequential iterators based on mean_depth and read length.
try:
first_read = next( iterator )
except StopIteration:
# no reads.
return { 'data': [], 'message': None, 'max_low': start, 'max_high': start }
read_len = len( first_read.seq )
num_reads = max( ( end - start ) * mean_depth / float ( read_len ), 1 )
threshold = float( max_vals )/ num_reads
iterator = itertools.chain( iter( [ first_read ] ), iterator )
        # Use the specified iterator type, except when threshold is >= 1.
        # A threshold >= 1 indicates that all reads are to be returned, so no
        # sampling is needed and the sequential iterator is used.
if iterator_type == 'sequential' or threshold >= 1:
read_iterator = iterator
elif iterator_type == 'random':
read_iterator = _random_read_iterator( iterator, threshold )
elif iterator_type == 'nth':
read_iterator = _nth_read_iterator( iterator, threshold )
#
# Encode reads as list of lists.
#
results = []
paired_pending = {}
unmapped = 0
message = None
count = 0
for read in read_iterator:
if count < start_val:
continue
if ( count - start_val - unmapped ) >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
# If not mapped, skip read.
is_mapped = ( read.flag & 0x0004 == 0 )
if not is_mapped:
unmapped += 1
continue
qname = read.qname
seq = read.seq
strand = decode_strand( read.flag, 0x0010 )
if read.cigar is not None:
read_len = sum( [cig[1] for cig in read.cigar] ) # Use cigar to determine length
else:
read_len = len(seq) # If no cigar, just use sequence length
if read.is_proper_pair:
if qname in paired_pending:
# Found pair.
pair = paired_pending[qname]
results.append( [ hash( "%i_%s" % ( pair['start'], qname ) ),
pair['start'],
read.pos + read_len,
qname,
[ pair['start'], pair['end'], pair['cigar'], pair['strand'], pair['seq'] ],
[ read.pos, read.pos + read_len, read.cigar, strand, seq ],
None, [ pair['mapq'], read.mapq ]
] )
del paired_pending[qname]
else:
# Insert first of pair.
paired_pending[qname] = { 'start': read.pos, 'end': read.pos + read_len, 'seq': seq, 'mate_start': read.mpos,
'rlen': read_len, 'strand': strand, 'cigar': read.cigar, 'mapq': read.mapq }
count += 1
else:
results.append( [ hash( "%i_%s" % ( read.pos, qname ) ),
read.pos, read.pos + read_len, qname,
read.cigar, strand, read.seq, read.mapq ] )
count += 1
# Take care of reads whose mates are out of range.
for qname, read in paired_pending.iteritems():
if read['mate_start'] < read['start']:
# Mate is before read.
read_start = read['mate_start']
read_end = read['end']
# Make read_1 start=end so that length is 0 b/c we don't know
# read length.
r1 = [ read['mate_start'], read['mate_start'] ]
r2 = [ read['start'], read['end'], read['cigar'], read['strand'], read['seq'] ]
else:
# Mate is after read.
read_start = read['start']
# Make read_2 start=end so that length is 0 b/c we don't know
# read length. Hence, end of read is start of read_2.
read_end = read['mate_start']
r1 = [ read['start'], read['end'], read['cigar'], read['strand'], read['seq'] ]
r2 = [ read['mate_start'], read['mate_start'] ]
results.append( [ hash( "%i_%s" % ( read_start, qname ) ), read_start, read_end, qname, r1, r2, [read[ 'mapq' ], 125] ] )
# Clean up. TODO: is this needed? If so, we'll need a cleanup function after processing the data.
# bamfile.close()
def compress_seq_and_cigar( read, start_field, cigar_field, seq_field ):
'''
Use reference-based compression to compress read sequence and cigar.
'''
read_seq, read_cigar = get_ref_based_read_seq_and_cigar( read[ seq_field ].upper(),
read[ start_field ],
ref_seq.sequence,
ref_seq.start,
read[ cigar_field ] )
read[ seq_field ] = read_seq
read[ cigar_field ] = read_cigar
def convert_cigar( read, start_field, cigar_field, seq_field ):
'''
Convert read cigar from pysam format to string format.
'''
cigar_ops = 'MIDNSHP=X'
read_cigar = ''
for op_tuple in read[ cigar_field ]:
read_cigar += '%i%s' % ( op_tuple[1], cigar_ops[ op_tuple[0] ] )
read[ cigar_field ] = read_cigar
# Choose method for processing reads. Use reference-based compression
# if possible. Otherwise, convert cigar.
if ref_seq:
# Uppercase for easy comparison.
ref_seq.sequence = ref_seq.sequence.upper()
process_read = compress_seq_and_cigar
else:
process_read = convert_cigar
# Process reads.
for read in results:
if isinstance( read[ 5 ], list ):
# Paired-end read.
if len( read[4] ) > 2:
process_read( read[4], 0, 2, 4 )
if len( read[5] ) > 2:
process_read( read[5], 0, 2, 4 )
else:
# Single-end read.
process_read( read, 1, 4, 6)
max_low, max_high = get_bounds( results, 1, 2 )
return { 'data': results, 'message': message, 'max_low': max_low, 'max_high': max_high }
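# Illustrative sketch (not part of Galaxy): how the sampling threshold computed
# in BamDataProvider.process_data() above translates into an every-nth-read
# step for the 'nth' iterator. The numbers in the example are made up.
def _example_sampling_step( start, end, mean_depth, read_len, max_vals ):
    """ Return ( estimated_read_count, threshold, nth_step ). """
    num_reads = max( ( end - start ) * mean_depth / float( read_len ), 1 )
    threshold = float( max_vals ) / num_reads
    nth_step = int( 1 / threshold ) if threshold < 1 else 1
    return num_reads, threshold, nth_step

# Example: a 100 kb window at 30x coverage with 100 bp reads holds ~30,000
# reads; with max_vals=5000 the threshold is ~0.167, i.e. roughly every 6th
# read is kept:
#   _example_sampling_step( 0, 100000, 30, 100, 5000 ) -> ( 30000.0, ~0.167, 6 )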
class SamDataProvider( BamDataProvider ):
dataset_type = 'bai'
def __init__( self, converted_dataset=None, original_dataset=None, dependencies=None ):
""" Create SamDataProvider. """
super( SamDataProvider, self ).__init__( converted_dataset=converted_dataset,
original_dataset=original_dataset,
dependencies=dependencies )
# To use BamDataProvider, original dataset must be BAM and
# converted dataset must be BAI. Use BAI from BAM metadata.
if converted_dataset:
self.original_dataset = converted_dataset
self.converted_dataset = converted_dataset.metadata.bam_index
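# Illustrative sketch (not part of Galaxy): the nested convert_cigar() helper
# in BamDataProvider.process_data() above turns pysam-style ( op, length )
# tuples into a conventional cigar string; a standalone version:
def _example_cigar_string( pysam_cigar ):
    """ Convert a list of pysam ( op_index, length ) tuples to a cigar string. """
    cigar_ops = 'MIDNSHP=X'
    return ''.join( '%i%s' % ( length, cigar_ops[ op ] ) for op, length in pysam_cigar )

# Example: _example_cigar_string( [ ( 0, 50 ), ( 1, 2 ), ( 0, 25 ) ] ) -> "50M2I25M"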
class BBIDataProvider( GenomeDataProvider ):
"""
BBI data provider for the Galaxy track browser.
"""
dataset_type = 'bigwig'
def valid_chroms( self ):
# No way to return this info as of now
return None
def has_data( self, chrom ):
f, bbi = self._get_dataset()
all_dat = bbi.query( chrom, 0, 2147483647, 1 ) or \
bbi.query( _convert_between_ucsc_and_ensemble_naming( chrom ), 0, 2147483647, 1 )
f.close()
return all_dat is not None
def get_data( self, chrom, start, end, start_val=0, max_vals=None, num_samples=1000, **kwargs ):
start = int( start )
end = int( end )
# Helper function for getting summary data regardless of chromosome
# naming convention.
def _summarize_bbi( bbi, chrom, start, end, num_points ):
return bbi.summarize( chrom, start, end, num_points ) or \
bbi.summarize( _convert_between_ucsc_and_ensemble_naming( chrom ) , start, end, num_points )
# Bigwig can be a standalone bigwig file, in which case we use
# original_dataset, or coming from wig->bigwig conversion in
# which we use converted_dataset
f, bbi = self._get_dataset()
# If stats requested, compute overall summary data for the range
        # start:end but no reduced data. This is currently used by the client
# to determine the default range.
if 'stats' in kwargs:
summary = _summarize_bbi( bbi, chrom, start, end, 1 )
f.close()
min_val = 0
max_val = 0
mean = 0
sd = 0
if summary is not None:
# Does the summary contain any defined values?
valid_count = summary.valid_count[0]
if summary.valid_count > 0:
# Compute $\mu \pm 2\sigma$ to provide an estimate for upper and lower
# bounds that contain ~95% of the data.
mean = summary.sum_data[0] / valid_count
var = summary.sum_squares[0] - mean
if valid_count > 1:
var /= valid_count - 1
sd = math.sqrt( var )
min_val = summary.min_val[0]
max_val = summary.max_val[0]
return dict( data=dict( min=min_val, max=max_val, mean=mean, sd=sd ) )
def summarize_region( bbi, chrom, start, end, num_points ):
'''
Returns results from summarizing a region using num_points.
NOTE: num_points cannot be greater than end - start or BBI
will return None for all positions.
'''
result = []
# Get summary; this samples at intervals of length
# (end - start)/num_points -- i.e. drops any fractional component
# of interval length.
summary = _summarize_bbi( bbi, chrom, start, end, num_points )
if summary:
#mean = summary.sum_data / summary.valid_count
## Standard deviation by bin, not yet used
## var = summary.sum_squares - mean
## var /= minimum( valid_count - 1, 1 )
## sd = sqrt( var )
pos = start
step_size = (end - start) / num_points
for i in range( num_points ):
result.append( (pos, float_nan( summary.sum_data[i] / summary.valid_count[i] ) ) )
pos += step_size
return result
# Approach is different depending on region size.
num_samples = int( num_samples )
if end - start < num_samples:
# Get values for individual bases in region, including start and end.
# To do this, need to increase end to next base and request number of points.
num_points = end - start + 1
end += 1
else:
#
# The goal is to sample the region between start and end uniformly
# using ~N (num_samples) data points. The challenge is that the size of
            # sampled intervals is rarely a whole number of bases, so sampling
            # using N points will leave the end of the region unsampled due to
            # the remainder of each interval. To rectify this, a new N is
            # calculated based on the
# step size that covers as much of the region as possible.
#
# However, this still leaves some of the region unsampled. This
# could be addressed by repeatedly sampling remainder using a
# smaller and smaller step_size, but that would require iteratively
# going to BBI, which could be time consuming.
#
# Start with N samples.
num_points = num_samples
step_size = ( end - start ) / num_points
# Add additional points to sample in the remainder not covered by
# the initial N samples.
remainder_start = start + step_size * num_points
additional_points = ( end - remainder_start ) / step_size
num_points += additional_points
result = summarize_region( bbi, chrom, start, end, num_points )
# Cleanup and return.
f.close()
return {
'data': result,
'dataset_type': self.dataset_type
}
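# Illustrative sketch (not part of Galaxy): the 'stats' branch of
# BBIDataProvider.get_data() above estimates display bounds as mean +/- 2*sd
# from a single whole-range summary. A textbook computation of ( mean, sd )
# from aggregate statistics looks like the following; note that the branch
# above uses its own approximation for the variance, so the two need not agree.
# ( math is imported at the top of this module. )
def _example_summary_bounds( sum_data, sum_squares, valid_count ):
    """ Return ( mean, sd ) from sum, sum of squares, and count. """
    mean = sum_data / float( valid_count )
    if valid_count > 1:
        var = max( sum_squares - valid_count * mean * mean, 0.0 ) / ( valid_count - 1 )
    else:
        var = 0.0
    return mean, math.sqrt( var )

# Example: values 1, 2, 3 -> sum=6, sum of squares=14, count=3:
#   _example_summary_bounds( 6.0, 14.0, 3 ) -> ( 2.0, 1.0 )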
class BigBedDataProvider( BBIDataProvider ):
def _get_dataset( self ):
# Nothing converts to bigBed so we don't consider converted dataset
f = open( self.original_dataset.file_name )
return f, BigBedFile(file=f)
class BigWigDataProvider ( BBIDataProvider ):
"""
Provides data from BigWig files; position data is reported in 1-based
coordinate system, i.e. wiggle format.
"""
def _get_dataset( self ):
if self.converted_dataset is not None:
f = open( self.converted_dataset.file_name )
else:
f = open( self.original_dataset.file_name )
return f, BigWigFile(file=f)
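# Illustrative sketch (not part of Galaxy): the resampling arithmetic used by
# BBIDataProvider.get_data() above for large regions. Integer division is made
# explicit with // so the example behaves identically under Python 2 and 3.
def _example_num_points( start, end, num_samples ):
    """ Return ( step_size, total_points ) for uniformly sampling [ start, end ). """
    step_size = ( end - start ) // num_samples
    remainder_start = start + step_size * num_samples
    additional_points = ( end - remainder_start ) // step_size
    return step_size, num_samples + additional_points

# Example: a 10,050 bp region sampled with 1000 points has a 10 bp step; the
# trailing 50 bp are covered by 5 extra points:
#   _example_num_points( 0, 10050, 1000 ) -> ( 10, 1005 )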
class IntervalIndexDataProvider( FilterableMixin, GenomeDataProvider ):
"""
Interval index files used for GFF, Pileup files.
"""
col_name_data_attr_mapping = { 4 : { 'index': 4 , 'name' : 'Score' } }
dataset_type = 'interval_index'
def write_data_to_file( self, regions, filename ):
source = open( self.original_dataset.file_name )
index = Indexes( self.converted_dataset.file_name )
out = open( filename, 'w' )
for region in regions:
# Write data from region.
chrom = region.chrom
start = region.start
end = region.end
for start, end, offset in index.find( chrom, start, end ):
source.seek( offset )
# HACK: write differently depending on original dataset format.
if self.original_dataset.ext not in [ 'gff', 'gff3', 'gtf' ]:
line = source.readline()
out.write( line )
else:
reader = GFFReaderWrapper( source, fix_strand=True )
feature = reader.next()
for interval in feature.intervals:
out.write( '\t'.join( interval.fields ) + '\n' )
source.close()
out.close()
def get_iterator( self, chrom, start, end, **kwargs ):
"""
        Returns an iterator over index entries ( start, end, offset ) in the
        region chrom:start-end.
"""
start, end = int(start), int(end)
source = open( self.original_dataset.file_name )
index = Indexes( self.converted_dataset.file_name )
if chrom not in index.indexes:
# Try alternative naming.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
return index.find(chrom, start, end)
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
results = []
message = None
source = open( self.original_dataset.file_name )
#
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <score>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
#
# First three entries are mandatory, others are optional.
#
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
for count, val in enumerate( iterator ):
start, end, offset = val[0], val[1], val[2]
if count < start_val:
continue
if count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
source.seek( offset )
# TODO: can we use column metadata to fill out payload?
# GFF dataset.
reader = GFFReaderWrapper( source, fix_strand=True )
feature = reader.next()
payload = package_gff_feature( feature, no_detail, filter_cols )
payload.insert( 0, offset )
results.append( payload )
return { 'data': results, 'message': message }
class RawGFFDataProvider( GenomeDataProvider ):
"""
Provide data from GFF file that has not been indexed.
NOTE: this data provider does not use indices, and hence will be very slow
for large datasets.
"""
dataset_type = 'interval_index'
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end as well as
a file offset.
"""
source = open( self.original_dataset.file_name )
# Read first line in order to match chrom naming format.
line = source.readline()
# If line empty, assume file is empty and return empty iterator.
if len( line ) == 0:
return iter([])
# Determine chromosome naming format.
dataset_chrom = line.split()[0]
if not _chrom_naming_matches( chrom, dataset_chrom ):
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
# Undo read.
source.seek( 0 )
def features_in_region_iter():
offset = 0
for feature in GFFReaderWrapper( source, fix_strand=True ):
# Only provide features that are in region.
feature_start, feature_end = convert_gff_coords_to_bed( [ feature.start, feature.end ] )
if feature.chrom == chrom and feature_end > start and feature_start < end:
yield feature, offset
offset += feature.raw_size
return features_in_region_iter()
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Process data from an iterator to a format that can be provided to client.
"""
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
results = []
message = None
for count, ( feature, offset ) in enumerate( iterator ):
if count < start_val:
continue
if count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
payload = package_gff_feature( feature, no_detail=no_detail, filter_cols=filter_cols )
payload.insert( 0, offset )
results.append( payload )
return { 'data': results, 'dataset_type': self.dataset_type, 'message': message }
class GtfTabixDataProvider( TabixDataProvider ):
"""
Returns data from GTF datasets that are indexed via tabix.
"""
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
# Loop through lines and group by transcript_id; each group is a feature.
# TODO: extend this code or use code in gff_util to process GFF/3 as well
# and then create a generic GFFDataProvider that can be used with both
# raw and tabix datasets.
features = {}
for count, line in enumerate( iterator ):
line_attrs = parse_gff_attributes( line.split('\t')[8] )
transcript_id = line_attrs[ 'transcript_id' ]
if transcript_id in features:
feature = features[ transcript_id ]
else:
feature = []
features[ transcript_id ] = feature
feature.append( GFFInterval( None, line.split( '\t') ) )
# Process data.
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
results = []
message = None
for count, intervals in enumerate( features.values() ):
if count < start_val:
continue
if count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
feature = GFFFeature( None, intervals=intervals )
payload = package_gff_feature( feature, no_detail=no_detail, filter_cols=filter_cols )
payload.insert( 0, feature.intervals[ 0 ].attributes[ 'transcript_id' ] )
results.append( payload )
return { 'data': results, 'message': message }
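# Illustrative sketch (not part of Galaxy): the grouping step in
# GtfTabixDataProvider.process_data() above collects GTF lines by their
# transcript_id attribute before packaging each group as a feature. A minimal
# standalone version of that grouping, with the attribute parser passed in so
# the sketch does not assume a particular helper:
def _example_group_by_transcript( lines, parse_attrs ):
    """ Group GTF lines into { transcript_id: [ line, ... ] }. """
    features = {}
    for line in lines:
        attrs = parse_attrs( line.split( '\t' )[ 8 ] )
        features.setdefault( attrs[ 'transcript_id' ], [] ).append( line )
    return features

# Usage: _example_group_by_transcript( gtf_lines, parse_gff_attributes ), using
# the same parse_gff_attributes imported at the top of this module.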
#
# -- ENCODE Peak data providers.
#
class ENCODEPeakDataProvider( GenomeDataProvider ):
"""
Abstract class that processes ENCODEPeak data from native format to payload format.
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
def get_iterator( self, chrom, start, end, **kwargs ):
raise "Unimplemented Method"
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
        Process ENCODEPeak data from the iterator into the payload format described above.
"""
## FIXMEs:
# (1) should be able to unify some of this code with BedDataProvider.process_data
        # (2) are variable numbers of optional parameters supported?
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
#
# First three entries are mandatory, others are optional.
#
no_detail = ( "no_detail" in kwargs )
rval = []
message = None
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
feature = line.split()
length = len( feature )
# Feature initialization.
payload = [
# GUID is just a hash of the line
hash( line ),
# Add start, end.
int( feature[1] ),
int( feature[2] )
]
if no_detail:
rval.append( payload )
continue
# Extend with additional data.
payload.extend( [
# Add name, strand.
feature[3],
feature[5],
# Thick start, end are feature start, end for now.
int( feature[1] ),
int( feature[2] ),
# No blocks.
None,
# Filtering data: Score, signalValue, pValue, qValue.
float( feature[4] ),
float( feature[6] ),
float( feature[7] ),
float( feature[8] )
] )
rval.append( payload )
return { 'data': rval, 'message': message }
class ENCODEPeakTabixDataProvider( TabixDataProvider, ENCODEPeakDataProvider ):
"""
Provides data from an ENCODEPeak dataset indexed via tabix.
"""
def get_filters( self ):
"""
Returns filters for dataset.
"""
        # HACK: first 8 fields are for drawing, so start filter column index at 8.
filter_col = 8
filters = []
filters.append( { 'name': 'Score',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c6' } )
filter_col += 1
filters.append( { 'name': 'Signal Value',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c7' } )
filter_col += 1
filters.append( { 'name': 'pValue',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c8' } )
filter_col += 1
filters.append( { 'name': 'qValue',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c9' } )
return filters
#
# -- ChromatinInteraction data providers --
#
class ChromatinInteractionsDataProvider( GenomeDataProvider ):
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
        Process chromatin interaction data from the iterator into payload format.
"""
rval = []
message = None
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "interactions" )
break
feature = line.split()
length = len( feature )
s1 = int( feature[1] )
e1 = int( feature[2] )
c = feature[3]
s2 = int( feature[4] )
e2 = int( feature[5] )
v = float( feature[6] )
# Feature initialization.
payload = [
# GUID is just a hash of the line
hash( line ),
# Add start1, end1, chr2, start2, end2, value.
s1, e1, c, s2, e2, v
]
rval.append( payload )
return { 'data': rval, 'message': message }
def get_default_max_vals( self ):
        return 100000
class ChromatinInteractionsTabixDataProvider( TabixDataProvider, ChromatinInteractionsDataProvider ):
def get_iterator( self, chrom, start=0, end=sys.maxint, interchromosomal=False, **kwargs ):
"""
"""
# Modify start as needed to get earlier interactions with start region.
span = int( end ) - int( start )
filter_start = max( 0, int( start ) - span - span/2 )
def filter( iter ):
for line in iter:
feature = line.split()
s1 = int( feature[1] )
e1 = int( feature[2] )
c = feature[3]
s2 = int( feature[4] )
e2 = int( feature[5] )
                # Check for intrachromosomal interactions.
if ( ( s1 + s2 ) / 2 <= end ) and ( ( e1 + e2 ) / 2 >= start ) and ( c == chrom ):
yield line
                # Check for interchromosomal interactions.
if interchromosomal and c != chrom:
yield line
return filter( TabixDataProvider.get_iterator( self, chrom, filter_start, end ) )
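# Illustrative sketch (not part of Galaxy): the test used by the filter above
# keeps an interaction when the average of its two anchor starts lies at or
# before the window end and the average of its two anchor ends lies at or
# after the window start.
def _example_interaction_in_window( s1, e1, s2, e2, start, end ):
    """ Return True if the anchor-pair midpoints overlap [ start, end ]. """
    return ( s1 + s2 ) / 2.0 <= end and ( e1 + e2 ) / 2.0 >= start

# Example: anchors 1000-2000 and 5000-6000 give midpoints 3000 and 4000, so the
# interaction is kept for a window 2500-3500 but dropped for 4500-5500:
#   _example_interaction_in_window( 1000, 2000, 5000, 6000, 2500, 3500 ) -> True
#   _example_interaction_in_window( 1000, 2000, 5000, 6000, 4500, 5500 ) -> False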
#
# -- Helper methods. --
#
def package_gff_feature( feature, no_detail=False, filter_cols=[] ):
""" Package a GFF feature in an array for data providers. """
feature = convert_gff_coords_to_bed( feature )
# No detail means only start, end.
if no_detail:
return [ feature.start, feature.end ]
# Return full feature.
payload = [ feature.start,
feature.end,
feature.name(),
feature.strand,
# No notion of thick start, end in GFF, so make everything
# thick.
feature.start,
feature.end
]
# HACK: ignore interval with name 'transcript' from feature.
# Cufflinks puts this interval in each of its transcripts,
# and they mess up trackster by covering the feature's blocks.
# This interval will always be a feature's first interval,
# and the GFF's third column is its feature name.
feature_intervals = feature.intervals
if feature.intervals[0].fields[2] == 'transcript':
feature_intervals = feature.intervals[1:]
# Add blocks.
block_sizes = [ (interval.end - interval.start ) for interval in feature_intervals ]
block_starts = [ ( interval.start - feature.start ) for interval in feature_intervals ]
blocks = zip( block_sizes, block_starts )
payload.append( [ ( feature.start + block[1], feature.start + block[1] + block[0] ) for block in blocks ] )
# Add filter data to payload.
for col in filter_cols:
if col == "Score":
if feature.score == 'nan':
payload.append( feature.score )
else:
try:
f = float( feature.score )
payload.append( f )
except:
payload.append( feature.score )
elif col in feature.attributes:
if feature.attributes[col] == 'nan':
payload.append( feature.attributes[col] )
else:
try:
f = float( feature.attributes[col] )
payload.append( f )
except:
payload.append( feature.attributes[col] )
else:
# Dummy value.
payload.append( 0 )
return payload
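# Illustrative sketch (not part of Galaxy): for a two-exon transcript spanning
# 100-500 with exons 100-200 and 400-500 on the + strand, package_gff_feature()
# above returns roughly
#
#   [ 100, 500, 'my_transcript', '+', 100, 500, [ ( 100, 200 ), ( 400, 500 ) ] ]
#
# with any requested filter columns (e.g. Score) appended at the end and the
# caller inserting a uid/offset at position 0. The transcript name here is a
# placeholder.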
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/visualization/data_providers/genome.py | Python | gpl-3.0 | 63,991 | ["Galaxy", "pysam"] | 3fc27af7fee8ca2201bd7a35b6c478c27ab2970e7d642bd9be32d49a273cbe22 |
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
from simstream import PikaAsyncConsumer
settings = {}
with open("../settings.json", 'r') as f:
settings = json.load(f)
settings["routing_key"] = "openmm.log"
def print_log_line(body):
try:
lines = json.loads(body.decode())
if lines is not None:
for line in lines:
print(line)
except json.decoder.JSONDecodeError as e:
print("[Error]: Could not decode %s" % (body))
except UnicodeError as e:
print("[Error]: Could not decode from bytes to string: %s" % (e.reason))
consumer = PikaAsyncConsumer(settings["url"],
settings["exchange"],
"openmm.log", # settings["queue"],
message_handler=print_log_line,
routing_key=settings["routing_key"],
exchange_type=settings["exchange_type"])
if __name__ == "__main__":
try:
consumer.start()
except KeyboardInterrupt:
consumer.stop()
| gouravshenoy/airavata | sandbox/simstream/example/openmm_example/openmm_log_consumer.py | Python | apache-2.0 | 1,822 | ["OpenMM"] | 047cc6cca69f8697caf0274d8e32ab156c788268761ced234ca32e8874fbb898 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This unit test verifies that satellite server synchronization works
It is currently designed to be run from the commcare-hq install dir, like:
<install-dir>/python tests/deployment/testSync.py
TODO - clean this up so that we delete all submissions after each test
"""
""" VARIABLES """
import os
serverhost = 'test.commcarehq.org' #for the actual server
#serverhost = 'localhost:8000'
#serverhost = 'test.commcarehq.org' #for the actual server
#curl_command = 'c:\curl\curl.exe' #if you have curl installed on windows
curl_command = 'curl' #if curl is in your path/linux
filedir = os.path.dirname(__file__)
DATA_DIR = os.path.join( filedir, 'data' )
""" IMPORTS """
import bz2
import urllib2
import tarfile
import httplib
import unittest
import cStringIO
from urlparse import urlparse
from receiver.models import Submission
from receiver.management.commands.generate_submissions import generate_submissions
from receiver.management.commands.load_submissions import load_submissions
from xformmanager.management.commands.sync_schema import generate_schemata, load_schemata
from xformmanager.tests.util import create_xsd_and_populate, populate
from django_rest_interface import util as rest_util
from xformmanager.models import FormDefModel, Metadata
from xformmanager.manager import XFormManager
from xformmanager.xformdef import FormDef
""" TESTS """
class TestSync(unittest.TestCase):
def setUp(self):
self._delete_schema_from_filename("pf_followup.xsd", path = DATA_DIR)
self._delete_schema_from_filename("pf_new_reg.xsd", path = DATA_DIR)
self._delete_schema_from_filename("pf_ref_completed.xsd", path = DATA_DIR)
def test_generate_all_submissions(self):
""" Tests downloading all submissions from self """
# setup
schema_1 = create_xsd_and_populate("pf_followup.xsd", path = DATA_DIR)
submit_1 = populate("pf_followup_1.xml", path = DATA_DIR)
submit_2 = populate("pf_followup_2.xml", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", path = DATA_DIR)
submit_3 = populate("pf_new_reg_1.xml", path = DATA_DIR)
submit_4 = populate("pf_new_reg_2.xml", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", path = DATA_DIR)
submit_5 = populate("pf_ref_completed_1.xml", path = DATA_DIR)
submit_6 = populate("pf_ref_completed_2.xml", path = DATA_DIR)
# download and check
submissions_file = "submissions.tar"
generate_submissions(serverhost, 'brian', 'test', latest=False, download=True, to=submissions_file)
try:
self._assert_tar_count_equals(submissions_file, Submission.objects.all().count())
# cleanup
finally:
# delete all data on self
manager = XFormManager()
submit_1.delete()
submit_2.delete()
submit_3.delete()
submit_4.delete()
submit_5.delete()
submit_6.delete()
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
def test_generate_debug_submissions(self):
""" Tests downloading some submissions from self
This is only useful to make sure that the test_load_diff_submissions test
below is working properly.
"""
#setup
schema_1 = create_xsd_and_populate("pf_followup.xsd", \
"pf_followup_1.xml", path = DATA_DIR)
populate("pf_followup_2.xml", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", \
"pf_new_reg_1.xml", path = DATA_DIR)
populate("pf_new_reg_2.xml", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", \
"pf_ref_completed_1.xml", path = DATA_DIR)
populate("pf_ref_completed_2.xml", path = DATA_DIR)
# the 'debug' flag limits the generated MD5s to a count of 5
submissions_file = "submissions.tar"
# debug means we only post 5 submissions (instead of all)
generate_submissions(serverhost, 'brian', 'test', debug=True, download=True, to=submissions_file)
try:
self._assert_tar_count_equals(submissions_file, Submission.objects.all().count()-5)
# cleanup
finally:
# delete all data on self
manager = XFormManager()
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
"""
ro - We do not want to run this unit test on every build, since it's
going to generate something like 200 duplicate submission errors =b.
To do this cleanly, we would delete all existing submissions from the db
before running this, but silently wiping the db on the local machine
is probably going to cause more headache than it saves.
So we comment this test case out for now (most functionality is duplicated
in test_generate_all_submissions anyways), but we can always add this
back in later
def test_sync_all_submissions(self):
"" Tests synchronizing all data from self (no MD5s posted) ""
manager = XFormManager()
# load data
schema_1 = create_xsd_and_populate("pf_followup.xsd", \
"pf_followup_1.xml", path = DATA_DIR)
populate("pf_followup_2.xml", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", \
"pf_new_reg_1.xml", path = DATA_DIR)
populate("pf_new_reg_2.xml", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", \
"pf_ref_completed_1.xml", path = DATA_DIR)
populate("pf_ref_completed_2.xml", path = DATA_DIR)
populate("pf_ref_completed_3.xml", path = DATA_DIR)
starting_submissions_count = Submission.objects.all().count()
# get sync file from self
submissions_file = "submissions.tar"
generate_submissions(serverhost, 'brian', 'test', latest=False, download=True, to=submissions_file)
# delete all data on self
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
# add schemas back
schema_1 = create_xsd_and_populate("pf_followup.xsd", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", path = DATA_DIR)
# load data from sync file
load_submissions(submissions_file)
try:
# verify that the submissions etc. count are correct
self.assertEqual( starting_submissions_count, Submission.objects.all().count())
finally:
# clean up
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
"""
def test_sync_some_submissions(self):
""" Tests synchronizing some data from self (posts a few MD5s) """
manager = XFormManager()
# populate some files
schema_1 = create_xsd_and_populate("pf_followup.xsd", "pf_followup_1.xml", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", "pf_new_reg_1.xml", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", "pf_ref_completed_1.xml", path = DATA_DIR)
# get MD5 of all current submissions
MD5_buffer = rest_util.get_field_as_bz2(Submission, 'checksum')
# populate a few more files
submit_1 = populate("pf_followup_2.xml", path = DATA_DIR)
submit_2 = populate("pf_new_reg_2.xml", path = DATA_DIR)
submit_3 = populate("pf_ref_completed_2.xml", path = DATA_DIR)
submit_4 = populate("pf_ref_completed_3.xml", path = DATA_DIR)
starting_submissions_count = Submission.objects.all().count()
starting_schemata_count = FormDefModel.objects.all().count()
# get the difference between the first 3 files and the current
# set of files (i.e. the last 4 files)
submissions_file = "submissions.tar"
self._POST_MD5s(MD5_buffer, submissions_file)
# save checksums and delete the ones just populated (d,e,f)
        checksums = [ submit_1.checksum, submit_2.checksum, submit_3.checksum, submit_4.checksum ]
manager.remove_data(schema_1.id, Metadata.objects.get(attachment=submit_1.xform).raw_data, \
remove_submission = True)
manager.remove_data(schema_2.id, Metadata.objects.get(attachment=submit_2.xform).raw_data, \
remove_submission = True)
manager.remove_data(schema_3.id, Metadata.objects.get(attachment=submit_3.xform).raw_data, \
remove_submission = True)
manager.remove_data(schema_3.id, Metadata.objects.get(attachment=submit_4.xform).raw_data, \
remove_submission = True)
# load data from sync file (d,e,f)
load_submissions(submissions_file, "127.0.0.1:8000")
try:
# verify that the submissions etc. count are correct (d,e,f)
self.assertEqual( starting_submissions_count, Submission.objects.all().count())
submits = Submission.objects.all().order_by('-submit_time')[:4]
# verify that the correct submissions were loaded
Submission.objects.get(checksum=checksums[0])
Submission.objects.get(checksum=checksums[1])
Submission.objects.get(checksum=checksums[2])
Submission.objects.get(checksum=checksums[3])
except Submission.DoesNotExist:
self.fail("Incorrect submission received")
finally:
# clean up
manager = XFormManager()
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
def test_sync_dupe_submissions(self):
""" Tests synchronizing duplicate data from self"""
manager = XFormManager()
# populate some files
schema_1 = create_xsd_and_populate("pf_followup.xsd", "pf_followup_1.xml", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", "pf_new_reg_1.xml", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", "pf_ref_completed_1.xml", path = DATA_DIR)
starting_submissions_count = Submission.objects.all().count()
# <STATE 1/>
# get MD5 of 3 populated files
MD5_buffer = rest_util.get_field_as_bz2(Submission, 'checksum')
# add 3 dupes and 1 new file
submit_1 = populate("pf_followup_1.xml", path = DATA_DIR)
submit_2 = populate("pf_new_reg_1.xml", path = DATA_DIR)
submit_3 = populate("pf_ref_completed_1.xml", path = DATA_DIR)
# <STATE 2/>
submissions_file = "submissions.tar"
self._POST_MD5s(MD5_buffer, submissions_file)
self._assert_tar_count_equals(submissions_file, 0)
submit_4 = populate("pf_ref_completed_3.xml", path = DATA_DIR)
# <STATE 3/>
# get the difference between state 1 and state 3
self._POST_MD5s(MD5_buffer, submissions_file)
# save checksum and delete the ones just populated
checksum_4 = submit_4.checksum
submit_1.delete()
submit_2.delete()
submit_3.delete()
submit_4.delete()
        # should get back only the one new submission
self._assert_tar_count_equals(submissions_file, 1)
# load data from sync file (d,e,f)
load_submissions(submissions_file, "127.0.0.1:8000")
try:
# verify that we only have 4 submissions
self.assertEqual( starting_submissions_count+1, Submission.objects.all().count() )
Submission.objects.get(checksum=checksum_4)
except Submission.DoesNotExist:
self.fail("Incorrect submission received")
finally:
# clean up
manager = XFormManager()
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
def test_sync_weird_submissions(self):
""" Tests synchronizing some data from self (posts a few MD5s) """
# setup - if we don't do this, we just get back "no submissions found"
manager = XFormManager()
# populate some files
schema_1 = create_xsd_and_populate("pf_followup.xsd", "pf_followup_1.xml", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", "pf_new_reg_1.xml", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", "pf_ref_completed_1.xml", path = DATA_DIR)
submissions_count = Submission.objects.count()
url = 'http://%s/api/submissions/' % (serverhost)
up = urlparse(url)
conn = httplib.HTTPConnection(up.netloc)
# test posting junk md5
MD5_buffer = "sadfndan;ofansdn"
conn.request('POST', up.path, MD5_buffer, {'Content-Type': 'application/bz2', 'User-Agent': 'CCHQ-submitfromfile-python-v0.1'})
response = conn.getresponse().read()
self.assertTrue( response.lower().find('poorly formatted') != -1 )
# test posting non-existent md5s
md5 = "e402f026c762a6bc999f9f2703efd367\n"
bz2_md5 = bz2.compress(md5)
submissions_file = "submissions.tar"
self._POST_MD5s(bz2_md5, submissions_file)
        # should get back all submissions, since the posted MD5 matches nothing
self._assert_tar_count_equals(submissions_file, submissions_count)
# test posting duplicate md5s
string = cStringIO.StringIO()
submits = Submission.objects.all().order_by('checksum')[:2]
for submit in submits:
string.write(unicode( submit.checksum ) + '\n')
string.write(unicode( submit.checksum ) + '\n')
MD5s = string.getvalue()
dupe_buffer = bz2.compress(MD5s)
submissions_file = "submissions.tar"
self._POST_MD5s(dupe_buffer, submissions_file)
self._assert_tar_count_equals(submissions_file, submissions_count-2)
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
def test_sync_no_submissions(self):
""" Tests synchronizing no data from self (posts all MD5s) """
manager = XFormManager()
# load data
schema_1 = create_xsd_and_populate("pf_followup.xsd", \
"pf_followup_1.xml", path = DATA_DIR)
populate("pf_followup_2.xml", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", \
"pf_new_reg_1.xml", path = DATA_DIR)
populate("pf_new_reg_2.xml", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", \
"pf_ref_completed_1.xml", path = DATA_DIR)
populate("pf_ref_completed_2.xml", path = DATA_DIR)
populate("pf_ref_completed_3.xml", path = DATA_DIR)
starting_submissions_count = Submission.objects.all().count()
# get sync file from self
submissions_file = "submissions.tar"
generate_submissions(serverhost, 'brian', 'test', download=True, to=submissions_file)
# test that the received submissions file is empty
self._assert_tar_count_equals(submissions_file, 0)
load_submissions(submissions_file, "127.0.0.1:8000")
try:
# verify that no new submissions were loaded
self.assertEqual( starting_submissions_count, Submission.objects.all().count())
finally:
# clean up
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
    # a lot of the code below is similar to the code above - should modularize better
def test_sync_all_schemata(self):
""" Tests synchronizing all schemata from self (no xmlns posted) """
manager = XFormManager()
# load data
schema_1 = create_xsd_and_populate("pf_followup.xsd", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", path = DATA_DIR)
starting_schemata_count = FormDefModel.objects.all().count()
# get sync file from self
schemata_file = "schemata.tar"
generate_schemata(serverhost, 'brian', 'test', latest=False, download=True, to=schemata_file)
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
# load data from sync file
load_schemata(schemata_file, "127.0.0.1:8000")
try:
# verify that the submissions etc. count are correct
self.assertEqual( starting_schemata_count, FormDefModel.objects.all().count())
finally:
# clean up
self._delete_schema_from_filename("pf_followup.xsd", path = DATA_DIR)
self._delete_schema_from_filename("pf_new_reg.xsd", path = DATA_DIR)
self._delete_schema_from_filename("pf_ref_completed.xsd", path = DATA_DIR)
def test_sync_some_schemata(self):
""" Tests synchronizing some schemata from self (posts a few xmlns) """
manager = XFormManager()
# populate some files
schema_1 = create_xsd_and_populate("pf_followup.xsd", path = DATA_DIR)
# get xmlns of populated schemas
xmlns_buffer = rest_util.get_field_as_bz2(FormDefModel, 'target_namespace')
# populate a few more schema
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", path = DATA_DIR)
starting_schemata_count = FormDefModel.objects.all().count()
# get the difference between the first schema and current state
schemata_file = "schemata.tar"
self._POST_XMLNS(xmlns_buffer, schemata_file)
# delete the ones just populated (d,e,f)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
# load data from sync file (d,e,f)
load_schemata(schemata_file, "127.0.0.1:8000")
try:
# verify that the schematas etc. count are correct (d,e,f)
self.assertEqual( starting_schemata_count, FormDefModel.objects.all().count())
self._assert_schema_registered("pf_followup.xsd", DATA_DIR)
self._assert_schema_registered("pf_new_reg.xsd", DATA_DIR)
self._assert_schema_registered("pf_ref_completed.xsd", DATA_DIR)
finally:
# clean up
manager = XFormManager()
manager.remove_schema(schema_1.id, remove_submissions = True)
self._delete_schema_from_filename("pf_new_reg.xsd", path = DATA_DIR)
self._delete_schema_from_filename("pf_ref_completed.xsd", path = DATA_DIR)
def test_sync_weird_schemata(self):
""" Tests synchronizing some data from self (posts a few MD5s) """
# setup - if we don't do this, we just get back "no submissions found"
manager = XFormManager()
# populate some files
starting_schemata_count = FormDefModel.objects.count()
schema_1 = create_xsd_and_populate("pf_followup.xsd", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", path = DATA_DIR)
url = 'http://%s/api/xforms/?format=sync' % (serverhost)
up = urlparse(url)
conn = httplib.HTTPConnection(up.netloc)
# test posting junk namespace
namespace_buffer = "sadfndan;ofansdn"
conn.request('POST', up.path, namespace_buffer, {'Content-Type': 'application/bz2', 'User-Agent': 'CCHQ-submitfromfile-python-v0.1'})
response = conn.getresponse().read()
self.assertTrue( response.lower().find('poorly formatted') != -1 )
# test posting non-existent namespaces
namespace = "http://zilch.com"
bz2_namespace = bz2.compress(namespace)
schemata_file = "schemata.tar"
self._POST_XMLNS(bz2_namespace, schemata_file)
# should get all the schemas back
self._assert_tar_count_equals(schemata_file, starting_schemata_count+3)
# test posting duplicate namespaces
string = cStringIO.StringIO()
formdefs = FormDefModel.objects.all().order_by('target_namespace')[:2]
for formdef in formdefs:
string.write(unicode( formdef.target_namespace ) + '\n')
string.write(unicode( formdef.target_namespace ) + '\n')
dupe_buffer = bz2.compress(string.getvalue())
self._POST_XMLNS(dupe_buffer, schemata_file)
self._assert_tar_count_equals(schemata_file, starting_schemata_count+1)
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
def test_sync_no_schemata(self):
""" Tests synchronizing no data from self (posts all MD5s) """
manager = XFormManager()
# load data
schema_1 = create_xsd_and_populate("pf_followup.xsd", path = DATA_DIR)
schema_2 = create_xsd_and_populate("pf_new_reg.xsd", path = DATA_DIR)
schema_3 = create_xsd_and_populate("pf_ref_completed.xsd", path = DATA_DIR)
# get sync file from self
schemata_file = 'schemata.tar'
generate_schemata(serverhost, 'brian', 'test', download=True, to=schemata_file)
# test that the received schemata file is empty
self._assert_tar_count_equals(schemata_file, 0)
starting_schemata_count = FormDefModel.objects.all().count()
load_schemata(schemata_file, "127.0.0.1:8000")
try:
# verify that no new schemata were loaded
self.assertEqual( starting_schemata_count, FormDefModel.objects.all().count())
finally:
# clean up
manager.remove_schema(schema_1.id, remove_submissions = True)
manager.remove_schema(schema_2.id, remove_submissions = True)
manager.remove_schema(schema_3.id, remove_submissions = True)
def tearDown(self):
pass
def _assert_tar_count_equals(self, file_name, count):
if not tarfile.is_tarfile(file_name):
            # Maybe it's not a tar because it's a status message.
fin = open(file_name, 'r')
contents = fin.read(256)
fin.close()
if contents.lower().find("no ") != -1:
self.assertEqual( 0, count)
return
raise Exception("%s is not a tar file" % file_name)
tar = tarfile.open(file_name)
tmp_dir = "unit_test_tmp"
if os.path.exists(tmp_dir):
filenames = os.listdir(tmp_dir)
for file in filenames:
os.remove(os.path.join(tmp_dir, file))
os.rmdir(tmp_dir)
os.mkdir(tmp_dir)
tar.extractall(path=tmp_dir)
tar.close()
filenames = os.listdir(tmp_dir)
try:
self.assertEqual( len(filenames), count)
finally:
# clean up
for file in filenames:
os.remove(os.path.join(tmp_dir, file))
os.rmdir(tmp_dir)
def _assert_schema_registered(self, schema, path):
schema = open(os.path.join(path, schema), 'r')
formdef = FormDef(schema)
schema.close()
try:
formdef = FormDefModel.objects.get(target_namespace=formdef.target_namespace)
except FormDefModel.DoesNotExist:
self.fail("%s schema not registered!" % formdef.target_namespace)
return
def _delete_schema_from_filename(self, file_name, path):
schema = open(os.path.join(path, file_name), 'r')
formdef = FormDef(schema)
schema.close()
try:
formdef = FormDefModel.objects.get(target_namespace=formdef.target_namespace)
except FormDefModel.DoesNotExist:
return
manager = XFormManager()
manager.remove_schema(formdef.id, remove_submissions=True)
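    # Helper: POSTs a bz2-compressed MD5 list to the submissions API and writes the raw
    # response body (a tarball or a plain status message) to output_file.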
def _POST_MD5s(self, MD5_buffer, output_file):
url = 'http://%s/api/submissions/' % (serverhost)
up = urlparse(url)
conn = httplib.HTTPConnection(up.netloc)
conn.request('POST', up.path, MD5_buffer, {'Content-Type': 'application/bz2', 'User-Agent': 'CCHQ-submitfromfile-python-v0.1'})
response = conn.getresponse()
fout = open(output_file, 'w+b')
fout.write(response.read())
fout.close()
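    # Helper: POSTs a bz2-compressed list of xform namespaces to the xforms sync API and
    # writes the raw response body to output_file.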
def _POST_XMLNS(self, xmlns_buffer, output_file):
url = 'http://%s/api/xforms/?format=sync' % (serverhost)
up = urlparse(url)
conn = httplib.HTTPConnection(up.netloc)
conn.request('POST', up.path, xmlns_buffer, {'Content-Type': 'application/bz2', 'User-Agent': 'CCHQ-submitfromfile-python-v0.1'})
response = conn.getresponse()
fout = open(output_file, 'w+b')
fout.write(response.read())
fout.close()
def run():
suite = unittest.TestLoader().loadTestsFromTestCase(TestSync)
unittest.TextTestRunner(verbosity=2).run(suite)
| commtrack/temp-aquatest | tests/deployment/testSync.py | Python | bsd-3-clause | 26,984 | ["Brian"] | ff0f6a83cb139bd419e5850f13480ee323b955006e848dfc70fc24162bf1946d |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import skbio
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
from ._grammared_sequence import GrammaredSequence, DisableSubclassingMeta
class DNA(GrammaredSequence, NucleotideMixin,
metaclass=DisableSubclassingMeta):
"""Store DNA sequence data and optional associated metadata.
Only characters in the IUPAC DNA character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the DNA sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC DNA characters. If
``False``, no characters will be converted. If a str, it will be
treated as a key into the positional metadata of the object. All
lowercase characters will be converted to uppercase, and a ``True``
value will be stored in a boolean array in the positional metadata
under the key.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC DNA character set. If ``False``, validation
will not be performed. Turning off validation will improve runtime
performance. If invalid characters are present, however, there is
**no guarantee that operations performed on the resulting object will
work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
default_gap_char
definite_chars
degenerate_chars
degenerate_map
complement_map
See Also
--------
RNA
GrammaredSequence
Notes
-----
Subclassing is disabled for DNA, because subclassing makes
it possible to change the alphabet, and certain methods rely on the
IUPAC alphabet. If a custom sequence alphabet is needed, inherit directly
from ``GrammaredSequence``.
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import DNA
>>> DNA('ACCGAAT')
DNA
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
GC-content: 42.86%
--------------------------
0 ACCGAAT
Convert lowercase characters to uppercase:
>>> DNA('AcCGaaT', lowercase=True)
DNA
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
GC-content: 42.86%
--------------------------
0 ACCGAAT
"""
@classproperty
@overrides(NucleotideMixin)
def complement_map(cls):
comp_map = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
'H': 'D', 'V': 'B', 'N': 'N'
}
comp_map.update({c: c for c in cls.gap_chars})
return comp_map
@classproperty
@overrides(GrammaredSequence)
def definite_chars(cls):
return set("ACGT")
@classproperty
@overrides(GrammaredSequence)
def degenerate_map(cls):
return {
"R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
"W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
"H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
}
@classproperty
@overrides(GrammaredSequence)
def default_gap_char(cls):
return '-'
@classproperty
@overrides(GrammaredSequence)
def gap_chars(cls):
return set('-.')
@property
def _motifs(self):
return _motifs
@stable(as_of="0.4.0")
def transcribe(self):
"""Transcribe DNA into RNA.
DNA sequence is assumed to be the coding strand. Thymine (T) is
replaced with uracil (U) in the transcribed sequence.
Returns
-------
RNA
Transcribed sequence.
See Also
--------
translate
translate_six_frames
Notes
-----
DNA sequence's metadata and positional metadata are included in the
transcribed RNA sequence.
Examples
--------
Transcribe DNA into RNA:
>>> from skbio import DNA
>>> dna = DNA('TAACGTTA')
>>> dna
DNA
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
GC-content: 25.00%
--------------------------
0 TAACGTTA
>>> dna.transcribe()
RNA
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
GC-content: 25.00%
--------------------------
0 UAACGUUA
"""
seq = self._string.replace(b'T', b'U')
# turn off validation because `seq` is guaranteed to be valid
return skbio.RNA(seq, metadata=self.metadata,
positional_metadata=self.positional_metadata,
validate=False)
@stable(as_of="0.4.0")
def translate(self, *args, **kwargs):
"""Translate DNA sequence into protein sequence.
DNA sequence is assumed to be the coding strand. DNA sequence is first
transcribed into RNA and then translated into protein.
Parameters
----------
args : tuple
Positional arguments accepted by ``RNA.translate``.
kwargs : dict
Keyword arguments accepted by ``RNA.translate``.
Returns
-------
Protein
Translated sequence.
See Also
--------
RNA.reverse_transcribe
RNA.translate
translate_six_frames
transcribe
Notes
-----
DNA sequence's metadata are included in the translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate DNA into protein using NCBI's standard genetic code (table ID
1, the default genetic code in scikit-bio):
>>> from skbio import DNA
>>> dna = DNA('ATGCCACTTTAA')
>>> dna.translate()
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
Translate the same DNA sequence using a different NCBI genetic code
(table ID 3, the yeast mitochondrial code) and specify that translation
must terminate at the first stop codon:
>>> dna.translate(3, stop='require')
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 MPT
"""
return self.transcribe().translate(*args, **kwargs)
@stable(as_of="0.4.0")
def translate_six_frames(self, *args, **kwargs):
"""Translate DNA into protein using six possible reading frames.
DNA sequence is assumed to be the coding strand. DNA sequence is first
transcribed into RNA and then translated into protein. The six possible
reading frames are:
* 1 (forward)
* 2 (forward)
* 3 (forward)
* -1 (reverse)
* -2 (reverse)
* -3 (reverse)
Translated sequences are yielded in this order.
Parameters
----------
args : tuple
Positional arguments accepted by ``RNA.translate_six_frames``.
kwargs : dict
Keyword arguments accepted by ``RNA.translate_six_frames``.
Yields
------
Protein
Translated sequence in the current reading frame.
See Also
--------
RNA.translate_six_frames
translate
transcribe
Notes
-----
This method is faster than (and equivalent to) performing six
independent translations using, for example:
``(seq.translate(reading_frame=rf)
for rf in GeneticCode.reading_frames)``
DNA sequence's metadata are included in each translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate DNA into protein using the six possible reading frames and
NCBI's standard genetic code (table ID 1, the default genetic code in
scikit-bio):
>>> from skbio import DNA
>>> dna = DNA('ATGCCACTTTAA')
>>> for protein in dna.translate_six_frames():
... protein
... print('')
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 CHF
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 ATL
<BLANKLINE>
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 LKWH
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 *SG
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 KVA
<BLANKLINE>
"""
return self.transcribe().translate_six_frames(*args, **kwargs)
@overrides(GrammaredSequence)
def _repr_stats(self):
"""Define custom statistics to display in the sequence's repr."""
stats = super(DNA, self)._repr_stats()
stats.append(('GC-content', '{:.2%}'.format(self.gc_content())))
return stats
_motifs = _parent_motifs.copy()
# Leave this at the bottom
_motifs.interpolate(DNA, "find_motifs")
| anderspitman/scikit-bio | skbio/sequence/_dna.py | Python | bsd-3-clause | 12,313 | ["scikit-bio"] | 341ed6fac9ad6c7416e2d98bc5f4cdb3335f2903b2249a5546eae5c9954ba1bc |
import psi4
import numpy as np
from psi4.driver.p4util.solvers import davidson_solver
from psi4.driver.procrouting.response.scf_products import SCFProducts
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.set_options({"SAVE_JK": True})
psi4.set_options({"e_convergence": 1.e-1, "d_convergence": 1.e-1})
# psi4.set_options({"reference": "uhf"})
e, wfn = psi4.energy("HF/6-31G", return_wfn=True)
nmo = wfn.nmopi().sum()
ndocc = wfn.doccpi().sum()
nvir = nmo - ndocc
nrot = ndocc * nvir
wfn.form_D()
record = [(False, wfn.compute_E())]
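# Stability-following loop: each iteration finds the lowest eigenpair of the (negated)
# orbital-rotation Hessian with the Davidson solver; when an instability is detected
# (negative eigenvalue, or forced on the first pass) the orbitals are rotated along that
# eigenvector, and the density/Fock matrices are then rebuilt and the energy recorded.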
for x in range(5):
prod = SCFProducts(wfn)
def func(vector):
return -1 * prod.H1_product(vector)
def precon(resid, i, A_w):
return resid
nvecs = 5
guess = np.ones((prod.narot, nvecs))
evals, evecs = davidson_solver(func, precon, guess, no_eigs=nvecs, e_conv=1.e-4)
    # If x == 0 we take a "bad" step, requiring a rotation on the next step to get back on track
if (x == 0) or (evals[0] < 0):
stab = True
evecs = -evecs[:, 0]
rot = psi4.core.Matrix.from_array(evecs.reshape(ndocc, nvir))
wfn.rotate_orbitals(wfn.Ca(), rot)
else:
stab = False
wfn.form_C()
wfn.form_D()
wfn.form_G()
wfn.form_F()
print(wfn.compute_E())
record.append((stab, wfn.compute_E()))
for stab, energy in record:
print("% 8s % 14.10f" % (stab, energy))
| jgonthier/psi4 | tests/python/tddft/stab.py | Python | lgpl-3.0 | 1,400 | ["Psi4"] | 5425f5e80f7cd70064033be27f014e5717ccade60e241041f94dc5fc66d97203 |
from __future__ import division
from ase import Atoms, Atom
from multiasecalc.lammps import Bonds
import numpy as np
from numpy import pi, cos, sin, sqrt, dot
import random
random.seed(None)
np.random.seed(None)
from itertools import combinations, product
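# Distance-geometry embedding: build upper/lower distance bounds from the bond topology
# (1-2 bond lengths, 1-3 and 1-4 distances, van der Waals minima), smooth them with the
# triangle inequality, sample random distances within the bounds, form the metric (Gram)
# matrix and place the atoms in 3D using its three largest eigenvalues/eigenvectors.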
def distance_geometry(mol, bond_matrix, unit_indices, characteristic_ratio=8.3):
N = len(mol)
BOND_LENGTH = 1.6
BOND_ANGLE = 113*np.pi/180
DIST_1_3 = BOND_LENGTH * sqrt(2 - 2*cos(BOND_ANGLE))
MIN_DIST_1_4 = BOND_LENGTH * (1 + 2*cos(pi-BOND_ANGLE))
MAX_DIST_1_4 = 2 * sqrt(BOND_LENGTH**2 + (BOND_LENGTH/2)**2 - 2 * BOND_LENGTH**2 /2 * cos(BOND_ANGLE))
MIN_VDW_DIST = 1.6*2
MAX_DIST = 99
print 'Create distance matrix'
"""bond_matrix = np.zeros((N,N), dtype = bool)
for i in range(N):
for j in bonds[i]:
bond_matrix[i,j] = True
"""
bonds12 = bond_matrix
bonds13 = dot(bonds12, bond_matrix)
bonds14 = dot(bonds13, bond_matrix)
Dmax = np.ones((N, N))*MAX_DIST
Dmin = np.ones((N, N))*MIN_VDW_DIST
Dmax[np.where(bonds14)] = MAX_DIST_1_4
Dmin[np.where(bonds14)] = MIN_DIST_1_4
Dmax[np.where(bonds13)] = DIST_1_3
Dmin[np.where(bonds13)] = DIST_1_3
Dmax[np.where(bonds12)] = BOND_LENGTH
Dmin[np.where(bonds12)] = BOND_LENGTH
D = np.triu(Dmax, k=1) + np.tril(Dmin, k=-1)
print D
print 'Triangle smoothing'
triangle_smooth(D)
#print D
print 'Generate coarse chain'
N_monomers = len(mol[unit_indices])
kuhn_length = BOND_LENGTH * characteristic_ratio / cos(BOND_ANGLE/2)
N_coarse = N_monomers * cos(BOND_ANGLE/2)**2 / characteristic_ratio
coarse_segment_units = int(N_monomers/N_coarse)
print 'Coarse chain length:', N_coarse
print 'kuhn length =', kuhn_length
print coarse_segment_units, 'polymer units in a coarse unit'
coarse_indices = slice(unit_indices.start, None, unit_indices.step*coarse_segment_units)
D_coarse = D[coarse_indices, coarse_indices]
r_mean = kuhn_length
gaussian_matrix = gaussian_chain(D_coarse, r_mean)
print gaussian_matrix
D[coarse_indices, coarse_indices] = gaussian_matrix
triangle_smooth(D)
print 'Random generation'
for i, j in combinations(range(N), 2):
val = random.uniform(D[i,j], D[j,i])
D[i,j] = val
D[j,i] = val
#print D
print 'Metric matrix'
d0 = D[0,:]
#print np.mean(D**2, 1) - 1.0/N**2 * np.sum(np.triu(D, k=1)**2)
#d0 = sqrt( np.mean(D**2, 1) - 1.0/N**2 * np.sum(np.triu(D, k=1)**2) )
d0h, d0v = np.meshgrid(d0, d0)
G = (d0h**2 + d0v**2 - D**2) / 2
#print G
print 'Eigenvalues of metric matrix'
evals, evecs = np.linalg.eig(G)
descending_order = np.argsort(evals)[::-1]
evals = evals[descending_order]
evecs = evecs[:,descending_order]
#print evals
#print 'Eigenvectors'
#print evecs
evals3 = np.append(evals[:3], np.zeros(len(evals)-3))
X = dot(evecs[:,:3], np.diag(sqrt(evals[:3])))
mol.positions = X
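# Triangle-inequality smoothing of the bounds matrix D, where the upper triangle holds
# upper bounds and the lower triangle holds lower bounds; both are tightened iteratively
# until no entry changes.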
def triangle_smooth(D):
N = D.shape[0]
Dmax = np.triu(D)
Dmax = Dmax + Dmax.T
Dmin = np.tril(D)
Dmin = Dmin + Dmin.T
changed = True
while changed:
#print 'upper interation'
changed = False
for i in range(1,N):
row = Dmax[i,:]
M = Dmax + np.repeat(np.reshape(row, (N,1)), N, axis=1)
new_row = np.min(M, axis=0)
if any(new_row < row):
new_row = np.min([row, new_row], axis=0)
changed = True
Dmax[i,:] = new_row
Dmax[:,i] = new_row
"""
while changed:
changed = False
for a, b in combinations(range(N), 2):
bound = np.min(Dmax[a,:]+Dmax[b,:])
if bound < Dmax[a,b]:
Dmax[a,b] = bound
changed = True
"""
"""
for a, b, c in combinations(range(N), 3):
acmax = D[a,b] + D[b,c]
if D[a,c] > acmax:
D[a,c] = acmax
changed = True
abmax = D[a,c] + D[b,c]
if D[a,b] > abmax:
D[a,b] = abmax
changed = True
bcmax = D[a,b] + D[a,c]
if D[b,c] > bcmax:
D[b,c] = bcmax
changed = True
"""
changed = True
while changed:
#print 'lower iteration'
changed = False
for i in range(1,N):
maxrow = Dmax[i,:]
minrow = Dmin[i,:]
M1 = Dmin - np.repeat(np.reshape(maxrow, (N,1)), N, axis=1)
M2 = np.repeat(np.reshape(minrow, (N,1)), N, axis=1) - Dmax
new_row1 = np.max(M1, axis=0)
new_row2 = np.max(M2, axis=0)
new_row = np.max([new_row1, new_row2, minrow], axis=0)
if any(new_row != minrow):
changed = True
Dmin[i,:] = new_row
Dmin[:,i] = new_row
"""
while changed:
changed = False
for a, b in combinations(range(N), 2):
bound = np.max([Dmin[a,:]-Dmax[b,:], Dmin[b,:]-Dmax[a,:]])
if bound > Dmin[b,a]:
Dmin[b,a] = bound
changed = True
"""
"""
while changed:
changed = False
for a, b, c in combinations(range(N), 3):
acmin = max(D[b,a] - D[b,c], D[c,b] - D[a,b])
if D[c,a] < acmin:
D[c,a] = acmin
changed = True
abmin = max(D[c,a] - D[b,c], D[c,b] - D[a,c])
if D[b,a] < abmin:
D[b,a] = abmin
changed = True
bcmin = max(D[b,a] - D[a,c], D[c,a] - D[a,b])
if D[c,b] < bcmin:
D[c,b] = bcmin
changed = True
"""
D[:] = np.tril(Dmin) + np.triu(Dmax)
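# Random walk with Gaussian steps (sigma = r_mean/sqrt(3) per component); steps that
# violate the upper/lower distance bounds are rejected, and the pairwise distance matrix
# of the accepted chain is returned.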
def gaussian_chain(dist_matrix, r_mean):
N = dist_matrix.shape[0]
pos = np.zeros((3))
points = np.zeros((N, 3))
for i in range(1,N):
ok = False
while not ok:
step = np.random.normal(0, r_mean/sqrt(3), size=3)
points[i,:] = points[i-1,:] + step
D_new = distance_matrix(points)[:i+1,:i+1]
too_far = np.any(np.triu(D_new) > np.triu(dist_matrix[:i+1,:i+1]))
too_close = np.any(np.tril(D_new) < np.tril(dist_matrix[:i+1,:i+1]))
ok = not too_far and not too_close
return D_new
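# Pairwise Euclidean distances from the Gram matrix: D_ij**2 = |x_i|**2 + |x_j|**2 - 2*x_i.x_j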
def distance_matrix(coordinates):
metric = dot(coordinates, coordinates.T)
sq_pos = np.diag(metric)
sq_pos_x, sq_pos_y = np.meshgrid(sq_pos, sq_pos)
D2 = sq_pos_x + sq_pos_y - 2*metric
return sqrt(D2)
def create_PVA(units):
start_group = Atoms('H')
start_bonds = np.matrix('1 1; 1 1')
unit = Atoms('CH2CHOH')
unit_bonds = np.matrix("""
1 1 1 1 0 0 0 0 ;
1 1 0 0 0 0 0 0 ;
1 0 1 0 0 0 0 0 ;
1 0 0 1 1 1 0 1 ;
0 0 0 1 1 0 0 0 ;
0 0 0 1 0 1 1 0 ;
0 0 0 0 0 1 1 0 ;
0 0 0 1 0 0 0 0
""")
unit_carbons = np.array([0, 3])
end_group = Atoms('CH3')
end_bonds = np.matrix("""
1 1 1 1 ;
1 1 0 0 ;
1 0 1 0 ;
1 0 0 1
""")
N = len(start_group) + units*len(unit) + len(end_group)
atoms = start_group.copy()
bonds = np.zeros((N,N), dtype=int)
bonds[:2,:2] = start_bonds
for i in range(units):
n = len(atoms)
atoms.extend(unit)
bonds[n:n+len(unit)+1, n:n+len(unit)+1] = unit_bonds
n = len(atoms)
atoms.extend(end_group)
bonds[n:n+len(end_group), n:n+len(end_group)] = end_bonds
unit_indices = slice(len(start_group), None, len(unit))
return atoms, bonds, unit_indices
def build_PVA(N):
mol, bond_matrix, backbone_carbons = create_PVA(N)
distance_geometry(mol, bond_matrix, backbone_carbons)
N = len(mol)
bonds = np.array(np.where(np.triu(bond_matrix, 1))).T
mol.info['bonds'] = Bonds(mol, pairs=bonds)
from multiasecalc.lammps.compass import COMPASS
from multiasecalc.lammps.dynamics import LAMMPSOptimizer
from multiasecalc.utils import get_datafile
mol.calc = COMPASS(get_datafile('compass.frc'), parameters=dict(extra_cmds=['communicate single cutoff 80']), debug=True)
dyn = LAMMPSOptimizer(mol)
dyn.run()
return mol
if __name__ == '__main__':
from ase.visualize import view
mol = build_PVA(20)
view(mol)
| csmm/multiase | scripts/dgbuild.py | Python | gpl-2.0 | 7,325 | ["ASE", "LAMMPS"] | 2ba87ee661c73c715271761ebc87bab746160bb6665a7a4e16aa864a3c365e56 |
""" NOTA BENE: This agent should NOT be run alone. Instead, it serves as a base class for extensions.
The TaskManagerAgentBase is the base class to submit tasks to external systems,
monitor and update the tasks and file status in the transformation DB.
This agent is extended in WorkflowTaskAgent and RequestTaskAgent.
In case you want to further extend it you are required to follow the note on the
initialize method and on the _getClients method.
"""
__RCSID__ = "$Id$"
import time
import datetime
from Queue import Queue
from DIRAC import S_OK, gMonitor
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Utilities.ThreadSafe import Synchronizer
from DIRAC.TransformationSystem.Client.FileReport import FileReport
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.TransformationSystem.Client.TaskManager import WorkflowTasks
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.TransformationAgentsUtilities import TransformationAgentsUtilities
AGENT_NAME = 'Transformation/TaskManagerAgentBase'
gSynchro = Synchronizer()
class TaskManagerAgentBase( AgentModule, TransformationAgentsUtilities ):
""" To be extended. Please look at WorkflowTaskAgent and RequestTaskAgent.
"""
def __init__( self, *args, **kwargs ):
""" c'tor
Always call this in the extension agent
"""
AgentModule.__init__( self, *args, **kwargs )
TransformationAgentsUtilities.__init__( self )
self.transClient = None
self.transType = []
self.tasksPerLoop = 50
self.owner = ''
self.ownerGroup = ''
self.ownerDN = ''
self.pluginLocation = ''
# for the threading
self.transQueue = Queue()
self.transInQueue = []
self.transInThread = {}
#############################################################################
def initialize( self ):
""" Agent initialization.
The extensions MUST provide in the initialize method the following data members:
- TransformationClient objects (self.transClient),
- set the shifterProxy if different from the default one set here ('ProductionManager')
        - list of transformation types to be looked at (self.transType)
"""
gMonitor.registerActivity( "SubmittedTasks", "Automatically submitted tasks", "Transformation Monitoring", "Tasks",
gMonitor.OP_ACUM )
self.pluginLocation = self.am_getOption( 'PluginLocation', 'DIRAC.TransformationSystem.Client.TaskManagerPlugin' )
# Default clients
self.transClient = TransformationClient()
# setting up the threading
maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', 15 )
threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads )
self.log.verbose( "Multithreaded with %d threads" % maxNumberOfThreads )
for i in xrange( maxNumberOfThreads ):
threadPool.generateJobAndQueueIt( self._execute, [i] )
return S_OK()
def finalize( self ):
""" graceful finalization
"""
if self.transInQueue:
self._logInfo( "Wait for threads to get empty before terminating the agent (%d tasks)" % len( self.transInThread ) )
self.transInQueue = []
while self.transInThread:
time.sleep( 2 )
self.log.info( "Threads are empty, terminating the agent..." )
return S_OK()
#############################################################################
def execute( self ):
""" The TaskManagerBase execution method is just filling the Queues of transformations that need to be processed
"""
operationsOnTransformationDict = {}
# Determine whether the task status is to be monitored and updated
enableTaskMonitor = self.am_getOption( 'MonitorTasks', '' )
if not enableTaskMonitor:
self.log.verbose( "Monitoring of tasks is disabled. To enable it, create the 'MonitorTasks' option" )
else:
# Get the transformations for which the tasks have to be updated
status = self.am_getOption( 'UpdateTasksStatus', ['Active', 'Completing', 'Stopped'] )
transformations = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['updateTaskStatus']}
# Determine whether the task files status is to be monitored and updated
enableFileMonitor = self.am_getOption( 'MonitorFiles', '' )
if not enableFileMonitor:
self.log.verbose( "Monitoring of files is disabled. To enable it, create the 'MonitorFiles' option" )
else:
# Get the transformations for which the files have to be updated
status = self.am_getOption( 'UpdateFilesStatus', ['Active', 'Completing', 'Stopped'] )
transformations = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
if transID in operationsOnTransformationDict:
operationsOnTransformationDict[transID]['Operations'].append( 'updateFileStatus' )
else:
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['updateFileStatus']}
# Determine whether the checking of reserved tasks is to be performed
enableCheckReserved = self.am_getOption( 'CheckReserved', '' )
if not enableCheckReserved:
self.log.verbose( "Checking of reserved tasks is disabled. To enable it, create the 'CheckReserved' option" )
else:
# Get the transformations for which the check of reserved tasks have to be performed
status = self.am_getOption( 'CheckReservedStatus', ['Active', 'Completing', 'Stopped'] )
transformations = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
if transID in operationsOnTransformationDict:
operationsOnTransformationDict[transID]['Operations'].append( 'checkReservedTasks' )
else:
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['checkReservedTasks']}
# Determine whether the submission of tasks is to be performed
enableSubmission = self.am_getOption( 'SubmitTasks', '' )
if not enableSubmission:
self.log.verbose( "Submission of tasks is disabled. To enable it, create the 'SubmitTasks' option" )
else:
# getting the credentials for submission
res = getProxyInfo( False, False )
if not res['OK']:
self.log.error( "Failed to determine credentials for submission", res['Message'] )
return res
proxyInfo = res['Value']
self.owner = proxyInfo['username']
self.ownerGroup = proxyInfo['group']
self.ownerDN = proxyInfo['identity']
self.log.info( "Tasks will be submitted with the credentials %s:%s" % ( self.owner, self.ownerGroup ) )
      # Get the transformations for which tasks have to be submitted
status = self.am_getOption( 'SubmitStatus', ['Active', 'Completing'] )
transformations = self._selectTransformations( transType = self.transType, status = status )
if not transformations['OK']:
self.log.warn( "Could not select transformations: %s" % transformations['Message'] )
else:
# Get the transformations which should be submitted
self.tasksPerLoop = self.am_getOption( 'TasksPerLoop', self.tasksPerLoop )
transformationIDsAndBodies = dict( [( transformation['TransformationID'],
transformation['Body'] ) for transformation in transformations['Value']] )
for transID, body in transformationIDsAndBodies.iteritems():
if transID in operationsOnTransformationDict:
operationsOnTransformationDict[transID]['Operations'].append( 'submitTasks' )
else:
operationsOnTransformationDict[transID] = {'Body': body, 'Operations': ['submitTasks']}
self._fillTheQueue( operationsOnTransformationDict )
return S_OK()
def _selectTransformations( self, transType = [], status = ['Active', 'Completing'], agentType = ['Automatic'] ):
""" get the transformations
"""
selectCond = {}
if status:
selectCond['Status'] = status
if transType:
selectCond['Type'] = transType
if agentType:
selectCond['AgentType'] = agentType
res = self.transClient.getTransformations( condDict = selectCond )
if not res['OK']:
self.log.error( "Failed to get transformations: %s" % res['Message'] )
elif not res['Value']:
self.log.verbose( "No transformations found" )
else:
self.log.verbose( "Obtained %d transformations" % len( res['Value'] ) )
return res
def _fillTheQueue( self, operationsOnTransformationsDict ):
""" Just fill the queue with the operation to be done on a certain transformation
"""
count = 0
for transID, bodyAndOps in operationsOnTransformationsDict.iteritems():
if transID not in self.transInQueue:
count += 1
self.transInQueue.append( transID )
self.transQueue.put( {transID: bodyAndOps} )
self.log.info( "Out of %d transformations, %d put in thread queue" % ( len( operationsOnTransformationsDict ),
count ) )
#############################################################################
def _getClients( self ):
""" returns the clients used in the threads - this is another function that should be extended.
The clients provided here are defaults, and should be adapted
"""
threadTransformationClient = TransformationClient()
threadTaskManager = WorkflowTasks() # this is for wms tasks, replace it with something else if needed
threadTaskManager.pluginLocation = self.pluginLocation
return {'TransformationClient': threadTransformationClient,
'TaskManager': threadTaskManager}
def _execute( self, threadID ):
""" This is what runs inside the threads, in practice this is the function that does the real stuff
"""
# Each thread will have its own clients
clients = self._getClients()
startTime = 0
method = '_execute'
while True:
transIDOPBody = self.transQueue.get()
try:
transID = transIDOPBody.keys()[0]
operations = transIDOPBody[transID]['Operations']
if transID not in self.transInQueue:
self._logWarn( "Got a transf not in transInQueue...?", method = method, transID = transID )
break
self.transInThread[transID] = ' [Thread%d] [%s] ' % ( threadID, str( transID ) )
clients['TaskManager'].transInThread = self.transInThread
for operation in operations:
self._logInfo( "Starting processing operation %s" % operation, method = method, transID = transID )
startTime = time.time()
res = getattr( self, operation )( transIDOPBody, clients )
if not res['OK']:
self._logError( "Failed to %s: %s" % ( operation, res['Message'] ), method = method, transID = transID )
self._logInfo( "Processed operation %s in %.1f seconds" % ( operation, time.time() - startTime if startTime else time.time() ),
method = method, transID = transID )
except Exception, x:
self._logException( 'Exception executing operation %s' % operation, lException = x, transID = transID, method = method )
finally:
if not transID:
transID = 'None'
self._logInfo( "Processed transformation in %.1f seconds" % ( time.time() - startTime if startTime else time.time() ),
method = method, transID = transID )
self._logVerbose( "%d transformations still in queue" % ( len( self.transInQueue ) - 1 ),
method = method, transID = transID )
self.transInThread.pop( transID, None )
if transID in self.transInQueue:
self.transInQueue.remove( transID )
self._logDebug( "transInQueue = %s" % str( self.transInQueue ), method = method, transID = transID )
#############################################################################
# real operations done
def updateTaskStatus( self, transIDOPBody, clients ):
""" Updates the task status
"""
transID = transIDOPBody.keys()[0]
method = 'updateTaskStatus'
# Get the tasks which are in an UPDATE state
updateStatus = self.am_getOption( 'TaskUpdateStatus', ['Checking', 'Deleted', 'Killed', 'Staging', 'Stalled',
'Matched', 'Scheduled', 'Rescheduled', 'Completed',
'Submitted', 'Assigned', 'Received',
'Waiting', 'Running'] )
condDict = {"TransformationID":transID, "ExternalStatus":updateStatus}
timeStamp = str( datetime.datetime.utcnow() - datetime.timedelta( minutes = 10 ) )
transformationTasks = clients['TransformationClient'].getTransformationTasks( condDict = condDict,
older = timeStamp,
timeStamp = 'LastUpdateTime' )
self._logDebug( "getTransformationTasks(%s) return value: %s" % ( str( condDict ), str( transformationTasks ) ),
method = method, transID = transID )
if not transformationTasks['OK']:
self._logError( "Failed to get tasks to update: %s" % transformationTasks['Message'],
method = method, transID = transID )
return transformationTasks
if not transformationTasks['Value']:
self._logVerbose( "No tasks found to update", method = method, transID = transID )
return transformationTasks
self._logVerbose( "Getting %d tasks status" % len( transformationTasks['Value'] ),
method = method, transID = transID )
submittedTaskStatus = clients['TaskManager'].getSubmittedTaskStatus( transformationTasks['Value'] )
self._logDebug( "getSubmittedTaskStatus return value: %s" % str( submittedTaskStatus ),
method = method, transID = transID )
if not submittedTaskStatus['OK']:
self._logError( "Failed to get updated task states: %s" % submittedTaskStatus['Message'],
method = method, transID = transID )
return submittedTaskStatus
statusDict = submittedTaskStatus['Value']
if not statusDict:
self._logInfo( "No tasks to update", method = method, transID = transID )
return submittedTaskStatus
else:
for status in sorted( statusDict ):
taskIDs = statusDict[status]
self._logInfo( "Updating %d task(s) to %s" % ( len( taskIDs ), status ),
method = method, transID = transID )
setTaskStatus = clients['TransformationClient'].setTaskStatus( transID, taskIDs, status )
self._logDebug( "setTaskStatus return value: %s" % str( setTaskStatus ),
method = method, transID = transID )
if not setTaskStatus['OK']:
self._logError( "Failed to update task status for transformation: %s" % setTaskStatus['Message'],
method = method, transID = transID )
return setTaskStatus
return S_OK()
def updateFileStatus( self, transIDOPBody, clients ):
""" Update the files status
"""
transID = transIDOPBody.keys()[0]
method = 'updateFileStatus'
timeStamp = str( datetime.datetime.utcnow() - datetime.timedelta( minutes = 10 ) )
condDict = {'TransformationID' : transID, 'Status' : ['Assigned']}
transformationFiles = clients['TransformationClient'].getTransformationFiles( condDict = condDict,
older = timeStamp, timeStamp = 'LastUpdate' )
self._logDebug( "getTransformationFiles(%s) return value: %s" % ( str( condDict ), transformationFiles ),
method = method, transID = transID )
if not transformationFiles['OK']:
self._logError( "Failed to get transformation files to update: %s" % transformationFiles['Message'],
method = method )
return transformationFiles
if not transformationFiles['Value']:
self._logInfo( "No files to be updated", transID = transID, method = method )
return transformationFiles
submittedFileStatus = clients['TaskManager'].getSubmittedFileStatus( transformationFiles['Value'] )
self._logDebug( "getSubmittedFileStatus return value: %s" % submittedFileStatus,
method = method, transID = transID )
if not submittedFileStatus['OK']:
self._logError( "Failed to get updated file states for transformation: %s" % submittedFileStatus['Message'],
transID = transID, method = method )
return submittedFileStatus
statusDict = submittedFileStatus['Value']
if not statusDict:
self._logInfo( "No file states to be updated", transID = transID, method = method )
return submittedFileStatus
fileReport = FileReport( server = clients['TransformationClient'].getServer() )
for lfn, status in statusDict.items():
setFileStatus = fileReport.setFileStatus( transID, lfn, status )
if not setFileStatus['OK']:
return setFileStatus
commit = fileReport.commit()
if not commit['OK']:
self._logError( "Failed to update file states for transformation: %s" % commit['Message'],
transID = transID, method = method )
return commit
else:
self._logInfo( "Updated the states of %d files" % len( commit['Value'] ),
transID = transID, method = method )
return S_OK()
def checkReservedTasks( self, transIDOPBody, clients ):
""" Checking Reserved tasks
"""
transID = transIDOPBody.keys()[0]
method = 'checkReservedTasks'
# Select the tasks which have been in Reserved status for more than 1 hour for selected transformations
condDict = {"TransformationID":transID, "ExternalStatus":'Reserved'}
time_stamp_older = str( datetime.datetime.utcnow() - datetime.timedelta( hours = 1 ) )
time_stamp_newer = str( datetime.datetime.utcnow() - datetime.timedelta( days = 7 ) )
res = clients['TransformationClient'].getTransformationTasks( condDict = condDict, older = time_stamp_older,
newer = time_stamp_newer )
self._logDebug( "getTransformationTasks(%s) return value: %s" % ( condDict, res ),
method = method, transID = transID )
if not res['OK']:
self._logError( "Failed to get Reserved tasks: %s" % res['Message'],
transID = transID, method = method )
return res
if not res['Value']:
self._logVerbose( "No Reserved tasks found", transID = transID )
return res
reservedTasks = res['Value']
res = clients['TaskManager'].updateTransformationReservedTasks( reservedTasks )
self._logDebug( "updateTransformationReservedTasks(%s) return value: %s" % ( reservedTasks, res ),
method = method, transID = transID )
if not res['OK']:
self._logError( "Failed to update transformation reserved tasks: %s" % res['Message'],
transID = transID, method = method )
return res
noTasks = res['Value']['NoTasks']
taskNameIDs = res['Value']['TaskNameIDs']
# For the tasks with no associated request found re-set the status of the task in the transformationDB
for taskName in noTasks:
transID, taskID = taskName.split( '_' )
self._logInfo( "Resetting status of %s to Created as no associated task found" % ( taskName ),
transID = transID, method = method )
res = clients['TransformationClient'].setTaskStatus( int( transID ), int( taskID ), 'Created' )
if not res['OK']:
self._logError( "Failed to update task status and ID after recovery: %s %s" % ( taskName, res['Message'] ),
transID = transID, method = method )
return res
# For the tasks for which an associated request was found update the task details in the transformationDB
for taskName, extTaskID in taskNameIDs.items():
transID, taskID = taskName.split( '_' )
self._logInfo( "Setting status of %s to Submitted with ID %s" % ( taskName, extTaskID ),
transID = transID, method = method )
setTaskStatusAndWmsID = clients['TransformationClient'].setTaskStatusAndWmsID( int( transID ), int( taskID ),
'Submitted', str( extTaskID ) )
if not setTaskStatusAndWmsID['OK']:
self._logError( "Failed to update task status and ID after recovery: %s %s" % ( taskName,
setTaskStatusAndWmsID['Message'] ),
transID = transID, method = method )
return setTaskStatusAndWmsID
return S_OK()
def submitTasks( self, transIDOPBody, clients ):
""" Submit the tasks to an external system, using the taskManager provided
"""
transID = transIDOPBody.keys()[0]
transBody = transIDOPBody[transID]['Body']
method = 'submitTasks'
tasksToSubmit = clients['TransformationClient'].getTasksToSubmit( transID, self.tasksPerLoop )
self._logDebug( "getTasksToSubmit(%s, %s) return value: %s" % ( transID, self.tasksPerLoop, tasksToSubmit ),
method = method, transID = transID )
if not tasksToSubmit['OK']:
self._logError( "Failed to obtain tasks: %s" % tasksToSubmit['Message'], transID = transID, method = method )
return tasksToSubmit
tasks = tasksToSubmit['Value']['JobDictionary']
if not tasks:
self._logVerbose( "No tasks found for submission", transID = transID, method = method )
return tasksToSubmit
self._logInfo( "Obtained %d tasks for submission" % len( tasks ), transID = transID, method = method )
preparedTransformationTasks = clients['TaskManager'].prepareTransformationTasks( transBody, tasks,
self.owner, self.ownerGroup, self.ownerDN )
self._logDebug( "prepareTransformationTasks return value: %s" % preparedTransformationTasks,
method = method, transID = transID )
if not preparedTransformationTasks['OK']:
self._logError( "Failed to prepare tasks: %s" % preparedTransformationTasks['Message'],
transID = transID, method = method )
return preparedTransformationTasks
res = self.__actualSubmit( preparedTransformationTasks, clients, transID )
if not res['OK']:
return res
res = clients['TaskManager'].updateDBAfterTaskSubmission( res['Value'] )
self._logDebug( "updateDBAfterTaskSubmission return value: %s" % res, method = method, transID = transID )
if not res['OK']:
self._logError( "Failed to update DB after task submission: %s" % res['Message'],
transID = transID, method = method )
return res
return S_OK()
# This gSynchro is necessary in order to avoid race conditions when submitting to the WMS,
# because WMSClient wants jobDescription.xml to be present in the local directory prior to submission
@gSynchro
def __actualSubmit( self, preparedTransformationTasks, clients, transID ):
""" This function contacts either RMS or WMS depending on the type of transformation.
"""
method = 'submitTasks'
res = clients['TaskManager'].submitTransformationTasks( preparedTransformationTasks['Value'] )
self._logDebug( "submitTransformationTasks return value: %s" % res, method = method, transID = transID )
if not res['OK']:
self._logError( "Failed to submit prepared tasks: %s" % res['Message'],
transID = transID, method = method )
return res
| coberger/DIRAC | TransformationSystem/Agent/TaskManagerAgentBase.py | Python | gpl-3.0 | 25,548 | ["DIRAC"] | 4b58b1fc956b55517c9887830496f170e7aa7231a476612dc936b1077d457586 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
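# Thin auto-generated DeVIDE wrapper: vtkVertexGlyphFilter takes a vtkPointSet input and
# produces a vtkPolyData output with one vertex cell per input point.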
class vtkVertexGlyphFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkVertexGlyphFilter(), 'Processing.',
('vtkPointSet',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| chrisidefix/devide | modules/vtk_basic/vtkVertexGlyphFilter.py | Python | bsd-3-clause | 495 | ["VTK"] | d81ac90d4a18e457cb91f270a0cbf4c026dba1b56ccce4693f97db5d82080428 |
import os
import markdown
import collections
import logging
log = logging.getLogger(__name__)
from MooseObjectSyntax import MooseObjectSyntax
from MooseParameters import MooseParameters
from MooseDescription import MooseDescription
from MooseActionSyntax import MooseActionSyntax
from MooseTextFile import MooseTextFile
from MooseImageFile import MooseImageFile
from MooseFigure import MooseFigure
from MooseFigureReference import MooseFigureReference
from MooseEquationReference import MooseEquationReference
from MooseInlineProcessor import MooseInlineProcessor
from MooseInputBlock import MooseInputBlock
from MooseCppMethod import MooseCppMethod
from MoosePackageParser import MoosePackageParser
from MooseSlider import MooseSlider
from MooseDiagram import MooseDiagram
from MooseCSS import MooseCSS
from MooseSlidePreprocessor import MooseSlidePreprocessor
from MooseBuildStatus import MooseBuildStatus
from MooseBibtex import MooseBibtex
from MooseActionList import MooseActionList
import MooseDocs
import mooseutils
class MooseMarkdown(markdown.Extension):
"""
Extensions that comprise the MOOSE flavored markdown.
"""
def __init__(self, **kwargs):
# Storage for the MooseLinkDatabase object
self.syntax = None
# Define the configuration options
self.config = dict()
self.config['executable'] = ['', "The executable to utilize for generating application syntax."]
self.config['locations'] = [dict(), "The locations to parse for syntax."]
self.config['repo'] = ['', "The remote repository to create hyperlinks."]
self.config['links'] = [dict(), "The set of paths for generating input file and source code links to objects."]
self.config['slides'] = [False, "Enable the parsing for creating reveal.js slides."]
self.config['package'] = [False, "Enable the use of the MoosePackageParser."]
self.config['graphviz'] = ['/opt/moose/graphviz/bin', 'The location of graphviz executable for use with diagrams.']
self.config['dot_ext'] = ['svg', "The graphviz/dot output file extension (default: svg)."]
self.config['install'] = ['', "The location to install system and object documentation."]
self.config['macro_files'] = ['', "List of paths to files that contain macros to be used in bibtex parsing."]
# Construct the extension object
super(MooseMarkdown, self).__init__(**kwargs)
# Create the absolute path to the executable
self.setConfig('executable', MooseDocs.abspath(self.getConfig('executable')))
def execute(self):
"""
Execute the supplied MOOSE application and return the YAML.
"""
exe = self.getConfig('executable')
        if not (exe and os.path.exists(exe)):
log.critical('The executable does not exist: {}'.format(exe))
raise Exception('Critical Error')
else:
log.debug("Executing {} to extract syntax.".format(exe))
try:
raw = mooseutils.runExe(exe, '--yaml')
return mooseutils.MooseYaml(raw)
except:
log.critical('Failed to read YAML file, MOOSE and modules are likely not compiled correctly.')
raise Exception('Critical Error')
def extendMarkdown(self, md, md_globals):
"""
Builds the extensions for MOOSE flavored markdown.
"""
md.registerExtension(self)
# Create a config object
config = self.getConfigs()
# Extract YAML
exe_yaml = self.execute()
# Generate YAML data from application
# Populate the database for input file and children objects
log.debug('Creating input file and source code use database.')
database = MooseDocs.MooseLinkDatabase(**config)
# Populate the syntax
self.syntax = collections.OrderedDict()
for item in config['locations']:
key = item.keys()[0]
options = item.values()[0]
options.setdefault('group', key)
options.setdefault('name', key.replace('_', ' ').title())
options.setdefault('install', config['install'])
self.syntax[key] = MooseDocs.MooseApplicationSyntax(exe_yaml, **options)
# Replace the InlineTreeprocessor with the MooseInlineProcessor, this allows
# for an initialize() method to be called prior to the convert for re-setting state.
md.treeprocessors['inline'] = MooseInlineProcessor(markdown_instance=md, **config)
# Preprocessors
md.preprocessors.add('moose_bibtex', MooseBibtex(markdown_instance=md, **config), '_end')
if config['slides']:
md.preprocessors.add('moose_slides', MooseSlidePreprocessor(markdown_instance=md), '_end')
# Block processors
md.parser.blockprocessors.add('diagrams', MooseDiagram(md.parser, **config), '_begin')
md.parser.blockprocessors.add('slider', MooseSlider(md.parser, **config), '_begin')
md.parser.blockprocessors.add('css', MooseCSS(md.parser, **config), '_begin')
# Inline Patterns
params = MooseParameters(markdown_instance=md, syntax=self.syntax, **config)
md.inlinePatterns.add('moose_parameters', params, '_begin')
desc = MooseDescription(markdown_instance=md, syntax=self.syntax, **config)
md.inlinePatterns.add('moose_description', desc, '_begin')
object_markdown = MooseObjectSyntax(markdown_instance=md, syntax=self.syntax, database=database, **config)
md.inlinePatterns.add('moose_object_syntax', object_markdown, '_begin')
system_markdown = MooseActionSyntax(markdown_instance=md, syntax=self.syntax, **config)
md.inlinePatterns.add('moose_system_syntax', system_markdown, '_begin')
system_list = MooseActionList(markdown_instance=md, yaml=exe_yaml, syntax=self.syntax, **config)
md.inlinePatterns.add('moose_system_list', system_list, '_begin')
md.inlinePatterns.add('moose_input_block', MooseInputBlock(markdown_instance=md, **config), '_begin')
md.inlinePatterns.add('moose_cpp_method', MooseCppMethod(markdown_instance=md, **config), '_begin')
md.inlinePatterns.add('moose_text', MooseTextFile(markdown_instance=md, **config), '_begin')
md.inlinePatterns.add('moose_image', MooseImageFile(markdown_instance=md, **config), '_begin')
md.inlinePatterns.add('moose_figure', MooseFigure(markdown_instance=md, **config), '_begin')
md.inlinePatterns.add('moose_figure_reference', MooseFigureReference(markdown_instance=md, **config), '>moose_figure')
md.inlinePatterns.add('moose_equation_reference', MooseEquationReference(markdown_instance=md, **config), '<moose_figure_reference')
md.inlinePatterns.add('moose_build_status', MooseBuildStatus(markdown_instance=md, **config), '_begin')
if config['package']:
md.inlinePatterns.add('moose_package_parser', MoosePackageParser(markdown_instance=md, **config), '_end')
def makeExtension(*args, **kwargs):
return MooseMarkdown(*args, **kwargs)
| paulthulstrup/moose | python/MooseDocs/extensions/MooseMarkdown.py | Python | lgpl-2.1 | 6,758 | ["MOOSE"] | 7b3f97a257fc3e15055ae4b57f50f37594750a7f6a205899380ca0f729b6486c |
import glob
import os
from copy import copy
from datetime import datetime
import netCDF4 as nc
import numpy as np
import pandas as pd
from spatialnc.proj import add_proj
C_TO_K = 273.16
FREEZE = C_TO_K
# Kelvin to Celsius
K_TO_C = lambda x: x - FREEZE
def open_files_nc(myawsm):
"""
Open the netCDF files for initial conditions and inputs
- Reads in the initial_conditions file
- Required variables are x,y,z,z_0
- The others z_s, rho, T_s_0, T_s, h2o_sat, mask can be specified
    but will default to 0's (or 1's for the mask)
- Open the files for the inputs and store the file identifier
Args:
myawsm: awsm class
Returns:
force: dictionary of opened netCDF forcing data files
"""
# -------------------------------------------------------------------------
# get the forcing data and open the file
force = {}
force['thermal'] = nc.Dataset(os.path.join(myawsm.paths, 'thermal.nc'), 'r')
force['air_temp'] = nc.Dataset(os.path.join(myawsm.paths, 'air_temp.nc'), 'r')
force['vapor_pressure'] = nc.Dataset(os.path.join(myawsm.paths, 'vapor_pressure.nc'), 'r')
force['wind_speed'] = nc.Dataset(os.path.join(myawsm.paths, 'wind_speed.nc'), 'r')
force['net_solar'] = nc.Dataset(os.path.join(myawsm.paths, 'net_solar.nc'), 'r')
    # soil temp can either be distributed or set to a constant
try:
force['soil_temp'] = nc.Dataset(options['inputs']['soil_temp'], 'r')
except:
force['soil_temp'] = float(myawsm.soil_temp) * np.ones((myawsm.topo.ny,
myawsm.topo.nx))
force['precip_mass'] = nc.Dataset(os.path.join(myawsm.paths, 'precip.nc'), 'r')
force['percent_snow'] = nc.Dataset(os.path.join(myawsm.paths, 'percent_snow.nc'), 'r')
force['snow_density'] = nc.Dataset(os.path.join(myawsm.paths, 'snow_density.nc'), 'r')
force['precip_temp'] = nc.Dataset(os.path.join(myawsm.paths, 'precip_temp.nc'), 'r')
return force
def open_files_ipw(myawsm):
"""
    Compile lists of input data hours from ipw files stored in the standard AWSM
    file structure. These are only lists of integer water-year hours; the actual
    reading of the ipw files happens from the standard directory structure.
Args:
myawsm: awsm class
Returns:
ppt_list: list of hours for reference from the ppt_desc file
input_list: list of input hours to read in from the data directory
"""
# ------------------------------------------------------------------------
# get the forcing data and open the file
# path to snow and em files
path_inputs = os.path.join(myawsm.pathi, "in.*")
# get precip from ipw
header = ['hour', 'path']
df_ppt = pd.read_csv(myawsm.ppt_desc, names=header, sep=' ')
# get list of isnobal outputs and sort by time step
input_files = sorted(glob.glob(path_inputs), key=os.path.getmtime)
input_files.sort(key=lambda f: os.path.basename(f).split('in.')[1])
ppt_list = np.zeros(len(df_ppt['path'].values))
input_list = np.zeros(len(input_files))
# store input and ppt hours in numpy arrays
for idx, fl in enumerate(input_files):
input_list[idx] = int(os.path.basename(fl).split('in.')[1])
for idx, ppt_hr in enumerate(df_ppt['hour'].values):
ppt_list[idx] = int(ppt_hr)
return input_list, ppt_list
def close_files(force):
"""
Close input netCDF forcing files
"""
for f in force.keys():
if not isinstance(force[f], np.ndarray):
force[f].close()
def output_files(options, init, start_date, myawsm):
"""
Create the snow and em output netCDF file
Args:
options: dictionary of Snobal options
init: dictionary of Snobal initialization images
start_date: date for time units in files
myawsm: awsm class
"""
fmt = '%Y-%m-%d %H:%M:%S'
# chunk size
cs = (6, 10, 10)
if myawsm.topo.nx < 10:
cs = (3, 3, 3)
# ------------------------------------------------------------------------
# EM netCDF
m = {}
m['name'] = ['net_rad', 'sensible_heat', 'latent_heat', 'snow_soil',
'precip_advected', 'sum_EB', 'evaporation', 'snowmelt',
'SWI', 'cold_content']
m['units'] = ['W m-2', 'W m-2', 'W m-2', 'W m-2', 'W m-2', 'W m-2',
'kg m-2', 'kg m-2', 'kg or mm m-2', 'J m-2']
m['description'] = ['Average net all-wave radiation',
'Average sensible heat transfer',
'Average latent heat exchange',
'Average snow/soil heat exchange',
'Average advected heat from precipitation',
'Average sum of EB terms for snowcover',
'Total evaporation',
'Total snowmelt',
'Total runoff',
'Snowcover cold content']
emname = myawsm.em_name+'.nc'
# if myawsm.restart_run:
# emname = 'em_restart_{}.nc'.format(myawsm.restart_hr)
# start_date = myawsm.restart_date
netcdfFile = os.path.join(options['output']['location'], emname)
if os.path.isfile(netcdfFile):
myawsm._logger.warning(
'Opening {}, data may be overwritten!'.format(netcdfFile))
em = nc.Dataset(netcdfFile, 'a')
h = '[{}] Data added or updated'.format(
datetime.now().strftime(fmt))
setattr(em, 'last_modified', h)
if 'projection' not in em.variables.keys():
em = add_proj(em, None, myawsm.topo.topoConfig['filename'])
else:
em = nc.Dataset(netcdfFile, 'w')
dimensions = ('time', 'y', 'x')
# create the dimensions
em.createDimension('time', None)
em.createDimension('y', len(init['y']))
em.createDimension('x', len(init['x']))
# create some variables
em.createVariable('time', 'f', dimensions[0])
em.createVariable('y', 'f', dimensions[1])
em.createVariable('x', 'f', dimensions[2])
# setattr(em.variables['time'], 'units', 'hours since %s' % options['time']['start_date'])
setattr(em.variables['time'], 'units', 'hours since %s' % start_date)
setattr(em.variables['time'], 'time_zone', myawsm.tmz)
setattr(em.variables['time'], 'calendar', 'standard')
# setattr(em.variables['time'], 'time_zone', time_zone)
em.variables['x'][:] = init['x']
em.variables['y'][:] = init['y']
# em image
for i, v in enumerate(m['name']):
# check to see if in output variables
if v.lower() in myawsm.pysnobal_output_vars:
# em.createVariable(v, 'f', dimensions[:3], chunksizes=(6,10,10))
em.createVariable(v, 'f', dimensions[:3], chunksizes=cs)
setattr(em.variables[v], 'units', m['units'][i])
setattr(em.variables[v], 'description', m['description'][i])
# add projection info
em = add_proj(em, None, myawsm.topo.topoConfig['filename'])
options['output']['em'] = em
# ------------------------------------------------------------------------
# SNOW netCDF
s = {}
s['name'] = ['thickness', 'snow_density', 'specific_mass', 'liquid_water',
'temp_surf', 'temp_lower', 'temp_snowcover',
'thickness_lower', 'water_saturation']
s['units'] = ['m', 'kg m-3', 'kg m-2', 'kg m-2', 'C',
'C', 'C', 'm', 'percent']
s['description'] = ['Predicted thickness of the snowcover',
'Predicted average snow density',
'Predicted specific mass of the snowcover',
'Predicted mass of liquid water in the snowcover',
'Predicted temperature of the surface layer',
'Predicted temperature of the lower layer',
'Predicted temperature of the snowcover',
'Predicted thickness of the lower layer',
'Predicted percentage of liquid water saturation of the snowcover']
snowname = myawsm.snow_name + '.nc'
# if myawsm.restart_run:
# snowname = 'snow_restart_{}.nc'.format(myawsm.restart_hr)
netcdfFile = os.path.join(options['output']['location'], snowname)
if os.path.isfile(netcdfFile):
myawsm._logger.warning(
'Opening {}, data may be overwritten!'.format(netcdfFile))
snow = nc.Dataset(netcdfFile, 'a')
h = '[{}] Data added or updated'.format(
datetime.now().strftime(fmt))
setattr(snow, 'last_modified', h)
if 'projection' not in snow.variables.keys():
snow = add_proj(snow, None, myawsm.topo.topoConfig['filename'])
else:
dimensions = ('time', 'y', 'x')
snow = nc.Dataset(netcdfFile, 'w')
# create the dimensions
snow.createDimension('time', None)
snow.createDimension('y', len(init['y']))
snow.createDimension('x', len(init['x']))
# create some variables
snow.createVariable('time', 'f', dimensions[0])
snow.createVariable('y', 'f', dimensions[1])
snow.createVariable('x', 'f', dimensions[2])
setattr(snow.variables['time'], 'units', 'hours since %s' % start_date)
setattr(snow.variables['time'], 'time_zone', myawsm.tmz)
setattr(snow.variables['time'], 'calendar', 'standard')
# setattr(snow.variables['time'], 'time_zone', time_zone)
snow.variables['x'][:] = init['x']
snow.variables['y'][:] = init['y']
# snow image
for i, v in enumerate(s['name']):
# check to see if in output variables
if v.lower() in myawsm.pysnobal_output_vars:
snow.createVariable(v, 'f', dimensions[:3], chunksizes=cs)
# snow.createVariable(v, 'f', dimensions[:3])
setattr(snow.variables[v], 'units', s['units'][i])
setattr(snow.variables[v], 'description', s['description'][i])
# add projection info
snow = add_proj(snow, None, myawsm.topo.topoConfig['filename'])
options['output']['snow'] = snow
def output_timestep(s, tstep, options, output_vars):
"""
Output the model results for the current time step
Args:
s: dictionary of output variable numpy arrays
tstep: datetime time step
options: dictionary of Snobal options
output_vars: list of variable names to write to the output files
"""
em_out = {'net_rad': 'R_n_bar', 'sensible_heat': 'H_bar',
'latent_heat': 'L_v_E_bar',
'snow_soil': 'G_bar', 'precip_advected': 'M_bar',
'sum_EB': 'delta_Q_bar', 'evaporation': 'E_s_sum',
'snowmelt': 'melt_sum', 'SWI': 'ro_pred_sum',
'cold_content': 'cc_s'}
snow_out = {'thickness': 'z_s', 'snow_density': 'rho',
'specific_mass': 'm_s', 'liquid_water': 'h2o',
'temp_surf': 'T_s_0', 'temp_lower': 'T_s_l',
'temp_snowcover': 'T_s', 'thickness_lower': 'z_s_l',
'water_saturation': 'h2o_sat'}
# preallocate
em = {}
snow = {}
# gather all the data together
for key, value in em_out.items():
em[key] = copy(s[value])
for key, value in snow_out.items():
snow[key] = copy(s[value])
# convert from K to C
snow['temp_snowcover'] -= FREEZE
snow['temp_surf'] -= FREEZE
snow['temp_lower'] -= FREEZE
# now find the correct index
# the current time integer
times = options['output']['snow'].variables['time']
# offset to match same convention as iSnobal
tstep -= pd.to_timedelta(1, unit='h')
t = nc.date2num(tstep.replace(tzinfo=None), times.units, times.calendar)
if len(times) != 0:
index = np.where(times[:] == t)[0]
if index.size == 0:
index = len(times)
else:
index = index[0]
else:
index = len(times)
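# Illustrative note (values are hypothetical): if the time variable already
# holds [0, 1, 2] hours and t == 1, the matching slot (index 1) is
# overwritten; if t == 3, no match is found, so index == len(times) == 3 and
# the step is appended at the end of the unlimited time dimension.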
# insert the time
options['output']['snow'].variables['time'][index] = t
options['output']['em'].variables['time'][index] = t
# insert the data
for key in em_out:
if key.lower() in output_vars:
options['output']['em'].variables[key][index, :] = em[key]
for key in snow_out:
if key.lower() in output_vars:
options['output']['snow'].variables[key][index, :] = snow[key]
# sync to disk
options['output']['snow'].sync()
options['output']['em'].sync()
|
USDA-ARS-NWRC/AWSF
|
awsm/interface/pysnobal_io.py
|
Python
|
gpl-3.0
| 12,601
|
[
"NetCDF"
] |
6e5e9c956a17bb2a1fbb37973069037fc637f31cda183cf6ba26a39c5629512a
|
#!/usr/bin/env python
'''
Parse logged data from accelerometer to compute translations in x, y, z and rotations
about these axes. Output as X3D text so that the data can be visualized in an X3D Browser.
--
Mike McCann
5 May 2011
$Id: accel2x3d.py 13595 2016-06-16 16:31:02Z mccann $
'''
import csv
from numpy import *
from optparse import OptionParser
import math
class BEDS_X3D:
def __init__(self, opts):
'''Initialize with options and set base X3D text
'''
self.inputFileName = opts.input
self.x3dBaseText = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.2//EN" "http://www.web3d.org/specifications/x3d-3.2.dtd">
<X3D profile="Immersive" version="3.2" xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance" xsd:noNamespaceSchemaLocation="http://www.web3d.org/specifications/x3d-3.2.xsd">
<head>
<meta content="BEDS_Vis1.x3d" name="title"/>
<meta content="Translation and orientation visualization of Benthic Event Detectors." name="description"/>
<meta content="Mike McCann mccann@mbari.org" name="creator"/>
<meta content="21 May 2011" name="created"/>
<meta content="Copyright (c) Monterey Bay Aquarium Research Institute 2011" name="rights"/>
<meta content="accel2x3d.py --input %(input)s --output %(output)s" name="generator"/>
</head>
<Scene>
<Transform DEF="TRANSLATE">
<Transform DEF="XROT">
<Transform DEF="YROT">
<Transform DEF="ZROT">
<Shape>
<Appearance>
<Material/>
</Appearance>
<Box DEF="BEDS_BOX"/>
</Shape>
<TouchSensor DEF="TOUCH"/>
</Transform>
</Transform>
</Transform>
</Transform>
<!-- 6 DOF data from the BEDS coded here as position and orientation interpolators -->
<PositionInterpolator DEF="POS_INTERP" key="%(iKeys)s" keyValue="%(posValues)s"/>
<OrientationInterpolator DEF="X_OI" key="%(iKeys)s" keyValue="%(xRotValues)s"/>
<OrientationInterpolator DEF="Y_OI" key="%(iKeys)s" keyValue="%(yRotValues)s"/>
<OrientationInterpolator DEF="Z_OI" key="%(iKeys)s" keyValue="%(zRotValues)s"/>
<!-- The cycleInterval is the time duration in seconds of the data -->
<TimeSensor DEF="TS" cycleInterval="%(cycInt)s" loop="true"/>
<!-- Wire up the connections between the nodes to animate the motion of the Shape -->
<ROUTE fromField="value_changed" fromNode="POS_INTERP" toField="translation" toNode="TRANSLATE"/>
<ROUTE fromField="value_changed" fromNode="X_OI" toField="rotation" toNode="XROT"/>
<ROUTE fromField="value_changed" fromNode="Y_OI" toField="rotation" toNode="YROT"/>
<ROUTE fromField="value_changed" fromNode="Z_OI" toField="rotation" toNode="ZROT"/>
<ROUTE fromField="fraction_changed" fromNode="TS" toField="set_fraction" toNode="POS_INTERP"/>
<ROUTE fromField="fraction_changed" fromNode="TS" toField="set_fraction" toNode="X_OI"/>
<ROUTE fromField="fraction_changed" fromNode="TS" toField="set_fraction" toNode="Y_OI"/>
<ROUTE fromField="fraction_changed" fromNode="TS" toField="set_fraction" toNode="Z_OI"/>
<ROUTE fromField="touchTime" fromNode="TOUCH" toField="startTime" toNode="TS"/>
</Scene>
</X3D>
'''
# End __init__()
def readAtomicLogFile(self, infile):
'''Open `infile`, read values, apply offsets and scaling.
Return numpy arrays of the acceleration and rotation data in engineering units.
'''
# From: "Kieft, Brian" <bkieft@mbari.org>
# Date: Wed, 18 May 2011 14:42:54 -0700
# To: Mike McCann <mccann@mbari.org>
# Cc: "Herlien, Bob" <bobh@mbari.org>
# Subject: data scaling
#
# Hi Mike,
#
# Here's the first two lines of the log file 20110518135934_atomic_imu.log in your tempbox:
#
# epoch ms,accel x, accel y, accel z, pitch, roll, yaw
# 1305752376772,470.0,483.0,780.0,521.0,527.0,500.0
#
# For this sensor, in this configuration, we're running full scale at 10 bits.
# For accelerations measured in g we have: 0.00403 g/count. So, in this line you'll
# see (780-512)*.00403 = 1.08g for the Z axis. This is when it was sitting on the desk
# before I picked it up. So, 0g would be 512 counts and you'll notice the other two
# axis are slightly negative (since it all has to add up to 1 g since it was just sitting there).
#
# For pitch,roll, and yaw we're looking at .977 degrees/count, same A/D.
#
# Does that work for you?
#
# -bk-
zeroOffset = 512
gPerCount = 0.00403
degPerCount = 0.977
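# Worked example of the scaling above (sample counts taken from the log line
# quoted in the note, everything else as defined in this function): a raw
# z-axis reading of 780 counts gives (780 - 512) * 0.00403 ~= 1.08 g, and a
# pitch reading of 521 counts gives (521 - 512) * 0.977 ~= 8.8 degrees,
# i.e. about 0.154 radians after the pi/180 conversion applied below.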
# Header is:
# ['epoch ms', 'accel x', ' accel y', ' accel z', ' pitch', ' roll', ' yaw']
secList = []
axList = []
ayList = []
azList = []
pitchList = []
rollList = []
yawList = []
reader = csv.reader(open(infile))
for r in reader:
if opts.verbose: print r
try:
secList.append(float(r[0]) / 1000.0)
axList.append((float(r[1]) - zeroOffset) * gPerCount)
ayList.append((float(r[2]) - zeroOffset) * gPerCount)
azList.append((float(r[3]) - zeroOffset) * gPerCount - 1.0)
pitchList.append((float(r[4]) - zeroOffset) * degPerCount * math.pi / 180)
rollList.append((float(r[5]) - zeroOffset) * degPerCount * math.pi / 180)
yawList.append((float(r[6]) - zeroOffset) * degPerCount * math.pi / 180)
except ValueError:
if opts.verbose: print "Skipping row = %s" % r
# Make the Lists numpy arrays so that we can do Matlab-like operations
# These arrays have units of seconds, g, and radians.
self.s = array(secList)
self.ax = array(axList)
self.ay = array(ayList)
self.az = array(azList)
self.pitch = array(pitchList)
self.roll = array(rollList)
self.yaw = array(yawList)
return
def cumtrapz(self, x, y):
'''Returns indefinite integral of discrete data in y wrt x.
Test with:
>> cumtrapz([0,.2,.4,.6,.8],[1:5]) ! Matlab
ans =
0 0.3000 0.8000 1.5000 2.4000
print self.cumtrapz([.0,.2,.4,.6,.8], array([1,2,3,4,5])) ! Python
[ 0. 0.3 0.8 1.5 2.4]
'''
sumA = 0
sumAList = [sumA]
for i in range(len(y))[:-1]:
A = (x[i+1] - x[i]) * (y[i] + y[i+1]) / 2.0
sumA = sumA + A
sumAList.append(sumA)
return array(sumAList)
def createX3DfromFile(self):
'''Read accelerometer data from the log file and convert it to the keys and values of
position and orientation that X3D expects.
'''
self.readAtomicLogFile(self.inputFileName)
# Interpolate data to regularly spaced time values - may need to do this to improve accuracy
# (See http://www.freescale.com/files/sensors/doc/app_note/AN3397.pdf)
##si = linspace(self.s[0], self.s[-1], len(self.s))
##axi = interp(si, self.s, self.ax)
# Double integrate accelerations to get position and construct X3D position values string
# (May need to high-pass filter the data to remove noise that can give unreasonably large positions.)
t = self.s
xA = self.cumtrapz(t, self.cumtrapz(t, self.ax))
yA = self.cumtrapz(t, self.cumtrapz(t, self.ay))
zA = self.cumtrapz(t, self.cumtrapz(t, self.az))
# Construct X3D strings for keys, positions, orientations, and duration of the data
iKeys = ' '.join(['%.4f' % k for k in (t - t[0]) / (t[-1] - t[0])])
posList = ['%.4f %.4f %.4f' % (x, y, z) for (x, y, z) in zip(xA, yA, zA)]
posValues = ' '.join(posList)
xRotValues = ' '.join(['1 0 0 %.6f' % p for p in self.pitch])
yRotValues = ' '.join(['0 1 0 %.6f' % r for r in self.roll])
zRotValues = ' '.join(['0 0 1 %.6f' % y for y in self.yaw])
cycInt = '%.4f' % (t[-1] - t[0])
return self.x3dBaseText % {'input': opts.input, 'output': opts.output,
'iKeys': iKeys, 'posValues': posValues, 'xRotValues': xRotValues,
'yRotValues': yRotValues, 'zRotValues': zRotValues, 'cycInt': cycInt}
if __name__ == '__main__':
parser = OptionParser(usage="""\
Synopsis: %prog --input <accelerometer_log_file_name> --output <x3d_output_file_name>
Given an input file name of accelerometer data in this format:
epoch ms,accel x, accel y, accel z, pitch, roll, yaw
1305752376772,470.0,483.0,780.0,521.0,527.0,500.0
Produce an X3D file such that the data can be visualized in any number of X3D browsers.
Example:
%prog --input 20110518135934_atomic_imu.log --output 20110518135934_atomic_imu.x3d
""")
parser.add_option('', '--input',
type='string', action='store',
help="Specify input log file name")
parser.add_option('', '--output',
type='string', action='store',
help="Specify output x3d file name")
parser.add_option('-v', '--verbose',
action='store_true', default=False,
help="Specify verbose output to the screen")
opts, args = parser.parse_args()
if not (opts.input and opts.output):
parser.error("Must specify both --input and --output options.\n")
beds_x3d = BEDS_X3D(opts)
x3dText = beds_x3d.createX3DfromFile()
if opts.verbose: print "x3dText = %s" % x3dText
f = open(opts.output, 'w')
f.write(x3dText)
f.close()
print "Wrote file %s. Open it in InstantReality Player, BS Contact, Xj3D, or other X3D browser." % opts.output
|
stoqs/stoqs
|
stoqs/loaders/CCE/bed2netcdf/accel2x3d.py
|
Python
|
gpl-3.0
| 8,947
|
[
"Brian"
] |
364fe7111c1fd300276cd4bc71c25dcdf2151e9231ddb612ffa022fc4046f629
|
# Export functions for NeuroML NetworkML 1.8.1
# TODO Should have been implemented in the client side in javascript
# given that the amount of data to transfer would have been smaller,
# and no computations would bog down the server.
# TODO Synapses are potentially incorrect: the properties of release
# are expected to be the same for all postsynaptic members
# of the polyadic synapse, because NeuroML cannot express polyadic synapses.
# TODO Consider removing segments when the soma segment radius is large.
import time
from collections import defaultdict
def exportMutual(neuron_names, all_treenodes, connections, scale=0.001):
""" Export a group of neuronal arbors and their synapses as NeuroML Level 3 v1.8.1.
all_treenodes: an iterator (can be lazy) of treenodes like [<id>, <parent_id>, <x>, <y>, <z>, <radius>, <skeleton_id>].
connections: a dictionary of skeleton ID vs tuple of tuple of tuples, each a pair containing the presynaptic treenode ID and the map of connector ID vs list of postsynaptic treenode IDs.
scale: defaults to 0.001 to transform nanometers (CATMAID) into micrometers (NeuroML).
Returns a lazy sequence of strings that expresses the XML. """
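# Sketch of the 'connections' layout as it is unpacked by bodyMutual() and
# make_connections() in this module (IDs below are made up for illustration):
#   connections = {pre_skeleton_id: {post_skeleton_id:
#                      [(pre_treenode_id, post_treenode_id), ...]}}
#   e.g. {17: {42: [(1001, 2002), (1003, 2004)]}}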
for source in ([header()], bodyMutual(neuron_names, all_treenodes, connections, scale), ["</neuroml>"]):
for line in source:
yield line
def exportSingle(neuron_names, all_treenodes, inputs, scale=0.001):
""" Export a single neuronal arbor with a set of inputs as NeuroML Level 3 v1.8.1. """
for source in ([header()], bodySingle(neuron_names, all_treenodes, inputs, scale), ["</neuroml>"]):
for line in source:
yield line
def header():
return """<?xml version="1.0" encoding="UTF-8"?>
<!-- Exported from CATMAID (http://catmaid.org) on %s -->
<neuroml xmlns="http://morphml.org/neuroml/schema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:net="http://morphml.org/networkml/schema"
xmlns:mml="http://morphml.org/morphml/schema"
xmlns:meta="http://morphml.org/metadata/schema"
xmlns:bio="http://morphml.org/biophysics/schema"
xmlns:cml="http://morphml.org/channelml/schema"
xsi:schemaLocation="http://morphml.org/neuroml/schema http://www.neuroml.org/NeuroMLValidator/NeuroMLFiles/Schemata/v1.8.1/Level3/NeuroML_Level3_v1.8.1.xsd"
length_units="micrometer">
""" % time.strftime("%c %z")
def segment(t1, t2, p, q, segmentID, parentSegmentID, cableID, is_first):
s = '<segment id="%s" name="s%s"' % (segmentID, segmentID)
if parentSegmentID:
s += ' parent="%s"' % parentSegmentID
s += ' cable="%s">\n' % cableID
# Fix radius when not set (-1) to 20 nanometers
r = t1[3]
if r < 0:
r = 20
# Scale radius to micrometers and convert to a diameter
r *= 0.002
if is_first:
s += '<proximal x="%s" y="%s" z="%s" diameter="%s"/>\n' % (p[0], p[1], p[2], r)
s += '<distal x="%s" y="%s" z="%s" diameter="%s"/>\n' % (q[0], q[1], q[2], r)
s += '</segment>\n'
return s
def make_segments(slab, cableID, scale, state):
nodes = slab.nodes
points = smooth(nodes, scale)
if 1 == len(nodes):
# segment of zero length
segmentID = state.nextID()
state.record(nodes[0][0], segmentID)
lastSegmentIDOfParent = slab.lastSegmentIDOfParent() # prior to setting the slab's last_segmentID, or root would reference itself
slab.last_segmentID = segmentID
yield segment(nodes[0], nodes[0], points[0], points[0], segmentID, lastSegmentIDOfParent, cableID, True)
else:
previous_segmentID = slab.lastSegmentIDOfParent()
for i in xrange(1, len(nodes)):
segmentID = state.nextID()
id2 = previous_segmentID
previous_segmentID = segmentID
if 1 == i:
# A synapse could exist at the first node
# (Realize that CATMAID operates on nodes, and NeuroML on edges aka segments)
state.record(nodes[i-1][0], segmentID)
state.record(nodes[i][0], segmentID)
slab.last_segmentID = segmentID
yield segment(nodes[i-1], nodes[i], points[i-1], points[i], segmentID, id2, cableID, 1 == i)
def smooth(treenodes, scale):
""" Apply a three-point average sliding window, keeping first and last points intact.
Returns a new list of points. """
points = []
if len(treenodes) < 3:
for t in treenodes:
points.append((t[2][0] * scale, t[2][1] * scale, t[2][2] * scale))
return points
t = treenodes[0][2]
ax, ay, az = t
# Scale first point after having copied its original values
points.append((ax * scale, ay * scale, az * scale))
t = treenodes[1][2]
bx, by, bz = t
for i in xrange(1, len(treenodes) -1):
tc = treenodes[i+1][2]
cx, cy, cz = tc
points.append((((ax + bx + cx) / 3.0) * scale,
((ay + by + cy) / 3.0) * scale,
((az + bz + cz) / 3.0) * scale))
ax, ay, az = bx, by, bz
bx, by, bz = cx, cy, cz
t = tc
# Scale last point
points.append((cx * scale, cy * scale, cz * scale))
return points
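# Example of the window above (hypothetical x-coordinates, scale = 1):
# input [0, 9, 0, 9] smooths to [0, 3, 6, 9] and input [0, 3, 6, 9] stays
# [0, 3, 6, 9]; interior points become the mean of themselves and their two
# neighbours, while the first and last points are only scaled, never averaged.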
class Slab:
def __init__(self, nodes, parent):
self.nodes = nodes
self.parent = parent
self.last_segmentID = None
def lastSegmentIDOfParent(self):
if self.parent:
return self.parent.last_segmentID
# Root slab
return self.last_segmentID
def make_slabs(root, root_segmentID, successors, cableIDs, scale, state):
# Create cables, each consisting of one or more segments. Three types:
# 1. end node to previous branch node or root
# 2. branch node to previous branch node or root
# 3. branch node to root
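# Illustration (hypothetical tree): for a root R with children A and B, where
# A continues to a single end node A1, the slabs produced are [R] (the root
# slab), [R, A, A1] and [R, B]. Each slab becomes one cable, with one segment
# per consecutive pair of nodes (the one-node root slab yields a single
# zero-length segment).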
root_slab = Slab([root], None)
root_slab.last_segmentID = root_segmentID
leads = [root_slab]
while leads:
slab = leads.pop(0)
parent = slab.nodes[-1]
children = successors[parent[0]] # parent[0] is the treenode ID
while children:
if len(children) > 1:
# Found branch point
leads.extend(Slab([parent, child], slab) for child in children)
break
else:
parent = children[0]
slab.nodes.append(parent)
children = successors[parent[0]] # parent[0] is the treenode ID
# Add segments
cableID = state.nextID()
cableIDs.append(cableID)
for line in make_segments(slab, cableID, scale, state):
yield line
def make_cables(cableIDs):
for i, cableID in enumerate(cableIDs):
yield '<cable id="%s" name="c%s" fract_along_parent="%s"><meta:group>%s_group</meta:group></cable>\n' % (cableID, cableID, 0.5 if 0 == i else 1.0, "soma" if 0 == i else "arbor")
def make_arbor(neuron_name, treenodes, scale, state):
""" treenodes is a sequence of treenodes, where each treenode is a tuple of id, parent_id, location. """
successors = defaultdict(list)
for treenode in treenodes:
if treenode[1]:
successors[treenode[1]].append(treenode)
else:
root = treenode
root_point = smooth([root], scale)[0]
root_segmentID = state.nextID()
root_cableID = state.nextID()
# Accumulate new cable IDs, one for each slab
cableIDs = [root_cableID]
for source in [['<cell name="%s">\n' % neuron_name, '<segments xmlns="http://morphml.org/morphml/schema">\n'],
# Create zero-length point before root to represent the cell body
[segment(root, root, root_point, root_point, root_segmentID, None, root_cableID, True)],
make_slabs(root, root_segmentID, successors, cableIDs, scale, state),
['</segments>\n', '<cables xmlns="http://morphml.org/morphml/schema">\n'],
make_cables(cableIDs),
['</cables>\n', '</cell>\n']]:
for line in source:
yield line
class State:
def __init__(self, synaptic_treenodes):
self.ID = 0
self.synaptic_treenodes = synaptic_treenodes
def nextID(self):
self.ID += 1
return self.ID
def record(self, treenodeID, segmentID):
if treenodeID in self.synaptic_treenodes:
self.synaptic_treenodes[treenodeID] = segmentID
def make_arbors(neuron_names, all_treenodes, cellIDs, scale, state):
""" Consume all_treenodes lazily. Assumes treenodes are sorted by skeleton_id.
Accumulates new cell IDs in cellIDs (the skeletonID is used). """
i = 0
length = len(all_treenodes)
while i < length:
skeletonID = all_treenodes[i][6]
treenodes = []
while i < length and all_treenodes[i][6] == skeletonID:
t = all_treenodes[i]
treenodes.append((t[0], t[1], map(float, (t[2], t[3], t[4])), t[5]))
i += 1
cellIDs.append(skeletonID)
for line in make_arbor(neuron_name(skeletonID, neuron_names), treenodes, scale, state):
yield line
def make_connection_entries(pre_skID, post_skID, synapses, state):
for pre_treenodeID, post_treenodeID in synapses:
yield '<connection id="syn_%s" pre_cell_id="sk_%s" pre_segment_id="%s" pre_fraction_along="0.5" post_cell_id="sk_%s" post_segment_id="%s"/>\n' % (state.nextID(), pre_skID, state.synaptic_treenodes[pre_treenodeID], post_skID, state.synaptic_treenodes[post_treenodeID])
def make_connection(connection, state):
pre_skID, m = connection
for post_skID, synapses in m.iteritems():
for source in (('<projection name="NetworkConnection" source="sk_%s" target="sk_%s">\n' % (pre_skID, post_skID),
'<synapse_props synapse_type="DoubExpSynA" internal_delay="5" weight="1" threshold="-20"/>\n',
'<connections size="%s">\n' % len(synapses)),
make_connection_entries(pre_skID, post_skID, synapses, state),
('</connections>\n', '</projection>\n')):
for line in source:
yield line
def make_connections(connections, state):
""" Generate connections between neurons. """
for connection in connections.iteritems():
for line in make_connection(connection, state):
yield line
def neuron_name(skeleton_id, neuron_names):
""" Generate a valid name for a neuron: must start with [a-zZ-a]
and not contain any double quotes or line breaks or spaces. """
name = neuron_names[skeleton_id].replace('"', "'").replace('\n', ' ')
return "neuron %s - sk_%s" % (neuron_names[skeleton_id], skeleton_id)
def make_cells(cellIDs, neuron_names):
for cellID in cellIDs:
name = neuron_name(cellID, neuron_names)
yield '<population name="%s" cell_type="%s"><instances size="1"><instance id="0"><location x="0" y="0" z="0"/></instance></instances></population>\n' % (name, name)
def bodyMutual(neuron_names, all_treenodes, connections, scale):
""" Create a cell for each arbor. """
synaptic_treenodes = {}
for m in connections.itervalues():
for synapses in m.itervalues():
for pre_treenodeID, post_treenodeID in synapses:
synaptic_treenodes[pre_treenodeID] = None
synaptic_treenodes[post_treenodeID] = None
state = State(synaptic_treenodes)
cellIDs = []
# First cells
sources = [['<cells>\n'],
make_arbors(neuron_names, all_treenodes, cellIDs, scale, state),
['</cells>\n']]
# Then populations: one instance of each cell
sources.append(['<populations xmlns="http://morphml.org/networkml/schema">\n'])
sources.append(make_cells(cellIDs, neuron_names))
sources.append(['</populations>\n'])
# Then connections between cells
if connections:
sources.append(['<projections units="Physiological Units" xmlns="http://morphml.org/networkml/schema">\n'])
sources.append(make_connections(connections, state))
sources.append(['</projections>\n'])
for source in sources:
for line in source:
yield line
def make_inputs(cellIDs, neuron_names, inputs, state):
cellID = cellIDs[0]
for inputSkeletonID, treenodeIDs in inputs.iteritems():
for source in [('<input name="%s">\n' % inputSkeletonID,
'<random_stim frequency="20" synaptic_mechanism="DoubExpSynA"/>\n',
'<target population="%s">\n' % neuron_name(cellID, neuron_names),
'<sites size="%s">\n' % len(treenodeIDs)),
('<site cell_id="0" segment_id="%s"/>\n' % state.synaptic_treenodes[treenodeID] for treenodeID in treenodeIDs),
('</sites>\n',
'</target>\n',
'</input>\n')]:
for line in source:
yield line
def bodySingle(neuron_names, all_treenodes, inputs, scale):
synaptic_treenodes = {treenodeID: None for treenodeIDs in inputs.itervalues() for treenodeID in treenodeIDs}
state = State(synaptic_treenodes)
cellIDs = []
# First cells (only one)
sources = [['<cells>\n'],
make_arbors(neuron_names, all_treenodes, cellIDs, scale, state),
['</cells>\n']]
# Then populations: one instance of the one cell
sources.append(['<populations xmlns="http://morphml.org/networkml/schema">\n'])
sources.append(make_cells(cellIDs, neuron_names))
sources.append(['</populations>\n'])
# Then inputs onto the one cell
sources.append(['<inputs units="SI Units" xmlns="http://morphml.org/networkml/schema">\n'])
sources.append(make_inputs(cellIDs, neuron_names, inputs, state))
sources.append(['</inputs>\n'])
for source in sources:
for line in source:
yield line
|
catsop/CATMAID
|
django/applications/catmaid/control/export_NeuroML_Level3.py
|
Python
|
gpl-3.0
| 13,858
|
[
"NEURON"
] |
b575542c70b52a80e83f58804168c3b0d90a2fe417ebf85f0fbb0991d915849b
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGseabase(RPackage):
"""Gene set enrichment data structures and methods.
This package provides classes and methods to support Gene Set Enrichment
Analysis (GSEA)."""
homepage = "https://bioconductor.org/packages/GSEABase"
git = "https://git.bioconductor.org/packages/GSEABase.git"
version('1.46.0', commit='edce83a9256a0c03206c2bce7c90ada0d90f6622')
version('1.44.0', commit='7042ff64a98b05b9572231ee1b4f3ae4fc9c768e')
version('1.42.0', commit='5e40ce0fdd4dc0cff7601b169bbf6aa1430ae33e')
version('1.40.1', commit='3e5441708b80aab2c9642988bee709d5732831a6')
version('1.38.2', commit='84c9f10c316163118ca990900a7a67555b96e75b')
depends_on('r@2.6.0:', type=('build', 'run'))
depends_on('r-biocgenerics@0.13.8:', type=('build', 'run'))
depends_on('r-biobase@2.17.8:', type=('build', 'run'))
depends_on('r-annotate@1.45.3:', type=('build', 'run'))
depends_on('r-graph@1.37.2:', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-xml', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-gseabase/package.py
|
Python
|
lgpl-2.1
| 1,301
|
[
"Bioconductor"
] |
bb693e77cd1bac31f5dc497d02c3169acd16621f332e81c7a5984d021764162a
|
########################################################################
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Utilities for managing DIRAC configuration:
getCEsFromCS
getUnusedGridCEs
getUnusedGridSEs
getSiteUpdates
getSEUpdates
"""
__RCSID__ = "$Id$"
import re
import socket
from urlparse import urlparse
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Grid import getBdiiCEInfo, getBdiiSEInfo, ldapService
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName, getDIRACSesForHostName
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
def getGridVOs():
""" Get all the VOMS VO names served by this DIRAC service
"""
voNames = []
result = getVOs()
if not result['OK']:
return result
else:
vos = result['Value']
for vo in vos:
vomsVO = getVOOption(vo, "VOMSName")
if vomsVO:
voNames.append(vomsVO)
return S_OK(voNames)
def getCEsFromCS():
""" Get all the CEs defined in the CS
"""
knownCEs = []
result = gConfig.getSections('/Resources/Sites')
if not result['OK']:
return result
grids = result['Value']
for grid in grids:
result = gConfig.getSections('/Resources/Sites/%s' % grid)
if not result['OK']:
return result
sites = result['Value']
for site in sites:
opt = gConfig.getOptionsDict('/Resources/Sites/%s/%s' % (grid, site))['Value']
ces = List.fromChar(opt.get('CE', ''))
knownCEs += ces
return S_OK(knownCEs)
def getSEsFromCS(protocol='srm'):
""" Get all the SEs defined in the CS
"""
knownSEs = {}
result = gConfig.getSections('/Resources/StorageElements')
if not result['OK']:
return result
ses = result['Value']
for se in ses:
seSection = '/Resources/StorageElements/%s' % se
result = gConfig.getSections(seSection)
if not result['OK']:
continue
accesses = result['Value']
for access in accesses:
seProtocol = gConfig.getValue(cfgPath(seSection, access, 'Protocol'), '')
if seProtocol.lower() == protocol.lower() or protocol == 'any':
host = gConfig.getValue(cfgPath(seSection, access, 'Host'), '')
knownSEs.setdefault(host, [])
knownSEs[host].append(se)
else:
continue
return S_OK(knownSEs)
def getGridCEs(vo, bdiiInfo=None, ceBlackList=None, hostURL=None, glue2=False):
""" Get all the CEs available for a given VO and having queues in Production state
"""
knownCEs = set()
if ceBlackList is not None:
knownCEs = knownCEs.union(set(ceBlackList))
ceBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiCEInfo(vo, host=hostURL, glue2=glue2)
if not result['OK']:
return result
ceBdiiDict = result['Value']
siteDict = {}
for site in ceBdiiDict:
siteCEs = set(ceBdiiDict[site]['CEs'].keys())
newCEs = siteCEs - knownCEs
if not newCEs:
continue
ceFullDict = {}
for ce in newCEs:
ceDict = {}
ceInfo = ceBdiiDict[site]['CEs'][ce]
ceType = 'Unknown'
ceDict['Queues'] = []
for queue in ceInfo['Queues']:
queueStatus = ceInfo['Queues'][queue].get('GlueCEStateStatus', 'UnknownStatus')
if 'production' in queueStatus.lower():
ceType = ceInfo['Queues'][queue].get('GlueCEImplementationName', '')
ceDict['Queues'].append(queue)
if not ceDict['Queues']:
continue
ceDict['CEType'] = ceType
ceDict['GOCSite'] = site
ceDict['CEID'] = ce
systemName = ceInfo.get('GlueHostOperatingSystemName', 'Unknown')
systemVersion = ceInfo.get('GlueHostOperatingSystemVersion', 'Unknown')
systemRelease = ceInfo.get('GlueHostOperatingSystemRelease', 'Unknown')
ceDict['System'] = (systemName, systemVersion, systemRelease)
ceFullDict[ce] = ceDict
siteDict[site] = ceFullDict
result = S_OK(siteDict)
result['BdiiInfo'] = ceBdiiDict
return result
def getSiteUpdates(vo, bdiiInfo=None, log=None):
""" Get all the necessary updates for the already defined sites and CEs
"""
def addToChangeSet(entry, changeSet):
""" Inner function to update changeSet with entry (a tuple)
:param tuple entry: entry to add to changeSet
:param set changeSet: set collecting stuff to change
"""
_section, _option, value, new_value = entry
if new_value and new_value != value:
changeSet.add(entry)
if log is None:
log = gLogger
ceBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiCEInfo(vo)
if not result['OK']:
return result
ceBdiiDict = result['Value']
changeSet = set()
for site in ceBdiiDict:
result = getDIRACSiteName(site)
if not result['OK']:
continue
siteNames = result['Value']
for siteName in siteNames:
siteSection = cfgPath('/Resources', 'Sites', siteName.split('.')[0], siteName)
result = gConfig.getOptionsDict(siteSection)
if not result['OK']:
continue
siteDict = result['Value']
# Current CS values
coor = siteDict.get('Coordinates', 'Unknown')
mail = siteDict.get('Mail', 'Unknown').replace(' ', '')
description = siteDict.get('Description', 'Unknown')
description = description.replace(' ,', ',')
longitude = ceBdiiDict[site].get('GlueSiteLongitude', '').strip()
latitude = ceBdiiDict[site].get('GlueSiteLatitude', '').strip()
# Current BDII value
newcoor = ''
if longitude and latitude:
newcoor = "%s:%s" % (longitude, latitude)
newmail = ceBdiiDict[site].get('GlueSiteSysAdminContact', '').replace('mailto:', '').strip()
newdescription = ceBdiiDict[site].get('GlueSiteDescription', '').strip()
newdescription = ", ".join([line.strip() for line in newdescription.split(",")])
# Adding site data to the changes list
addToChangeSet((siteSection, 'Coordinates', coor, newcoor), changeSet)
addToChangeSet((siteSection, 'Mail', mail, newmail), changeSet)
addToChangeSet((siteSection, 'Description', description, newdescription), changeSet)
ces = gConfig.getValue(cfgPath(siteSection, 'CE'), [])
for ce in ces:
ceSection = cfgPath(siteSection, 'CEs', ce)
ceDict = {}
result = gConfig.getOptionsDict(ceSection)
if result['OK']:
ceDict = result['Value']
else:
if ceBdiiDict[site]['CEs'].get(ce, None):
log.notice("Adding new CE", "%s to site %s/%s" % (ce, siteName, site))
ceInfo = ceBdiiDict[site]['CEs'].get(ce, None)
if ceInfo is None:
ceType = ceDict.get('CEType', '')
continue
# Current CS CE info
arch = ceDict.get('architecture', 'Unknown')
OS = ceDict.get('OS', 'Unknown')
si00 = ceDict.get('SI00', 'Unknown')
ceType = ceDict.get('CEType', 'Unknown')
ram = ceDict.get('MaxRAM', 'Unknown')
submissionMode = ceDict.get('SubmissionMode', 'Unknown')
# Current BDII CE info
newarch = ceBdiiDict[site]['CEs'][ce].get('GlueHostArchitecturePlatformType', '').strip()
systemName = ceInfo.get('GlueHostOperatingSystemName', '').strip()
systemVersion = ceInfo.get('GlueHostOperatingSystemVersion', '').strip()
systemRelease = ceInfo.get('GlueHostOperatingSystemRelease', '').strip()
newOS = ''
if systemName and systemVersion and systemRelease:
newOS = '_'.join((systemName, systemVersion, systemRelease))
newsi00 = ceInfo.get('GlueHostBenchmarkSI00', '').strip()
newCEType = 'Unknown'
for queue in ceInfo['Queues']:
queueDict = ceInfo['Queues'][queue]
newCEType = queueDict.get('GlueCEImplementationName', '').strip()
if newCEType:
break
if newCEType == 'ARC-CE':
newCEType = 'ARC'
newSubmissionMode = None
if newCEType in ['ARC', 'CREAM']:
newSubmissionMode = "Direct"
newRAM = ceInfo.get('GlueHostMainMemoryRAMSize', '').strip()
# Protect from unreasonable values
if newRAM and int(newRAM) > 150000:
newRAM = ''
# Adding CE data to the change list
addToChangeSet((ceSection, 'architecture', arch, newarch), changeSet)
addToChangeSet((ceSection, 'OS', OS, newOS), changeSet)
addToChangeSet((ceSection, 'SI00', si00, newsi00), changeSet)
addToChangeSet((ceSection, 'CEType', ceType, newCEType), changeSet)
addToChangeSet((ceSection, 'MaxRAM', ram, newRAM), changeSet)
if submissionMode == "Unknown" and newSubmissionMode:
addToChangeSet((ceSection, 'SubmissionMode', submissionMode, newSubmissionMode), changeSet)
queues = ceInfo['Queues'].keys()
for queue in queues:
queueInfo = ceInfo['Queues'][queue]
queueStatus = queueInfo['GlueCEStateStatus']
queueSection = cfgPath(ceSection, 'Queues', queue)
queueDict = {}
result = gConfig.getOptionsDict(queueSection)
if result['OK']:
queueDict = result['Value']
else:
if queueStatus.lower() == "production":
log.notice("Adding new queue", "%s to CE %s" % (queue, ce))
else:
continue
# Current CS queue info
maxCPUTime = queueDict.get('maxCPUTime', 'Unknown')
si00 = queueDict.get('SI00', 'Unknown')
maxTotalJobs = queueDict.get('MaxTotalJobs', 'Unknown')
# Current BDII queue info
newMaxCPUTime = queueInfo.get('GlueCEPolicyMaxCPUTime', '')
if newMaxCPUTime == "4" * len(newMaxCPUTime) or newMaxCPUTime == "9" * len(newMaxCPUTime):
newMaxCPUTime = ''
wallTime = queueInfo.get('GlueCEPolicyMaxWallClockTime', '')
if wallTime == "4" * len(wallTime) or wallTime == "9" * len(wallTime):
wallTime = ''
if wallTime and int(wallTime) > 0:
if not newMaxCPUTime:
newMaxCPUTime = str(int(0.8 * int(wallTime)))
else:
if int(wallTime) <= int(newMaxCPUTime):
newMaxCPUTime = str(int(0.8 * int(wallTime)))
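# Illustrative values (not taken from any real BDII record): a published
# GlueCEPolicyMaxCPUTime of "999999" or "4444" is treated as a placeholder and
# discarded; if GlueCEPolicyMaxWallClockTime is 2880 and no usable CPU time
# survives, the queue gets maxCPUTime = int(0.8 * 2880) = 2304.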
newSI00 = ''
caps = queueInfo.get('GlueCECapability', [])
if isinstance(caps, basestring):
caps = [caps]
for cap in caps:
if 'CPUScalingReferenceSI00' in cap:
newSI00 = cap.split('=')[-1]
# Adding queue info to the CS
addToChangeSet((queueSection, 'maxCPUTime', maxCPUTime, newMaxCPUTime), changeSet)
addToChangeSet((queueSection, 'SI00', si00, newSI00), changeSet)
if maxTotalJobs == "Unknown":
newTotalJobs = min(1000, int(int(queueInfo.get('GlueCEInfoTotalCPUs', 0)) / 2))
newWaitingJobs = max(2, int(newTotalJobs * 0.1))
newTotalJobs = str(newTotalJobs)
newWaitingJobs = str(newWaitingJobs)
addToChangeSet((queueSection, 'MaxTotalJobs', '', newTotalJobs), changeSet)
addToChangeSet((queueSection, 'MaxWaitingJobs', '', newWaitingJobs), changeSet)
# Updating eligible VO list
VOs = set()
if queueDict.get('VO', ''):
VOs = set([q.strip() for q in queueDict.get('VO', '').split(',') if q])
if vo not in VOs:
VOs.add(vo)
VOs = list(VOs)
newVOs = ','.join(VOs)
addToChangeSet((queueSection, 'VO', '', newVOs), changeSet)
return S_OK(changeSet)
def getGridSEs(vo, bdiiInfo=None, seBlackList=None):
""" Get all the SEs available for a given VO
"""
seBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiSEInfo(vo)
if not result['OK']:
return result
seBdiiDict = result['Value']
knownSEs = set()
if seBlackList is not None:
knownSEs = knownSEs.union(set(seBlackList))
siteDict = {}
for site in seBdiiDict:
for gridSE in seBdiiDict[site]['SEs']:
seDict = seBdiiDict[site]['SEs'][gridSE]
# if "lhcb" in seDict['GlueSAName']:
# print '+'*80
# print gridSE
# for k,v in seDict.items():
# print k,'\t',v
if gridSE not in knownSEs:
siteDict.setdefault(site, {})
if isinstance(seDict['GlueSAAccessControlBaseRule'], list):
voList = [re.sub('^VO:', '', s) for s in seDict['GlueSAAccessControlBaseRule']]
else:
voList = [re.sub('^VO:', '', seDict['GlueSAAccessControlBaseRule'])]
siteDict[site][gridSE] = {'GridSite': seDict['GlueSiteUniqueID'],
'BackendType': seDict['GlueSEImplementationName'],
'Description': seDict.get('GlueSEName', '-'),
'VOs': voList
}
result = S_OK(siteDict)
result['BdiiInfo'] = seBdiiDict
return result
def getGridSRMs(vo, bdiiInfo=None, srmBlackList=None, unUsed=False):
result = ldapService(serviceType='SRM', vo=vo)
if not result['OK']:
return result
srmBdiiDict = result['Value']
knownSRMs = set()
if srmBlackList is not None:
knownSRMs = knownSRMs.union(set(srmBlackList))
siteSRMDict = {}
for srm in srmBdiiDict:
srm = dict(srm)
endPoint = srm.get('GlueServiceEndpoint', '')
srmHost = ''
if endPoint:
srmHost = urlparse(endPoint).hostname
if not srmHost:
continue
if srmHost in knownSRMs:
continue
if unUsed:
result = getDIRACSesForHostName(srmHost)
if not result['OK']:
return result
diracSEs = result['Value']
if diracSEs:
# If it is a known SRM and only new SRMs are requested, continue
continue
site = srm.get('GlueForeignKey', '').replace('GlueSiteUniqueID=', '')
siteSRMDict.setdefault(site, {})
siteSRMDict[site][srmHost] = srm
if bdiiInfo is None:
result = getBdiiSEInfo(vo)
if not result['OK']:
return result
seBdiiDict = dict(result['Value'])
else:
seBdiiDict = dict(bdiiInfo)
srmSeDict = {}
for site in siteSRMDict:
srms = siteSRMDict[site].keys()
for srm in srms:
if seBdiiDict.get(site, {}).get('SEs', {}).get(srm, {}):
srmSeDict.setdefault(site, {})
srmSeDict[site].setdefault(srm, {})
srmSeDict[site][srm]['SRM'] = siteSRMDict[site][srm]
srmSeDict[site][srm]['SE'] = seBdiiDict[site]['SEs'][srm]
return S_OK(srmSeDict)
def getSRMUpdates(vo, bdiiInfo=None):
changeSet = set()
def addToChangeSet(entry, changeSet):
_section, _option, value, new_value = entry
if new_value and new_value != value:
changeSet.add(entry)
result = getGridSRMs(vo, bdiiInfo=bdiiInfo)
if not result['OK']:
return result
srmBdiiDict = result['Value']
result = getSEsFromCS()
if not result['OK']:
return result
seDict = result['Value']
result = getVOs()
if result['OK']:
csVOs = set(result['Value'])
else:
csVOs = set([vo])
for seHost, diracSE in seDict.items():
seSection = '/Resources/StorageElements/%s' % diracSE[0]
# Look up existing values first
description = gConfig.getValue(cfgPath(seSection, 'Description'), 'Unknown')
backend = gConfig.getValue(cfgPath(seSection, 'BackendType'), 'Unknown')
vos = gConfig.getValue(cfgPath(seSection, 'VO'), 'Unknown').replace(' ', '')
size = gConfig.getValue(cfgPath(seSection, 'TotalSize'), 'Unknown')
# Look up current BDII values
srmDict = {}
seBdiiDict = {}
for site in srmBdiiDict:
if seHost in srmBdiiDict[site]:
srmDict = srmBdiiDict[site][seHost]['SRM']
seBdiiDict = srmBdiiDict[site][seHost]['SE']
break
if not srmDict or not seBdiiDict:
continue
newDescription = seBdiiDict.get('GlueSEName', 'Unknown')
newBackend = seBdiiDict.get('GlueSEImplementationName', 'Unknown')
newSize = seBdiiDict.get('GlueSESizeTotal', 'Unknown')
addToChangeSet((seSection, 'Description', description, newDescription), changeSet)
addToChangeSet((seSection, 'BackendType', backend, newBackend), changeSet)
addToChangeSet((seSection, 'TotalSize', size, newSize), changeSet)
# Evaluate VOs if no space token defined, otherwise this is VO specific
spaceToken = ''
for i in range(1, 10):
protocol = gConfig.getValue(cfgPath(seSection, 'AccessProtocol.%d' % i, 'Protocol'), '')
if protocol.lower() == 'srm':
spaceToken = gConfig.getValue(cfgPath(seSection, 'AccessProtocol.%d' % i, 'SpaceToken'), '')
break
if not spaceToken:
bdiiVOs = srmDict.get('GlueServiceAccessControlBaseRule', [])
bdiiVOs = set([re.sub('^VO:', '', rule) for rule in bdiiVOs])
seVOs = csVOs.intersection(bdiiVOs)
newVOs = ','.join(seVOs)
addToChangeSet((seSection, 'VO', vos, newVOs), changeSet)
return S_OK(changeSet)
def getDBParameters(fullname):
"""
Retrieve database parameters from the CS.
fullname should be of the form <System>/<DBName>.
Host, User and Password are read from the database section of the CS,
falling back to the common /Systems/Databases section; the call fails if
they cannot be found in either place. Port falls back in the same way and
defaults to the MySQL standard 3306 if it is not defined. DBName must be
defined in the database section, otherwise the call fails.
Returns a dictionary with the keys: 'Host', 'Port', 'User', 'Password'
and 'DBName'.
"""
cs_path = getDatabaseSection(fullname)
parameters = {}
result = gConfig.getOption(cs_path + '/Host')
if not result['OK']:
# No host name found, try at the common place
result = gConfig.getOption('/Systems/Databases/Host')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: Host')
dbHost = result['Value']
# Check if the host is the local one and then set it to 'localhost' to use
# a socket connection
if dbHost != 'localhost':
localHostName = socket.getfqdn()
if localHostName == dbHost:
dbHost = 'localhost'
parameters['Host'] = dbHost
# Mysql standard
dbPort = 3306
result = gConfig.getOption(cs_path + '/Port')
if not result['OK']:
# No individual port number found, try at the common place
result = gConfig.getOption('/Systems/Databases/Port')
if result['OK']:
dbPort = int(result['Value'])
else:
dbPort = int(result['Value'])
parameters['Port'] = dbPort
result = gConfig.getOption(cs_path + '/User')
if not result['OK']:
# No individual user name found, try at the common place
result = gConfig.getOption('/Systems/Databases/User')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: User')
dbUser = result['Value']
parameters['User'] = dbUser
result = gConfig.getOption(cs_path + '/Password')
if not result['OK']:
# No individual password found, try at the common place
result = gConfig.getOption('/Systems/Databases/Password')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: Password')
dbPass = result['Value']
parameters['Password'] = dbPass
result = gConfig.getOption(cs_path + '/DBName')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: DBName')
dbName = result['Value']
parameters['DBName'] = dbName
return S_OK(parameters)
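# Hypothetical usage sketch (the section name below is an example, not a real
# configuration entry):
#   result = getDBParameters('DataManagement/FileCatalogDB')
#   if result['OK']:
#       params = result['Value']
#       # e.g. {'Host': 'db.example.org', 'Port': 3306, 'User': 'Dirac',
#       #       'Password': '***', 'DBName': 'FileCatalogDB'}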
def getElasticDBParameters(fullname):
"""
Retrieve Database parameters from CS
fullname should be of the form <System>/<DBname>
"""
cs_path = getDatabaseSection(fullname)
parameters = {}
result = gConfig.getOption(cs_path + '/Host')
if not result['OK']:
# No host name found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/Host')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: Host. Using localhost")
dbHost = 'localhost'
else:
dbHost = result['Value']
else:
dbHost = result['Value']
# Check if the host is the local one and then set it to 'localhost' to use
# a socket connection
if dbHost != 'localhost':
localHostName = socket.getfqdn()
if localHostName == dbHost:
dbHost = 'localhost'
parameters['Host'] = dbHost
# Elasticsearch standard port
result = gConfig.getOption(cs_path + '/Port')
if not result['OK']:
# No individual port number found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/Port')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: Port. Using 9200")
dbPort = 9200
else:
dbPort = int(result['Value'])
else:
dbPort = int(result['Value'])
parameters['Port'] = dbPort
result = gConfig.getOption(cs_path + '/User')
if not result['OK']:
# No individual user name found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/User')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: User. Assuming no user/password is provided/needed")
dbUser = None
else:
dbUser = result['Value']
else:
dbUser = result['Value']
parameters['User'] = dbUser
result = gConfig.getOption(cs_path + '/Password')
if not result['OK']:
# No individual password found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/Password')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: Password. Assuming no user/password is provided/needed")
dbPass = None
else:
dbPass = result['Value']
else:
dbPass = result['Value']
parameters['Password'] = dbPass
result = gConfig.getOption(cs_path + '/SSL')
if not result['OK']:
# No SSL option found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/SSL')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: SSL. Assuming SSL is needed")
ssl = True
else:
ssl = False if result['Value'].lower() in ('false', 'no', 'n') else True
else:
ssl = False if result['Value'].lower() in ('false', 'no', 'n') else True
parameters['SSL'] = ssl
return S_OK(parameters)
def getOAuthAPI(instance):
""" Get OAuth API url """
return gConfig.getValue("/Systems/Framework/%s/URLs/OAuthAPI" % instance)
|
chaen/DIRAC
|
ConfigurationSystem/Client/Utilities.py
|
Python
|
gpl-3.0
| 22,844
|
[
"DIRAC"
] |
2e7006c692c8f1683147f40a73a30a4e258cb2889bf006518552a5d5a3ac5d90
|
import os
INSTALLED_APPS = [
'django.contrib.staticfiles',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages', 'django.contrib.sessions',
'django.contrib.admin',
'octopus',
'test_app',
'django.contrib.sites'
]
SECRET_KEY = '1'
DEBUG = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
)
APPEND_SLASHES = True
root_dir = os.path.dirname(os.path.realpath(__file__))
STATIC_ROOT = os.path.join(root_dir, 'static')
# STATICFILES_DIRS = [STATIC_ROOT]
print(STATIC_ROOT)
TEMPLATE_DIRECTORIES = (os.path.join(root_dir, 'test_app/templates'),)  # trailing comma so this is a tuple, not a bare string
MIDDLEWARE_CLASSES = ('django.middleware.csrf.CsrfViewMiddleware',)
ROOT_URLCONF = "test_app.urls"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.db',
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': TEMPLATE_DIRECTORIES,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
|
brmc/django-octopus
|
tests/settings.py
|
Python
|
mit
| 1,705
|
[
"Octopus"
] |
1fa1dbe8aa5d0cc3747cb2d9058bda3a17c3f56b86b5cc58204e6a571fb06170
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.support.script_helper import assert_python_ok
import os
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class MyObject(object):
def my_method(self):
pass
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(os.cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_concurrent_futures.py
|
Python
|
gpl-3.0
| 25,887
|
[
"Brian"
] |
8388035d2591824a63cef988714ec98d608c6bb4a05ab13f92c94b2ecb4b16f0
|
import theano.tensor as T
import theano.tensor.nnet as Tnn
import numpy as np
import theano
import nonlinearity
class GaussianHidden(object):
"""
Stochastic layer: Gaussian distribution, linear transpose + random sampling z
"""
def __init__(self, rng, input, n_in, n_out, W_var=None, b_var=None, W_mean=None, b_mean=None,
activation=None):
"""
        :type rng: numpy.random.RandomState
        :param rng: a numpy random number generator used to initialize the weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
        :param activation: non-linearity to be applied in the stochastic layer, if any
"""
self.input = input
if W_var is None:
if activation is None:
W_values_var= np.asarray(
np.zeros((n_in, n_out)),
dtype=theano.config.floatX
)
elif activation == T.tanh or activation == Tnn.sigmoid:
W_values_var = np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == Tnn.sigmoid:
W_values_var *= 4
else:
raise Exception('Unknown activation in HiddenLayer.')
W_var = theano.shared(value=W_values_var, name='W', borrow=True)
if b_var is None:
b_values_var = np.zeros((n_out,), dtype=theano.config.floatX)
b_var = theano.shared(value=b_values_var, name='b', borrow=True)
if W_mean is None:
if activation is None:
W_values_mean= nonlinearity.initialize_matrix(rng, n_in, n_out)
elif activation == T.tanh or activation == Tnn.sigmoid:
W_values_mean = np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == Tnn.sigmoid:
W_values_mean *= 4
else:
raise Exception('Unknown activation in HiddenLayer.')
W_mean = theano.shared(value=W_values_mean, name='W', borrow=True)
if b_mean is None:
b_values_mean = nonlinearity.initialize_vector(rng, n_out)
b_mean = theano.shared(value=b_values_mean, name='b', borrow=True)
self.W_var = W_var
self.W_mean = W_mean
self.b_var = b_var
self.b_mean = b_mean
# N x d_out
self.q_logvar = (T.dot(input, self.W_var) + self.b_var).clip(-10,10)
self.q_mean = T.dot(input, self.W_mean) + self.b_mean
# loglikelihood
self.logpz = -0.5 * (np.log(2 * np.pi) + (self.q_mean**2 + T.exp(self.q_logvar))).sum(axis=1)
self.logqz = - 0.5 * (np.log(2 * np.pi) + 1 + self.q_logvar).sum(axis=1)
# parameters of the model
self.params = [self.W_var, self.b_var, self.W_mean, self.b_mean]
def sample_z(self, rng_share):
        '''Draw a sample of z using the reparameterization trick.
        :param rng_share: Theano shared random stream used to draw standard-normal noise eps
        :returns: q_mean + exp(0.5 * q_logvar) * eps
        '''
eps = rng_share.normal(size=self.q_mean.shape, dtype=theano.config.floatX)
return self.q_mean + T.exp(0.5 * self.q_logvar) * eps
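# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of wiring GaussianHidden into a Theano graph and drawing a
# sample of z via the reparameterization trick. The dimensions, the seed and the
# zero-valued toy input below are assumptions made purely for illustration.
def _example_sample():
    from theano.tensor.shared_randomstreams import RandomStreams
    np_rng = np.random.RandomState(0)                    # numpy rng for weight initialization
    theano_rng = RandomStreams(np_rng.randint(2 ** 30))  # symbolic rng for the noise eps
    x = T.matrix('x')                                    # (n_examples, n_in)
    layer = GaussianHidden(np_rng, input=x, n_in=20, n_out=5)
    z = layer.sample_z(theano_rng)                       # q_mean + exp(0.5*q_logvar) * eps
    f = theano.function([x], [layer.q_mean, layer.q_logvar, z])
    return f(np.zeros((3, 20), dtype=theano.config.floatX))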
|
zhenxuan00/mmdgm
|
conv-mmdgm/layer/GaussianHidden.py
|
Python
|
mit
| 3,834
|
[
"Gaussian"
] |
2c716742620de5c2384e18a7197c7b161ce598771aab62e161fddc895263b556
|
#!/usr/bin/python
BoysNames = [ "Aaron", "Adam", "Alexander", "Andrew", "Anthony", "Austin",
"Benjamin", "Blake", "Brandon", "Brendan", "Brian", "Brady",
"Bryson", "Charles", "Christian", "Christopher", "Cody", "Conner",
"Daniel", "Dean", "Derek", "Donald", "Dylan", "Edward", "Eli",
"Eric", "Ethan", "Evan", "Felix", "Ferris", "Floyd", "Flynn",
"Foster", "Frederick", "Garrett", "Gavin", "Gerald", "Glenn",
"Gordon", "Grant", "Gregory", "Hal", "Hans", "Harold", "Hayden",
"Henry", "Howard", "Hugh", "Ian", "Ignatius", "Isaac", "Isaiah",
"Ishmael", "Jacob", "James", "Jason", "Jeremy", "John",
"Jonathan", "Joseph", "Joshua", "Justin", "Keith", "Kenneth",
"Kevin", "Kirk", "Kurt", "Kyle", "Lawrence", "Leonard", "Logan",
"Louis", "Luke", "Mason", "Matthew", "Maxwell", "Merritt",
"Michael", "Milton", "Morgan", "Nathan", "Newman", "Nicholas",
"Nigel", "Noah", "Norman", "Olin", "Omar", "Orson", "Oswald",
"Otto", "Owen", "Parker", "Patrick", "Paul", "Perry", "Philip",
"Pierce", "Preston", "Quentin", "Quincy", "Quinn", "Richard",
"Riley", "Robert", "Ryan", "Roland", "Roy", "Ruben", "Samuel",
"Sander", "Sawyer", "Scott", "Sean", "Sheldon", "Spencer",
"Steven", "Thomas", "Theodore", "Timothy", "Trenton", "Trevor",
"Troy", "Tucker", "Tyler", "Val", "Vaughn", "Vernon", "Victor",
"Vincent", "Virgil", "Wade", "Warren", "Wesley", "Wilbur",
"Wilfred", "William", "Wilson", "Wyatt", "Xander", "Xavier",
"Yancy", "Yves", "Yuri", "Zachary", "Zane", "Zebulon" ]
GirlsNames = ["Abaigeal", "Abana", "Abbey", "Abbie", "Abby", "Abegaila",
"Abelia", "Abi", "Abigail", "Abilene", "Acacia", "Ada", "Addie",
"Aesha", "Agatha", "Aideen", "Aimee", "Ainsley", "Ailsa", "Aisha",
"Alaina", "Alana", "Alanah", "Alberta", "Alcinda", "Aleida",
"Alena", "Alesa", "Alessia", "Aletia", "Alexa", "Alexandra",
"Alicia", "Alida", "Alina", "Alisha", "Alissa", "Allie",
"Allison", "Alma", "Althea", "Alysen", "Alyssa", "Amanda",
"Amber", "Amberlee", "Ambrosia", "Amelia", "Amethyst", "Amiela",
"Amy", "AnaMaree", "Anastasia", "Andie", "Andra", "Andrea",
"Andriana", "Anesa", "Angel", "Angela", "Angelica", "Angelika",
"Angelina", "Angie", "Ann", "Anna", "Annabeth", "Annalee",
"Annaliese", "Annalisa", "Annette", "Annie", "Annitta", "Antheia",
"Antonella", "Arabella", "Arianne", "Ariel", "Arlene", "Aubrey",
"Aubry", "Augusta", "Avis.", "Beatrice", "Beatrix", "Becca",
"Belinda", "Belle", "Benita", "Bernadette", "Bernice", "Bernita",
"Berta", "Beryl", "Bess", "Bessie", "Beta", "Beth", "Betsy",
"Betty", "Bev", "Beverly", "Bianca", "Billie", "Billy", "Blossom",
"Bridget", "Bridgid", "Brigit", "Brigitte", "Britney", "Brittany",
"Brooke", "Brooklyn", "Bryana", "Caitriona", "Calandra", "Callie",
"Camelia", "Camella", "Camila", "Camilla", "Camillah", "Cammie",
"Candice", "Candee", "Candi", "Candis", "Candy", "Caprice",
"Cara", "Caresse", "Carey", "Cari", "Carina", "Carine", "Carisa",
"Carla", "Carlana", "Carlee", "Carlene", "Carley", "Carlie",
"Carmel", "Carmella", "Carmen", "Carmencita", "Carol", "Carolina",
"Catalina", "Cate", "Cath", "Cathie", "Cathleen", "Cathlin",
"Cecily", "Celia", "Celine", "Celeste", "Celestina", "Celestine",
"Celestyn", "Chandelle", "Chanel", "Chantal", "Charleen",
"Charlene", "Charlie", "Charlot", "Charlotte", "Charly",
"Charmain", "Charmaine", "Chase", "Chelsea", "Chelsey",
"Cherry", "Cheryl", "Cheryll", "Chrissie", "Chrissy", "Christa",
"Christabel", "Christal", "Christel", "Christelle", "Christiane",
"Christie", "Christin", "Christina", "Christine", "Clarabelle",
"Clare", "Clarisa", "Clarissa", "Claudette", "Claudia",
"Claudine", "Colene", "Colette", "Collana", "Constance",
"Coral", "Coreen", "Corina", "Corine", "Cory", "Courtney",
"Crissie", "Crissy", "Cristina", "Crystal", "Cybil", "Cybill",
"Dahlia", "Daisy", "Dale", "Dana", "Danette", "Dani", "Daniela",
"Dannah", "Daphne", "Darcee", "Darcy", "Darcy", "Dawn", "Deanna",
"Delwyn", "Delyth", "Demi", "Dena", "Denell", "Denise", "Desiree",
"Diana", "Diane", "Dianna", "Dianne", "Diantha", "Dina", "Dinah",
"Dolores", "Dominique", "Donatella", "Donella", "Donna", "Dora",
"Doreen", "Dorothy", "Drew", "Ebony", "Eden", "Edna", "Edwina",
"Eileen", "Elaine", "Eleanor", "Eli", "Elisa", "Elisabeth",
"Elissa", "Eliza", "Elizabeth", "Ella", "Ellen", "Elli",
"Ellie", "Ellinor", "Elly", "Elma", "Eloise", "Emilie", "Emily",
"Emma", "Enid", "Erica", "Erika", "Erin", "Erina", "Eryn",
"Esmeralda", "Estella", "Estelle", "Ester", "Euna", "Eunice",
"Eve", "Eveleen", "Fae", "Faith", "Farrah", "Fay", "Faye",
"Felicia", "Felicity", "Fern", "Fifi", "Fleur", "Floella",
"Flora", "Florence", "Florens", "Florenz", "Fran", "Franca",
"Frances", "Francesca", "Gabriela", "Gabriell", "Gabriella",
"Gabrielle", "Gail", "Gayle", "Gemma", "Geneva", "Genevieve",
"Georgette", "Georgia", "Georgie", "Georgina", "Geraldine",
"Gertrud", "Gillian", "Giselle", "Gladys", "Glenda", "Glenys",
"Gloria", "Gloriana", "Glynn", "Goloria", "Goldie", "Grace",
"Gracie", "Greta", "Gretchen", "Gretel", "Gwen", "Gwyneth.",
"Halley", "Hallie", "Harriet", "Hayley", "Heather", "Heidi",
"Helaine", "Helana", "Helen", "Helena", "Helene", "Henrietta",
"Hilary", "Hilda", "Hillary", "Holly", "Hope", "Hyacinth", "Ida",
"Imogen", "Iona", "Ingrid", "Irena", "Irene", "Irina", "Iris",
"Jacalyn", "Jacaranda", "Jacinda", "Jacinta", "Jackie",
"Jacqueline", "Jacqui", "Jacynth", "Jade", "Jaime", "Jaleen",
"Jamie", "Jan", "Janae", "Jane", "Janel", "Janessa", "Janet",
"Janey", "Janice", "Janie", "Janis", "Janita", "Jasmin",
"Jasmine", "Jaylene", "Jayme", "Jayne", "Jazmin", "Jean",
"Jeana", "Jessa", "Jessi", "Jessica", "Jessie", "Jessy", "Jill",
"Jillian", "Jinny", "Jo", "Joan", "Joanna", "Joceline", "Jocelyn",
"Jo Ella", "Jodi", "Jodie", "Jody", "Johana", "Johanna",
"Jolanda", "Joleen", "Jordan", "Josephine", "Josie", "Joss",
"Joy", "Joyce", "Jude", "Judith", "Judy", "Juli", "Julia",
"Julie", "Julienne", "Juliet", "June", "Kaitlin", "Kala",
"Kalani", "Kalee", "Kali", "Kallie", "Kami", "Kara", "Karen",
"Karena", "Kari", "Karin", "Karina", "Karissa", "Karla",
"Karren", "Kasa", "Kasi", "Kate", "Katelin", "Kath", "Katharina",
"Kathe", "Katherine", "Kathi", "Kathie", "Kathleen", "Kathryn",
"Katie", "Katrina", "Katy", "Katya", "Kayla", "Kaylie", "Kaylenn",
"Kayley", "Kaylynn", "Keisha", "Kelly", "Kelsey", "Kelsi",
"Kessa", "Kiley", "Kim", "Kimberlee", "Kimberley", "Kimberly",
"Kira", "Kiri", "Kirsten", "Kirsti", "Kirstin", "Kitty",
"Klarissa", "Klarysa", "Klaudia", "Krislyn", "Krissy", "Krista",
"Kristal", "Kristen", "Kristie", "Kristin", "Kristina", "Kristy",
"Kristin", "Kyla", "Kylie", "Lacy", "Lana", "Lara", "Larisa",
"Laura", "Laureen", "Lauren", "Leah", "Leanne", "Lee", "Leigh",
"Leila", "Leisha", "Lenice", "Lesley", "Leticia", "Lexa", "Lexi",
"Lexy", "Liana", "Liane", "Libby", "Licia", "Lila", "Lili",
"Lilian", "Lilly", "Linda", "Lindsay", "Lisa", "Loreen",
"Loretta", "Lorinda", "Lorna", "Lorraine", "Lottie", "Louisa",
"Louise", "Lucerne", "Lucia", "Lucinda", "Lucy", "Lydia",
"Lynda", "Lynette", "Madeleine", "Madeline", "Madelyn", "Madge",
"Madison", "Madonna", "Maegan", "Maeve", "Maggie", "Maisie",
"Mallory", "Marica", "Margaret", "Marge", "Margie", "Margo",
"Margot", "Maria", "Marian", "Marianne", "Marie", "Marien",
"Marilyn", "Marisa", "Marissa", "Marjorie", "Martha", "Mary",
"Matilda", "Maud", "Maude", "Maureen", "Mavis", "Maxine", "May",
"Megan", "Meghan", "Melanie", "Melinda", "Melisa", "Melissa",
"Melodie", "Melody", "Meredith", "Michelle", "Mildred", "Millie",
"Mimi", "Mindy", "Miranda", "Miriam", "Molly", "Monica",
"Monique", "Morgan", "Muriel", "Nadia", "Nadine", "Nadira",
"Nancy", "Nanette", "Natalie", "Natasha", "Nelli", "Nellie",
"Nelly", "Nichola", "Nichole", "Nickie", "Nicola", "Nikki",
"Nikola", "Nina", "Nola", "Nora", "Norah", "Noreen",
"Olive", "Olivia", "Paige", "Pam", "Pamela", "Patience",
"Patrice", "Patricia", "Patsy", "Paula", "Paulette", "Paulina",
"Pauline", "Peggy", "Peni", "Penny", "Pennington", "Petra",
"Phebe", "Philippa", "Phillis", "Phoebe", "Phylicia", "Phyllis",
"Pippa", "Polly", "Pollyam", "Pollyanna", "Poppy", "Portia",
"Priscilla", "Prudence", "Rachael", "Rachel", "Rachelle",
"Rae", "Raeleen", "Rai", "Rasheda", "Rashida", "Rayna", "Rea",
"Reagan", "Reanna", "Rebecca", "Rebekah", "Rebekka", "Rei",
"Rhea", "Rheanna", "Rhianna", "Rhoda", "Rhonda", "Ria",
"Rickie", "Riley", "Rimona", "Rina", "Rita", "Roberta", "Robin",
"Robyn", "Rochelle", "Romana", "Ros", "Rosanne", "Rosaleen",
"Rosalia", "Rosalie", "Rosalind", "Rose", "Roseanne", "Rosemary",
"Rosie", "Roslyn", "Rowena", "Roxana", "Roxann", "Roxanne",
"Ruby", "Sabrina", "Sadie", "Safia", "Salena", "Sally", "Salma",
"Samantha", "Sandi", "Sandy", "Sandyha", "Sara", "Sarah",
"Sarala", "Sasha", "Savina", "Scarlett", "Selena", "Selene",
"Shannon", "Sharleen", "Sharlotte", "Sharon", "Sheela", "Sheena",
"Shela", "Shelley", "Shellie", "Shelly", "Sherri", "Sherrie",
"Sherry", "Shiela", "Shieryl", "Shiree", "Shirley", "Silvia",
"Simone", "Sindy", "Sondra", "Sonja", "Sonya", "Sophia", "Sophie",
"Sophy", "Stacey", "Staci", "Stacie", "Stefanie", "Stella",
"Steph", "Stephanie", "Sue", "Suela", "Susan", "Susana",
"Susannah", "Susie", "Suzanna", "Suzannah", "Suzanne", "Suzie",
"Suzy", "Sybil", "Tabitha", "Talia", "Tam", "Tamara", "Tami",
"Tammi", "Tammie", "Tammy", "Tania", "Tansy", "Tanya", "Taylor",
"Teresa", "Terri", "Terry", "Tess", "Tessa", "Tessi", "Thea",
"Thelma", "Theresa", "Tia", "Tina", "Toni", "Tori",
"Tracey", "Traci", "Tracy", "Trish", "Trisha", "Trudie", "Trudy",
"Una", "Val", "Valaree", "Valda", "Valeria", "Valerie", "Vanessa",
"Velma", "Venessa", "Vera", "Veronica", "Veronique", "Vicki",
"Vickie", "Vicky", "Victoire", "Victoria", "Viki", "Virginia", "Viv",
"Wendie", "Wendy", "Whitney", "Whoopi", "Wilma", "Winnifred",
"Yasmin", "Yasmine", "Yolanda", "Yvette", "Zoe", ]
LastNames = [ "Abrahams", "Acker", "Ackerman", "Adamson", "Addison", "Adkins",
"Aiken", "Aitken", "Akers", "Alberts", "Albinson", "Alexander",
"Alfredson", "Alfson", "Allard", "Allsopp", "Alvey", "Anderson",
"Andrews", "Andrewson", "Anson", "Anthonyson", "Appleby",
"Appleton", "Archer", "Arkwright", "Armistead", "Arnold",
"Arrington", "Arterberry", "Arterbury", "Arthur", "Arthurson",
"Ash", "Ashley", "Ashworth", "Atkins", "Atkinson", "Attaway",
"Atteberry", "Atterberry", "Attwater", "Augustine", "Auteberry",
"Autenberry", "Auttenberg", "Avery", "Ayton", "Babcock",
"Babcocke", "Babcoke", "Backus", "Badcock", "Badcocke",
"Bagley", "Bailey", "Baker", "Baldwin", "Bancroft", "Banister",
"Banks", "Barber", "Bardsley", "Barker", "Barlow", "Barnes",
"Barton", "Bartram", "Bass", "Bates", "Bateson", "Battle",
"Batts", "Baxter", "Beake", "Beasley", "Beattie", "Becket",
"Beckett", "Beckham", "Belcher", "Bellamy", "Benbow",
"Bennet", "Bennett", "Benson", "Benton", "Bernard", "Berry",
"Bird", "Bishop", "Black", "Blackbourne", "Blackburn", "Blackman",
"Blackwood", "Blake", "Blakeslee", "Bloodworth", "Bloxam",
"Bloxham", "Blue", "Blythe", "Boivin", "Bolton", "Bond",
"Bonham", "Bonher", "Bonner", "Bonney", "Boone", "Booner",
"Boothman", "Botwright", "Bourke", "Boyce", "Braddock",
"Bradford", "Bradley", "Brams", "Bramson", "Brasher",
"Brassington", "Bray", "Breckenridge", "Breckinridge",
"Brewer", "Brewster", "Brigham", "Bristol", "Bristow",
"Britton", "Broadbent", "Brock", "Brooks", "Brown", "Brownlow",
"Bryant", "Bryson", "Bullard", "Bulle", "Bullock", "Bunker",
"Burke", "Burnham", "Burrell", "Burton", "Bush", "Butcher",
"Butler", "Butts", "Byrd", "Cannon", "Cantrell", "Carl",
"Carlisle", "Carlyle", "Carpenter", "Carter", "Cartwright",
"Caulfield", "Causer", "Causey", "Chamberlain", "Chance",
"Chancellor", "Chandler", "Chapman", "Chase", "Cheshire",
"Christians", "Christianson", "Christinsen", "Christinson",
"Christisen", "Christison", "Christopher", "Christophers",
"Church", "Clark", "Clarke", "Clarkson", "Clausson", "Clawson",
"Clayton", "Clemens", "Clifford", "Cline", "Clinton", "Close",
"Coburn", "Collingwood", "Combs", "Comstock", "Constable", "Cook",
"Cooke", "Cookson", "Coombs", "Cooper", "Corra", "Cotterill",
"Cowden", "Cox", "Crawford", "Crewe", "Cristians", "Cristiansen",
"Cristianson", "Croft", "Cropper", "Cross", "Crouch", "Cummins",
"Curtis", "Dalton", "Danell", "Daniel", "Daniell", "Daniels",
"Danielson", "Dannel", "Danniel", "Danniell", "Darby",
"Davies", "Davis", "Davison", "Dawson", "Day", "Deadman",
"Deering", "Denman", "Dennel", "Dennell", "Derrick", "Derricks",
"Derrickson", "Dexter", "Dick", "Dickens", "Dickenson",
"Dickson", "Disney", "Dixon", "Donalds", "Donaldson", "Downer",
"Draper", "Duke", "Dukeson", "Durant", "Dwerryhouse", "Dyer",
"Eads", "Earl", "Earls", "Earlson", "Easom", "Eason", "Eaton",
"Eccleston", "Ecclestone", "Edison", "Edwards", "Edwardson",
"Elder", "Eldridge", "Elliot", "Ellison", "Ellisson", "Elliston",
"Ellsworth", "Elmerson", "Ely", "Emerson", "Endicott", "Ericson",
"Espenson", "Ethans", "Ethanson", "Eustis", "Evanson", "Evered",
"Fabian", "Fairbairn", "Fairburn", "Fairchild", "Fairclough",
"Faulkner", "Fay", "Fear", "Fenn", "Firmin", "Fisher",
"Fishman", "Fitzroy", "Fleming", "Fletcher", "Ford", "Forester",
"Forney", "Foss", "Foster", "Fox", "Franklin", "Freeman", "Frost",
"Fry", "Frye", "Fuller", "Gabriels", "Gabrielson", "Gardenar",
"Gardiner", "Gardner", "Gardyner", "Garner", "Garrard", "Garrod",
"Georgeson", "Gibb", "Gibbs", "Gibson", "Gilbert", "Giles", "Glendon",
"Gilliam", "Glover", "Godfrey", "Goffe", "Goode", "Gorbold",
"Gore", "Granger", "Grant", "Gray", "Green", "Greene",
"Gregory", "Grey", "Groves", "Gully", "Hackett", "Hadaway",
"Haden", "Haggard", "Haight", "Hale", "Hall", "Hallman",
"Hameldon", "Hamilton", "Hamm", "Hampson", "Hampton", "Hancock",
"Hanley", "Hanson", "Harden", "Hardwick", "Hardy", "Harford",
"Hargrave", "Harley", "Harlow", "Harman", "Harmon", "Haroldson",
"Harper", "Harrell", "Harrelson", "Harris", "Harrison", "Hart",
"Hartell", "Harvey", "Hathaway", "Hatheway", "Hathoway", "Haward",
"Hawk", "Hawking", "Hawkins", "Hayward", "Haywood", "Heath",
"Henderson", "Hendry", "Henryson", "Henson", "Hepburn", "Herbert",
"Herberts", "Herbertson", "Hermanson", "Hewitt", "Hext",
"Hicks", "Hightower", "Hill", "Hillam", "Hilton", "Hobbes",
"Hobbs", "Hobson", "Hodges", "Hodson", "Hogarth", "Hollands",
"Hollins", "Holme", "Holmes", "Holmwood", "Holt", "Honeycutt",
"Honeysett", "Hooker", "Hooper", "Hope", "Hopkins", "Hopper",
"Hopson", "Horne", "Horsfall", "Horton", "House", "Howard",
"Howe", "Howland", "Howse", "Huddleson", "Huddleston", "Hudnall",
"Hudson", "Huff", "Hughes", "Hull", "Hume", "Hunnisett", "Hunt",
"Hunter", "Hurst", "Hutson", "Huxley", "Huxtable", "Hyland",
"I'Anson", "Ibbot", "Ibbott", "Ikin", "Ingham", "Ingram",
"Jackson", "Jacobs", "Jacobson", "James", "Jamison",
"Janson", "Jardine", "Jarvis", "Jeffers", "Jefferson", "Jeffery",
"Jeffries", "Jenkins", "Jephson", "Jepson", "Jernigan", "Jerome",
"Jinks", "Johns", "Johnson", "Joiner", "Jones", "Josephs",
"Josephson", "Joyner", "Keegan", "Keen", "Kellogg", "Kelsey",
"Kemp", "Kendall", "Kendrick", "Kersey", "Kevins", "Kevinson",
"Key", "Keys", "Kidd", "Killam", "Kimball", "King", "Kipling",
"Kirby", "Kitchen", "Kitchens", "Knaggs", "Knight", "Law",
"Lawson", "Leach", "Leavitt", "Ledford", "Leon", "Leonardson",
"Levitt", "Lewis", "Linwood", "Little", "Lockwood", "Loman",
"Long", "Longstaff", "Low", "Lowry", "Lucas", "Lukeson", "Lum",
"Lund", "Lynn", "Lyon", "Maddison", "Madison", "Mallory",
"Malone", "Mann", "Marchand", "Mark", "Marley", "Marlow",
"Marsden", "Marshall", "Marston", "Martin", "Martins",
"Martinson", "Mason", "Masters", "Masterson", "Mathers",
"Mathews", "Mathewson", "Matthews", "Matthewson", "May", "Mayes",
"Meadows", "Mercer", "Merchant", "Merrick", "Merricks",
"Merritt", "Michaels", "Michaelson", "Midgley",
"Milburn", "Miles", "Milford", "Miller", "Millhouse",
"Mills", "Milton", "Mitchell", "Mondy", "Montgomery",
"Moors", "Morce", "Morison", "Morris", "Morrish", "Morrison",
"Morriss", "Morse", "Moses", "Mottershead", "Mounce",
"Murgatroyd", "Murray", "Muttoone", "Myers", "Myles", "Nathans",
"Nathanson", "Nelson", "Ness", "Neville", "Newell", "Newman",
"Newport", "Newton", "Nichols", "Nicholson", "Nicolson",
"Nielson", "Nigel", "Nixon", "Normanson", "North", "Northrop",
"Norwood", "Nye", "Oakley", "Odell", "Ogden", "Olhouser",
"Orman", "Osborne", "Osbourne", "Ott", "Outlaw", "Outterridge",
"Overton", "Owston", "Padmore", "Page", "Palmer", "Parent",
"Parker", "Parsons", "Paternoster", "Paterson", "Patrick",
"Patterson", "Paulson", "Payne", "Peacock", "Peak", "Pearson",
"Pelley", "Pemberton", "Penny", "Perkins", "Perry", "Peter",
"Peters", "Peterson", "Petit", "Pettigrew", "Philips", "Phillips",
"Pickering", "Pickle", "Pierson", "Pitts", "Plank", "Plaskett",
"Platt", "Pocock", "Polley", "Pond", "Poole", "Pope", "Porcher",
"Porter", "Potter", "Pound", "Powers", "Prescott", "Pressley",
"Preston", "Proudfoot", "Pryor", "Purcell", "Putnam", "Queen",
"Queshire", "Quick", "Quickley", "Quigg", "Quigley", "Quincey",
"Quincy", "Raines", "Rains", "Rake", "Rakes", "Ramsey", "Randall",
"Rayne", "Raynerson", "Readdie", "Reed", "Reeve",
"Reier", "Rennell", "Rennold", "Rennoll", "Revie", "Reynell",
"Reynolds", "Rice", "Richard", "Richards", "Richardson",
"Rider", "Ridley", "Rier", "Rigby", "Riley", "Rimmer", "Roach",
"Robbins", "Robert", "Roberts", "Robertson", "Robinson",
"Rogers", "Rogerson", "Rollins", "Romilly", "Roscoe", "Ross",
"Rounds", "Rowbottom", "Rowe", "Rowland", "Rowntree", "Royce",
"Royceston", "Roydon", "Royle", "Royston", "Ruggles", "Rupertson",
"Ryder", "Ryeley", "Ryely", "Ryer", "Ryers", "Ryley", "Sackville",
"Sadler", "Salomon", "Salvage", "Sampson", "Samson",
"Samuels", "Samuelson", "Sanders", "Sanderson", "Sandford",
"Sappington", "Sargent", "Saunders", "Sauvage", "Savage",
"Savege", "Savidge", "Sawyer", "Saylor", "School", "Scott",
"Scriven", "Scrivener", "Scrivenor", "Scrivens", "Seabrooke",
"Seaver", "Sempers", "Senior", "Sergeant", "Sessions", "Sexton",
"Shakesheave", "Sharman", "Sharrow", "Shelby", "Shepard",
"Sherburne", "Simmons", "Simms", "Simon", "Simons", "Simonson",
"Simpkin", "Simpson", "Sims", "Skinner", "Slater", "Smalls",
"Smith", "Smythe", "Snelling", "Snider", "Sniders", "Snyder",
"Snyders", "Southers", "Southgate", "Sowards", "Spalding",
"Spear", "Spearing", "Spears", "Speight", "Spence", "Spencer",
"Spurling", "Stack", "Stacks", "Stafford", "Stainthorpe", "Stamp",
"Stanton", "Stark", "Starr", "Statham", "Steed", "Steele",
"Steffen", "Stenet", "Stephens", "Stephenson", "Stern",
"Stevens", "Stevenson", "Stidolph", "St John", "Stoddard",
"Strange", "Street", "Strickland", "Stringer", "Stroud",
"Strudwick", "Studwick", "Styles", "Sudworth", "Suggitt",
"Sumner", "Sutton", "Sweet", "Swindlehurst", "Symons", "Tailor",
"Tanner", "Tash", "Tasker", "Tate", "Tatham", "Taylor",
"Teel", "Tennison", "Tennyson", "Thacker", "Thatcher", "Thomas",
"Thompsett", "Thompson", "Thomson", "Thorn", "Thorne",
"Thorpe", "Thrussell", "Thwaite", "Timberlake", "Timothyson",
"Tinker", "Tipton", "Tittensor", "Tobias", "Toft", "Tolbert",
"Tollemache", "Toller", "Towner", "Townsend", "Tracy", "Traiylor",
"Trask", "Traver", "Travers", "Traves", "Travis", "Traviss",
"Traylor", "Treloar", "Trengove", "Trent", "Trevis", "Triggs",
"Tucker", "Tuff", "Tuft", "Tupper", "Turnbull", "Turner",
"Tyler", "Tyson", "Underhill", "Underwood", "Upton", "Vance",
"Van Middlesworth", "Varley", "Varnham", "Vaughan", "Vaughn",
"Verity", "Vernon", "Victor", "Victors", "Victorson", "Vipond",
"Virgo", "Wakefield", "Walker", "Wallace", "Wallis", "Walmsley",
"Warren", "Wash", "Washington", "Watkins", "Watson",
"Way", "Weaver", "Webster", "Weekes", "Wembley", "Wescott",
"Westbrook", "Wheeler", "Wheelock", "Whinery", "Whitaker",
"White", "Whitney", "Whittemore", "Whittle", "Wickham", "Wilcox",
"Wilkerson", "Wilkins", "Wilkinson", "William", "Williams",
"Williamson", "Willis", "Willoughby", "Wilson", "Winchester",
"Winfield", "Winship", "Winston", "Winter", "Winterbottom",
"Winthrop", "Witherspoon", "Wolf", "Wolfe", "Womack", "Wood",
"Woodcock", "Woodham", "Woodhams", "Woods", "Woodward",
"Wootton", "Wortham", "Wragge", "Wray", "Wyatt", "Wyght",
"Wyndham", "Yap", "Yates", "Yong", "York", "Young", "Younge",
"Yoxall", ]
StreetTypes = [ "Street", "Avenue", "Parade", "Ave", "St", "Rd", "Road",
"Highway", "Rd", "Ave", "St", ]
from random import choice, randint
def chooseName(gender="MF"):
if len(gender) > 1:
gender = choice(gender)
if gender == 'M':
firstName = choice(BoysNames)
else:
firstName = choice(GirlsNames)
lastName = choice(LastNames)
return firstName + " " + lastName
def chooseAddress():
city = choice(LastNames)
district = choice([choice(LastNames), choice(LastNames), ""])
while district == city:
district = choice(LastNames)
streetName = choice(LastNames)
while streetName == city or streetName == district:
streetName = choice(LastNames)
suffixes = [""]
for suffix in ["ton", "ton", "town", " City", "ville", "land"]:
if not city.endswith(suffix):
suffixes.append(suffix)
city += choice(suffixes)
number = randint(1, 110)
if number > 100:
number = randint(100, 1010)
if number > 1000:
number = randint(1000, 3000)
number2 = randint(1, 300)
letter = choice(["", "", "", "", chr(ord('a') + randint(0, 25))])
street = "%d%s %s %s" % (number, letter, streetName, choice(StreetTypes))
if number2 < 50:
street = "%d/%s" % (number2, street)
return (street, district, city)
def main():
print (chooseName())
addr = chooseAddress()
print (addr[0])
print (addr[1])
print (addr[2])
if __name__ == "__main__":
main()
|
linuxsoftware/dominoes
|
davezdominoes/gamecoordinator/utils/name.py
|
Python
|
agpl-3.0
| 26,257
|
[
"Amber",
"Brian",
"CRYSTAL",
"Dalton",
"FLEUR",
"TINKER"
] |
4628a380229a58ebb9c0eb0465af8e3abacd5a4a0beb8fe7b9d743233423409c
|
#---------------------------------------#
# This file is part of EbmLib.
#
# EbmLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EbmLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EbmLib. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------#
# author:
# tllake
# email:
# <thomas.l.lake@wmich.edu>
# <thom.l.lake@gmail.com>
# date:
# 2011.08.30
# file:
# cdktrainer.py
# description:
# Contrastive Divergence for training recursive RBM variants
#---------------------------------------#
from .. units import rthresh, sigmoid
import numpy as np
class CdkTrainer(object):
"""contrastive divergence trainer class
:param rbm: the model to train
:param lr: learning rate
:param m: momentum
:param l2: l2 regularization penalty
    :param spen: sparsity penalty
:param p: desired sparsity
:param pdecay: decay rate for mean approximation
:type rbm: ebmlib.srrbm.Srrbm
:type lr: float
:type m: float
:type l2: float
:type spen: float
:type p: float
:type pdecay: float
"""
def __init__(self, rbm, lr = 0.01, m = 0.4, l2 = 0.001,
spen = 0.001, p = 0.1, pdecay = 0.96):
self.lr, self.m, self.l2 = lr, m, l2
self.spen, self.p, self.pdecay = spen, p, pdecay
self.q = np.zeros(rbm.nhid)
def cross_entropy(self, x, v):
return (x * np.log(v + 1e-8) + (1 - x) * np.log(1 - v + 1e-8)).sum()
def sparseterm(self, h):
"""compute the sparse penalty term and update the exponential decaying mean approximation
:param h: hidden state
:type h: numpy.array
:returns: spen * (q - p)
        :rtype: numpy.array
"""
qnew = (self.pdecay * self.q) + ((1 - self.pdecay) * h)
self.q = qnew
return self.spen * (qnew - self.p)
def batchsparseterm(self, q):
"""compute the sparsity penalty
:param q: mean unit activities
:type q: numpy.array
:returns: spen * (q - p)
        :rtype: numpy.array
"""
return self.spen * (q - self.p)
def learn(self, rbm, x, k = 1, m = True, l2 = True, s = True):
"""cdk weight update for single visible vector
:param rbm: model to update
:param x: data sample
:param k: number of gibbs steps to take for negative phase
:param m: include momentum term in cost function
:param l2: include l2 regularization term in cost function
:param s: include sparsity penalty term in cost function
:type rbm: ebmlib.srrbm.Srrbm
:type x: numpy.array
:type k: int
:type m: bool
:type l2: bool
:type s: bool
:rtype: None
"""
pv = x
pc = rbm.h
ph = rbm.ff(pv, rbm.hid_sample(pc))
if k == 1:
#nv, nc = rbm.fb(rthresh(ph))
nv, nc = rbm.fb(rbm.hid_sample(ph))
nh = rbm.ff(rbm.vis_sample(nv), rbm.hid_sample(nc))
else:
nh = ph.copy()
for i in range(k):
nv, nc = rbm.fb(rbm.hid_sample(nh))
nh = rbm.ff(rbm.vis_sample(nv), rbm.hid_sample(nc))
pgv = np.outer(ph, pv)
pgc = np.outer(ph, pc)
ngv = np.outer(nh, nv)
ngc = np.outer(nh, nc)
gWhv = pgv - ngv
gWhc = pgc - ngc
gvb = pv - nv
gcb = pc - nc
ghb = ph - nh
        # regularization
if l2:
gWhv -= self.l2 * rbm.Whv
gWhc -= self.l2 * rbm.Whc
# sparsity
if s:
sparse_penalty_term = self.sparseterm(ph)
gWhv = (gWhv.T - sparse_penalty_term).T
gWhc = (gWhc.T - sparse_penalty_term).T
ghb -= sparse_penalty_term
gcb -= sparse_penalty_term
dWhv = self.lr * gWhv
dWhc = self.lr * gWhc
dvb = self.lr * gvb
dcb = self.lr * gcb
dhb = self.lr * ghb
# momentum
if m:
dWhv += self.m * rbm.dWhv
dWhc += self.m * rbm.dWhc
dvb += self.m * rbm.dvb
dcb += self.m * rbm.dcb
dhb += self.m * rbm.dhb
rbm.Whv += dWhv
rbm.Whc += dWhc
rbm.vb += dvb
rbm.cb += dcb
rbm.hb += dhb
rbm.dWhv = dWhv
rbm.dWhc = dWhc
rbm.dvb = dvb
rbm.dcb = dcb
rbm.dhb = dhb
rbm.h = ph
#rbm.push(x)
def batchlearn(self, rbm, X, k = 1, m = True, l2 = True, s = True):
"""cdk weight update for a sequence of visible vector
:param rbm: model to update
:param X: datapoints
:param k: number of gibbs steps to take for negative phase
:param m: include momentum term in cost function
:param l2: include l2 regularization term in cost function
:param s: include sparsity penalty term in cost function
:type rbm: ebmlib.srrbm.Srrbm
:type X: 2d numpy.array or list of numpy.array
:type k: int
:type m: bool
:type l2: bool
:type s: bool
:rtype: None
"""
gWhv = np.zeros(rbm.Whv.shape)
gWhc = np.zeros(rbm.Whc.shape)
gvb = np.zeros(rbm.vb.shape)
gcb = np.zeros(rbm.cb.shape)
ghb = np.zeros(rbm.hb.shape)
q = np.zeros(rbm.nhid)
for x in X:
pv = x
pc = rbm.h
ph = rbm.ff(pv, rbm.hid_sample(pc))
if k == 1:
nv, nc = rbm.fb(rbm.hid_sample(ph))
nh = rbm.ff(rbm.vis_sample(nv), rbm.hid_sample(nc))
else:
nh = ph.copy()
for i in range(k):
nv, nc = rbm.fb(rbm.hid_sample(nh))
nh = rbm.ff(rbm.vis_sample(nv), rbm.hid_sample(nc))
gWhv += np.outer(ph, pv) - np.outer(nh, nv)
gWhc += np.outer(ph, pc) - np.outer(nh, nc)
gvb += (pv - nv)
gcb += (pc - nc)
ghb += (ph - nh)
if s:
q += ph
#rbm.push(x)
rbm.h = ph
gWhv /= len(X)
gWhc /= len(X)
gvb /= len(X)
ghb /= len(X)
gcb /= len(X)
        # regularization
if l2:
gWhv -= self.l2 * rbm.Whv
gWhc -= self.l2 * rbm.Whc
# sparsity
if s:
sparse_penalty_term = self.batchsparseterm(q/len(X))
gWhv = (gWhv.T - sparse_penalty_term).T
gWhc = (gWhc.T - sparse_penalty_term).T
ghb -= sparse_penalty_term
gcb -= sparse_penalty_term
#dwv = self.lr / len(X) * gwv
#dwc = self.lr / len(X) * gwc
#dvb = self.lr / len(X) * gvb
#dcb = self.lr / len(X) * gcb
#dhb = self.lr / len(X) * ghb
dWhv = self.lr * gWhv
dWhc = self.lr * gWhc
dvb = self.lr * gvb
dcb = self.lr * gcb
dhb = self.lr * ghb
if m:
dWhv += self.m * rbm.dWhv
dWhc += self.m * rbm.dWhc
dvb += self.m * rbm.dvb
dcb += self.m * rbm.dcb
dhb += self.m * rbm.dhb
rbm.Whv += dWhv
rbm.Whc += dWhc
rbm.vb += dvb
rbm.cb += dcb
rbm.hb += dhb
rbm.dWhv = dWhv
rbm.dWhc = dWhc
rbm.dvb = dvb
rbm.dcb = dcb
rbm.dhb = dhb
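# --- Illustrative usage sketch (not part of the original file) ---
# A minimal CD-1 training loop around CdkTrainer. It assumes `rbm` is an already
# constructed ebmlib.srrbm.Srrbm (the model type named in the docstrings above)
# and `data` is an iterable of binary visible vectors; both names are placeholders.
def _example_training_loop(rbm, data, epochs=10):
    trainer = CdkTrainer(rbm, lr=0.01, m=0.4, l2=0.001, spen=0.001, p=0.1)
    for _ in range(epochs):
        for x in data:
            # one CD-1 update of the weights and biases per visible vector
            trainer.learn(rbm, np.asarray(x, dtype=float), k=1)
    return trainer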
|
thomlake/EbmLib
|
ebmlib/srrbm/cdktrainer.py
|
Python
|
gpl-3.0
| 6,491
|
[
"CDK"
] |
6a1c3270ec1e0e1d5e11ce05174652cd948f19c09610057bdc35a42c3f889c48
|
#----------------------------------------------------------
#Example to run a multi-dimensional GP analysis with pyaneti
#Barragan O., 2021
#This analysis reproduces the full analysis of K2-100 presented in
#http://dx.doi.org/10.1093/mnras/stz2569
#----------------------------------------------------------
#----------------------------------------------------------
#This part is exactly as any other pyaneti run
#----------------------------------------------------------
nplanets = 1
#Filename including all the RV and activity indicators time-series
fname_rv = ['timeseries.dat']
#File name with the light curve data
#Note that there are four columns ordered as:
#time, normalised flux, normalised flux error, instrument/band label
fname_tr = ['multi_band_K2-100.dat']
#MCMC controls
thin_factor = 10
niter = 500
nchains = 100
#Define a vector with the bands
#In this case
#K2 - K2 C5 long cadence data
#G - ARCTIC data
#SC - K2 C18 short cadence data
#band1 - MUSCAT i data
#band2 - MUSCAT r data
#band3 - MUSCAT z data
bands = ['K2','G','SC','band1','band2','band3']
#Pyaneti is able to deal with multi-band data with a different cadence for each band
#In this example the K2 C5 data were observed in long cadence (29.425 min), so we need to resample the model
#All other bands do not need resampling
#We need to define t_cad, a vector indicating the integration time (in units of days),
#where each element corresponds to a band defined in the bands variable
#Note that when no resampling of the model is needed, the value we put in t_cad is not important, but we still need to give one
t_cad = [29.425 / 60. / 24.0,1.5 / 60. / 24.0,1.5 / 60. / 24.0,1.5 / 60. / 24.0,1.5 / 60. / 24.0,1.5 / 60. / 24.0]
#Vector indicating the number of steps used to integrate the model; each element corresponds to a band defined in the bands variable
#We only integrate the model for the K2 C5 long cadence with 10 steps
n_cad = [10,1,1,1,1,1]
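#--- Illustrative sketch (not part of the original input file, and not used by pyaneti) ---
#Resampling idea: for a long-cadence data point at time t, evaluate the transit model at
#n_cad sub-exposure times spread over the exposure t_cad and average them. This is a generic
#illustration of the technique, not pyaneti's internal implementation; the function name and
#the `model` callable below are placeholders.
def _resampled_model_value(model, t, t_cad_days, n_cad_steps):
    import numpy as np
    offsets = ((np.arange(n_cad_steps) + 0.5) / n_cad_steps - 0.5) * t_cad_days  # centred sub-exposure offsets
    return np.mean([model(t + dt) for dt in offsets])                            # exposure-averaged model value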
#This variable controls whether we fit a single radius for all bands or a separate radius for each band
#If False (default) pyaneti will fit a single planet radius for all bands
#If True pyaneti will fit a planet radius for EACH band; this might be useful to check whether the planet radius is consistent across all bands
is_multi_radius = False
method = 'mcmc'
#method = 'plot'
is_rasterized = False
#K2-100 parameters as in Barragan et al., 2019
mstar_mean = 1.15
mstar_sigma = 0.05
rstar_mean = 1.24
rstar_sigma = 0.05
tstar_mean = 5945.
tstar_sigma = 110.
unit_mass = 'earth'
fit_rv = [True]
fit_tr = [True]
is_jitter_tr = True
is_jitter_rv = True
#Prior section
# f -> fixed value
# u -> Uniform priors
# g -> Gaussian priors
fit_t0 = ['u']
fit_P = ['u']
fit_e = ['f']
fit_w = ['f']
fit_ew1= ['f']
fit_ew2= ['f']
fit_b = ['u']
fit_a = ['u']
fit_rp = ['u']
fit_k = ['u']
fit_v0 = 'u'
#Now we have to fit LDC for each band, in this case we will set uniform priors for all bands
#This trick creates a uniform prior for all 6 bands
fit_q1 = ['u']*6 #We fit q1 with uniform priors for all bands
fit_q2 = ['u']*6 #We fit q2 with uniform priors for all bands
#Set the prior limits for all bands
min_q1 = [0]*6
max_q1 = [1]*6
min_q2 = [0]*6
max_q2 = [1]*6
#There is an offset of 4833. days between the photometric and spectroscopic time-series
#We can account for this by adding 4833. days to the light curve times as
textra = 4833.
#Prior ranges for a parameter A
#if 'f' is selected for the parameter A, A is fixed to the one given by min_A
#if 'u' is selected for the parameter A, sets uniform priors between min_A and max_A
#if 'g' is selected for the parameter A, sets gaussian priors with mean min_A and standard deviation max_A
min_t0 = [7140.71]
max_t0 = [7140.73]
min_P = [1.673901]
max_P = [1.673910]
min_a = [1.1]
max_a = [15.]
min_b = [0.0]
max_b = [1.0]
min_k = [0.0]
max_k = [0.05]
min_rp = [0.0]
max_rp = [0.05]
#----------------------------------------------------------
# Here ends the part that is as any other pyaneti run
#----------------------------------------------------------
#----------------------------------------------------------
#In this part is where we start to add our GPs magic
#----------------------------------------------------------
#We are going to reproduce the result in Barragan et al., 2019, where the multi-GP approach has the form
# RV = A_0 G + A_1 dG
# R_hk = A_2 G + A_3 dG
# BIS = A_4 G + A_5 dG
# with A_3 = 0 to recover original approach in Rajpaul et al. (2015)
#The Quasi-periodic kernel implemented in pyaneti is
#G(ti,tj) = A * exp[ - sin(pi (ti - tj)/P_GP)**2 / (2*lambda_p**2) - (ti-tj)**2/(2*lambda_e**2) ]
#where A, lambda_e, lambda_p, and P_GP are the hyperparameters
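#--- Illustrative sketch (not part of the original input file, and not used by pyaneti) ---
#The helper below simply evaluates the quasi-periodic covariance written above with numpy,
#to make the role of each hyperparameter explicit; the function name is a placeholder.
def _qp_covariance(t1, t2, A, lambda_e, lambda_p, P_GP):
    import numpy as np
    dt = np.subtract.outer(np.asarray(t1), np.asarray(t2))            # pairwise time differences
    periodic = np.sin(np.pi * dt / P_GP) ** 2 / (2.0 * lambda_p ** 2)
    decay = dt ** 2 / (2.0 * lambda_e ** 2)
    return A * np.exp(-periodic - decay)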
#The first thing to note is how pyaneti deals with the RVs and the activity indicators
#The file timeseries.dat contains all data to be used in this example
#The data has to be provided as if each activity indicator were an extra instrument, i.e.,
#The activity indicator label has to be given in the fourth column of the data file, in this case timeseries.dat
#We are going to use RVs, log_RHK and Bisector span, which are labelled as INST, RHK, and BIS, respectively
#We indicate to pyaneti to read the data
telescopes = ['INST','RHK','BIS']
#This vector has to be filled with the name of each telescope telescopes[i]
telescopes_labels = ['HARPS RV','HARPS log $R_{HK}$','HARPS BIS SPAN']
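#Illustrative rows of timeseries.dat (the numbers are made up; the layout follows the
#description above: time, measured value, error, instrument/indicator label)
#   7140.50   10.0213  0.0021  INST
#   7140.50   -4.9310  0.0105  RHK
#   7140.50    0.0123  0.0080  BIS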
#We have to tell pyaneti what correlation matrix we want for our RV analysis; this is done by setting
kernel_rv = 'MQ3'
# where 'MQ3' is the keyword for the quasiperiodic kernel in the multi-dimensional GP framework, with three time-series.
#Pyaneti can deal with a different number of time-series by changing to MQX, where X is the number of time-series.
#Now we have to treat the hyperparameters as "normal" pyaneti parameters,
#i.e., we need to say what kind of prior we will use and the ranges
#First we need to tell pyaneti what kind of priors we want, equivalent to the fit_* parameters previously used in this file
#The variable that passes this to pyaneti is called fit_krv. In this case we are going to deal with 9 parameters, the six A_i amplitudes
#plus the lambda_e, lambda_p and P_GP from the QP Kernel.
#The first elements of the fit_krv vector are always the A_i's and the last are the kernel hyperparameters.
#The fit_krv vector would be then
#kernel_rv = 'None'
fit_krv = [None]*9 #Define the list with 9 elements
fit_krv[0] = 'u' #A_0
fit_krv[1] = 'u' #A_1
fit_krv[2] = 'u' #A_2
fit_krv[3] = 'f' #A_3, we fix it to zero to recover Rajpaul et al., 2015 approach
fit_krv[4] = 'u' #A_4
fit_krv[5] = 'u' #A_5
fit_krv[6] = 'u' #lambda_e
fit_krv[7] = 'u' #lambda_p
fit_krv[8] = 'u' #P_GP
#We have already indicated the kind of prior that we want; the next step is to set the prior ranges
#The prior ranges are stored in a list called krv_priors, whose length is two times the length of fit_krv
#It follows the same logic as fit_krv: the first elements correspond to the ranges of the A_i variables, followed by the kernel hyperparameters
#Prior ranges for a parameter A
#if 'f' is selected for the parameter A, A is fixed to the one given by min_A
#if 'u' is selected for the parameter A, sets uniform priors between min_A and max_A
#if 'g' is selected for the parameter A, sets gaussian priors with mean min_A and standard deviation max_A
krv_priors = [
0.0,0.5, #ranges for A_0
0.0,0.5, #ranges for A_1
0.0,0.5, #ranges for A_2
0.0,0.0, #ranges for A_3, note that this one is fixed to zero
-1.5,1.5, #ranges for A_4
-1.5,1.5, #ranges for A_5
1,80, #ranges for lambda_e
0.1,2.0, #ranges for lambda_p
4.,5.1 #ranges for P_GP
]
#----------------------------------------------------------
#END
#----------------------------------------------------------
|
oscaribv/pyaneti
|
inpy/example_full_k2100/input_fit.py
|
Python
|
gpl-3.0
| 7,885
|
[
"Gaussian"
] |
c7d2f9a6bec3a5f5a72a67c9acb9e7fd4241d8d4d3f84b2b9af3f8832eee3a5e
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
r"""Module to provide mechanism to store and restore option states in driver.
"""
import sys
from contextlib import contextmanager
from psi4 import core
from .exceptions import ValidationError
class OptionState(object):
"""Class to store the state of a single *option*. If *module* given, the *option*
value and has_changed value is stored for global, local to *module*, and used by
*module* scopes; otherwise (used for BASIS keywords), only global scope is stored.
Class can store, print, and restore option values. ::
>>> OptionState('E_CONVERGENCE', 'SCF')
>>> print(OptionState('DF_BASIS_MP2'))
"""
def __init__(self, option, module=None):
self.option = option.upper()
if module:
self.module = module.upper()
else:
self.module = None
self.value_global = core.get_global_option(option)
self.haschanged_global = core.has_global_option_changed(option)
if self.module:
self.value_local = core.get_local_option(self.module, option)
self.haschanged_local = core.has_local_option_changed(self.module, option)
self.value_used = core.get_option(self.module, option)
self.haschanged_used = core.has_option_changed(self.module, option)
else:
self.value_local = None
self.haschanged_local = None
self.value_used = None
self.haschanged_used = None
def __str__(self):
text = ''
if self.module:
text += """ ==> %s Option in Module %s <==\n\n""" % (self.option, self.module)
text += """ Global (has changed?) value: %7s %s\n""" % ('(' + str(self.haschanged_global) + ')',
self.value_global)
text += """ Local (has changed?) value: %7s %s\n""" % ('(' + str(self.haschanged_local) + ')',
self.value_local)
text += """ Used (has changed?) value: %7s %s\n""" % ('(' + str(self.haschanged_used) + ')',
self.value_used)
else:
text += """ ==> %s Option in Global Scope <==\n\n""" % (self.option)
text += """ Global (has changed?) value: %7s %s\n""" % ('(' + str(self.haschanged_global) + ')',
self.value_global)
text += """\n"""
return text
def restore(self):
core.set_global_option(self.option, self.value_global)
if not self.haschanged_global:
core.revoke_global_option_changed(self.option)
if self.module:
core.set_local_option(self.module, self.option, self.value_local)
if not self.haschanged_local:
core.revoke_local_option_changed(self.module, self.option)
class OptionsState(object):
"""Class to contain multiple :py:func:`~psi4.driver.p4util.OptionState` objects.
Used in python driver functions to collect several options before altering
them, then restoring before function return. ::
>>> optstash = OptionsState(
['DF_BASIS_SCF'],
['SCF_TYPE'],
['SCF', 'REFERENCE'])
>>> print(optstash)
>>> optstash.restore()
"""
def __init__(self, *largs):
self.data = {}
for item in largs:
self.add_option(item)
def add_option(self, item):
if len(item) == 2:
key = (item[1], item[0])
elif len(item) == 1:
key = (item[0], )
else:
            raise ValidationError(
                'Each argument to OptionsState should be a list of one element (the option name) '
                'or of two elements (the module name followed by the option name). Bad argument: %s'
                % (item))
if key in self.data:
raise ValidationError(
                'Malformed options state, duplicate key adds of "{}". This should not happen, please raise an issue on github.com/psi4/psi4'.format(key))
else:
self.data[key] = OptionState(*key)
def __str__(self):
text = ''
for key, item in self.data.items():
text += str(item)
return text
def restore(self):
for key, item in self.data.items():
item.restore()
@contextmanager
def OptionsStateCM(osd):
    """Context manager that saves the state of the single option specification *osd*
    (e.g. ``['SCF', 'REFERENCE']``), yields control, and restores the saved state on exit."""
oso = OptionsState(osd)
yield
oso.restore()
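# --- Illustrative usage sketch (not part of the original file) ---
# Temporarily change an option inside the context manager defined above; the
# particular option and value are placeholders chosen for illustration.
def _example_temporary_option():
    with OptionsStateCM(['SCF', 'REFERENCE']):
        core.set_local_option('SCF', 'REFERENCE', 'UHF')
        # ... run something with the temporary setting ...
    # on exit, the saved SCF REFERENCE state has been restored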
|
jturney/psi4
|
psi4/driver/p4util/optproc.py
|
Python
|
lgpl-3.0
| 5,468
|
[
"Psi4"
] |
3404b2eadf8147c9a25a5035f0cd41dc87a08433113b2868a1e2ad9634681fb2
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
import galsim
valid_gsobject_types = {
# Note: these are just the types that need a special builder. Most of GSObject sub-classes
# in base.py (and some elsewhere) can use the default builder, called _BuildSimple, which
# just uses the req, opt, and single class variables.
# See the des module for examples of how to extend this from a module.
'None' : '_BuildNone',
'Add' : '_BuildAdd',
'Sum' : '_BuildAdd',
'Convolve' : '_BuildConvolve',
'Convolution' : '_BuildConvolve',
'List' : '_BuildList',
'Ring' : '_BuildRing',
'Pixel' : '_BuildPixel',
'RealGalaxy' : '_BuildRealGalaxy',
}
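# --- Illustrative sketch (not part of the original GalSim source) ---
# As the comment above notes, a module can extend this registry with its own builder.
# The builder name, profile, and parameters below are placeholders, not real GalSim
# config types; a real extension would also register the type, e.g.
# valid_gsobject_types['MyProfile'] = '_BuildMyProfile'.
def _BuildMyProfileExample(config, key, base, ignore, gsparams):
    # A builder returns (gsobject, safe); here we just build a fixed Exponential profile.
    gsobject = galsim.Exponential(half_light_radius=1., flux=1.)
    return gsobject, True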
class SkipThisObject(Exception):
"""
    An exception that a builder can raise to indicate that nothing went wrong, but for some
    reason this particular object should be skipped, moving on to the next object.
The constructor takes an optional message that will be output to the logger if
logging is active.
"""
def __init__(self, message=None):
# Using self.message gives a deprecation warning. Avoid this by using a different name.
self.msg = message
def BuildGSObject(config, key, base=None, gsparams={}):
"""Build a GSObject using config dict for key=key.
@param config A dict with the configuration information.
@param key The key name in config indicating which object to build.
@param base A dict which stores potentially useful things like
base['rng'] = random number generator
base['catalog'] = input catalog for InputCat items
base['real_catalog'] = real galaxy catalog for RealGalaxy objects
Typically on the initial call to BuildGSObject, this will be
the same as config, hence the name base.
@param gsparams Optionally, provide non-default gsparams items. Any gsparams specified
at this level will be added to the list. This should be a dict with
whatever kwargs should be used in constructing the GSParams object.
@returns gsobject, safe
gsobject is the built object
safe is a bool that says whether it is safe to use this object again next time
"""
# I'd like to be able to have base=config be the default value, but python doesn't
# allow that. So None is the default, and if it's None, we set it to config.
if not base:
base = config
#print 'Start BuildGSObject: config = ',config
#print 'gsparams = ',gsparams
if isinstance(config,dict):
if not key in config:
raise AttributeError("key %s not found in config"%key)
elif isinstance(config,list):
if not key < len(config):
raise AttributeError("Trying to build past the end of a list in config")
else:
raise AttributeError("BuildGSObject not given a valid dictionary")
# Alias for convenience
ck = config[key]
# Check that the input config has a type to even begin with!
if not 'type' in ck:
raise AttributeError("type attribute required in config.%s"%key)
type = ck['type']
# If we have previously saved an object and marked it as safe, then use it.
if 'current_val' in ck and ck['safe']:
#print 'current is safe: ',ck['current_val'], True
return ck['current_val'], True
# Ring is only allowed for top level gal (since it requires special handling in
# multiprocessing, and that's the only place we look for it currently).
if type == 'Ring' and key != 'gal':
raise AttributeError("Ring type only allowed for top level gal")
# Check if we need to skip this object
if 'skip' in ck:
skip = galsim.config.ParseValue(ck, 'skip', base, bool)[0]
if skip:
raise SkipThisObject('config.skip = True')
# Set up the initial default list of attributes to ignore while building the object:
ignore = [
'dilate', 'dilation', 'ellip', 'rotate', 'rotation',
'magnify', 'magnification', 'shear', 'shift',
'gsparams', 'skip', 'current_val', 'safe'
]
# There are a few more that are specific to which key we have.
if key == 'gal':
ignore += [ 'resolution', 'signal_to_noise', 'redshift', 're_from_res' ]
# If redshift is present, parse it here, since it might be needed by the Build functions.
# All we actually care about is setting the current_val, so don't assign to anything.
if 'redshift' in ck:
galsim.config.ParseValue(ck, 'redshift', base, float)
elif key == 'psf':
ignore += [ 'saved_re' ]
elif key != 'pix':
# As long as key isn't psf or pix, allow resolution.
# Ideally, we'd like to check that it's something within the gal hierarchy, but
# I don't know an easy way to do that.
ignore += [ 'resolution' , 're_from_res' ]
# If we are specifying the size according to a resolution, then we
# need to get the PSF's half_light_radius.
if 'resolution' in ck:
if 'psf' not in base:
raise AttributeError(
"Cannot use gal.resolution if no psf is set.")
if 'saved_re' not in base['psf']:
raise AttributeError(
'Cannot use gal.resolution with psf.type = %s'%base['psf']['type'])
psf_re = base['psf']['saved_re']
resolution = galsim.config.ParseValue(ck, 'resolution', base, float)[0]
gal_re = resolution * psf_re
if 're_from_res' not in ck:
# The first time, check that half_light_radius isn't also specified.
if 'half_light_radius' in ck:
raise AttributeError(
'Cannot specify both gal.resolution and gal.half_light_radius')
ck['re_from_res'] = True
ck['half_light_radius'] = gal_re
# Make sure the PSF gets flux=1 unless explicitly overridden by the user.
if key == 'psf' and 'flux' not in ck:
ck['flux'] = 1
if 'gsparams' in ck:
gsparams = UpdateGSParams(gsparams, ck['gsparams'], 'gsparams', config)
# See if this type has a specialized build function:
if type in valid_gsobject_types:
build_func = eval(valid_gsobject_types[type])
gsobject, safe = build_func(ck, key, base, ignore, gsparams)
# Next, we check if this name is in the galsim dictionary.
elif type in galsim.__dict__:
if issubclass(galsim.__dict__[type], galsim.GSObject):
gsobject, safe = _BuildSimple(ck, key, base, ignore, gsparams)
else:
raise TypeError("Input config type = %s is not a GSObject."%type)
# Otherwise, it's not a valid type.
else:
raise NotImplementedError("Unrecognised config type = %s"%type)
# If this is a psf, try to save the half_light_radius in case gal uses resolution.
if key == 'psf':
try :
ck['saved_re'] = gsobject.getHalfLightRadius()
except :
pass
# Apply any dilation, ellip, shear, etc. modifications.
gsobject, safe1 = _TransformObject(gsobject, ck, base)
safe = safe and safe1
if 'no_save' not in base:
ck['current_val'] = gsobject
ck['safe'] = safe
return gsobject, safe
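# For orientation, a minimal sketch of the kind of config dict BuildGSObject consumes.
# This is illustrative only: the 'Gaussian' and 'Moffat' types and their parameters come
# from the regular galsim namespace via _BuildSimple, and the 'G1G2' shear spec is just
# one of the standard value types; none of the values are taken from a real simulation.
#
# example_config = {
#     'gal' : { 'type' : 'Gaussian', 'sigma' : 1.5, 'flux' : 100.,
#               'shear' : { 'type' : 'G1G2', 'g1' : 0.02, 'g2' : 0.01 } },
#     'psf' : { 'type' : 'Moffat', 'beta' : 3, 'fwhm' : 0.9 },
# }
# gal, safe = BuildGSObject(example_config, 'gal')
# psf, safe = BuildGSObject(example_config, 'psf')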
def UpdateGSParams(gsparams, config, key, base):
"""@brief Add additional items to the gsparams dict based on config['gsparams']
"""
opt = galsim.GSObject._gsparams
kwargs, safe = galsim.config.GetAllParams(config, key, base, opt=opt)
# When we update gsparams, we don't want to corrupt the original, so we need to
# make a copy first, then update with kwargs.
ret = {}
ret.update(gsparams)
ret.update(kwargs)
return ret
def _BuildNone(config, key, base, ignore, gsparams):
"""@brief Special type=None returns None
"""
return None, True
def _BuildAdd(config, key, base, ignore, gsparams):
"""@brief Build an Add object
"""
req = { 'items' : list }
opt = { 'flux' : float }
# Only Check, not Get. We need to handle items a bit differently, since it's a list.
galsim.config.CheckAllParams(config, key, req=req, opt=opt, ignore=ignore)
gsobjects = []
items = config['items']
if not isinstance(items,list):
raise AttributeError("items entry for config.%s entry is not a list."%type)
safe = True
for i in range(len(items)):
gsobject, safe1 = BuildGSObject(items, i, base, gsparams)
# Skip items with flux=0
if 'flux' in items[i] and galsim.config.value.GetCurrentValue(items[i],'flux') == 0.:
#print 'skip -- flux == 0'
continue
safe = safe and safe1
gsobjects.append(gsobject)
#print 'After built component items for ',type,' safe = ',safe
if len(gsobjects) == 0:
raise ValueError("No valid items for %s"%key)
elif len(gsobjects) == 1:
gsobject = gsobjects[0]
else:
# Special: if the last item in a Sum doesn't specify a flux, we scale it
# to bring the total flux up to 1.
if ('flux' not in items[-1]) and all('flux' in item for item in items[0:-1]):
sum = 0
for item in items[0:-1]:
sum += galsim.config.value.GetCurrentValue(item,'flux')
#print 'sum = ',sum
f = 1. - sum
#print 'f = ',f
if (f < 0):
import warnings
warnings.warn(
"Automatically scaling the last item in Sum to make the total flux\n" +
"equal 1 requires the last item to have negative flux = %f"%f)
gsobjects[-1].setFlux(f)
if gsparams: gsparams = galsim.GSParams(**gsparams)
else: gsparams = None
gsobject = galsim.Add(gsobjects,gsparams=gsparams)
if 'flux' in config:
flux, safe1 = galsim.config.ParseValue(config, 'flux', base, float)
#print 'flux = ',flux
gsobject.setFlux(flux)
safe = safe and safe1
return gsobject, safe
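# An illustrative Sum config for the automatic flux handling above (a sketch, not taken
# from any real use): the first two items carry explicit fluxes, so the unspecified last
# item is rescaled to bring the total flux up to 1.
#
# sum_config = {
#     'type' : 'Sum',
#     'items' : [
#         { 'type' : 'Exponential', 'half_light_radius' : 0.8, 'flux' : 0.6 },
#         { 'type' : 'DeVaucouleurs', 'half_light_radius' : 0.5, 'flux' : 0.3 },
#         { 'type' : 'Gaussian', 'sigma' : 0.3 },   # gets flux 1 - (0.6 + 0.3) = 0.1
#     ],
# }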
def _BuildConvolve(config, key, base, ignore, gsparams):
"""@brief Build a Convolve object
"""
req = { 'items' : list }
opt = { 'flux' : float }
# Only Check, not Get. We need to handle items a bit differently, since it's a list.
galsim.config.CheckAllParams(config, key, req=req, opt=opt, ignore=ignore)
gsobjects = []
items = config['items']
if not isinstance(items,list):
raise AttributeError("items entry for config.%s entry is not a list."%type)
safe = True
for i in range(len(items)):
gsobject, safe1 = BuildGSObject(items, i, base, gsparams)
safe = safe and safe1
gsobjects.append(gsobject)
#print 'After built component items for ',type,' safe = ',safe
if len(gsobjects) == 0:
raise ValueError("No valid items for %s"%key)
elif len(gsobjects) == 1:
gsobject = gsobjects[0]
else:
if gsparams: gsparams = galsim.GSParams(**gsparams)
else: gsparams = None
gsobject = galsim.Convolve(gsobjects,gsparams=gsparams)
if 'flux' in config:
flux, safe1 = galsim.config.ParseValue(config, 'flux', base, float)
#print 'flux = ',flux
gsobject.setFlux(flux)
safe = safe and safe1
return gsobject, safe
def _BuildList(config, key, base, ignore, gsparams):
"""@brief Build a GSObject selected from a List
"""
req = { 'items' : list }
opt = { 'index' : float , 'flux' : float }
# Only Check, not Get. We need to handle items a bit differently, since it's a list.
galsim.config.CheckAllParams(config, key, req=req, opt=opt, ignore=ignore)
items = config['items']
if not isinstance(items,list):
raise AttributeError("items entry for config.%s entry is not a list."%type)
# Setup the indexing sequence if it hasn't been specified using the length of items.
galsim.config.SetDefaultIndex(config, len(items))
index, safe = galsim.config.ParseValue(config, 'index', base, int)
if index < 0 or index >= len(items):
raise AttributeError("index %d out of bounds for config.%s"%(index,type))
#print items[index]['type']
#print 'index = ',index,' From ',key,' List: ',items[index]
gsobject, safe1 = BuildGSObject(items, index, base, gsparams)
safe = safe and safe1
if 'flux' in config:
flux, safe1 = galsim.config.ParseValue(config, 'flux', base, float)
#print 'flux = ',flux
gsobject.setFlux(flux)
safe = safe and safe1
return gsobject, safe
def _BuildRing(config, key, base, ignore, gsparams):
"""@brief Build a GSObject in a Ring
"""
req = { 'num' : int, 'first' : dict }
opt = { 'full_rotation' : galsim.Angle }
# Only Check, not Get. We need to handle first a bit differently, since it's a gsobject.
galsim.config.CheckAllParams(config, key, req=req, opt=opt, ignore=ignore)
num = galsim.config.ParseValue(config, 'num', base, int)[0]
if num <= 0:
raise ValueError("Attribute num for gal.type == Ring must be > 0")
if 'full_rotation' in config:
full_rotation = galsim.config.ParseValue(config, 'full_rotation', base, galsim.Angle)[0]
else:
import math
full_rotation = math.pi * galsim.radians
dtheta = full_rotation / num
#print 'dtheta = ',dtheta
k = base['seq_index']
#print 'k = ',k
if k % num == 0:
#print 'first pass -- rebuilding'
# Then this is the first in the Ring.
gsobject = BuildGSObject(config, 'first', base, gsparams)[0]
else:
#print 'not first pass rotate by ',dtheta
if not isinstance(config['first'],dict) or 'current_val' not in config['first']:
raise RuntimeError("Building Ring after the first item, but no current_val stored.")
gsobject = config['first']['current_val'].createRotated(k*dtheta)
return gsobject, False
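# A hedged sketch of a Ring config consistent with the checks above (remember that Ring
# is only allowed for the top-level 'gal' key): num copies of the 'first' profile, each
# rotated by full_rotation/num relative to the previous one in the sequence. The 'E1E2'
# ellip spec and the parameter values are purely illustrative.
#
# gal_config = {
#     'type' : 'Ring',
#     'num' : 4,
#     'first' : { 'type' : 'Exponential', 'half_light_radius' : 1.0,
#                 'ellip' : { 'type' : 'E1E2', 'e1' : 0.2, 'e2' : 0.0 } },
# }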
def _BuildPixel(config, key, base, ignore, gsparams):
"""@brief Build a Pixel type GSObject from user input.
"""
kwargs, safe = galsim.config.GetAllParams(config, key, base,
req = galsim.__dict__['Pixel']._req_params,
opt = galsim.__dict__['Pixel']._opt_params,
single = galsim.__dict__['Pixel']._single_params,
ignore = ignore)
if gsparams: kwargs['gsparams'] = galsim.GSParams(**gsparams)
if 'yw' in kwargs.keys() and (kwargs['xw'] != kwargs['yw']):
import warnings
warnings.warn(
"xw != yw found (%f != %f) "%(kwargs['xw'], kwargs['yw']) +
"This is supported for the pixel, but not the draw routines. " +
"There might be weirdness....")
return galsim.Pixel(**kwargs), safe
def _BuildRealGalaxy(config, key, base, ignore, gsparams):
"""@brief Build a RealGalaxy type GSObject from user input.
"""
if 'real_catalog' not in base:
raise ValueError("No real galaxy catalog available for building type = RealGalaxy")
if 'num' in config:
num, safe = galsim.config.ParseValue(config, 'num', base, int)
else:
num, safe = (0, True)
ignore.append('num')
if num < 0:
raise ValueError("Invalid num < 0 supplied for RealGalaxy: num = %d"%num)
if num >= len(base['real_catalog']):
raise ValueError("Invalid num supplied for RealGalaxy (too large): num = %d"%num)
real_cat = base['real_catalog'][num]
# Special: if index is Sequence or Random, and max isn't set, set it to real_cat.nobjects-1
if 'id' not in config:
galsim.config.SetDefaultIndex(config, real_cat.nobjects)
kwargs, safe = galsim.config.GetAllParams(config, key, base,
req = galsim.__dict__['RealGalaxy']._req_params,
opt = galsim.__dict__['RealGalaxy']._opt_params,
single = galsim.__dict__['RealGalaxy']._single_params,
ignore = ignore)
if gsparams: kwargs['gsparams'] = galsim.GSParams(**gsparams)
if 'rng' not in base:
raise ValueError("No base['rng'] available for %s.type = RealGalaxy"%(key))
kwargs['rng'] = base['rng']
if 'index' in kwargs:
index = kwargs['index']
if index >= real_cat.nobjects:
raise IndexError(
"%s index has gone past the number of entries in the catalog"%param_name)
return galsim.RealGalaxy(real_cat, **kwargs), safe
def _BuildSimple(config, key, base, ignore, gsparams={}):
"""@brief Build a simple GSObject (i.e. one without a specialized _Build function) or
any other galsim object that defines _req_params, _opt_params and _single_params.
"""
# Build the kwargs according to the various params objects in the class definition.
type = config['type']
if type in galsim.__dict__:
init_func = eval("galsim."+type)
else:
init_func = eval(type)
kwargs, safe = galsim.config.GetAllParams(config, key, base,
req = init_func._req_params,
opt = init_func._opt_params,
single = init_func._single_params,
ignore = ignore)
if gsparams: kwargs['gsparams'] = galsim.GSParams(**gsparams)
if init_func._takes_rng:
if 'rng' not in base:
raise ValueError("No base['rng'] available for %s.type = %s"%(key,type))
kwargs['rng'] = base['rng']
safe = False
# Finally, after pulling together all the params, try making the GSObject.
return init_func(**kwargs), safe
def _TransformObject(gsobject, config, base):
"""@brief Applies ellipticity, rotation, gravitational shearing and centroid shifting to a
supplied GSObject, in that order, from user input.
@returns transformed GSObject.
"""
safe = True
orig = True
if 'dilate' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _DilateObject(gsobject, config, 'dilate', base)
safe = safe and safe1
if 'dilation' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _DilateObject(gsobject, config, 'dilation', base)
safe = safe and safe1
if 'ellip' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _EllipObject(gsobject, config, 'ellip', base)
safe = safe and safe1
if 'rotate' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _RotateObject(gsobject, config, 'rotate', base)
safe = safe and safe1
if 'rotation' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _RotateObject(gsobject, config, 'rotation', base)
safe = safe and safe1
if 'shear' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _EllipObject(gsobject, config, 'shear', base)
safe = safe and safe1
if 'magnify' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _MagnifyObject(gsobject, config, 'magnify', base)
safe = safe and safe1
if 'magnification' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _MagnifyObject(gsobject, config, 'magnification', base)
safe = safe and safe1
if 'shift' in config:
if orig: gsobject = gsobject.copy(); orig = False
gsobject, safe1 = _ShiftObject(gsobject, config, 'shift', base)
safe = safe and safe1
return gsobject, safe
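# For reference, a sketch of how the transformation keys handled above might appear in a
# config (the values and the angle/shift specs are illustrative only, not taken from a
# real config). The modifications are always applied in the fixed order coded in
# _TransformObject: dilate, ellip, rotate, shear, magnify, shift.
#
# gal_config = {
#     'type' : 'Sersic', 'n' : 1.5, 'half_light_radius' : 1.2,
#     'dilate' : 1.1,
#     'rotate' : '30 degrees',
#     'shear' : { 'type' : 'G1G2', 'g1' : 0.02, 'g2' : -0.01 },
#     'shift' : { 'type' : 'XY', 'x' : 0.05, 'y' : -0.02 },
# }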
def _EllipObject(gsobject, config, key, base):
"""@brief Applies ellipticity to a supplied GSObject from user input, also used for
gravitational shearing.
@returns transformed GSObject.
"""
shear, safe = galsim.config.ParseValue(config, key, base, galsim.Shear)
gsobject = gsobject.createSheared(shear)
return gsobject, safe
def _RotateObject(gsobject, config, key, base):
"""@brief Applies rotation to a supplied GSObject based on user input.
@returns transformed GSObject.
"""
theta, safe = galsim.config.ParseValue(config, key, base, galsim.Angle)
gsobject = gsobject.createRotated(theta)
return gsobject, safe
def _DilateObject(gsobject, config, key, base):
"""@brief Applies dilation to a supplied GSObject based on user input.
@returns transformed GSObject.
"""
scale, safe = galsim.config.ParseValue(config, key, base, float)
gsobject = gsobject.createDilated(scale)
return gsobject, safe
def _MagnifyObject(gsobject, config, key, base):
"""@brief Applies magnification to a supplied GSObject based on user input.
@returns transformed GSObject.
"""
mu, safe = galsim.config.ParseValue(config, key, base, float)
gsobject = gsobject.createMagnified(mu)
return gsobject, safe
def _ShiftObject(gsobject, config, key, base):
"""@brief Applies centroid shift to a supplied GSObject based on user input.
@returns transformed GSObject.
"""
shift, safe = galsim.config.ParseValue(config, key, base, galsim.PositionD)
gsobject = gsobject.createShifted(shift.x,shift.y)
return gsobject, safe
|
mardom/GalSim
|
galsim/config/gsobject.py
|
Python
|
gpl-3.0
| 22,000
|
[
"Galaxy"
] |
c4825923d029ce98e4d84d1a7ddcbaa5bf2b419128ca5bec848503d392cfab70
|
#!/usr/bin/python
"""
Copyright 2013 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import time
from datetime import timedelta, datetime
import random
import os
import sys
BASE_SCRIPT_URL = '/'
DEFAULT_THEME = 'crafter'
imgNum = random.randint(1,3)
def timeAgo(theTime):
try:
timeSinceEntered = datetime.fromtimestamp(time.time()) - theTime
tmpDays = timeSinceEntered.days
tmpStr = ''
if (tmpDays > 0):
if (tmpDays > 365):
tmpStr = str(tmpDays / 365) + " years, "
tmpDays = tmpDays % 365
return tmpStr + str(tmpDays)+" days"
elif (timeSinceEntered.seconds/3600 >= 1):
return str(timeSinceEntered.seconds/3600)+" hours"
elif (timeSinceEntered.seconds/60 >= 1):
return str(timeSinceEntered.seconds/60)+" minutes"
else:
return "less than a minute"
except:
return "no time"
def getNumberAbbr(theNumber):
if theNumber >= 1000000:
return "%0.1f" % (theNumber / 1000000.0) + "m"
elif theNumber >= 1000:
return str(theNumber / 1000) + "k"
else:
return str(theNumber)
def percOfRangeColor(percValue):
if (float(percValue) < 80):
return "statNormal"
elif (float(percValue) < 99):
return "statHigh"
else:
return "statMax"
def getActionName(action):
if action == 'a':
return 'Add'
if action == 'p':
return 'Planet Add'
if action == 'e':
return 'Edit'
if action == 'r':
return 'Cleanup'
if action == 'v':
return 'Verified'
if action == 'w':
return 'Waypoint'
return 'Unknown'
def convertText(text, fmt):
newStr = ""
if (text != None):
for i in range(len(text)):
if (text[i] == "\n"):
if fmt == "html":
newStr = newStr + "<br />"
else:
newStr = newStr + "\\n"
else:
newStr = newStr + text[i]
return newStr
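# Illustrative usage of the helpers above (a sketch with made-up values):
#
# timeAgo(datetime.fromtimestamp(time.time()) - timedelta(days=400)) -> "1 years, 35 days"
# getNumberAbbr(1234567) -> "1.2m"
# getNumberAbbr(5300) -> "5k"  (under Python 2 integer division)
# convertText("line one\nline two", "html") -> "line one<br />line two"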
|
clreinki/GalaxyHarvester
|
ghShared.py
|
Python
|
agpl-3.0
| 2,405
|
[
"Galaxy"
] |
83f42b219df2e26ddd24905ff24e6f9ed5dafea9f80c7c902ce18749241d7756
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
An implementation of a red–black tree as it is described in
Cormen, Leiserson, Rivest, Stein 2nd edition pg 273.
The height of a red-black tree is never bigger than
2*log_2(n+1). This is achieved with these properties:
RB1) Every node is either red or black.
RB2) The root is black.
RB3) Every leaf is black.
RB4) Every child of a red node is black.
RB5) For every node: The number of black nodes in all paths
to leafs is equal.
John Reid's original source code:
http://code.activestate.com/recipes/576817-red-black-tree/
My latest source code:
https://github.com/MartinThoma/algorithms/blob/master/datastructures/redBlackTree.py
"""
__author__ = "Original by John Reid, edited by Martin Thoma"
__credits__ = ["John Reid", "Martin Thoma"]
__version__ = "1.0.0"
__maintainer__ = "Martin Thoma"
__email__ = "info@martin-thoma.de"
class rbnode(object):
"""
A node in a red black tree.
"""
def __init__(self, key):
self._key = key
self._red = False
self._left = None # Left child
self._right = None # Right child
self._p = None # Parent
self._originalRed = False
self._isNil = False
key = property(fget=lambda self: self._key, doc="The node's key")
red = property(fget=lambda self: self._red, doc="Is the node red?")
left = property(fget=lambda self: self._left, doc="The node's left child")
right = property(fget=lambda self: self._right, doc="The node's right child")
p = property(fget=lambda self: self._p, doc="The node's parent")
originalRed = property(fget=lambda self: self._originalRed, doc="for internal usage")
isNil = property(fget=lambda self: self._isNil, doc="Is the node a NIL node?")
def __str__(self):
"String representation."
if self.isNil:
return "Node: NIL"
else:
return str("%s" % self.key)
def __repr__(self):
"String representation."
if self.isNil:
return "Node: NIL"
else:
return str("Node: %s (%s), (%s, %s)" % (self.key, repr(self.p), repr(self.left), repr(self.right)))
class rbtree(object):
"""
A red-black tree.
"""
def __init__(self, create_node=rbnode):
"Construct."
self._nil = create_node(key=None)
self._nil._isNil = True
"Our nil node, used for all leaves."
self._root = self.nil
"The root of the tree."
self._create_node = create_node
"A callable that creates a node."
root = property(fget=lambda self: self._root, doc="The tree's root node")
nil = property(fget=lambda self: self._nil, doc="The tree's nil node")
def search(self, key, x=None):
"""
Search the subtree rooted at x (or the root if not given)
iteratively for the key.
@return: self.nil if it cannot find it.
"""
if None == x:
x = self.root
while x != self.nil and key != x.key:
if key < x.key:
x = x.left
else:
x = x.right
return x
def minimum(self, x=None):
"""
Find the node with the minimum value of the subtree
rooted at x.
@param x: the root where you start your search.
@return: The node with the minimum value in the subtree
rooted at x.
"""
if None == x:
x = self.root
if x == self.nil:
return self.nil
while x.left != self.nil:
x = x.left
return x
def maximum(self, x=None):
"""
Find the maximum value of the subtree rooted at x.
@param x: the root where you start your search.
@return: The maximum value in the subtree rooted at x.
"""
if None == x:
x = self.root
if x == self.nil:
return self.nil
while x.right != self.nil:
x = x.right
return x
def insert_key(self, key):
"""
Insert a key into the tree.
@param key: the key you want to insert into the tree.
"""
self.insert_node(self._create_node(key=key))
def insert_node(self, z):
"""
Insert a node into the tree.
@param z: the node you want to insert into the tree.
"""
y = self.nil
x = self.root
while x != self.nil:
y = x
if z.key < x.key:
x = x.left
else:
x = x.right
z._p = y
if y == self.nil:
self._root = z
elif z.key < y.key:
y._left = z
else:
y._right = z
z._left = self.nil
z._right = self.nil
z._red = True
self._insert_fixup(z)
def _insert_fixup(self, z):
"""
Restore the red-black properties after insert.
"""
# You only get into trouble if the parent of z is red.
# Otherwise, all properties are still valid.
while z.p.red:
if z.p == z.p.p.left: # parent of z is a left child
y = z.p.p.right # the uncle of z
if y.red:
# parent of z and uncle of z are both red
# this means you can re-color them to black
# to make sure that the black-height didn't
# change, you have to re-color their parent to
# red. Then you have to continue checking.
z.p._red = False
y._red = False
z.p.p._red = True
z = z.p.p
else:
if z == z.p.right:
z = z.p
self._left_rotate(z)
z.p._red = False
z.p.p._red = True # this was black, as z.p is red
self._right_rotate(z.p.p)
else: # parent of z is a right child
y = z.p.p.left # the uncle of z
if y.red:
z.p._red = False
y._red = False
z.p.p._red = True
z = z.p.p
else:
if z == z.p.left:
z = z.p
self._right_rotate(z)
z.p._red = False
z.p.p._red = True
self._left_rotate(z.p.p)
self.root._red = False
def delete_key(self, key):
"""
Delete a key from the tree.
@param key: the key you want to delete from the tree.
@return: False if the key was not in the tree,
otherwise True.
"""
node = self.search(key)
if node == self.nil:
return False
self.delete_node(node)
return True
def delete_node(self, n):
"""
Delete a node from the tree.
@param n: the node you want to delete from the tree.
"""
# The following source was "translated" from
# this Java source:
# http://en.literateprograms.org/Red-black_tree_(Java)
if n.left != self.nil and n.right != self.nil:
pred = self.maximum(n.left)
n._key = pred.key
n = pred
assert n.left == self.nil or n.right == self.nil
if n.right == self.nil:
child = n.left
else:
child = n.right
if not n.red:
n._red = child.red
self._deleteCase1(n)
self._replaceNode(n, child)
if self.root.red:
self.root._red = False
def _replaceNode(self, oldn, newn):
if oldn.p == self.nil:
self._root = newn
else:
if oldn == oldn.p.left:
oldn.p._left = newn
else:
oldn.p._right = newn
if newn != self.nil:
newn._p = oldn.p
def _deleteCase1(self, n):
""" In this case, N has become the root node. The deletion
removed one black node from every path, so no properties
are violated.
"""
if n.p == self.nil:
return
else:
self._deleteCase2(n)
def _deleteCase2(self, n):
""" N has a red sibling. In this case we exchange the colors
of the parent and sibling, then rotate about the parent
so that the sibling becomes the parent of its former
parent. This does not restore the tree properties, but
reduces the problem to one of the remaining cases. """
if self._sibling(n).red:
n.p._red = True
self._sibling(n)._red = False
if n == n.p.left:
self._left_rotate(n.p)
else:
self._right_rotate(n.p)
self._deleteCase3(n)
def _deleteCase3(self, n):
""" In this case N's parent, sibling, and sibling's children
are black. In this case we paint the sibling red. Now
all paths passing through N's parent have one less black
node than before the deletion, so we must recursively run
this procedure from case 1 on N's parent.
"""
tmp = self._sibling(n)
if not n.p.red and not tmp.red and not tmp.left and not tmp.right:
tmp._red = True
self._deleteCase1(n.p)
else:
self._deleteCase4(n)
def _deleteCase4(self, n):
""" N's sibling and sibling's children are black, but its
parent is red. We exchange the colors of the sibling and
parent; this restores the tree properties.
"""
tmp = self._sibling(n)
if n.p.red and not tmp.red and not tmp.left.red and not tmp.right.red:
tmp._red = True
n.p._red = False
else:
self._deleteCase5(n)
def _deleteCase5(self, n):
""" There are two cases handled here which are mirror images
of one another:
N's sibling S is black, S's left child is red, S's
right child is black, and N is the left child of its
parent. We exchange the colors of S and its left
child and rotate right at S.
N's sibling S is black, S's right child is red,
S's left child is black, and N is the right child of
its parent. We exchange the colors of S and its right
child and rotate left at S.
Both of these function to reduce us to the situation
described in case 6. """
tmp = self._sibling(n)
if n == n.p.left and not tmp.red and tmp.left and not tmp.right:
tmp._red = True
tmp.left._red = False
self._right_rotate(tmp)
elif n == n.p.right and not tmp.red and tmp.right and not tmp.left:
tmp._red = True
tmp.right._red = False
self._left_rotate(tmp)
self._deleteCase6(n)
def _deleteCase6(self, n):
""" There are two cases handled here which are mirror images
of one another:
N's sibling S is black, S's right child is red, and N is
the left child of its parent. We exchange the colors of
N's parent and sibling, make S's right child black, then
rotate left at N's parent.
N's sibling S is black, S's left child is red, and N is
the right child of its parent. We exchange the colors of
N's parent and sibling, make S's left child black, then
rotate right at N's parent.
"""
tmp = self._sibling(n)
tmp._red = n.p.red
n.p._red = False
if n == n.p.left:
assert tmp.right.red
tmp.right._red = False
self._left_rotate(n.p)
else:
assert tmp.left.red
tmp.left._red = False
self._right_rotate(n.p)
def _sibling(self, n):
assert n.p != self.nil
if n == n.p.left:
return n.p.right
else:
return n.p.left
def _left_rotate(self, x):
""" Left rotate x. """
#       W                                    S
#      / \        Right-Rotate(S,W)         / \
#     /   \           -------->            /   \
#    S     Y                              G     W
#   / \                <--------               / \
#  /   \          Left-Rotate(W,S)            /   \
# G     U                                    U     Y
y = x.right
x._right = y.left
if y.left != self.nil:
y.left._p = x
y._p = x.p
if x.p == self.nil:
self._root = y
elif x == x.p.left:
x.p._left = y
else:
x.p._right = y
y._left = x
x._p = y
def _right_rotate(self, y):
""" Left rotate y. """
x = y.left
y._left = x.right
if x.right != self.nil:
x.right._p = y
x._p = y.p
if y.p == self.nil:
self._root = x
elif y == y.p.right:
y.p._right = x
else:
y.p._left = x
x._right = y
y._p = x
def check_invariants(self):
"""
@return: True if the tree satisfies all criteria of a red-black tree.
"""
def is_search_tree(node):
if node != None and node != self.nil:
if node.left != self.nil:
assert(node.left.key <= node.key)
is_search_tree(node.left)
if node.right != self.nil:
assert(node.right.key >= node.key)
is_search_tree(node.right)
def is_red_black_node(node):
"""
@return: the number of black nodes on the way to the
leaf (node does NOT count)
"""
# check has _left and _right or neither
assert not ((node.left and not node.right) or
(node.right and not node.left))
# leaves have to be black
assert not ((not node.left and not node.right) and node.red)
# if node is red, check children are black
if node.red and node.left and node.right:
assert not (node.left.red or node.right.red)
# has the current node a left child?
if node.left or node.right:
# check children's parents are correct
assert not (self.nil != node.right and node != node.right.p)
assert not (self.nil != node.left and node != node.left.p)
# check if children are ok
left_counts = is_red_black_node(node.left)
right_counts = is_red_black_node(node.right)
if not node.left.red:
left_counts += 1
if not node.right.red:
right_counts += 1
# check children's counts are ok
if left_counts != right_counts:
write_tree(self, "test", show_nil=True)
assert left_counts == right_counts
return left_counts
return 0
is_search_tree(self.root)
is_red_black_node(self.root)
return not self.root._red
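# A minimal usage sketch of the class above (see handMadeTests() further down for a
# fuller, assertion-checked example):
#
# t = rbtree()
# for k in [5, 1, 9, 3]:
#     t.insert_key(k)
# assert t.search(3) != t.nil
# assert t.minimum().key == 1 and t.maximum().key == 9
# t.delete_key(5)
# assert t.check_invariants()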
def write_tree_as_dot(t, f, show_nil=False):
"""
Write the tree in the dot language format to f.
@param t: the tree
@param f: the file you want to write
@param show_nil: should nil-nodes be printed?
"""
def node_id(node):
return 'N%d' % id(node)
def node_color(node):
if node.red:
return "red"
else:
return "black"
def visit_node(node):
"Visit a node."
print >> f, " %s [label=\"%s\", color=\"%s\"];" % (node_id(node), node, node_color(node))
if node.left:
if node.left != t.nil or show_nil:
visit_node(node.left)
print >> f, " %s -> %s ;" % (node_id(node), node_id(node.left))
if node.right:
if node.right != t.nil or show_nil:
visit_node(node.right)
print >> f, " %s -> %s ;" % (node_id(node), node_id(node.right))
print >> f, "// Created by rbtree.write_dot()"
print >> f, "digraph red_black_tree {"
visit_node(t.root)
print >> f, "}"
def write_tree(t, filename, show_nil=True):
"Write the tree as an SVG file."
import os
f = open('%s.dot' % filename, 'w')
write_tree_as_dot(t, f, show_nil)
f.close()
os.system('dot %s.dot -Tsvg -o %s.svg' % (filename, filename))
os.system('rm %s.dot' % filename)
def handMadeTests():
t = rbtree()
assert t.minimum() == t.nil
assert t.maximum() == t.nil
assert t.check_invariants()
t.insert_key(123)
assert repr(t.nil) == "Node: NIL"
assert repr(t.search(123)) == "Node: 123 (Node: NIL), (Node: NIL, Node: NIL)"
assert t.minimum().key == 123
assert t.maximum().key == 123
assert t.check_invariants()
t.insert_key(1000)
assert t.minimum().key == 123
assert t.maximum().key == 1000
assert t.check_invariants()
t.insert_key(99)
assert t.minimum().key == 99
assert t.maximum().key == 1000
assert t.check_invariants()
t.insert_key(124)
assert t.minimum().key == 99
assert t.maximum().key == 1000
assert t.check_invariants()
t.insert_key(125)
assert t.minimum().key == 99
assert t.maximum().key == 1000
assert t.check_invariants()
t.insert_key(100)
assert t.minimum().key == 99
assert t.maximum().key == 1000
assert t.check_invariants()
write_tree(t, "testHand", show_nil=True)
t.delete_key(99)
assert t.minimum().key == 100
assert t.maximum().key == 1000
assert t.check_invariants()
t.delete_key(123)
assert t.minimum().key == 100
assert t.maximum().key == 1000
assert t.check_invariants()
def test_tree(t, iKeys, dKeys):
"""
Insert iKeys one by one checking invariants and membership as
we go.
@param t: the tree that gets tested
@param iKeys: the keys that get inserted
@param dKeys: the keys that get deleted
"""
assert t.check_invariants()
for i, key in enumerate(iKeys):
for key2 in iKeys[:i]:
# make sure that the inserted nodes are still there
assert t.nil != t.search(key2)
for key2 in iKeys[i:]:
assert (t.nil == t.search(key2)) ^ (key2 in iKeys[:i])
t.insert_key(key)
assert t.check_invariants()
for i, key in enumerate(dKeys):
t.delete_key(key)
assert t.check_invariants()
handMadeTests()
if '__main__' == __name__: # pragma: no branch coverage
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-t", "--test",
action="store_true", dest="test",
default=False,
help="check if the tree implementation works")
parser.add_argument("--example",
action="store_true", dest="example",
default=False,
help="generate an example red-black tree")
args = parser.parse_args()
import sys, numpy.random
if args.test:
numpy.random.seed(2)
size = 50
iKeys = numpy.random.randint(-50, 50, size=size)
dKeys = numpy.random.randint(-50, 50, size=size)
t = rbtree()
test_tree(t, iKeys, dKeys)
if args.example: # pragma: no cover
tree = rbtree()
list = [17, 19, 9, 20, 3, 8, 11, -3, 6 , 7, 2, 2, 17, -4, 17, 5]
for k, el in enumerate(list):
tree.insert_key(el)
write_tree(tree, 'tree' + str(k))
|
saisai/algorithms_by_other
|
datastructures/redBlackTree.py
|
Python
|
mit
| 20,193
|
[
"VisIt"
] |
cd6ec130751714c3d4424cbb7c5fc5aeb0f5d4aa841894ccf869f87b6aad627b
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Alex Grigorevskiy
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Test module for state_space_main.py
"""
import unittest
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import GPy.models.state_space_setup as ss_setup
import GPy.models.state_space_main as ssm
def generate_x_points(points_num=100, x_interval = (0, 20), random=True):
"""
Function generates (sorted) points on the x axis.
Input:
---------------------------
points_num: int
How many points to generate
x_interval: tuple (a,b)
On which interval to generate points
random: bool
Regular points or random
Output:
---------------------------
x_points: np.array
Generated points
"""
x_interval = np.asarray( x_interval )
if random:
x_points = np.random.rand(points_num) * ( x_interval[1] - x_interval[0] ) + x_interval[0]
x_points = np.sort( x_points )
else:
x_points = np.linspace(x_interval[0], x_interval[1], num=points_num )
return x_points
def generate_sine_data(x_points=None, sin_period=2.0, sin_ampl=10.0, noise_var=2.0,
plot = False, points_num=100, x_interval = (0, 20), random=True):
"""
Function generates sinusoidal data.
Input:
--------------------------------
x_points: np.array
Previously generated X points
sin_period: float
Sine period
sin_ampl: float
Sine amplitude
noise_var: float
Gaussian noise variance added to the sine function
plot: bool
Whether to plot generated data
(if x_points is None, then the following parameters are used to generate
those. They are the same as in 'generate_x_points' function)
points_num: int
x_interval: tuple (a,b)
random: bool
"""
sin_function = lambda xx: sin_ampl * np.sin( 2*np.pi/sin_period * xx )
if x_points is None:
x_points = generate_x_points(points_num, x_interval, random)
y_points = sin_function( x_points ) + np.random.randn( len(x_points) ) * np.sqrt(noise_var)
if plot:
pass
return x_points, y_points
def generate_linear_data(x_points=None, tangent=2.0, add_term=1.0, noise_var=2.0,
plot = False, points_num=100, x_interval = (0, 20), random=True):
"""
Function generates linear data.
Input:
--------------------------------
x_points: np.array
Previously generated X points
tangent: float
Factor with which independent variable is multiplied in linear equation.
add_term: float
Additive term in linear equation.
noise_var: float
Gaussian noise variance added to the sine function
plot: bool
Whether to plot generated data
(if x_points is None, then the following parameters are used to generate
those. They are the same as in 'generate_x_points' function)
points_num: int
x_interval: tuple (a,b)
random: bool
"""
linear_function = lambda xx: tangent*xx + add_term
if x_points is None:
x_points = generate_x_points(points_num, x_interval, random)
y_points = linear_function( x_points ) + np.random.randn( len(x_points) ) * np.sqrt(noise_var)
if plot:
pass
return x_points, y_points
def generate_brownian_data(x_points=None, kernel_var = 2.0, noise_var = 2.0,
plot = False, points_num=100, x_interval = (0, 20), random=True):
"""
Generate Brownian data - data from Brownian motion.
The first point is always 0, i.e. B(0) = 0 - the standard condition for Brownian motion.
Input:
--------------------------------
x_points: np.array
Previously generated X points
kernel_var: float
Variance of the Brownian motion increments per unit of x
noise_var: float
Gaussian observation noise variance added to the generated data
plot: bool
Whether to plot generated data
(if x_points is None, then the following parameters are used to generate
those. They are the same as in 'generate_x_points' function)
points_num: int
x_interval: tuple (a,b)
random: bool
"""
if x_points is None:
x_points = generate_x_points(points_num, x_interval, random)
if x_points[0] != 0:
x_points[0] = 0
y_points = np.zeros( (points_num,) )
for i in range(1, points_num):
noise = np.random.randn() * np.sqrt(kernel_var * (x_points[i] - x_points[i-1]))
y_points[i] = y_points[i-1] + noise
y_points += np.random.randn( len(x_points) ) * np.sqrt(noise_var)
return x_points, y_points
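# In equations, the loop above draws B(x_i) = B(x_{i-1}) + N(0, kernel_var*(x_i - x_{i-1}))
# with B(x_0) = 0, and then adds independent observation noise N(0, noise_var) to every point.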
def generate_linear_plus_sin(x_points=None, tangent=2.0, add_term=1.0, noise_var=2.0,
sin_period=2.0, sin_ampl=10.0, plot = False,
points_num=100, x_interval = (0, 20), random=True):
"""
Generate the sum of linear trend and the sine function.
For parameters see the 'generate_linear' and 'generate_sine'.
Comment: Gaussian noise variance is added only once (for linear function).
"""
x_points, y_linear_points = generate_linear_data(x_points, tangent, add_term, noise_var,
False, points_num, x_interval, random)
x_points, y_sine_points = generate_sine_data(x_points, sin_period, sin_ampl, 0.0,
False, points_num, x_interval, random)
y_points = y_linear_points + y_sine_points
if plot:
pass
return x_points, y_points
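# Example of how these generators are typically combined (an illustrative sketch only;
# the parameter values are arbitrary):
#
# x = generate_x_points(points_num=200, x_interval=(0, 20), random=False)
# x, y = generate_linear_plus_sin(x_points=x, tangent=1.0, add_term=0.0, noise_var=0.5,
#                                 sin_period=4.0, sin_ampl=3.0)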
def generate_random_y_data(samples, dim, ts_no):
"""
Generate data:
Input:
------------------
samples - how many samples
dim - dimensionality of the data
ts_no - number of time series
Output:
--------------------------
Y: np.array((samples, dim, ts_no))
"""
Y = np.empty((samples, dim, ts_no));
for i in range(0,samples):
for j in range(0,ts_no):
sample = np.random.randn(dim)
Y[i,:,j] = sample
if (Y.shape[2] == 1): # ts_no = 1
Y.shape=(Y.shape[0], Y.shape[1])
return Y
class StateSpaceKernelsTests(np.testing.TestCase):
def setUp(self):
pass
def run_descr_model(self, measurements, A,Q,H,R, true_states=None,
mean_compare_decimal=8,
m_init=None, P_init=None, dA=None,dQ=None,
dH=None,dR=None, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True):
#import pdb; pdb.set_trace()
state_dim = 1 if not isinstance(A,np.ndarray) else A.shape[0]
ts_no = 1 if (len(measurements.shape) < 3) else measurements.shape[2]
grad_params_no = None if dA is None else dA.shape[2]
ss_setup.use_cython = use_cython
global ssm
if (ssm.cython_code_available) and (ssm.use_cython != use_cython):
reload(ssm)
grad_calc_params = None
if calc_grad_log_likelihood:
grad_calc_params = {}
grad_calc_params['dA'] = dA
grad_calc_params['dQ'] = dQ
grad_calc_params['dH'] = dH
grad_calc_params['dR'] = dR
(f_mean, f_var, loglikelhood, g_loglikelhood, \
dynamic_callables_smoother) = ssm.DescreteStateSpace.kalman_filter(A, Q, H, R, measurements, index=None,
m_init=m_init, P_init=P_init, p_kalman_filter_type = kalman_filter_type,
calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
grad_params_no=grad_params_no,
grad_calc_params=grad_calc_params)
f_mean_squeezed = np.squeeze(f_mean[1:,:]) # exclude initial value
f_var_squeezed = np.squeeze(f_var[1:,:]) # exclude initial value
if true_states is not None:
#print np.max(np.abs(f_mean_squeezed-true_states))
np.testing.assert_almost_equal(np.max(np.abs(f_mean_squeezed- \
true_states)), 0, decimal=mean_compare_decimal)
np.testing.assert_equal(f_mean.shape, (measurements.shape[0]+1,state_dim,ts_no) )
np.testing.assert_equal(f_var.shape, (measurements.shape[0]+1,state_dim,state_dim) )
(M_smooth, P_smooth) = ssm.DescreteStateSpace.rts_smoother(state_dim, dynamic_callables_smoother, f_mean,
f_var)
return f_mean, f_var
def run_continuous_model(self, F, L, Qc, p_H, p_R, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=None, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=0, grad_calc_params=None):
#import pdb; pdb.set_trace()
state_dim = 1 if not isinstance(F,np.ndarray) else F.shape[0]
ts_no = 1 if (len(Y_data.shape) < 3) else Y_data.shape[2]
ss_setup.use_cython = use_cython
global ssm
if (ssm.cython_code_available) and (ssm.use_cython != use_cython):
reload(ssm)
(f_mean, f_var, loglikelhood, g_loglikelhood, \
dynamic_callables_smoother) = ssm.ContDescrStateSpace.cont_discr_kalman_filter(F, L, Qc, p_H, p_R,
P_inf, X_data, Y_data, index = None,
m_init=None, P_init=None,
p_kalman_filter_type='regular',
calc_log_likelihood=False,
calc_grad_log_likelihood=False,
grad_params_no=0, grad_calc_params=grad_calc_params)
f_mean_squeezed = np.squeeze(f_mean[1:,:]) # exclude initial value
f_var_squeezed = np.squeeze(f_var[1:,:]) # exclude initial value
np.testing.assert_equal(f_mean.shape, (Y_data.shape[0]+1,state_dim,ts_no))
np.testing.assert_equal(f_var.shape, (Y_data.shape[0]+1,state_dim,state_dim))
(M_smooth, P_smooth) = ssm.ContDescrStateSpace.cont_discr_rts_smoother(state_dim, f_mean, \
f_var,dynamic_callables_smoother)
return f_mean, f_var
def test_discrete_ss_first(self,plot=False):
"""
Tests discrete State-Space model - first test.
"""
np.random.seed(235) # seed the random number generator
A = 1.0 # For cython code to run properly need float input
H = 1.0
Q = 1.0
R = 1.0
steps_num = 100
# generate data ->
true_states = np.zeros((steps_num,))
init_state = 0
measurements = np.zeros((steps_num,))
for s in range(0, steps_num):
if s== 0:
true_states[0] = init_state + np.sqrt(Q)*np.random.randn()
else:
true_states[s] = true_states[s-1] + np.sqrt(Q)*np.random.randn()
measurements[s] = true_states[s] + np.sqrt(R)*np.random.randn()
# generate data <-
# discrete kalman filter ->
m_init = 0; P_init = 1
d_num = 1000
state_discr = np.linspace(-10,10,d_num)
state_trans_matrix = np.empty((d_num,d_num))
for i in range(d_num):
state_trans_matrix[:,i] = norm.pdf(state_discr, loc=A*state_discr[i], scale=np.sqrt(Q))
m_prev = norm.pdf(state_discr, loc = m_init, scale = np.sqrt(P_init)); #m_prev / np.sum(m_prev)
m = np.zeros((d_num, steps_num))
i_mean = np.zeros((steps_num,))
for s in range(0, steps_num):
# Prediction step:
if (s==0):
m[:,s] = np.dot(state_trans_matrix, m_prev)
else:
m[:,s] = np.dot(state_trans_matrix, m[:,s-1])
# Update step:
#meas_ind = np.argmin(np.abs(state_discr - measurements[s])
y_vec = np.zeros( (d_num,))
for i in range(d_num):
y_vec[i] = norm.pdf(measurements[s], loc=H*state_discr[i], scale=np.sqrt(R))
norm_const = np.dot( y_vec, m[:,s] )
m[:,s] = y_vec * m[:,s] / norm_const
i_mean[s] = state_discr[ np.argmax(m[:,s]) ]
# discrete kalman filter <-
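# In equations, the grid filter above is just a restatement of the loop: prediction
# m_pred = P_trans . m_prev, followed by the Bayes update
# m_post(x_i) = N(y_s; H*x_i, R) * m_pred(x_i) / sum_j N(y_s; H*x_j, R) * m_pred(x_j),
# and i_mean[s] stores the grid point with the highest posterior weight at step s.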
(f_mean, f_var) = self.run_descr_model(measurements, A,Q,H,R, true_states=i_mean,
mean_compare_decimal=1,
m_init=m_init, P_init=P_init,use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=False)
(f_mean, f_var) = self.run_descr_model(measurements, A,Q,H,R, true_states=i_mean,
mean_compare_decimal=1,
m_init=m_init, P_init=P_init,use_cython=False,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=False)
(f_mean, f_var) = self.run_descr_model(measurements, A,Q,H,R, true_states=i_mean,
mean_compare_decimal=1,
m_init=m_init, P_init=P_init,use_cython=True,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=False)
if plot:
# plotting ->
plt.figure()
plt.plot( true_states, 'g.-',label='true states')
#plt.plot( measurements, 'b.-', label='measurements')
plt.plot( f_mean, 'r.-',label='Kalman filter estimates')
plt.plot( i_mean, 'k.-', label='Discretization')
plt.plot( f_mean + 2*np.sqrt(f_var), 'r.--')
plt.plot( f_mean - 2*np.sqrt(f_var), 'r.--')
plt.legend()
plt.show()
# plotting <-
return None
def test_discrete_ss_1D(self,plot=False):
"""
This function tests Kalman filtering and smoothing when the
state is one-dimensional.
"""
np.random.seed(234) # seed the random number generator
# 1D ss model
state_dim = 1;
param_num = 2 # sigma_Q, sigma_R - parameters
measurement_dim = 1 # dimensionality of measurement
A = 1.0
Q = 2.0
dA= np.zeros((state_dim,state_dim,param_num))
dQ = np.zeros((state_dim,state_dim,param_num)); dQ[0,0,0] = 1.0
# measurement related parameters (subject to change) ->
H = np.ones((measurement_dim,state_dim ))
R = 0.5 * np.eye(measurement_dim)
dH = np.zeros((measurement_dim,state_dim,param_num))
dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
# measurement related parameters (subject to change) <-
# 1D measurement, 1 ts_no ->
data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=True,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
if plot:
# plotting ->
plt.figure()
plt.plot( np.squeeze(data), 'g.-', label='measurements')
plt.plot( np.squeeze(f_mean[1:]), 'b.-',label='Kalman filter estimates')
plt.plot( np.squeeze(f_mean[1:]+H*f_var[1:]*H), 'b--')
plt.plot( np.squeeze(f_mean[1:]-H*f_var[1:]*H), 'b--')
# plt.plot( np.squeeze(M_sm[1:]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:]+H*P_sm[1:]*H), 'r--')
# plt.plot( np.squeeze(M_sm[1:]-H*P_sm[1:]*H), 'r--')
plt.legend()
plt.title("1D state-space, 1D measurements, 1 ts_no")
plt.show()
# plotting <-
# 1D measurement, 1 ts_no <-
# 1D measurement, 3 ts_no ->
data = generate_random_y_data(10, 1, 3) # np.array((samples, dim, ts_no))
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=True,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
#import pdb; pdb.set_trace()
if plot:
# plotting ->
plt.figure()
plt.plot( np.squeeze(data[:,:,1]), 'g.-', label='measurements')
plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
plt.plot( np.squeeze(f_mean[1:,0,1])+np.squeeze(H*f_var[1:]*H), 'b--')
plt.plot( np.squeeze(f_mean[1:,0,1])-np.squeeze(H*f_var[1:]*H), 'b--')
# plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,0,1])+H*np.squeeze(P_sm[1:])*H, 'r--')
# plt.plot( np.squeeze(M_sm[1:,0,1])-H*np.squeeze(P_sm[1:])*H, 'r--')
plt.legend()
plt.title("1D state-space, 1D measurements, 3 ts_no. 2-nd ts ploted")
plt.show()
# plotting <-
# 1D measurement, 3 ts_no <-
measurement_dim = 2 # dimensionality of measurement
H = np.ones((measurement_dim,state_dim))
R = 0.5 * np.eye(measurement_dim)
dH = np.zeros((measurement_dim,state_dim,param_num))
dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
# measurement related parameters (subject to change) <
data = generate_random_y_data(10, 2, 3) # np.array((samples, dim, ts_no))
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
# (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
# mean_compare_decimal=16,
# m_init=None, P_init=None, dA=dA,dQ=dQ,
# dH=dH,dR=dR, use_cython=True,
# kalman_filter_type='svd',
# calc_log_likelihood=True,
# calc_grad_log_likelihood=True)
if plot:
# plotting ->
plt.figure()
plt.plot( np.squeeze(data[:,0,1]), 'g.-', label='measurements')
plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
plt.plot( np.squeeze(f_mean[1:,0,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
plt.plot( np.squeeze(f_mean[1:,0,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,0,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
# plt.plot( np.squeeze(M_sm[1:,0,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
plt.legend()
plt.title("1D state-space, 2D measurements, 3 ts_no. 1-st measurement, 2-nd ts ploted")
plt.show()
# plotting <-
# 2D measurement, 3 ts_no <-
def test_discrete_ss_2D(self,plot=False):
"""
This function tests Kalman filtering and smoothing when the
state is two-dimensional.
"""
np.random.seed(234) # seed the random number generator
# 2D ss model
state_dim = 2;
param_num = 3 # sigma_Q, sigma_R, and one parameter in A
measurement_dim = 1 # dimensionality of measurement
A = np.eye(state_dim); A[0,0] = 0.5
Q = np.ones((state_dim,state_dim));
dA = np.zeros((state_dim,state_dim,param_num)); dA[1,1,2] = 1
dQ = np.zeros((state_dim,state_dim,param_num)); dQ[:,:,1] = np.eye(measurement_dim)
# measurement related parameters (subject to change) ->
H = np.ones((measurement_dim,state_dim))
R = 0.5 * np.eye(measurement_dim)
dH = np.zeros((measurement_dim,state_dim,param_num))
dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
# measurement related parameters (subject to change) <-
# 1D measurement, 1 ts_no ->
data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=True,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
if plot:
# plotting ->
plt.figure()
plt.plot( np.squeeze(data), 'g.-', label='measurements')
plt.plot( np.squeeze(f_mean[1:,0]), 'b.-',label='Kalman filter estimates')
plt.plot( np.squeeze(f_mean[1:,0])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
plt.plot( np.squeeze(f_mean[1:,0])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_sm[1:,0]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,0])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
# plt.plot( np.squeeze(M_sm[1:,0])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
plt.legend()
plt.title("2D state-space, 1D measurements, 1 ts_no")
plt.show()
# plotting <-
# 1D measurement, 1 ts_no <-
# 1D measurement, 3 ts_no ->
data = generate_random_y_data(10, 1, 3) # np.array((samples, dim, ts_no))
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=True,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
if plot:
# plotting ->
plt.figure()
plt.plot( np.squeeze(data[:,:,1]), 'g.-', label='measurements')
plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
plt.plot( np.squeeze(f_mean[1:,0,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
plt.plot( np.squeeze(f_mean[1:,0,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,0,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
# plt.plot( np.squeeze(M_sm[1:,0,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
plt.legend()
plt.title("2D state-space, 1D measurements, 3 ts_no. 2-nd ts ploted")
plt.show()
# plotting <-
# 1D measurement, 3 ts_no <-
# 2D measurement, 3 ts_no ->
# measurement related parameters (subject to change) ->
measurement_dim = 2 # dimensionality of measurement
H = np.ones((measurement_dim,state_dim))
R = 0.5 * np.eye(measurement_dim)
dH = np.zeros((measurement_dim,state_dim,param_num))
dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
# measurement related parameters (subject to change) <
data = generate_random_y_data(10, 2, 3) # np.array((samples, dim, ts_no))
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
(f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
mean_compare_decimal=16,
m_init=None, P_init=None, dA=dA,dQ=dQ,
dH=dH,dR=dR, use_cython=False,
kalman_filter_type='svd',
calc_log_likelihood=True,
calc_grad_log_likelihood=True)
# (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
# mean_compare_decimal=16,
# m_init=None, P_init=None, dA=dA,dQ=dQ,
# dH=dH,dR=dR, use_cython=True,
# kalman_filter_type='svd',
# calc_log_likelihood=True,
# calc_grad_log_likelihood=True)
if plot:
# plotting ->
plt.figure()
plt.plot( np.squeeze(data[:,0,1]), 'g.-', label='measurements')
plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
plt.plot( np.squeeze(f_mean[1:,0,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
plt.plot( np.squeeze(f_mean[1:,0,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,0,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
# plt.plot( np.squeeze(M_sm[1:,0,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
plt.legend()
plt.title("2D state-space, 2D measurements, 3 ts_no. 1-st measurement, 2-nd ts ploted")
plt.show()
# plotting <-
# 2D measurement, 3 ts_no <-
def test_continuous_ss(self,plot=False):
"""
This function tests the continuous state-space model.
"""
# 1D measurements, 1 ts_no ->
measurement_dim = 1 # dimensionality of measurement
X_data = generate_x_points(points_num=10, x_interval = (0, 20), random=True)
Y_data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))
try:
import GPy
except ImportError as e:
return None
periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
(F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0) = periodic_kernel.sde()
state_dim = dFt.shape[0];
param_num = dFt.shape[2]
grad_calc_params = {}
grad_calc_params['dP_inf'] = dP_inft
grad_calc_params['dF'] = dFt
grad_calc_params['dQc'] = dQct
grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
grad_calc_params['dP_init'] = dP0
# dH matrix is None
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=False,
kalman_filter_type='rbc',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=True,
kalman_filter_type='rbc',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
if plot:
# plotting ->
plt.figure()
plt.plot( X_data, np.squeeze(Y_data[:,0]), 'g.-', label='measurements')
plt.plot( X_data, np.squeeze(f_mean[1:,15]), 'b.-',label='Kalman filter estimates')
plt.plot( X_data, np.squeeze(f_mean[1:,15])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
plt.plot( X_data, np.squeeze(f_mean[1:,15])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_sm[1:,15]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,15])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
# plt.plot( np.squeeze(M_sm[1:,15])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
plt.legend()
plt.title("1D measurements, 1 ts_no")
plt.show()
# plotting <-
# 1D measurements, 1 ts_no <-
# 1D measurements, 3 ts_no ->
        measurement_dim = 1 # dimensionality of measurement
X_data = generate_x_points(points_num=10, x_interval = (0, 20), random=True)
Y_data = generate_random_y_data(10, 1, 3) # np.array((samples, dim, ts_no))
periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
(F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0) = periodic_kernel.sde()
state_dim = dFt.shape[0];
param_num = dFt.shape[2]
grad_calc_params = {}
grad_calc_params['dP_inf'] = dP_inft
grad_calc_params['dF'] = dFt
grad_calc_params['dQc'] = dQct
grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
grad_calc_params['dP_init'] = dP0
# dH matrix is None
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=False,
kalman_filter_type='rbc',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=True,
kalman_filter_type='rbc',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
if plot:
# plotting ->
plt.figure()
plt.plot(X_data, np.squeeze(Y_data[:,0,1]), 'g.-', label='measurements')
plt.plot(X_data, np.squeeze(f_mean[1:,15,1]), 'b.-',label='Kalman filter estimates')
plt.plot(X_data, np.squeeze(f_mean[1:,15,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
plt.plot(X_data, np.squeeze(f_mean[1:,15,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_sm[1:,15,1]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,15,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
# plt.plot( np.squeeze(M_sm[1:,15,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
plt.legend()
plt.title("1D measurements, 3 ts_no. 2-nd ts ploted")
plt.show()
# plotting <-
# 1D measurements, 3 ts_no <-
# 2D measurements, 3 ts_no ->
        measurement_dim = 2 # dimensionality of measurement
X_data = generate_x_points(points_num=10, x_interval = (0, 20), random=True)
Y_data = generate_random_y_data(10, 2, 3) # np.array((samples, dim, ts_no))
periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
(F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0) = periodic_kernel.sde()
H = np.vstack((H,H)) # make 2D measurements
R = 1.5 * np.eye(measurement_dim)
state_dim = dFt.shape[0];
param_num = dFt.shape[2]
grad_calc_params = {}
grad_calc_params['dP_inf'] = dP_inft
grad_calc_params['dF'] = dFt
grad_calc_params['dQc'] = dQct
grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
grad_calc_params['dP_init'] = dP0
# dH matrix is None
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, R, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=False,
kalman_filter_type='regular',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
(f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, R, P_inf, X_data, Y_data, index = None,
m_init=None, P_init=P0, use_cython=False,
kalman_filter_type='rbc',
calc_log_likelihood=True,
calc_grad_log_likelihood=True,
grad_params_no=param_num, grad_calc_params=grad_calc_params)
# (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, R, P_inf, X_data, Y_data, index = None,
# m_init=None, P_init=P0, use_cython=True,
# kalman_filter_type='rbc',
# calc_log_likelihood=True,
# calc_grad_log_likelihood=True,
# grad_params_no=param_num, grad_calc_params=grad_calc_params)
if plot:
# plotting ->
plt.figure()
plt.plot(X_data, np.squeeze(Y_data[:,0,1]), 'g.-', label='measurements')
plt.plot(X_data, np.squeeze(f_mean[1:,15,1]), 'b.-',label='Kalman filter estimates')
plt.plot(X_data, np.squeeze(f_mean[1:,15,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
plt.plot(X_data, np.squeeze(f_mean[1:,15,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_sm[1:,15,1]), 'r.-',label='Smoother Estimates')
# plt.plot( np.squeeze(M_sm[1:,15,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
# plt.plot( np.squeeze(M_sm[1:,15,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
plt.legend()
plt.title("1D measurements, 3 ts_no. 2-nd ts ploted")
plt.show()
# plotting <-
# 2D measurements, 3 ts_no <-
#def test_EM_gradient(plot=False):
# """
# Test EM gradient calculation. This method works (the formulas are such)
# that it works only for time invariant matrices A, Q, H, R. For the continuous
# model it means that time intervals are the same.
# """
#
# np.random.seed(234) # seed the random number generator
#
# # 1D measurements, 1 ts_no ->
# measurement_dim = 1 # dimensionality of measurement
#
# x_data = generate_x_points(points_num=10, x_interval = (0, 20), random=False)
# data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))
#
# import GPy
# #periodic_kernel = GPy.kern.sde_Matern32(1,active_dims=[0,])
# periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
# (F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0t) = periodic_kernel.sde()
#
# state_dim = dFt.shape[0];
# param_num = dFt.shape[2]
#
# grad_calc_params = {}
# grad_calc_params['dP_inf'] = dP_inft
# grad_calc_params['dF'] = dFt
# grad_calc_params['dQc'] = dQct
# grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
# grad_calc_params['dP_init'] = dP0t
# # dH matrix is None
#
#
# #(F,L,Qc,H,P_inf,dF,dQc,dP_inf) = ssm.balance_ss_model(F,L,Qc,H,P_inf,dF,dQc,dP_inf)
# # Use the Kalman filter to evaluate the likelihood
#
# #import pdb; pdb.set_trace()
# (M_kf, P_kf, log_likelihood,
# grad_log_likelihood,SmootherMatrObject) = ss.ContDescrStateSpace.cont_discr_kalman_filter(F,
# L, Qc, H, 1.5, P_inf, x_data, data, m_init=None,
# P_init=P0, calc_log_likelihood=True,
# calc_grad_log_likelihood=True,
# grad_params_no=param_num,
# grad_calc_params=grad_calc_params)
#
# if plot:
# # plotting ->
# plt.figure()
# plt.plot( np.squeeze(data[:,0]), 'g.-', label='measurements')
# plt.plot( np.squeeze(M_kf[1:,15]), 'b.-',label='Kalman filter estimates')
# plt.plot( np.squeeze(M_kf[1:,15])+np.einsum('ij,ajk,kl', H, P_kf[1:], H.T)[:,0,0], 'b--')
# plt.plot( np.squeeze(M_kf[1:,15])-np.einsum('ij,ajk,kl', H, P_kf[1:], H.T)[:,0,0], 'b--')
# plt.title("1D measurements, 1 ts_no")
# plt.show()
# # plotting <-
# # 1D measurements, 1 ts_no <-
if __name__ == '__main__':
print("Running state-space inference tests...")
unittest.main()
#tt = StateSpaceKernelsTests('test_discrete_ss_first')
#res = tt.test_discrete_ss_first(plot=True)
#res = tt.test_discrete_ss_1D(plot=True)
#res = tt.test_discrete_ss_2D(plot=False)
#res = tt.test_continuos_ss(plot=True)
|
mikecroucher/GPy
|
GPy/testing/state_space_main_tests.py
|
Python
|
bsd-3-clause
| 44,812
|
[
"Gaussian"
] |
f322c0281580d389193bfa1e57ae6cc7878ee943f749f66afdd609671a600148
|
from PyQt4 import QtCore, QtGui
import overlayDialogBase
import ilastik.gui.overlaySelectionDlg
from ilastik.core.overlays.thresholdOverlay import ThresholdOverlay
#*******************************************************************************
# S l i d e r R e c e i v e r *
#*******************************************************************************
class SliderReceiver(QtCore.QObject):
def __init__(self, dialog, index, oldValue):
QtCore.QObject.__init__(self)
self.dialog = dialog
self.index = index
self.oldValue = oldValue
def sliderMoved(self, value):
self.dialog.sliderMoved(self.index, value, self.oldValue)
self.oldValue = value
#*******************************************************************************
# M u l t i v a r i a t e T h r e s h o l d D i a l o g *
#*******************************************************************************
class MultivariateThresholdDialog(overlayDialogBase.OverlayDialogBase, QtGui.QDialog):
configuresClass = "ilastik.core.overlays.thresholdOverlay.ThresholdOverlay"
name = "Thresholding Overlay"
author = "C. N. S."
homepage = "hci"
description = "lazy evaluation thresholding"
def __init__(self, ilastik, instance = None):
QtGui.QDialog.__init__(self, ilastik)
self.setWindowTitle("Multi-variate Thresholding")
self.ilastik = ilastik
if instance != None:
self.overlayItem = instance
else:
ovm = self.ilastik.project.dataMgr[self.ilastik._activeImageNumber].overlayMgr
k = ovm.keys()[0]
ov = ovm[k]
self.overlayItem = ThresholdOverlay([ov], [])
self.volumeEditor = ilastik.labelWidget
self.project = ilastik.project
self.mainlayout = QtGui.QVBoxLayout()
self.setLayout(self.mainlayout)
self.mainwidget = QtGui.QWidget()
self.mainlayout.addWidget(self.mainwidget)
self.hbox = None
self.buildDialog()
self.acceptButton = QtGui.QPushButton("Ok")
self.connect(self.acceptButton, QtCore.SIGNAL('clicked()'), self.okClicked)
self.mainlayout.addWidget(self.acceptButton)
def buildDialog(self):
self.mainwidget.hide()
self.mainlayout.removeWidget(self.mainwidget)
self.mainwidget.close()
del self.mainwidget
self.mainwidget = QtGui.QWidget()
self.mainlayout.insertWidget(0, self.mainwidget)
self.hbox = QtGui.QHBoxLayout()
self.mainwidget.setLayout(self.hbox)
self.sliders = []
self.sliderReceivers = []
self.previousValues = []
self.totalValue = 0
for index, t in enumerate(self.overlayItem.foregrounds):
l = QtGui.QVBoxLayout()
#print t.name
#print len(self.overlayItem.thresholds)
#print index
self.sliderReceivers.append(SliderReceiver(self,index,self.overlayItem.thresholds[index] * 1000))
w = QtGui.QSlider(QtCore.Qt.Vertical)
#*******************************************************************************
# p r o b a b i l i t y *
#*******************************************************************************
w.setToolTip("Change the threshold for " + str(t.name) + "\n a low threshold compared to the other thresholds means a high class probability")
w.setRange(0,999)
w.setSingleStep(1)
w.setValue(self.overlayItem.thresholds[index] * 1000)
l.addWidget(w)
label = QtGui.QLabel(t.name)
l.addWidget(label)
self.sliderReceivers[-1].connect(w, QtCore.SIGNAL('sliderMoved(int)'), self.sliderReceivers[-1].sliderMoved)
self.sliders.append(w)
self.hbox.addLayout(l)
if len(self.overlayItem.backgrounds) > 0:
l = QtGui.QVBoxLayout()
self.sliderReceivers.append(SliderReceiver(self,len(self.sliders),self.overlayItem.thresholds[-1] * 1000))
w = QtGui.QSlider(QtCore.Qt.Vertical)
w.setRange(0,1000)
w.setSingleStep(1)
w.setValue(self.overlayItem.thresholds[-1] * 1000)
l.addWidget(w)
label = QtGui.QLabel('Background')
l.addWidget(label)
self.sliderReceivers[-1].connect(w, QtCore.SIGNAL('sliderMoved(int)'), self.sliderReceivers[-1].sliderMoved)
self.sliders.append(w)
self.hbox.addLayout(l)
l = QtGui.QVBoxLayout()
w = QtGui.QPushButton("Select Foreground")
self.connect(w, QtCore.SIGNAL("clicked()"), self.selectForegrounds)
l.addWidget(w)
w = QtGui.QPushButton("Select Background")
self.connect(w, QtCore.SIGNAL("clicked()"), self.selectBackgrounds)
l.addWidget(w)
l2 = QtGui.QHBoxLayout()
self.smoothing = QtGui.QCheckBox("Smooth")
        self.smoothing.setToolTip("Smooth the input overlays with the specified pixel sigma using a Gaussian.\nSmoothing may take a while depending on the size of the data...")
self.smoothing.setCheckState(self.overlayItem.smoothing * 2)
self.connect(self.smoothing, QtCore.SIGNAL("stateChanged(int)"), self.smoothingChanged)
self.sigma = QtGui.QLineEdit(str(self.overlayItem.sigma))
self.sigma.setToolTip("sigma in pixels")
l2.addWidget(self.smoothing)
l2.addWidget(self.sigma)
l.addLayout(l2)
self.hbox.addLayout(l)
self.setMinimumHeight(300)
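    # Each vertical slider runs from 0 to 999 and is mapped to a threshold in
    # [0, 1) by dividing by 1000; a slider left at 0 is stored as -1/1000.0,
    # i.e. a negative sentinel value (see smoothingChanged and sliderMoved below).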
def smoothingChanged(self, state):
sigma = self.overlayItem.sigma
try:
sigma = float(self.sigma.text())
except:
pass
self.overlayItem.sigma = sigma
if state == QtCore.Qt.Checked:
self.overlayItem.smoothing = True
else:
self.overlayItem.smoothing = False
self.overlayItem.setForegrounds(self.overlayItem.foregrounds)
thresholds = []
for i,s in enumerate(self.sliders):
if s.value() > 0:
thresholds.append(s.value() / 1000.0)
else:
thresholds.append(-1 / 1000.0)
self.overlayItem.setThresholds(thresholds)
self.volumeEditor.repaint()
def selectForegrounds(self):
d = ilastik.gui.overlaySelectionDlg.OverlaySelectionDialog(self.ilastik, singleSelection = False)
o = d.exec_()
if len(o) > 0:
self.overlayItem.setForegrounds(o)
self.buildDialog()
def selectBackgrounds(self):
d = ilastik.gui.overlaySelectionDlg.OverlaySelectionDialog(self.ilastik, singleSelection = False)
o = d.exec_()
self.overlayItem.setBackgrounds(o)
self.buildDialog()
def sliderMoved(self, index, value, oldValue):
self.sliders[index].setValue(value)
thresholds = []
for i,s in enumerate(self.sliders):
if s.value() > 0:
thresholds.append(s.value() / 1000.0)
else:
thresholds.append(-1 / 1000.0)
self.overlayItem.setThresholds(thresholds)
self.volumeEditor.repaint()
def okClicked(self):
if len(self.overlayItem.dsets) >= 2:
self.accept()
else:
            QtGui.QMessageBox.warning(self, "Error", "Please select more than one overlay for thresholding - either more than one foreground overlay, or one foreground and one background overlay!")
def exec_(self):
if QtGui.QDialog.exec_(self) == QtGui.QDialog.Accepted:
return self.overlayItem
else:
return None
|
ilastik/ilastik-0.5
|
ilastik/gui/overlayDialogs/multivariateThresholdDialog.py
|
Python
|
bsd-2-clause
| 8,038
|
[
"Gaussian"
] |
0580773e57be5194d6ed065b4314aac2c02c2785eec4b8255dd15d7148da4424
|
import sys
import urllib
import urlparse
from resources import wwe
import xbmcaddon
import xbmcgui
import xbmcplugin
from datetime import datetime, timedelta
import pickle
base_url = sys.argv[0]
addon = xbmcaddon.Addon()
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
media_path = addon.getAddonInfo('path') + '/media/'
email = addon.getSetting('emailaddress')
password = addon.getSetting('password')
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
def get_list_item(network_item):
liz = xbmcgui.ListItem(network_item.name, iconImage=network_item.icon, thumbnailImage=network_item.thumbnail)
liz.setInfo(
type="Video",
infoLabels={
"tvshowtitle": network_item.show_name,
"title": network_item.title,
"plot": network_item.description,
"genre": network_item.genre,
"year": network_item.air_date[0:4],
"duration": network_item.duration,
"aired": network_item.air_date,
"date": network_item.air_date[8:10]+'.'+network_item.air_date[5:7]+'.'+network_item.air_date[0:4],
"episode": network_item.episode})
liz.setArt({'fanart': network_item.fan_art})
liz.setArt({'banner': network_item.banner})
if network_item.item_type == 'show':
if network_item.on_watchlist:
liz.addContextMenuItems([('Remove Series from Watchlist',
'XBMC.RunPlugin(' + build_url({'mode': 'remove_series_watchlist',
'id': network_item.media_id}) + ')')])
else:
liz.addContextMenuItems([('Add Series to Watchlist',
'XBMC.RunPlugin(' + build_url({'mode': 'add_series_watchlist',
'id': network_item.media_id}) + ')')])
if network_item.item_type == 'episode':
if network_item.on_watchlist:
liz.addContextMenuItems([('Remove Episode from Watchlist',
'XBMC.RunPlugin(' + build_url({'mode': 'remove_episode_watchlist',
'id': network_item.media_id}) + ')')])
else:
liz.addContextMenuItems([('Add Episode to Watchlist',
'XBMC.RunPlugin(' + build_url({'mode': 'add_episode_watchlist',
'id': network_item.media_id}) + ')')])
liz.setProperty('IsPlayable', 'true')
return liz
if email == '' or password == '':
xbmcgui.Dialog().ok("WWE Network", "Please visit www.WWENetwork.com", "and register for your login credentials")
return_value = xbmcgui.Dialog().input('Enter WWE Network Account Email')
if return_value and len(return_value) > 0:
addon.setSetting('emailaddress', str(return_value))
email = addon.getSetting('emailaddress')
return_value = xbmcgui.Dialog().input('Enter WWE Network Account Password')
if return_value and len(return_value) > 0:
addon.setSetting('password', str(return_value))
password = addon.getSetting('password')
wwe_network = wwe.Network(email, password)
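# Reuse the pickled session cookies while they have not expired; otherwise log in
# again and cache the fresh cookies together with a new one-day expiry timestamp.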
cookie_exp_date = addon.getSetting('cookie_exp_date')
if cookie_exp_date != '' and pickle.loads(cookie_exp_date) > datetime.now():
cookies = addon.getSetting('cookies')
wwe_network.set_cookies(pickle.loads(cookies))
else:
wwe_network.login()
addon.setSetting('cookies', pickle.dumps(wwe_network.cookies))
addon.setSetting('cookie_exp_date', pickle.dumps(datetime.now() + timedelta(days=1)))
mode = args.get('mode', None)
if mode is None:
xbmcplugin.setContent(addon_handle, 'files')
live = wwe_network.get_live_stream()
li = get_list_item(live)
li.setIconImage(media_path + 'live.png')
li.setThumbnailImage(media_path + 'live.png')
li.setArt({'fanart': addon.getAddonInfo('fanart')})
li_url = build_url({'mode': live.item_type, "id": live.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=li_url, listitem=li)
recommended = xbmcgui.ListItem("Recommended", iconImage=media_path + 'recommended.png', thumbnailImage=media_path + 'recommended.png')
recommended.setArt({'fanart': addon.getAddonInfo('fanart')})
recommended_url = build_url({'mode': 'recommended'})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=recommended_url, listitem=recommended, isFolder=True)
on_demand = xbmcgui.ListItem("On Demand", iconImage=media_path + 'on_demand.png', thumbnailImage=media_path + 'on_demand.png')
on_demand.setArt({'fanart': addon.getAddonInfo('fanart')})
on_demand_url = build_url({'mode': 'on_demand'})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=on_demand_url, listitem=on_demand, isFolder=True)
episodes_watchlist = xbmcgui.ListItem("Watchlist - Episodes", iconImage=media_path + 'watchlist.png', thumbnailImage=media_path + 'watchlist.png')
episodes_watchlist.setArt({'fanart': addon.getAddonInfo('fanart')})
episodes_watchlist_url = build_url({'mode': 'episodes_watchlist'})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=episodes_watchlist_url, listitem=episodes_watchlist, isFolder=True)
series_watchlist = xbmcgui.ListItem("Watchlist - Series", iconImage=media_path + 'watchlist.png', thumbnailImage=media_path + 'watchlist.png')
series_watchlist.setArt({'fanart': addon.getAddonInfo('fanart')})
series_watchlist_url = build_url({'mode': 'series_watchlist'})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=series_watchlist_url, listitem=series_watchlist, isFolder=True)
search = xbmcgui.ListItem("Search", iconImage=media_path + 'search.png', thumbnailImage=media_path + 'search.png')
search.setArt({'fanart': addon.getAddonInfo('fanart')})
search_url = build_url({"mode": "search"})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=search_url, listitem=search, isFolder=True)
my_account = xbmcgui.ListItem("My Account", iconImage=media_path + 'my_account.png', thumbnailImage=media_path + 'my_account.png')
my_account.setArt({'fanart': addon.getAddonInfo('fanart')})
my_account_url = build_url({'mode': 'my_account'})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=my_account_url, listitem=my_account, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle, cacheToDisc=False)
elif mode[0] == 'recommended':
xbmcplugin.setContent(addon_handle, 'episodes')
for n in wwe_network.get_recommended():
li = get_list_item(n)
url = build_url({'mode': n.item_type, "id": n.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
xbmcplugin.endOfDirectory(addon_handle, cacheToDisc=False)
elif mode[0] == 'on_demand':
xbmcplugin.setContent(addon_handle, 'tvshows')
for n in wwe_network.get_sections():
li = get_list_item(n)
url = build_url({'mode': n.item_type, "id": n.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'episodes_watchlist':
xbmcplugin.setContent(addon_handle, 'episodes')
for n in wwe_network.get_episodes_watchlist():
n.on_watchlist = True
li = get_list_item(n)
url = build_url({'mode': n.item_type, "id": n.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
xbmcplugin.endOfDirectory(addon_handle, cacheToDisc=False)
elif mode[0] == 'add_episode_watchlist':
if wwe_network.add_episode_to_watchlist(args['id'][0]):
xbmcgui.Dialog().notification('Success', 'Added episode to watchlist')
else:
xbmcgui.Dialog().notification('Error occurred', 'Failed to add episode to watchlist')
elif mode[0] == 'remove_episode_watchlist':
if wwe_network.remove_episode_from_watchlist(args['id'][0]):
xbmcgui.Dialog().notification('Success', 'Removed episode from watchlist')
else:
xbmcgui.Dialog().notification('Error occurred', 'Failed to remove episode from watchlist')
elif mode[0] == 'series_watchlist':
xbmcplugin.setContent(addon_handle, 'tvshows')
for n in wwe_network.get_series_watchlist():
n.on_watchlist = True
li = get_list_item(n)
url = build_url({'mode': n.item_type, "id": n.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle, cacheToDisc=False)
elif mode[0] == 'add_series_watchlist':
if wwe_network.add_series_to_watchlist(args['id'][0]):
xbmcgui.Dialog().notification('Success', 'Added series to watchlist')
else:
xbmcgui.Dialog().notification('Error occurred', 'Failed to add series to watchlist')
elif mode[0] == 'remove_series_watchlist':
if wwe_network.remove_series_from_watchlist(args['id'][0]):
xbmcgui.Dialog().notification('Success', 'Removed series from watchlist')
else:
xbmcgui.Dialog().notification('Error occurred', 'Failed to remove series from watchlist')
elif mode[0] == 'search':
text = xbmcgui.Dialog().input('Search')
if text:
xbmcplugin.setContent(addon_handle, 'episodes')
for s in wwe_network.search(text):
li = get_list_item(s)
url = build_url({'mode': s.item_type, "id": s.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
xbmcplugin.endOfDirectory(addon_handle, cacheToDisc=False)
elif mode[0] == 'my_account':
addon.openSettings()
elif mode[0] == 'section':
xbmcplugin.setContent(addon_handle, 'tvshows')
section_name = args['id'][0]
for n in wwe_network.get_shows(section_name):
li = get_list_item(n)
url = build_url({'mode': n.item_type, "id": n.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'show':
xbmcplugin.setContent(addon_handle, 'episodes')
show_name = args['id'][0]
episodes = wwe_network.get_episodes(show_name)
if episodes[0].type == 'collection':
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_EPISODE)
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE)
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_DURATION)
episode = 0
for n in episodes:
episode = episode + 1
n.episode = episode
li = get_list_item(n)
url = build_url({'mode': n.item_type, "id": n.media_id})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'episode':
media_id = args['id'][0]
try:
item = xbmcgui.ListItem(path=wwe_network.get_video_url(media_id, addon.getSetting('bitrate')))
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
except ValueError as e:
xbmcgui.Dialog().notification('Error occurred', str(e.message))
xbmcplugin.endOfDirectory(addon_handle)
|
alegag/plugin.video.wwen
|
wwen_plugin.py
|
Python
|
gpl-3.0
| 11,323
|
[
"VisIt"
] |
7610aa65756744a2ed99196dc666554d39025f5996f2784757d1c8947e0bece9
|
#!/usr/bin/python
# lightLEDs.py
# Button press loops through LEDs like this (0 is led off, 1 is led on): 000, 100, 110, 111, 011, 001, 000, 100, 110, 111, 011, 001, 000 etc.
# Author : Zachary Igielman
import RPi.GPIO as GPIO, time, sys
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT) #red
GPIO.setup(13, GPIO.OUT) #amber
GPIO.setup(21, GPIO.OUT) #green
GPIO.setup(26, GPIO.IN) #button
GPIO.output(11, GPIO.LOW)
GPIO.output(13, GPIO.LOW)
GPIO.output(21, GPIO.LOW)
def setLEDs(tempArray):
GPIO.output(11, tempArray[0])
GPIO.output(13, tempArray[1])
GPIO.output(21, tempArray[2])
array=[[0,0,0],[1,0,0],[1,1,0],[1,1,1],[0,1,1],[0,0,1]]
a=0
try:
while True:
while GPIO.input(26)==1:
time.sleep(0.1)
setLEDs(array[a%6])
a=a+1
while GPIO.input(26)==0:
time.sleep(0.1)
finally:
GPIO.cleanup()
sys.exit(0)
|
soslug/projects
|
rpi/python_resources/smstext-and mypifi-code/lightLEDs.py
|
Python
|
gpl-3.0
| 852
|
[
"Amber"
] |
fc34d4fb3bc6f559e8a471b8e0e1a9769857dde3b4c2c2958669ad521ffbffdb
|
#
# 1st version by Fulvio Paleari
# This file is part of yambopy
#
import os
import re
import numpy as np
from qepy import PwIn
from yambopy.lattice import *
from itertools import product
import copy
from math import *
import fractions as frc
# Dimensional constants for reference (actually only b2a is really used in the code for now)
cm1_2_Tera=0.0299793 # Conversion from cm-1 to THz with 2pi factor included
Tera=1.e12
b2a =0.529177
hbar=6.5821e-16 # Reduced Planck constant hbar (eV*s)
kb=8.6173e-5 # Boltzmann's constant (eV/K)
Mp=1.0073 # Proton mass (reference, u)
cMp=Mp*1.660539*6.241509e-29 # Conversion of Mp in eV*\AA^{-2}*s^2
#
## ISSUES TO FIX ##
"""(iii) Small issue in nondiagonal supercell matrices for certain q-vectors
(iv) In nondiagonal supercells: the suggested kpoint mesh must be CONSISTENT and UNIFORM --> to fix
"""
## These two functions read phonons from qe output
def read_frequencies(modes_file,units='Tera'):
"""Read phonon frequencies from QE output phonon modes file
"""
Omega=[]
with open (modes_file) as fp:
for line in fp:
if line.strip()[0:4]=='freq':
w=re.findall(r"[-+]?\d*\.\d+|d+", line)
Omega.append(w)
Omega = np.float_(Omega)
if units=='Tera': Omega= Omega[:,0]
else: Omega= Omega[:,1]
return Omega
def read_eig(modes_file,basis):
""" Read phonon modes from QE output file
"""
modes=3*basis
eig = np.genfromtxt(modes_file, autostrip=True, comments='freq', skip_header=4, skip_footer=1, usecols=(1,2,3,4,5,6))
# Reshape data from quantum espresso output
eig = np.array([eig[:,0]+1j*eig[:,1], eig[:,2]+1j*eig[:,3], eig[:,4]+1j*eig[:,5]])
eig = eig.T
eig = np.reshape(eig, (modes,basis,3))
# eig[mode][atom][direction]
return eig
## Start of the proper supercell class
class Supercell():
"""
A class to generate custom supercells from a quantum espresso input file.
The supercell may be "non-diagonal" according to [Lloyd-Williams and Monserrat, Phys. Rev. B 92, 184301, 2015].
The atoms in the supercell may also be displaced along phonon modes if provided.
Input arguments:
- qe_input: a PwIn() instance of an input file in the unit cell (uc)
- Optional: QE-DFPT phonon modes output
How it works:
- Call self.d_sup(R) to build diagonal supercell
-- R is a list of repetitions of the uc in the cartesian directions
- Call self.nd_sup(Q) to build nondiagonal supercell
-- Q contains the fractional coordinates of the q-point to be folded at Gamma in a nondiagonal supercell like [[m1,m2,m3],[n1,n2,n3]]
- Call self.displace(modes_file,new_atoms,Temp=0.1) to displace supercell
-- modes_file is a QE-DFPT phonon-mode output
-- Temp is the width of the displacements in bohr
-- new_atoms is the output of (n)d_sup
Sample input script found at:
tutorials/supercell
NOTA BENE:
The Q-vector to be folded must be given in CRYSTAL COORDINATES. If computing its related phonons,
remember that the output of quantum espresso uses CARTESIAN COORDINATES
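    Minimal usage sketch (illustrative only: 'qe_input' is assumed to be an
    already-built PwIn() instance, and the repetitions, q-point and modes file
    name below are placeholders, not values required by this module):
        sc = Supercell(qe_input)
        atoms = sc.d_sup([2, 2, 2])                   # diagonal 2x2x2 supercell -> sc.qe_d
        atoms = sc.nd_sup([[1, 0, 0], [3, 1, 1]])     # fold q = (1/3, 0, 0) at Gamma -> sc.qe_nd
        sc.displace('prefix.modes', atoms, Temp=0.1)  # displaced supercells -> sc.modes_qe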
"""
def __init__(self,qe_input):
self.qe_input = qe_input
self.latvec = np.array(qe_input.cell_parameters)
self.basis = int(qe_input.system['nat'])
self.atoms = qe_input.atoms
self.uc_kpts = qe_input.kpoints
self.atypes = qe_input.atypes
self.aunits = qe_input.atomic_pos_type
"""
[START] Displacement-related functions
"""
def displace(self,modes_file,new_atoms,Temp=0.1,use_temp='no',write=True):
"""
Case of displaced supercell
"""
#Check if we are displacing the unit cell (i.e., gamma modes)
GAMMA = False
try: self.Q
except AttributeError: GAMMA = True
print('Applying displacements according to phonon modes...')
self.use_temp = use_temp
self.initialize_phonons(modes_file,self.atypes,Temp)
if GAMMA: #No phases and take only optical modes
phases = np.ones(self.sup_size)
expand_eigs = np.array([phases[i]*self.eigs for i in range(self.sup_size)])
self.print_expanded_eigs(expand_eigs,modes_file,GAMMA=GAMMA)
else:
phases = self.getPhases()
expand_eigs = np.array([phases[i]*self.eigs for i in range(self.sup_size)])
self.print_expanded_eigs(expand_eigs,modes_file,GAMMA=GAMMA) #Print expanded eigs
#Take real part
for cell in range(self.sup_size): expand_eigs[cell]= self.take_real(expand_eigs[cell])
disps = expand_eigs.real.astype(float)
#Force same gauge choice
#for cell in range(self.sup_size): disps[cell]= self.force_gauge(disps[cell])
#Transform eigenmodes in displacements
#disps[cell][mode][basis][direction]
disps = np.array([self.osc_length(disp_slice,GAMMA=GAMMA) for disp_slice in disps])
#disps[mode][cell][basis][direction]
self.disps = disps.swapaxes(0,1)
if GAMMA: self.disps = self.disps[3:]
if write:
#A list of PwIn() objects (one for each phonon mode) that can be printed, written to file, etc.
            if hasattr(self, 'Q'): mode='nd'  # a nondiagonal supercell was built via nd_sup()
else: mode='diagonal'
self.modes_qe = [self.write(new_atoms,mode,phonon=disps_slice) for disps_slice in self.disps]
def print_expanded_eigs(self,exp_eigs,modes_file,GAMMA=False):
"""
Print expanded modes in QE-style, to compare and for reference
"""
if GAMMA:
q = np.array([0.,0.,0.])
ncells = 1
mode_start = 3
else:
q = np.array([float(self.Q[0,i])/float(self.Q[1,i]) for i in range(3)])
ncells = int(np.max(np.array(self.Q[1])))
mode_start = 0
sc_basis = self.basis*ncells
freq_THz,freq_cmm1=read_frequencies(modes_file,units='Tera'),read_frequencies(modes_file,units='cmm1')
exp_eigs = exp_eigs.swapaxes(0,1) #[mode][cell][basis][direction]
if GAMMA: filename = self.qe_input.control['prefix'][1:-1]+"_s.modes_GAMMA"
else: filename = self.qe_input.control['prefix'][1:-1]+"_s.modes_expanded"
exp_eigs_file = open(filename,"w")
exp_eigs_file.write(" diagonalizing the dynamical matrix ...\n")
exp_eigs_file.write("\n")
exp_eigs_file.write(" q = %2.5f %2.5f %2.5f\n"%(q[0],q[1],q[2]))
exp_eigs_file.write(" **************************************************************************\n")
for m in range(mode_start,3*self.basis):
exp_eigs_file.write(" freq ( %d) = %2.6f [THz] = %2.6f [cm-1]\n" \
%(m+1,freq_THz[m],freq_cmm1[m]))
for c in range(ncells):
for a in range(self.basis):
exp_eigs_file.write("( %2.6f %2.6f %2.6f %2.6f %2.6f %2.6f )\n" \
%(exp_eigs[m,c,a,0].real,exp_eigs[m,c,a,0].imag, \
exp_eigs[m,c,a,1].real,exp_eigs[m,c,a,1].imag, \
exp_eigs[m,c,a,2].real,exp_eigs[m,c,a,2].imag))
exp_eigs_file.write(" **************************************************************************")
exp_eigs_file.close()
def initialize_phonons(self,modes_file,atypes,Temp):
#Read frequencies
Omega = read_frequencies(modes_file)
self.Omega = Omega*Tera #in hertz
#Read phonon eigenmodes
self.eigs = read_eig(modes_file,self.basis)
#Temperature/displacement
self.Temp = Temp
#Atomic masses (in u)
self.m_at = np.array( [ float( atypes.get( self.atoms[i][0] )[0] ) for i in range(self.basis) ] )
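    # getPhases(): Bloch phase factors exp(2*pi*i * q.T) for every repeated cell,
    # with q rebuilt from the stored fractions in self.Q and T the cell
    # translations collected by build_supercell().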
def getPhases(self):
q = np.array([float(self.Q[0,i])/float(self.Q[1,i]) for i in range(3)])
arg = q[0]*self.T[:,0]+q[1]*self.T[:,1]+q[2]*self.T[:,2]
return np.exp(1j*2.*np.pi*arg)
def take_real(self,eig):
modes=3*self.basis
for i in range(modes):
#Check that there aren't purely imaginary modes
if np.real(eig[i]).all == 0.:
print("mode %i: Purely imaginary"%(i+1))
eig[i]=1j*eig[i]
eig = np.real(eig)
return eig
def force_gauge(self,eig):
"""
for each normal mode, the first nonzero element is set to be positive
"""
modes=3*self.basis
for i in range(modes):
a=np.nonzero(eig[i]) # for each normal mode
a=np.array(a) # I get the nonzero elements, of which the first
if eig[i,a[0,0],a[1,0]] < 0.: #one, given by these indices, must be >0
eig[i]=-eig[i]
return eig
def osc_length(self,eig,GAMMA=False):
"""
Oscillator lengths per mode (in ANGSTROM)
NB: Only ARBITRARY displacements can be set.
NB2: Eigenmodes from quantum espresso are ALREADY weighted by atomic masses
"""
RESCALE = b2a*self.Temp #Arbitrary displacement
temperature = 0.*self.Temp #Harmonic displacement (permanently set to zero for now)
modes=3*self.basis
displacements=[]
lengths_per_mode=[]
for nu,eig_slice in enumerate(eig):
if GAMMA and ( nu==0 or nu==1 or nu==2): l=0.
else: l=sqrt(hbar/(2.*cMp*self.Omega[nu]))
if temperature==0.: sigma2=l*l
else: sigma2=l*l*(2./(np.exp(hbar*self.Omega[nu]/(kb*temperature))-1.)+1)
if self.use_temp=='no': displacements.append(RESCALE*eig_slice) #Each mode (i.e. atomic displacement directions) is multiplied by the corresponding length
else: displacements.append(np.sqrt(sigma2)*eig_slice) #Displacement by harmonic sigma
#List of average "realistic" atomic displacements (not weighted by atomic mass)
lengths_per_mode.append(np.sqrt(sigma2))
displacements = np.array(displacements)
lengths_per_mode = np.array(lengths_per_mode)
#mass_ratio = np.array([sqrt(Mp/mass) for mass in self.m_at])
#for d,i in product(range(len(displacements)),range(self.basis)): displacements[d,i,:] *= mass_ratio[i] #Weigh the displ. with the different masses
return displacements #displacements[mode in order of ascending frequency][basis]
"""
[END] Displacement-related functions
"""
def lattice_constants(self,vec):
return [np.linalg.norm(vec[0]),np.linalg.norm(vec[1]),np.linalg.norm(vec[2])]
def d_sup(self,R,write=True):
"""
Case of diagonal supercell
"""
self.R = R
        self.sup_size = int(R[0]*R[1]*R[2])
        self.new_latvec = np.array([self.latvec[i]*R[i] for i in range(3)])
new_atoms = self.build_supercell()
if write:
#PwIn() object that can be printed, written to file, etc.
self.qe_d = self.write(new_atoms,mode='diagonal')
return new_atoms
def nd_sup(self,Q,write=True):
"""
Case of nondiagonal supercell
"""
self.Q = np.array(Q)
print('Nondiagonal supercell')
if (self.uc_kpts % self.Q[1] != 0).any():
print('ERROR: You must set a unit cell k-point mesh where%s Nx,Ny,Nz are multiples of %d,%d,%d, respectively.'%('\n',self.Q[1,0],self.Q[1,1],self.Q[1,2]))
exit()
self.R, self.new_latvec = self.find_nondiagonal()
self.sup_size = int(self.R[0]*self.R[1]*self.R[2])
new_atoms = self.build_supercell()
if write:
self.qe_nd = self.write(new_atoms,mode='nd')
return new_atoms
def build_supercell(self):
latvec = self.latvec
R = self.R
atoms = np.array([atom[1] for atom in self.atoms])
if self.aunits!='angstrom': atoms = red_car(atoms,latvec)
else: latvec = b2a*latvec
#new_atoms[cell][basis][direction]
new_atoms = np.array([atoms for n in range(self.sup_size)])
T = []
for nz,ny,nx in product(range(int(R[2])),range(int(R[1])),range(int(R[0])) ):
cell=int(nx+ny*R[0]+nz*R[0]*R[1])
translation = nx*latvec[0] +ny*latvec[1] +nz*latvec[2]
for b in range(self.basis):
new_atoms[cell,b]=new_atoms[cell,b] + translation
T.append(translation)
T = np.array(T) #Positions of the repeated unit cells
if self.aunits!='angstrom': self.T=car_red(T,self.latvec)
else: self.T=T
#new_atoms[super_basis][directions]$
new_atoms=new_atoms.reshape(self.basis*self.sup_size,3)
if self.aunits!='angstrom': new_atoms = car_red(new_atoms,self.new_latvec)
return new_atoms
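    # find_integers() solves the modular conditions of the nondiagonal-supercell
    # construction: p is the smallest integer with (m2 + p*m3) = 0 mod gcd(n2,n3),
    # and q, r solve the analogous congruences that fill the off-diagonal entries
    # of the supercell matrix S assembled in find_nondiagonal().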
def find_integers(self,nums,g23,g12,g31,g123):
"""
Compute integers for off-diagonal supercell matrix elements
Called by find_nondiagonal()
"""
if nums[1]==0: p=0
else:
#Compute p (it's a modulo equation)
if g23 == 1: p = 0
else:
for i in range(1,g23):
if (nums[1]+i*nums[2]) % g23 == 0:
p=i
break
if nums[0]==0: q,r=[0,1] #[POSSIBLE BUG for certain q-vectors] These conditions must be checked carefully
else:
#Compute q
g12_r = int(g12/g123)
g23_r = int(g23/g123)
g31_r = int(g31/g123)
if g12_r == 1: q = 0
else:
for i in range(1,g12_r):
if (g23_r*nums[0]+i*g31_r*nums[1]) % g12_r == 0:
q=i
break
#Compute r
gg_r = g31*g23/g123
z = g23*nums[0]/g12+g31*q*nums[1]/g12
if gg_r == 1: r = 0
else:
for i in range(1,gg_r):
if (z+i*nums[2]) % gg_r == 0:
r=i
break
return p,q,r
def find_nondiagonal(self):
"""
Nondiagonal supercell, based on [Phys. Rev. B 92, 184301]
"""
Q = self.Q
#Take care of components already at Gamma
Q[1,np.where(Q[0]==0)]=1
#Shift the q-point into the positive quadrant of the reciprocal unit cell
Q[0,np.where(Q[0]<0)]+=Q[1,np.where(Q[0]<0)]
#GCDs of Q[1] (in the logical order of the derivation)
g23 = frc.gcd(Q[1,1],Q[1,2])
g12 = frc.gcd(Q[1,0],Q[1,1])
g31 = frc.gcd(Q[1,2],Q[1,0])
g123 = frc.gcd(Q[1,0],frc.gcd(Q[1,1],Q[1,2]))
#Integers needed to solve the supercell matrix equation
p,q,r = self.find_integers(Q[0],g23,g12,g31,g123)
#Matrix elements (in order of derivation) and supercell matrix
S_33 = Q[1,2]
S_22 = Q[1,1]/g23
S_23 = p*Q[1,2]/g23
S_11 = g123*Q[1,0]/(g12*g31)
S_12 = q*g123*Q[1,1]/(g12*g23)
S_13 = r*g123*Q[1,2]/(g31*g23)
self.S = np.array([[S_11,S_12,S_13],[0,S_22,S_23],[0,0,S_33]])
#New lattice vectors and actual supercell size
new_latvec = np.einsum('ij,jx->ix',self.S,self.latvec)
R = [self.S[0,0],self.S[1,1],self.S[2,2]]
print(self.S)
return R, new_latvec
def reciprocal(self,mode):
"""
Function to compute reciprocal lattice
"""
#Unit cell
repvec = rec_lat(self.latvec)
alat=np.array(self.lattice_constants(self.latvec))
self.repvec = 2.*np.pi*np.multiply(1./alat,repvec)
#Supercell
        if mode=='diagonal': self.new_repvec = np.array([self.repvec[i]/float(self.R[i]) for i in range(3)]) # note: self.R (a bare R is not defined in this scope)
else:
self.S_inv_T = np.linalg.inv(self.S).T
self.new_repvec = np.einsum('ij,jx->ix',self.S_inv_T,self.repvec)
def atoms_input(self, new_atoms):
"""
Put the atomic element labels in the right order
"""
positions_input = new_atoms.tolist()
elements_input = [[self.qe_input.atoms[i][0] for i in range(int(self.basis))] for j in range(self.sup_size)]
elements_input = [ item for sublist in elements_input for item in sublist ]
atoms_input = [[elements_input[i], positions_input[i]] for i in range(self.sup_size*self.basis)]
return atoms_input
def posint(self,value):
return abs(int(round(value)))
def write(self,new_atoms,mode,phonon=None):
R = self.R
new_latvec = self.new_latvec
alat = self.lattice_constants(new_latvec)
qe = self.qe_input
if mode=='diagonal':
#A suggestion for a consistent new kpoint mesh
new_kpoints = [ceil(qe.kpoints[0]/R[0]), ceil(qe.kpoints[1]/R[1]), ceil(qe.kpoints[2]/R[2])]
else:
#The compulsory new kpoint mesh - (sub)multiples of it are also fine but not consistent
self.reciprocal('nondiagonal')
new_kpoints = np.dot(self.S_inv_T,np.array(qe.kpoints))
new_kpoints = [self.posint(new_kpoints[0]),self.posint(new_kpoints[1]),self.posint(new_kpoints[2])]
if phonon is not None:
phonon = phonon.reshape(self.basis*self.sup_size,3)
phonon = car_red(phonon,self.new_latvec)
new_atoms = new_atoms + phonon
qe_s = copy.deepcopy(qe)
qe_s.set_atoms(self.atoms_input(new_atoms))
qe_s.control['prefix'] = qe.control['prefix'][:-1]+"_s'"
#[POSSIBLE BUG] with only ibrav==0 and cell_parameters, it might fail the symmetry !?
if 'celldm(1)' in qe_s.system: del qe_s.system['celldm(1)']
if 'celldm(2)' in qe_s.system: del qe_s.system['celldm(2)']
if 'celldm(3)' in qe_s.system: del qe_s.system['celldm(3)']
"""
qe_s.system['celldm(1)'] = alat[0]
qe_s.system['celldm(2)'] = alat[1]/alat[0]
qe_s.system['celldm(3)'] = alat[2]/alat[0]
"""
qe_s.system['ibrav']=0
qe_s.cell_units = 'bohr'
qe_s.cell_parameters = new_latvec
#Just a suggestion for the new bands
if 'nbnd' in qe.system: qe_s.system['nbnd'] = self.sup_size*int(qe.system['nbnd'])
qe_s.system['nat'] = self.basis*self.sup_size
qe_s.kpoints = new_kpoints
return qe_s
|
alexmoratalla/yambopy
|
qepy/supercell.py
|
Python
|
bsd-3-clause
| 18,964
|
[
"CRYSTAL",
"Quantum ESPRESSO"
] |
25c52321b83b4cff304905eafce104f058ac3eecab904bee403812694d34f682
|
#!/usr/bin/env python3
import argparse, sys, math
def read_cmd():
"""Reads command line params"""
desc = "Generate even-tempered basis set for TeraChem"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-n", dest="nbasis", type=int, required=True, help="Number of basis funcs")
parser.add_argument("-f", "--amax", dest="amax", type=float, default=100.0, help="Maximum exponent")
parser.add_argument("-i", "--amin", dest="amin", type=float, help="Minimum exponent (NOT WORKING YET)")
return parser.parse_args()
def get_even_tempered_exponents(amin, amax, nbasis):
"""Get alpha exponents for even tempered basis
According to Ref. 64 in """
# TODO: Make beta dependent on amin
beta = 0.01
alphas = []
alphas.append(amax)
for i in range(1, nbasis):
a = amax * beta**(i / (nbasis-1))
alphas.append(a)
return alphas
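# Worked example (illustrative numbers): with amax = 100.0, beta = 0.01 and
# nbasis = 3 the loop above yields 100*0.01**(1/2) = 10 and 100*0.01**(2/2) = 1,
# so the exponents [100, 10, 1] form a geometric progression running from amax
# down to amax*beta.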
def print_cgto(alphas, coeffs, l, f):
"""Print contracted Gaussian of given momentum"""
if len(alphas) != len(coeffs):
print("ERROR: incompatible inputs in \"print_cgto\"")
sys.exit(1)
f.write("%s %d\n" % (l, len(alphas)))
for i in range(len(alphas)):
f.write(" %15.7f %15.7f\n" % (alphas[i], coeffs[i]))
def print_basis(alphas):
"""Prints GTOs in a basis file format for TC"""
    # For now, hardcoded to print s,p,d functions for each alpha
# Completely decontracted basis
fname = "basis_evtemp_%d" % (len(alphas))
with open(fname, "w") as f:
for a in alphas:
for l in "SPD":
print_cgto([a], [1.0], l, f)
if __name__ == '__main__':
opts = read_cmd()
exponents = get_even_tempered_exponents(opts.amin, opts.amax, opts.nbasis)
print_basis(exponents)
|
PHOTOX/photoxrepo
|
INPUTS/FANOCI/BASIS-GENERATION/gen_even_tempered_basis.py
|
Python
|
mit
| 1,729
|
[
"Gaussian",
"TeraChem"
] |
597c238ffedd06a6dab77670eedbe382b1600ebf2499320a6f4c42a53a1c6303
|
from itertools import combinations_with_replacement
import numpy as np
from scipy import ndimage as ndi
from scipy import stats
from ..util import img_as_float, pad
from ..feature import peak_local_max
from ..feature.util import _prepare_grayscale_input_2D
from ..feature.corner_cy import _corner_fast
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import safe_as_int
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndi.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndi.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
"""Compute structure tensor using sum of squared differences.
The structure tensor A is defined as::
A = [Axx Axy]
[Axy Ayy]
which is approximated by the weighted sum of squared differences in a local
window around each pixel in the image.
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as a
weighting function for the local summation of squared differences.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
Axx : ndarray
Element of the structure tensor for each pixel in the input image.
Axy : ndarray
Element of the structure tensor for each pixel in the input image.
Ayy : ndarray
Element of the structure tensor for each pixel in the input image.
Examples
--------
>>> from skimage.feature import structure_tensor
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
>>> Axx
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 4., 0., 4., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 0., 0., 0., 0.]])
"""
image = _prepare_grayscale_input_2D(image)
imx, imy = _compute_derivatives(image, mode=mode, cval=cval)
    # structure tensor
Axx = ndi.gaussian_filter(imx * imx, sigma, mode=mode, cval=cval)
Axy = ndi.gaussian_filter(imx * imy, sigma, mode=mode, cval=cval)
Ayy = ndi.gaussian_filter(imy * imy, sigma, mode=mode, cval=cval)
return Axx, Axy, Ayy
def hessian_matrix(image, sigma=1, mode='constant', cval=0):
"""Compute Hessian matrix.
The Hessian matrix is defined as::
H = [Hxx Hxy]
[Hxy Hyy]
which is computed by convolving the image with the second derivatives
of the Gaussian kernel in the respective x- and y-directions.
Parameters
----------
image : ndarray
Input image.
sigma : float
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
Hxx : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hxy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hyy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Examples
--------
>>> from skimage.feature import hessian_matrix
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 4
>>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
>>> Hxy
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 0., -1., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., -1., 0., 1., 0.],
[ 0., 0., 0., 0., 0.]])
"""
image = img_as_float(image)
gaussian_filtered = ndi.gaussian_filter(image, sigma=sigma,
mode=mode, cval=cval)
gradients = np.gradient(gaussian_filtered)
axes = range(image.ndim)
H_elems = [np.gradient(gradients[ax0], axis=ax1)
for ax0, ax1 in combinations_with_replacement(axes, 2)]
if image.ndim == 2:
# The legacy 2D code followed (x, y) convention, so we swap the axis
# order to maintain compatibility with old code
H_elems.reverse()
return H_elems
def hessian_matrix_det(image, sigma=1):
"""Computes the approximate Hessian Determinant over an image.
This method uses box filters over integral images to compute the
approximate Hessian Determinant as described in [1]_.
Parameters
----------
image : array
The image over which to compute Hessian Determinant.
sigma : float, optional
Standard deviation used for the Gaussian kernel, used for the Hessian
matrix.
Returns
-------
out : array
The array of the Determinant of Hessians.
References
----------
.. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
"SURF: Speeded Up Robust Features"
ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
Notes
-----
    The running time of this method depends only on the size of the image; it is
    independent of `sigma`, as one would expect. The downside is that the result
    for `sigma` less than `3` is not accurate, i.e., not similar to the result
    obtained by computing the Hessian and taking its determinant.
"""
image = img_as_float(image)
image = integral_image(image)
return np.array(_hessian_matrix_det(image, sigma))
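# Closed-form eigenvalues of the symmetric 2x2 matrix [[M00, M01], [M01, M11]]:
# l1, l2 = (M00 + M11)/2 +/- sqrt((M00 - M11)**2 + 4*M01**2)/2, i.e. half the
# trace plus/minus half the square root of the discriminant. Shared by the
# structure-tensor and Hessian eigenvalue helpers below.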
def _image_orthogonal_matrix22_eigvals(M00, M01, M11):
l1 = (M00 + M11) / 2 + np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
l2 = (M00 + M11) / 2 - np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
return l1, l2
def structure_tensor_eigvals(Axx, Axy, Ayy):
"""Compute Eigen values of structure tensor.
Parameters
----------
Axx : ndarray
Element of the structure tensor for each pixel in the input image.
Axy : ndarray
Element of the structure tensor for each pixel in the input image.
Ayy : ndarray
Element of the structure tensor for each pixel in the input image.
Returns
-------
l1 : ndarray
Larger eigen value for each input matrix.
l2 : ndarray
Smaller eigen value for each input matrix.
Examples
--------
>>> from skimage.feature import structure_tensor, structure_tensor_eigvals
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
>>> structure_tensor_eigvals(Axx, Axy, Ayy)[0]
array([[ 0., 0., 0., 0., 0.],
[ 0., 2., 4., 2., 0.],
[ 0., 4., 0., 4., 0.],
[ 0., 2., 4., 2., 0.],
[ 0., 0., 0., 0., 0.]])
"""
return _image_orthogonal_matrix22_eigvals(Axx, Axy, Ayy)
def hessian_matrix_eigvals(Hxx, Hxy, Hyy):
"""Compute Eigen values of Hessian matrix.
Parameters
----------
Hxx : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hxy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Hyy : ndarray
Element of the Hessian matrix for each pixel in the input image.
Returns
-------
l1 : ndarray
Larger eigen value for each input matrix.
l2 : ndarray
Smaller eigen value for each input matrix.
Examples
--------
>>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 4
>>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
>>> hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]
array([[ 0., 0., 2., 0., 0.],
[ 0., 1., 0., 1., 0.],
[ 2., 0., -2., 0., 2.],
[ 0., 1., 0., 1., 0.],
[ 0., 0., 2., 0., 0.]])
"""
return _image_orthogonal_matrix22_eigvals(Hxx, Hxy, Hyy)
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
"""Compute Kitchen and Rosenfeld corner measure response image.
The corner measure is calculated as follows::
(imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
/ (imx**2 + imy**2)
Where imx and imy are the first and imxx, imxy, imyy the second
derivatives.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
response : ndarray
Kitchen and Rosenfeld response image.
"""
imx, imy = _compute_derivatives(image, mode=mode, cval=cval)
imxx, imxy = _compute_derivatives(imx, mode=mode, cval=cval)
imyx, imyy = _compute_derivatives(imy, mode=mode, cval=cval)
numerator = (imxx * imy ** 2 + imyy * imx ** 2 - 2 * imxy * imx * imy)
denominator = (imx ** 2 + imy ** 2)
response = np.zeros_like(image, dtype=np.double)
mask = denominator != 0
response[mask] = numerator[mask] / denominator[mask]
return response
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
"""Compute Harris corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are first derivatives, averaged with a gaussian filter.
The corner measure is then defined as::
det(A) - k * trace(A)**2
or::
2 * det(A) / (trace(A) + eps)
Parameters
----------
image : ndarray
Input image.
method : {'k', 'eps'}, optional
Method to compute the response image from the auto-correlation matrix.
k : float, optional
Sensitivity factor to separate corners from edges, typically in range
`[0, 0.2]`. Small values of k result in detection of sharp corners.
eps : float, optional
Normalisation factor (Noble's corner measure).
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
response : ndarray
Harris response image.
References
----------
.. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_harris, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_harris(square), min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = structure_tensor(image, sigma)
# determinant
detA = Axx * Ayy - Axy ** 2
# trace
traceA = Axx + Ayy
if method == 'k':
response = detA - k * traceA ** 2
else:
response = 2 * detA / (traceA + eps)
return response
def corner_shi_tomasi(image, sigma=1):
"""Compute Shi-Tomasi (Kanade-Tomasi) corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are first derivatives, averaged with a gaussian filter.
The corner measure is then defined as the smaller eigenvalue of A::
((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
response : ndarray
Shi-Tomasi response image.
References
----------
.. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_shi_tomasi, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_shi_tomasi(square), min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = structure_tensor(image, sigma)
# minimum eigenvalue of A
response = ((Axx + Ayy) - np.sqrt((Axx - Ayy) ** 2 + 4 * Axy ** 2)) / 2
return response
def corner_foerstner(image, sigma=1):
"""Compute Foerstner corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are first derivatives, averaged with a gaussian filter.
The corner measure is then defined as::
w = det(A) / trace(A) (size of error ellipse)
q = 4 * det(A) / trace(A)**2 (roundness of error ellipse)
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
w : ndarray
Error ellipse sizes.
q : ndarray
Roundness of error ellipse.
References
----------
.. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/foerstner87.fast.pdf
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_foerstner, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> w, q = corner_foerstner(square)
>>> accuracy_thresh = 0.5
>>> roundness_thresh = 0.3
>>> foerstner = (q > roundness_thresh) * (w > accuracy_thresh) * w
>>> corner_peaks(foerstner, min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = structure_tensor(image, sigma)
# determinant
detA = Axx * Ayy - Axy ** 2
# trace
traceA = Axx + Ayy
w = np.zeros_like(image, dtype=np.double)
q = np.zeros_like(image, dtype=np.double)
mask = traceA != 0
w[mask] = detA[mask] / traceA[mask]
q[mask] = 4 * detA[mask] / traceA[mask] ** 2
return w, q
def corner_fast(image, n=12, threshold=0.15):
"""Extract FAST corners for a given image.
Parameters
----------
image : 2D ndarray
Input image.
n : int
Minimum number of consecutive pixels out of 16 pixels on the circle
        that should all be either brighter or darker w.r.t. the test pixel.
        A point c on the circle is darker w.r.t. the test pixel p if
`Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`. Also
stands for the n in `FAST-n` corner detector.
threshold : float
Threshold used in deciding whether the pixels on the circle are
brighter, darker or similar w.r.t. the test pixel. Decrease the
threshold when more corners are desired and vice-versa.
Returns
-------
response : ndarray
FAST corner response image.
References
----------
.. [1] Edward Rosten and Tom Drummond
"Machine Learning for high-speed corner detection",
http://www.edwardrosten.com/work/rosten_2006_machine.pdf
.. [2] Wikipedia, "Features from accelerated segment test",
https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test
Examples
--------
>>> from skimage.feature import corner_fast, corner_peaks
>>> square = np.zeros((12, 12))
>>> square[3:9, 3:9] = 1
>>> square.astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_fast(square, 9), min_distance=1)
array([[3, 3],
[3, 8],
[8, 3],
[8, 8]])
"""
image = _prepare_grayscale_input_2D(image)
image = np.ascontiguousarray(image)
response = _corner_fast(image, n, threshold)
return response
def corner_subpix(image, corners, window_size=11, alpha=0.99):
"""Determine subpixel position of corners.
A statistical test decides whether the corner is defined as the
intersection of two edges or a single peak. Depending on the classification
result, the subpixel corner location is determined based on the local
covariance of the grey-values. If the significance level for either
statistical test is not sufficient, the corner cannot be classified, and
the output subpixel position is set to NaN.
Parameters
----------
image : ndarray
Input image.
corners : (N, 2) ndarray
Corner coordinates `(row, col)`.
window_size : int, optional
Search window size for subpixel estimation.
alpha : float, optional
Significance level for corner classification.
Returns
-------
positions : (N, 2) ndarray
Subpixel corner positions. NaN for "not classified" corners.
References
----------
.. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/\
foerstner87.fast.pdf
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_harris, corner_peaks, corner_subpix
>>> img = np.zeros((10, 10))
>>> img[:5, :5] = 1
>>> img[5:, 5:] = 1
>>> img.astype(int)
array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
>>> coords = corner_peaks(corner_harris(img), min_distance=2)
>>> coords_subpix = corner_subpix(img, coords, window_size=7)
>>> coords_subpix
array([[ 4.5, 4.5]])
"""
# window extent in one direction
wext = (window_size - 1) // 2
image = pad(image, pad_width=wext, mode='constant', constant_values=0)
# add pad width, make sure to not modify the input values in-place
corners = safe_as_int(corners + wext)
# normal equation arrays
N_dot = np.zeros((2, 2), dtype=np.double)
N_edge = np.zeros((2, 2), dtype=np.double)
b_dot = np.zeros((2, ), dtype=np.double)
b_edge = np.zeros((2, ), dtype=np.double)
# critical statistical test values
redundancy = window_size ** 2 - 2
t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)
# coordinates of pixels within window
y, x = np.mgrid[- wext:wext + 1, - wext:wext + 1]
corners_subpix = np.zeros_like(corners, dtype=np.double)
for i, (y0, x0) in enumerate(corners):
# crop window around corner + border for sobel operator
miny = y0 - wext - 1
maxy = y0 + wext + 2
minx = x0 - wext - 1
maxx = x0 + wext + 2
window = image[miny:maxy, minx:maxx]
winx, winy = _compute_derivatives(window, mode='constant', cval=0)
# compute gradient squares and remove border
winx_winx = (winx * winx)[1:-1, 1:-1]
winx_winy = (winx * winy)[1:-1, 1:-1]
winy_winy = (winy * winy)[1:-1, 1:-1]
# sum of squared differences (mean instead of gaussian filter)
Axx = np.sum(winx_winx)
Axy = np.sum(winx_winy)
Ayy = np.sum(winy_winy)
# sum of squared differences weighted with coordinates
# (mean instead of gaussian filter)
bxx_x = np.sum(winx_winx * x)
bxx_y = np.sum(winx_winx * y)
bxy_x = np.sum(winx_winy * x)
bxy_y = np.sum(winx_winy * y)
byy_x = np.sum(winy_winy * x)
byy_y = np.sum(winy_winy * y)
# normal equations for subpixel position
N_dot[0, 0] = Axx
N_dot[0, 1] = N_dot[1, 0] = - Axy
N_dot[1, 1] = Ayy
N_edge[0, 0] = Ayy
N_edge[0, 1] = N_edge[1, 0] = Axy
N_edge[1, 1] = Axx
b_dot[:] = bxx_y - bxy_x, byy_x - bxy_y
b_edge[:] = byy_y + bxy_x, bxx_x + bxy_y
# estimated positions
try:
est_dot = np.linalg.solve(N_dot, b_dot)
est_edge = np.linalg.solve(N_edge, b_edge)
except np.linalg.LinAlgError:
# if image is constant the system is singular
corners_subpix[i, :] = np.nan, np.nan
continue
# residuals
ry_dot = y - est_dot[0]
rx_dot = x - est_dot[1]
ry_edge = y - est_edge[0]
rx_edge = x - est_edge[1]
# squared residuals
rxx_dot = rx_dot * rx_dot
rxy_dot = rx_dot * ry_dot
ryy_dot = ry_dot * ry_dot
rxx_edge = rx_edge * rx_edge
rxy_edge = rx_edge * ry_edge
ryy_edge = ry_edge * ry_edge
# determine corner class (dot or edge)
# variance for different models
var_dot = np.sum(winx_winx * ryy_dot - 2 * winx_winy * rxy_dot
+ winy_winy * rxx_dot)
var_edge = np.sum(winy_winy * ryy_edge + 2 * winx_winy * rxy_edge
+ winx_winx * rxx_edge)
# test value (F-distributed)
if var_dot < np.spacing(1) and var_edge < np.spacing(1):
t = np.nan
elif var_dot == 0:
t = np.inf
else:
t = var_edge / var_dot
# 1 for edge, -1 for dot, 0 for "not classified"
corner_class = int(t < t_crit_edge) - int(t > t_crit_dot)
if corner_class == -1:
corners_subpix[i, :] = y0 + est_dot[0], x0 + est_dot[1]
elif corner_class == 0:
corners_subpix[i, :] = np.nan, np.nan
elif corner_class == 1:
corners_subpix[i, :] = y0 + est_edge[0], x0 + est_edge[1]
# subtract pad width
corners_subpix -= wext
return corners_subpix
def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=0.1,
exclude_border=True, indices=True, num_peaks=np.inf,
footprint=None, labels=None):
"""Find corners in corner measure response image.
This differs from `skimage.feature.peak_local_max` in that it suppresses
multiple connected peaks with the same accumulator value.
Parameters
----------
* : *
See :py:meth:`skimage.feature.peak_local_max`.
Examples
--------
>>> from skimage.feature import peak_local_max
>>> response = np.zeros((5, 5))
>>> response[2:4, 2:4] = 1
>>> response
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 0.],
[ 0., 0., 1., 1., 0.],
[ 0., 0., 0., 0., 0.]])
>>> peak_local_max(response)
array([[2, 2],
[2, 3],
[3, 2],
[3, 3]])
>>> corner_peaks(response)
array([[2, 2]])
"""
peaks = peak_local_max(image, min_distance=min_distance,
threshold_abs=threshold_abs,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
indices=False, num_peaks=num_peaks,
footprint=footprint, labels=labels)
if min_distance > 0:
coords = np.transpose(peaks.nonzero())
for r, c in coords:
if peaks[r, c]:
peaks[r - min_distance:r + min_distance + 1,
c - min_distance:c + min_distance + 1] = False
peaks[r, c] = True
if indices is True:
return np.transpose(peaks.nonzero())
else:
return peaks
|
ofgulban/scikit-image
|
skimage/feature/corner.py
|
Python
|
bsd-3-clause
| 26,732
|
[
"Gaussian"
] |
26b9dfed7ea0b784046b4ab40a628ccd0207f44f60f9ec1dc89ae07c57e20e9e
|
#!/usr/bin/env python
"""
Use the 1D data interpolation/extrapolation problem to benchmark convergence
variance. Comparison of training methods, EKF vs SGD.
"""
# Dependencies
from __future__ import division
import numpy as np; npl = np.linalg
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import kalmann
# Get some noisy training data, a fun compact function
stdev = 0.05
U = np.arange(-10, 10, 0.2)
Y = np.exp(-U**2) + 0.5*np.exp(-(U-3)**2) + np.random.normal(0, stdev, len(U))
# Repeat fitting experiment many times
nepochs_ekf = 100; nepochs_sgd = 400
ekf_results = []; sgd_results = []
for i in xrange(50):
# Create two identical KNN's that will be trained differently
knn_ekf = kalmann.KNN(nu=1, ny=1, nl=10, neuron='logistic')
knn_sgd = kalmann.KNN(nu=1, ny=1, nl=10, neuron='logistic')
# Train
RMS_ekf, trcov = knn_ekf.train(nepochs=nepochs_ekf, U=U, Y=Y, method='ekf', P=0.5, Q=0, R=stdev**2, pulse_T=-1)
RMS_sgd, _ = knn_sgd.train(nepochs=nepochs_sgd, U=U, Y=Y, method='sgd', step=0.05, pulse_T=-1)
# Store results
ekf_results.append(RMS_ekf[-1])
sgd_results.append(RMS_sgd[-1])
# Evaluation
fig = plt.figure()
xlim = [0.33, 0.36]
fig.suptitle("Histogram of Final RMS Errors", fontsize=22)
ax = fig.add_subplot(2, 1, 1)
ax.hist(ekf_results, 20, normed=1)
ax.set_xlim(xlim)
ax.set_ylabel("Using EKF", fontsize=18)
ax.grid(True)
ax = fig.add_subplot(2, 1, 2)
ax.hist(sgd_results, 20, normed=1)
ax.set_xlim(xlim)
ax.set_ylabel("Using SGD", fontsize=18)
ax.set_xlabel("RMS", fontsize=18)
ax.grid(True)
fig2 = plt.figure()
ax = fig2.add_subplot(1, 1, 1)
ax.set_title("Trace of Covariance During Training", fontsize=22)
ax.plot(trcov)
ax.set_xlabel("Iteration", fontsize=16)
ax.grid(True)
plt.show()
|
jnez71/kalmaNN
|
demos/1d_fit_analysis.py
|
Python
|
mit
| 1,764
|
[
"NEURON"
] |
7d27c7aeae0ef9b04bb2985906ef9aa6d36119ce22c413bc25a3e0eb3fb5395d
|
# -*- coding: utf-8 -*-
"""
Ideally, this script should
take (user?) inputs to determine the x_predicted, y_predicted, and z_predicted arrays
take inputs from the second script to enter in data (that is, the temperature (T_time_avg_3d) and position (xyz_observed) data that was augmented in the second script).
TODO:
output the interpolated data. Specifically, T_prediction, y_prediction_MSE, and sigma.
Having at least the GP interpolation as a function
Created on Fri Feb 13 17:31:06 2015
@author: Richard Decal, decal@uw.edu
"""
from scipy.optimize import curve_fit
from sklearn import gaussian_process
import numpy as np
import matplotlib.pyplot as plt
import process_3D_data
def predictionLocations():
"""prediction locations, make ##finish your sentences, points deducted -Richard
TODO: document what these magic numbers mean
Do we want to feed any input into this fxn?"""
x_predict = np.atleast_2d(np.linspace(0, 254, 25)) #2 mm prediction sites
y_predict = np.atleast_2d(np.linspace(100, 850, 15))
z_predict = np.atleast_2d(np.linspace(80, 280, 20))
x1,x2,x3 = np.meshgrid(x_predict, y_predict, z_predict)
xyz_predict = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size), x3.reshape(x3.size)]).T #TODO: what is this line for?
return xyz_predict
def noiseCalc(temp_stdev, Temp_time_avg_3D):
nugget = (temp_stdev/Temp_time_avg_3D)**2
nugget = nugget  # TODO: delete repeated measurement locations (currently a no-op)
return nugget
def gaussianProcess(T_sd, T_time_avg_3d, xyz_observed):
"""
TODO: feed params for gp (corr, thetas, etc) into the parent fxn?
"""
xyz_predict = predictionLocations() #not sure if the best practice would be to put this line inside main()
mynugget = noiseCalc(T_sd, T_time_avg_3d)
gp = gaussian_process.GaussianProcess(corr = 'absolute_exponential',
theta0 = 1./25,
thetaL = 1e-1,
thetaU = .3,
normalize = True,
nugget = mynugget)
#when height = 1, thetaL = .1, thetaU = .3
#when height = 0, thetaL = 10e-2,thetaU = .3
gp.fit(xyz_observed.T, T_time_avg_3d)
#Target value error will come up with that last repeated row. It can't have
#multiple measurements at the same location. Consider deleting that repeated
#last row of measurements or take a mean or stack the timeseries onto the
first measurement, which will effectively average the values.
T_prediction, y_prediction_MSE = gp.predict(xyz_predict, eval_MSE = True) #produce predicted y values
sigma = np.sqrt(y_prediction_MSE) #get SD of fit at each x_predicted location (for confidence interval)
return T_prediction, y_prediction_MSE, sigma
def main():
#load vars from second script
T_sd, T_time_avg_3d, xyz_observed = process_3D_data.main()
#run Gaussian process
T_prediction, y_prediction_MSE, sigma = gaussianProcess(T_sd, T_time_avg_3d, xyz_observed)
return T_prediction, y_prediction_MSE, sigma
if __name__ == "__main__":
T_prediction, y_prediction_MSE, sigma = main()
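# A minimal sketch (assuming the 25 x / 15 y / 20 z grid built in
# predictionLocations() and numpy's default 'xy' meshgrid ordering) of how the
# interpolated output mentioned in the TODO above could be reshaped and saved;
# uncomment to use after running this file as a script.
# T_grid = T_prediction.reshape(15, 25, 20)  # (y, x, z) grid of predicted temperatures
# sigma_grid = sigma.reshape(15, 25, 20)     # matching grid of prediction standard deviations
# np.savez("gp_interpolation.npz", T=T_prediction, mse=y_prediction_MSE, sigma=sigma)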
|
isomerase/GP-temperature-interpolation
|
GaussianProc_predictor_3D.py
|
Python
|
mit
| 3,256
|
[
"Gaussian"
] |
00266f6ef9e9f611dcc7f66d12364cf76db6608dec35efc9a6a802db2e2e8948
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Autosuspend udev rule generator
This script is executed at build time to generate udev rules. The
resulting rules file is installed on the device, the script itself
is not.
"""
from __future__ import print_function
# List of USB devices (vendorid:productid) for which it is safe to enable
# autosuspend.
USB_IDS = []
# Host Controllers and internal hubs
USB_IDS += [
# Linux Host Controller (UHCI) (most older x86 boards)
'1d6b:0001',
# Linux Host Controller (EHCI) (all boards)
'1d6b:0002',
# Linux Host Controller (XHCI) (most newer boards)
'1d6b:0003',
# SMSC (Internal HSIC Hub) (most Exynos boards)
'0424:3503',
# Intel (Rate Matching Hub) (all x86 boards)
'05e3:0610',
# Intel (Internal Hub?) (peppy, falco)
'8087:0024',
# Genesys Logic (Internal Hub) (rambi)
'8087:8000',
# Microchip (Composite HID + CDC) (kefka)
'04d8:0b28',
]
# Webcams
USB_IDS += [
# Chicony (zgb)
'04f2:b1d8',
# Chicony (mario)
'04f2:b262',
# Chicony (stout)
'04f2:b2fe',
# Chicony (butterfly)
'04f2:b35f',
# Chicony (rambi)
'04f2:b443',
# Chicony (glados)
'04f2:b552',
# LiteOn (spring)
'058f:b001',
# Foxlink? (butterfly)
'05c8:0351',
# Foxlink? (butterfly)
'05c8:0355',
# Cheng Uei? (falco)
'05c8:036e',
# SuYin (parrot)
'064e:d251',
# Realtek (falco)
'0bda:571c',
# IMC Networks (squawks)
'13d3:5657',
# Sunplus (parrot)
'1bcf:2c17',
# (C-13HDO10B39N) (alex)
'2232:1013',
# (C-10HDP11538N) (lumpy)
'2232:1017',
# (Namuga) (link)
'2232:1033',
# (C-03FFM12339N) (daisy)
'2232:1037',
# (C-10HDO13531N) (peach)
'2232:1056',
# (NCM-G102) (samus)
'2232:6001',
# Acer (stout)
'5986:0299',
]
# Bluetooth Host Controller
USB_IDS += [
# Hon-hai (parrot)
'0489:e04e',
# Hon-hai (peppy)
'0489:e056',
# Hon-hai (Kahlee)
'0489:e09f',
# QCA6174A (delan)
'0489:e0a2',
# LiteOn (parrot)
'04ca:3006',
# LiteOn (aleena)
'04ca:3016',
# LiteOn (scarlet)
'04ca:301a',
# Realtek (blooglet)
'0bda:b00c',
# Atheros (stumpy, stout)
'0cf3:3004',
# Atheros (AR3011) (mario, alex, zgb)
'0cf3:3005',
# Atheros (stumyp)
'0cf3:3007',
# Atheros (butterfly)
'0cf3:311e',
# Atheros (scarlet)
'0cf3:e300',
# Marvell (rambi)
'1286:2046',
# Marvell (gru)
'1286:204e',
# Intel (rambi, samus)
'8087:07dc',
# Intel (strago, glados)
'8087:0a2a',
# Intel (octopus)
'8087:0aaa',
# Intel (hatch)
'8087:0026',
# Intel (atlas)
'8087:0025',
]
# WWAN (LTE)
USB_IDS += [
# Huawei (ME936) (kip)
'12d1:15bb',
# Fibocom (L850-GL) (coral, nautilus, sarien)
'2cb7:0007',
]
# Mass Storage
USB_IDS += [
# Genesys (SD card reader) (lumpy, link, peppy)
'05e3:0727',
# Realtek (SD card reader) (mario, alex)
'0bda:0138',
# Realtek (SD card reader) (helios)
'0bda:0136',
# Realtek (SD card reader) (falco)
'0bda:0177',
]
# Security Key
USB_IDS += [
# Yubico.com
'1050:0211',
# Yubico.com (HID firmware)
'1050:0200',
# Google Titan key
'18d1:5026',
]
# USB Audio devices
USB_IDS += [
# Google USB-C to 3.5mm Digital Headphone Jack Adapter 'Mir'
'18d1:5025',
# Google USB-C to 3.5mm Digital Headphone Jack Adapter 'Mir' (HID only)
'18d1:5029',
# Google USB-C to 3.5mm Digital Headphone Jack Adapter 2018 'Condor'
'18d1:5034',
# Google Pixel USB-C Earbuds 'Blackbird'
'18d1:5033',
# Libratone Q Adapt In-Ear USB-C Earphones, Made for Google
'03eb:2433',
# Moshi USB-C to 3.5 mm Adapter/Charger, Made for Google
'282b:48f0',
# Moshi USB-C to 3.5 mm Adapter/Charger, Made for Google (HID only)
'282b:0026',
# AiAiAi TMA-2 C60 Cable, Made for Google
'0572:1a08',
# Apple USB-C to 3.5mm Headphone Jack Adapter
'05ac:110a',
]
# List of PCI devices (vendorid:deviceid) for which it is safe to enable
# autosuspend.
PCI_IDS = []
# Intel
PCI_IDS += [
# Host bridge
'8086:590c',
# i915
'8086:591e',
# proc_thermal
'8086:1903',
# SPT PCH xHCI controller
'8086:9d2f',
# CNP PCH xHCI controller
'8086:9ded',
# intel_pmc_core
'8086:9d21',
# i801_smbus
'8086:9d23',
# iwlwifi
'8086:095a',
# GMM
'8086:1911',
# Thermal
'8086:9d31',
# MME
'8086:9d3a',
# CrOS EC
'8086:9d4b',
# PCH SPI
'8086:9d24',
# SATA
'8086:02d3',
# RAM memory
'8086:02ef',
# ISA bridge
'8086:0284',
# Communication controller
'8086:02e0',
# Network controller
'8086:02f0',
# Serial bus controller
'8086:02a4',
# USB controller
'8086:02ed',
# Graphics
'8086:9b41',
# DSP
'8086:02f9',
# Host bridge
'8086:9b61',
# Host bridge
'8086:9b71',
# PCI Bridge
'8086:02b0',
# i915 (atlas)
'8086:591c',
# iwlwifi (atlas)
'8086:2526',
# i915 (kefka)
'8086:22b1',
# proc_thermal (kefka)
'8086:22dc',
# xchi_hdc (kefka)
'8086:22b5',
# snd_hda (kefka)
'8086:2284',
# pcieport (kefka)
'8086:22c8',
'8086:22cc',
# lpc_ich (kefka)
'8086:229c',
# iosf_mbi_pci (kefka)
'8086:2280',
]
# Samsung
PCI_IDS += [
# NVMe KUS030205M-B001
'144d:a806',
# NVMe MZVLB256HAHQ
'144d:a808',
]
# Lite-on
PCI_IDS += [
# 3C07110288
'14a4:9100',
]
# Seagate
PCI_IDS += [
# ZP256CM30011
'7089:5012',
]
# Kingston
PCI_IDS += [
# RBUSNS8154P3128GJ3
'2646:5008',
]
# Do not edit below this line. #################################################
UDEV_RULE = """\
ACTION!="add", GOTO="autosuspend_end"
SUBSYSTEM!="i2c|pci|usb", GOTO="autosuspend_end"
SUBSYSTEM=="i2c", GOTO="autosuspend_i2c"
SUBSYSTEM=="pci", GOTO="autosuspend_pci"
SUBSYSTEM=="usb", GOTO="autosuspend_usb"
# I2C rules
LABEL="autosuspend_i2c"
ATTR{name}=="cyapa", ATTR{power/control}="on", GOTO="autosuspend_end"
GOTO="autosuspend_end"
# PCI rules
LABEL="autosuspend_pci"
%(pci_rules)s\
GOTO="autosuspend_end"
# USB rules
LABEL="autosuspend_usb"
%(usb_rules)s\
GOTO="autosuspend_end"
# Enable autosuspend
LABEL="autosuspend_enable"
TEST=="power/control", ATTR{power/control}="auto", GOTO="autosuspend_end"
LABEL="autosuspend_end"
"""
def main():
pci_rules = ''
for dev_ids in PCI_IDS:
vendor, device = dev_ids.split(':')
pci_rules += ('ATTR{vendor}=="0x%s", ATTR{device}=="0x%s", '
'GOTO="autosuspend_enable"\n' % (vendor, device))
usb_rules = ''
for dev_ids in USB_IDS:
vid, pid = dev_ids.split(':')
usb_rules += ('ATTR{idVendor}=="%s", ATTR{idProduct}=="%s", '
'GOTO="autosuspend_enable"\n' % (vid, pid))
print(UDEV_RULE % {'pci_rules': pci_rules, 'usb_rules': usb_rules})
if __name__ == '__main__':
main()
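# Illustrative sample of the generated rules (derived from the templates in
# main() above): each USB entry expands to a line such as
#   ATTR{idVendor}=="1d6b", ATTR{idProduct}=="0002", GOTO="autosuspend_enable"
# and each PCI entry to a line such as
#   ATTR{vendor}=="0x8086", ATTR{device}=="0x591e", GOTO="autosuspend_enable"
# inserted into the UDEV_RULE template between the corresponding LABEL sections.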
|
walyong/systemd
|
tools/chromiumos/gen_autosuspend_rules.py
|
Python
|
gpl-2.0
| 7,167
|
[
"Octopus"
] |
6c5305b284bfc9ad035b070ecf782fc52c064e5690cbbf883abc40637e7d5649
|
# Version: 0.17
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
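A minimal `setup.cfg` section (the file names and prefixes below are only
illustrative; pick values that match your project layout) looks like:
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix = v
    parentdir_prefix = myproject-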
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
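As a purely illustrative example, a checkout that is two commits past a "0.11"
tag and carries local edits might yield:
    {'version': '0.11+2.g1076c97.dirty',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': True,
     'error': None,
     'date': '2017-05-01T14:26:36-0700'}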
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
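As a hypothetical illustration, with `tag_prefix = "v"` and a checkout that
`git describe` reports as "v0.11-2-g1076c97-dirty", the styles render as:
    pep440        -> 0.11+2.g1076c97.dirty
    pep440-post   -> 0.11.post2.dev0+g1076c97
    git-describe  -> 0.11-2-g1076c97-dirty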
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r", encoding="utf-8") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.17) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename, encoding="utf-8") as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w", encoding="utf-8") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w", encoding="utf-8") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w", encoding="utf-8") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a", encoding="utf-8") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w", encoding="utf-8") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r", encoding="utf-8") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a", encoding="utf-8") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r", encoding="utf-8") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a", encoding="utf-8") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a", encoding="utf-8") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r", encoding="utf-8") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| CINPLA/exdir | versioneer.py | Python | mit | 68,821 | ["Brian"] | e194903e591c769a92f08ff86db25eb3bab9771b5b00738211590c1081adebdd |
#!/usr/local/bin/python
# encoding: utf-8
"""
*Import ned_d catalogue into sherlock-catalogues database*
:Author:
David Young
"""
from __future__ import print_function
from __future__ import division
from builtins import zip
from past.utils import old_div
import sys
import os
os.environ['TERM'] = 'vt100'
import readline
import csv
import time
import glob
import pickle
import codecs
import string
import re
from fundamentals.mysql import writequery, readquery
from astrocalc.coords import unit_conversion
from sloancone import check_coverage
from neddy import namesearch
from docopt import docopt
from ._base_importer import _base_importer
class ned_d(_base_importer):
"""
*Import the* `NED-D <https://ned.ipac.caltech.edu/Library/Distances/>`_ *galaxy catalogue into the sherlock-catalogues database*
**Key Arguments**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``pathToDataFile`` -- path to the ned_d data file
- ``version`` -- version of the ned_d catalogue
- ``catalogueName`` -- the name of the catalogue
**Usage**
To import the ned_d catalogue, run the following:
```python
from sherlock.imports import ned_d
catalogue = ned_d(
log=log,
settings=settings,
pathToDataFile="/path/to/ned_d.txt",
version="1.0",
catalogueName="ned_d"
)
catalogue.ingest()
```
.. todo ::
- abstract this module out into its own stand alone script
"""
# INITIALISATION
def ingest(self):
"""Import the ned_d catalogue into the catalogues database
The method first generates a list of python dictionaries from the ned_d datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage**
See class docstring for usage
.. todo ::
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``get`` method')
dictList = self._create_dictionary_of_ned_d()
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
tableName = self.dbTableName
createStatement = u"""
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`Method` varchar(150) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`dist_derived_from_sn` varchar(150) DEFAULT NULL,
`dist_in_ned_flag` varchar(10) DEFAULT NULL,
`dist_index_id` mediumint(9) DEFAULT NULL,
`dist_mod` double DEFAULT NULL,
`dist_mod_err` double DEFAULT NULL,
`dist_mpc` double DEFAULT NULL,
`galaxy_index_id` mediumint(9) DEFAULT NULL,
`hubble_const` double DEFAULT NULL,
`lmc_mod` double DEFAULT NULL,
`notes` varchar(500) DEFAULT NULL,
`primary_ned_id` varchar(150) DEFAULT NULL,
`redshift` double DEFAULT NULL,
`ref` varchar(150) DEFAULT NULL,
`ref_date` int(11) DEFAULT NULL,
`master_row` tinyint(4) DEFAULT '0',
`major_diameter_arcmin` double DEFAULT NULL,
`ned_notes` varchar(700) DEFAULT NULL,
`object_type` varchar(100) DEFAULT NULL,
`redshift_err` double DEFAULT NULL,
`redshift_quality` varchar(100) DEFAULT NULL,
`magnitude_filter` varchar(10) DEFAULT NULL,
`minor_diameter_arcmin` double DEFAULT NULL,
`morphology` varchar(50) DEFAULT NULL,
`hierarchy` varchar(50) DEFAULT NULL,
`galaxy_morphology` varchar(50) DEFAULT NULL,
`radio_morphology` varchar(50) DEFAULT NULL,
`activity_type` varchar(50) DEFAULT NULL,
`in_ned` tinyint(4) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`eb_v` double DEFAULT NULL,
`sdss_coverage` TINYINT DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `galaxy_index_id_dist_index_id` (`galaxy_index_id`,`dist_index_id`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
DROP VIEW IF EXISTS `view_%(tableName)s_master_recorders`;
CREATE
VIEW `view_%(tableName)s_master_recorders` AS
(SELECT
`%(tableName)s`.`primary_ned_id` AS `primary_ned_id`,
`%(tableName)s`.`object_type` AS `object_type`,
`%(tableName)s`.`raDeg` AS `raDeg`,
`%(tableName)s`.`decDeg` AS `decDeg`,
`%(tableName)s`.`dist_mpc` AS `dist_mpc`,
`%(tableName)s`.`dist_mod` AS `dist_mod`,
`%(tableName)s`.`dist_mod_err` AS `dist_mod_err`,
`%(tableName)s`.`Method` AS `dist_measurement_method`,
`%(tableName)s`.`redshift` AS `redshift`,
`%(tableName)s`.`redshift_err` AS `redshift_err`,
`%(tableName)s`.`redshift_quality` AS `redshift_quality`,
`%(tableName)s`.`major_diameter_arcmin` AS `major_diameter_arcmin`,
`%(tableName)s`.`minor_diameter_arcmin` AS `minor_diameter_arcmin`,
`%(tableName)s`.`magnitude_filter` AS `magnitude_filter`,
`%(tableName)s`.`eb_v` AS `gal_eb_v`,
`%(tableName)s`.`hierarchy` AS `hierarchy`,
`%(tableName)s`.`morphology` AS `morphology`,
`%(tableName)s`.`radio_morphology` AS `radio_morphology`,
`%(tableName)s`.`activity_type` AS `activity_type`,
`%(tableName)s`.`ned_notes` AS `ned_notes`,
`%(tableName)s`.`in_ned` AS `in_ned`,
`%(tableName)s`.`primaryId` AS `primaryId`
FROM
`%(tableName)s`
WHERE
(`%(tableName)s`.`master_row` = 1));
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self._clean_up_columns()
self._get_metadata_for_galaxies()
self._update_sdss_coverage()
self.log.debug('completed the ``get`` method')
return None
def _create_dictionary_of_ned_d(
self):
"""create a list of dictionaries containing all the rows in the ned_d catalogue
**Return**
- ``dictList`` - a list of dictionaries containing all the rows in the ned_d catalogue
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_create_dictionary_of_ned_d`` method')
count = 0
with open(self.pathToDataFile, 'r') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
totalRows = sum(1 for row in csvReader)
csvFile.close()
totalCount = totalRows
with open(self.pathToDataFile, 'r') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
theseKeys = []
dictList = []
for row in csvReader:
if len(theseKeys) == 0:
totalRows -= 1
if "Exclusion Code" in row and "Hubble const." in row:
for i in row:
if i == "redshift (z)":
theseKeys.append("redshift")
elif i == "Hubble const.":
theseKeys.append("hubble_const")
elif i == "G":
theseKeys.append("galaxy_index_id")
elif i == "err":
theseKeys.append("dist_mod_err")
elif i == "D (Mpc)":
theseKeys.append("dist_mpc")
elif i == "Date (Yr. - 1980)":
theseKeys.append("ref_date")
elif i == "REFCODE":
theseKeys.append("ref")
elif i == "Exclusion Code":
theseKeys.append("dist_in_ned_flag")
elif i == "Adopted LMC modulus":
theseKeys.append("lmc_mod")
elif i == "m-M":
theseKeys.append("dist_mod")
elif i == "Notes":
theseKeys.append("notes")
elif i == "SN ID":
theseKeys.append("dist_derived_from_sn")
elif i == "method":
theseKeys.append("dist_method")
elif i == "Galaxy ID":
theseKeys.append("primary_ned_id")
elif i == "D":
theseKeys.append("dist_index_id")
else:
theseKeys.append(i)
continue
if len(theseKeys):
count += 1
if count > 1:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (old_div(float(count), float(totalCount))) * 100.
print(
"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory" % locals())
rowDict = {}
for t, r in zip(theseKeys, row):
rowDict[t] = r
if t == "ref_date":
try:
rowDict[t] = int(r) + 1980
except:
rowDict[t] = None
if rowDict["dist_index_id"] != "999999":
dictList.append(rowDict)
csvFile.close()
self.log.debug(
'completed the ``_create_dictionary_of_ned_d`` method')
return dictList
def _clean_up_columns(
self):
"""clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
tableName = self.dbTableName
print("cleaning up %(tableName)s columns" % locals())
sqlQuery = u"""
set sql_mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"""
update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;
update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = "";
update %(tableName)s set notes = null where notes = "";
update %(tableName)s set redshift = null where redshift = 0;
update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = "";
update %(tableName)s set hubble_const = null where hubble_const = 0;
update %(tableName)s set lmc_mod = null where lmc_mod = 0;
update %(tableName)s set master_row = 0;
update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None
def _get_metadata_for_galaxies(
self):
"""get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_get_metadata_for_galaxies`` method')
total, batches = self._count_galaxies_requiring_metadata()
print("%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals())
totalBatches = self.batches
thisCount = 0
# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE
while self.total:
thisCount += 1
self._get_3000_galaxies_needing_metadata()
dictList = self._query_ned_and_add_results_to_database(thisCount)
self.add_data_to_database_table(
dictList=dictList,
createStatement=False
)
self._count_galaxies_requiring_metadata()
self.log.debug('completed the ``_get_metadata_for_galaxies`` method')
return None
def _count_galaxies_requiring_metadata(
self):
""" count galaxies requiring metadata
**Return**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_count_galaxies_requiring_metadata`` method')
tableName = self.dbTableName
sqlQuery = u"""
select count(*) as count from %(tableName)s where master_row = 1 and in_ned is null
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
self.total = rows[0]["count"]
self.batches = int(old_div(self.total, 3000.)) + 1
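# illustrative: 7500 outstanding galaxies -> int(7500/3000) + 1 = 3 batches of up to 3000 names each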
if self.total == 0:
self.batches = 0
self.log.debug(
'completed the ``_count_galaxies_requiring_metadata`` method')
return self.total, self.batches
def _get_3000_galaxies_needing_metadata(
self):
""" get 3000 galaxies needing metadata
**Return**
- ``len(self.theseIds)`` -- the number of NED IDs returned
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_get_3000_galaxies_needing_metadata`` method')
tableName = self.dbTableName
# SELECT THE DATA FROM NED TABLE
self.theseIds = {}
sqlQuery = u"""
select primaryId, primary_ned_id from %(tableName)s where master_row = 1 and in_ned is null limit 3000;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
self.theseIds[row["primary_ned_id"]] = row["primaryId"]
self.log.debug(
'completed the ``_get_3000_galaxies_needing_metadata`` method')
return len(self.theseIds)
def _query_ned_and_add_results_to_database(
self,
batchCount):
""" query ned and add results to database
**Key Arguments**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_query_ned_and_add_results_to_database`` method')
tableName = self.dbTableName
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print("requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals())
search = namesearch(
log=self.log,
names=list(self.theseIds.keys()),
quiet=True
)
results = search.get()
print("results returned from ned -- starting to add to database" % locals())
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
colList = ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter",
"ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "in_ned"]
if not len(results):
for k, v in list(self.theseIds.items()):
dictList.append({
"in_ned": 0,
"primaryID": v
})
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in list(thisDict.items()):
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, ("".__class__, u"".__class__)) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
if thisDict["ra"] != "null" and thisDict["dec"] != "null":
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
else:
thisDict["raDeg"] = None
thisDict["decDeg"] = None
thisDict["in_ned"] = 1
thisDict["eb_v"] = thisDict["eb-v"]
row = {}
row["primary_ned_id"] = thisDict["input_name"]
try:
row["primaryID"] = self.theseIds[thisDict["input_name"]]
for c in colList:
if thisDict[c] == "null":
row[c] = None
else:
row[c] = thisDict[c]
dictList.append(row)
except:
g = thisDict["input_name"]
self.log.error(
"Cannot find database table %(tableName)s primaryID for '%(g)s'\n\n" % locals())
dictList.append({
"in_ned": 0,
"primary_ned_id": thisDict["input_name"]
})
else:
dictList.append({
"primary_ned_id": thisDict["input_name"],
"in_ned": 0,
"primaryID": self.theseIds[thisDict["input_name"]]
})
self.log.debug(
'completed the ``_query_ned_and_add_results_to_database`` method')
return dictList
def _update_sdss_coverage(
self):
""" update sdss coverage
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_update_sdss_coverage`` method')
tableName = self.dbTableName
# SELECT THE LOCATIONS NEEDING TO BE CHECKED
sqlQuery = u"""
select primary_ned_id, primaryID, raDeg, decDeg, sdss_coverage from %(tableName)s where sdss_coverage is null and master_row = 1 and in_ned = 1 order by dist_mpc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
totalCount = len(rows)
count = 0
for row in rows:
count += 1
if count > 1:
# Cursor up three lines and clear
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (old_div(float(count), float(totalCount))) * 100.
primaryID = row["primaryID"]
raDeg = float(row["raDeg"])
decDeg = float(row["decDeg"])
primary_ned_id = row["primary_ned_id"]
# SDSS CAN ONLY ACCEPT 60 QUERIES/MIN
time.sleep(1.1)
print("%(count)s / %(totalCount)s (%(percent)1.1f%%) NED galaxies checked for SDSS coverage" % locals())
print("NED NAME: ", primary_ned_id)
# covered = True | False | 999 (i.e. not sure)
sdss_coverage = check_coverage(
log=self.log,
ra=raDeg,
dec=decDeg
).get()
if sdss_coverage == 999:
sdss_coverage_flag = "null"
elif sdss_coverage == True:
sdss_coverage_flag = 1
elif sdss_coverage == False:
sdss_coverage_flag = 0
else:
self.log.error('could not get sdss coverage')
sys.exit(0)
# UPDATE THE DATABASE FLAG
sqlQuery = u"""
update %(tableName)s set sdss_coverage = %(sdss_coverage_flag)s where primaryID = %(primaryID)s
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_update_sdss_coverage`` method')
return None
# use the tab-trigger below for new method
# xt-class-method
| thespacedoctor/sherlock | sherlock/imports/ned_d.py | Python | mit | 24,917 | ["Galaxy"] | 074dc285ae69327823e699803efcfb8109a32b2013eebb325be98ed8bcd7b8f9 |
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import datetime
from netCDF4 import Dataset
import netCDF4
import time_tools_attractor as ti
import io_tools_attractor as io
import data_tools_attractor as dt
import nowcasting as nw
import run_fieldextra_c1 as rf1
import run_fieldextra as rfe
def produce_radar_observation_with_accumulation(startValidTimeStr, endValidTimeStr, newAccumulationMin=10, domainSize=512, product='RZC',rainThreshold=0.08):
# base accumulation is 5 min
baseAccumMin = 5
accumFactor = int(newAccumulationMin/baseAccumMin)
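# illustrative: newAccumulationMin=10 with baseAccumMin=5 gives accumFactor=2,
# i.e. pairs of consecutive 5-min fields are summed into one 10-min field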
# datetime format
startValidTime = ti.timestring2datetime(startValidTimeStr)
endValidTime = ti.timestring2datetime(endValidTimeStr)
leadTimeMin = int((endValidTime - startValidTime).total_seconds()/60)
# start to compute the correct accumulation at t0
if newAccumulationMin>baseAccumMin:
startTimeToPass = startValidTime - datetime.timedelta(minutes=newAccumulationMin-baseAccumMin)
else:
startTimeToPass = startValidTime
startTimeToPassStr = ti.datetime2timestring(startTimeToPass)
with np.errstate(invalid='ignore'):
radar_observations_5min, radar_mask_5min, timestamps, r = nw.get_radar_observations(startTimeToPassStr, leadTimeMin+newAccumulationMin-baseAccumMin, product=product, rainThreshold = 0)
# convert to mm for computing accumulations
radar_observations_5min = radar_observations_5min/60*baseAccumMin
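# illustrative: a constant rate of 12 mm/h over one 5-min frame corresponds to 12/60*5 = 1 mm of accumulation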
# aggregate to new accumulation
if newAccumulationMin>baseAccumMin:
radar_observations_new = nw.aggregate_in_time(radar_observations_5min,timeAccumMin=newAccumulationMin,type='sum')
radar_mask_new = nw.aggregate_in_time(radar_mask_5min,timeAccumMin=newAccumulationMin,type='nansum')
radar_mask_new[radar_mask_new==0] = np.nan
radar_mask_new[radar_mask_new>0] = 1
timestamps = timestamps[accumFactor-1::accumFactor]
else:
radar_observations_new = radar_observations_5min
radar_mask_new = radar_mask_5min
# convert to mm/h
radar_observations_new = radar_observations_new/newAccumulationMin*60
# Apply rain threshold
radar_observations_new[radar_observations_new<=rainThreshold] = 0
# [time,y,x]
radar_observations_new = np.rollaxis(radar_observations_new,2,0)
radar_mask_new = np.rollaxis(radar_mask_new,2,0)
return radar_observations_new,radar_mask_new,timestamps
def get_radar_extrapolation(startValidTimeStr,endValidTimeStr, newAccumulationMin=10, domainSize=512, rainThreshold=0.08, product='RZC', outBaseDir='/scratch/ned/data/'):
# datetime format
startValidTime = ti.timestring2datetime(startValidTimeStr)
endValidTime = ti.timestring2datetime(endValidTimeStr)
leadTimeMin = int((endValidTime - startValidTime).total_seconds()/60)
# Check if the nc file already exists
year = startValidTime.year
yearStr = str(year)[2:4]
julianDay = startValidTime.timetuple().tm_yday
julianDayStr = '%03i' % julianDay
yearJulianStr = yearStr + julianDayStr
outDir = outBaseDir + startValidTime.strftime("%Y") + '/' + startValidTime.strftime("%y") + julianDayStr + '/'
fcstName = 'radar-extrapolation_' + startValidTime.strftime("%Y%m%d%H%M") + '_' + str(int(leadTimeMin/60)) + 'hours'
fcstFile = r'%s' % (outDir + fcstName + '.nc')
# base accumulation is 5 min
baseAccumMin = 5
accumFactor = int(newAccumulationMin/baseAccumMin)
# if the forecast file does not exist yet, produce it
if os.path.isfile(fcstFile) == False:
# produce 5-min radar extrapolation
radar_extrapolation_5min, timestamps_5min = nw.radar_extrapolation(startValidTimeStr,leadTimeMin,finalDomainSize=domainSize,product=product,rainThreshold=rainThreshold)
if product=='RZC':
r = io.read_bin_image(startValidTimeStr, fftDomainSize=domainSize, inBaseDir = '/scratch/ned/data/')
else:
r = io.read_gif_image(startValidTimeStr, fftDomainSize=domainSize, inBaseDir = '/scratch/ned/data/')
if accumFactor>1:
# convert to mm
radar_extrapolation_5min = radar_extrapolation_5min/60*baseAccumMin
# aggregate to new accumulation (to match COSMO1 resolution)
radar_extrapolation_new = nw.aggregate_in_time(radar_extrapolation_5min,timeAccumMin=newAccumulationMin,type='sum')
timestamps_new = timestamps_5min[accumFactor-1::accumFactor]
# convert to mm/h
radar_extrapolation_new = radar_extrapolation_new/newAccumulationMin*60
# get observations at t0 [mm/h]
radar_observations_t0, _, _ = produce_radar_observation_with_accumulation(startValidTimeStr, startValidTimeStr, newAccumulationMin, domainSize=domainSize, product=product)
radar_observations_t0 = np.squeeze(radar_observations_t0)[:,:,None]
else:
# no need to aggregate forecasts
radar_extrapolation_new = radar_extrapolation_5min
timestamps_new = timestamps_5min
# get observations at t0
radar_observations_t0 = r.rainrate[:,:,None]
# add observation for t0
radar_extrapolation_new = np.concatenate((radar_observations_t0,radar_extrapolation_new),axis=2)
timestamps_new.insert(0,startValidTime)
timestamps_new = np.array(timestamps_new)
# [time,y,x]
radar_extrapolation_new = np.rollaxis(radar_extrapolation_new,2,0)
# save netcdf
Xcoords = r.subXcoords
Ycoords = r.subYcoords
save_3darray_netcdf(fcstFile, radar_extrapolation_new, 'radar_extrapolation',\
timestamps_new,Xcoords,Ycoords)
print('Read: ' + fcstFile)
# Now read the NetCDF file
radar_extrapolation, timestamps, Xcoords, Ycoords = load_3darray_netcdf(fcstFile)
testplot=False
if testplot:
n=0
while n<100:
n+=1
for t in xrange(timestamps.shape[0]):
plt.clf()
plt.imshow(radar_extrapolation[t,:,:],interpolation ='nearest',vmin=0,vmax=65, extent=[Xcoords.min(), Xcoords.max(), Ycoords.min(), Ycoords.max()])
plt.title(timestamps[t])
plt.pause(1)
return radar_extrapolation, timestamps, Xcoords, Ycoords
def get_ensemble_radar_extrapolation(startValidTimeStr,endValidTimeStr, NumberMembers = 2, NumberLevels = 1, newAccumulationMin=10, rainThreshold = 0.08, domainSize=512, local_level = 0, seed = 42, product='RZC', outBaseDir='/scratch/ned/data/'):
# datetime format
startValidTime = ti.timestring2datetime(startValidTimeStr)
endValidTime = ti.timestring2datetime(endValidTimeStr)
leadTimeMin = int((endValidTime - startValidTime).total_seconds()/60)
# Check if the nc file already exists
year = startValidTime.year
yearStr = str(year)[2:4]
julianDay = startValidTime.timetuple().tm_yday
julianDayStr = '%03i' % julianDay
yearJulianStr = yearStr + julianDayStr
outDir = outBaseDir + startValidTime.strftime("%Y") + '/' + startValidTime.strftime("%y") + julianDayStr + '/'
fcstName = 'ensemble-radar-extrapolation_' + startValidTime.strftime("%Y%m%d%H%M") + '_' + str(int(leadTimeMin/60)) + 'hours' + '_' + str(NumberMembers) + 'members' + '_' + str(NumberLevels) + 'levels' + '_' + str(newAccumulationMin) + 'min' + '_' + product + '_' + str(domainSize) + 'km' + '_' + str(local_level) + 'local_seed' + str(seed)
fcstNameMask = fcstName + '_mask'
fcstFile = r'%s' % (outDir + fcstName + '.nc')
fcstFileMask = r'%s' % (outDir + fcstNameMask + '.nc')
# base accumulation is 5 min
baseAccumMin = 5
accumFactor = int(newAccumulationMin/baseAccumMin)
# if the forecast file does not exist yet, produce it
if os.path.isfile(fcstFile) == False:
# produce 5-min radar extrapolation
radar_extrapolation_5min, timestamps_5min, radarMask_5min = nw.probabilistic_radar_extrapolation(startValidTimeStr,leadTimeMin,finalDomainSize=domainSize,NumberMembers=NumberMembers,NumberLevels=NumberLevels,product=product,local_level=local_level,seed=seed)
if product=='RZC':
r = io.read_bin_image(startValidTimeStr, fftDomainSize=domainSize, product=product, inBaseDir = '/scratch/ned/data/')
else:
r = io.read_gif_image(startValidTimeStr, fftDomainSize=domainSize, product=product, inBaseDir = '/scratch/ned/data/')
# print(radarMask_5min.shape)
if accumFactor>1:
# convert to mm
radar_extrapolation_5min = radar_extrapolation_5min/60*baseAccumMin
# aggregate to new accumulation (to match COSMO1 resolution)
for m in xrange(NumberMembers):
radar_extrapolation_new_member = nw.aggregate_in_time(radar_extrapolation_5min[:,:,:,m],timeAccumMin=newAccumulationMin,type='sum')
if m==0:
radar_extrapolation_new = np.zeros((radar_extrapolation_new_member.shape[0],radar_extrapolation_new_member.shape[1],radar_extrapolation_new_member.shape[2],NumberMembers))
radar_extrapolation_new[:,:,:,m] = radar_extrapolation_new_member
timestamps_new = timestamps_5min[accumFactor-1::accumFactor]
radarMask_new = radarMask_5min[:,:,accumFactor-1::accumFactor]
# print(radarMask_new.shape)
# convert to mm/h
radar_extrapolation_new = radar_extrapolation_new/newAccumulationMin*60
# get observations at t0 [mm/h]
radar_observations_t0, _, _ = produce_radar_observation_with_accumulation(startValidTimeStr, startValidTimeStr, newAccumulationMin, domainSize, product=product)
radar_observations_t0 = np.squeeze(radar_observations_t0)
radarmask_t0 = r.mask.copy()
radarmask_t0 = np.array(np.isnan(radarmask_t0),dtype=int)
# print(radarmask_t0.shape)
else:
# no need to aggregate forecasts
radar_extrapolation_new = radar_extrapolation_5min
timestamps_new = timestamps_5min
radarMask_new = radarMask_5min
# get observations at t0
radar_observations_t0 = r.rainrate.copy()
radarmask_t0 = r.mask.copy()
radarmask_t0 = np.array(np.isnan(radarmask_t0),dtype=int)
radar_observations_t0_allmembers = np.zeros((radar_observations_t0.shape[0],radar_observations_t0.shape[1],NumberMembers))
for m in xrange(NumberMembers):
radar_observations_t0_allmembers[:,:,m] = radar_observations_t0.copy()
radar_observations_t0_allmembers = radar_observations_t0_allmembers[:,:,None,:]
radarmask_t0 = radarmask_t0[:,:,None]
# print(radarmask_t0.shape)
# add observation for t0
radar_extrapolation_new = np.concatenate((radar_observations_t0_allmembers,radar_extrapolation_new),axis=2)
radarMask_new = np.concatenate((radarmask_t0,radarMask_new),axis=2)
timestamps_new.insert(0,startValidTime)
timestamps_new = np.array(timestamps_new)
# [time,member,y,x]
radar_extrapolation_new = np.rollaxis(radar_extrapolation_new,2,0)
radar_extrapolation_new = np.rollaxis(radar_extrapolation_new,3,1)
radarMask_new = np.rollaxis(radarMask_new,2,0)
# Apply rain threshold
radar_extrapolation_new[radar_extrapolation_new<=rainThreshold] = 0
# save netcdf
Xcoords = r.subXcoords
Ycoords = r.subYcoords
save_4darray_netcdf(fcstFile, radar_extrapolation_new, 'ensemble_radar_extrapolation',\
timestamps_new,NumberMembers,Xcoords,Ycoords)
save_3darray_netcdf(fcstFileMask, radarMask_new, 'ensemble_radar_extrapolation_mask',\
timestamps_new,Xcoords,Ycoords)
print('Read: ' + fcstFile)
# Now read the NetCDF file
ensemble_radar_extrapolation, timestamps, members, Ycoords, Xcoords = load_4darray_netcdf(fcstFile)
if os.path.isfile(fcstFileMask):
ensemble_radar_extrapolation_mask, _, _, _ = load_3darray_netcdf(fcstFileMask)
else:
print('Radar mask file not found.')
ensemble_radar_extrapolation_mask = np.zeros((timestamps.size,ensemble_radar_extrapolation.shape[2],ensemble_radar_extrapolation.shape[3]))*np.nan
testplot=False
if testplot:
r = io.read_gif_image(startValidTimeStr)
nmember=0
n=0
while n<100:
n+=1
for t in xrange(timestamps.shape[0]):
plt.clf()
plt.imshow(ensemble_radar_extrapolation[t,nmember,:,:],interpolation ='nearest',norm=r.norm,cmap=r.cmap)
plt.title(timestamps[t])
plt.pause(1)
return ensemble_radar_extrapolation, timestamps, members, Xcoords, Ycoords, ensemble_radar_extrapolation_mask
def get_cosmo1(startValidTimeStr, endValidTimeStr,domainSize=512, leadTimeMin = 12*60, outBaseDir='/scratch/ned/data/',rainThreshold=0.08):
# Get most recent run
analysisTimeStr = rf1.find_nearest_run_cosmo1(startValidTimeStr)
analysisTime = ti.timestring2datetime(analysisTimeStr)
# Check if the nc file already exists
year = analysisTime.year
yearStr = str(year)[2:4]
julianDay = analysisTime.timetuple().tm_yday
julianDayStr = '%03i' % julianDay
yearJulianStr = yearStr + julianDayStr
outDir = outBaseDir + analysisTime.strftime("%Y") + '/' + analysisTime.strftime("%y") + julianDayStr + '/'
fcstName = analysisTime.strftime("%Y%m%d%H%M") + '_' + str(int(leadTimeMin/60)) + 'hours'
fcstFile = r'%s' % (outDir + 'cosmo-1_TOT_PREC_' + fcstName + '.nc')
# if the forecast file does not exist yet, call fieldextra to extract it
if os.path.isfile(fcstFile) == False:
rf1.run_fieldextra_c1(analysisTimeStr,leadTimeMin,outBaseDir=outBaseDir)
print('Read: ' + fcstFile)
# Now read the NetCDF file
cosmo1_data, timestamps, Xcoords, Ycoords = load_3darray_netcdf(fcstFile)
# exclude first time step (it's all NaNs!)
cosmo1_data=cosmo1_data[1:,:,:]
timestamps=timestamps[1:]
# convert to mm/h
cosmo1_data = cosmo1_data*6
# flip and extract middle domain
new_data = np.zeros((cosmo1_data.shape[0],domainSize,domainSize))
for i in range(cosmo1_data.shape[0]):
# flip frames
cosmo1_data[i,:,:] = np.flipud(cosmo1_data[i,:,:])
# cut domain
if domainSize>0:
new_data[i,:,:] = dt.extract_middle_domain(cosmo1_data[i,:,:], domainSize, domainSize)
else:
new_data[i,:,:] = cosmo1_data[i,:,:]
cosmo1_data = new_data
# Apply rain threshold
cosmo1_data[cosmo1_data<=rainThreshold] = 0
# Get coordinates of reduced domain
if domainSize>0:
extent = dt.get_reduced_extent(Xcoords.shape[0], Ycoords.shape[0], domainSize, domainSize)
Xmin = Xcoords[extent[0]]
Ymin = Ycoords[extent[1]]
Xmax = Xcoords[extent[2]]
Ymax = Ycoords[extent[3]]
extent = (Xmin, Xmax, Ymin, Ymax)
subXcoords = np.arange(Xmin,Xmax,1000)
subYcoords = np.arange(Ymin,Ymax,1000)
# Extract timestamps
idxKeep1 = timestamps >= ti.timestring2datetime(startValidTimeStr)
idxKeep2 = timestamps <= ti.timestring2datetime(endValidTimeStr)
cosmo1_data = cosmo1_data[idxKeep1*idxKeep2,:,:]
timestamps = timestamps[idxKeep1*idxKeep2]
testplot=False
if testplot:
n=0
while n<100:
n+=1
for t in xrange(timestamps.shape[0]):
plt.clf()
plt.imshow(cosmo1_data[t,:,:],interpolation ='nearest',vmin=0,vmax=65, extent=[subXcoords.min(), subXcoords.max(), subYcoords.min(), subYcoords.max()])
plt.title(timestamps[t])
plt.pause(1)
return cosmo1_data, timestamps, subXcoords, subYcoords
def get_cosmoE(startValidTimeStr, endValidTimeStr, analysisTimeStr = [], domainSize=512, leadTimeMin = 24*60, outBaseDir='/scratch/ned/data/',rainThreshold=0.08, latencyTimeMin=100, overwrite = False):
# Get most recent run
if len(analysisTimeStr)==0:
analysisTimeStr = rfe.find_nearest_forecast(startValidTimeStr, 'cosmo-e', lat_timeMin = latencyTimeMin)
analysisTime = ti.timestring2datetime(analysisTimeStr)
leadtimeHrs = int (( ti.timestring2datetime(endValidTimeStr) - analysisTime ).total_seconds()/3600 )
# # Check if the nc file already exists
# year = analysisTime.year
# yearStr = str(year)[2:4]
# julianDay = analysisTime.timetuple().tm_yday
# julianDayStr = '%03i' % julianDay
# yearJulianStr = yearStr + julianDayStr
# outDir = outBaseDir + analysisTime.strftime("%Y") + '/' + analysisTime.strftime("%y") + julianDayStr + '/'
# # fcstName = analysisTime.strftime("%Y%m%d%H%M") + '_' + str(int(leadTimeMin/60)) + 'hours'
# fcstName = analysisTimeStr + '_' + startValidTimeStr + '_' + endValidTimeStr
# fcstFile = r'%s' % (outDir + 'cosmo-E_TOT_PREC_' + fcstName + '.nc')
# # if not call fieldextra
# if os.path.isfile(fcstFile) == False:
# rfe.run_fieldextra_ce(analysisTimeStr,startValidTimeStr,endValidTimeStr,outBaseDir=outBaseDir)
outFile = rfe.run_fieldextra_forecast(analysisTimeStr, leadtimeHrs, fieldName='TOT_PREC', outBaseDir = '/scratch/ned/data/', modelName='cosmo-e', deltaMin = 60, overwrite = overwrite)
print('Read: ' + outFile)
# Now read the NetCDF file
cosmoe_data, timestamps, members, Ycoords, Xcoords = load_4darray_netcdf(outFile)
# exclude first time step (it's all NaNs!)
# cosmoe_data=cosmoe_data[1:,:,:,:]
# timestamps=timestamps[1:]
# flip and extract middle domain
new_data = np.zeros((cosmoe_data.shape[0], cosmoe_data.shape[1],domainSize,domainSize))
for i in xrange(cosmoe_data.shape[0]):
for m in xrange(cosmoe_data.shape[1]):
# flip frames
cosmoe_data[i,m,:,:] = np.flipud(cosmoe_data[i,m,:,:])
# cut domain
if domainSize>0:
new_data[i,m,:,:] = dt.extract_middle_domain(cosmoe_data[i,m,:,:], domainSize, domainSize)
else:
new_data[i,m,:,:] = cosmoe_data[i,m,:,:]
cosmoe_data = new_data
# Apply rain threshold
cosmoe_data[cosmoe_data<=rainThreshold] = 0
# Get coordinates of reduced domain
if domainSize>0:
extent = dt.get_reduced_extent(Xcoords.shape[0], Ycoords.shape[0], domainSize, domainSize)
Xmin = Xcoords[extent[0]]
Ymin = Ycoords[extent[1]]
Xmax = Xcoords[extent[2]]
Ymax = Ycoords[extent[3]]
extent = (Xmin, Xmax, Ymin, Ymax)
subXcoords = np.arange(Xmin,Xmax,1000)
subYcoords = np.arange(Ymin,Ymax,1000)
# Extract timestamps
idxKeep1 = timestamps >= ti.timestring2datetime(startValidTimeStr)
idxKeep2 = timestamps <= ti.timestring2datetime(endValidTimeStr)
cosmoe_data = cosmoe_data[idxKeep1*idxKeep2,:,:,:]
timestamps = timestamps[idxKeep1*idxKeep2]
testplot=False
if testplot:
nmember=0
n=0
while n<100:
n+=1
for t in xrange(timestamps.shape[0]):
plt.clf()
plt.imshow(cosmoe_data[t,nmember,:,:],interpolation ='nearest',vmin=0,vmax=65, extent=[subXcoords.min(), subXcoords.max(), subYcoords.min(), subYcoords.max()])
plt.title(timestamps[t])
plt.pause(1)
return cosmoe_data, timestamps, subXcoords, subYcoords
def get_lagged_cosmo1(startValidTimeStr, endValidTimeStr, domainSize=512, leadTimeMin = 6*60, outBaseDir='/scratch/ned/data/', rainThreshold=0.08):
# get all individual forecasts' starting times
latencyTimeHr = 1#100/60
maxLeadTimeHr=33
fcstStarts,fcstMembers,fcstLeadTimeStartsMin,fcstLeadTimeStopsMin = rf1.get_lagged_ensemble_members(startValidTimeStr, endValidTimeStr, latencyTimeHr, maxLeadTimeHr)
nmembers = len(fcstMembers)
# if necessary, extract them with fieldextra
filesOut = []
analysisTimes = []
for n in xrange(nmembers):
print('----------------------------------------------------')
print(fcstMembers[n] + ', analysis time: ' + fcstStarts[n].strftime("%Y%m%d%H%M%S") + ', ' + str(fcstLeadTimeStartsMin[n]/60) + ' to ' + str(fcstLeadTimeStopsMin[n]/60) + ' hours.')
# Check if the nc file already exists
year = fcstStarts[n].year
yearStr = str(year)[2:4]
julianDay = fcstStarts[n].timetuple().tm_yday
julianDayStr = '%03i' % julianDay
yearJulianStr = yearStr + julianDayStr
outDir = outBaseDir + fcstStarts[n].strftime("%Y") + '/' + fcstStarts[n].strftime("%y") + julianDayStr + '/'
it_exists = False
LeadTimeToCheck = fcstLeadTimeStopsMin[n]/60
while (it_exists == False) and (LeadTimeToCheck <= maxLeadTimeHr):
fcstName = fcstStarts[n].strftime("%Y%m%d%H%M") + '_' + str(int(LeadTimeToCheck)) + 'hours'
fcstFile = r'%s' % (outDir + 'cosmo-1_TOT_PREC_' + fcstName + '.nc')
if os.path.isfile(fcstFile) == False:
LeadTimeToCheck += 1
else:
it_exists = True
break
if it_exists == False:
LeadTimeToGetHrs = np.minimum(maxLeadTimeHr, np.ceil(( fcstLeadTimeStopsMin[n]/60 )/12)*12)
fcstFile = rf1.run_fieldextra_c1(fcstStarts[n].strftime("%Y%m%d%H%M"),LeadTimeToGetHrs*60,outBaseDir=outBaseDir)
print('read: ' + fcstFile)
# Now read the NetCDF file
cosmo1_data, timestamps, Xcoords, Ycoords = load_3darray_netcdf(fcstFile)
# exclude first time step (it's all NaNs!)
cosmo1_data=cosmo1_data[1:,:,:]
timestamps=timestamps[1:]
# convert to mm/h
cosmo1_data = cosmo1_data*6
# flip and extract middle domain
new_data = np.zeros((cosmo1_data.shape[0],domainSize,domainSize))
for i in range(cosmo1_data.shape[0]):
# flip frames
cosmo1_data[i,:,:] = np.flipud(cosmo1_data[i,:,:])
# cut domain
if domainSize>0:
new_data[i,:,:] = dt.extract_middle_domain(cosmo1_data[i,:,:], domainSize, domainSize)
else:
new_data[i,:,:] = cosmo1_data[i,:,:]
cosmo1_data = new_data
# Apply rain threshold
cosmo1_data[cosmo1_data<=rainThreshold] = 0
# Extract timestamps
idxKeep1 = timestamps >= ti.timestring2datetime(startValidTimeStr)
idxKeep2 = timestamps <= ti.timestring2datetime(endValidTimeStr)
cosmo1_data = cosmo1_data[idxKeep1*idxKeep2,:,:]
timestamps = timestamps[idxKeep1*idxKeep2]
# Build 4D array with all members
if n==0:
cosmo1_lagged_data = np.zeros((cosmo1_data.shape[0],nmembers,cosmo1_data.shape[1],cosmo1_data.shape[2]))
cosmo1_lagged_data[:,n,:,:] = cosmo1_data.copy()
# Get coordinates of reduced domain
if domainSize>0:
extent = dt.get_reduced_extent(Xcoords.shape[0], Ycoords.shape[0], domainSize, domainSize)
Xmin = Xcoords[extent[0]]
Ymin = Ycoords[extent[1]]
Xmax = Xcoords[extent[2]]
Ymax = Ycoords[extent[3]]
extent = (Xmin, Xmax, Ymin, Ymax)
subXcoords = np.arange(Xmin,Xmax,1000)
subYcoords = np.arange(Ymin,Ymax,1000)
testplot=False
if testplot:
nmember=0
n=0
while n<100:
n+=1
for t in xrange(timestamps.shape[0]):
plt.clf()
plt.imshow(cosmo1_lagged_data[t,nmember,:,:],interpolation ='nearest',vmin=0,vmax=65, extent=[subXcoords.min(), subXcoords.max(), subYcoords.min(), subYcoords.max()])
plt.title(timestamps[t])
plt.pause(1)
return cosmo1_lagged_data, timestamps, subXcoords, subYcoords
def get_cosmoE10min(startValidTimeStr, endValidTimeStr, members = 'all', domainSize=512, outBaseDir='/scratch/ned/data/', cosmoBaseDir='/store/s83/tsm/EXP_TST/611/',rainThreshold=0.08,latencyTimeMin=100,lag=0,overwrite=False,useavailable=True):
# datetime format
startValidTime = ti.timestring2datetime(startValidTimeStr)
endValidTime = ti.timestring2datetime(endValidTimeStr)
timebounds = [startValidTime, endValidTime]
# Check if the single nc file already exists
if members=='all':
year = startValidTime.year
yearStr = str(year)[2:4]
julianDay = startValidTime.timetuple().tm_yday
julianDayStr = '%03i' % julianDay
yearJulianStr = yearStr + julianDayStr
outDir = outBaseDir + startValidTime.strftime("%Y") + '/' + startValidTime.strftime("%y") + julianDayStr + '/'
fcstName = 'COSMOE10min_' + startValidTime.strftime("%Y%m%d%H%M") + '_' + endValidTime.strftime("%Y%m%d%H%M") + '_lag' + str(lag) + '_ltMin' + str(int(latencyTimeMin))
fcstFile = r'%s' % (outDir + fcstName + '.nc')
else:
fcstFile = 'donotsavethisfile'
# if not load original forecasts
if (not os.path.isfile(fcstFile)) or overwrite:
print(fcstFile + ' not found.')
analysis_not_found = True
nloops = 0
while analysis_not_found and (nloops < 2):
# Get most recent run (or second most recent)
analysisTimeStr = rfe.find_nearest_forecast(startValidTimeStr, 'cosmo-e', lat_timeMin = latencyTimeMin, lag = lag)
print('run time: ',analysisTimeStr)
analysisTime = ti.timestring2datetime(analysisTimeStr)
# list of EPS members to load
if members == 'all':
members = range(21)
# Folder to the nc files
outDir = cosmoBaseDir + 'FCST' + analysisTime.strftime("%y") + '/' + analysisTime.strftime("%y%m%d%H") + '_611/output/'
if os.path.isdir(outDir) == False:
print('Folder not found: ' + outDir)
if useavailable:
startValidTime -= datetime.timedelta(hours=12)
startValidTimeStr = ti.datetime2timestring(startValidTime)
nloops+=1
else:
return None,None,None,None
else:
analysis_not_found=False
if analysis_not_found:
return None,None,None,None
# Load individual EPS member and merge them in one array
countm=0
for member in members:
thisFcstFile = outDir + 'cosmo-e_TOT_PREC_' + str(member).zfill(3) + '.nc'
if not os.path.isfile(thisFcstFile):
print('File not found: ' + thisFcstFile)
sys.exit()
else:
print('Read: ' + thisFcstFile)
# read the NetCDF file
# this_member, timestamps, Xcoords, Ycoords = load_3darray_netcdf_with_bounds(thisFcstFile, timebounds, domainSize) # need to fix this...
this_member, timestamps, Xcoords, Ycoords = load_3darray_netcdf(thisFcstFile)
# print(timebounds)
# print(this_member.shape)
# flip and merge together the members
if member == members[0]:
if domainSize>0:
cosmoe_data = np.zeros((this_member.shape[0], len(members), domainSize, domainSize))
else:
cosmoe_data = np.zeros((this_member.shape[0], len(members),this_member.shape[2],this_member.shape[3]))
for i in xrange(this_member.shape[0]):
# flip frames
this_frame = np.flipud(this_member[i,0,:,:])
if domainSize>0:
cosmoe_data[i,countm,:,:] = dt.extract_middle_domain(this_frame, domainSize, domainSize)
else:
cosmoe_data[i,countm,:,:] = this_frame
del this_member
countm+=1
# convert 10-min accumulations to rain rate in mm/h (factor 6)
cosmoe_data = cosmoe_data*6
# Get coordinates of reduced domain
if domainSize>0:
extent = dt.get_reduced_extent(Xcoords.shape[0], Ycoords.shape[0], domainSize, domainSize)
Xmin = Xcoords[extent[0]]
Ymin = Ycoords[extent[1]]
Xmax = Xcoords[extent[2]]
Ymax = Ycoords[extent[3]]
extent = (Xmin, Xmax, Ymin, Ymax)
Xcoords = np.arange(Xmin,Xmax,1000)
Ycoords = np.arange(Ymin,Ymax,1000)
else:
extent = (Xcoords.min(), Xcoords.max(), Ycoords.min(), Ycoords.max())
# Extract timestamps
idxKeep1 = timestamps >= timebounds[0]
idxKeep2 = timestamps <= timebounds[1]
idxKeep = np.logical_and(idxKeep1,idxKeep2)
cosmoe_data = cosmoe_data[idxKeep,:,:,:]
timestamps = timestamps[idxKeep]
# and store it before loading it again
if (not fcstFile=='donotsavethisfile'):
# print(fcstFile,cosmoe_data.shape,timestamps.shape,Xcoords.shape,Ycoords.shape)
save_4darray_netcdf(fcstFile, cosmoe_data, 'COSMO-E10min',\
timestamps,cosmoe_data.shape[1],Xcoords,Ycoords)
else:
print('Read: ' + fcstFile)
# Now read the NetCDF file
cosmoe_data, timestamps, members, Ycoords, Xcoords = load_4darray_netcdf(fcstFile)
# Apply rain threshold
cosmoe_data[cosmoe_data<=rainThreshold] = 0
testplot=False
if testplot:
nmember=0
n=0
while n<100:
n+=1
for t in xrange(timestamps.shape[0]):
plt.clf()
plt.imshow(10*np.log10(cosmoe_data[t,nmember,:,:]),interpolation ='nearest',vmin=-12,vmax=20, extent=[Xcoords.min(), Xcoords.max(), Ycoords.min(), Ycoords.max()])
plt.title(timestamps[t])
plt.pause(1)
return cosmoe_data, timestamps, Xcoords, Ycoords
def save_3darray_netcdf(fileName, dataArray, product,\
timestamps,xgrid,ygrid, \
noData=-99999.0):
# Get dimensions of output array to write
nx = dataArray.shape[2]; ny = dataArray.shape[1]
nt = dataArray.shape[0]
# Set no data values
dataArray[np.isnan(dataArray)] = noData
# Make folder if necessary
outDir = os.path.dirname(fileName)
cmd = 'mkdir -p ' + outDir
os.system(cmd)
# Create netCDF Dataset
w_nc_fid = netCDF4.Dataset(fileName, 'w', format='NETCDF4')
w_nc_fid.Conventions = 'CF-1.6'
w_nc_fid.ConventionsURL = 'http://www.unidata.ucar.edu/software/netcdf/conventions.html'
w_nc_fid.institution = 'MeteoSwiss, Locarno-Monti'
w_nc_fid.source = 'product: %s, product_category: determinist' % product
w_nc_fid.history = 'Produced the ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Time dimension
nrSamples = nt
w_nc_fid.createDimension('time', nrSamples) # Much larger file if putting 'None' (unlimited size)
w_nc_time = w_nc_fid.createVariable('time', 'float', dimensions=('time'))
w_nc_time.standard_name = 'time'
w_nc_time.long_name = 'time'
w_nc_time.units = 'seconds since %s' % timestamps[0].strftime("%Y-%m-%d %H:%M:%S")
w_nc_time.calendar = 'gregorian'
w_nc_time[:] = netCDF4.date2num(timestamps, units = w_nc_time.units, calendar = w_nc_time.calendar)
# Spatial dimension
dimNames = ['x_1','y_1']
dimensions = [int(nx),
int(ny)]
for i in range(len(dimensions)):
w_nc_fid.createDimension(dimNames[i],dimensions[i])
# Write out coordinates
w_nc_x = w_nc_fid.createVariable('x_1',np.dtype('float32').char,('x_1',))
w_nc_x.axis = 'X'
w_nc_x.long_name = 'x-coordinate in Swiss coordinate system'
w_nc_x.standard_name = 'projection_x_coordinate'
w_nc_x.units = 'm'
w_nc_x[:] = xgrid
w_nc_y = w_nc_fid.createVariable('y_1',np.dtype('float32').char,('y_1',))
w_nc_y.axis = 'Y'
w_nc_y.long_name = 'y-coordinate in Swiss coordinate system'
w_nc_y.standard_name = 'projection_y_coordinate'
w_nc_y.units = 'm'
w_nc_y[:] = ygrid
# Write out forecasts
w_nc_PRECIP_INT = w_nc_fid.createVariable('PRECIP_INT', np.dtype('float32').char, dimensions=('time', 'y_1', 'x_1'), zlib=True, fill_value=noData)
w_nc_PRECIP_INT.units = 'mm h-1'
w_nc_PRECIP_INT.long_name = 'Precipitation intensity'
w_nc_PRECIP_INT.coordinates = 'y_1 x_1'
w_nc_PRECIP_INT[:] = dataArray
w_nc_fid.close()
print('Saved: ' + fileName)
def save_4darray_netcdf(fileName, dataArray, product,\
timestamps,nmembers,xgrid,ygrid, \
noData=-99999.0):
# Get dimensions of output array to write
nx = dataArray.shape[3]; ny = dataArray.shape[2]
nt = dataArray.shape[0]
nm = dataArray.shape[1]
# Set no data values
dataArray[np.isnan(dataArray)] = noData
# Make folder if necessary
outDir = os.path.dirname(fileName)
cmd = 'mkdir -p ' + outDir
os.system(cmd)
# Create netCDF Dataset
w_nc_fid = netCDF4.Dataset(fileName, 'w', format='NETCDF4')
w_nc_fid.Conventions = 'CF-1.6'
w_nc_fid.ConventionsURL = 'http://www.unidata.ucar.edu/software/netcdf/conventions.html'
w_nc_fid.institution = 'MeteoSwiss, Locarno-Monti'
w_nc_fid.source = 'product: %s, product_category: probabilist' % product
w_nc_fid.history = 'Produced the ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Time dimension
nrSamples = nt
w_nc_fid.createDimension('time', nrSamples) # Much larger file if putting 'None' (unlimited size)
w_nc_time = w_nc_fid.createVariable('time', 'float', dimensions=('time'))
w_nc_time.standard_name = 'time'
w_nc_time.long_name = 'time'
w_nc_time.units = 'seconds since %s' % timestamps[0].strftime("%Y-%m-%d %H:%M:%S")
w_nc_time.calendar = 'gregorian'
w_nc_time[:] = netCDF4.date2num(timestamps, units = w_nc_time.units, calendar = w_nc_time.calendar)
# Member numbers
nrMembers = nm
w_nc_fid.createDimension('epsd_1', nrMembers)
w_nc_epsd = w_nc_fid.createVariable('epsd_1', 'int', dimensions=('epsd_1'))
w_nc_epsd.standard_name = 'Epsd1'
w_nc_epsd.long_name = 'Ensemble member number'
w_nc_epsd.units = 'member number'
w_nc_epsd[:] = range(nrMembers)
# Spatial dimension
dimNames = ['x_1','y_1']
dimensions = [int(nx),
int(ny)]
for i in range(len(dimensions)):
w_nc_fid.createDimension(dimNames[i],dimensions[i])
# Write out coordinates
w_nc_x = w_nc_fid.createVariable('x_1',np.dtype('float32').char,('x_1',))
w_nc_x.axis = 'X'
w_nc_x.long_name = 'x-coordinate in Swiss coordinate system'
w_nc_x.standard_name = 'projection_x_coordinate'
w_nc_x.units = 'm'
w_nc_x[:] = xgrid
w_nc_y = w_nc_fid.createVariable('y_1',np.dtype('float32').char,('y_1',))
w_nc_y.axis = 'Y'
w_nc_y.long_name = 'y-coordinate in Swiss coordinate system'
w_nc_y.standard_name = 'projection_y_coordinate'
w_nc_y.units = 'm'
w_nc_y[:] = ygrid
# Write out forecasts
w_nc_PRECIP_INT = w_nc_fid.createVariable('PRECIP_INT', np.dtype('float32').char, dimensions=('time', 'epsd_1', 'y_1', 'x_1'), zlib=True, fill_value=noData)
w_nc_PRECIP_INT.units = 'mm h-1'
w_nc_PRECIP_INT.long_name = 'Precipitation intensity'
w_nc_PRECIP_INT.coordinates = 'y_1 x_1'
w_nc_PRECIP_INT[:] = dataArray
w_nc_fid.close()
print('Saved: ' + fileName)
def load_3darray_netcdf(filename):
# read netcdf file
nc_fid = netCDF4.Dataset(filename, 'r', format='NETCDF4')
variableNames = [str(var) for var in nc_fid.variables]
# load time
time_var = nc_fid.variables['time']
timestamps = netCDF4.num2date(time_var[:],time_var.units)
# load coordinates
x = nc_fid.variables["x_1"][:]
y = nc_fid.variables["y_1"][:]
Xcoords = np.array(x).squeeze()
Ycoords = np.array(y).squeeze()
# load precip data
data = nc_fid.variables[variableNames[-1]]
noData = nc_fid.variables[variableNames[-1]]._FillValue
# convert to numpy array
data = np.array(data)
# change noData to Nan
data[data==noData] = np.nan
return data, timestamps, Xcoords, Ycoords
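# Illustrative round-trip sketch (for demonstration only): it assumes the
# module-level imports of numpy (np) and datetime used elsewhere in this file,
# and the save_3darray_netcdf / load_3darray_netcdf functions above. The /tmp
# path, grid origin and values are made up.
def _example_netcdf_roundtrip(fileName='/tmp/example_precip.nc'):
    # build a tiny 3D precipitation array (time, y, x) in mm/h
    nt, ny, nx = 3, 4, 5
    data = np.random.uniform(0, 10, size=(nt, ny, nx))
    # 10-min timestamps and a 1 km grid
    t0 = datetime.datetime(2017, 1, 1, 0, 0)
    timestamps = [t0 + datetime.timedelta(minutes=10 * k) for k in range(nt)]
    xgrid = np.arange(600000, 600000 + 1000 * nx, 1000)
    ygrid = np.arange(200000, 200000 + 1000 * ny, 1000)
    # write the array and read it back
    save_3darray_netcdf(fileName, data.copy(), 'EXAMPLE', timestamps, xgrid, ygrid)
    data_read, ts_read, x_read, y_read = load_3darray_netcdf(fileName)
    return data_read, ts_read, x_read, y_read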
def load_3darray_netcdf_with_bounds(filename, timebounds = [], domainSize = []):
print('Error: this function needs to be fixed. Aborting.')
sys.exit()
# read netcdf file
nc_fid = netCDF4.Dataset(filename, 'r', format='NETCDF4')
variableNames = [str(var) for var in nc_fid.variables]
# load time
time_var = nc_fid.variables['time']
timestamps = netCDF4.num2date(time_var[:],time_var.units)
# load coordinates
x = nc_fid.variables["x_1"][:]
y = nc_fid.variables["y_1"][:]
Xcoords = np.array(x).squeeze() - 500 # move coordinates to the lower left corner
Ycoords = np.array(y).squeeze() - 500
# prepare bounds
if len(timebounds) == 0:
timebounds = [timestamps[0], timestamps[-1]]
if isinstance(domainSize,int):
extent = dt.get_reduced_extent(Xcoords.shape[0], Ycoords.shape[0], domainSize, domainSize)
xbounds = [Xcoords[extent[0]], Xcoords[extent[2]]]
ybounds = [Ycoords[extent[1]], Ycoords[extent[3]]]
else:
xbounds = [Xcoords[0], Xcoords[-1]]
ybounds = [Ycoords[0], Ycoords[-1]]
# time lower and upper index
timeli = np.argmin( np.abs( timestamps - timebounds[0] ) )
timeui = np.argmin( np.abs( timestamps - timebounds[1] ) ) + 1
timestamps = timestamps[timeli:timeui]
# x coords lower and upper index
xli = np.argmin( np.abs( Xcoords - xbounds[0] ) )
xui = np.argmin( np.abs( Xcoords - xbounds[1] ) )
Xcoords = Xcoords[xli:xui]
# y coords lower and upper index
yli = np.argmin( np.abs( Ycoords - ybounds[0] ) )
yui = np.argmin( np.abs( Ycoords - ybounds[1] ) )
Ycoords = Ycoords[yli:yui]
# load precip data
data = nc_fid.variables[variableNames[-1]][timeli:timeui,:,xli:xui,yli:yui]
noData = nc_fid.variables[variableNames[-1]]._FillValue
# convert to numpy array
data = np.array(data)
# change noData to Nan
data[data==noData] = np.nan
return data, timestamps, Xcoords, Ycoords
def load_4darray_netcdf(filename):
# COSMO-E
# read netcdf file
nc_fid = netCDF4.Dataset(filename, 'r', format='NETCDF4')
variableNames = [str(var) for var in nc_fid.variables]
# load time
time_var = nc_fid.variables['time']
timestamps = netCDF4.num2date(time_var[:],time_var.units)
# load precip
data = nc_fid.variables[variableNames[-1]]
noData = data._FillValue
# convert to numpy array
data = np.array(data)
# change noData to Nan
data[data==noData] = np.nan
# load coordinates
x = nc_fid.variables["x_1"][:]
y = nc_fid.variables["y_1"][:]
Xcoords = np.array(x).squeeze()
Ycoords = np.array(y).squeeze()
# load member numbers
members_var = nc_fid.variables['epsd_1']
members = np.array(members_var)
return data, timestamps, members, Ycoords, Xcoords
def produce_forecast(timeStartStr,leadTimeMin):
produce_radar_observations = False
produce_radar_extrapolation = False
produce_cosmo_1 = True
# parameters
rainThreshold = 0.08
## RADAR OBSERVATIONS
if produce_radar_observations:
print('Retrieve radar observations for ' + timeStartStr + ' + ' + str(leadTimeMin) + ' min')
# start 5 min earlier to compute 10min accumulation at t0
startTimeToPass = ti.timestring2datetime(timeStartStr) - datetime.timedelta(minutes=5)
startTimeToPassStr = ti.datetime2timestring(startTimeToPass)
radar_observations_5min, r = nw.get_radar_observations(startTimeToPassStr, leadTimeMin+5, product='RZC')
# aggregate to 10-min forecast (to match COSMO1 resolution)
radar_observations_10min = nw.aggregate_in_time(radar_observations_5min,timeAccumMin=10,type='sum')
## EXTRAPOLATION FORECAST
if produce_radar_extrapolation:
print('Run radar extrapolation for ' + timeStartStr + ' + ' + str(leadTimeMin) + ' min')
# produce 5-min radar extrapolation
radar_extrapolation_5min, radar_mask_5min = nw.radar_extrapolation(timeStartStr,leadTimeMin, product='RZC')
# aggregate to 10-min forecast (to match COSMO1 resolution)
radar_extrapolation_10min = nw.aggregate_in_time(radar_extrapolation_5min,timeAccumMin=10,type='sum')
radar_mask_10min = nw.aggregate_in_time(radar_mask_5min,timeAccumMin=10,type='mean')
# add observation at t0
radar_extrapolation_10min = np.concatenate((radar_observations_10min[:,:,0,np.newaxis],radar_extrapolation_10min),axis=2)
## COSMO-1 FORECASTS
if produce_cosmo_1:
print('Retrieve COSMO-1 for ' + timeStartStr + ' + ' + str(leadTimeMin) + ' min')
cosmo1_10min = nw.get_cosmo1(timeStartStr, leadTimeMin)
cosmo1_10min[cosmo1_10min <= rainThreshold] = np.nan
# save_netcdf(radar_observations_10min,radar_extrapolation_10min,cosmo1_10min,\
# timeStartStr,leadTimeMin,\
# r.subXcoords,r.subYcoords)
def probability_matching(initialarray,targetarray):
# zeros in initial image
idxZeros = initialarray == 0
# flatten the arrays
arrayshape = initialarray.shape
target = targetarray.flatten()
array = initialarray.flatten()
# rank target values
order = target.argsort()
ranked = target[order]
# rank order of the initial values
orderin = array.argsort()
ranks = np.empty(len(array), int)
ranks[orderin] = np.arange(len(array))
# get ranked values from target and rearrange with initial order
outputarray = ranked[ranks]
# reshape as 2D array
outputarray = outputarray.reshape(arrayshape)
# reassign original zeros
outputarray[idxZeros] = 0
return outputarray
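# Illustrative usage sketch (for demonstration only): it assumes numpy (np)
# and the probability_matching function defined above; the values are made up.
# The output keeps the spatial rank order (and zeros) of `initial` while
# taking its amplitude distribution from `target`.
def _example_probability_matching():
    initial = np.array([[0.0, 1.0, 2.0],
                        [3.0, 0.0, 5.0]])
    target = np.array([[0.0, 0.0, 10.0],
                       [20.0, 40.0, 80.0]])
    matched = probability_matching(initial, target)
    return matched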
# Probability matched mean
def build_PMM(ensemble):
# Compute ensemble mean and build the rainrate PDF
try: # list
fieldShape = ensemble[0].shape
nmembers = len(ensemble)
ensemble_mean = np.zeros(fieldShape)
members_per_pixel = np.zeros(fieldShape)
for m in range(nmembers):
memberField = ensemble[m].copy()
idxNan = np.isnan(memberField)
ensemble_mean[~idxNan] += memberField[~idxNan]
members_per_pixel[~idxNan] += 1
if m==0:
rainrate_PDF = memberField.flatten()
else:
rainrate_PDF = np.concatenate((rainrate_PDF,memberField.flatten()))
except TypeError: # numpy array
fieldShape = ensemble[0,:,:].shape
nmembers = ensemble.shape[0]
ensemble_mean = np.zeros(fieldShape)
members_per_pixel = np.zeros(fieldShape)
for m in range(nmembers):
memberField = ensemble[m,:,:].copy()
idxNan = np.isnan(memberField)
ensemble_mean[~idxNan] += memberField[~idxNan]
members_per_pixel[~idxNan] += 1
if m==0:
rainrate_PDF = memberField.flatten()
else:
rainrate_PDF = np.concatenate((rainrate_PDF,memberField.flatten()))
ensemble_mean /= members_per_pixel
rainrate_PDF = np.sort(rainrate_PDF)[::-1]
rainrate_PDF_nth = rainrate_PDF[::nmembers]
pmm = probability_matching(ensemble_mean,rainrate_PDF_nth)
return pmm
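# Illustrative usage sketch (for demonstration only): it assumes numpy (np)
# and the build_PMM function above. A 3-member toy ensemble of 2x2 fields is
# enough to show that the result has the spatial pattern of the ensemble mean
# and the amplitude distribution of the pooled member values.
def _example_build_PMM():
    ensemble = np.array([
        [[0.0, 1.0], [2.0, 4.0]],
        [[0.0, 2.0], [3.0, 5.0]],
        [[0.0, 1.5], [2.5, 6.0]],
    ])
    pmm = build_PMM(ensemble)
    return pmm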
def load_n_random_radar_images(n,warThr = 10, yearStart = 2005, yearEnd = 2016, product='RZC'):
images=[];timestamps=[]
maxiter = n*50
iter = 0
naccepted = 0
while (naccepted < n) and (iter < maxiter):
iter+=1
# Compute random date
randomYear = int(np.random.uniform(yearStart,yearEnd+1,1))
randomMonth = int(np.random.uniform(1,13,1))
if randomMonth==12:
daysMonth = 31
else:
daysMonth = (datetime.date(randomYear, randomMonth+1, 1) - datetime.date(randomYear, randomMonth, 1)).days
randomDay = int(np.random.uniform(1,daysMonth+1))
randomHour = int(np.random.uniform(0,24))
randomMin = int(np.random.uniform(0,56))
randomMin = int(5 * round(randomMin/5))
randomDate = str(randomYear) + str(randomMonth).zfill(2) + str(randomDay).zfill(2) + str(randomHour).zfill(2) + str(randomMin).zfill(2)
if product=='RZC':
r = io.read_bin_image(randomDate, inBaseDir = '/scratch/ned/data/')
else:
r = io.read_gif_image(randomDate, inBaseDir = '/scratch/ned/data/')
if r.war > warThr:
naccepted+=1
images.append(r.rainrate)
timestamps.append(randomDate)
print(randomDate,int(r.war))
return images,timestamps
|
meteoswiss-mdr/precipattractor
|
pymodules/load_forecasts.py
|
Python
|
gpl-3.0
| 46,753
|
[
"NetCDF"
] |
a2acbbeaac11b0c199423288f13155a5f6da362d61783ab80ee9ace28bf6b747
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
repository_name = 'bed_to_gff_0130'
repository_description = "Converter: BED to GFF"
repository_long_description = "Convert bed to gff"
category_name = 'Test 0130 Datatype Converters'
category_description = 'Test 0130 Datatype Converters'
'''
1) Install the bed_to_gff_converter repository.
2) Make sure the page section to select a tool panel section is NOT displayed since the tool will not be displayed in the Galaxy tool panel.
3) Make sure the bed_to_gff_converter tool is not displayed in the tool panel.
'''
class TestDatatypeConverters( ShedTwillTestCase ):
'''Test features related to datatype converters.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts."""
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
galaxy_admin_user = self.test_db_util.get_galaxy_user( common.admin_email )
assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
galaxy_admin_user_private_role = self.test_db_util.get_galaxy_private_role( galaxy_admin_user )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
def test_0005_create_bed_to_gff_repository( self ):
'''Create and populate bed_to_gff_0130.'''
category = self.create_category( name=category_name, description=category_description )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
# Create a repository named bed_to_gff_0130 owned by user1.
repository = self.get_or_create_repository( name=repository_name,
description=repository_description,
long_description=repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
# Upload bed_to_gff_converter.tar to the repository, if the repository is new.
self.upload_file( repository,
filename='bed_to_gff_converter/bed_to_gff_converter.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded bed_to_gff_converter.tar.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_install_datatype_converter_to_galaxy( self ):
'''Install bed_to_gff_converter_0130 into the running Galaxy instance.'''
'''
We are at step 1 - Install the bed_to_gff_converter repository.
Install bed_to_gff_converter_0130, checking that the option to select the tool panel section is *not* displayed.
'''
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
repository = self.test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
preview_strings_displayed = [ repository.name, self.get_repository_tip( repository ) ]
strings_displayed = [ 'Choose the configuration file' ]
strings_not_displayed = [ 'tool panel section' ]
self.install_repository( repository_name,
common.test_user_1_name,
category_name,
install_tool_dependencies=False,
preview_strings_displayed=preview_strings_displayed,
strings_displayed=strings_displayed,
strings_not_displayed=strings_not_displayed,
post_submit_strings_displayed=[ repository.name, 'New' ],
includes_tools_for_display_in_tool_panel=False )
def test_0015_uninstall_and_verify_tool_panel_section( self ):
'''Uninstall bed_to_gff_converter_0130 and verify that the saved tool_panel_section is None.'''
'''
We are at step 3 - Make sure the bed_to_gff_converter tool is not displayed in the tool panel.
The previous tool panel section for a tool is only recorded in the metadata when a repository is uninstalled,
so we have to uninstall it first, then verify that it was not assigned a tool panel section.
'''
repository = self.test_db_util.get_installed_repository_by_name_owner( repository_name, common.test_user_1_name )
self.uninstall_repository( repository )
self.verify_installed_repository_no_tool_panel_section( repository )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_1130_datatype_converters.py
|
Python
|
gpl-3.0
| 5,777
|
[
"Galaxy"
] |
bdc0c9a4fbdac3852dde9db6160b1a6a71cd62a3cb35649cce09449d58fbfa7e
|
"""
This pipeline is intended to extract Gabor features from T2W images.
"""
import os
import numpy as np
from protoclass.data_management import T2WModality
from protoclass.data_management import GTModality
from protoclass.preprocessing import RicianNormalization
from protoclass.preprocessing import GaussianNormalization
from protoclass.extraction import GaborBankExtraction
# Define the path where all the patients are
path_patients = '/data/prostate/experiments'
# Define the path of the modality to normalize
path_t2w = 'T2W'
# Define the path of the ground truth for the prostate
path_gt = 'GT_inv/prostate'
# Define the label of the ground-truth which will be provided
label_gt = ['prostate']
# Define the path where the information for the gaussian normalization are
path_gaussian = '/data/prostate/pre-processing/mp-mri-prostate/gaussian-t2w'
# Define the path where the information for the rician normalization are
path_rician = '/data/prostate/pre-processing/mp-mri-prostate/rician-t2w'
# Define the path to store the Gabor features
path_store = '/data/prostate/extraction/mp-mri-prostate/gabor-t2w'
# ID of the patient for which we need to use the Gaussian Normalization
ID_GAUSSIAN = '387'
# Set the value of the extremum
EXTREM = (-4.48, 22.11)
# Generate the different path to be later treated
path_patients_list_t2w = []
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
for id_patient in id_patient_list:
# Append for the T2W data
path_patients_list_t2w.append(os.path.join(path_patients, id_patient,
path_t2w))
# Append for the GT data - Note that we need a list of GT paths
path_patients_list_gt.append([os.path.join(path_patients, id_patient,
path_gt)])
# List where to store the different minimum
for id_p, (p_t2w, p_gt) in enumerate(zip(path_patients_list_t2w,
path_patients_list_gt)):
print 'Processing {}'.format(id_patient_list[id_p])
# Remove a part of the string to have only the id
nb_patient = id_patient_list[id_p].replace('Patient ', '')
# Read the image data
t2w_mod = T2WModality()
t2w_mod.read_data_from_path(p_t2w)
# Read the GT
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt, p_gt)
if not nb_patient == ID_GAUSSIAN:
# Rician Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_rician, pat_chg)
t2w_norm = RicianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
else:
# Gaussian Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_gaussian, pat_chg)
t2w_norm = GaussianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
# Rescale the data on 8 bits
t2w_mod.data_ = ((t2w_mod.data_ - EXTREM[0]) *
(255. / (EXTREM[1] - EXTREM[0])))
# Update the histogram
t2w_mod.update_histogram()
# Create the different parameters for the filter bank
frequencies = np.linspace(0.05, 0.25, num=4, endpoint=True)
alphas = np.linspace(0., np.pi, num=4, endpoint=True)
gammas = np.linspace(0., 2. * np.pi, num=8, endpoint=True)
# We have less resolution in z
scale_sigmas = np.array([1., 1., 2.])
# Create the Gabor extractor
gab_ext = GaborBankExtraction(t2w_mod, frequencies=frequencies,
alphas=alphas, gammas=gammas,
scale_sigmas=scale_sigmas)
# Fit the data
print 'Compute the response for the Gabor filter bank'
gab_ext.fit(t2w_mod, ground_truth=gt_mod, cat=label_gt[0])
# Extract the data
print 'Extract only the necessary pixels'
data = gab_ext.transform(t2w_mod, ground_truth=gt_mod, cat=label_gt[0])
# Store the data
print 'Store the data in the right directory'
# Check that the path is existing
if not os.path.exists(path_store):
os.makedirs(path_store)
pat_chg = (id_patient_list[id_p].lower().replace(' ', '_') +
'_gabor_t2w.npy')
filename = os.path.join(path_store, pat_chg)
np.save(filename, data)
|
I2Cvb/mp-mri-prostate
|
pipeline/feature-extraction/t2w/pipeline_extraction_gabor_t2w.py
|
Python
|
mit
| 4,610
|
[
"Gaussian"
] |
1c3bda81cb1f0b688310e2f6aa9f1fb540a5dd89ead56dbd0848486b1ec93ddd
|
#!/usr/bin/python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Test restraints module.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import os
import math
import copy
import numpy as np
from simtk import openmm, unit
import openmmtools as mmtools
from openmmtools import testsystems, states, multistate
import nose
from nose.plugins.attrib import attr
import yank.restraints
from yank import experiment, Topography
from yank.analyze import YankMultiStateSamplerAnalyzer
from yank.utils import get_data_filename
OpenMM73 = yank.restraints.OpenMM73 # TODO: Document this
# =============================================================================================
# UNIT TESTS
# =============================================================================================
class HostGuestNoninteracting(testsystems.HostGuestVacuum):
"""CB7:B2 host-guest system in vacuum with no nonbonded interactions.
Parameters
----------
Same as HostGuestVacuum
Examples
--------
Create host:guest system with no nonbonded interactions.
>>> testsystem = HostGuestNoninteracting()
>>> system, positions = testsystem.system, testsystem.positions
Properties
----------
receptor_atoms : list of int
Indices of receptor atoms
ligand_atoms : list of int
Indices of ligand atoms
"""
def __init__(self, **kwargs):
super(HostGuestNoninteracting, self).__init__(**kwargs)
# Store receptor and ligand atom indices
self.receptor_atoms = range(0, 126)
self.ligand_atoms = range(126, 156)
# Remove nonbonded interactions
force_indices = {self.system.getForce(index).__class__.__name__: index
for index in range(self.system.getNumForces())}
self.system.removeForce(force_indices['NonbondedForce'])
@staticmethod
def build_test_case():
"""Create a new ThermodynamicState, SamplerState and Topography."""
# Create a test system
t = HostGuestNoninteracting()
# Create states and topography encoding the info to determine the parameters.
topography = Topography(t.topology, ligand_atoms='resname B2')
sampler_state = states.SamplerState(positions=t.positions)
thermodynamic_state = states.ThermodynamicState(system=t.system, temperature=300.0*unit.kelvin)
return thermodynamic_state, sampler_state, topography
expected_restraints = {
'Harmonic': yank.restraints.Harmonic,
'FlatBottom': yank.restraints.FlatBottom,
'Boresch': yank.restraints.Boresch,
'PeriodicTorsionBoresch': yank.restraints.PeriodicTorsionBoresch,
'RMSD': yank.restraints.RMSD,
}
restraint_test_yaml = """
---
options:
minimize: no
verbose: no
output_dir: %(output_directory)s
temperature: 300*kelvin
pressure: null
anisotropic_dispersion_cutoff: null
platform: CPU
hydrogen_mass: 3*amu
mcmc_moves:
langevin:
type: LangevinSplittingDynamicsMove
timestep: 4.0*femtoseconds
collision_rate: 1.0 / picosecond
n_steps: 50
reassign_velocities: yes
n_restart_attempts: 4
splitting: 'V R O R V'
samplers:
sams:
type: SAMSSampler
mcmc_moves: langevin
number_of_iterations: %(number_of_iter)s
state_update_scheme: global-jump
gamma0: 2.0
flatness_threshold: 5.0
online_analysis_interval: 200
online_analysis_minimum_iterations: 50
online_analysis_target_error: 0.1
repex:
type: ReplicaExchangeSampler
mcmc_moves: langevin
number_of_iterations: %(number_of_iter)s
online_analysis_interval: 50
online_analysis_minimum_iterations: 25
online_analysis_target_error: 0.1
solvents:
vacuum:
nonbonded_method: PME
nonbonded_cutoff: 0.59 * nanometer
systems:
ship:
phase1_path: [%(input_directory)s/benzene-toluene-standard-state/standard_state_complex.inpcrd, %(input_directory)s/benzene-toluene-standard-state/standard_state_complex.prmtop]
phase2_path: [%(input_directory)s/benzene-toluene-standard-state/standard_state_complex.inpcrd, %(input_directory)s/benzene-toluene-standard-state/standard_state_complex.prmtop]
ligand_dsl: resname ene
solvent: vacuum
protocols:
absolute-binding:
complex:
alchemical_path:
lambda_restraints: [0.0, 0.05, 0.10, 0.15, 0.25, 0.5, 0.75, 1.0]
lambda_electrostatics: [0.0, 0.00, 0.00, 0.00, 0.00, 0.0, 0.00, 0.0]
lambda_sterics: [0.0, 0.00, 0.00, 0.00, 0.00, 0.0, 0.00, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [0.0, 0.0]
lambda_sterics: [0.0, 0.0]
experiments:
system: ship
sampler: repex
protocol: absolute-binding
restraint:
type: %(restraint_type)s
"""
def general_restraint_run(options):
"""
Generalized restraint simulation run to test free energy = standard state correction.
options : Dict. A dictionary of substitutions for restraint_test_yaml
"""
with mmtools.utils.temporary_directory() as output_directory:
# TODO refactor this to use AlchemicalPhase API rather than a YAML script.
options['input_directory'] = get_data_filename(os.path.join('tests', 'data'))
options['output_directory'] = output_directory
# run both setup and experiment
yaml_builder = experiment.ExperimentBuilder(restraint_test_yaml % options)
yaml_builder.run_experiments()
# Estimate Free Energies
ncfile_path = os.path.join(output_directory, 'experiments', 'complex.nc')
reporter = multistate.MultiStateReporter(ncfile_path, open_mode='r')
#analyzer = multistate.MultiStateSamplerAnalyzer(reporter)
analyzer = YankMultiStateSamplerAnalyzer(reporter)
Deltaf_ij, dDeltaf_ij = analyzer.get_free_energy()
# Correct the sign for the fact that we are adding vs removing the restraints
DeltaF_simulated = Deltaf_ij[-1, 0]
dDeltaF_simulated = dDeltaf_ij[-1, 0]
print('Standard state correction:')
#ncfile = netcdf.Dataset(ncfile_path, 'r')
#print(ncfile.groups['metadata'].variables['standard_state_correction'][:])
#print(float(ncfile.groups['metadata'].variables['standard_state_correction'][:]))
#ncfile.close()
DeltaF_restraints = analyzer.get_standard_state_correction()
# Check if they are close
msg = ''
msg += 'Computed: %8.3f kT\n' % (DeltaF_restraints)
msg += 'Actual: %8.3f +- %8.3f kT\n' % (DeltaF_simulated, dDeltaF_simulated)
msg += 'ERROR: %8.3f +- %8.3f kT\n' % (DeltaF_restraints - DeltaF_simulated, dDeltaF_simulated)
# DEBUG
print(msg)
assert np.allclose(DeltaF_restraints, DeltaF_simulated, rtol=2*dDeltaF_simulated), 'Standard state correction is inaccurate.\n' + msg
@attr('slow') # Skip on Travis-CI
def test_harmonic_free_energy():
"""
Test that the harmonic restraint simulated free energy equals the standard state correction
"""
options = {'number_of_iter': '1000',
'restraint_type': 'Harmonic'}
general_restraint_run(options)
@attr('slow') # Skip on Travis-CI
def test_flat_bottom_free_energy():
"""
Test that the flat-bottom restraint simulated free energy equals the standard state correction
"""
options = {'number_of_iter': '1000',
'restraint_type': 'FlatBottom'}
general_restraint_run(options)
@attr('slow') # Skip on Travis-CI
def test_Boresch_free_energy():
"""
Test that the Boresch restraint simulated free energy equals the standard state correction
"""
# These need more samples to converge
options = {'number_of_iter': '1000',
'restraint_type': 'Boresch'}
general_restraint_run(options)
@attr('slow') # Skip on Travis-CI
def test_PeriodicTorsionBoresch_free_energy():
"""
Test that the periodic torsion Boresch restraint simulated free energy equals the standard state correction
"""
# These need more samples to converge
options = {'number_of_iter': '1000',
'restraint_type': 'PeriodicTorsionBoresch'}
general_restraint_run(options)
def test_harmonic_standard_state_analytical():
"""
Perform some analytical tests of the Harmonic standard state correction.
Also ensures that PBC is being handled correctly
"""
LJ_fluid = testsystems.LennardJonesFluid()
# Create Harmonic restraint.
restraint = yank.restraints.create_restraint('Harmonic', restrained_receptor_atoms=1)
# Determine other parameters.
ligand_atoms = [3, 4, 5]
topography = Topography(LJ_fluid.topology, ligand_atoms=ligand_atoms)
sampler_state = states.SamplerState(positions=LJ_fluid.positions)
thermodynamic_state = states.ThermodynamicState(system=LJ_fluid.system,
temperature=300.0 * unit.kelvin)
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
spring_constant = restraint.spring_constant
# Compute standard-state volume for a single molecule in a box of size (1 L) / (avogadros number)
liter = 1000.0 * unit.centimeters ** 3 # one liter
box_volume = liter / (unit.AVOGADRO_CONSTANT_NA * unit.mole) # standard state volume
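# The analytical reference below follows from the Gaussian configurational
# integral of a harmonic restraint energy of the form U(r) = K*r**2/2:
# integrating exp(-beta*U) over all space gives an effective volume
# (2*pi/(beta*K))**(3/2), so the standard-state correction in kT is
# -ln(box_volume / shell_volume).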
analytical_shell_volume = (2 * math.pi / (spring_constant * thermodynamic_state.beta))**(3.0/2)
analytical_standard_state_G = - math.log(box_volume / analytical_shell_volume)
restraint_standard_state_G = restraint.get_standard_state_correction(thermodynamic_state)
np.testing.assert_allclose(analytical_standard_state_G, restraint_standard_state_G)
def test_BoreschLike_standard_state_analytical():
"""
Perform some analytical tests of the Boresch standard state correction.
Also ensures that PBC is being handled correctly
"""
LJ_fluid = testsystems.LennardJonesFluid()
# Define receptor and ligand atoms
receptor_atoms = [0, 1, 2]
ligand_atoms = [3, 4, 5]
# Create restraint
K_r = 1.0*unit.kilocalories_per_mole/unit.angstrom**2
r_0 = 0.0*unit.angstrom
K_theta = 0.0*unit.kilocalories_per_mole/unit.degrees**2
theta_0 = 30.0*unit.degrees
topography = Topography(LJ_fluid.topology, ligand_atoms=ligand_atoms)
sampler_state = states.SamplerState(positions=LJ_fluid.positions)
thermodynamic_state = states.ThermodynamicState(system=LJ_fluid.system,
temperature=300.0 * unit.kelvin)
for restraint_name in ['Boresch', 'PeriodicTorsionBoresch']:
restraint = yank.restraints.create_restraint(restraint_name,
restrained_receptor_atoms=receptor_atoms,
restrained_ligand_atoms=ligand_atoms,
K_r=K_r, r_aA0=r_0,
K_thetaA=K_theta, theta_A0=theta_0,
K_thetaB=K_theta, theta_B0=theta_0,
K_phiA=K_theta, phi_A0=theta_0,
K_phiB=K_theta, phi_B0=theta_0,
K_phiC=K_theta, phi_C0=theta_0)
# Determine other parameters
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
# Compute standard-state volume for a single molecule in a box of size (1 L) / (avogadros number)
liter = 1000.0 * unit.centimeters ** 3 # one liter
box_volume = liter / (unit.AVOGADRO_CONSTANT_NA * unit.mole) # standard state volume
analytical_shell_volume = (2 * math.pi / (K_r * thermodynamic_state.beta))**(3.0/2)
analytical_standard_state_G = - math.log(box_volume / analytical_shell_volume)
restraint_standard_state_G = restraint.get_standard_state_correction(thermodynamic_state)
msg = 'Failed test for restraint {}'.format(restraint_name)
np.testing.assert_allclose(analytical_standard_state_G, restraint_standard_state_G, err_msg=msg)
# ==============================================================================
# RESTRAINT PARAMETER DETERMINATION
# ==============================================================================
def test_partial_parametrization():
"""The automatic restraint parametrization doesn't overwrite user values."""
# Create states and identify ligand/receptor.
test_system = testsystems.HostGuestVacuum()
topography = Topography(test_system.topology, ligand_atoms='resname B2')
sampler_state = states.SamplerState(positions=test_system.positions)
thermodynamic_state = states.ThermodynamicState(test_system.system,
temperature=300.0*unit.kelvin)
# Test case: (restraint_type, constructor_kwargs)
boresch = dict(restrained_ligand_atoms=[130, 131, 136], K_r=1.0*unit.kilojoule_per_mole/unit.angstroms**2)
test_cases = [
('Harmonic', dict(spring_constant=2.0*unit.kilojoule_per_mole/unit.nanometer**2,
restrained_receptor_atoms=[5])),
('FlatBottom', dict(well_radius=1.0*unit.angstrom, restrained_ligand_atoms=[130])),
('Boresch', boresch),
('PeriodicTorsionBoresch', boresch),
]
if OpenMM73.dev_validate:
test_cases.append(('RMSD', dict(restrained_ligand_atoms=[130, 131, 136],
K_RMSD=1.0 * unit.kilojoule_per_mole / unit.angstroms ** 2)))
for restraint_type, kwargs in test_cases:
state = copy.deepcopy(thermodynamic_state)
restraint = yank.restraints.create_restraint(restraint_type, **kwargs)
# Test-precondition: The restraint has undefined parameters.
with nose.tools.assert_raises(yank.restraints.RestraintParameterError):
restraint.restrain_state(state)
# The automatic parametrization maintains user values.
restraint.determine_missing_parameters(state, sampler_state, topography)
for parameter_name, parameter_value in kwargs.items():
assert getattr(restraint, parameter_name) == parameter_value
# The rest of the parameters have been determined.
restraint.get_standard_state_correction(state)
# The force has been configured correctly.
restraint.restrain_state(state)
system = state.system
for force in system.getForces():
# RadiallySymmetricRestraint between two single atoms.
if isinstance(force, openmm.CustomBondForce):
particle1, particle2, _ = force.getBondParameters(0)
assert particle1 == restraint.restrained_receptor_atoms[0]
assert particle2 == restraint.restrained_ligand_atoms[0]
# Boresch restraint.
elif isinstance(force, openmm.CustomCompoundBondForce):
particles, _ = force.getBondParameters(0)
assert particles == tuple(restraint.restrained_receptor_atoms + restraint.restrained_ligand_atoms)
# RMSD restraint.
elif OpenMM73.dev_validate and isinstance(force, openmm.CustomCVForce):
rmsd_cv = force.getCollectiveVariable(0)
particles = rmsd_cv.getParticles()
assert particles == tuple(restraint.restrained_receptor_atoms + restraint.restrained_ligand_atoms)
def restraint_selection_template(topography_ligand_atoms=None,
restrained_receptor_atoms=None,
restrained_ligand_atoms=None,
topography_regions=None):
"""The DSL atom selection works as expected."""
test_system = testsystems.HostGuestVacuum()
topography = Topography(test_system.topology, ligand_atoms=topography_ligand_atoms)
if topography_regions is not None:
for region, selection in topography_regions.items():
topography.add_region(region, selection)
sampler_state = states.SamplerState(positions=test_system.positions)
thermodynamic_state = states.ThermodynamicState(test_system.system,
temperature=300.0 * unit.kelvin)
# Initialize with DSL and without processing the string raises an error.
restraint = yank.restraints.Harmonic(spring_constant=2.0 * unit.kilojoule_per_mole / unit.nanometer ** 2,
restrained_receptor_atoms=restrained_receptor_atoms,
restrained_ligand_atoms=restrained_ligand_atoms)
with nose.tools.assert_raises(yank.restraints.RestraintParameterError):
restraint.restrain_state(thermodynamic_state)
# After parameter determination, the indices of the restrained atoms are correct.
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
assert len(restraint.restrained_receptor_atoms) == 14
assert len(restraint.restrained_ligand_atoms) == 30
# The bond force is configured correctly.
restraint.restrain_state(thermodynamic_state)
system = thermodynamic_state.system
for force in system.getForces():
if isinstance(force, openmm.CustomCentroidBondForce):
assert force.getBondParameters(0)[0] == (0, 1)
assert len(force.getGroupParameters(0)[0]) == 14
assert len(force.getGroupParameters(1)[0]) == 30
assert isinstance(force, openmm.CustomCentroidBondForce) # We have found a force.
def test_restraint_dsl_selection():
"""The DSL atom selection works as expected."""
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms="(resname CUC) and (name =~ 'O[0-9]+')",
restrained_ligand_atoms='resname B2')
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms="(resname CUC) and (name =~ 'O[0-9]+')",
restrained_ligand_atoms='(mass > 0.5) and (resname B2)')
def test_restraint_region_selection():
"""Test that the region atom selection works as expected"""
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms='choice_res_residue and the_oxygen',
restrained_ligand_atoms='choice_lig_residue',
topography_regions={'choice_lig_residue': 'resname B2',
'choice_res_residue': 'resname CUC',
'the_oxygen': "name =~ 'O[0-9]+'"})
def test_restraint_region_dsl_mix():
"""Test that the region atom selection works as expected"""
restraint_selection_template(topography_ligand_atoms='resname B2',
restrained_receptor_atoms='choice_res_residue and the_oxygen',
restrained_ligand_atoms='resname B2',
topography_regions={'choice_lig_residue': 'resname B2',
'choice_res_residue': 'resname CUC',
'the_oxygen': "name =~ 'O[0-9]+'"})
# ==============================================================================
# RESTRAINT FACTORY FUNCTIONS
# ==============================================================================
def test_available_restraint_classes():
"""Test to make sure expected restraint classes are available."""
available_restraint_classes = yank.restraints.available_restraint_classes()
available_restraint_types = yank.restraints.available_restraint_types()
# We shouldn't have `None` (from the base class) as an available type
assert None not in available_restraint_classes
assert None not in available_restraint_types
for restraint_type, restraint_class in expected_restraints.items():
msg = "Failed comparing restraint type '%s' with %s" % (restraint_type, str(available_restraint_classes))
assert restraint_type in available_restraint_classes, msg
assert available_restraint_classes[restraint_type] is restraint_class, msg
assert restraint_type in available_restraint_types, msg
def test_restraint_dispatch():
"""Test dispatch of various restraint types."""
thermodynamic_state, sampler_state, topography = HostGuestNoninteracting.build_test_case()
for restraint_type, restraint_class in expected_restraints.items():
# Trap the dev and ignore it
try:
valid = restraint_class.dev_validate
if not valid:
continue
except AttributeError:
pass
# Add restraints and determine parameters.
thermo_state = copy.deepcopy(thermodynamic_state)
restraint = yank.restraints.create_restraint(restraint_type)
restraint.determine_missing_parameters(thermo_state, sampler_state, topography)
# Check that we got the right restraint class.
assert restraint.__class__.__name__ == restraint_type
assert restraint.__class__ is restraint_class
def test_restraint_force_group():
"""Test that the restraint force should be placed in its own force group for optimization."""
thermodynamic_state, sampler_state, topography = HostGuestNoninteracting.build_test_case()
for restraint_type, restraint_class in expected_restraints.items():
# Trap the dev and ignore it
try:
valid = restraint_class.dev_validate
if not valid:
continue
except AttributeError:
pass
# Add restraints and determine parameters.
thermo_state = copy.deepcopy(thermodynamic_state)
restraint = yank.restraints.create_restraint(restraint_type)
restraint.determine_missing_parameters(thermo_state, sampler_state, topography)
restraint.restrain_state(thermo_state)
# Find the force group of the restraint force.
system = thermo_state.system
for force_idx, force in enumerate(system.getForces()):
try:
num_parameters = force.getNumGlobalParameters()
except AttributeError:
continue
for parameter_idx in range(num_parameters):
parameter_name = force.getGlobalParameterName(parameter_idx)
if parameter_name == 'lambda_restraints':
restraint_force_idx = force_idx
restraint_force_group = force.getForceGroup()
break
# No other force should have the same force group.
for force_idx, force in enumerate(system.getForces()):
if force_idx != restraint_force_idx:
assert force.getForceGroup() != restraint_force_group
# ==============================================================================
# RESTRAINT STATE
# ==============================================================================
class TestRestraintState(object):
"""Test class RestraintState."""
@classmethod
def setup_class(cls):
lysozyme = testsystems.LysozymeImplicit()
system, positions = lysozyme.system, lysozyme.positions
thermodynamic_state = states.ThermodynamicState(system, 300*unit.kelvin)
sampler_state = states.SamplerState(positions)
topography = Topography(lysozyme.topology, ligand_atoms='resname TMP')
cls.lysozyme_test_case = (thermodynamic_state, sampler_state, topography)
def get_restraint_cases(self):
for cls_name, cls in yank.restraints.available_restraint_classes().items():
# Create restraint and automatically determine parameters.
# Trap the dev and ignore it
try:
valid = cls.dev_validate
if not valid:
continue
except AttributeError:
pass
restraint = cls()
thermodynamic_state, sampler_state, topography = copy.deepcopy(self.lysozyme_test_case)
restraint.determine_missing_parameters(thermodynamic_state, sampler_state, topography)
# Apply restraint.
restraint.restrain_state(thermodynamic_state)
# Create compound state to control the strength of the restraint.
restraint_state = yank.restraints.RestraintState(lambda_restraints=1.0)
compound_state = states.CompoundThermodynamicState(thermodynamic_state=thermodynamic_state,
composable_states=[restraint_state])
yield compound_state
def test_apply_to_system(self):
"""The System parameters are updated when lambda_restraints is set on the compound state."""
for compound_state in self.get_restraint_cases():
# Test pre-condition.
assert compound_state.lambda_restraints == 1.0
# Changing the attribute changes the internal representation of a system.
compound_state.lambda_restraints = 0.5
for force, parameter_name, parameter_id in compound_state._get_system_controlled_parameters(
compound_state.system, parameters_name_suffix=None):
assert force.getGlobalParameterDefaultValue(parameter_id) == 0.5
def test_apply_to_context(self):
"""The Context parameters are updated when the compound state is applied."""
for compound_state in self.get_restraint_cases():
compound_state.lambda_restraints = 0.5
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
context = compound_state.create_context(integrator)
assert context.getParameter('lambda_restraints') == 0.5
compound_state.lambda_restraints = 0.0
compound_state.apply_to_context(context)
assert context.getParameter('lambda_restraints') == 0.0
del context, integrator
def test_compatibility(self):
"""States differing only by the strength of the restraint are compatible."""
unrestrained_system = self.lysozyme_test_case[0].system
for compound_state in self.get_restraint_cases():
compound_state.lambda_restraints = 1.0
compatible_state = copy.deepcopy(compound_state)
compatible_state.lambda_restraints = 0.0
assert compound_state.is_state_compatible(compatible_state)
# Trying to assign a System without a Restraint raises an error.
with nose.tools.assert_raises(mmtools.states.GlobalParameterError):
compound_state.system = unrestrained_system
def test_find_force_groups_to_update(self):
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
for compound_state in self.get_restraint_cases():
context = compound_state.create_context(copy.deepcopy(integrator))
# Find the restraint force group.
system = context.getSystem()
force, _, _ = next(yank.restraints.RestraintState._get_system_controlled_parameters(
system, parameters_name_suffix=None))
force_group = force.getForceGroup()
# No force group should be updated if we don't move.
assert compound_state._find_force_groups_to_update(context, compound_state, memo={}) == set()
# We need to update the force if the current state changes.
compound_state2 = copy.deepcopy(compound_state)
compound_state2.lambda_restraints = 0.5
assert compound_state._find_force_groups_to_update(context, compound_state2, memo={}) == {force_group}
# ==============================================================================
# MAIN
# ==============================================================================
if __name__ == '__main__':
test_restraint_dispatch()
|
choderalab/yank
|
Yank/tests/test_restraints.py
|
Python
|
mit
| 28,233
|
[
"NetCDF",
"OpenMM"
] |
69c8b5b5daf4b8bfd7feb39c013f646277fdbedf0f2cc8e937925b23311f5aac
|
#!/usr/bin/env python
"""Ninja build configurator for mdns library"""
import sys
import os
sys.path.insert( 0, os.path.join( 'build', 'ninja' ) )
import generator
dependlibs = [ 'network', 'foundation' ]
generator = generator.Generator( project = 'mdns', dependlibs = dependlibs, variables = [ ( 'bundleidentifier', 'com.rampantpixels.mdns.$(binname)' ) ] )
target = generator.target
writer = generator.writer
toolchain = generator.toolchain
mdns_lib = generator.lib( module = 'mdns', sources = [
'discovery.c', 'mdns.c', 'query.c', 'record.c', 'response.c', 'service.c', 'socket.c', 'string.c', 'version.c' ] )
#if not target.is_ios() and not target.is_android():
# configs = [ config for config in toolchain.configs if config not in [ 'profile', 'deploy' ] ]
# if not configs == []:
# generator.bin( 'blast', [ 'main.c', 'client.c', 'server.c' ], 'blast', basepath = 'tools', implicit_deps = [ mdns_lib ], libs = [ 'network' ], configs = configs )
includepaths = generator.test_includepaths()
test_cases = [
'dnsds'
]
if target.is_ios() or target.is_android() or target.is_pnacl():
#Build one fat binary with all test cases
test_resources = []
test_extrasources = []
test_cases += [ 'all' ]
if target.is_ios():
test_resources = [ os.path.join( 'all', 'ios', item ) for item in [ 'test-all.plist', 'Images.xcassets', 'test-all.xib' ] ]
elif target.is_android():
test_resources = [ os.path.join( 'all', 'android', item ) for item in [
'AndroidManifest.xml', os.path.join( 'layout', 'main.xml' ), os.path.join( 'values', 'strings.xml' ),
os.path.join( 'drawable-ldpi', 'icon.png' ), os.path.join( 'drawable-mdpi', 'icon.png' ), os.path.join( 'drawable-hdpi', 'icon.png' ),
os.path.join( 'drawable-xhdpi', 'icon.png' ), os.path.join( 'drawable-xxhdpi', 'icon.png' ), os.path.join( 'drawable-xxxhdpi', 'icon.png' )
] ]
test_extrasources = [ os.path.join( 'all', 'android', 'java', 'com', 'rampantpixels', 'foundation', 'test', item ) for item in [
'TestActivity.java'
] ]
if target.is_pnacl():
generator.bin( module = '', sources = [ os.path.join( module, 'main.c' ) for module in test_cases ] + test_extrasources, binname = 'test-all', basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'mdns', 'network', 'test', 'foundation' ], resources = test_resources, includepaths = includepaths )
else:
generator.app( module = '', sources = [ os.path.join( module, 'main.c' ) for module in test_cases ] + test_extrasources, binname = 'test-all', basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'mdns', 'network', 'test', 'foundation' ], resources = test_resources, includepaths = includepaths )
else:
#Build one binary per test case
generator.bin( module = 'all', sources = [ 'main.c' ], binname = 'test-all', basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'network', 'foundation' ], includepaths = includepaths )
for test in test_cases:
generator.bin( module = test, sources = [ 'main.c' ], binname = 'test-' + test, basepath = 'test', implicit_deps = [ mdns_lib ], libs = [ 'test', 'mdns', 'network', 'foundation' ], includepaths = includepaths )
|
rampantpixels/mdns_lib
|
configure.py
|
Python
|
unlicense
| 3,163
|
[
"BLAST"
] |
654e87a2d709657fbde00e65c8b9de1791b83aad4ae0d86f3d83cfc3cf9cffb1
|